diff --git a/.changelog/6991.txt b/.changelog/6991.txt new file mode 100644 index 00000000000..6f0c5cd9c70 --- /dev/null +++ b/.changelog/6991.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +google_alloydb_cluster (GA only) +``` diff --git a/google/alloydb_operation.go b/google/alloydb_operation.go new file mode 100644 index 00000000000..8321e74bed1 --- /dev/null +++ b/google/alloydb_operation.go @@ -0,0 +1,62 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "fmt" + "time" +) + +type AlloydbOperationWaiter struct { + Config *Config + UserAgent string + Project string + CommonOperationWaiter +} + +func (w *AlloydbOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + // Returns the proper get. 
+ url := fmt.Sprintf("%s%s", w.Config.AlloydbBasePath, w.CommonOperationWaiter.Op.Name) + + return sendRequest(w.Config, "GET", w.Project, url, w.UserAgent, nil) +} + +func createAlloydbWaiter(config *Config, op map[string]interface{}, project, activity, userAgent string) (*AlloydbOperationWaiter, error) { + w := &AlloydbOperationWaiter{ + Config: config, + UserAgent: userAgent, + Project: project, + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +func alloydbOperationWaitTime(config *Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + if val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. + return nil + } + w, err := createAlloydbWaiter(config, op, project, activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. + return err + } + return OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/google/config.go b/google/config.go index 4a27bba11a5..1fe8383ee2e 100644 --- a/google/config.go +++ b/google/config.go @@ -174,6 +174,7 @@ type Config struct { AccessApprovalBasePath string AccessContextManagerBasePath string ActiveDirectoryBasePath string + AlloydbBasePath string ApigeeBasePath string AppEngineBasePath string ArtifactRegistryBasePath string @@ -278,6 +279,7 @@ type Config struct { const AccessApprovalBasePathKey = "AccessApproval" const AccessContextManagerBasePathKey = "AccessContextManager" const ActiveDirectoryBasePathKey = "ActiveDirectory" +const AlloydbBasePathKey = "Alloydb" const ApigeeBasePathKey = "Apigee" const AppEngineBasePathKey = "AppEngine" const ArtifactRegistryBasePathKey = "ArtifactRegistry" @@ -376,6 +378,7 @@ var DefaultBasePaths = map[string]string{ AccessApprovalBasePathKey: "https://accessapproval.googleapis.com/v1/", AccessContextManagerBasePathKey: "https://accesscontextmanager.googleapis.com/v1/", 
ActiveDirectoryBasePathKey: "https://managedidentities.googleapis.com/v1/", + AlloydbBasePathKey: "https://alloydb.googleapis.com/v1/", ApigeeBasePathKey: "https://apigee.googleapis.com/v1/", AppEngineBasePathKey: "https://appengine.googleapis.com/v1/", ArtifactRegistryBasePathKey: "https://artifactregistry.googleapis.com/v1/", @@ -1236,6 +1239,7 @@ func ConfigureBasePaths(c *Config) { c.AccessApprovalBasePath = DefaultBasePaths[AccessApprovalBasePathKey] c.AccessContextManagerBasePath = DefaultBasePaths[AccessContextManagerBasePathKey] c.ActiveDirectoryBasePath = DefaultBasePaths[ActiveDirectoryBasePathKey] + c.AlloydbBasePath = DefaultBasePaths[AlloydbBasePathKey] c.ApigeeBasePath = DefaultBasePaths[ApigeeBasePathKey] c.AppEngineBasePath = DefaultBasePaths[AppEngineBasePathKey] c.ArtifactRegistryBasePath = DefaultBasePaths[ArtifactRegistryBasePathKey] diff --git a/google/config_test_utils.go b/google/config_test_utils.go index b375b4ebf44..73640b4d355 100644 --- a/google/config_test_utils.go +++ b/google/config_test_utils.go @@ -21,6 +21,7 @@ func configureTestBasePaths(c *Config, url string) { c.AccessApprovalBasePath = url c.AccessContextManagerBasePath = url c.ActiveDirectoryBasePath = url + c.AlloydbBasePath = url c.ApigeeBasePath = url c.AppEngineBasePath = url c.ArtifactRegistryBasePath = url diff --git a/google/provider.go b/google/provider.go index 5c4513a05f4..0c936e5b465 100644 --- a/google/provider.go +++ b/google/provider.go @@ -173,6 +173,14 @@ func Provider() *schema.Provider { "GOOGLE_ACTIVE_DIRECTORY_CUSTOM_ENDPOINT", }, DefaultBasePaths[ActiveDirectoryBasePathKey]), }, + "alloydb_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateCustomEndpoint, + DefaultFunc: schema.MultiEnvDefaultFunc([]string{ + "GOOGLE_ALLOYDB_CUSTOM_ENDPOINT", + }, DefaultBasePaths[AlloydbBasePathKey]), + }, "apigee_custom_endpoint": { Type: schema.TypeString, Optional: true, @@ -981,9 +989,9 @@ func Provider() *schema.Provider { return 
provider } -// Generated resources: 259 +// Generated resources: 262 // Generated IAM resources: 168 -// Total generated resources: 427 +// Total generated resources: 430 func ResourceMap() map[string]*schema.Resource { resourceMap, _ := ResourceMapWithErrors() return resourceMap @@ -1008,6 +1016,9 @@ func ResourceMapWithErrors() (map[string]*schema.Resource, error) { "google_access_context_manager_access_levels": resourceAccessContextManagerAccessLevels(), "google_active_directory_domain": resourceActiveDirectoryDomain(), "google_active_directory_domain_trust": resourceActiveDirectoryDomainTrust(), + "google_alloydb_cluster": resourceAlloydbCluster(), + "google_alloydb_instance": resourceAlloydbInstance(), + "google_alloydb_backup": resourceAlloydbBackup(), "google_apigee_organization": resourceApigeeOrganization(), "google_apigee_instance": resourceApigeeInstance(), "google_apigee_environment": resourceApigeeEnvironment(), @@ -1633,6 +1644,7 @@ func providerConfigure(ctx context.Context, d *schema.ResourceData, p *schema.Pr config.AccessApprovalBasePath = d.Get("access_approval_custom_endpoint").(string) config.AccessContextManagerBasePath = d.Get("access_context_manager_custom_endpoint").(string) config.ActiveDirectoryBasePath = d.Get("active_directory_custom_endpoint").(string) + config.AlloydbBasePath = d.Get("alloydb_custom_endpoint").(string) config.ApigeeBasePath = d.Get("apigee_custom_endpoint").(string) config.AppEngineBasePath = d.Get("app_engine_custom_endpoint").(string) config.ArtifactRegistryBasePath = d.Get("artifact_registry_custom_endpoint").(string) diff --git a/google/resource_alloydb_backup.go b/google/resource_alloydb_backup.go new file mode 100644 index 00000000000..20f163fc313 --- /dev/null +++ b/google/resource_alloydb_backup.go @@ -0,0 +1,466 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// 
---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func resourceAlloydbBackup() *schema.Resource { + return &schema.Resource{ + Create: resourceAlloydbBackupCreate, + Read: resourceAlloydbBackupRead, + Update: resourceAlloydbBackupUpdate, + Delete: resourceAlloydbBackupDelete, + + Importer: &schema.ResourceImporter{ + State: resourceAlloydbBackupImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "backup_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The ID of the alloydb backup.`, + }, + "cluster_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: projectNumberDiffSuppress, + Description: `The full resource name of the backup source cluster (e.g., projects/{project}/locations/{location}/clusters/{clusterId}).`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `User-provided description of the backup.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `User-defined labels for the alloydb backup.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "location": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The location where the alloydb backup should reside.`, + }, + "create_time": { + Type: 
schema.TypeString, + Computed: true, + Description: `Time the Backup was created in UTC.`, + }, + "etag": { + Type: schema.TypeString, + Computed: true, + Description: `A hash of the resource.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The name of the backup resource with the format: * projects/{project}/locations/{region}/backups/{backupId}`, + }, + "reconciling": { + Type: schema.TypeBool, + Computed: true, + Description: `If true, indicates that the service is actively updating the resource. This can happen due to user-triggered updates or system actions like failover or maintenance.`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `The current state of the backup.`, + }, + "uid": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The system-generated UID of the resource. The UID is assigned when the resource is created, and it is retained until it is deleted.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `Time the Backup was updated in UTC.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceAlloydbBackupCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + clusterNameProp, err := expandAlloydbBackupClusterName(d.Get("cluster_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("cluster_name"); !isEmptyValue(reflect.ValueOf(clusterNameProp)) && (ok || !reflect.DeepEqual(v, clusterNameProp)) { + obj["clusterName"] = clusterNameProp + } + labelsProp, err := expandAlloydbBackupLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); 
!isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + descriptionProp, err := expandAlloydbBackupDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + + obj, err = resourceAlloydbBackupEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := replaceVars(d, config, "{{AlloydbBasePath}}projects/{{project}}/locations/{{location}}/backups?backupId={{backup_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Backup: %#v", obj) + billingProject := "" + + project, err := getProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Backup: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf("Error creating Backup: %s", err) + } + + // Store the ID now + id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/backups/{{backup_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = alloydbOperationWaitTime( + config, res, project, "Creating Backup", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create Backup: %s", err) + } + + log.Printf("[DEBUG] Finished creating Backup %q: %#v", d.Id(), res) + + return resourceAlloydbBackupRead(d, meta) +} + +func resourceAlloydbBackupRead(d *schema.ResourceData, meta interface{}) error { + config := 
meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + url, err := replaceVars(d, config, "{{AlloydbBasePath}}projects/{{project}}/locations/{{location}}/backups/{{backup_id}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := getProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Backup: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("AlloydbBackup %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) + } + + if err := d.Set("name", flattenAlloydbBackupName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) + } + if err := d.Set("uid", flattenAlloydbBackupUid(res["uid"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) + } + if err := d.Set("cluster_name", flattenAlloydbBackupClusterName(res["clusterName"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) + } + if err := d.Set("labels", flattenAlloydbBackupLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) + } + if err := d.Set("create_time", flattenAlloydbBackupCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) + } + if err := d.Set("update_time", flattenAlloydbBackupUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) + } + if err := d.Set("state", flattenAlloydbBackupState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) + 
} + if err := d.Set("description", flattenAlloydbBackupDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) + } + if err := d.Set("reconciling", flattenAlloydbBackupReconciling(res["reconciling"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) + } + if err := d.Set("etag", flattenAlloydbBackupEtag(res["etag"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) + } + + return nil +} + +func resourceAlloydbBackupUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := getProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Backup: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + labelsProp, err := expandAlloydbBackupLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + obj, err = resourceAlloydbBackupEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := replaceVars(d, config, "{{AlloydbBasePath}}projects/{{project}}/locations/{{location}}/backups/{{backup_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Backup %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("labels") { + updateMask = append(updateMask, "labels") + } + // updateMask is a URL parameter but not present in the schema, so replaceVars + // won't set it + url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + 
billingProject = bp + } + + res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return fmt.Errorf("Error updating Backup %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Backup %q: %#v", d.Id(), res) + } + + err = alloydbOperationWaitTime( + config, res, project, "Updating Backup", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceAlloydbBackupRead(d, meta) +} + +func resourceAlloydbBackupDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := getProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Backup: %s", err) + } + billingProject = project + + url, err := replaceVars(d, config, "{{AlloydbBasePath}}projects/{{project}}/locations/{{location}}/backups/{{backup_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Backup %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return handleNotFoundError(err, d, "Backup") + } + + err = alloydbOperationWaitTime( + config, res, project, "Deleting Backup", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Backup %q: %#v", d.Id(), res) + return nil +} + +func resourceAlloydbBackupImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*Config) + if err := parseImportId([]string{ + 
"projects/(?P[^/]+)/locations/(?P[^/]+)/backups/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/backups/{{backup_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenAlloydbBackupName(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenAlloydbBackupUid(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenAlloydbBackupClusterName(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenAlloydbBackupLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenAlloydbBackupCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenAlloydbBackupUpdateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenAlloydbBackupState(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenAlloydbBackupDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenAlloydbBackupReconciling(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenAlloydbBackupEtag(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func expandAlloydbBackupClusterName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandAlloydbBackupLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + 
for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandAlloydbBackupDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func resourceAlloydbBackupEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + // The only other available type is AUTOMATED which cannot be set manually + obj["type"] = "ON_DEMAND" + return obj, nil +} diff --git a/google/resource_alloydb_backup_generated_test.go b/google/resource_alloydb_backup_generated_test.go new file mode 100644 index 00000000000..8b5f9eb1dca --- /dev/null +++ b/google/resource_alloydb_backup_generated_test.go @@ -0,0 +1,131 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package google + +import ( + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func TestAccAlloydbBackup_alloydbBackupFullExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "network_name": BootstrapSharedTestNetwork(t, "alloydb"), + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAlloydbBackupDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccAlloydbBackup_alloydbBackupFullExample(context), + }, + { + ResourceName: "google_alloydb_backup.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"backup_id", "location", "reconciling", "update_time"}, + }, + }, + }) +} + +func testAccAlloydbBackup_alloydbBackupFullExample(context map[string]interface{}) string { + return Nprintf(` +resource "google_alloydb_backup" "default" { + location = "us-central1" + backup_id = "tf-test-alloydb-backup%{random_suffix}" + cluster_name = google_alloydb_cluster.default.name + + description = "example description" + labels = { + "label" = "key" + } + depends_on = [google_alloydb_instance.default] +} + +resource "google_alloydb_cluster" "default" { + cluster_id = "tf-test-alloydb-cluster%{random_suffix}" + location = "us-central1" + network = data.google_compute_network.default.id +} + +resource "google_alloydb_instance" "default" { + cluster = google_alloydb_cluster.default.name + instance_id = "tf-test-alloydb-instance%{random_suffix}" + instance_type = "PRIMARY" + + depends_on = [google_service_networking_connection.vpc_connection] +} + +resource "google_compute_global_address" "private_ip_alloc" { + name = "tf-test-alloydb-cluster%{random_suffix}" + address_type = "INTERNAL" 
+ purpose = "VPC_PEERING" + prefix_length = 16 + network = data.google_compute_network.default.id +} + +resource "google_service_networking_connection" "vpc_connection" { + network = data.google_compute_network.default.id + service = "servicenetworking.googleapis.com" + reserved_peering_ranges = [google_compute_global_address.private_ip_alloc.name] +} + +data "google_compute_network" "default" { + name = "%{network_name}" +} +`, context) +} + +func testAccCheckAlloydbBackupDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_alloydb_backup" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := googleProviderConfig(t) + + url, err := replaceVarsForTest(config, rs, "{{AlloydbBasePath}}projects/{{project}}/locations/{{location}}/backups/{{backup_id}}") + if err != nil { + return err + } + + billingProject := "" + + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + _, err = sendRequest(config, "GET", billingProject, url, config.userAgent, nil) + if err == nil { + return fmt.Errorf("AlloydbBackup still exists at %s", url) + } + } + + return nil + } +} diff --git a/google/resource_alloydb_backup_sweeper_test.go b/google/resource_alloydb_backup_sweeper_test.go new file mode 100644 index 00000000000..0fda3c92553 --- /dev/null +++ b/google/resource_alloydb_backup_sweeper_test.go @@ -0,0 +1,128 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func init() { + resource.AddTestSweepers("AlloydbBackup", &resource.Sweeper{ + Name: "AlloydbBackup", + F: testSweepAlloydbBackup, + }) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepAlloydbBackup(region string) error { + resourceName := "AlloydbBackup" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := getTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://alloydb.googleapis.com/v1/projects/{{project}}/locations/{{location}}/backups", "?")[0] + listUrl, err := replaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := sendRequest(config, "GET", config.Project, listUrl, config.userAgent, nil) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["backups"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", 
len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. + if obj["id"] != nil { + name = GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be sweeped + if !isSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://alloydb.googleapis.com/v1/projects/{{project}}/locations/{{location}}/backups/{{backup_id}}" + deleteUrl, err := replaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = sendRequest(config, "DELETE", config.Project, deleteUrl, config.userAgent, nil) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/google/resource_alloydb_backup_test.go b/google/resource_alloydb_backup_test.go index 71664db3c87..fb2ae795d72 100644 --- a/google/resource_alloydb_backup_test.go +++ b/google/resource_alloydb_backup_test.go @@ -1 +1,92 @@ package google + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccAlloydbBackup_update(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "network_name": BootstrapSharedTestNetwork(t, "alloydb"), + 
"random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAlloydbBackupDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccAlloydbBackup_alloydbBackupFullExample(context), + }, + { + ResourceName: "google_alloydb_backup.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"backup_id", "location", "reconciling", "update_time"}, + }, + { + Config: testAccAlloydbBackup_update(context), + }, + { + ResourceName: "google_alloydb_backup.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"backup_id", "location", "reconciling", "update_time"}, + }, + }, + }) +} + +// Updates "label" field from testAccAlloydbBackup_alloydbBackupFullExample +func testAccAlloydbBackup_update(context map[string]interface{}) string { + return Nprintf(` +resource "google_alloydb_backup" "default" { + location = "us-central1" + backup_id = "tf-test-alloydb-backup%{random_suffix}" + cluster_name = google_alloydb_cluster.default.name + + description = "example description" + labels = { + "label" = "updated_key" + "label2" = "updated_key2" + } + depends_on = [google_alloydb_instance.default] +} + +resource "google_alloydb_cluster" "default" { + cluster_id = "tf-test-alloydb-cluster%{random_suffix}" + location = "us-central1" + network = data.google_compute_network.default.id +} + +resource "google_alloydb_instance" "default" { + cluster = google_alloydb_cluster.default.name + instance_id = "tf-test-alloydb-instance%{random_suffix}" + instance_type = "PRIMARY" + + depends_on = [google_service_networking_connection.vpc_connection] +} + +resource "google_compute_global_address" "private_ip_alloc" { + name = "tf-test-alloydb-cluster%{random_suffix}" + address_type = "INTERNAL" + purpose = "VPC_PEERING" + prefix_length = 16 + network = data.google_compute_network.default.id +} + +resource 
"google_service_networking_connection" "vpc_connection" { + network = data.google_compute_network.default.id + service = "servicenetworking.googleapis.com" + reserved_peering_ranges = [google_compute_global_address.private_ip_alloc.name] +} + +data "google_compute_network" "default" { + name = "%{network_name}" +} +`, context) +} diff --git a/google/resource_alloydb_cluster.go b/google/resource_alloydb_cluster.go new file mode 100644 index 00000000000..41053ab2033 --- /dev/null +++ b/google/resource_alloydb_cluster.go @@ -0,0 +1,1129 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func resourceAlloydbCluster() *schema.Resource { + return &schema.Resource{ + Create: resourceAlloydbClusterCreate, + Read: resourceAlloydbClusterRead, + Update: resourceAlloydbClusterUpdate, + Delete: resourceAlloydbClusterDelete, + + Importer: &schema.ResourceImporter{ + State: resourceAlloydbClusterImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "cluster_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The ID of the alloydb cluster.`, + }, + "network": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: 
projectNumberDiffSuppress, + Description: `The relative resource name of the VPC network on which the instance can be accessed. It is specified in the following form: + +"projects/{projectNumber}/global/networks/{network_id}".`, + }, + "automated_backup_policy": { + Type: schema.TypeList, + Optional: true, + Description: `The automated backup policy for this cluster. + +If no policy is provided then the default policy will be used. The default policy takes one backup a day, has a backup window of 1 hour, and retains backups for 14 days.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "weekly_schedule": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: `Weekly schedule for the Backup.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "start_times": { + Type: schema.TypeList, + Required: true, + Description: `The times during the day to start a backup. At least one start time must be provided. The start times are assumed to be in UTC and to be an exact hour (e.g., 04:00:00).`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "hours": { + Type: schema.TypeInt, + Optional: true, + Description: `Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time.`, + }, + "minutes": { + Type: schema.TypeInt, + Optional: true, + Description: `Minutes of hour of day. Must be from 0 to 59.`, + }, + "nanos": { + Type: schema.TypeInt, + Optional: true, + Description: `Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.`, + }, + "seconds": { + Type: schema.TypeInt, + Optional: true, + Description: `Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds.`, + }, + }, + }, + }, + "days_of_week": { + Type: schema.TypeList, + Optional: true, + Description: `The days of the week to perform a backup. 
At least one day of the week must be provided. Possible values: ["MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"]`, + MinItems: 1, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validateEnum([]string{"MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"}), + }, + }, + }, + }, + }, + "backup_window": { + Type: schema.TypeString, + Optional: true, + Description: `The length of the time window during which a backup can be taken. If a backup does not succeed within this time window, it will be canceled and considered failed. + +The backup window must be at least 5 minutes long. There is no upper bound on the window. If not set, it will default to 1 hour. + +A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".`, + }, + "enabled": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether automated automated backups are enabled.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Labels to apply to backups created using this configuration.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "location": { + Type: schema.TypeString, + Optional: true, + Description: `The location where the backup will be stored. 
Currently, the only supported option is to store the backup in the same region as the cluster.`, + }, + "quantity_based_retention": { + Type: schema.TypeList, + Optional: true, + Description: `Quantity-based Backup retention policy to retain recent backups.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "count": { + Type: schema.TypeInt, + Optional: true, + Description: `The number of backups to retain.`, + }, + }, + }, + ConflictsWith: []string{"automated_backup_policy.0.time_based_retention"}, + }, + "time_based_retention": { + Type: schema.TypeList, + Optional: true, + Description: `Time-based Backup retention policy.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "retention_period": { + Type: schema.TypeString, + Optional: true, + Description: `The retention period. +A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".`, + }, + }, + }, + ConflictsWith: []string{"automated_backup_policy.0.quantity_based_retention"}, + }, + }, + }, + }, + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: `User-settable and human-readable display name for the Cluster.`, + }, + "initial_user": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Initial user to setup during cluster creation.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "password": { + Type: schema.TypeString, + Required: true, + Description: `The initial password for the user.`, + Sensitive: true, + }, + "user": { + Type: schema.TypeString, + Optional: true, + Description: `The database username.`, + }, + }, + }, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `User-defined labels for the alloydb cluster.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "location": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The location where the alloydb 
cluster should reside.`, + }, + "backup_source": { + Type: schema.TypeList, + Computed: true, + Description: `Cluster created from backup.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "backup_name": { + Type: schema.TypeString, + Optional: true, + Description: `The name of the backup resource.`, + }, + }, + }, + }, + "database_version": { + Type: schema.TypeString, + Computed: true, + Description: `The database engine major version. This is an output-only field and it's populated at the Cluster creation time. This field cannot be changed after cluster creation.`, + }, + "migration_source": { + Type: schema.TypeList, + Computed: true, + Description: `Cluster created via DMS migration.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "host_port": { + Type: schema.TypeString, + Optional: true, + Description: `The host and port of the on-premises instance in host:port format`, + }, + "reference_id": { + Type: schema.TypeString, + Optional: true, + Description: `Place holder for the external source identifier(e.g DMS job name) that created the cluster.`, + }, + "source_type": { + Type: schema.TypeString, + Optional: true, + Description: `Type of migration source.`, + }, + }, + }, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The name of the cluster resource.`, + }, + "uid": { + Type: schema.TypeString, + Computed: true, + Description: `The system-generated UID of the resource.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceAlloydbClusterCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + labelsProp, err := expandAlloydbClusterLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := 
d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + networkProp, err := expandAlloydbClusterNetwork(d.Get("network"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("network"); !isEmptyValue(reflect.ValueOf(networkProp)) && (ok || !reflect.DeepEqual(v, networkProp)) { + obj["network"] = networkProp + } + displayNameProp, err := expandAlloydbClusterDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + initialUserProp, err := expandAlloydbClusterInitialUser(d.Get("initial_user"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("initial_user"); !isEmptyValue(reflect.ValueOf(initialUserProp)) && (ok || !reflect.DeepEqual(v, initialUserProp)) { + obj["initialUser"] = initialUserProp + } + automatedBackupPolicyProp, err := expandAlloydbClusterAutomatedBackupPolicy(d.Get("automated_backup_policy"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("automated_backup_policy"); !isEmptyValue(reflect.ValueOf(automatedBackupPolicyProp)) && (ok || !reflect.DeepEqual(v, automatedBackupPolicyProp)) { + obj["automatedBackupPolicy"] = automatedBackupPolicyProp + } + + url, err := replaceVars(d, config, "{{AlloydbBasePath}}projects/{{project}}/locations/{{location}}/clusters?clusterId={{cluster_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Cluster: %#v", obj) + billingProject := "" + + project, err := getProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Cluster: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err 
== nil { + billingProject = bp + } + + res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf("Error creating Cluster: %s", err) + } + + // Store the ID now + id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/clusters/{{cluster_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = alloydbOperationWaitTime( + config, res, project, "Creating Cluster", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create Cluster: %s", err) + } + + log.Printf("[DEBUG] Finished creating Cluster %q: %#v", d.Id(), res) + + return resourceAlloydbClusterRead(d, meta) +} + +func resourceAlloydbClusterRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + url, err := replaceVars(d, config, "{{AlloydbBasePath}}projects/{{project}}/locations/{{location}}/clusters/{{cluster_id}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := getProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Cluster: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("AlloydbCluster %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Cluster: %s", err) + } + + if err := d.Set("name", flattenAlloydbClusterName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Cluster: %s", 
err) + } + if err := d.Set("uid", flattenAlloydbClusterUid(res["uid"], d, config)); err != nil { + return fmt.Errorf("Error reading Cluster: %s", err) + } + if err := d.Set("labels", flattenAlloydbClusterLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Cluster: %s", err) + } + if err := d.Set("network", flattenAlloydbClusterNetwork(res["network"], d, config)); err != nil { + return fmt.Errorf("Error reading Cluster: %s", err) + } + if err := d.Set("display_name", flattenAlloydbClusterDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading Cluster: %s", err) + } + if err := d.Set("database_version", flattenAlloydbClusterDatabaseVersion(res["databaseVersion"], d, config)); err != nil { + return fmt.Errorf("Error reading Cluster: %s", err) + } + if err := d.Set("automated_backup_policy", flattenAlloydbClusterAutomatedBackupPolicy(res["automatedBackupPolicy"], d, config)); err != nil { + return fmt.Errorf("Error reading Cluster: %s", err) + } + if err := d.Set("backup_source", flattenAlloydbClusterBackupSource(res["backupSource"], d, config)); err != nil { + return fmt.Errorf("Error reading Cluster: %s", err) + } + if err := d.Set("migration_source", flattenAlloydbClusterMigrationSource(res["migrationSource"], d, config)); err != nil { + return fmt.Errorf("Error reading Cluster: %s", err) + } + + return nil +} + +func resourceAlloydbClusterUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := getProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Cluster: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + labelsProp, err := expandAlloydbClusterLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); 
// resourceAlloydbClusterUpdate PATCHes only the changed updatable fields
// (labels, network, display_name, automated_backup_policy), using an
// updateMask so untouched server-side fields are preserved, then waits on the
// resulting long-running operation.
func resourceAlloydbClusterUpdate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	userAgent, err := generateUserAgentString(d, config.userAgent)
	if err != nil {
		return err
	}

	billingProject := ""

	project, err := getProject(d, config)
	if err != nil {
		return fmt.Errorf("Error fetching project for Cluster: %s", err)
	}
	billingProject = project

	// Build the patch body from the updatable fields.
	obj := make(map[string]interface{})
	labelsProp, err := expandAlloydbClusterLabels(d.Get("labels"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) {
		obj["labels"] = labelsProp
	}
	networkProp, err := expandAlloydbClusterNetwork(d.Get("network"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("network"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, networkProp)) {
		obj["network"] = networkProp
	}
	displayNameProp, err := expandAlloydbClusterDisplayName(d.Get("display_name"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) {
		obj["displayName"] = displayNameProp
	}
	automatedBackupPolicyProp, err := expandAlloydbClusterAutomatedBackupPolicy(d.Get("automated_backup_policy"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("automated_backup_policy"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, automatedBackupPolicyProp)) {
		obj["automatedBackupPolicy"] = automatedBackupPolicyProp
	}

	url, err := replaceVars(d, config, "{{AlloydbBasePath}}projects/{{project}}/locations/{{location}}/clusters/{{cluster_id}}")
	if err != nil {
		return err
	}

	log.Printf("[DEBUG] Updating Cluster %q: %#v", d.Id(), obj)
	// The mask names use the API's camelCase field names, mirroring obj.
	updateMask := []string{}

	if d.HasChange("labels") {
		updateMask = append(updateMask, "labels")
	}

	if d.HasChange("network") {
		updateMask = append(updateMask, "network")
	}

	if d.HasChange("display_name") {
		updateMask = append(updateMask, "displayName")
	}

	if d.HasChange("automated_backup_policy") {
		updateMask = append(updateMask, "automatedBackupPolicy")
	}
	// updateMask is a URL parameter but not present in the schema, so replaceVars
	// won't set it
	url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")})
	if err != nil {
		return err
	}

	// err == nil indicates that the billing_project value was found
	if bp, err := getBillingProject(d, config); err == nil {
		billingProject = bp
	}

	res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate))

	if err != nil {
		return fmt.Errorf("Error updating Cluster %q: %s", d.Id(), err)
	} else {
		log.Printf("[DEBUG] Finished updating Cluster %q: %#v", d.Id(), res)
	}

	err = alloydbOperationWaitTime(
		config, res, project, "Updating Cluster", userAgent,
		d.Timeout(schema.TimeoutUpdate))

	if err != nil {
		return err
	}

	return resourceAlloydbClusterRead(d, meta)
}

// resourceAlloydbClusterDelete removes the cluster and waits for the delete
// operation to complete. A 404 during delete is treated as already gone.
func resourceAlloydbClusterDelete(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	userAgent, err := generateUserAgentString(d, config.userAgent)
	if err != nil {
		return err
	}

	billingProject := ""

	project, err := getProject(d, config)
	if err != nil {
		return fmt.Errorf("Error fetching project for Cluster: %s", err)
	}
	billingProject = project

	url, err := replaceVars(d, config, "{{AlloydbBasePath}}projects/{{project}}/locations/{{location}}/clusters/{{cluster_id}}")
	if err != nil {
		return err
	}

	// DELETE takes no request body; obj stays nil.
	var obj map[string]interface{}
	log.Printf("[DEBUG] Deleting Cluster %q", d.Id())

	// err == nil indicates that the billing_project value was found
	if bp, err := getBillingProject(d, config); err == nil {
		billingProject = bp
	}

	res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete))
	if err != nil {
		return handleNotFoundError(err, d, "Cluster")
	}

	err = alloydbOperationWaitTime(
		config, res, project, "Deleting Cluster", userAgent,
		d.Timeout(schema.TimeoutDelete))

	if err != nil {
		return err
	}

	log.Printf("[DEBUG] Finished deleting Cluster %q: %#v", d.Id(), res)
	return nil
}
parseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/clusters/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/clusters/{{cluster_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenAlloydbClusterName(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenAlloydbClusterUid(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenAlloydbClusterLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenAlloydbClusterNetwork(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenAlloydbClusterDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenAlloydbClusterDatabaseVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenAlloydbClusterAutomatedBackupPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["backup_window"] = + flattenAlloydbClusterAutomatedBackupPolicyBackupWindow(original["backupWindow"], d, config) + transformed["location"] = + flattenAlloydbClusterAutomatedBackupPolicyLocation(original["location"], d, config) + transformed["labels"] = + flattenAlloydbClusterAutomatedBackupPolicyLabels(original["labels"], d, config) + transformed["weekly_schedule"] = + flattenAlloydbClusterAutomatedBackupPolicyWeeklySchedule(original["weeklySchedule"], d, config) + 
transformed["time_based_retention"] = + flattenAlloydbClusterAutomatedBackupPolicyTimeBasedRetention(original["timeBasedRetention"], d, config) + transformed["quantity_based_retention"] = + flattenAlloydbClusterAutomatedBackupPolicyQuantityBasedRetention(original["quantityBasedRetention"], d, config) + transformed["enabled"] = + flattenAlloydbClusterAutomatedBackupPolicyEnabled(original["enabled"], d, config) + return []interface{}{transformed} +} +func flattenAlloydbClusterAutomatedBackupPolicyBackupWindow(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenAlloydbClusterAutomatedBackupPolicyLocation(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenAlloydbClusterAutomatedBackupPolicyLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenAlloydbClusterAutomatedBackupPolicyWeeklySchedule(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["days_of_week"] = + flattenAlloydbClusterAutomatedBackupPolicyWeeklyScheduleDaysOfWeek(original["daysOfWeek"], d, config) + transformed["start_times"] = + flattenAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimes(original["startTimes"], d, config) + return []interface{}{transformed} +} +func flattenAlloydbClusterAutomatedBackupPolicyWeeklyScheduleDaysOfWeek(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimes(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not 
include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "hours": flattenAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimesHours(original["hours"], d, config), + "minutes": flattenAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimesMinutes(original["minutes"], d, config), + "seconds": flattenAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimesSeconds(original["seconds"], d, config), + "nanos": flattenAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimesNanos(original["nanos"], d, config), + }) + } + return transformed +} +func flattenAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimesHours(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimesMinutes(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimesSeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := 
v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimesNanos(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenAlloydbClusterAutomatedBackupPolicyTimeBasedRetention(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["retention_period"] = + flattenAlloydbClusterAutomatedBackupPolicyTimeBasedRetentionRetentionPeriod(original["retentionPeriod"], d, config) + return []interface{}{transformed} +} +func flattenAlloydbClusterAutomatedBackupPolicyTimeBasedRetentionRetentionPeriod(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenAlloydbClusterAutomatedBackupPolicyQuantityBasedRetention(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["count"] = + flattenAlloydbClusterAutomatedBackupPolicyQuantityBasedRetentionCount(original["count"], d, config) + return []interface{}{transformed} +} +func flattenAlloydbClusterAutomatedBackupPolicyQuantityBasedRetentionCount(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := 
stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenAlloydbClusterAutomatedBackupPolicyEnabled(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenAlloydbClusterBackupSource(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["backup_name"] = + flattenAlloydbClusterBackupSourceBackupName(original["backupName"], d, config) + return []interface{}{transformed} +} +func flattenAlloydbClusterBackupSourceBackupName(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenAlloydbClusterMigrationSource(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["host_port"] = + flattenAlloydbClusterMigrationSourceHostPort(original["hostPort"], d, config) + transformed["reference_id"] = + flattenAlloydbClusterMigrationSourceReferenceId(original["referenceId"], d, config) + transformed["source_type"] = + flattenAlloydbClusterMigrationSourceSourceType(original["sourceType"], d, config) + return []interface{}{transformed} +} +func flattenAlloydbClusterMigrationSourceHostPort(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenAlloydbClusterMigrationSourceReferenceId(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenAlloydbClusterMigrationSourceSourceType(v interface{}, d *schema.ResourceData, config *Config) 
interface{} { + return v +} + +func expandAlloydbClusterLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandAlloydbClusterNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandAlloydbClusterDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandAlloydbClusterInitialUser(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedUser, err := expandAlloydbClusterInitialUserUser(original["user"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUser); val.IsValid() && !isEmptyValue(val) { + transformed["user"] = transformedUser + } + + transformedPassword, err := expandAlloydbClusterInitialUserPassword(original["password"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPassword); val.IsValid() && !isEmptyValue(val) { + transformed["password"] = transformedPassword + } + + return transformed, nil +} + +func expandAlloydbClusterInitialUserUser(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandAlloydbClusterInitialUserPassword(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandAlloydbClusterAutomatedBackupPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] 
+ original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedBackupWindow, err := expandAlloydbClusterAutomatedBackupPolicyBackupWindow(original["backup_window"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBackupWindow); val.IsValid() && !isEmptyValue(val) { + transformed["backupWindow"] = transformedBackupWindow + } + + transformedLocation, err := expandAlloydbClusterAutomatedBackupPolicyLocation(original["location"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLocation); val.IsValid() && !isEmptyValue(val) { + transformed["location"] = transformedLocation + } + + transformedLabels, err := expandAlloydbClusterAutomatedBackupPolicyLabels(original["labels"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLabels); val.IsValid() && !isEmptyValue(val) { + transformed["labels"] = transformedLabels + } + + transformedWeeklySchedule, err := expandAlloydbClusterAutomatedBackupPolicyWeeklySchedule(original["weekly_schedule"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWeeklySchedule); val.IsValid() && !isEmptyValue(val) { + transformed["weeklySchedule"] = transformedWeeklySchedule + } + + transformedTimeBasedRetention, err := expandAlloydbClusterAutomatedBackupPolicyTimeBasedRetention(original["time_based_retention"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTimeBasedRetention); val.IsValid() && !isEmptyValue(val) { + transformed["timeBasedRetention"] = transformedTimeBasedRetention + } + + transformedQuantityBasedRetention, err := expandAlloydbClusterAutomatedBackupPolicyQuantityBasedRetention(original["quantity_based_retention"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedQuantityBasedRetention); val.IsValid() && !isEmptyValue(val) { + 
transformed["quantityBasedRetention"] = transformedQuantityBasedRetention + } + + transformedEnabled, err := expandAlloydbClusterAutomatedBackupPolicyEnabled(original["enabled"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEnabled); val.IsValid() && !isEmptyValue(val) { + transformed["enabled"] = transformedEnabled + } + + return transformed, nil +} + +func expandAlloydbClusterAutomatedBackupPolicyBackupWindow(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandAlloydbClusterAutomatedBackupPolicyLocation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandAlloydbClusterAutomatedBackupPolicyLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandAlloydbClusterAutomatedBackupPolicyWeeklySchedule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDaysOfWeek, err := expandAlloydbClusterAutomatedBackupPolicyWeeklyScheduleDaysOfWeek(original["days_of_week"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDaysOfWeek); val.IsValid() && !isEmptyValue(val) { + transformed["daysOfWeek"] = transformedDaysOfWeek + } + + transformedStartTimes, err := expandAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimes(original["start_times"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStartTimes); val.IsValid() && !isEmptyValue(val) { + transformed["startTimes"] = 
transformedStartTimes + } + + return transformed, nil +} + +func expandAlloydbClusterAutomatedBackupPolicyWeeklyScheduleDaysOfWeek(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedHours, err := expandAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimesHours(original["hours"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHours); val.IsValid() && !isEmptyValue(val) { + transformed["hours"] = transformedHours + } + + transformedMinutes, err := expandAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimesMinutes(original["minutes"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinutes); val.IsValid() && !isEmptyValue(val) { + transformed["minutes"] = transformedMinutes + } + + transformedSeconds, err := expandAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimesSeconds(original["seconds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { + transformed["seconds"] = transformedSeconds + } + + transformedNanos, err := expandAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimesNanos(original["nanos"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { + transformed["nanos"] = transformedNanos + } + + req = append(req, transformed) + } + return req, nil +} + +func expandAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimesHours(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimesMinutes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimesSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimesNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandAlloydbClusterAutomatedBackupPolicyTimeBasedRetention(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedRetentionPeriod, err := expandAlloydbClusterAutomatedBackupPolicyTimeBasedRetentionRetentionPeriod(original["retention_period"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRetentionPeriod); val.IsValid() && !isEmptyValue(val) { + transformed["retentionPeriod"] = transformedRetentionPeriod + } + + return transformed, nil +} + +func expandAlloydbClusterAutomatedBackupPolicyTimeBasedRetentionRetentionPeriod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandAlloydbClusterAutomatedBackupPolicyQuantityBasedRetention(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedCount, err := expandAlloydbClusterAutomatedBackupPolicyQuantityBasedRetentionCount(original["count"], d, config) + if err != nil { 
+ return nil, err + } else if val := reflect.ValueOf(transformedCount); val.IsValid() && !isEmptyValue(val) { + transformed["count"] = transformedCount + } + + return transformed, nil +} + +func expandAlloydbClusterAutomatedBackupPolicyQuantityBasedRetentionCount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandAlloydbClusterAutomatedBackupPolicyEnabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} diff --git a/google/resource_alloydb_cluster_generated_test.go b/google/resource_alloydb_cluster_generated_test.go new file mode 100644 index 00000000000..96fc53dae7d --- /dev/null +++ b/google/resource_alloydb_cluster_generated_test.go @@ -0,0 +1,173 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package google + +import ( + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func TestAccAlloydbCluster_alloydbClusterBasicExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAlloydbClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccAlloydbCluster_alloydbClusterBasicExample(context), + }, + { + ResourceName: "google_alloydb_cluster.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"initial_user", "cluster_id", "location"}, + }, + }, + }) +} + +func testAccAlloydbCluster_alloydbClusterBasicExample(context map[string]interface{}) string { + return Nprintf(` +resource "google_alloydb_cluster" "default" { + cluster_id = "tf-test-alloydb-cluster%{random_suffix}" + location = "us-central1" + network = "projects/${data.google_project.project.number}/global/networks/${google_compute_network.default.name}" +} + +data "google_project" "project" {} + +resource "google_compute_network" "default" { + name = "tf-test-alloydb-cluster%{random_suffix}" +} +`, context) +} + +func TestAccAlloydbCluster_alloydbClusterFullExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAlloydbClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccAlloydbCluster_alloydbClusterFullExample(context), + }, + { + ResourceName: "google_alloydb_cluster.full", + ImportState: true, + ImportStateVerify: true, + 
ImportStateVerifyIgnore: []string{"initial_user", "cluster_id", "location"}, + }, + }, + }) +} + +func testAccAlloydbCluster_alloydbClusterFullExample(context map[string]interface{}) string { + return Nprintf(` +resource "google_alloydb_cluster" "full" { + cluster_id = "tf-test-alloydb-cluster-full%{random_suffix}" + location = "us-central1" + network = "projects/${data.google_project.project.number}/global/networks/${google_compute_network.default.name}" + + initial_user { + user = "tf-test-alloydb-cluster-full%{random_suffix}" + password = "tf-test-alloydb-cluster-full%{random_suffix}" + } + + automated_backup_policy { + location = "us-central1" + backup_window = "1800s" + enabled = true + + weekly_schedule { + days_of_week = ["MONDAY"] + + start_times { + hours = 23 + minutes = 0 + seconds = 0 + nanos = 0 + } + } + + quantity_based_retention { + count = 1 + } + + labels = { + test = "tf-test-alloydb-cluster-full%{random_suffix}" + } + } + + labels = { + test = "tf-test-alloydb-cluster-full%{random_suffix}" + } +} + +data "google_project" "project" {} + +resource "google_compute_network" "default" { + name = "tf-test-alloydb-cluster-full%{random_suffix}" +} +`, context) +} + +func testAccCheckAlloydbClusterDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_alloydb_cluster" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := googleProviderConfig(t) + + url, err := replaceVarsForTest(config, rs, "{{AlloydbBasePath}}projects/{{project}}/locations/{{location}}/clusters/{{cluster_id}}") + if err != nil { + return err + } + + billingProject := "" + + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + _, err = sendRequest(config, "GET", billingProject, url, config.userAgent, nil) + if err == nil { + return fmt.Errorf("AlloydbCluster still exists at %s", url) + } + } + + return 
nil + } +} diff --git a/google/resource_alloydb_cluster_sweeper_test.go b/google/resource_alloydb_cluster_sweeper_test.go new file mode 100644 index 00000000000..2685a3316ff --- /dev/null +++ b/google/resource_alloydb_cluster_sweeper_test.go @@ -0,0 +1,128 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func init() { + resource.AddTestSweepers("AlloydbCluster", &resource.Sweeper{ + Name: "AlloydbCluster", + F: testSweepAlloydbCluster, + }) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepAlloydbCluster(region string) error { + resourceName := "AlloydbCluster" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := getTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := 
strings.Split("https://alloydb.googleapis.com/v1/projects/{{project}}/locations/{{location}}/clusters", "?")[0]
+ listUrl, err := replaceVars(d, config, listTemplate)
+ if err != nil {
+ log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err)
+ return nil
+ }
+
+ res, err := sendRequest(config, "GET", config.Project, listUrl, config.userAgent, nil)
+ if err != nil {
+ log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err)
+ return nil
+ }
+
+ resourceList, ok := res["clusters"]
+ if !ok {
+ log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.")
+ return nil
+ }
+
+ rl := resourceList.([]interface{})
+
+ log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName)
+ // Keep count of items that aren't sweepable for logging.
+ nonPrefixCount := 0
+ for _, ri := range rl {
+ obj := ri.(map[string]interface{})
+ var name string
+ // If an ID is present, prefer it for building the delete URL; fall back to name.
+ if obj["id"] != nil {
+ name = GetResourceNameFromSelfLink(obj["id"].(string))
+ } else if obj["name"] != nil {
+ name = GetResourceNameFromSelfLink(obj["name"].(string))
+ } else {
+ log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName)
+ return nil
+ }
+ // Skip resources that shouldn't be swept
+ if !isSweepableTestResource(name) {
+ nonPrefixCount++
+ continue
+ }
+
+ deleteTemplate := "https://alloydb.googleapis.com/v1/projects/{{project}}/locations/{{location}}/clusters/{{cluster_id}}"
+ deleteUrl, err := replaceVars(d, config, deleteTemplate)
+ if err != nil {
+ log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err)
+ return nil
+ }
+ deleteUrl = deleteUrl + name
+
+ // Don't wait on operations as we may have a lot to delete
+ _, err = sendRequest(config, "DELETE", config.Project, deleteUrl, config.userAgent, nil)
+ if err != nil {
+ log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err)
+ } else {
+ 
log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/google/resource_alloydb_cluster_test.go b/google/resource_alloydb_cluster_test.go index 71664db3c87..51b8534ed03 100644 --- a/google/resource_alloydb_cluster_test.go +++ b/google/resource_alloydb_cluster_test.go @@ -1 +1,69 @@ package google + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccAlloydbCluster_update(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAlloydbClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccAlloydbCluster_alloydbClusterBasicExample(context), + }, + { + ResourceName: "google_alloydb_cluster.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"initial_user", "cluster_id", "location"}, + }, + { + Config: testAccAlloydbCluster_update(context), + }, + { + ResourceName: "google_alloydb_cluster.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"initial_user", "cluster_id", "location"}, + }, + { + Config: testAccAlloydbCluster_alloydbClusterBasicExample(context), + }, + }, + }) +} + +func testAccAlloydbCluster_update(context map[string]interface{}) string { + return Nprintf(` +resource "google_alloydb_cluster" "default" { + cluster_id = "tf-test-alloydb-cluster%{random_suffix}" + location = "us-central1" + network = "projects/${data.google_project.project.number}/global/networks/${google_compute_network.default.name}" + + labels = { + update = "true" + } + + lifecycle { + prevent_destroy = true + } +} + +data "google_project" 
"project" { +} + +resource "google_compute_network" "default" { + name = "tf-test-alloydb-cluster%{random_suffix}" +} +`, context) +} diff --git a/google/resource_alloydb_instance.go b/google/resource_alloydb_instance.go new file mode 100644 index 00000000000..807068c9c34 --- /dev/null +++ b/google/resource_alloydb_instance.go @@ -0,0 +1,745 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func resourceAlloydbInstance() *schema.Resource { + return &schema.Resource{ + Create: resourceAlloydbInstanceCreate, + Read: resourceAlloydbInstanceRead, + Update: resourceAlloydbInstanceUpdate, + Delete: resourceAlloydbInstanceDelete, + + Importer: &schema.ResourceImporter{ + State: resourceAlloydbInstanceImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "cluster": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: `Identifies the alloydb cluster. 
Must be in the format +'projects/{project}/locations/{location}/clusters/{cluster_id}'`, + }, + "instance_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The ID of the alloydb instance.`, + }, + "instance_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateEnum([]string{"PRIMARY", "READ_POOL"}), + Description: `The type of the instance. Possible values: ["PRIMARY", "READ_POOL"]`, + }, + "annotations": { + Type: schema.TypeMap, + Optional: true, + Description: `Annotations to allow client tools to store small amount of arbitrary data. This is distinct from labels.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "availability_type": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateFunc: validateEnum([]string{"AVAILABILITY_TYPE_UNSPECIFIED", "ZONAL", "REGIONAL", ""}), + Description: `Availability type of an Instance. Defaults to REGIONAL for both primary and read instances. Note that primary and read instances can have different availability types. Possible values: ["AVAILABILITY_TYPE_UNSPECIFIED", "ZONAL", "REGIONAL"]`, + }, + "database_flags": { + Type: schema.TypeMap, + Optional: true, + Description: `Database flags. Set at instance level. * They are copied from primary instance on read instance creation. * Read instances can set new or override existing flags that are relevant for reads, e.g. for enabling columnar cache on a read instance. Flags set on read instance may or may not be present on primary.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: `User-settable and human-readable display name for the Instance.`, + }, + "gce_zone": { + Type: schema.TypeString, + Optional: true, + Description: `The Compute Engine zone that the instance should serve from, per https://cloud.google.com/compute/docs/regions-zones This can ONLY be specified for ZONAL instances. 
If present for a REGIONAL instance, an error will be thrown. If this is absent for a ZONAL instance, instance is created in a random zone with available capacity.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `User-defined labels for the alloydb instance.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "machine_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `Configurations for the machines that host the underlying database engine.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cpu_count": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `The number of CPU's in the VM instance.`, + }, + }, + }, + }, + "read_pool_config": { + Type: schema.TypeList, + Optional: true, + Description: `Read pool specific config.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "node_count": { + Type: schema.TypeInt, + Optional: true, + Description: `Read capacity, i.e. number of nodes in a read pool instance.`, + }, + }, + }, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Time the Instance was created in UTC.`, + }, + "ip_address": { + Type: schema.TypeString, + Computed: true, + Description: `The IP address for the Instance. This is the connection endpoint for an end-user application.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The name of the instance resource.`, + }, + "reconciling": { + Type: schema.TypeBool, + Computed: true, + Description: `Set to true if the current state of Instance does not match the user's intended state, and the service is actively updating the resource to reconcile them. 
This can happen due to user-triggered updates or system actions like failover or maintenance.`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `The current state of the alloydb instance.`, + }, + "uid": { + Type: schema.TypeString, + Computed: true, + Description: `The system-generated UID of the resource.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `Time the Instance was updated in UTC.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceAlloydbInstanceCreate(d *schema.ResourceData, meta interface{}) error { + var project string + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + labelsProp, err := expandAlloydbInstanceLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + annotationsProp, err := expandAlloydbInstanceAnnotations(d.Get("annotations"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("annotations"); !isEmptyValue(reflect.ValueOf(annotationsProp)) && (ok || !reflect.DeepEqual(v, annotationsProp)) { + obj["annotations"] = annotationsProp + } + displayNameProp, err := expandAlloydbInstanceDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + gceZoneProp, err := expandAlloydbInstanceGceZone(d.Get("gce_zone"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("gce_zone"); !isEmptyValue(reflect.ValueOf(gceZoneProp)) && (ok || !reflect.DeepEqual(v, gceZoneProp)) { + obj["gceZone"] = gceZoneProp + } + 
databaseFlagsProp, err := expandAlloydbInstanceDatabaseFlags(d.Get("database_flags"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("database_flags"); !isEmptyValue(reflect.ValueOf(databaseFlagsProp)) && (ok || !reflect.DeepEqual(v, databaseFlagsProp)) { + obj["databaseFlags"] = databaseFlagsProp + } + availabilityTypeProp, err := expandAlloydbInstanceAvailabilityType(d.Get("availability_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("availability_type"); !isEmptyValue(reflect.ValueOf(availabilityTypeProp)) && (ok || !reflect.DeepEqual(v, availabilityTypeProp)) { + obj["availabilityType"] = availabilityTypeProp + } + instanceTypeProp, err := expandAlloydbInstanceInstanceType(d.Get("instance_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("instance_type"); !isEmptyValue(reflect.ValueOf(instanceTypeProp)) && (ok || !reflect.DeepEqual(v, instanceTypeProp)) { + obj["instanceType"] = instanceTypeProp + } + readPoolConfigProp, err := expandAlloydbInstanceReadPoolConfig(d.Get("read_pool_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("read_pool_config"); !isEmptyValue(reflect.ValueOf(readPoolConfigProp)) && (ok || !reflect.DeepEqual(v, readPoolConfigProp)) { + obj["readPoolConfig"] = readPoolConfigProp + } + machineConfigProp, err := expandAlloydbInstanceMachineConfig(d.Get("machine_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("machine_config"); !isEmptyValue(reflect.ValueOf(machineConfigProp)) && (ok || !reflect.DeepEqual(v, machineConfigProp)) { + obj["machineConfig"] = machineConfigProp + } + + url, err := replaceVars(d, config, "{{AlloydbBasePath}}{{cluster}}/instances?instanceId={{instance_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Instance: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if 
bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf("Error creating Instance: %s", err) + } + + // Store the ID now + id, err := replaceVars(d, config, "{{cluster}}/instances/{{instance_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = alloydbOperationWaitTime( + config, res, project, "Creating Instance", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create Instance: %s", err) + } + + log.Printf("[DEBUG] Finished creating Instance %q: %#v", d.Id(), res) + + return resourceAlloydbInstanceRead(d, meta) +} + +func resourceAlloydbInstanceRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + url, err := replaceVars(d, config, "{{AlloydbBasePath}}{{cluster}}/instances/{{instance_id}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("AlloydbInstance %q", d.Id())) + } + + if err := d.Set("name", flattenAlloydbInstanceName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("create_time", flattenAlloydbInstanceCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("update_time", flattenAlloydbInstanceUpdateTime(res["updateTime"], d, config)); 
err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("uid", flattenAlloydbInstanceUid(res["uid"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("labels", flattenAlloydbInstanceLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("annotations", flattenAlloydbInstanceAnnotations(res["annotations"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("state", flattenAlloydbInstanceState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("gce_zone", flattenAlloydbInstanceGceZone(res["gceZone"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("reconciling", flattenAlloydbInstanceReconciling(res["reconciling"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("database_flags", flattenAlloydbInstanceDatabaseFlags(res["databaseFlags"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("availability_type", flattenAlloydbInstanceAvailabilityType(res["availabilityType"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("instance_type", flattenAlloydbInstanceInstanceType(res["instanceType"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("ip_address", flattenAlloydbInstanceIpAddress(res["ipAddress"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("read_pool_config", flattenAlloydbInstanceReadPoolConfig(res["readPoolConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("machine_config", 
flattenAlloydbInstanceMachineConfig(res["machineConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + + return nil +} + +func resourceAlloydbInstanceUpdate(d *schema.ResourceData, meta interface{}) error { + var project string + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + labelsProp, err := expandAlloydbInstanceLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + annotationsProp, err := expandAlloydbInstanceAnnotations(d.Get("annotations"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("annotations"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, annotationsProp)) { + obj["annotations"] = annotationsProp + } + displayNameProp, err := expandAlloydbInstanceDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + gceZoneProp, err := expandAlloydbInstanceGceZone(d.Get("gce_zone"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("gce_zone"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, gceZoneProp)) { + obj["gceZone"] = gceZoneProp + } + databaseFlagsProp, err := expandAlloydbInstanceDatabaseFlags(d.Get("database_flags"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("database_flags"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, databaseFlagsProp)) { + obj["databaseFlags"] = databaseFlagsProp + } + availabilityTypeProp, err := 
expandAlloydbInstanceAvailabilityType(d.Get("availability_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("availability_type"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, availabilityTypeProp)) { + obj["availabilityType"] = availabilityTypeProp + } + readPoolConfigProp, err := expandAlloydbInstanceReadPoolConfig(d.Get("read_pool_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("read_pool_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, readPoolConfigProp)) { + obj["readPoolConfig"] = readPoolConfigProp + } + machineConfigProp, err := expandAlloydbInstanceMachineConfig(d.Get("machine_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("machine_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, machineConfigProp)) { + obj["machineConfig"] = machineConfigProp + } + + url, err := replaceVars(d, config, "{{AlloydbBasePath}}{{cluster}}/instances/{{instance_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Instance %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("labels") { + updateMask = append(updateMask, "labels") + } + + if d.HasChange("annotations") { + updateMask = append(updateMask, "annotations") + } + + if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") + } + + if d.HasChange("gce_zone") { + updateMask = append(updateMask, "gceZone") + } + + if d.HasChange("database_flags") { + updateMask = append(updateMask, "databaseFlags") + } + + if d.HasChange("availability_type") { + updateMask = append(updateMask, "availabilityType") + } + + if d.HasChange("read_pool_config") { + updateMask = append(updateMask, "readPoolConfig") + } + + if d.HasChange("machine_config") { + updateMask = append(updateMask, "machineConfig") + } + // updateMask is a URL parameter but not present in the schema, so replaceVars + // won't 
set it + url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return fmt.Errorf("Error updating Instance %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Instance %q: %#v", d.Id(), res) + } + + err = alloydbOperationWaitTime( + config, res, project, "Updating Instance", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceAlloydbInstanceRead(d, meta) +} + +func resourceAlloydbInstanceDelete(d *schema.ResourceData, meta interface{}) error { + var project string + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := replaceVars(d, config, "{{AlloydbBasePath}}{{cluster}}/instances/{{instance_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Instance %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return handleNotFoundError(err, d, "Instance") + } + + err = alloydbOperationWaitTime( + config, res, project, "Deleting Instance", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Instance %q: %#v", d.Id(), res) + return nil +} + +func resourceAlloydbInstanceImport(d *schema.ResourceData, meta interface{}) 
([]*schema.ResourceData, error) { + config := meta.(*Config) + + // current import_formats can't import fields with forward slashes in their value + if err := parseImportId([]string{ + "(?P<cluster>.+)/instances/(?P<instance_id>[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := replaceVars(d, config, "{{cluster}}/instances/{{instance_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenAlloydbInstanceName(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenAlloydbInstanceCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenAlloydbInstanceUpdateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenAlloydbInstanceUid(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenAlloydbInstanceLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenAlloydbInstanceAnnotations(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenAlloydbInstanceState(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenAlloydbInstanceGceZone(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenAlloydbInstanceReconciling(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenAlloydbInstanceDatabaseFlags(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenAlloydbInstanceAvailabilityType(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenAlloydbInstanceInstanceType(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return
v +} + +func flattenAlloydbInstanceIpAddress(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenAlloydbInstanceReadPoolConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["node_count"] = + flattenAlloydbInstanceReadPoolConfigNodeCount(original["nodeCount"], d, config) + return []interface{}{transformed} +} +func flattenAlloydbInstanceReadPoolConfigNodeCount(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenAlloydbInstanceMachineConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["cpu_count"] = + flattenAlloydbInstanceMachineConfigCpuCount(original["cpuCount"], d, config) + return []interface{}{transformed} +} +func flattenAlloydbInstanceMachineConfigCpuCount(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func expandAlloydbInstanceLabels(v interface{}, d TerraformResourceData, config 
*Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandAlloydbInstanceAnnotations(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandAlloydbInstanceDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandAlloydbInstanceGceZone(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandAlloydbInstanceDatabaseFlags(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandAlloydbInstanceAvailabilityType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandAlloydbInstanceInstanceType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandAlloydbInstanceReadPoolConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedNodeCount, err := expandAlloydbInstanceReadPoolConfigNodeCount(original["node_count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNodeCount); val.IsValid() && !isEmptyValue(val) { + transformed["nodeCount"] = transformedNodeCount + } 
+ + return transformed, nil +} + +func expandAlloydbInstanceReadPoolConfigNodeCount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandAlloydbInstanceMachineConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedCpuCount, err := expandAlloydbInstanceMachineConfigCpuCount(original["cpu_count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCpuCount); val.IsValid() && !isEmptyValue(val) { + transformed["cpuCount"] = transformedCpuCount + } + + return transformed, nil +} + +func expandAlloydbInstanceMachineConfigCpuCount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} diff --git a/google/resource_alloydb_instance_generated_test.go b/google/resource_alloydb_instance_generated_test.go new file mode 100644 index 00000000000..43498b66de7 --- /dev/null +++ b/google/resource_alloydb_instance_generated_test.go @@ -0,0 +1,128 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package google + +import ( + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func TestAccAlloydbInstance_alloydbInstanceBasicExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAlloydbInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccAlloydbInstance_alloydbInstanceBasicExample(context), + }, + { + ResourceName: "google_alloydb_instance.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"display_name", "cluster", "instance_id", "reconciling", "update_time"}, + }, + }, + }) +} + +func testAccAlloydbInstance_alloydbInstanceBasicExample(context map[string]interface{}) string { + return Nprintf(` +resource "google_alloydb_instance" "default" { + cluster = google_alloydb_cluster.default.name + instance_id = "tf-test-alloydb-instance%{random_suffix}" + instance_type = "PRIMARY" + + machine_config { + cpu_count = 2 + } + + depends_on = [google_service_networking_connection.vpc_connection] +} + +resource "google_alloydb_cluster" "default" { + cluster_id = "tf-test-alloydb-cluster%{random_suffix}" + location = "us-central1" + network = "projects/${data.google_project.project.number}/global/networks/${google_compute_network.default.name}" + + initial_user { + password = "tf-test-alloydb-cluster%{random_suffix}" + } +} + +data "google_project" "project" {} + +resource "google_compute_network" "default" { + name = "tf-test-alloydb-cluster%{random_suffix}" +} + +resource "google_compute_global_address" "private_ip_alloc" { + name = "tf-test-alloydb-cluster%{random_suffix}" + address_type = "INTERNAL" + purpose 
= "VPC_PEERING" + prefix_length = 16 + network = google_compute_network.default.id +} + +resource "google_service_networking_connection" "vpc_connection" { + network = google_compute_network.default.id + service = "servicenetworking.googleapis.com" + reserved_peering_ranges = [google_compute_global_address.private_ip_alloc.name] +} +`, context) +} + +func testAccCheckAlloydbInstanceDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_alloydb_instance" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := googleProviderConfig(t) + + url, err := replaceVarsForTest(config, rs, "{{AlloydbBasePath}}{{cluster}}/instances/{{instance_id}}") + if err != nil { + return err + } + + billingProject := "" + + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + _, err = sendRequest(config, "GET", billingProject, url, config.userAgent, nil) + if err == nil { + return fmt.Errorf("AlloydbInstance still exists at %s", url) + } + } + + return nil + } +} diff --git a/google/resource_alloydb_instance_sweeper_test.go b/google/resource_alloydb_instance_sweeper_test.go new file mode 100644 index 00000000000..d38c323e648 --- /dev/null +++ b/google/resource_alloydb_instance_sweeper_test.go @@ -0,0 +1,128 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func init() { + resource.AddTestSweepers("AlloydbInstance", &resource.Sweeper{ + Name: "AlloydbInstance", + F: testSweepAlloydbInstance, + }) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepAlloydbInstance(region string) error { + resourceName := "AlloydbInstance" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := getTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://alloydb.googleapis.com/v1/{{cluster}}/instances?instanceId={{instance_id}}", "?")[0] + listUrl, err := replaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := sendRequest(config, "GET", config.Project, listUrl, config.userAgent, nil) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["instances"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list 
response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. + if obj["id"] != nil { + name = GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be sweeped + if !isSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://alloydb.googleapis.com/v1/{{cluster}}/instances/{{instance_id}}" + deleteUrl, err := replaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = sendRequest(config, "DELETE", config.Project, deleteUrl, config.userAgent, nil) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/google/resource_alloydb_instance_test.go b/google/resource_alloydb_instance_test.go index 71664db3c87..6eb20c9bab6 100644 --- a/google/resource_alloydb_instance_test.go +++ b/google/resource_alloydb_instance_test.go @@ -1 +1,92 @@ package google + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccAlloydbInstance_update(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + 
PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAlloydbInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccAlloydbInstance_alloydbInstanceBasicExample(context), + }, + { + ResourceName: "google_alloydb_instance.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"cluster", "instance_id", "reconciling", "update_time"}, + }, + { + Config: testAccAlloydbInstance_update(context), + }, + { + ResourceName: "google_alloydb_instance.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"cluster", "instance_id", "reconciling", "update_time"}, + }, + }, + }) +} + +func testAccAlloydbInstance_update(context map[string]interface{}) string { + return Nprintf(` +resource "google_alloydb_instance" "default" { + cluster = google_alloydb_cluster.default.name + instance_id = "tf-test-alloydb-instance%{random_suffix}" + instance_type = "PRIMARY" + + machine_config { + cpu_count = 4 + } + + labels = { + test = "tf-test-alloydb-instance%{random_suffix}" + } + + depends_on = [google_service_networking_connection.vpc_connection] +} + +resource "google_alloydb_cluster" "default" { + cluster_id = "tf-test-alloydb-cluster%{random_suffix}" + location = "us-central1" + network = "projects/${data.google_project.project.number}/global/networks/${google_compute_network.default.name}" + + initial_user { + password = "tf-test-alloydb-cluster%{random_suffix}" + } +} + +data "google_project" "project" { +} + +resource "google_compute_network" "default" { + name = "tf-test-alloydb-cluster%{random_suffix}" +} + +resource "google_compute_global_address" "private_ip_alloc" { + name = "tf-test-alloydb-cluster%{random_suffix}" + address_type = "INTERNAL" + purpose = "VPC_PEERING" + prefix_length = 16 + network = google_compute_network.default.id +} + +resource "google_service_networking_connection" "vpc_connection" { + network = 
google_compute_network.default.id + service = "servicenetworking.googleapis.com" + reserved_peering_ranges = [google_compute_global_address.private_ip_alloc.name] +} +`, context) +} diff --git a/website/docs/r/alloydb_backup.html.markdown b/website/docs/r/alloydb_backup.html.markdown index a8a0d456ae7..bedd74c3eef 100644 --- a/website/docs/r/alloydb_backup.html.markdown +++ b/website/docs/r/alloydb_backup.html.markdown @@ -21,12 +21,10 @@ description: |- An AlloyDB Backup. -~> **Warning:** This resource is in beta, and should be used with the terraform-provider-google-beta provider. -See [Provider Versions](https://terraform.io/docs/providers/google/guides/provider_versions.html) for more details on beta resources. To get more information about Backup, see: -* [API documentation](https://cloud.google.com/alloydb/docs/reference/rest/v1beta/projects.locations.backups/create) +* [API documentation](https://cloud.google.com/alloydb/docs/reference/rest/v1/projects.locations.backups/create) * How-to Guides * [AlloyDB](https://cloud.google.com/alloydb/docs/) @@ -40,7 +38,6 @@ To get more information about Backup, see: ```hcl resource "google_alloydb_backup" "default" { - provider = google-beta location = "us-central1" backup_id = "alloydb-backup" cluster_name = google_alloydb_cluster.default.name @@ -53,14 +50,12 @@ resource "google_alloydb_backup" "default" { } resource "google_alloydb_cluster" "default" { - provider = google-beta cluster_id = "alloydb-cluster" location = "us-central1" network = data.google_compute_network.default.id } resource "google_alloydb_instance" "default" { - provider = google-beta cluster = google_alloydb_cluster.default.name instance_id = "alloydb-instance" instance_type = "PRIMARY" @@ -69,7 +64,6 @@ resource "google_alloydb_instance" "default" { } resource "google_compute_global_address" "private_ip_alloc" { - provider = google-beta name = "alloydb-cluster" address_type = "INTERNAL" purpose = "VPC_PEERING" @@ -78,14 +72,12 @@ resource 
"google_compute_global_address" "private_ip_alloc" { } resource "google_service_networking_connection" "vpc_connection" { - provider = google-beta network = data.google_compute_network.default.id service = "servicenetworking.googleapis.com" reserved_peering_ranges = [google_compute_global_address.private_ip_alloc.name] } data "google_compute_network" "default" { - provider = google-beta name = "alloydb-network" } ``` diff --git a/website/docs/r/alloydb_cluster.html.markdown b/website/docs/r/alloydb_cluster.html.markdown index 66c5cf2a4f2..15a42a0b707 100644 --- a/website/docs/r/alloydb_cluster.html.markdown +++ b/website/docs/r/alloydb_cluster.html.markdown @@ -21,12 +21,10 @@ description: |- A managed alloydb cluster. -~> **Warning:** This resource is in beta, and should be used with the terraform-provider-google-beta provider. -See [Provider Versions](https://terraform.io/docs/providers/google/guides/provider_versions.html) for more details on beta resources. To get more information about Cluster, see: -* [API documentation](https://cloud.google.com/alloydb/docs/reference/rest/v1beta/projects.locations.clusters/create) +* [API documentation](https://cloud.google.com/alloydb/docs/reference/rest/v1/projects.locations.clusters/create) * How-to Guides * [AlloyDB](https://cloud.google.com/alloydb/docs/) @@ -43,18 +41,14 @@ state as plain-text. 
[Read more about sensitive data in state](https://www.terra ```hcl resource "google_alloydb_cluster" "default" { - provider = google-beta cluster_id = "alloydb-cluster" location = "us-central1" network = "projects/${data.google_project.project.number}/global/networks/${google_compute_network.default.name}" } -data "google_project" "project" { - provider = google-beta -} +data "google_project" "project" {} resource "google_compute_network" "default" { - provider = google-beta name = "alloydb-cluster" } ``` @@ -68,7 +62,6 @@ resource "google_compute_network" "default" { ```hcl resource "google_alloydb_cluster" "full" { - provider = google-beta cluster_id = "alloydb-cluster-full" location = "us-central1" network = "projects/${data.google_project.project.number}/global/networks/${google_compute_network.default.name}" @@ -108,12 +101,9 @@ resource "google_alloydb_cluster" "full" { } } -data "google_project" "project" { - provider = google-beta -} +data "google_project" "project" {} resource "google_compute_network" "default" { - provider = google-beta name = "alloydb-cluster-full" } ``` diff --git a/website/docs/r/alloydb_instance.html.markdown b/website/docs/r/alloydb_instance.html.markdown index 489c12e5f50..54c8ca079ca 100644 --- a/website/docs/r/alloydb_instance.html.markdown +++ b/website/docs/r/alloydb_instance.html.markdown @@ -21,12 +21,10 @@ description: |- A managed alloydb cluster instance. -~> **Warning:** This resource is in beta, and should be used with the terraform-provider-google-beta provider. -See [Provider Versions](https://terraform.io/docs/providers/google/guides/provider_versions.html) for more details on beta resources. 
To get more information about Instance, see: -* [API documentation](https://cloud.google.com/alloydb/docs/reference/rest/v1beta/projects.locations.clusters.instances/create) +* [API documentation](https://cloud.google.com/alloydb/docs/reference/rest/v1/projects.locations.clusters.instances/create) * How-to Guides * [AlloyDB](https://cloud.google.com/alloydb/docs/) @@ -40,7 +38,6 @@ To get more information about Instance, see: ```hcl resource "google_alloydb_instance" "default" { - provider = google-beta cluster = google_alloydb_cluster.default.name instance_id = "alloydb-instance" instance_type = "PRIMARY" @@ -53,7 +50,6 @@ resource "google_alloydb_instance" "default" { } resource "google_alloydb_cluster" "default" { - provider = google-beta cluster_id = "alloydb-cluster" location = "us-central1" network = "projects/${data.google_project.project.number}/global/networks/${google_compute_network.default.name}" @@ -63,17 +59,13 @@ resource "google_alloydb_cluster" "default" { } } -data "google_project" "project" { - provider = google-beta -} +data "google_project" "project" {} resource "google_compute_network" "default" { - provider = google-beta name = "alloydb-cluster" } resource "google_compute_global_address" "private_ip_alloc" { - provider = google-beta name = "alloydb-cluster" address_type = "INTERNAL" purpose = "VPC_PEERING" @@ -82,7 +74,6 @@ resource "google_compute_global_address" "private_ip_alloc" { } resource "google_service_networking_connection" "vpc_connection" { - provider = google-beta network = google_compute_network.default.id service = "servicenetworking.googleapis.com" reserved_peering_ranges = [google_compute_global_address.private_ip_alloc.name]