From ebe5e1b8d1815eb7eabbb8c91f0c199d8d00f6d2 Mon Sep 17 00:00:00 2001
From: Riley Karson
Date: Wed, 13 Nov 2019 21:04:00 +0000
Subject: [PATCH] [DO NOT MERGE] 3.0.0 staging branch

Signed-off-by: Modular Magician
---
 google/batcher.go | 2 +- google/config.go | 1 + .../data_source_compute_lb_ip_ranges_test.go | 3 +- ...a_source_compute_network_endpoint_group.go | 8 +- google/data_source_container_registry_test.go | 33 +- google/data_source_dns_managed_zone.go | 10 +- google/data_source_dns_managed_zone_test.go | 8 +- .../data_source_google_active_folder_test.go | 7 +- ...data_source_google_billing_account_test.go | 11 +- ...ta_source_google_client_openid_userinfo.go | 2 +- ...urce_google_client_openid_userinfo_test.go | 20 +- ...a_source_google_cloudfunctions_function.go | 2 +- ...rce_google_cloudfunctions_function_test.go | 9 +- google/data_source_google_compute_address.go | 3 +- ...data_source_google_compute_address_test.go | 4 +- ...a_source_google_compute_backend_service.go | 11 +- ...rce_google_compute_backend_service_test.go | 4 +- ...a_source_google_compute_forwarding_rule.go | 2 +- ...rce_google_compute_forwarding_rule_test.go | 32 +- ...ta_source_google_compute_global_address.go | 4 +- ...urce_google_compute_global_address_test.go | 4 +- .../data_source_google_compute_image_test.go | 13 +- google/data_source_google_compute_instance.go | 2 +- ...ta_source_google_compute_instance_group.go | 8 +- ...urce_google_compute_instance_group_test.go | 38 +- ...ata_source_google_compute_instance_test.go | 76 +- google/data_source_google_compute_network.go | 2 +- ...data_source_google_compute_network_test.go | 9 +- ...ce_google_compute_region_instance_group.go | 3 +- ...ogle_compute_region_instance_group_test.go | 48 +- ...a_source_google_compute_ssl_certificate.go | 10 +- ...rce_google_compute_ssl_certificate_test.go | 10 +- .../data_source_google_compute_ssl_policy.go | 10 +- ...a_source_google_compute_ssl_policy_test.go | 11 +- .../data_source_google_compute_subnetwork.go | 2 +- ...a_source_google_compute_subnetwork_test.go | 26 +- .../data_source_google_compute_vpn_gateway.go | 2 +- ..._source_google_compute_vpn_gateway_test.go | 11 +- .../data_source_google_container_cluster.go | 14 +- ...ta_source_google_container_cluster_test.go | 28 +- ...source_google_container_engine_versions.go | 21 +- ...e_google_container_engine_versions_test.go | 35 +- ...ource_google_folder_organization_policy.go | 2 +- ..._google_folder_organization_policy_test.go | 14 +- google/data_source_google_folder_test.go | 24 +- google/data_source_google_iam_role_test.go | 2 +- .../data_source_google_kms_crypto_key_test.go | 6 +- ...urce_google_kms_crypto_key_version_test.go | 4 +- .../data_source_google_kms_key_ring_test.go | 7 +- ...ource_google_kms_secret_ciphertext_test.go | 6 +- google/data_source_google_kms_secret_test.go | 6 +- google/data_source_google_organization.go | 2 +- .../data_source_google_organization_test.go | 10 +- google/data_source_google_project.go | 5 +- ...google_project_organization_policy_test.go | 6 +- google/data_source_google_project_services.go | 30 - ...ata_source_google_project_services_test.go | 53 - google/data_source_google_project_test.go | 13 +- ...oogle_service_account_access_token_test.go | 19 +- .../data_source_google_service_account_key.go | 6 - ..._source_google_service_account_key_test.go | 11 +- ...data_source_google_service_account_test.go | 2 +- ...a_source_storage_object_signed_url_test.go | 30 +- google/iam.go | 6 - ...y_authorization_attestor_generated_test.go | 12 +-
...functions_cloud_function_generated_test.go | 48 +- .../iam_compute_subnetwork_generated_test.go | 8 +- ...m_iap_app_engine_service_generated_test.go | 88 +- ...m_iap_app_engine_version_generated_test.go | 56 +- ..._iap_web_backend_service_generated_test.go | 8 +- google/iam_iap_web_generated_test.go | 40 +- ..._iap_web_type_app_engine_generated_test.go | 56 +- ...iam_iap_web_type_compute_generated_test.go | 40 +- ...am_runtime_config_config_generated_test.go | 16 +- google/iam_test.go | 161 --- google/node_config.go | 28 +- google/provider.go | 2 - google/provider_test.go | 7 - google/pubsub_utils.go | 10 - ...rce_access_context_manager_access_level.go | 12 +- ...ccess_context_manager_service_perimeter.go | 3 + google/resource_app_engine_application.go | 2 +- ...ation_url_dispatch_rules_generated_test.go | 22 +- google/resource_app_engine_domain_mapping.go | 20 +- ...pp_engine_domain_mapping_generated_test.go | 2 +- google/resource_app_engine_firewall_rule.go | 5 +- ...app_engine_firewall_rule_generated_test.go | 8 +- ...esource_app_engine_standard_app_version.go | 26 +- ...ine_standard_app_version_generated_test.go | 8 +- google/resource_big_query_dataset.go | 26 +- google/resource_bigquery_table.go | 20 +- google/resource_bigtable_app_profile.go | 18 +- ...rce_bigtable_app_profile_generated_test.go | 50 +- google/resource_bigtable_instance.go | 46 +- google/resource_bigtable_table.go | 10 +- google/resource_bigtable_table_test.go | 5 - .../resource_binary_authorization_attestor.go | 4 +- ...y_authorization_attestor_generated_test.go | 3 +- .../resource_binary_authorization_policy.go | 14 +- google/resource_cloud_build_trigger.go | 192 +-- google/resource_cloud_scheduler_job.go | 66 +- ...urce_cloud_scheduler_job_generated_test.go | 52 +- google/resource_cloudfunctions_function.go | 62 +- .../resource_cloudfunctions_function_test.go | 7 +- google/resource_cloudiot_registry.go | 49 +- google/resource_cloudiot_registry_test.go | 83 +- google/resource_composer_environment.go | 37 +- google/resource_composer_environment_test.go | 10 +- google/resource_compute_address.go | 4 +- ...resource_compute_address_generated_test.go | 40 +- google/resource_compute_attached_disk.go | 19 +- google/resource_compute_attached_disk_test.go | 6 +- google/resource_compute_autoscaler.go | 4 +- ...ource_compute_autoscaler_generated_test.go | 12 +- google/resource_compute_backend_bucket.go | 9 +- ...e_compute_backend_bucket_generated_test.go | 2 +- ...e_compute_backend_bucket_signed_url_key.go | 2 +- ...pute_backend_bucket_signed_url_key_test.go | 2 +- google/resource_compute_backend_service.go | 73 +- ..._compute_backend_service_generated_test.go | 2 +- ..._compute_backend_service_signed_url_key.go | 2 +- ...ute_backend_service_signed_url_key_test.go | 2 +- .../resource_compute_backend_service_test.go | 8 +- google/resource_compute_disk.go | 17 +- google/resource_compute_disk_test.go | 4 +- google/resource_compute_firewall.go | 16 +- ...esource_compute_firewall_generated_test.go | 2 +- google/resource_compute_forwarding_rule.go | 57 +- ..._compute_forwarding_rule_generated_test.go | 22 +- google/resource_compute_global_address.go | 4 +- ...resource_compute_global_forwarding_rule.go | 27 +- ...e_global_forwarding_rule_generated_test.go | 12 +- google/resource_compute_health_check.go | 63 +- ...rce_compute_health_check_generated_test.go | 116 +- google/resource_compute_health_check_test.go | 3 + google/resource_compute_http_health_check.go | 4 +- ...resource_compute_http_health_check_test.go | 8 +- 
google/resource_compute_https_health_check.go | 4 +- google/resource_compute_image.go | 8 +- google/resource_compute_image_test.go | 8 +- google/resource_compute_instance.go | 268 ++-- ...resource_compute_instance_from_template.go | 2 +- ...rce_compute_instance_from_template_test.go | 3 + google/resource_compute_instance_group.go | 25 +- ...resource_compute_instance_group_manager.go | 170 +-- ...rce_compute_instance_group_manager_test.go | 182 --- .../resource_compute_instance_group_test.go | 2 +- google/resource_compute_instance_template.go | 162 ++- ...resource_compute_instance_template_test.go | 116 +- google/resource_compute_instance_test.go | 14 +- ...esource_compute_interconnect_attachment.go | 4 +- google/resource_compute_network.go | 55 +- ...resource_compute_network_endpoint_group.go | 4 +- ...e_network_endpoint_group_generated_test.go | 8 +- google/resource_compute_network_peering.go | 11 +- .../resource_compute_network_peering_test.go | 1 - google/resource_compute_network_test.go | 40 +- google/resource_compute_node_group.go | 4 +- ...ource_compute_node_group_generated_test.go | 14 +- google/resource_compute_node_template.go | 22 +- ...ce_compute_node_template_generated_test.go | 6 +- google/resource_compute_region_autoscaler.go | 4 +- ...ompute_region_autoscaler_generated_test.go | 12 +- ...resource_compute_region_backend_service.go | 4 +- ...e_region_backend_service_generated_test.go | 2 +- ...rce_compute_region_backend_service_test.go | 4 +- google/resource_compute_region_disk.go | 4 +- ...urce_compute_region_disk_generated_test.go | 22 +- google/resource_compute_region_disk_test.go | 6 +- ...e_compute_region_instance_group_manager.go | 212 +-- ...pute_region_instance_group_manager_test.go | 198 +-- google/resource_compute_reservation.go | 4 +- ...urce_compute_reservation_generated_test.go | 2 +- google/resource_compute_resource_policy.go | 30 +- ..._compute_resource_policy_generated_test.go | 12 +- google/resource_compute_route.go | 24 +- .../resource_compute_route_generated_test.go | 2 +- google/resource_compute_router.go | 16 +- .../resource_compute_router_generated_test.go | 4 +- google/resource_compute_router_peer.go | 2 +- google/resource_compute_security_policy.go | 31 +- .../resource_compute_security_policy_test.go | 2 +- google/resource_compute_snapshot.go | 32 +- ...esource_compute_snapshot_generated_test.go | 26 +- google/resource_compute_ssl_certificate.go | 4 +- ..._compute_ssl_certificate_generated_test.go | 30 +- .../resource_compute_ssl_certificate_test.go | 6 +- google/resource_compute_ssl_policy.go | 4 +- google/resource_compute_ssl_policy_test.go | 2 +- google/resource_compute_subnetwork.go | 132 +- ...ource_compute_subnetwork_generated_test.go | 4 +- google/resource_compute_subnetwork_test.go | 9 +- google/resource_compute_target_http_proxy.go | 4 +- ...ompute_target_http_proxy_generated_test.go | 14 +- ...resource_compute_target_http_proxy_test.go | 5 +- google/resource_compute_target_https_proxy.go | 4 +- ...mpute_target_https_proxy_generated_test.go | 16 +- ...esource_compute_target_https_proxy_test.go | 5 +- google/resource_compute_target_instance.go | 4 +- ..._compute_target_instance_generated_test.go | 8 +- google/resource_compute_target_pool.go | 45 +- google/resource_compute_target_pool_test.go | 8 +- google/resource_compute_target_ssl_proxy.go | 4 +- ...compute_target_ssl_proxy_generated_test.go | 10 +- .../resource_compute_target_ssl_proxy_test.go | 5 +- google/resource_compute_target_tcp_proxy.go | 4 +- 
...compute_target_tcp_proxy_generated_test.go | 10 +- .../resource_compute_target_tcp_proxy_test.go | 5 +- google/resource_compute_url_map.go | 12 +- ...resource_compute_url_map_generated_test.go | 18 +- google/resource_compute_url_map_test.go | 5 +- google/resource_compute_vpn_gateway.go | 4 +- ...urce_compute_vpn_gateway_generated_test.go | 30 +- google/resource_compute_vpn_tunnel.go | 4 +- ...ource_compute_vpn_tunnel_generated_test.go | 30 +- google/resource_container_analysis_note.go | 4 +- google/resource_container_cluster.go | 429 ++---- google/resource_container_cluster_test.go | 810 ++++------- google/resource_container_node_pool.go | 66 +- google/resource_container_node_pool_test.go | 139 +- google/resource_dataproc_cluster.go | 275 ++-- google/resource_dataproc_cluster_test.go | 13 +- google/resource_dataproc_job.go | 180 ++- google/resource_dataproc_job_test.go | 61 +- google/resource_dns_managed_zone.go | 22 +- ...esource_dns_managed_zone_generated_test.go | 16 +- google/resource_endpoints_service.go | 6 - google/resource_filestore_instance.go | 4 +- ...resource_firestore_index_generated_test.go | 2 +- ...ource_google_folder_organization_policy.go | 6 +- google/resource_google_organization_policy.go | 71 +- google/resource_google_project.go | 214 ++- google/resource_google_project_iam_policy.go | 15 - google/resource_google_project_service.go | 13 +- .../resource_google_project_service_test.go | 2 +- google/resource_google_project_services.go | 335 ----- .../resource_google_project_services_test.go | 440 ------ google/resource_google_project_test.go | 5 +- google/resource_google_service_account.go | 5 - google/resource_iam_audit_config.go | 36 +- google/resource_logging_metric.go | 47 +- .../resource_logging_metric_generated_test.go | 38 +- google/resource_ml_engine_model.go | 6 +- ...resource_ml_engine_model_generated_test.go | 12 +- google/resource_monitoring_alert_policy.go | 6 +- ...esource_monitoring_group_generated_test.go | 6 +- ...ing_notification_channel_generated_test.go | 2 +- ...resource_monitoring_uptime_check_config.go | 119 +- ...ring_uptime_check_config_generated_test.go | 11 +- google/resource_pubsub_subscription.go | 34 +- ...urce_pubsub_subscription_generated_test.go | 4 +- google/resource_pubsub_subscription_test.go | 34 +- .../resource_pubsub_topic_generated_test.go | 1 - google/resource_redis_instance.go | 16 +- .../resource_redis_instance_generated_test.go | 2 +- google/resource_source_repo_repository.go | 4 +- google/resource_spanner_database.go | 4 +- ...esource_spanner_database_generated_test.go | 8 +- ...esource_spanner_instance_generated_test.go | 6 +- google/resource_sql_database.go | 5 +- .../resource_sql_database_generated_test.go | 14 +- google/resource_sql_database_instance.go | 217 ++- google/resource_sql_ssl_cert.go | 4 +- google/resource_storage_bucket.go | 69 +- ...ge_bucket_access_control_generated_test.go | 2 +- google/resource_storage_bucket_object.go | 7 - google/resource_storage_bucket_test.go | 133 +- ...lt_object_access_control_generated_test.go | 4 +- ...ge_object_access_control_generated_test.go | 14 +- google/resource_storage_transfer_job.go | 70 +- google/resource_tpu_node.go | 7 +- google/resource_tpu_node_generated_test.go | 44 +- google/serviceusage_operation.go | 16 + google/test_utils.go | 4 + google/utils.go | 1 + google/validation.go | 8 + .../d/datasource_client_config.html.markdown | 20 +- .../datasource_compute_address.html.markdown | 4 +- ...ource_compute_global_address.html.markdown | 4 +- 
.../d/datasource_compute_image.html.markdown | 2 +- .../datasource_compute_instance.html.markdown | 4 +- ...asource_compute_lb_ip_ranges.html.markdown | 9 +- ...ompute_region_instance_group.html.markdown | 17 +- ...urce_compute_ssl_certificate.html.markdown | 8 +- ...oogle_client_openid_userinfo.html.markdown | 46 +- ...ogle_composer_image_versions.html.markdown | 6 +- ...ogle_compute_backend_service.html.markdown | 2 +- ...mpute_network_endpoint_group.html.markdown | 6 +- ...e_folder_organization_policy.html.markdown | 2 +- .../datasource_google_iam_role.html.markdown | 3 +- ...ce_google_netblock_ip_ranges.html.markdown | 13 +- ..._project_organization_policy.html.markdown | 2 +- ...ource_google_service_account.html.markdown | 8 +- ...service_account_access_token.html.markdown | 21 +- ...e_google_service_account_key.html.markdown | 4 +- ...urce_tpu_tensorflow_versions.html.markdown | 16 +- website/docs/d/dns_managed_zone.html.markdown | 4 +- .../docs/d/google_active_folder.html.markdown | 2 +- .../d/google_billing_account.html.markdown | 2 +- ...pute_default_service_account.html.markdown | 7 +- ...oogle_compute_instance_group.html.markdown | 2 +- .../d/google_compute_node_types.html.markdown | 2 +- .../d/google_compute_regions.html.markdown | 9 +- ...ogle_compute_resource_policy.html.markdown | 2 +- .../docs/d/google_compute_zones.html.markdown | 11 +- .../d/google_container_cluster.html.markdown | 16 +- ...le_container_engine_versions.html.markdown | 14 +- ...gle_container_registry_image.html.markdown | 4 +- ...ontainer_registry_repository.html.markdown | 5 +- website/docs/d/google_folder.html.markdown | 7 +- .../docs/d/google_iam_policy.html.markdown | 2 +- .../d/google_kms_crypto_key.html.markdown | 4 +- ...oogle_kms_crypto_key_version.html.markdown | 4 +- .../docs/d/google_kms_secret.html.markdown | 8 +- ...google_kms_secret_ciphertext.html.markdown | 8 +- .../docs/d/google_organization.html.markdown | 2 +- website/docs/d/google_project.html.markdown | 5 +- .../d/google_project_services.html.markdown | 40 - website/docs/d/google_projects.html.markdown | 2 +- ...rage_project_service_account.html.markdown | 11 +- ...sfer_project_service_account.html.markdown | 5 +- website/docs/d/signed_url.html.markdown | 20 +- .../guides/provider_reference.html.markdown | 4 +- .../guides/version_3_upgrade.html.markdown | 1285 ++++++++++++++++- ...context_manager_access_level.html.markdown | 8 +- ...xt_manager_service_perimeter.html.markdown | 12 +- .../r/app_engine_application.html.markdown | 2 +- ...plication_url_dispatch_rules.html.markdown | 22 +- .../r/app_engine_domain_mapping.html.markdown | 6 +- .../r/app_engine_firewall_rule.html.markdown | 9 +- ..._engine_standard_app_version.html.markdown | 16 +- ...igquery_data_transfer_config.html.markdown | 27 +- website/docs/r/bigquery_dataset.html.markdown | 8 +- website/docs/r/bigquery_table.html.markdown | 7 +- .../docs/r/bigtable_app_profile.html.markdown | 52 +- .../docs/r/bigtable_gc_policy.html.markdown | 34 +- .../docs/r/bigtable_instance.html.markdown | 2 +- .../r/bigtable_instance_iam.html.markdown | 20 +- website/docs/r/bigtable_table.html.markdown | 15 +- ...inary_authorization_attestor.html.markdown | 15 +- ...y_authorization_attestor_iam.html.markdown | 6 +- .../binary_authorization_policy.html.markdown | 30 +- .../docs/r/cloud_scheduler_job.html.markdown | 56 +- .../docs/r/cloudbuild_trigger.html.markdown | 87 +- ...functions_cloud_function_iam.html.markdown | 6 +- .../r/cloudfunctions_function.html.markdown | 46 +- 
.../docs/r/cloudiot_registry.html.markdown | 8 +- .../docs/r/composer_environment.html.markdown | 24 +- website/docs/r/compute_address.html.markdown | 40 +- .../r/compute_attached_disk.html.markdown | 11 +- .../docs/r/compute_autoscaler.html.markdown | 34 +- .../r/compute_backend_bucket.html.markdown | 6 +- ...ackend_bucket_signed_url_key.html.markdown | 4 +- .../r/compute_backend_service.html.markdown | 36 +- ...ckend_service_signed_url_key.html.markdown | 12 +- website/docs/r/compute_firewall.html.markdown | 2 +- .../r/compute_forwarding_rule.html.markdown | 39 +- ...mpute_global_forwarding_rule.html.markdown | 69 +- .../docs/r/compute_health_check.html.markdown | 116 +- website/docs/r/compute_image.html.markdown | 2 +- ...mpute_instance_from_template.html.markdown | 14 +- .../r/compute_instance_group.html.markdown | 23 +- ...mpute_instance_group_manager.html.markdown | 67 +- .../docs/r/compute_instance_iam.html.markdown | 6 +- .../r/compute_instance_template.html.markdown | 14 +- ...pute_interconnect_attachment.html.markdown | 4 +- website/docs/r/compute_network.html.markdown | 10 - .../r/compute_network_endpoint.html.markdown | 27 +- ...mpute_network_endpoint_group.html.markdown | 8 +- .../r/compute_network_peering.html.markdown | 15 +- .../docs/r/compute_node_group.html.markdown | 14 +- .../r/compute_node_template.html.markdown | 18 +- ...ompute_project_metadata_item.html.markdown | 2 +- .../r/compute_region_autoscaler.html.markdown | 38 +- ...mpute_region_backend_service.html.markdown | 6 +- .../docs/r/compute_region_disk.html.markdown | 22 +- ...egion_instance_group_manager.html.markdown | 62 +- .../docs/r/compute_reservation.html.markdown | 2 +- .../r/compute_resource_policy.html.markdown | 12 +- website/docs/r/compute_route.html.markdown | 39 +- website/docs/r/compute_router.html.markdown | 6 +- .../docs/r/compute_router_nat.html.markdown | 92 +- .../r/compute_security_policy.html.markdown | 6 +- ...pute_shared_vpc_host_project.html.markdown | 5 +- website/docs/r/compute_snapshot.html.markdown | 28 +- .../r/compute_ssl_certificate.html.markdown | 30 +- .../docs/r/compute_subnetwork.html.markdown | 17 +- .../r/compute_target_http_proxy.html.markdown | 14 +- .../compute_target_https_proxy.html.markdown | 16 +- .../r/compute_target_instance.html.markdown | 8 +- .../docs/r/compute_target_pool.html.markdown | 9 +- .../r/compute_target_ssl_proxy.html.markdown | 10 +- .../r/compute_target_tcp_proxy.html.markdown | 10 +- website/docs/r/compute_url_map.html.markdown | 18 +- .../docs/r/compute_vpn_gateway.html.markdown | 30 +- .../docs/r/compute_vpn_tunnel.html.markdown | 80 +- .../docs/r/container_cluster.html.markdown | 124 +- .../docs/r/container_node_pool.html.markdown | 20 +- website/docs/r/dataflow_job.html.markdown | 14 +- website/docs/r/dataproc_cluster.html.markdown | 261 ++-- .../docs/r/dataproc_cluster_iam.html.markdown | 22 +- website/docs/r/dataproc_job.html.markdown | 190 ++- website/docs/r/dataproc_job_iam.html.markdown | 22 +- website/docs/r/dns_managed_zone.html.markdown | 53 +- ....markdown => dns_record_set.html.markdown} | 49 +- .../docs/r/endpoints_service.html.markdown | 6 +- website/docs/r/firestore_index.html.markdown | 2 +- ...billing_account_iam_binding.html.markdown} | 0 ..._billing_account_iam_member.html.markdown} | 0 ..._billing_account_iam_policy.html.markdown} | 4 +- website/docs/r/google_folder.html.markdown | 4 +- .../r/google_folder_iam_binding.html.markdown | 4 +- .../r/google_folder_iam_member.html.markdown | 6 +- 
.../r/google_folder_iam_policy.html.markdown | 6 +- ...e_folder_organization_policy.html.markdown | 8 +- ...gle_iap_tunnel_instance_iam.html.markdown} | 0 .../r/google_kms_key_ring_iam.html.markdown | 4 +- ...gle_organization_iam_member.html.markdown} | 4 +- ...gle_organization_iam_policy.html.markdown} | 4 +- .../google_organization_policy.html.markdown | 11 +- website/docs/r/google_project.html.markdown | 8 +- .../docs/r/google_project_iam.html.markdown | 2 +- ..._project_organization_policy.html.markdown | 4 +- .../r/google_project_service.html.markdown | 3 - .../r/google_project_services.html.markdown | 55 - .../google_service_account_iam.html.markdown | 16 +- .../google_service_account_key.html.markdown | 16 +- .../r/healthcare_dataset_iam.html.markdown | 10 +- .../healthcare_dicom_store_iam.html.markdown | 10 +- .../r/healthcare_fhir_store_iam.html.markdown | 10 +- .../healthcare_hl7_v2_store_iam.html.markdown | 11 +- website/docs/r/kms_crypto_key.html.markdown | 6 +- website/docs/r/kms_key_ring.html.markdown | 2 +- ...ng_billing_account_exclusion.html.markdown | 10 +- ...logging_billing_account_sink.html.markdown | 18 +- .../r/logging_folder_exclusion.html.markdown | 14 +- .../docs/r/logging_folder_sink.html.markdown | 26 +- website/docs/r/logging_metric.html.markdown | 40 +- ...gging_organization_exclusion.html.markdown | 10 +- .../r/logging_organization_sink.html.markdown | 18 +- .../r/logging_project_exclusion.html.markdown | 8 +- .../docs/r/logging_project_sink.html.markdown | 36 +- website/docs/r/ml_engine_model.html.markdown | 14 +- .../r/monitoring_alert_policy.html.markdown | 8 +- website/docs/r/monitoring_group.html.markdown | 6 +- ...itoring_notification_channel.html.markdown | 2 +- ...nitoring_uptime_check_config.html.markdown | 17 +- .../docs/r/pubsub_subscription.html.markdown | 14 +- .../r/pubsub_subscription_iam.html.markdown | 6 +- website/docs/r/pubsub_topic.html.markdown | 5 +- website/docs/r/redis_instance.html.markdown | 2 +- .../r/resource_manager_lien.html.markdown | 8 +- .../docs/r/runtimeconfig_config.html.markdown | 4 +- .../r/runtimeconfig_variable.html.markdown | 20 +- website/docs/r/scc_source.html.markdown | 2 +- ...ervice_networking_connection.html.markdown | 6 +- .../r/sourcerepo_repository_iam.html.markdown | 6 +- website/docs/r/spanner_database.html.markdown | 8 +- .../docs/r/spanner_database_iam.html.markdown | 16 +- website/docs/r/spanner_instance.html.markdown | 6 +- .../docs/r/spanner_instance_iam.html.markdown | 12 +- website/docs/r/sql_database.html.markdown | 15 +- .../r/sql_database_instance.html.markdown | 87 +- website/docs/r/sql_ssl_cert.html.markdown | 2 +- website/docs/r/sql_user.html.markdown | 2 +- website/docs/r/storage_bucket.html.markdown | 4 +- ...torage_bucket_access_control.html.markdown | 2 +- .../docs/r/storage_bucket_acl.html.markdown | 2 +- .../docs/r/storage_bucket_iam.html.markdown | 12 +- ...efault_object_access_control.html.markdown | 4 +- .../storage_default_object_acl.html.markdown | 2 +- .../docs/r/storage_notification.html.markdown | 31 +- ...torage_object_access_control.html.markdown | 14 +- .../docs/r/storage_object_acl.html.markdown | 6 +- .../docs/r/storage_transfer_job.html.markdown | 107 +- website/docs/r/tpu_node.html.markdown | 46 +- .../docs/r/usage_export_bucket.html.markdown | 4 +- website/google.erb | 6 - 478 files changed, 6456 insertions(+), 7746 deletions(-) delete mode 100644 google/data_source_google_project_services.go delete mode 100644 google/data_source_google_project_services_test.go delete mode 
100644 google/resource_google_project_services.go delete mode 100644 google/resource_google_project_services_test.go delete mode 100644 website/docs/d/google_project_services.html.markdown rename website/docs/r/{dns_record_set.markdown => dns_record_set.html.markdown} (80%) rename website/docs/r/{google_billing_account_iam_binding.md => google_billing_account_iam_binding.html.markdown} (100%) rename website/docs/r/{google_billing_account_iam_member.md => google_billing_account_iam_member.html.markdown} (100%) rename website/docs/r/{google_billing_account_iam_policy.md => google_billing_account_iam_policy.html.markdown} (93%) rename website/docs/r/{google_iap_tunnel_instance_iam.markdown => google_iap_tunnel_instance_iam.html.markdown} (100%) rename website/docs/r/{google_organization_iam_member.md => google_organization_iam_member.html.markdown} (96%) rename website/docs/r/{google_organization_iam_policy.md => google_organization_iam_policy.html.markdown} (95%) delete mode 100644 website/docs/r/google_project_services.html.markdown

diff --git a/google/batcher.go b/google/batcher.go
index 85aa0005cc3..caa6b45958a 100644
--- a/google/batcher.go
+++ b/google/batcher.go
@@ -125,7 +125,7 @@ func (b *RequestBatcher) stop() {
 // may choose to use a key with method if needed to diff GET/read and
 // POST/create)
 //
-// As an example, for google_project_service and google_project_services, the
+// As an example, for google_project_service, the
 // batcher is called to batch services.batchEnable() calls for a project
 // $PROJECT. The calling code uses the template
 // "serviceusage:projects/$PROJECT/services:batchEnable", which mirrors the HTTP request:
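The comment above describes the batching contract: requests that render the same key template are coalesced into one API call. For readers outside this codebase, here is a minimal, self-contained sketch of that idea; the `batchRequest` type and `coalesce` function are illustrative stand-ins, not the provider's actual `RequestBatcher` API:

```go
package main

import "fmt"

// batchRequest is a hypothetical stand-in for a batcher entry: requests
// whose batchKey renders identically are merged into one API call.
type batchRequest struct {
	batchKey string   // e.g. "serviceusage:projects/my-project/services:batchEnable"
	services []string // payload fragments to combine into the batched call
}

// coalesce groups requests by key, mirroring how the RequestBatcher merges
// services.batchEnable() calls targeting the same project.
func coalesce(reqs []batchRequest) map[string][]string {
	out := make(map[string][]string)
	for _, r := range reqs {
		out[r.batchKey] = append(out[r.batchKey], r.services...)
	}
	return out
}

func main() {
	key := "serviceusage:projects/my-project/services:batchEnable"
	batches := coalesce([]batchRequest{
		{batchKey: key, services: []string{"compute.googleapis.com"}},
		{batchKey: key, services: []string{"dns.googleapis.com"}},
	})
	// One batchEnable call covers both services.
	fmt.Println(batches[key])
}
```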
diff --git a/google/config.go b/google/config.go
index 806fe119b2e..d34a882819b 100644
--- a/google/config.go
+++ b/google/config.go
@@ -233,6 +233,7 @@ var defaultClientScopes = []string{
 	"https://www.googleapis.com/auth/cloud-platform",
 	"https://www.googleapis.com/auth/ndev.clouddns.readwrite",
 	"https://www.googleapis.com/auth/devstorage.full_control",
+	"https://www.googleapis.com/auth/userinfo.email",
 }
 
 func (c *Config) LoadAndValidate() error {

diff --git a/google/data_source_compute_lb_ip_ranges_test.go b/google/data_source_compute_lb_ip_ranges_test.go
index 2bba95ed298..8950d39b387 100644
--- a/google/data_source_compute_lb_ip_ranges_test.go
+++ b/google/data_source_compute_lb_ip_ranges_test.go
@@ -30,5 +30,6 @@ func TestAccDataSourceComputeLbIpRanges_basic(t *testing.T) {
 }
 
 const testAccComputeLbIpRangesConfig = `
-data "google_compute_lb_ip_ranges" "some" {}
+data "google_compute_lb_ip_ranges" "some" {
+}
 `

diff --git a/google/data_source_compute_network_endpoint_group.go b/google/data_source_compute_network_endpoint_group.go
index 9945d578fb4..bf0b16f6e1e 100644
--- a/google/data_source_compute_network_endpoint_group.go
+++ b/google/data_source_compute_network_endpoint_group.go
@@ -25,11 +25,15 @@ func dataSourceGoogleComputeNetworkEndpointGroup() *schema.Resource {
 func dataSourceComputeNetworkEndpointGroupRead(d *schema.ResourceData, meta interface{}) error {
 	config := meta.(*Config)
 	if name, ok := d.GetOk("name"); ok {
+		project, err := getProject(d, config)
+		if err != nil {
+			return err
+		}
 		zone, err := getZone(d, config)
 		if err != nil {
 			return err
 		}
-		d.SetId(fmt.Sprintf("%s/%s", zone, name.(string)))
+		d.SetId(fmt.Sprintf("projects/%s/zones/%s/networkEndpointGroups/%s", project, zone, name.(string)))
 	} else if selfLink, ok := d.GetOk("self_link"); ok {
 		parsed, err := ParseNetworkEndpointGroupFieldValue(selfLink.(string), d, config)
 		if err != nil {
@@ -38,7 +42,7 @@ func dataSourceComputeNetworkEndpointGroupRead(d *schema.ResourceData, meta inte
 		d.Set("name", parsed.Name)
 		d.Set("zone", parsed.Zone)
 		d.Set("project", parsed.Project)
-		d.SetId(fmt.Sprintf("%s/%s", parsed.Zone, parsed.Name))
+		d.SetId(fmt.Sprintf("projects/%s/zones/%s/networkEndpointGroups/%s", parsed.Project, parsed.Zone, parsed.Name))
 	} else {
 		return errors.New("Must provide either `self_link` or `zone/name`")
 	}

diff --git a/google/data_source_container_registry_test.go b/google/data_source_container_registry_test.go
index c0d1577770a..d10a8caa79f 100644
--- a/google/data_source_container_registry_test.go
+++ b/google/data_source_container_registry_test.go
@@ -67,26 +67,29 @@ func TestDataSourceGoogleContainerRegistryImage(t *testing.T) {
 
 const testAccCheckGoogleContainerRegistryImage_basic = `
 data "google_container_registry_image" "test" {
-	project = "foo"
-	region = "bar"
-	name = "baz"
+  project = "foo"
+  region  = "bar"
+  name    = "baz"
 }
+
 data "google_container_registry_image" "test2" {
-	project = "foo"
-	region = "bar"
-	name = "baz"
-	tag = "qux"
+  project = "foo"
+  region  = "bar"
+  name    = "baz"
+  tag     = "qux"
 }
+
 data "google_container_registry_image" "test3" {
-	project = "foo"
-	region = "bar"
-	name = "baz"
-	digest = "1234"
+  project = "foo"
+  region  = "bar"
+  name    = "baz"
+  digest  = "1234"
 }
+
 data "google_container_registry_image" "testScoped" {
-	project = "example.com:foo"
-	region = "bar"
-	name = "baz"
-	tag = "qux"
+  project = "example.com:foo"
+  region  = "bar"
+  name    = "baz"
+  tag     = "qux"
 }
 `

diff --git a/google/data_source_dns_managed_zone.go b/google/data_source_dns_managed_zone.go
index f86699203a5..e5a281b8598 100644
--- a/google/data_source_dns_managed_zone.go
+++ b/google/data_source_dns_managed_zone.go
@@ -1,6 +1,10 @@
 package google
 
-import "github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+import (
+	"fmt"
+
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+)
 
 func dataSourceDnsManagedZone() *schema.Resource {
 	return &schema.Resource{
@@ -43,13 +47,13 @@ func dataSourceDnsManagedZone() *schema.Resource {
 func dataSourceDnsManagedZoneRead(d *schema.ResourceData, meta interface{}) error {
 	config := meta.(*Config)
 
-	d.SetId(d.Get("name").(string))
-
 	project, err := getProject(d, config)
 	if err != nil {
 		return err
 	}
 
+	d.SetId(fmt.Sprintf("projects/%s/managedZones/%s", project, d.Get("name").(string)))
+
 	zone, err := config.clientDns.ManagedZones.Get(
 		project, d.Id()).Do()
 	if err != nil {
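This hunk shows the ID change that recurs throughout this patch: data sources and resources stop setting short or numeric IDs and set the full relative resource path instead, which is unambiguous across projects and lines up with the documented import formats. A minimal sketch of the pattern; the `managedZoneId` helper name is illustrative, not a helper this patch adds:

```go
package main

import "fmt"

// managedZoneId shows the ID shape this patch standardizes on: the
// relative resource path used by the GCP APIs.
func managedZoneId(project, name string) string {
	return fmt.Sprintf("projects/%s/managedZones/%s", project, name)
}

func main() {
	// Old style: d.SetId("my-zone"), ambiguous across projects.
	// New style: globally unambiguous and importable.
	fmt.Println(managedZoneId("my-project", "my-zone"))
	// Output: projects/my-project/managedZones/my-zone
}
```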
+ description = "QA DNS zone" } data "google_dns_managed_zone" "qa" { - name = "${google_dns_managed_zone.foo.name}" + name = google_dns_managed_zone.foo.name } `, acctest.RandString(10)) } diff --git a/google/data_source_google_active_folder_test.go b/google/data_source_google_active_folder_test.go index 8e18b1f2451..b887ccbde83 100644 --- a/google/data_source_google_active_folder_test.go +++ b/google/data_source_google_active_folder_test.go @@ -82,13 +82,14 @@ func testAccDataSourceGoogleActiveFolderCheck(data_source_name string, resource_ func testAccDataSourceGoogleActiveFolderConfig(parent string, displayName string) string { return fmt.Sprintf(` resource "google_folder" "foobar" { - parent = "%s" + parent = "%s" display_name = "%s" } data "google_active_folder" "my_folder" { - parent = "${google_folder.foobar.parent}" - display_name = "${google_folder.foobar.display_name}" + parent = google_folder.foobar.parent + display_name = google_folder.foobar.display_name } + `, parent, displayName) } diff --git a/google/data_source_google_billing_account_test.go b/google/data_source_google_billing_account_test.go index 5cd9ade7ff4..749d6d3b379 100644 --- a/google/data_source_google_billing_account_test.go +++ b/google/data_source_google_billing_account_test.go @@ -85,20 +85,23 @@ func testAccCheckGoogleBillingAccount_byName(name string) string { return fmt.Sprintf(` data "google_billing_account" "acct" { billing_account = "%s" -}`, name) +} +`, name) } func testAccCheckGoogleBillingAccount_byNameClosed(name string) string { return fmt.Sprintf(` data "google_billing_account" "acct" { billing_account = "%s" - open = false -}`, name) + open = false +} +`, name) } func testAccCheckGoogleBillingAccount_byDisplayName(name string) string { return fmt.Sprintf(` data "google_billing_account" "acct" { display_name = "%s" -}`, name) +} +`, name) } diff --git a/google/data_source_google_client_openid_userinfo.go b/google/data_source_google_client_openid_userinfo.go index eb524a0e2f9..6d8cc2a44fe 100644 --- a/google/data_source_google_client_openid_userinfo.go +++ b/google/data_source_google_client_openid_userinfo.go @@ -26,7 +26,7 @@ func dataSourceGoogleClientOpenIDUserinfoRead(d *schema.ResourceData, meta inter // URL retrieved from https://accounts.google.com/.well-known/openid-configuration res, err := sendRequest(config, "GET", "", "https://openidconnect.googleapis.com/v1/userinfo", nil) if err != nil { - return fmt.Errorf("error retrieving userinfo for your provider credentials; have you enabled the 'https://www.googleapis.com/auth/userinfo.email' scope? error: %s", err) + return fmt.Errorf("error retrieving userinfo for your provider credentials. have you enabled the 'https://www.googleapis.com/auth/userinfo.email' scope? error: %s", err) } d.SetId(time.Now().UTC().String()) diff --git a/google/data_source_google_client_openid_userinfo_test.go b/google/data_source_google_client_openid_userinfo_test.go index 0f7fc42578a..f293f8b17fd 100644 --- a/google/data_source_google_client_openid_userinfo_test.go +++ b/google/data_source_google_client_openid_userinfo_test.go @@ -24,23 +24,5 @@ func TestAccDataSourceGoogleClientOpenIDUserinfo_basic(t *testing.T) { } const testAccCheckGoogleClientOpenIDUserinfo_basic = ` -provider "google" { - alias = "google-scoped" - - # We need to add an additional scope to test this; because our tests rely on - # every env var being set, we can just add an alias with the appropriate - # scopes. 
diff --git a/google/data_source_google_active_folder_test.go b/google/data_source_google_active_folder_test.go
index 8e18b1f2451..b887ccbde83 100644
--- a/google/data_source_google_active_folder_test.go
+++ b/google/data_source_google_active_folder_test.go
@@ -82,13 +82,14 @@ func testAccDataSourceGoogleActiveFolderCheck(data_source_name string, resource_
 func testAccDataSourceGoogleActiveFolderConfig(parent string, displayName string) string {
 	return fmt.Sprintf(`
 resource "google_folder" "foobar" {
-	parent = "%s"
+  parent       = "%s"
   display_name = "%s"
 }
 
 data "google_active_folder" "my_folder" {
-	parent = "${google_folder.foobar.parent}"
-	display_name = "${google_folder.foobar.display_name}"
+  parent       = google_folder.foobar.parent
+  display_name = google_folder.foobar.display_name
 }
+
 `, parent, displayName)
 }

diff --git a/google/data_source_google_billing_account_test.go b/google/data_source_google_billing_account_test.go
index 5cd9ade7ff4..749d6d3b379 100644
--- a/google/data_source_google_billing_account_test.go
+++ b/google/data_source_google_billing_account_test.go
@@ -85,20 +85,23 @@ func testAccCheckGoogleBillingAccount_byName(name string) string {
 	return fmt.Sprintf(`
 data "google_billing_account" "acct" {
   billing_account = "%s"
-}`, name)
+}
+`, name)
 }
 
 func testAccCheckGoogleBillingAccount_byNameClosed(name string) string {
 	return fmt.Sprintf(`
 data "google_billing_account" "acct" {
   billing_account = "%s"
-  open = false
-}`, name)
+  open            = false
+}
+`, name)
 }
 
 func testAccCheckGoogleBillingAccount_byDisplayName(name string) string {
 	return fmt.Sprintf(`
 data "google_billing_account" "acct" {
   display_name = "%s"
-}`, name)
+}
+`, name)
 }

diff --git a/google/data_source_google_client_openid_userinfo.go b/google/data_source_google_client_openid_userinfo.go
index eb524a0e2f9..6d8cc2a44fe 100644
--- a/google/data_source_google_client_openid_userinfo.go
+++ b/google/data_source_google_client_openid_userinfo.go
@@ -26,7 +26,7 @@ func dataSourceGoogleClientOpenIDUserinfoRead(d *schema.ResourceData, meta inter
 	// URL retrieved from https://accounts.google.com/.well-known/openid-configuration
 	res, err := sendRequest(config, "GET", "", "https://openidconnect.googleapis.com/v1/userinfo", nil)
 	if err != nil {
-		return fmt.Errorf("error retrieving userinfo for your provider credentials; have you enabled the 'https://www.googleapis.com/auth/userinfo.email' scope? error: %s", err)
+		return fmt.Errorf("error retrieving userinfo for your provider credentials. Have you enabled the 'https://www.googleapis.com/auth/userinfo.email' scope? error: %s", err)
 	}
 
 	d.SetId(time.Now().UTC().String())

diff --git a/google/data_source_google_client_openid_userinfo_test.go b/google/data_source_google_client_openid_userinfo_test.go
index 0f7fc42578a..f293f8b17fd 100644
--- a/google/data_source_google_client_openid_userinfo_test.go
+++ b/google/data_source_google_client_openid_userinfo_test.go
@@ -24,23 +24,5 @@ func TestAccDataSourceGoogleClientOpenIDUserinfo_basic(t *testing.T) {
 }
 
 const testAccCheckGoogleClientOpenIDUserinfo_basic = `
-provider "google" {
-  alias = "google-scoped"
-
-  # We need to add an additional scope to test this; because our tests rely on
-  # every env var being set, we can just add an alias with the appropriate
-  # scopes. This will fail if someone uses an access token instead of creds
-  # unless they've configured the userinfo.email scope.
-  scopes = [
-    "https://www.googleapis.com/auth/compute",
-    "https://www.googleapis.com/auth/cloud-platform",
-    "https://www.googleapis.com/auth/ndev.clouddns.readwrite",
-    "https://www.googleapis.com/auth/devstorage.full_control",
-    "https://www.googleapis.com/auth/userinfo.email",
-  ]
-}
-
-data "google_client_openid_userinfo" "me" {
-  provider = "google.google-scoped"
-}
+data "google_client_openid_userinfo" "me" {}
 `
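The test above can drop its specially scoped provider alias because `userinfo.email` now ships in `defaultClientScopes` (see the `config.go` hunk earlier). Users who want the previous, narrower scope set can still pin scopes explicitly; the list below is copied from the removed fixture, minus the now-default entry:

```go
package main

import "fmt"

// legacyScopes is the pre-3.0.0 default scope set, copied from the removed
// test fixture; note the absence of userinfo.email. A provider block that
// sets `scopes = [...]` to this list opts out of the new default.
var legacyScopes = []string{
	"https://www.googleapis.com/auth/compute",
	"https://www.googleapis.com/auth/cloud-platform",
	"https://www.googleapis.com/auth/ndev.clouddns.readwrite",
	"https://www.googleapis.com/auth/devstorage.full_control",
}

func main() {
	for _, s := range legacyScopes {
		fmt.Println(s)
	}
}
```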
diff --git a/google/data_source_google_cloudfunctions_function.go b/google/data_source_google_cloudfunctions_function.go
index 6722215b9cc..303e1b0a336 100644
--- a/google/data_source_google_cloudfunctions_function.go
+++ b/google/data_source_google_cloudfunctions_function.go
@@ -39,7 +39,7 @@ func dataSourceGoogleCloudFunctionsFunctionRead(d *schema.ResourceData, meta int
 		Name:    d.Get("name").(string),
 	}
 
-	d.SetId(cloudFuncId.terraformId())
+	d.SetId(cloudFuncId.cloudFunctionId())
 
 	err = resourceCloudFunctionsRead(d, meta)
 	if err != nil {

diff --git a/google/data_source_google_cloudfunctions_function_test.go b/google/data_source_google_cloudfunctions_function_test.go
index b569742c48b..9c473fa8c9b 100644
--- a/google/data_source_google_cloudfunctions_function_test.go
+++ b/google/data_source_google_cloudfunctions_function_test.go
@@ -43,23 +43,24 @@ resource "google_storage_bucket" "bucket" {
 
 resource "google_storage_bucket_object" "archive" {
   name   = "index.zip"
-  bucket = "${google_storage_bucket.bucket.name}"
+  bucket = google_storage_bucket.bucket.name
   source = "%s"
 }
 
 resource "google_cloudfunctions_function" "function_http" {
   name                  = "%s-http"
+  runtime               = "nodejs8"
   description           = "test function"
   available_memory_mb   = 128
-  source_archive_bucket = "${google_storage_bucket.bucket.name}"
-  source_archive_object = "${google_storage_bucket_object.archive.name}"
+  source_archive_bucket = google_storage_bucket.bucket.name
+  source_archive_object = google_storage_bucket_object.archive.name
   trigger_http          = true
   timeout               = 61
   entry_point           = "helloGET"
 }
 
 data "google_cloudfunctions_function" "function_http" {
-  name = "${google_cloudfunctions_function.function_http.name}"
+  name = google_cloudfunctions_function.function_http.name
 }
 `, bucketName, zipFilePath, functionName)
 }

diff --git a/google/data_source_google_compute_address.go b/google/data_source_google_compute_address.go
index c1286e3f9d5..28bee45f353 100644
--- a/google/data_source_google_compute_address.go
+++ b/google/data_source_google_compute_address.go
@@ -3,7 +3,6 @@ package google
 import (
 	"fmt"
 	"regexp"
-	"strconv"
 	"strings"
 
 	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
@@ -78,7 +77,7 @@ func dataSourceGoogleComputeAddressRead(d *schema.ResourceData, meta interface{}
 	d.Set("project", project)
 	d.Set("region", region)
 
-	d.SetId(strconv.FormatUint(address.Id, 10))
+	d.SetId(fmt.Sprintf("projects/%s/regions/%s/addresses/%s", project, region, name))
 
 	return nil
 }

diff --git a/google/data_source_google_compute_address_test.go b/google/data_source_google_compute_address_test.go
index b8631cf2592..7e618003c7c 100644
--- a/google/data_source_google_compute_address_test.go
+++ b/google/data_source_google_compute_address_test.go
@@ -161,11 +161,11 @@ func testAccCheckDataSourceComputeAddressDestroy(resource_name string) resource.
 func testAccDataSourceComputeAddressConfig(rsName, dsName string) string {
 	return fmt.Sprintf(`
 resource "google_compute_address" "%s" {
-	name = "address-test"
+  name = "address-test"
 }
 
 data "google_compute_address" "%s" {
-	name = "${google_compute_address.%s.name}"
+  name = google_compute_address.%s.name
 }
 `, rsName, dsName, rsName)
 }

diff --git a/google/data_source_google_compute_backend_service.go b/google/data_source_google_compute_backend_service.go
index 55078c5d5be..a8ac7db3b9a 100644
--- a/google/data_source_google_compute_backend_service.go
+++ b/google/data_source_google_compute_backend_service.go
@@ -1,6 +1,8 @@
 package google
 
 import (
+	"fmt"
+
 	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
 )
 
@@ -20,9 +22,16 @@ func dataSourceGoogleComputeBackendService() *schema.Resource {
 }
 
 func dataSourceComputeBackendServiceRead(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+
 	serviceName := d.Get("name").(string)
 
-	d.SetId(serviceName)
+	project, err := getProject(d, config)
+	if err != nil {
+		return err
+	}
+
+	d.SetId(fmt.Sprintf("projects/%s/global/backendServices/%s", project, serviceName))
 
 	return resourceComputeBackendServiceRead(d, meta)
 }
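`getProject` appears in nearly every hunk in this patch; as used across the provider, it prefers a project set on the resource itself and falls back to the provider-level default. A simplified sketch of that fallback with plain maps in place of the real `*schema.ResourceData` and `*Config` types:

```go
package main

import (
	"errors"
	"fmt"
)

// getProjectSketch mirrors the helper's fallback order: an explicit
// per-resource project wins, then the provider-level default, else error.
func getProjectSketch(resource map[string]string, providerDefault string) (string, error) {
	if p, ok := resource["project"]; ok && p != "" {
		return p, nil
	}
	if providerDefault != "" {
		return providerDefault, nil
	}
	return "", errors.New("project: required field is not set")
}

func main() {
	p, _ := getProjectSketch(map[string]string{}, "default-project")
	fmt.Println(p) // default-project
}
```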
"google_compute_forwarding_rule" "foobar-fr" { - description = "Resource created for Terraform acceptance testing" - ip_protocol = "UDP" - name = "%s" - port_range = "80-81" - target = "${google_compute_target_pool.foobar-tp.self_link}" - } - data "google_compute_forwarding_rule" "my_forwarding_rule" { - name = "${google_compute_forwarding_rule.foobar-fr.name}" - } +resource "google_compute_target_pool" "foobar-tp" { + description = "Resource created for Terraform acceptance testing" + instances = ["us-central1-a/foo", "us-central1-b/bar"] + name = "%s" +} + +resource "google_compute_forwarding_rule" "foobar-fr" { + description = "Resource created for Terraform acceptance testing" + ip_protocol = "UDP" + name = "%s" + port_range = "80-81" + target = google_compute_target_pool.foobar-tp.self_link +} + +data "google_compute_forwarding_rule" "my_forwarding_rule" { + name = google_compute_forwarding_rule.foobar-fr.name +} `, poolName, ruleName) } diff --git a/google/data_source_google_compute_global_address.go b/google/data_source_google_compute_global_address.go index 5825e74da10..3662a2d3666 100644 --- a/google/data_source_google_compute_global_address.go +++ b/google/data_source_google_compute_global_address.go @@ -2,7 +2,6 @@ package google import ( "fmt" - "strconv" "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) @@ -58,7 +57,6 @@ func dataSourceGoogleComputeGlobalAddressRead(d *schema.ResourceData, meta inter d.Set("status", address.Status) d.Set("self_link", address.SelfLink) d.Set("project", project) - - d.SetId(strconv.FormatUint(address.Id, 10)) + d.SetId(fmt.Sprintf("projects/%s/global/addresses/%s", project, name)) return nil } diff --git a/google/data_source_google_compute_global_address_test.go b/google/data_source_google_compute_global_address_test.go index 3e93935d271..3f9ff0bc542 100644 --- a/google/data_source_google_compute_global_address_test.go +++ b/google/data_source_google_compute_global_address_test.go @@ -77,11 +77,11 @@ func testAccDataSourceComputeGlobalAddressCheck(data_source_name string, resourc func testAccDataSourceComputeGlobalAddressConfig(rsName, dsName string) string { return fmt.Sprintf(` resource "google_compute_global_address" "%s" { - name = "address-test" + name = "address-test" } data "google_compute_global_address" "%s" { - name = "${google_compute_global_address.%s.name}" + name = google_compute_global_address.%s.name } `, rsName, dsName, rsName) } diff --git a/google/data_source_google_compute_image_test.go b/google/data_source_google_compute_image_test.go index acb00158fd7..24c08ad753e 100644 --- a/google/data_source_google_compute_image_test.go +++ b/google/data_source_google_compute_image_test.go @@ -87,19 +87,22 @@ func testAccDataSourceCustomImageConfig(family, name string) string { resource "google_compute_image" "image" { family = "%s" name = "%s" - source_disk = "${google_compute_disk.disk.self_link}" + source_disk = google_compute_disk.disk.self_link } + resource "google_compute_disk" "disk" { name = "%s-disk" zone = "us-central1-b" } + data "google_compute_image" "from_name" { - project = "${google_compute_image.image.project}" - name = "${google_compute_image.image.name}" + project = google_compute_image.image.project + name = google_compute_image.image.name } + data "google_compute_image" "from_family" { - project = "${google_compute_image.image.project}" - family = "${google_compute_image.image.family}" + project = google_compute_image.image.project + family = google_compute_image.image.family } `, family, name, name) } 
diff --git a/google/data_source_google_compute_instance.go b/google/data_source_google_compute_instance.go index 286d0ae4e87..3868f405c45 100644 --- a/google/data_source_google_compute_instance.go +++ b/google/data_source_google_compute_instance.go @@ -154,6 +154,6 @@ func dataSourceGoogleComputeInstanceRead(d *schema.ResourceData, meta interface{ d.Set("project", project) d.Set("zone", GetResourceNameFromSelfLink(instance.Zone)) d.Set("name", instance.Name) - d.SetId(ConvertSelfLinkToV1(instance.SelfLink)) + d.SetId(fmt.Sprintf("projects/%s/zones/%s/instances/%s", project, instance.Zone, instance.Name)) return nil } diff --git a/google/data_source_google_compute_instance_group.go b/google/data_source_google_compute_instance_group.go index 42945a59eba..9809f7a656a 100644 --- a/google/data_source_google_compute_instance_group.go +++ b/google/data_source_google_compute_instance_group.go @@ -87,7 +87,11 @@ func dataSourceComputeInstanceGroupRead(d *schema.ResourceData, meta interface{} if err != nil { return err } - d.SetId(fmt.Sprintf("%s/%s", zone, name.(string))) + project, err := getProject(d, config) + if err != nil { + return err + } + d.SetId(fmt.Sprintf("projects/%s/zones/%s/instanceGroups/%s", project, zone, name.(string))) } else if selfLink, ok := d.GetOk("self_link"); ok { parsed, err := ParseInstanceGroupFieldValue(selfLink.(string), d, config) if err != nil { @@ -96,7 +100,7 @@ func dataSourceComputeInstanceGroupRead(d *schema.ResourceData, meta interface{} d.Set("name", parsed.Name) d.Set("zone", parsed.Zone) d.Set("project", parsed.Project) - d.SetId(fmt.Sprintf("%s/%s", parsed.Zone, parsed.Name)) + d.SetId(fmt.Sprintf("projects/%s/zones/%s/instanceGroups/%s", parsed.Project, parsed.Zone, parsed.Name)) } else { return errors.New("Must provide either `self_link` or `zone/name`") } diff --git a/google/data_source_google_compute_instance_group_test.go b/google/data_source_google_compute_instance_group_test.go index cbaa09c0d30..9158ee105f2 100644 --- a/google/data_source_google_compute_instance_group_test.go +++ b/google/data_source_google_compute_instance_group_test.go @@ -210,7 +210,7 @@ resource "google_compute_instance" "test" { boot_disk { initialize_params { - image = "${data.google_compute_image.my_image.self_link}" + image = data.google_compute_image.my_image.self_link } } @@ -225,16 +225,16 @@ resource "google_compute_instance" "test" { resource "google_compute_instance_group" "test" { name = "tf-test-%s" - zone = "${google_compute_instance.test.zone}" + zone = google_compute_instance.test.zone instances = [ - "${google_compute_instance.test.self_link}", + google_compute_instance.test.self_link, ] } data "google_compute_instance_group" "test" { - name = "${google_compute_instance_group.test.name}" - zone = "${google_compute_instance_group.test.zone}" + name = google_compute_instance_group.test.name + zone = google_compute_instance_group.test.zone } `, acctest.RandString(10), acctest.RandString(10)) } @@ -253,7 +253,7 @@ resource "google_compute_instance" "test" { boot_disk { initialize_params { - image = "${data.google_compute_image.my_image.self_link}" + image = data.google_compute_image.my_image.self_link } } @@ -268,7 +268,7 @@ resource "google_compute_instance" "test" { resource "google_compute_instance_group" "test" { name = "tf-test-%s" - zone = "${google_compute_instance.test.zone}" + zone = google_compute_instance.test.zone named_port { name = "http" @@ -281,13 +281,13 @@ resource "google_compute_instance_group" "test" { } instances = [ - 
"${google_compute_instance.test.self_link}", + google_compute_instance.test.self_link, ] } data "google_compute_instance_group" "test" { - name = "${google_compute_instance_group.test.name}" - zone = "${google_compute_instance_group.test.zone}" + name = google_compute_instance_group.test.name + zone = google_compute_instance_group.test.zone } `, acctest.RandString(10), acctest.RandString(10)) } @@ -300,13 +300,13 @@ data "google_compute_image" "my_image" { } resource "google_compute_instance_template" "igm-basic" { - name = "%s" + name = "%s" machine_type = "n1-standard-1" disk { - source_image = "${data.google_compute_image.my_image.self_link}" - auto_delete = true - boot = true + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true } network_interface { @@ -315,17 +315,17 @@ resource "google_compute_instance_template" "igm-basic" { } resource "google_compute_instance_group_manager" "igm" { - name = "%s" - instance_template = "${google_compute_instance_template.igm-basic.self_link}" + name = "%s" + instance_template = google_compute_instance_template.igm-basic.self_link base_instance_name = "igm" - zone = "us-central1-a" - target_size = 10 + zone = "us-central1-a" + target_size = 10 wait_for_instances = true } data "google_compute_instance_group" "test" { - self_link = "${google_compute_instance_group_manager.igm.instance_group}" + self_link = google_compute_instance_group_manager.igm.instance_group } `, acctest.RandomWithPrefix("test-igm"), acctest.RandomWithPrefix("test-igm")) } diff --git a/google/data_source_google_compute_instance_test.go b/google/data_source_google_compute_instance_test.go index 08aab937fce..108b0dc4e1e 100644 --- a/google/data_source_google_compute_instance_test.go +++ b/google/data_source_google_compute_instance_test.go @@ -88,50 +88,50 @@ func testAccDataSourceComputeInstanceCheck(datasourceName string, resourceName s func testAccDataSourceComputeInstanceConfig(instanceName string) string { return fmt.Sprintf(` resource "google_compute_instance" "foo" { - name = "%s" - machine_type = "n1-standard-1" - zone = "us-central1-a" - can_ip_forward = false - tags = ["foo", "bar"] - - boot_disk { - initialize_params{ - image = "debian-8-jessie-v20160803" - } - } - - scratch_disk { - } - - network_interface { - network = "default" - - access_config { - // Ephemeral IP - } - } - - metadata = { - foo = "bar" - baz = "qux" - startup-script = "echo Hello" - } - - labels = { - my_key = "my_value" - my_other_key = "my_other_value" - } - - enable_display = true + name = "%s" + machine_type = "n1-standard-1" + zone = "us-central1-a" + can_ip_forward = false + tags = ["foo", "bar"] + + boot_disk { + initialize_params { + image = "debian-8-jessie-v20160803" + } + } + + scratch_disk { + } + + network_interface { + network = "default" + + access_config { + // Ephemeral IP + } + } + + metadata = { + foo = "bar" + baz = "qux" + startup-script = "echo Hello" + } + + labels = { + my_key = "my_value" + my_other_key = "my_other_value" + } + + enable_display = true } data "google_compute_instance" "bar" { - name = "${google_compute_instance.foo.name}" - zone = "us-central1-a" + name = google_compute_instance.foo.name + zone = "us-central1-a" } data "google_compute_instance" "baz" { - self_link = "${google_compute_instance.foo.self_link}" + self_link = google_compute_instance.foo.self_link } `, instanceName) } diff --git a/google/data_source_google_compute_network.go b/google/data_source_google_compute_network.go index 9c4d2cba561..31e9655497a 100644 
--- a/google/data_source_google_compute_network.go +++ b/google/data_source_google_compute_network.go @@ -61,6 +61,6 @@ func dataSourceGoogleComputeNetworkRead(d *schema.ResourceData, meta interface{} d.Set("self_link", network.SelfLink) d.Set("description", network.Description) d.Set("subnetworks_self_links", network.Subnetworks) - d.SetId(network.Name) + d.SetId(fmt.Sprintf("projects/%s/global/networks/%s", project, network.Name)) return nil } diff --git a/google/data_source_google_compute_network_test.go b/google/data_source_google_compute_network_test.go index 16cec24c47d..b4bd6edcb1f 100644 --- a/google/data_source_google_compute_network_test.go +++ b/google/data_source_google_compute_network_test.go @@ -69,11 +69,12 @@ func testAccDataSourceGoogleNetworkCheck(data_source_name string, resource_name func testAccDataSourceGoogleNetworkConfig(name string) string { return fmt.Sprintf(` resource "google_compute_network" "foobar" { - name = "%s" - description = "my-description" + name = "%s" + description = "my-description" } data "google_compute_network" "my_network" { - name = "${google_compute_network.foobar.name}" -}`, name) + name = google_compute_network.foobar.name +} +`, name) } diff --git a/google/data_source_google_compute_region_instance_group.go b/google/data_source_google_compute_region_instance_group.go index cd01ae00100..749797b4661 100644 --- a/google/data_source_google_compute_region_instance_group.go +++ b/google/data_source_google_compute_region_instance_group.go @@ -3,7 +3,6 @@ package google import ( "fmt" "log" - "strconv" "github.com/hashicorp/terraform-plugin-sdk/helper/schema" @@ -113,7 +112,7 @@ func dataSourceComputeRegionInstanceGroupRead(d *schema.ResourceData, meta inter } else { d.Set("instances", flattenInstancesWithNamedPorts(members.Items)) } - d.SetId(strconv.FormatUint(instanceGroup.Id, 16)) + d.SetId(fmt.Sprintf("projects/%s/regions/%s/instanceGroups/%s", project, region, name)) d.Set("self_link", instanceGroup.SelfLink) d.Set("name", name) d.Set("project", project) diff --git a/google/data_source_google_compute_region_instance_group_test.go b/google/data_source_google_compute_region_instance_group_test.go index a031f220832..d0d14504463 100644 --- a/google/data_source_google_compute_region_instance_group_test.go +++ b/google/data_source_google_compute_region_instance_group_test.go @@ -29,43 +29,43 @@ func TestAccDataSourceRegionInstanceGroup(t *testing.T) { func testAccDataSourceRegionInstanceGroup_basic(instanceManagerName string) string { return fmt.Sprintf(` resource "google_compute_target_pool" "foo" { - name = "%s" + name = "%s" } data "google_compute_image" "debian" { - project = "debian-cloud" - name = "debian-9-stretch-v20171129" + project = "debian-cloud" + name = "debian-9-stretch-v20171129" } resource "google_compute_instance_template" "foo" { - machine_type = "n1-standard-1" - disk { - source_image = "${data.google_compute_image.debian.self_link}" - } - network_interface { - access_config { - } - network = "default" - } + machine_type = "n1-standard-1" + disk { + source_image = data.google_compute_image.debian.self_link + } + network_interface { + access_config { + } + network = "default" + } } resource "google_compute_region_instance_group_manager" "foo" { - name = "%s" - base_instance_name = "foo" - instance_template = "${google_compute_instance_template.foo.self_link}" - region = "us-central1" - target_pools = ["${google_compute_target_pool.foo.self_link}"] - target_size = 1 + name = "%s" + base_instance_name = "foo" + instance_template = 
diff --git a/google/data_source_google_compute_region_instance_group_test.go b/google/data_source_google_compute_region_instance_group_test.go
index a031f220832..d0d14504463 100644
--- a/google/data_source_google_compute_region_instance_group_test.go
+++ b/google/data_source_google_compute_region_instance_group_test.go
@@ -29,43 +29,43 @@ func TestAccDataSourceRegionInstanceGroup(t *testing.T) {
 func testAccDataSourceRegionInstanceGroup_basic(instanceManagerName string) string {
 	return fmt.Sprintf(`
 resource "google_compute_target_pool" "foo" {
-	name = "%s"
+  name = "%s"
 }
 
 data "google_compute_image" "debian" {
-	project = "debian-cloud"
-	name = "debian-9-stretch-v20171129"
+  project = "debian-cloud"
+  name    = "debian-9-stretch-v20171129"
 }
 
 resource "google_compute_instance_template" "foo" {
-	machine_type = "n1-standard-1"
-	disk {
-		source_image = "${data.google_compute_image.debian.self_link}"
-	}
-	network_interface {
-		access_config {
-		}
-		network = "default"
-	}
+  machine_type = "n1-standard-1"
+  disk {
+    source_image = data.google_compute_image.debian.self_link
+  }
+  network_interface {
+    access_config {
+    }
+    network = "default"
+  }
 }
 
 resource "google_compute_region_instance_group_manager" "foo" {
-	name = "%s"
-	base_instance_name = "foo"
-	instance_template = "${google_compute_instance_template.foo.self_link}"
-	region = "us-central1"
-	target_pools = ["${google_compute_target_pool.foo.self_link}"]
-	target_size = 1
+  name               = "%s"
+  base_instance_name = "foo"
+  instance_template  = google_compute_instance_template.foo.self_link
+  region             = "us-central1"
+  target_pools       = [google_compute_target_pool.foo.self_link]
+  target_size        = 1
 
-	named_port {
-		name = "web"
-		port = 80
-	}
-	wait_for_instances = true
+  named_port {
+    name = "web"
+    port = 80
+  }
+  wait_for_instances = true
 }
 
 data "google_compute_region_instance_group" "data_source" {
-	self_link = "${google_compute_region_instance_group_manager.foo.instance_group}"
+  self_link = google_compute_region_instance_group_manager.foo.instance_group
 }
 `, acctest.RandomWithPrefix("test-rigm-"), instanceManagerName)
 }

diff --git a/google/data_source_google_compute_ssl_certificate.go b/google/data_source_google_compute_ssl_certificate.go
index 3f49e66aae9..04369989bf5 100644
--- a/google/data_source_google_compute_ssl_certificate.go
+++ b/google/data_source_google_compute_ssl_certificate.go
@@ -1,6 +1,8 @@
 package google
 
 import (
+	"fmt"
+
 	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
 )
 
@@ -21,9 +23,15 @@ func dataSourceGoogleComputeSslCertificate() *schema.Resource {
 }
 
 func dataSourceComputeSslCertificateRead(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+
+	project, err := getProject(d, config)
+	if err != nil {
+		return err
+	}
 
 	certificateName := d.Get("name").(string)
 
-	d.SetId(certificateName)
+	d.SetId(fmt.Sprintf("projects/%s/global/sslCertificates/%s", project, certificateName))
 
 	return resourceComputeSslCertificateRead(d, meta)
 }

diff --git a/google/data_source_google_compute_ssl_certificate_test.go b/google/data_source_google_compute_ssl_certificate_test.go
index 93a51a61990..1e0c4a502e0 100644
--- a/google/data_source_google_compute_ssl_certificate_test.go
+++ b/google/data_source_google_compute_ssl_certificate_test.go
@@ -34,14 +34,14 @@ func TestAccDataSourceComputeSslCertificate(t *testing.T) {
 func testAccDataSourceComputeSslCertificateConfig() string {
 	return fmt.Sprintf(`
 resource "google_compute_ssl_certificate" "foobar" {
-	name = "cert-test-%s"
-	description = "really descriptive"
-	private_key = "${file("test-fixtures/ssl_cert/test.key")}"
-	certificate = "${file("test-fixtures/ssl_cert/test.crt")}"
+  name        = "cert-test-%s"
+  description = "really descriptive"
+  private_key = file("test-fixtures/ssl_cert/test.key")
+  certificate = file("test-fixtures/ssl_cert/test.crt")
 }
 
 data "google_compute_ssl_certificate" "cert" {
-	name = "${google_compute_ssl_certificate.foobar.name}"
+  name = google_compute_ssl_certificate.foobar.name
 }
 `, acctest.RandString(10))
 }

diff --git a/google/data_source_google_compute_ssl_policy.go b/google/data_source_google_compute_ssl_policy.go
index cec4d28acd6..6e994847122 100644
--- a/google/data_source_google_compute_ssl_policy.go
+++ b/google/data_source_google_compute_ssl_policy.go
@@ -1,6 +1,8 @@
 package google
 
 import (
+	"fmt"
+
 	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
 )
 
@@ -21,9 +23,15 @@ func dataSourceGoogleComputeSslPolicy() *schema.Resource {
 }
 
 func datasourceComputeSslPolicyRead(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+
+	project, err := getProject(d, config)
+	if err != nil {
+		return err
+	}
 
 	policyName := d.Get("name").(string)
 
-	d.SetId(policyName)
+	d.SetId(fmt.Sprintf("projects/%s/global/sslPolicies/%s", project, policyName))
 
 	return resourceComputeSslPolicyRead(d, meta)
 }

diff --git a/google/data_source_google_compute_ssl_policy_test.go b/google/data_source_google_compute_ssl_policy_test.go
index 52c2b261a7e..7d8ce20a1fa 100644
--- a/google/data_source_google_compute_ssl_policy_test.go
+++ b/google/data_source_google_compute_ssl_policy_test.go
@@ -68,16 +68,15 @@ func testAccDataSourceGoogleSslPolicyCheck(data_source_name string, resource_nam
 func testAccDataSourceGoogleSslPolicy() string {
 	return fmt.Sprintf(`
-
 resource "google_compute_ssl_policy" "foobar" {
-	name = "%s"
-	description = "my-description"
-	min_tls_version = "TLS_1_2"
-	profile = "MODERN"
+  name            = "%s"
+  description     = "my-description"
+  min_tls_version = "TLS_1_2"
+  profile         = "MODERN"
 }
 
 data "google_compute_ssl_policy" "ssl_policy" {
-	name = "${google_compute_ssl_policy.foobar.name}"
+  name = google_compute_ssl_policy.foobar.name
 }
 `, acctest.RandomWithPrefix("test-ssl-policy"))
 }

diff --git a/google/data_source_google_compute_subnetwork.go b/google/data_source_google_compute_subnetwork.go
index 4ba2649cda1..51270d513e3 100644
--- a/google/data_source_google_compute_subnetwork.go
+++ b/google/data_source_google_compute_subnetwork.go
@@ -95,7 +95,7 @@ func dataSourceGoogleComputeSubnetworkRead(d *schema.ResourceData, meta interfac
 	d.Set("region", region)
 	d.Set("secondary_ip_range", flattenSecondaryRanges(subnetwork.SecondaryIpRanges))
 
-	d.SetId(fmt.Sprintf("%s/%s", region, name))
+	d.SetId(fmt.Sprintf("projects/%s/regions/%s/subnetworks/%s", project, region, name))
 
 	return nil
 }

diff --git a/google/data_source_google_compute_subnetwork_test.go b/google/data_source_google_compute_subnetwork_test.go
index 36a34f90c02..3da51de52cc 100644
--- a/google/data_source_google_compute_subnetwork_test.go
+++ b/google/data_source_google_compute_subnetwork_test.go
@@ -76,28 +76,28 @@ func testAccDataSourceGoogleSubnetworkCheck(data_source_name string, resource_na
 func testAccDataSourceGoogleSubnetwork() string {
 	return fmt.Sprintf(`
 resource "google_compute_network" "foobar" {
-	name = "%s"
-	description = "my-description"
+  name        = "%s"
+  description = "my-description"
 }
 
 resource "google_compute_subnetwork" "foobar" {
-	name = "subnetwork-test"
-	description = "my-description"
-	ip_cidr_range = "10.0.0.0/24"
-	network = "${google_compute_network.foobar.self_link}"
-	private_ip_google_access = true
-	secondary_ip_range {
-		range_name = "tf-test-secondary-range"
-		ip_cidr_range = "192.168.1.0/24"
-	}
+  name                     = "subnetwork-test"
+  description              = "my-description"
+  ip_cidr_range            = "10.0.0.0/24"
+  network                  = google_compute_network.foobar.self_link
+  private_ip_google_access = true
+  secondary_ip_range {
+    range_name    = "tf-test-secondary-range"
+    ip_cidr_range = "192.168.1.0/24"
+  }
 }
 
 data "google_compute_subnetwork" "my_subnetwork" {
-	name = "${google_compute_subnetwork.foobar.name}"
+  name = google_compute_subnetwork.foobar.name
 }
 
 data "google_compute_subnetwork" "my_subnetwork_self_link" {
-	self_link = "${google_compute_subnetwork.foobar.self_link}"
+  self_link = google_compute_subnetwork.foobar.self_link
 }
 `, acctest.RandomWithPrefix("network-test"))
 }

diff --git a/google/data_source_google_compute_vpn_gateway.go b/google/data_source_google_compute_vpn_gateway.go
index b8b39ba915e..dfab9a5c484 100644
--- a/google/data_source_google_compute_vpn_gateway.go
+++ b/google/data_source_google_compute_vpn_gateway.go
@@ -73,6 +73,6 @@ func dataSourceGoogleComputeVpnGatewayRead(d *schema.ResourceData, meta interfac
 	d.Set("self_link", gateway.SelfLink)
 	d.Set("description", gateway.Description)
 	d.Set("project", project)
-	d.SetId(gateway.Name)
+	d.SetId(fmt.Sprintf("projects/%s/regions/%s/targetVpnGateways/%s", project, region, name))
 	return nil
 }

diff --git a/google/data_source_google_compute_vpn_gateway_test.go b/google/data_source_google_compute_vpn_gateway_test.go
index 09542f19ce8..8202cf514e0 100644
--- a/google/data_source_google_compute_vpn_gateway_test.go
+++ b/google/data_source_google_compute_vpn_gateway_test.go
@@ -70,12 +70,13 @@ func testAccDataSourceGoogleVpnGatewayCheck(data_source_name string, resource_na
 func testAccDataSourceGoogleVpnGatewayConfig(name string) string {
 	return fmt.Sprintf(`
 resource "google_compute_vpn_gateway" "foobar" {
-	name = "%s"
-	description = "my-description"
-	network = "default"
+  name        = "%s"
+  description = "my-description"
+  network     = "default"
 }
 
 data "google_compute_vpn_gateway" "my_vpn_gateway" {
-	name = "${google_compute_vpn_gateway.foobar.name}"
-}`, name)
+  name = google_compute_vpn_gateway.foobar.name
+}
+`, name)
 }

diff --git a/google/data_source_google_container_cluster.go b/google/data_source_google_container_cluster.go
index 7e884cc61e9..069a17d281d 100644
--- a/google/data_source_google_container_cluster.go
+++ b/google/data_source_google_container_cluster.go
@@ -21,9 +21,21 @@ func dataSourceGoogleContainerCluster() *schema.Resource {
 }
 
 func datasourceContainerClusterRead(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+
 	clusterName := d.Get("name").(string)
 
-	d.SetId(clusterName)
+	location, err := getLocation(d, config)
+	if err != nil {
+		return err
+	}
+
+	project, err := getProject(d, config)
+	if err != nil {
+		return err
+	}
+
+	d.SetId(containerClusterFullName(project, location, clusterName))
 
 	return resourceContainerClusterRead(d, meta)
 }
a/google/data_source_google_compute_vpn_gateway_test.go b/google/data_source_google_compute_vpn_gateway_test.go index 09542f19ce8..8202cf514e0 100644 --- a/google/data_source_google_compute_vpn_gateway_test.go +++ b/google/data_source_google_compute_vpn_gateway_test.go @@ -70,12 +70,13 @@ func testAccDataSourceGoogleVpnGatewayCheck(data_source_name string, resource_na func testAccDataSourceGoogleVpnGatewayConfig(name string) string { return fmt.Sprintf(` resource "google_compute_vpn_gateway" "foobar" { - name = "%s" - description = "my-description" - network = "default" + name = "%s" + description = "my-description" + network = "default" } data "google_compute_vpn_gateway" "my_vpn_gateway" { - name = "${google_compute_vpn_gateway.foobar.name}" -}`, name) + name = google_compute_vpn_gateway.foobar.name +} +`, name) } diff --git a/google/data_source_google_container_cluster.go b/google/data_source_google_container_cluster.go index 7e884cc61e9..069a17d281d 100644 --- a/google/data_source_google_container_cluster.go +++ b/google/data_source_google_container_cluster.go @@ -21,9 +21,21 @@ func dataSourceGoogleContainerCluster() *schema.Resource { } func datasourceContainerClusterRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + clusterName := d.Get("name").(string) - d.SetId(clusterName) + location, err := getLocation(d, config) + if err != nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } + + d.SetId(containerClusterFullName(project, location, clusterName)) return resourceContainerClusterRead(d, meta) } diff --git a/google/data_source_google_container_cluster_test.go b/google/data_source_google_container_cluster_test.go index 662f39591aa..0e536bbec90 100644 --- a/google/data_source_google_container_cluster_test.go +++ b/google/data_source_google_container_cluster_test.go @@ -63,19 +63,19 @@ func TestAccContainerClusterDatasource_regional(t *testing.T) { func testAccContainerClusterDatasource_zonal() string { return fmt.Sprintf(` resource "google_container_cluster" "kubes" { - name = "cluster-test-%s" - location = "us-central1-a" - initial_node_count = 1 + name = "cluster-test-%s" + location = "us-central1-a" + initial_node_count = 1 - master_auth { - username = "mr.yoda" - password = "adoy.rm.123456789" - } + master_auth { + username = "mr.yoda" + password = "adoy.rm.123456789" + } } data "google_container_cluster" "kubes" { - name = "${google_container_cluster.kubes.name}" - location = "${google_container_cluster.kubes.zone}" + name = google_container_cluster.kubes.name + location = google_container_cluster.kubes.location } `, acctest.RandString(10)) } @@ -83,14 +83,14 @@ data "google_container_cluster" "kubes" { func testAccContainerClusterDatasource_regional() string { return fmt.Sprintf(` resource "google_container_cluster" "kubes" { - name = "cluster-test-%s" - location = "us-central1" - initial_node_count = 1 + name = "cluster-test-%s" + location = "us-central1" + initial_node_count = 1 } data "google_container_cluster" "kubes" { - name = "${google_container_cluster.kubes.name}" - location = "${google_container_cluster.kubes.region}" + name = google_container_cluster.kubes.name + location = google_container_cluster.kubes.location } `, acctest.RandString(10)) } diff --git a/google/data_source_google_container_engine_versions.go b/google/data_source_google_container_engine_versions.go index 44cf650693d..3ba9217f13c 100644 --- a/google/data_source_google_container_engine_versions.go +++ 
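
containerClusterFullName is an existing provider helper whose body is not shown in this hunk; the sketch below assumes its output format based on the projects/{{project}}/locations/{{location}} prefix used by the container engine versions data source later in the patch, so treat it as illustrative rather than the provider's actual implementation:

package main

import "fmt"

// Assumed output format for containerClusterFullName; the real helper lives
// in the provider and may differ in detail.
func containerClusterFullName(project, location, cluster string) string {
	return fmt.Sprintf("projects/%s/locations/%s/clusters/%s", project, location, cluster)
}

func main() {
	// The data source now resolves project and location before SetId, so
	// zonal and regional clusters share one canonical ID shape.
	fmt.Println(containerClusterFullName("my-project", "us-central1-a", "cluster-test"))
}
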
b/google/data_source_google_container_engine_versions.go @@ -21,21 +21,18 @@ func dataSourceGoogleContainerEngineVersions() *schema.Resource { Optional: true, }, "location": { - Type: schema.TypeString, - Optional: true, - ConflictsWith: []string{"zone", "region"}, + Type: schema.TypeString, + Optional: true, }, "zone": { - Type: schema.TypeString, - Optional: true, - Deprecated: "Use location instead", - ConflictsWith: []string{"region", "location"}, + Type: schema.TypeString, + Optional: true, + Removed: "Use location instead", }, "region": { - Type: schema.TypeString, - Optional: true, - Deprecated: "Use location instead", - ConflictsWith: []string{"zone", "location"}, + Type: schema.TypeString, + Optional: true, + Removed: "Use location instead", }, "default_cluster_version": { Type: schema.TypeString, @@ -76,7 +73,7 @@ func dataSourceGoogleContainerEngineVersionsRead(d *schema.ResourceData, meta in return err } if len(location) == 0 { - return fmt.Errorf("Cannot determine location: set location, zone, or region in this data source or at provider-level") + return fmt.Errorf("Cannot determine location: set location in this data source or at provider-level") } location = fmt.Sprintf("projects/%s/locations/%s", project, location) diff --git a/google/data_source_google_container_engine_versions_test.go b/google/data_source_google_container_engine_versions_test.go index 84e13eae07d..eef37286ab6 100644 --- a/google/data_source_google_container_engine_versions_test.go +++ b/google/data_source_google_container_engine_versions_test.go @@ -21,7 +21,6 @@ func TestAccContainerEngineVersions_basic(t *testing.T) { Config: testAccCheckGoogleContainerEngineVersionsConfig, Check: resource.ComposeTestCheckFunc( testAccCheckGoogleContainerEngineVersionsMeta("data.google_container_engine_versions.location"), - testAccCheckGoogleContainerEngineVersionsMeta("data.google_container_engine_versions.versions"), ), }, }, @@ -46,24 +45,6 @@ func TestAccContainerEngineVersions_filtered(t *testing.T) { }) } -func TestAccContainerEngineVersions_regional(t *testing.T) { - t.Parallel() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccCheckGoogleContainerEngineVersionsRegionalConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleContainerEngineVersionsMeta("data.google_container_engine_versions.location"), - testAccCheckGoogleContainerEngineVersionsMeta("data.google_container_engine_versions.versions"), - ), - }, - }, - }) -} - func testAccCheckGoogleContainerEngineVersionsMeta(n string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -138,25 +119,11 @@ var testAccCheckGoogleContainerEngineVersionsConfig = ` data "google_container_engine_versions" "location" { location = "us-central1-b" } - -data "google_container_engine_versions" "versions" { - zone = "us-central1-b" -} ` var testAccCheckGoogleContainerEngineVersions_filtered = ` data "google_container_engine_versions" "versions" { - zone = "us-central1-b" + location = "us-central1-b" version_prefix = "1.1." 
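
The zone and region arguments above move from Deprecated, which only prints a warning, to Removed, which fails validation for any configuration that still sets the field. A compact sketch of the two stages, using the same SDK package this file imports:

package main

import "github.com/hashicorp/terraform-plugin-sdk/helper/schema"

// zoneField shows the 3.0.0 state; the commented line is the 2.x state.
var zoneField = &schema.Schema{
	Type:     schema.TypeString,
	Optional: true,
	// Deprecated: "Use location instead",  // 2.x: plan succeeds with a warning
	Removed: "Use location instead", // 3.0.0: setting the field is an error
}

func main() { _ = zoneField }
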
} ` - -var testAccCheckGoogleContainerEngineVersionsRegionalConfig = ` -data "google_container_engine_versions" "location" { - location = "us-central1" -} - -data "google_container_engine_versions" "versions" { - region = "us-central1" -} -` diff --git a/google/data_source_google_folder_organization_policy.go b/google/data_source_google_folder_organization_policy.go index 5777ea75d26..b31f18c7687 100644 --- a/google/data_source_google_folder_organization_policy.go +++ b/google/data_source_google_folder_organization_policy.go @@ -21,7 +21,7 @@ func dataSourceGoogleFolderOrganizationPolicy() *schema.Resource { func datasourceGoogleFolderOrganizationPolicyRead(d *schema.ResourceData, meta interface{}) error { - d.SetId(fmt.Sprintf("%s:%s", d.Get("folder"), d.Get("constraint"))) + d.SetId(fmt.Sprintf("%s/%s", d.Get("folder"), d.Get("constraint"))) return resourceGoogleFolderOrganizationPolicyRead(d, meta) } diff --git a/google/data_source_google_folder_organization_policy_test.go b/google/data_source_google_folder_organization_policy_test.go index e4afe828e22..63935f80134 100644 --- a/google/data_source_google_folder_organization_policy_test.go +++ b/google/data_source_google_folder_organization_policy_test.go @@ -35,17 +35,17 @@ resource "google_folder" "orgpolicy" { } resource "google_folder_organization_policy" "resource" { - folder = "${google_folder.orgpolicy.name}" - constraint = "serviceuser.services" + folder = google_folder.orgpolicy.name + constraint = "serviceuser.services" - restore_policy { - default = true - } + restore_policy { + default = true + } } data "google_folder_organization_policy" "data" { - folder = "${google_folder_organization_policy.resource.folder}" + folder = google_folder_organization_policy.resource.folder constraint = "serviceuser.services" } - `, folder, "organizations/"+org) +`, folder, "organizations/"+org) } diff --git a/google/data_source_google_folder_test.go b/google/data_source_google_folder_test.go index 3b3b5334b5c..91e4be72bdd 100644 --- a/google/data_source_google_folder_test.go +++ b/google/data_source_google_folder_test.go @@ -119,43 +119,47 @@ func testAccDataSourceGoogleFolderCheck(data_source_name string, resource_name s func testAccCheckGoogleFolder_byFullNameConfig(parent string, displayName string) string { return fmt.Sprintf(` resource "google_folder" "foobar" { - parent = "%s" + parent = "%s" display_name = "%s" } data "google_folder" "folder" { - folder = "${google_folder.foobar.name}" -}`, parent, displayName) + folder = google_folder.foobar.name +} +`, parent, displayName) } func testAccCheckGoogleFolder_byShortNameConfig(parent string, displayName string) string { return fmt.Sprintf(` resource "google_folder" "foobar" { - parent = "%s" + parent = "%s" display_name = "%s" } data "google_folder" "folder" { - folder = "${replace(google_folder.foobar.name, "folders/", "")}" -}`, parent, displayName) + folder = replace(google_folder.foobar.name, "folders/", "") +} +`, parent, displayName) } func testAccCheckGoogleFolder_lookupOrganizationConfig(parent string, displayName string) string { return fmt.Sprintf(` resource "google_folder" "foobar" { - parent = "%s" + parent = "%s" display_name = "%s" } data "google_folder" "folder" { - folder = "${google_folder.foobar.name}" + folder = google_folder.foobar.name lookup_organization = true -}`, parent, displayName) +} +`, parent, displayName) } func testAccCheckGoogleFolder_byFullNameNotFoundConfig(name string) string { return fmt.Sprintf(` data "google_folder" "folder" { folder = "%s" -}`, name) 
+} +`, name) } diff --git a/google/data_source_google_iam_role_test.go b/google/data_source_google_iam_role_test.go index 7b16ea43938..049da7e0bb6 100644 --- a/google/data_source_google_iam_role_test.go +++ b/google/data_source_google_iam_role_test.go @@ -45,7 +45,7 @@ func testAccCheckGoogleIAMRoleCheck(n string) resource.TestCheckFunc { func testAccCheckGoogleIamRoleConfig(name string) string { return fmt.Sprintf(` data "google_iam_role" "role" { - name = "%s" + name = "%s" } `, name) } diff --git a/google/data_source_google_kms_crypto_key_test.go b/google/data_source_google_kms_crypto_key_test.go index 887bfcbc495..b9d7ec3147b 100644 --- a/google/data_source_google_kms_crypto_key_test.go +++ b/google/data_source_google_kms_crypto_key_test.go @@ -31,8 +31,8 @@ func TestAccDataSourceGoogleKmsCryptoKey_basic(t *testing.T) { func testAccDataSourceGoogleKmsCryptoKey_basic(keyRingName, cryptoKeyName string) string { return fmt.Sprintf(` data "google_kms_crypto_key" "kms_crypto_key" { - key_ring = "%s" - name = "%s" + key_ring = "%s" + name = "%s" } - `, keyRingName, cryptoKeyName) +`, keyRingName, cryptoKeyName) } diff --git a/google/data_source_google_kms_crypto_key_version_test.go b/google/data_source_google_kms_crypto_key_version_test.go index 48d62707ded..a009f06a075 100644 --- a/google/data_source_google_kms_crypto_key_version_test.go +++ b/google/data_source_google_kms_crypto_key_version_test.go @@ -41,7 +41,7 @@ func TestAccDataSourceGoogleKmsCryptoKeyVersion_basic(t *testing.T) { func testAccDataSourceGoogleKmsCryptoKeyVersion_basic(kmsKey string) string { return fmt.Sprintf(` data "google_kms_crypto_key_version" "version" { - crypto_key = "%s" - } + crypto_key = "%s" +} `, kmsKey) } diff --git a/google/data_source_google_kms_key_ring_test.go b/google/data_source_google_kms_key_ring_test.go index 084da2f9e08..21239d508c6 100644 --- a/google/data_source_google_kms_key_ring_test.go +++ b/google/data_source_google_kms_key_ring_test.go @@ -29,10 +29,9 @@ func TestAccDataSourceGoogleKmsKeyRing_basic(t *testing.T) { func testAccDataSourceGoogleKmsKeyRing_basic(keyRingName string) string { return fmt.Sprintf(` - data "google_kms_key_ring" "kms_key_ring" { - name = "%s" - location = "global" + name = "%s" + location = "global" } - `, keyRingName) +`, keyRingName) } diff --git a/google/data_source_google_kms_secret_ciphertext_test.go b/google/data_source_google_kms_secret_ciphertext_test.go index 60589a22026..16675d09066 100644 --- a/google/data_source_google_kms_secret_ciphertext_test.go +++ b/google/data_source_google_kms_secret_ciphertext_test.go @@ -111,8 +111,8 @@ func testAccDecryptSecretDataWithCryptoKey(s *terraform.State, cryptoKeyId *kmsC func testGoogleKmsSecretCiphertext_datasource(cryptoKeyTerraformId, plaintext string) string { return fmt.Sprintf(` data "google_kms_secret_ciphertext" "acceptance" { - crypto_key = "%s" - plaintext = "%s" + crypto_key = "%s" + plaintext = "%s" } - `, cryptoKeyTerraformId, plaintext) +`, cryptoKeyTerraformId, plaintext) } diff --git a/google/data_source_google_kms_secret_test.go b/google/data_source_google_kms_secret_test.go index 31267359a84..1b3d49a8f2f 100644 --- a/google/data_source_google_kms_secret_test.go +++ b/google/data_source_google_kms_secret_test.go @@ -89,8 +89,8 @@ func testAccEncryptSecretDataWithCryptoKey(s *terraform.State, cryptoKeyResource func testGoogleKmsSecret_datasource(cryptoKeyTerraformId, ciphertext string) string { return fmt.Sprintf(` data "google_kms_secret" "acceptance" { - crypto_key = "%s" - ciphertext = "%s" + 
crypto_key = "%s" + ciphertext = "%s" } - `, cryptoKeyTerraformId, ciphertext) +`, cryptoKeyTerraformId, ciphertext) } diff --git a/google/data_source_google_organization.go b/google/data_source_google_organization.go index 0bf0a723ee7..eed599e1e54 100644 --- a/google/data_source_google_organization.go +++ b/google/data_source_google_organization.go @@ -76,7 +76,7 @@ func dataSourceOrganizationRead(d *schema.ResourceData, meta interface{}) error return fmt.Errorf("one of domain or organization must be set") } - d.SetId(GetResourceNameFromSelfLink(organization.Name)) + d.SetId(organization.Name) d.Set("name", organization.Name) d.Set("domain", organization.DisplayName) d.Set("create_time", organization.CreationTime) diff --git a/google/data_source_google_organization_test.go b/google/data_source_google_organization_test.go index 4f055dd5720..ffb4dc4aabe 100644 --- a/google/data_source_google_organization_test.go +++ b/google/data_source_google_organization_test.go @@ -20,7 +20,7 @@ func TestAccDataSourceGoogleOrganization_byFullName(t *testing.T) { { Config: testAccCheckGoogleOrganization_byName(name), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("data.google_organization.org", "id", orgId), + resource.TestCheckResourceAttr("data.google_organization.org", "id", name), resource.TestCheckResourceAttr("data.google_organization.org", "name", name), ), }, @@ -39,7 +39,7 @@ func TestAccDataSourceGoogleOrganization_byShortName(t *testing.T) { { Config: testAccCheckGoogleOrganization_byName(orgId), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("data.google_organization.org", "id", orgId), + resource.TestCheckResourceAttr("data.google_organization.org", "id", name), resource.TestCheckResourceAttr("data.google_organization.org", "name", name), ), }, @@ -66,12 +66,14 @@ func testAccCheckGoogleOrganization_byName(name string) string { return fmt.Sprintf(` data "google_organization" "org" { organization = "%s" -}`, name) +} +`, name) } func testAccCheckGoogleOrganization_byDomain(name string) string { return fmt.Sprintf(` data "google_organization" "org" { domain = "%s" -}`, name) +} +`, name) } diff --git a/google/data_source_google_project.go b/google/data_source_google_project.go index 618c752d557..3b23b74cdc9 100644 --- a/google/data_source_google_project.go +++ b/google/data_source_google_project.go @@ -1,6 +1,7 @@ package google import ( + "fmt" "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) @@ -21,13 +22,13 @@ func datasourceGoogleProjectRead(d *schema.ResourceData, meta interface{}) error if v, ok := d.GetOk("project_id"); ok { project := v.(string) - d.SetId(project) + d.SetId(fmt.Sprintf("projects/%s", project)) } else { project, err := getProject(d, config) if err != nil { return err } - d.SetId(project) + d.SetId(fmt.Sprintf("projects/%s", project)) } return resourceGoogleProjectRead(d, meta) diff --git a/google/data_source_google_project_organization_policy_test.go b/google/data_source_google_project_organization_policy_test.go index f9bf52a9613..14f672f7d96 100644 --- a/google/data_source_google_project_organization_policy_test.go +++ b/google/data_source_google_project_organization_policy_test.go @@ -26,8 +26,6 @@ func TestAccDataSourceGoogleProjectOrganizationPolicy_basic(t *testing.T) { func testAccDataSourceGoogleProjectOrganizationPolicy_basic(project string) string { return fmt.Sprintf(` - - resource "google_project_organization_policy" "resource" { project = "%s" constraint = "constraints/compute.trustedImageProjects" 
@@ -40,8 +38,8 @@ resource "google_project_organization_policy" "resource" { } data "google_project_organization_policy" "data" { - project = "${google_project_organization_policy.resource.project}" + project = google_project_organization_policy.resource.project constraint = "constraints/compute.trustedImageProjects" } - `, project) +`, project) } diff --git a/google/data_source_google_project_services.go b/google/data_source_google_project_services.go deleted file mode 100644 index 5c50559ee89..00000000000 --- a/google/data_source_google_project_services.go +++ /dev/null @@ -1,30 +0,0 @@ -package google - -import ( - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" -) - -func dataSourceGoogleProjectServices() *schema.Resource { - // Generate datasource schema from resource - dsSchema := datasourceSchemaFromResourceSchema(resourceGoogleProjectServices().Schema) - - // Set 'Optional' schema elements - addOptionalFieldsToSchema(dsSchema, "project") - - return &schema.Resource{ - Read: dataSourceGoogleProjectServicesRead, - Schema: dsSchema, - } -} - -func dataSourceGoogleProjectServicesRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - d.SetId(project) - - return resourceGoogleProjectServicesRead(d, meta) -} diff --git a/google/data_source_google_project_services_test.go b/google/data_source_google_project_services_test.go deleted file mode 100644 index 30ff56a849f..00000000000 --- a/google/data_source_google_project_services_test.go +++ /dev/null @@ -1,53 +0,0 @@ -package google - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" -) - -func TestAccDataSourceGoogleProjectServices_basic(t *testing.T) { - t.Parallel() - org := getTestOrgFromEnv(t) - project := "terraform-" + acctest.RandString(10) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccCheckGoogleProjectServicesConfig(project, org), - Check: resource.ComposeTestCheckFunc( - checkDataSourceStateMatchesResourceStateWithIgnores( - "data.google_project_services.project_services", - "google_project_services.project_services", - map[string]struct{}{ - // Virtual fields - "disable_on_destroy": {}, - }, - ), - ), - }, - }, - }) -} - -func testAccCheckGoogleProjectServicesConfig(project, org string) string { - return fmt.Sprintf(` -resource "google_project" "project" { - project_id = "%s" - name = "%s" - org_id = "%s" -} - -resource "google_project_services" "project_services" { - project = "${google_project.project.project_id}" - services = ["admin.googleapis.com"] -} - -data "google_project_services" "project_services" { - project = "${google_project_services.project_services.project}" -}`, project, project, org) -} diff --git a/google/data_source_google_project_test.go b/google/data_source_google_project_test.go index 9fe586f3cd2..3b20afa2266 100644 --- a/google/data_source_google_project_test.go +++ b/google/data_source_google_project_test.go @@ -37,12 +37,13 @@ func TestAccDataSourceGoogleProject_basic(t *testing.T) { func testAccCheckGoogleProjectConfig(project, org string) string { return fmt.Sprintf(` resource "google_project" "project" { - project_id = "%s" - name = "%s" - org_id = "%s" + project_id = "%s" + name = "%s" + org_id = "%s" } - + data "google_project" "project" { - project_id = 
"${google_project.project.project_id}" -}`, project, project, org) + project_id = google_project.project.project_id +} +`, project, project, org) } diff --git a/google/data_source_google_service_account_access_token_test.go b/google/data_source_google_service_account_access_token_test.go index 2709649c2e5..3c8f45c4914 100644 --- a/google/data_source_google_service_account_access_token_test.go +++ b/google/data_source_google_service_account_access_token_test.go @@ -52,15 +52,14 @@ func TestAccDataSourceGoogleServiceAccountAccessToken_basic(t *testing.T) { func testAccCheckGoogleServiceAccountAccessToken_datasource(targetServiceAccountID string) string { return fmt.Sprintf(` +data "google_service_account_access_token" "default" { + target_service_account = "%s" + scopes = ["userinfo-email", "https://www.googleapis.com/auth/cloud-platform"] + lifetime = "30s" +} - data "google_service_account_access_token" "default" { - target_service_account = "%s" - scopes = ["userinfo-email", "https://www.googleapis.com/auth/cloud-platform"] - lifetime = "30s" - } - - output "access_token" { - value = "${data.google_service_account_access_token.default.access_token}" - } - `, targetServiceAccountID) +output "access_token" { + value = data.google_service_account_access_token.default.access_token +} +`, targetServiceAccountID) } diff --git a/google/data_source_google_service_account_key.go b/google/data_source_google_service_account_key.go index 3f620620455..227d69a7637 100644 --- a/google/data_source_google_service_account_key.go +++ b/google/data_source_google_service_account_key.go @@ -36,12 +36,6 @@ func dataSourceGoogleServiceAccountKey() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "service_account_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Removed: "Please use name to specify full service account key path projects/{project}/serviceAccounts/{serviceAccount}/keys/{keyId}", - }, }, } } diff --git a/google/data_source_google_service_account_key_test.go b/google/data_source_google_service_account_key_test.go index e0a1d4aa0f4..6ab5d898266 100644 --- a/google/data_source_google_service_account_key_test.go +++ b/google/data_source_google_service_account_key_test.go @@ -42,15 +42,16 @@ func TestAccDatasourceGoogleServiceAccountKey_basic(t *testing.T) { func testAccDatasourceGoogleServiceAccountKey(account string) string { return fmt.Sprintf(` resource "google_service_account" "acceptance" { - account_id = "%s" + account_id = "%s" } resource "google_service_account_key" "acceptance" { - service_account_id = "${google_service_account.acceptance.name}" - public_key_type = "TYPE_X509_PEM_FILE" + service_account_id = google_service_account.acceptance.name + public_key_type = "TYPE_X509_PEM_FILE" } data "google_service_account_key" "acceptance" { - name = "${google_service_account_key.acceptance.name}" -}`, account) + name = google_service_account_key.acceptance.name +} +`, account) } diff --git a/google/data_source_google_service_account_test.go b/google/data_source_google_service_account_test.go index 672cc578926..7d52ab95640 100644 --- a/google/data_source_google_service_account_test.go +++ b/google/data_source_google_service_account_test.go @@ -41,7 +41,7 @@ resource "google_service_account" "acceptance" { } data "google_service_account" "acceptance" { - account_id = "${google_service_account.acceptance.account_id}" + account_id = google_service_account.acceptance.account_id } `, account) } diff --git a/google/data_source_storage_object_signed_url_test.go 
b/google/data_source_storage_object_signed_url_test.go index 94ad4752845..3b4ececefc8 100644 --- a/google/data_source_storage_object_signed_url_test.go +++ b/google/data_source_storage_object_signed_url_test.go @@ -226,42 +226,42 @@ data "google_storage_object_signed_url" "blerg" { func testAccTestGoogleStorageObjectSignedURL(bucketName string) string { return fmt.Sprintf(` resource "google_storage_bucket" "bucket" { - name = "%s" + name = "%s" } resource "google_storage_bucket_object" "story" { name = "path/to/file" - bucket = "${google_storage_bucket.bucket.name}" + bucket = google_storage_bucket.bucket.name content = "once upon a time..." } data "google_storage_object_signed_url" "story_url" { - bucket = "${google_storage_bucket.bucket.name}" - path = "${google_storage_bucket_object.story.name}" - + bucket = google_storage_bucket.bucket.name + path = google_storage_bucket_object.story.name } data "google_storage_object_signed_url" "story_url_w_headers" { - bucket = "${google_storage_bucket.bucket.name}" - path = "${google_storage_bucket_object.story.name}" + bucket = google_storage_bucket.bucket.name + path = google_storage_bucket_object.story.name extension_headers = { - x-goog-test = "foo" - x-goog-if-generation-match = 1 + x-goog-test = "foo" + x-goog-if-generation-match = 1 } } data "google_storage_object_signed_url" "story_url_w_content_type" { - bucket = "${google_storage_bucket.bucket.name}" - path = "${google_storage_bucket_object.story.name}" + bucket = google_storage_bucket.bucket.name + path = google_storage_bucket_object.story.name content_type = "text/plain" } data "google_storage_object_signed_url" "story_url_w_md5" { - bucket = "${google_storage_bucket.bucket.name}" - path = "${google_storage_bucket_object.story.name}" + bucket = google_storage_bucket.bucket.name + path = google_storage_bucket_object.story.name - content_md5 = "${google_storage_bucket_object.story.md5hash}" -}`, bucketName) + content_md5 = google_storage_bucket_object.story.md5hash +} +`, bucketName) } diff --git a/google/iam.go b/google/iam.go index 554f219cabb..e36c37f71f8 100644 --- a/google/iam.go +++ b/google/iam.go @@ -266,12 +266,6 @@ func listFromIamBindingMap(bm map[iamBindingKey]map[string]struct{}) []*cloudres return rb } -// Flatten AuditConfigs so each service has a single exemption list of log type to members -func mergeAuditConfigs(auditConfigs []*cloudresourcemanager.AuditConfig) []*cloudresourcemanager.AuditConfig { - am := createIamAuditConfigsMap(auditConfigs) - return listFromIamAuditConfigMap(am) -} - // Flattens AuditConfigs so each role has a single Binding with combined members\ func removeAllAuditConfigsWithService(ac []*cloudresourcemanager.AuditConfig, service string) []*cloudresourcemanager.AuditConfig { acMap := createIamAuditConfigsMap(ac) diff --git a/google/iam_binary_authorization_attestor_generated_test.go b/google/iam_binary_authorization_attestor_generated_test.go index 8172fb89dc7..b9f382c464c 100644 --- a/google/iam_binary_authorization_attestor_generated_test.go +++ b/google/iam_binary_authorization_attestor_generated_test.go @@ -113,7 +113,7 @@ func testAccBinaryAuthorizationAttestorIamMember_basicGenerated(context map[stri resource "google_binary_authorization_attestor" "attestor" { name = "test-attestor%{random_suffix}" attestation_authority_note { - note_reference = "${google_container_analysis_note.note.name}" + note_reference = google_container_analysis_note.note.name public_keys { ascii_armored_pgp_public_key = <[^/]+)/domainMappings/(?P<domain_name>[^/]+)", +
"(?P<project>[^/]+)/(?P<domain_name>[^/]+)", "(?P<domain_name>[^/]+)", }, d, config); err != nil { return nil, err } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{domain_name}}") + id, err := replaceVars(d, config, "apps/{{project}}/domainMappings/{{domain_name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/google/resource_app_engine_domain_mapping_generated_test.go b/google/resource_app_engine_domain_mapping_generated_test.go index 7366bb64d60..e2172e75374 100644 --- a/google/resource_app_engine_domain_mapping_generated_test.go +++ b/google/resource_app_engine_domain_mapping_generated_test.go @@ -53,7 +53,7 @@ func testAccAppEngineDomainMapping_appEngineDomainMappingBasicExample(context ma return Nprintf(` resource "google_app_engine_domain_mapping" "domain_mapping" { domain_name = "dm-test-%{random_suffix}.gcp.tfacc.hashicorptest.com" - + ssl_settings { ssl_management_type = "AUTOMATIC" } diff --git a/google/resource_app_engine_firewall_rule.go b/google/resource_app_engine_firewall_rule.go index c3791e21546..1a0dc61f591 100644 --- a/google/resource_app_engine_firewall_rule.go +++ b/google/resource_app_engine_firewall_rule.go @@ -125,7 +125,7 @@ func resourceAppEngineFirewallRuleCreate(d *schema.ResourceData, meta interface{ } // Store the ID now - id, err := replaceVars(d, config, "{{project}}/{{priority}}") + id, err := replaceVars(d, config, "apps/{{project}}/firewall/ingressRules/{{priority}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -273,6 +273,7 @@ func resourceAppEngineFirewallRuleDelete(d *schema.ResourceData, meta interface{ func resourceAppEngineFirewallRuleImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { config := meta.(*Config) if err := parseImportId([]string{ + "apps/(?P<project>[^/]+)/firewall/ingressRules/(?P<priority>[^/]+)", "(?P<project>[^/]+)/(?P<priority>[^/]+)", "(?P<priority>[^/]+)", }, d, config); err != nil { @@ -280,7 +281,7 @@ func resourceAppEngineFirewallRuleImport(d *schema.ResourceData, meta interface{ } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{project}}/{{priority}}") + id, err := replaceVars(d, config, "apps/{{project}}/firewall/ingressRules/{{priority}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/google/resource_app_engine_firewall_rule_generated_test.go b/google/resource_app_engine_firewall_rule_generated_test.go index 42216f29e1a..581b13b22de 100644 --- a/google/resource_app_engine_firewall_rule_generated_test.go +++ b/google/resource_app_engine_firewall_rule_generated_test.go @@ -58,14 +58,14 @@ resource "google_project" "my_project" { } resource "google_app_engine_application" "app" { - project = "${google_project.my_project.project_id}" + project = google_project.my_project.project_id location_id = "us-central" } resource "google_app_engine_firewall_rule" "rule" { - project = "${google_app_engine_application.app.project}" - priority = 1000 - action = "ALLOW" + project = google_app_engine_application.app.project + priority = 1000 + action = "ALLOW" source_range = "*" } `, context) } diff --git a/google/resource_app_engine_standard_app_version.go b/google/resource_app_engine_standard_app_version.go index 46d854402b2..db5b4290c90 100644 --- a/google/resource_app_engine_standard_app_version.go +++ b/google/resource_app_engine_standard_app_version.go @@ -66,18 +66,19 @@ All files must be readable using the credentials supplied with this call.`, Type: schema.TypeString, Required: true, }, - "sha1_sum": { + 
"source_url": { Type: schema.TypeString, - Optional: true, - Description: `SHA1 checksum of the file`, + Required: true, + Description: `Source URL`, }, - "source_url": { + "sha1_sum": { Type: schema.TypeString, Optional: true, - Description: `Source URL`, + Description: `SHA1 checksum of the file`, }, }, }, + AtLeastOneOf: []string{"deployment.0.zip", "deployment.0.files"}, }, "zip": { Type: schema.TypeList, @@ -86,18 +87,19 @@ All files must be readable using the credentials supplied with this call.`, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "source_url": { + Type: schema.TypeString, + Required: true, + Description: `Source URL`, + }, "files_count": { Type: schema.TypeInt, Optional: true, Description: `files count`, }, - "source_url": { - Type: schema.TypeString, - Optional: true, - Description: `Source URL`, - }, }, }, + AtLeastOneOf: []string{"deployment.0.zip", "deployment.0.files"}, }, }, }, @@ -111,7 +113,7 @@ All files must be readable using the credentials supplied with this call.`, Schema: map[string]*schema.Schema{ "shell": { Type: schema.TypeString, - Optional: true, + Required: true, Description: `The format should be a shell command that can be fed to bash -c.`, }, }, @@ -158,7 +160,7 @@ Only the auto value is supported for Node.js in the App Engine standard environm Schema: map[string]*schema.Schema{ "script_path": { Type: schema.TypeString, - Optional: true, + Required: true, Description: `Path to the script from the application root directory.`, }, }, diff --git a/google/resource_app_engine_standard_app_version_generated_test.go b/google/resource_app_engine_standard_app_version_generated_test.go index 2c64b885d49..fa6b9b773f6 100644 --- a/google/resource_app_engine_standard_app_version_generated_test.go +++ b/google/resource_app_engine_standard_app_version_generated_test.go @@ -97,13 +97,13 @@ resource "google_app_engine_standard_app_version" "myapp_v2" { } resource "google_storage_bucket" "bucket" { - name = "appengine-static-content%{random_suffix}" + name = "appengine-static-content%{random_suffix}" } resource "google_storage_bucket_object" "object" { - name = "hello-world.zip" - bucket = "${google_storage_bucket.bucket.name}" - source = "./test-fixtures/appengine/hello-world.zip" + name = "hello-world.zip" + bucket = google_storage_bucket.bucket.name + source = "./test-fixtures/appengine/hello-world.zip" } `, context) } diff --git a/google/resource_big_query_dataset.go b/google/resource_big_query_dataset.go index 1f75710055f..6e320c65afb 100644 --- a/google/resource_big_query_dataset.go +++ b/google/resource_big_query_dataset.go @@ -224,6 +224,16 @@ milliseconds since the epoch.`, func bigqueryDatasetAccessSchema() *schema.Resource { return &schema.Resource{ Schema: map[string]*schema.Schema{ + "role": { + Type: schema.TypeString, + Required: true, + Description: `Describes the rights granted to the user specified by the other +member of the access object. Primitive, Predefined and custom +roles are supported. Predefined roles that have equivalent +primitive roles are swapped by the API to their Primitive +counterparts, and will show a diff post-create. 
See +[official docs](https://cloud.google.com/bigquery/docs/access-control).`, + }, "domain": { Type: schema.TypeString, Optional: true, @@ -235,16 +245,6 @@ domain specified will be granted the specified access`, Optional: true, Description: `An email address of a Google Group to grant access to.`, }, - "role": { - Type: schema.TypeString, - Optional: true, - Description: `Describes the rights granted to the user specified by the other -member of the access object. Primitive, Predefined and custom -roles are supported. Predefined roles that have equivalent -primitive roles are swapped by the API to their Primitive -counterparts, and will show a diff post-create. See -[official docs](https://cloud.google.com/bigquery/docs/access-control).`, - }, "special_group": { Type: schema.TypeString, Optional: true, @@ -381,7 +381,7 @@ func resourceBigQueryDatasetCreate(d *schema.ResourceData, meta interface{}) err } // Store the ID now - id, err := replaceVars(d, config, "{{project}}:{{dataset_id}}") + id, err := replaceVars(d, config, "projects/{{project}}/datasets/{{dataset_id}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -575,15 +575,15 @@ func resourceBigQueryDatasetDelete(d *schema.ResourceData, meta interface{}) err func resourceBigQueryDatasetImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { config := meta.(*Config) if err := parseImportId([]string{ + "projects/(?P<project>[^/]+)/datasets/(?P<dataset_id>[^/]+)", "(?P<project>[^/]+)/(?P<dataset_id>[^/]+)", - "(?P<project>[^/]+):(?P<dataset_id>[^/]+)", "(?P<dataset_id>[^/]+)", }, d, config); err != nil { return nil, err } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{project}}:{{dataset_id}}") + id, err := replaceVars(d, config, "projects/{{project}}/datasets/{{dataset_id}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/google/resource_bigquery_table.go b/google/resource_bigquery_table.go index bbf03070f4c..d92b1870225 100644 --- a/google/resource_bigquery_table.go +++ b/google/resource_bigquery_table.go @@ -163,15 +163,16 @@ func resourceBigQueryTable() *schema.Resource { // Range: [Optional] Range of a sheet to query from. Only used when non-empty. // Typical format: <sheet_name>!<top_left_cell_id>:<bottom_right_cell_id> "range": { - Removed: "This field is in beta. Use it in the the google-beta provider instead. See https://terraform.io/docs/providers/google/guides/provider_versions.html for more details.", - Type: schema.TypeString, - Optional: true, + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: []string{"external_data_configuration.0.google_sheets_options.0.range"}, }, // SkipLeadingRows: [Optional] The number of rows at the top // of the sheet that BigQuery will skip when reading the data. 
"skip_leading_rows": { - Type: schema.TypeInt, - Optional: true, + Type: schema.TypeInt, + Optional: true, + AtLeastOneOf: []string{"external_data_configuration.0.google_sheets_options.0.skip_leading_rows"}, }, }, }, @@ -476,7 +477,7 @@ func resourceBigQueryTableCreate(d *schema.ResourceData, meta interface{}) error log.Printf("[INFO] BigQuery table %s has been created", res.Id) - d.SetId(fmt.Sprintf("%s:%s.%s", res.TableReference.ProjectId, res.TableReference.DatasetId, res.TableReference.TableId)) + d.SetId(fmt.Sprintf("projects/%s/datasets/%s/tables/%s", res.TableReference.ProjectId, res.TableReference.DatasetId, res.TableReference.TableId)) return resourceBigQueryTableRead(d, meta) } @@ -844,12 +845,11 @@ type bigQueryTableId struct { } func parseBigQueryTableId(id string) (*bigQueryTableId, error) { - // Expected format is "PROJECT:DATASET.TABLE", but the project can itself have . and : in it. - // Those characters are not valid dataset or table components, so just split on the last two. - matchRegex := regexp.MustCompile("^(.+):([^:.]+)\\.([^:.]+)$") + // Expected format is "projects/{{project}}/datasets/{{dataset}}/tables/{{table}}" + matchRegex := regexp.MustCompile("^projects/(.+)/datasets/(.+)/tables/(.+)$") subMatches := matchRegex.FindStringSubmatch(id) if subMatches == nil { - return nil, fmt.Errorf("Invalid BigQuery table specifier. Expecting {project}:{dataset-id}.{table-id}, got %s", id) + return nil, fmt.Errorf("Invalid BigQuery table specifier. Expecting projects/{{project}}/datasets/{{dataset}}/tables/{{table}}, got %s", id) } return &bigQueryTableId{ Project: subMatches[1], diff --git a/google/resource_bigtable_app_profile.go b/google/resource_bigtable_app_profile.go index 12f91030f01..781e2f7d625 100644 --- a/google/resource_bigtable_app_profile.go +++ b/google/resource_bigtable_app_profile.go @@ -73,7 +73,7 @@ func resourceBigtableAppProfile() *schema.Resource { Description: `If true, read/write requests are routed to the nearest cluster in the instance, and will fail over to the nearest cluster that is available in the event of transient errors or delays. Clusters in a region are considered equidistant. Choosing this option sacrifices read-your-writes consistency to improve availability.`, - ConflictsWith: []string{"single_cluster_routing"}, + ExactlyOneOf: []string{"single_cluster_routing", "multi_cluster_routing_use_any"}, }, "single_cluster_routing": { Type: schema.TypeList, @@ -83,20 +83,20 @@ consistency to improve availability.`, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "cluster_id": { + Type: schema.TypeString, + Required: true, + Description: `The cluster to which read/write requests should be routed.`, + }, "allow_transactional_writes": { Type: schema.TypeBool, Optional: true, Description: `If true, CheckAndMutateRow and ReadModifyWriteRow requests are allowed by this app profile. 
It is unsafe to send these requests to the same table/row/column in multiple clusters.`, }, - "cluster_id": { - Type: schema.TypeString, - Optional: true, - Description: `The cluster to which read/write requests should be routed.`, - }, }, }, - ConflictsWith: []string{"multi_cluster_routing_use_any"}, + ExactlyOneOf: []string{"single_cluster_routing", "multi_cluster_routing_use_any"}, }, "name": { Type: schema.TypeString, @@ -152,7 +152,7 @@ func resourceBigtableAppProfileCreate(d *schema.ResourceData, meta interface{}) } // Store the ID now - id, err := replaceVars(d, config, "{{project}}/{{instance}}/{{app_profile_id}}") + id, err := replaceVars(d, config, "projects/{{project}}/instances/{{instance}}/appProfiles/{{app_profile_id}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -278,7 +278,7 @@ func resourceBigtableAppProfileImport(d *schema.ResourceData, meta interface{}) } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{project}}/{{instance}}/{{app_profile_id}}") + id, err := replaceVars(d, config, "projects/{{project}}/instances/{{instance}}/appProfiles/{{app_profile_id}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/google/resource_bigtable_app_profile_generated_test.go b/google/resource_bigtable_app_profile_generated_test.go index 1e3b40b6c32..28b1df74e9a 100644 --- a/google/resource_bigtable_app_profile_generated_test.go +++ b/google/resource_bigtable_app_profile_generated_test.go @@ -52,21 +52,21 @@ func TestAccBigtableAppProfile_bigtableAppProfileMulticlusterExample(t *testing. func testAccBigtableAppProfile_bigtableAppProfileMulticlusterExample(context map[string]interface{}) string { return Nprintf(` resource "google_bigtable_instance" "instance" { - name = "tf-test-instance-%{random_suffix}" - cluster { - cluster_id = "tf-test-instance-%{random_suffix}" - zone = "us-central1-b" - num_nodes = 3 - storage_type = "HDD" - } + name = "tf-test-instance-%{random_suffix}" + cluster { + cluster_id = "tf-test-instance-%{random_suffix}" + zone = "us-central1-b" + num_nodes = 3 + storage_type = "HDD" + } } resource "google_bigtable_app_profile" "ap" { - instance = google_bigtable_instance.instance.name - app_profile_id = "tf-test-profile-%{random_suffix}" + instance = google_bigtable_instance.instance.name + app_profile_id = "tf-test-profile-%{random_suffix}" - multi_cluster_routing_use_any = true - ignore_warnings = true + multi_cluster_routing_use_any = true + ignore_warnings = true } `, context) } @@ -99,25 +99,25 @@ func TestAccBigtableAppProfile_bigtableAppProfileSingleclusterExample(t *testing func testAccBigtableAppProfile_bigtableAppProfileSingleclusterExample(context map[string]interface{}) string { return Nprintf(` resource "google_bigtable_instance" "instance" { - name = "tf-test-instance-%{random_suffix}" - cluster { - cluster_id = "tf-test-instance-%{random_suffix}" - zone = "us-central1-b" - num_nodes = 3 - storage_type = "HDD" - } + name = "tf-test-instance-%{random_suffix}" + cluster { + cluster_id = "tf-test-instance-%{random_suffix}" + zone = "us-central1-b" + num_nodes = 3 + storage_type = "HDD" + } } resource "google_bigtable_app_profile" "ap" { - instance = google_bigtable_instance.instance.name - app_profile_id = "tf-test-profile-%{random_suffix}" + instance = google_bigtable_instance.instance.name + app_profile_id = "tf-test-profile-%{random_suffix}" - single_cluster_routing { - cluster_id = "tf-test-instance-%{random_suffix}" - allow_transactional_writes = true - 
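
The validators introduced in the last few files are easy to conflate: AtLeastOneOf (the deployment files/zip fields and the Google Sheets options above) demands that at least one listed field is set, while ExactlyOneOf (the two Bigtable routing modes here) additionally forbids setting more than one. ConflictsWith, which ExactlyOneOf replaces, allowed both fields to be omitted. A trimmed sketch with empty nested schemas standing in for the real ones:

package main

import "github.com/hashicorp/terraform-plugin-sdk/helper/schema"

var routingFields = map[string]*schema.Schema{
	"multi_cluster_routing_use_any": {
		Type:     schema.TypeBool,
		Optional: true,
		// Exactly one of the two routing modes must appear in the config.
		ExactlyOneOf: []string{"single_cluster_routing", "multi_cluster_routing_use_any"},
	},
	"single_cluster_routing": {
		Type:         schema.TypeList,
		Optional:     true,
		MaxItems:     1,
		Elem:         &schema.Resource{Schema: map[string]*schema.Schema{}},
		ExactlyOneOf: []string{"single_cluster_routing", "multi_cluster_routing_use_any"},
	},
}

func main() { _ = routingFields }
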
} + single_cluster_routing { + cluster_id = "tf-test-instance-%{random_suffix}" + allow_transactional_writes = true + } - ignore_warnings = true + ignore_warnings = true } `, context) } diff --git a/google/resource_bigtable_instance.go b/google/resource_bigtable_instance.go index 56d8f79d2dd..5b22f30c832 100644 --- a/google/resource_bigtable_instance.go +++ b/google/resource_bigtable_instance.go @@ -87,34 +87,6 @@ func resourceBigtableInstance() *schema.Resource { Computed: true, ForceNew: true, }, - - "cluster_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Removed: "Use cluster instead.", - }, - - "zone": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Removed: "Use cluster instead.", - }, - - "num_nodes": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - Removed: "Use cluster instead.", - }, - - "storage_type": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Removed: "Use cluster instead.", - }, }, } } @@ -159,7 +131,11 @@ func resourceBigtableInstanceCreate(d *schema.ResourceData, meta interface{}) er return fmt.Errorf("Error creating instance. %s", err) } - d.SetId(conf.InstanceID) + id, err := replaceVars(d, config, "projects/{{project}}/instances/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) return resourceBigtableInstanceRead(d, meta) } @@ -180,11 +156,13 @@ func resourceBigtableInstanceRead(d *schema.ResourceData, meta interface{}) erro defer c.Close() - instance, err := c.InstanceInfo(ctx, d.Id()) + instanceName := d.Get("name").(string) + + instance, err := c.InstanceInfo(ctx, instanceName) if err != nil { - log.Printf("[WARN] Removing %s because it's gone", d.Id()) + log.Printf("[WARN] Removing %s because it's gone", instanceName) d.SetId("") - return fmt.Errorf("Error retrieving instance. Could not find %s. %s", d.Id(), err) + return fmt.Errorf("Error retrieving instance. Could not find %s. %s", instanceName, err) } d.Set("project", project) @@ -276,7 +254,7 @@ func resourceBigtableInstanceDestroy(d *schema.ResourceData, meta interface{}) e defer c.Close() - name := d.Id() + name := d.Get("name").(string) err = c.DeleteInstance(ctx, name) if err != nil { return fmt.Errorf("Error deleting instance. 
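
Because the Bigtable instance ID is now the full projects/{{project}}/instances/{{name}} path, d.Id() no longer doubles as the instance name, which is why Read and Destroy above switch to d.Get("name"). A standalone sketch of the mismatch the old code would have had, with path-stripping as one alternative:

package main

import (
	"fmt"
	"strings"
)

func main() {
	id := "projects/my-project/instances/tf-instance"
	// Old pattern: c.InstanceInfo(ctx, d.Id()) would now pass the whole path
	// to an API that expects a bare instance name.
	name := id[strings.LastIndex(id, "/")+1:]
	fmt.Println(name) // tf-instance
}
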
%s", err) @@ -394,7 +372,7 @@ func resourceBigtableInstanceImport(d *schema.ResourceData, meta interface{}) ([ } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/instances/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/google/resource_bigtable_table.go b/google/resource_bigtable_table.go index 0c7678cd5ca..5a24e522402 100644 --- a/google/resource_bigtable_table.go +++ b/google/resource_bigtable_table.go @@ -111,7 +111,11 @@ func resourceBigtableTableCreate(d *schema.ResourceData, meta interface{}) error } } - d.SetId(name) + id, err := replaceVars(d, config, "projects/{{project}}/instances/{{instance_name}}/tables/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) return resourceBigtableTableRead(d, meta) } @@ -133,7 +137,7 @@ func resourceBigtableTableRead(d *schema.ResourceData, meta interface{}) error { defer c.Close() - name := d.Id() + name := d.Get("name").(string) table, err := c.TableInfo(ctx, name) if err != nil { log.Printf("[WARN] Removing %s because it's gone", name) @@ -199,7 +203,7 @@ func resourceBigtableTableImport(d *schema.ResourceData, meta interface{}) ([]*s } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/instances/{{instance_name}}/tables/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/google/resource_bigtable_table_test.go b/google/resource_bigtable_table_test.go index 470b56c7f71..f8b7176fabe 100644 --- a/google/resource_bigtable_table_test.go +++ b/google/resource_bigtable_table_test.go @@ -28,8 +28,6 @@ func TestAccBigtableTable_basic(t *testing.T) { ResourceName: "google_bigtable_table.table", ImportState: true, ImportStateVerify: true, - //TODO(rileykarson): Remove ImportStateId when id format is fixed in 3.0.0 - ImportStateId: fmt.Sprintf("%s/%s", instanceName, tableName), }, }, }) @@ -54,7 +52,6 @@ func TestAccBigtableTable_splitKeys(t *testing.T) { ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{"split_keys"}, - ImportStateId: fmt.Sprintf("%s/%s", instanceName, tableName), }, }, }) @@ -79,7 +76,6 @@ func TestAccBigtableTable_family(t *testing.T) { ResourceName: "google_bigtable_table.table", ImportState: true, ImportStateVerify: true, - ImportStateId: fmt.Sprintf("%s/%s", instanceName, tableName), }, }, }) @@ -104,7 +100,6 @@ func TestAccBigtableTable_familyMany(t *testing.T) { ResourceName: "google_bigtable_table.table", ImportState: true, ImportStateVerify: true, - ImportStateId: fmt.Sprintf("%s/%s", instanceName, tableName), }, }, }) diff --git a/google/resource_binary_authorization_attestor.go b/google/resource_binary_authorization_attestor.go index a5800dfbb8e..aa69f100ecf 100644 --- a/google/resource_binary_authorization_attestor.go +++ b/google/resource_binary_authorization_attestor.go @@ -218,7 +218,7 @@ func resourceBinaryAuthorizationAttestorCreate(d *schema.ResourceData, meta inte } // Store the ID now - id, err := replaceVars(d, config, "{{project}}/{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/attestors/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -359,7 +359,7 @@ func resourceBinaryAuthorizationAttestorImport(d *schema.ResourceData, meta inte } // Replace import id for the resource id - id, err := 
replaceVars(d, config, "{{project}}/{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/attestors/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/google/resource_binary_authorization_attestor_generated_test.go b/google/resource_binary_authorization_attestor_generated_test.go index 154ab81399f..ba560eb0f76 100644 --- a/google/resource_binary_authorization_attestor_generated_test.go +++ b/google/resource_binary_authorization_attestor_generated_test.go @@ -53,7 +53,7 @@ func testAccBinaryAuthorizationAttestor_binaryAuthorizationAttestorBasicExample( resource "google_binary_authorization_attestor" "attestor" { name = "test-attestor%{random_suffix}" attestation_authority_note { - note_reference = "${google_container_analysis_note.note.name}" + note_reference = google_container_analysis_note.note.name public_keys { ascii_armored_pgp_public_key = <[^/]+)/locations/(?P<region>[^/]+)/functions/(?P<name>[^/]+)", + "(?P<project>[^/]+)/(?P<region>[^/]+)/(?P<name>[^/]+)", + "(?P<name>[^/]+)", }, d, config); err != nil { + return nil, err + } + return &cloudFunctionId{ Project: d.Get("project").(string), Region: d.Get("region").(string), Name: d.Get("name").(string), }, nil } func joinMapKeys(mapToJoin *map[int]bool) string { @@ -190,8 +188,7 @@ func resourceCloudFunctionsFunction() *schema.Resource { "runtime": { Type: schema.TypeString, - Optional: true, - Default: "nodejs6", + Required: true, }, "service_account_email": { @@ -212,27 +209,10 @@ func resourceCloudFunctionsFunction() *schema.Resource { Optional: true, }, - "trigger_bucket": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Removed: "This field is removed. Use `event_trigger` instead.", - ConflictsWith: []string{"trigger_http", "trigger_topic"}, - }, - "trigger_http": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - ConflictsWith: []string{"trigger_bucket", "trigger_topic"}, - }, - - "trigger_topic": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Removed: "This field is removed. 
Use `event_trigger` instead.", - ConflictsWith: []string{"trigger_http", "trigger_bucket"}, + Type: schema.TypeBool, + Optional: true, + ForceNew: true, }, "event_trigger": { @@ -389,7 +369,7 @@ func resourceCloudFunctionsCreate(d *schema.ResourceData, meta interface{}) erro } // Name of function should be unique - d.SetId(cloudFuncId.terraformId()) + d.SetId(cloudFuncId.cloudFunctionId()) err = cloudFunctionsOperationWait(config.clientCloudFunctions, op, "Creating CloudFunctions Function", int(d.Timeout(schema.TimeoutCreate).Minutes())) @@ -403,7 +383,7 @@ func resourceCloudFunctionsCreate(d *schema.ResourceData, meta interface{}) erro func resourceCloudFunctionsRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - cloudFuncId, err := parseCloudFunctionId(d.Id(), config) + cloudFuncId, err := parseCloudFunctionId(d, config) if err != nil { return err } @@ -464,7 +444,7 @@ func resourceCloudFunctionsUpdate(d *schema.ResourceData, meta interface{}) erro return err } - cloudFuncId, err := parseCloudFunctionId(d.Id(), config) + cloudFuncId, err := parseCloudFunctionId(d, config) if err != nil { return err } @@ -553,7 +533,7 @@ func resourceCloudFunctionsUpdate(d *schema.ResourceData, meta interface{}) erro func resourceCloudFunctionsDestroy(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - cloudFuncId, err := parseCloudFunctionId(d.Id(), config) + cloudFuncId, err := parseCloudFunctionId(d, config) if err != nil { return err } diff --git a/google/resource_cloudfunctions_function_test.go b/google/resource_cloudfunctions_function_test.go index a0cd6625369..46c193c28b5 100644 --- a/google/resource_cloudfunctions_function_test.go +++ b/google/resource_cloudfunctions_function_test.go @@ -501,6 +501,7 @@ resource "google_storage_bucket_object" "archive" { resource "google_cloudfunctions_function" "function" { name = "%s" + runtime = "nodejs8" description = "test function" available_memory_mb = 128 source_archive_bucket = "${google_storage_bucket.bucket.name}" @@ -605,7 +606,7 @@ resource "google_storage_bucket_object" "archive" { resource "google_cloudfunctions_function" "function" { name = "%s" - runtime = "nodejs6" + runtime = "nodejs8" available_memory_mb = 128 source_archive_bucket = "${google_storage_bucket.bucket.name}" source_archive_object = "${google_storage_bucket_object.archive.name}" @@ -636,6 +637,7 @@ resource "google_storage_bucket_object" "archive" { resource "google_cloudfunctions_function" "function" { name = "%s" + runtime = "nodejs8" available_memory_mb = 128 source_archive_bucket = "${google_storage_bucket.bucket.name}" source_archive_object = "${google_storage_bucket_object.archive.name}" @@ -663,6 +665,7 @@ resource "google_storage_bucket_object" "archive" { resource "google_cloudfunctions_function" "function" { name = "%s" + runtime = "nodejs8" available_memory_mb = 128 source_archive_bucket = "${google_storage_bucket.bucket.name}" source_archive_object = "${google_storage_bucket_object.archive.name}" @@ -679,6 +682,7 @@ func testAccCloudFunctionsFunction_sourceRepo(functionName, project string) stri return fmt.Sprintf(` resource "google_cloudfunctions_function" "function" { name = "%s" + runtime = "nodejs8" source_repository { // There isn't yet an API that'll allow us to create a source repository and @@ -710,6 +714,7 @@ data "google_compute_default_service_account" "default" { } resource "google_cloudfunctions_function" "function" { name = "%s" + runtime = "nodejs8" source_archive_bucket = 
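
With Default: "nodejs6" dropped and runtime now Required, every function config in these tests gains an explicit runtime line. The minimal resulting shape, printed from Go the way the fixtures are built (the name is arbitrary; nodejs8 and the interpolation style match the test bodies around this hunk):

package main

import "fmt"

func main() {
	fmt.Print(`
resource "google_cloudfunctions_function" "function" {
  name                  = "my-function"
  runtime               = "nodejs8" # mandatory in 3.0.0; no implicit nodejs6
  source_archive_bucket = "${google_storage_bucket.bucket.name}"
  source_archive_object = "${google_storage_bucket_object.archive.name}"
  trigger_http          = true
}
`)
}
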
"${google_storage_bucket.bucket.name}" source_archive_object = "${google_storage_bucket_object.archive.name}" diff --git a/google/resource_cloudiot_registry.go b/google/resource_cloudiot_registry.go index 69b784437a6..21916631c45 100644 --- a/google/resource_cloudiot_registry.go +++ b/google/resource_cloudiot_registry.go @@ -56,27 +56,16 @@ func resourceCloudIoTRegistry() *schema.Resource { []string{"", "NONE", "ERROR", "INFO", "DEBUG"}, false), }, "event_notification_config": { - Type: schema.TypeMap, - Optional: true, - Computed: true, - Deprecated: "eventNotificationConfig has been deprecated in favor of eventNotificationConfigs (plural). Please switch to using the plural field.", - ConflictsWith: []string{"event_notification_configs"}, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "pubsub_topic_name": { - Type: schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, - }, - }, + Type: schema.TypeMap, + Optional: true, + Computed: true, + Removed: "Please use event_notification_configs instead", }, "event_notification_configs": { - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 10, - ConflictsWith: []string{"event_notification_config"}, + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 10, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "pubsub_topic_name": { @@ -143,7 +132,7 @@ func resourceCloudIoTRegistry() *schema.Resource { Schema: map[string]*schema.Schema{ "public_key_certificate": { Type: schema.TypeMap, - Optional: true, + Required: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "format": { @@ -241,9 +230,6 @@ func createDeviceRegistry(d *schema.ResourceData) *cloudiot.DeviceRegistry { deviceRegistry := &cloudiot.DeviceRegistry{} if v, ok := d.GetOk("event_notification_configs"); ok { deviceRegistry.EventNotificationConfigs = buildEventNotificationConfigs(v.([]interface{})) - } else if v, ok := d.GetOk("event_notification_config"); ok { - deviceRegistry.EventNotificationConfigs = []*cloudiot.EventNotificationConfig{ - buildEventNotificationConfig(v.(map[string]interface{}))} } if v, ok := d.GetOk("state_notification_config"); ok { @@ -314,15 +300,6 @@ func resourceCloudIoTRegistryUpdate(d *schema.ResourceData, meta interface{}) er } } - if d.HasChange("event_notification_config") { - hasChanged = true - updateMask = append(updateMask, "event_notification_configs") - if v, ok := d.GetOk("event_notification_config"); ok { - deviceRegistry.EventNotificationConfigs = []*cloudiot.EventNotificationConfig{ - buildEventNotificationConfig(v.(map[string]interface{}))} - } - } - if d.HasChange("state_notification_config") { hasChanged = true updateMask = append(updateMask, "state_notification_config.pubsub_topic_name") @@ -402,14 +379,8 @@ func resourceCloudIoTRegistryRead(d *schema.ResourceData, meta interface{}) erro if err := d.Set("event_notification_configs", cfgs); err != nil { return fmt.Errorf("Error reading Registry: %s", err) } - if err := d.Set("event_notification_config", map[string]string{ - "pubsub_topic_name": res.EventNotificationConfigs[0].PubsubTopicName, - }); err != nil { - return fmt.Errorf("Error reading Registry: %s", err) - } } else { d.Set("event_notification_configs", nil) - d.Set("event_notification_config", nil) } pubsubTopicName := res.StateNotificationConfig.PubsubTopicName @@ -433,6 +404,8 @@ func resourceCloudIoTRegistryRead(d *schema.ResourceData, meta interface{}) erro } d.Set("credentials", credentials) d.Set("log_level", 
res.LogLevel) + // Removed Computed field must be set to nil to prevent spurious diffs + d.Set("event_notification_config", nil) return nil } diff --git a/google/resource_cloudiot_registry_test.go b/google/resource_cloudiot_registry_test.go index 2468de14126..a002e821667 100644 --- a/google/resource_cloudiot_registry_test.go +++ b/google/resource_cloudiot_registry_test.go @@ -116,7 +116,7 @@ func TestAccCloudIoTRegistry_update(t *testing.T) { }) } -func TestAccCloudIoTRegistry_eventNotificationConfigDeprecatedSingleToPlural(t *testing.T) { +func TestAccCloudIoTRegistry_eventNotificationConfigsSingle(t *testing.T) { t.Parallel() registryName := fmt.Sprintf("tf-registry-test-%s", acctest.RandString(10)) @@ -128,29 +128,18 @@ func TestAccCloudIoTRegistry_eventNotificationConfigDeprecatedSingleToPlural(t * CheckDestroy: testAccCheckCloudIoTRegistryDestroy, Steps: []resource.TestStep{ { - // Use deprecated field (event_notification_config) to create - Config: testAccCloudIoTRegistry_singleEventNotificationConfig(topic, registryName), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "google_cloudiot_registry.foobar", "event_notification_configs.#", "1"), - ), + Config: testAccCloudIoTRegistry_singleEventNotificationConfigs(topic, registryName), }, { ResourceName: "google_cloudiot_registry.foobar", ImportState: true, ImportStateVerify: true, }, - { - // Use new field (event_notification_configs) to see if plan changed - Config: testAccCloudIoTRegistry_pluralEventNotificationConfigs(topic, registryName), - PlanOnly: true, - ExpectNonEmptyPlan: false, - }, }, }) } -func TestAccCloudIoTRegistry_eventNotificationConfigMultiple(t *testing.T) { +func TestAccCloudIoTRegistry_eventNotificationConfigsMultiple(t *testing.T) { t.Parallel() registryName := fmt.Sprintf("tf-registry-test-%s", acctest.RandString(10)) @@ -173,40 +162,6 @@ func TestAccCloudIoTRegistry_eventNotificationConfigMultiple(t *testing.T) { }) } -func TestAccCloudIoTRegistry_eventNotificationConfigPluralToDeprecatedSingle(t *testing.T) { - t.Parallel() - - registryName := fmt.Sprintf("tf-registry-test-%s", acctest.RandString(10)) - topic := fmt.Sprintf("tf-registry-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCloudIoTRegistryDestroy, - Steps: []resource.TestStep{ - { - // Use new field (event_notification_configs) to create - Config: testAccCloudIoTRegistry_pluralEventNotificationConfigs(topic, registryName), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "google_cloudiot_registry.foobar", "event_notification_configs.#", "1"), - ), - }, - { - ResourceName: "google_cloudiot_registry.foobar", - ImportState: true, - ImportStateVerify: true, - }, - { - // Use old field (event_notification_config) to see if plan changed - Config: testAccCloudIoTRegistry_singleEventNotificationConfig(topic, registryName), - PlanOnly: true, - ExpectNonEmptyPlan: false, - }, - }, - }) -} - func testAccCheckCloudIoTRegistryDestroy(s *terraform.State) error { for _, rs := range s.RootModule().Resources { if rs.Type != "google_cloudiot_registry" { @@ -276,37 +231,14 @@ resource "google_cloudiot_registry" "foobar" { `, acctest.RandString(10), acctest.RandString(10), registryName) } -func testAccCloudIoTRegistry_singleEventNotificationConfig(topic, registryName string) string { - return fmt.Sprintf(` -resource "google_project_iam_binding" "cloud-iot-iam-binding" { - members = 
["serviceAccount:cloud-iot@system.gserviceaccount.com"] - role = "roles/pubsub.publisher" -} - -resource "google_pubsub_topic" "event-topic" { - name = "%s" -} - -resource "google_cloudiot_registry" "foobar" { - depends_on = ["google_project_iam_binding.cloud-iot-iam-binding"] - - name = "%s" - - event_notification_config = { - pubsub_topic_name = "${google_pubsub_topic.event-topic.id}" - } -} -`, topic, registryName) -} - -func testAccCloudIoTRegistry_pluralEventNotificationConfigs(topic, registryName string) string { +func testAccCloudIoTRegistry_singleEventNotificationConfigs(topic, registryName string) string { return fmt.Sprintf(` resource "google_project_iam_binding" "cloud-iot-iam-binding" { members = ["serviceAccount:cloud-iot@system.gserviceaccount.com"] role = "roles/pubsub.publisher" } -resource "google_pubsub_topic" "event-topic" { +resource "google_pubsub_topic" "event-topic-1" { name = "%s" } @@ -315,8 +247,9 @@ resource "google_cloudiot_registry" "foobar" { name = "%s" - event_notification_config = { - pubsub_topic_name = "${google_pubsub_topic.event-topic.id}" + event_notification_configs { + pubsub_topic_name = "${google_pubsub_topic.event-topic-1.id}" + subfolder_matches = "" } } `, topic, registryName) diff --git a/google/resource_composer_environment.go b/google/resource_composer_environment.go index f370cd1b991..cb55e18e30e 100644 --- a/google/resource_composer_environment.go +++ b/google/resource_composer_environment.go @@ -35,6 +35,14 @@ var composerEnvironmentReservedEnvVar = map[string]struct{}{ "SQL_USER": {}, } +var composerSoftwareConfigKeys = []string{ + "config.0.software_config.0.airflow_config_overrides", + "config.0.software_config.0.pypi_packages", + "config.0.software_config.0.env_variables", + "config.0.software_config.0.image_version", + "config.0.software_config.0.python_version", +} + func resourceComposerEnvironment() *schema.Resource { return &schema.Resource{ Create: resourceComposerEnvironmentCreate, @@ -161,8 +169,7 @@ func resourceComposerEnvironment() *schema.Resource { Schema: map[string]*schema.Schema{ "use_ip_aliases": { Type: schema.TypeBool, - Optional: true, - Default: true, + Required: true, ForceNew: true, }, "cluster_secondary_range_name": { @@ -205,19 +212,22 @@ func resourceComposerEnvironment() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "airflow_config_overrides": { - Type: schema.TypeMap, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, + Type: schema.TypeMap, + Optional: true, + AtLeastOneOf: composerSoftwareConfigKeys, + Elem: &schema.Schema{Type: schema.TypeString}, }, "pypi_packages": { Type: schema.TypeMap, Optional: true, + AtLeastOneOf: composerSoftwareConfigKeys, Elem: &schema.Schema{Type: schema.TypeString}, ValidateFunc: validateComposerEnvironmentPypiPackages, }, "env_variables": { Type: schema.TypeMap, Optional: true, + AtLeastOneOf: composerSoftwareConfigKeys, Elem: &schema.Schema{Type: schema.TypeString}, ValidateFunc: validateComposerEnvironmentEnvVariables, }, @@ -225,14 +235,16 @@ func resourceComposerEnvironment() *schema.Resource { Type: schema.TypeString, Computed: true, Optional: true, + AtLeastOneOf: composerSoftwareConfigKeys, ValidateFunc: validateRegexp(composerEnvironmentVersionRegexp), DiffSuppressFunc: composerImageVersionDiffSuppress, }, "python_version": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: composerSoftwareConfigKeys, + Computed: true, + 
ForceNew: true, }, }, }, @@ -247,9 +259,8 @@ func resourceComposerEnvironment() *schema.Resource { Schema: map[string]*schema.Schema{ "enable_private_endpoint": { Type: schema.TypeBool, - Optional: true, + Required: true, ForceNew: true, - Default: true, }, "master_ipv4_cidr_block": { Type: schema.TypeString, @@ -313,7 +324,7 @@ func resourceComposerEnvironmentCreate(d *schema.ResourceData, meta interface{}) } // Store the ID now - id, err := replaceVars(d, config, "{{project}}/{{region}}/{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/locations/{{region}}/environments/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -574,7 +585,7 @@ func resourceComposerEnvironmentImport(d *schema.ResourceData, meta interface{}) } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{project}}/{{region}}/{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/locations/{{region}}/environments/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/google/resource_composer_environment_test.go b/google/resource_composer_environment_test.go index 8078d73317e..333e99bc86d 100644 --- a/google/resource_composer_environment_test.go +++ b/google/resource_composer_environment_test.go @@ -283,13 +283,13 @@ func testAccComposerEnvironmentDestroy(s *terraform.State) error { } idTokens := strings.Split(rs.Primary.ID, "/") - if len(idTokens) != 3 { - return fmt.Errorf("Invalid ID %q, expected format {project}/{region}/{environment}", rs.Primary.ID) + if len(idTokens) != 6 { + return fmt.Errorf("Invalid ID %q, expected format projects/{project}/locations/{region}/environments/{environment}", rs.Primary.ID) } envName := &composerEnvironmentName{ - Project: idTokens[0], - Region: idTokens[1], - Environment: idTokens[2], + Project: idTokens[1], + Region: idTokens[3], + Environment: idTokens[5], } _, err := config.clientComposer.Projects.Locations.Environments.Get(envName.resourceName()).Do() diff --git a/google/resource_compute_address.go b/google/resource_compute_address.go index 0084026ffa5..c659fcf6915 100644 --- a/google/resource_compute_address.go +++ b/google/resource_compute_address.go @@ -216,7 +216,7 @@ func resourceComputeAddressCreate(d *schema.ResourceData, meta interface{}) erro } // Store the ID now - id, err := replaceVars(d, config, "{{project}}/{{region}}/{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/addresses/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -352,7 +352,7 @@ func resourceComputeAddressImport(d *schema.ResourceData, meta interface{}) ([]* } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{project}}/{{region}}/{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/addresses/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/google/resource_compute_address_generated_test.go b/google/resource_compute_address_generated_test.go index de5e22047ad..d555f3d8a70 100644 --- a/google/resource_compute_address_generated_test.go +++ b/google/resource_compute_address_generated_test.go @@ -90,12 +90,12 @@ resource "google_compute_subnetwork" "default" { name = "my-subnet%{random_suffix}" ip_cidr_range = "10.0.0.0/16" region = "us-central1" - network = "${google_compute_network.default.self_link}" + network = google_compute_network.default.self_link }
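// Illustrative note, not part of the generated config: with the address ID
// now stored in its fully qualified form, an existing address would be
// imported with the full path, e.g. (project and resource names below are
// placeholders):
//
//	terraform import google_compute_address.internal_with_subnet_and_address \
//		projects/my-project/regions/us-central1/addresses/my-internal-address
//
// Previously the ID recorded in state was the short "{project}/{region}/{name}" form.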
"internal_with_subnet_and_address" { name = "my-internal-address%{random_suffix}" - subnetwork = "${google_compute_subnetwork.default.self_link}" + subnetwork = google_compute_subnetwork.default.self_link address_type = "INTERNAL" address = "10.0.42.42" region = "us-central1" @@ -168,27 +168,27 @@ resource "google_compute_address" "static" { } data "google_compute_image" "debian_image" { - family = "debian-9" - project = "debian-cloud" + family = "debian-9" + project = "debian-cloud" } resource "google_compute_instance" "instance_with_ip" { - name = "vm-instance%{random_suffix}" - machine_type = "f1-micro" - zone = "us-central1-a" - - boot_disk { - initialize_params{ - image = "${data.google_compute_image.debian_image.self_link}" - } - } - - network_interface { - network = "default" - access_config { - nat_ip = "${google_compute_address.static.address}" - } - } + name = "vm-instance%{random_suffix}" + machine_type = "f1-micro" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.debian_image.self_link + } + } + + network_interface { + network = "default" + access_config { + nat_ip = google_compute_address.static.address + } + } } `, context) } diff --git a/google/resource_compute_attached_disk.go b/google/resource_compute_attached_disk.go index bbc45e6e213..aa80d24f15e 100644 --- a/google/resource_compute_attached_disk.go +++ b/google/resource_compute_attached_disk.go @@ -100,7 +100,7 @@ func resourceAttachedDiskCreate(d *schema.ResourceData, meta interface{}) error return err } - d.SetId(fmt.Sprintf("%s:%s", zv.Name, diskName)) + d.SetId(fmt.Sprintf("projects/%s/zones/%s/instances/%s/%s", zv.Project, zv.Zone, zv.Name, diskName)) waitErr := computeSharedOperationWaitTime(config.clientCompute, op, zv.Project, int(d.Timeout(schema.TimeoutCreate).Minutes()), "disk to attach") @@ -196,22 +196,17 @@ func resourceAttachedDiskImport(d *schema.ResourceData, meta interface{}) ([]*sc config := meta.(*Config) err := parseImportId( - []string{"projects/(?P[^/]+)/zones/(?P[^/]+)/instances/[^/]+", - "(?P[^/]+)/(?P[^/]+)/[^/]+"}, d, config) + []string{"projects/(?P[^/]+)/zones/(?P[^/]+)/instances/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)"}, d, config) if err != nil { return nil, err } - // In all acceptable id formats the actual id will be the last in the path - id := GetResourceNameFromSelfLink(d.Id()) - d.SetId(id) - - IDParts := strings.Split(d.Id(), ":") - if len(IDParts) != 2 { - return nil, fmt.Errorf("unable to determine attached disk id - id should be '{google_compute_instance.name}:{google_compute_disk.name}'") + id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/instances/{{instance}}/{{disk}}") + if err != nil { + return nil, err } - d.Set("instance", IDParts[0]) - d.Set("disk", IDParts[1]) + d.SetId(id) return []*schema.ResourceData{d}, nil } diff --git a/google/resource_compute_attached_disk_test.go b/google/resource_compute_attached_disk_test.go index e412157a14d..717d22190a0 100644 --- a/google/resource_compute_attached_disk_test.go +++ b/google/resource_compute_attached_disk_test.go @@ -14,7 +14,7 @@ func TestAccComputeAttachedDisk_basic(t *testing.T) { diskName := acctest.RandomWithPrefix("tf-test-disk") instanceName := acctest.RandomWithPrefix("tf-test-inst") - importID := fmt.Sprintf("%s/us-central1-a/%s:%s", getTestProjectFromEnv(), instanceName, diskName) + importID := fmt.Sprintf("%s/us-central1-a/%s/%s", getTestProjectFromEnv(), instanceName, diskName) resource.Test(t, resource.TestCase{ 
PreCheck: func() { testAccPreCheck(t) }, @@ -46,7 +46,7 @@ func TestAccComputeAttachedDisk_full(t *testing.T) { diskName := acctest.RandomWithPrefix("tf-test") instanceName := acctest.RandomWithPrefix("tf-test") - importID := fmt.Sprintf("%s/us-central1-a/%s:%s", getTestProjectFromEnv(), instanceName, diskName) + importID := fmt.Sprintf("%s/us-central1-a/%s/%s", getTestProjectFromEnv(), instanceName, diskName) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -73,7 +73,7 @@ func TestAccComputeAttachedDisk_region(t *testing.T) { diskName := acctest.RandomWithPrefix("tf-test") instanceName := acctest.RandomWithPrefix("tf-test") - importID := fmt.Sprintf("%s/us-central1-a/%s:%s", getTestProjectFromEnv(), instanceName, diskName) + importID := fmt.Sprintf("%s/us-central1-a/%s/%s", getTestProjectFromEnv(), instanceName, diskName) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, diff --git a/google/resource_compute_autoscaler.go b/google/resource_compute_autoscaler.go index 76c778787a0..acf3cd80519 100644 --- a/google/resource_compute_autoscaler.go +++ b/google/resource_compute_autoscaler.go @@ -277,7 +277,7 @@ func resourceComputeAutoscalerCreate(d *schema.ResourceData, meta interface{}) e } // Store the ID now - id, err := replaceVars(d, config, "{{zone}}/{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/autoscalers/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -470,7 +470,7 @@ func resourceComputeAutoscalerImport(d *schema.ResourceData, meta interface{}) ( } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{zone}}/{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/autoscalers/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/google/resource_compute_autoscaler_generated_test.go b/google/resource_compute_autoscaler_generated_test.go index 240bb03dc4a..1711099f4c9 100644 --- a/google/resource_compute_autoscaler_generated_test.go +++ b/google/resource_compute_autoscaler_generated_test.go @@ -53,7 +53,7 @@ func testAccComputeAutoscaler_autoscalerBasicExample(context map[string]interfac resource "google_compute_autoscaler" "foobar" { name = "my-autoscaler%{random_suffix}" zone = "us-central1-f" - target = "${google_compute_instance_group_manager.foobar.self_link}" + target = google_compute_instance_group_manager.foobar.self_link autoscaling_policy { max_replicas = 5 @@ -74,7 +74,7 @@ resource "google_compute_instance_template" "foobar" { tags = ["foo", "bar"] disk { - source_image = "${data.google_compute_image.debian_9.self_link}" + source_image = data.google_compute_image.debian_9.self_link } network_interface { @@ -99,17 +99,17 @@ resource "google_compute_instance_group_manager" "foobar" { zone = "us-central1-f" version { - instance_template = "${google_compute_instance_template.foobar.self_link}" + instance_template = google_compute_instance_template.foobar.self_link name = "primary" } - target_pools = ["${google_compute_target_pool.foobar.self_link}"] + target_pools = [google_compute_target_pool.foobar.self_link] base_instance_name = "foobar" } data "google_compute_image" "debian_9" { - family = "debian-9" - project = "debian-cloud" + family = "debian-9" + project = "debian-cloud" } `, context) } diff --git a/google/resource_compute_backend_bucket.go b/google/resource_compute_backend_bucket.go index 28dd7e1bfe1..a11fc3c6c8b 100644 --- 
a/google/resource_compute_backend_bucket.go +++ b/google/resource_compute_backend_bucket.go @@ -71,16 +71,15 @@ last character, which cannot be a dash.`, Schema: map[string]*schema.Schema{ "signed_url_cache_max_age_sec": { Type: schema.TypeInt, - Optional: true, + Required: true, Description: `Maximum number of seconds the response to a signed URL request will -be considered fresh. Defaults to 1hr (3600s). After this time period, +be considered fresh. After this time period, the response will be revalidated before being served. When serving responses to signed URL requests, Cloud CDN will internally behave as though all responses from this backend had a "Cache-Control: public, max-age=[TTL]" header, regardless of any existing Cache-Control header. The actual headers served in responses will not be altered.`, - Default: 3600, }, }, }, @@ -166,7 +165,7 @@ func resourceComputeBackendBucketCreate(d *schema.ResourceData, meta interface{} } // Store the ID now - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/global/backendBuckets/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -358,7 +357,7 @@ func resourceComputeBackendBucketImport(d *schema.ResourceData, meta interface{} } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/global/backendBuckets/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/google/resource_compute_backend_bucket_generated_test.go b/google/resource_compute_backend_bucket_generated_test.go index d819c77aaa2..496911ecda0 100644 --- a/google/resource_compute_backend_bucket_generated_test.go +++ b/google/resource_compute_backend_bucket_generated_test.go @@ -53,7 +53,7 @@ func testAccComputeBackendBucket_backendBucketBasicExample(context map[string]in resource "google_compute_backend_bucket" "image_backend" { name = "image-backend-bucket%{random_suffix}" description = "Contains beautiful images" - bucket_name = "${google_storage_bucket.image_bucket.name}" + bucket_name = google_storage_bucket.image_bucket.name enable_cdn = true } diff --git a/google/resource_compute_backend_bucket_signed_url_key.go b/google/resource_compute_backend_bucket_signed_url_key.go index 242784c86a1..4a79c6e10bc 100644 --- a/google/resource_compute_backend_bucket_signed_url_key.go +++ b/google/resource_compute_backend_bucket_signed_url_key.go @@ -114,7 +114,7 @@ func resourceComputeBackendBucketSignedUrlKeyCreate(d *schema.ResourceData, meta } // Store the ID now - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/global/backendBuckets/{{backend_bucket}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } diff --git a/google/resource_compute_backend_bucket_signed_url_key_test.go b/google/resource_compute_backend_bucket_signed_url_key_test.go index 5d0f1db0b98..4c5944ed508 100644 --- a/google/resource_compute_backend_bucket_signed_url_key_test.go +++ b/google/resource_compute_backend_bucket_signed_url_key_test.go @@ -85,7 +85,7 @@ func checkComputeBackendBucketSignedUrlKeyExists(s *terraform.State) (bool, erro } config := testAccProvider.Meta().(*Config) - keyName := rs.Primary.ID + keyName := rs.Primary.Attributes["name"] url, err := replaceVarsForTest(config, rs, "{{ComputeBasePath}}projects/{{project}}/global/backendBuckets/{{backend_bucket}}") if err != nil { diff --git 
a/google/resource_compute_backend_service.go b/google/resource_compute_backend_service.go index eb7ca8e4051..6393b8d81be 100644 --- a/google/resource_compute_backend_service.go +++ b/google/resource_compute_backend_service.go @@ -222,14 +222,16 @@ When the load balancing scheme is INTERNAL, this field is not used.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "include_host": { - Type: schema.TypeBool, - Optional: true, - Description: `If true requests to different hosts will be cached separately.`, + Type: schema.TypeBool, + Optional: true, + Description: `If true requests to different hosts will be cached separately.`, + AtLeastOneOf: []string{"cdn_policy.0.cache_key_policy.0.include_host", "cdn_policy.0.cache_key_policy.0.include_protocol", "cdn_policy.0.cache_key_policy.0.include_query_string", "cdn_policy.0.cache_key_policy.0.query_string_blacklist", "cdn_policy.0.cache_key_policy.0.query_string_whitelist"}, }, "include_protocol": { - Type: schema.TypeBool, - Optional: true, - Description: `If true, http and https requests will be cached separately.`, + Type: schema.TypeBool, + Optional: true, + Description: `If true, http and https requests will be cached separately.`, + AtLeastOneOf: []string{"cdn_policy.0.cache_key_policy.0.include_host", "cdn_policy.0.cache_key_policy.0.include_protocol", "cdn_policy.0.cache_key_policy.0.include_query_string", "cdn_policy.0.cache_key_policy.0.query_string_blacklist", "cdn_policy.0.cache_key_policy.0.query_string_whitelist"}, }, "include_query_string": { Type: schema.TypeBool, @@ -241,6 +243,7 @@ string will be included. If false, the query string will be excluded from the cache key entirely.`, + AtLeastOneOf: []string{"cdn_policy.0.cache_key_policy.0.include_host", "cdn_policy.0.cache_key_policy.0.include_protocol", "cdn_policy.0.cache_key_policy.0.include_query_string", "cdn_policy.0.cache_key_policy.0.query_string_blacklist", "cdn_policy.0.cache_key_policy.0.query_string_whitelist"}, }, "query_string_blacklist": { Type: schema.TypeSet, @@ -254,7 +257,8 @@ delimiters.`, Elem: &schema.Schema{ Type: schema.TypeString, }, - Set: schema.HashString, + Set: schema.HashString, + AtLeastOneOf: []string{"cdn_policy.0.cache_key_policy.0.include_host", "cdn_policy.0.cache_key_policy.0.include_protocol", "cdn_policy.0.cache_key_policy.0.include_query_string", "cdn_policy.0.cache_key_policy.0.query_string_blacklist", "cdn_policy.0.cache_key_policy.0.query_string_whitelist"}, }, "query_string_whitelist": { Type: schema.TypeSet, @@ -268,10 +272,12 @@ delimiters.`, Elem: &schema.Schema{ Type: schema.TypeString, }, - Set: schema.HashString, + Set: schema.HashString, + AtLeastOneOf: []string{"cdn_policy.0.cache_key_policy.0.include_host", "cdn_policy.0.cache_key_policy.0.include_protocol", "cdn_policy.0.cache_key_policy.0.include_query_string", "cdn_policy.0.cache_key_policy.0.query_string_blacklist", "cdn_policy.0.cache_key_policy.0.query_string_whitelist"}, }, }, }, + AtLeastOneOf: []string{"cdn_policy.0.cache_key_policy", "cdn_policy.0.signed_url_cache_max_age_sec"}, }, "signed_url_cache_max_age_sec": { Type: schema.TypeInt, @@ -286,7 +292,8 @@ internally behave as though all responses from this backend had a "Cache-Control: public, max-age=[TTL]" header, regardless of any existing Cache-Control header. The actual headers served in responses will not be altered.`, - Default: 3600, + Default: 3600, + AtLeastOneOf: []string{"cdn_policy.0.cache_key_policy", "cdn_policy.0.signed_url_cache_max_age_sec"}, }, }, }, @@ -414,6 +421,28 @@ object. 
This field is used in optimistic locking.`, func computeBackendServiceBackendSchema() *schema.Resource { return &schema.Resource{ Schema: map[string]*schema.Schema{ + "group": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: compareSelfLinkRelativePaths, + Description: `The fully-qualified URL of an Instance Group or Network Endpoint +Group resource. In case of instance group this defines the list +of instances that serve traffic. Member virtual machine +instances from each instance group must live in the same zone as +the instance group itself. No two backends in a backend service +are allowed to use same Instance Group resource. + +For Network Endpoint Groups this defines list of endpoints. All +endpoints of Network Endpoint Group must be hosted on instances +located in the same zone as the Network Endpoint Group. + +Backend services cannot mix Instance Group and +Network Endpoint Group backends. + +Note that you must specify an Instance Group or Network Endpoint +Group resource using the fully-qualified URL, rather than a +partial URL.`, + }, "balancing_mode": { Type: schema.TypeString, Optional: true, @@ -442,28 +471,6 @@ setting of 0 means the group is completely drained, offering Optional: true, Description: `An optional description of this resource. Provide this property when you create the resource.`, - }, - "group": { - Type: schema.TypeString, - Optional: true, - DiffSuppressFunc: compareSelfLinkRelativePaths, - Description: `The fully-qualified URL of an Instance Group or Network Endpoint -Group resource. In case of instance group this defines the list -of instances that serve traffic. Member virtual machine -instances from each instance group must live in the same zone as -the instance group itself. No two backends in a backend service -are allowed to use same Instance Group resource. - -For Network Endpoint Groups this defines list of endpoints. All -endpoints of Network Endpoint Group must be hosted on instances -located in the same zone as the Network Endpoint Group. - -Backend services cannot mix Instance Group and -Network Endpoint Group backends. 
- -Note that you must specify an Instance Group or Network Endpoint -Group resource using the fully-qualified URL, rather than a -partial URL.`, }, "max_connections": { Type: schema.TypeInt, @@ -657,7 +664,7 @@ func resourceComputeBackendServiceCreate(d *schema.ResourceData, meta interface{ } // Store the ID now - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/global/backendServices/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -1009,7 +1016,7 @@ func resourceComputeBackendServiceImport(d *schema.ResourceData, meta interface{ } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/global/backendServices/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/google/resource_compute_backend_service_generated_test.go b/google/resource_compute_backend_service_generated_test.go index b4d9a73e823..2bcf1a1965e 100644 --- a/google/resource_compute_backend_service_generated_test.go +++ b/google/resource_compute_backend_service_generated_test.go @@ -52,7 +52,7 @@ func testAccComputeBackendService_backendServiceBasicExample(context map[string] return Nprintf(` resource "google_compute_backend_service" "default" { name = "backend-service%{random_suffix}" - health_checks = ["${google_compute_http_health_check.default.self_link}"] + health_checks = [google_compute_http_health_check.default.self_link] } resource "google_compute_http_health_check" "default" { diff --git a/google/resource_compute_backend_service_signed_url_key.go b/google/resource_compute_backend_service_signed_url_key.go index 5c45c504e9d..ed5dda551f6 100644 --- a/google/resource_compute_backend_service_signed_url_key.go +++ b/google/resource_compute_backend_service_signed_url_key.go @@ -114,7 +114,7 @@ func resourceComputeBackendServiceSignedUrlKeyCreate(d *schema.ResourceData, met } // Store the ID now - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/global/backendServices/{{backend_service}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } diff --git a/google/resource_compute_backend_service_signed_url_key_test.go b/google/resource_compute_backend_service_signed_url_key_test.go index baad22edcd6..ba546e6b036 100644 --- a/google/resource_compute_backend_service_signed_url_key_test.go +++ b/google/resource_compute_backend_service_signed_url_key_test.go @@ -85,7 +85,7 @@ func checkComputeBackendServiceSignedUrlKeyExists(s *terraform.State) (bool, err } config := testAccProvider.Meta().(*Config) - keyName := rs.Primary.ID + keyName := rs.Primary.Attributes["name"] url, err := replaceVarsForTest(config, rs, "{{ComputeBasePath}}projects/{{project}}/global/backendServices/{{backend_service}}") if err != nil { diff --git a/google/resource_compute_backend_service_test.go b/google/resource_compute_backend_service_test.go index b7e97b4f2b8..156d377797d 100644 --- a/google/resource_compute_backend_service_test.go +++ b/google/resource_compute_backend_service_test.go @@ -927,7 +927,9 @@ resource "google_compute_instance" "endpoint-instance" { network_interface { subnetwork = "${google_compute_subnetwork.default.self_link}" - access_config { } + access_config { + network_tier = "PREMIUM" + } } } @@ -1003,7 +1005,9 @@ resource "google_compute_instance" "endpoint-instance" { network_interface { subnetwork = 
"${google_compute_subnetwork.default.self_link}" - access_config { } + access_config { + network_tier = "PREMIUM" + } } } diff --git a/google/resource_compute_disk.go b/google/resource_compute_disk.go index 44c27e7c7e1..cbbc481c74d 100644 --- a/google/resource_compute_disk.go +++ b/google/resource_compute_disk.go @@ -506,19 +506,6 @@ project/zones/zone/instances/instance`, DiffSuppressFunc: compareSelfLinkOrResourceName, }, }, - "disk_encryption_key_raw": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Sensitive: true, - Removed: "Use disk_encryption_key.raw_key instead.", - }, - - "disk_encryption_key_sha256": { - Type: schema.TypeString, - Computed: true, - Removed: "Use disk_encryption_key.sha256 instead.", - }, "project": { Type: schema.TypeString, Optional: true, @@ -637,7 +624,7 @@ func resourceComputeDiskCreate(d *schema.ResourceData, meta interface{}) error { } // Store the ID now - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/disks/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -959,7 +946,7 @@ func resourceComputeDiskImport(d *schema.ResourceData, meta interface{}) ([]*sch } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/disks/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/google/resource_compute_disk_test.go b/google/resource_compute_disk_test.go index 7a84b2ee8a7..5b86f891e90 100644 --- a/google/resource_compute_disk_test.go +++ b/google/resource_compute_disk_test.go @@ -442,12 +442,12 @@ func testAccCheckComputeDiskExists(n, p string, disk *compute.Disk) resource.Tes config := testAccProvider.Meta().(*Config) found, err := config.clientCompute.Disks.Get( - p, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() + p, rs.Primary.Attributes["zone"], rs.Primary.Attributes["name"]).Do() if err != nil { return err } - if found.Name != rs.Primary.ID { + if found.Name != rs.Primary.Attributes["name"] { return fmt.Errorf("Disk not found") } diff --git a/google/resource_compute_firewall.go b/google/resource_compute_firewall.go index 7cdf838d30c..d4d08bd1079 100644 --- a/google/resource_compute_firewall.go +++ b/google/resource_compute_firewall.go @@ -102,18 +102,18 @@ character, which cannot be a dash.`, Description: `The list of ALLOW rules specified by this firewall. Each rule specifies a protocol and port-range tuple that describes a permitted connection.`, - Elem: computeFirewallAllowSchema(), - Set: resourceComputeFirewallRuleHash, - ConflictsWith: []string{"deny"}, + Elem: computeFirewallAllowSchema(), + Set: resourceComputeFirewallRuleHash, + ExactlyOneOf: []string{"allow", "deny"}, }, "deny": { Type: schema.TypeSet, Optional: true, Description: `The list of DENY rules specified by this firewall. 
Each rule specifies a protocol and port-range tuple that describes a denied connection.`, - Elem: computeFirewallDenySchema(), - Set: resourceComputeFirewallRuleHash, - ConflictsWith: []string{"allow"}, + Elem: computeFirewallDenySchema(), + Set: resourceComputeFirewallRuleHash, + ExactlyOneOf: []string{"allow", "deny"}, }, "description": { Type: schema.TypeString, @@ -435,7 +435,7 @@ func resourceComputeFirewallCreate(d *schema.ResourceData, meta interface{}) err } // Store the ID now - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/global/firewalls/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -696,7 +696,7 @@ func resourceComputeFirewallImport(d *schema.ResourceData, meta interface{}) ([] } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/global/firewalls/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/google/resource_compute_firewall_generated_test.go b/google/resource_compute_firewall_generated_test.go index e1075ff7d4c..60edb27bec1 100644 --- a/google/resource_compute_firewall_generated_test.go +++ b/google/resource_compute_firewall_generated_test.go @@ -52,7 +52,7 @@ func testAccComputeFirewall_firewallBasicExample(context map[string]interface{}) return Nprintf(` resource "google_compute_firewall" "default" { name = "test-firewall%{random_suffix}" - network = "${google_compute_network.default.name}" + network = google_compute_network.default.name allow { protocol = "icmp" diff --git a/google/resource_compute_forwarding_rule.go b/google/resource_compute_forwarding_rule.go index 354f822f9b7..ac12e1e30da 100644 --- a/google/resource_compute_forwarding_rule.go +++ b/google/resource_compute_forwarding_rule.go @@ -57,10 +57,11 @@ characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.`, }, "ip_address": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + ValidateFunc: validateIpAddress, Description: `The IP address that this forwarding rule is serving on behalf of. Addresses are restricted based on the forwarding rule's load balancing @@ -80,15 +81,11 @@ forwarding rule. By default, if this field is empty, an ephemeral internal IP address will be automatically allocated from the IP range of the subnet or network configured for this forwarding rule. -~> **NOTE** The address should be specified as a literal IP address, -e.g. '100.1.2.3' to avoid a permanent diff, as the server returns the -IP address regardless of the input value. - -The server accepts a literal IP address or a URL reference to an existing -Address resource. The following examples are all valid but only the first -will prevent a permadiff. If you are using 'google_compute_address' or -similar, interpolate using '.address' instead of '.self_link' or similar -to prevent a diff on re-apply.`, +An address must be specified by a literal IP address. 
~> **NOTE**: While +the API allows you to specify various resource paths for an address resource +instead, Terraform requires this to specifically be an IP address to +avoid needing to fetch the IP address from resource paths on refresh +or unnecessary diffs.`, }, "ip_protocol": { Type: schema.TypeString, @@ -128,14 +125,6 @@ for INTERNAL load balancing.`, Description: `An optional description of this resource. Provide this property when you create the resource.`, }, - "ip_version": { - Type: schema.TypeString, - Optional: true, - Deprecated: "ipVersion is not used for regional forwarding rules. Please remove this field if you are using it.", - ForceNew: true, - ValidateFunc: validation.StringInSlice([]string{"IPV4", "IPV6", ""}, false), - Description: `ipVersion is not a valid field for regional forwarding rules.`, - }, "load_balancing_scheme": { Type: schema.TypeString, Optional: true, @@ -276,6 +265,11 @@ object.`, Description: `The internal fully qualified service name for this Forwarding Rule. This field is only used for INTERNAL load balancing.`, }, + "ip_version": { + Type: schema.TypeString, + Optional: true, + Removed: "ipVersion is not used for regional forwarding rules. Please remove this field if you are using it.", + }, "project": { Type: schema.TypeString, Optional: true, @@ -318,12 +312,6 @@ func resourceComputeForwardingRuleCreate(d *schema.ResourceData, meta interface{ } else if v, ok := d.GetOkExists("backend_service"); !isEmptyValue(reflect.ValueOf(backendServiceProp)) && (ok || !reflect.DeepEqual(v, backendServiceProp)) { obj["backendService"] = backendServiceProp } - ipVersionProp, err := expandComputeForwardingRuleIpVersion(d.Get("ip_version"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ip_version"); !isEmptyValue(reflect.ValueOf(ipVersionProp)) && (ok || !reflect.DeepEqual(v, ipVersionProp)) { - obj["ipVersion"] = ipVersionProp - } loadBalancingSchemeProp, err := expandComputeForwardingRuleLoadBalancingScheme(d.Get("load_balancing_scheme"), d, config) if err != nil { return err @@ -407,7 +395,7 @@ func resourceComputeForwardingRuleCreate(d *schema.ResourceData, meta interface{ } // Store the ID now - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/forwardingRules/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -470,9 +458,6 @@ func resourceComputeForwardingRuleRead(d *schema.ResourceData, meta interface{}) if err := d.Set("backend_service", flattenComputeForwardingRuleBackendService(res["backendService"], d)); err != nil { return fmt.Errorf("Error reading ForwardingRule: %s", err) } - if err := d.Set("ip_version", flattenComputeForwardingRuleIpVersion(res["ipVersion"], d)); err != nil { - return fmt.Errorf("Error reading ForwardingRule: %s", err) - } if err := d.Set("load_balancing_scheme", flattenComputeForwardingRuleLoadBalancingScheme(res["loadBalancingScheme"], d)); err != nil { return fmt.Errorf("Error reading ForwardingRule: %s", err) } @@ -618,7 +603,7 @@ func resourceComputeForwardingRuleImport(d *schema.ResourceData, meta interface{ } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/forwardingRules/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -650,10 +635,6 @@ func flattenComputeForwardingRuleBackendService(v interface{}, d *schema.Resourc return
ConvertSelfLinkToV1(v.(string)) } -func flattenComputeForwardingRuleIpVersion(v interface{}, d *schema.ResourceData) interface{} { - return v -} - func flattenComputeForwardingRuleLoadBalancingScheme(v interface{}, d *schema.ResourceData) interface{} { return v } @@ -762,10 +743,6 @@ func expandComputeForwardingRuleBackendService(v interface{}, d TerraformResourc return url + v.(string), nil } -func expandComputeForwardingRuleIpVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - func expandComputeForwardingRuleLoadBalancingScheme(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } diff --git a/google/resource_compute_forwarding_rule_generated_test.go b/google/resource_compute_forwarding_rule_generated_test.go index dacf54d6099..5370a9f9cc4 100644 --- a/google/resource_compute_forwarding_rule_generated_test.go +++ b/google/resource_compute_forwarding_rule_generated_test.go @@ -52,7 +52,7 @@ func testAccComputeForwardingRule_forwardingRuleBasicExample(context map[string] return Nprintf(` resource "google_compute_forwarding_rule" "default" { name = "website-forwarding-rule%{random_suffix}" - target = "${google_compute_target_pool.default.self_link}" + target = google_compute_target_pool.default.self_link port_range = "80" } @@ -90,20 +90,20 @@ func testAccComputeForwardingRule_forwardingRuleInternallbExample(context map[st return Nprintf(` // Forwarding rule for Internal Load Balancing resource "google_compute_forwarding_rule" "default" { - name = "website-forwarding-rule%{random_suffix}" - region = "us-central1" + name = "website-forwarding-rule%{random_suffix}" + region = "us-central1" load_balancing_scheme = "INTERNAL" - backend_service = "${google_compute_region_backend_service.backend.self_link}" + backend_service = google_compute_region_backend_service.backend.self_link all_ports = true - network = "${google_compute_network.default.name}" - subnetwork = "${google_compute_subnetwork.default.name}" + network = google_compute_network.default.name + subnetwork = google_compute_subnetwork.default.name } resource "google_compute_region_backend_service" "backend" { - name = "website-backend%{random_suffix}" - region = "us-central1" - health_checks = ["${google_compute_health_check.hc.self_link}"] + name = "website-backend%{random_suffix}" + region = "us-central1" + health_checks = [google_compute_health_check.hc.self_link] } resource "google_compute_health_check" "hc" { @@ -117,7 +117,7 @@ resource "google_compute_health_check" "hc" { } resource "google_compute_network" "default" { - name = "website-net%{random_suffix}" + name = "website-net%{random_suffix}" auto_create_subnetworks = false } @@ -125,7 +125,7 @@ resource "google_compute_subnetwork" "default" { name = "website-net%{random_suffix}" ip_cidr_range = "10.0.0.0/16" region = "us-central1" - network = "${google_compute_network.default.self_link}" + network = google_compute_network.default.self_link } `, context) } diff --git a/google/resource_compute_global_address.go b/google/resource_compute_global_address.go index 6021f6dd5dc..0413046dbd5 100644 --- a/google/resource_compute_global_address.go +++ b/google/resource_compute_global_address.go @@ -209,7 +209,7 @@ func resourceComputeGlobalAddressCreate(d *schema.ResourceData, meta interface{} } // Store the ID now - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/global/addresses/{{name}}") if err != nil { return fmt.Errorf("Error 
constructing id: %s", err) } @@ -341,7 +341,7 @@ func resourceComputeGlobalAddressImport(d *schema.ResourceData, meta interface{} } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/global/addresses/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/google/resource_compute_global_forwarding_rule.go b/google/resource_compute_global_forwarding_rule.go index ee947f78e21..47744002f9f 100644 --- a/google/resource_compute_global_forwarding_rule.go +++ b/google/resource_compute_global_forwarding_rule.go @@ -65,10 +65,11 @@ For INTERNAL_SELF_MANAGED load balancing, only HTTP and HTTPS targets are valid.`, }, "ip_address": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + ValidateFunc: validateIpAddress, Description: `The IP address that this forwarding rule is serving on behalf of. Addresses are restricted based on the forwarding rule's load balancing @@ -88,15 +89,11 @@ forwarding rule. By default, if this field is empty, an ephemeral internal IP address will be automatically allocated from the IP range of the subnet or network configured for this forwarding rule. -~> **NOTE** The address should be specified as a literal IP address, -e.g. '100.1.2.3' to avoid a permanent diff, as the server returns the -IP address regardless of the input value. - -The server accepts a literal IP address or a URL reference to an existing -Address resource. The following examples are all valid but only the first -will prevent a permadiff. If you are using 'google_compute_address' or -similar, interpolate using '.address' instead of '.self_link' or similar -to prevent a diff on re-apply.`, +An address must be specified by a literal IP address. 
~> **NOTE**: While +the API allows you to specify various resource paths for an address resource +instead, Terraform requires this to specifically be an IP address to +avoid needing to fetch the IP address from resource paths on refresh +or unnecessary diffs.`, }, "ip_protocol": { Type: schema.TypeString, @@ -321,7 +318,7 @@ func resourceComputeGlobalForwardingRuleCreate(d *schema.ResourceData, meta inte } // Store the ID now - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/global/forwardingRules/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -504,7 +501,7 @@ func resourceComputeGlobalForwardingRuleImport(d *schema.ResourceData, meta inte } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/global/forwardingRules/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/google/resource_compute_global_forwarding_rule_generated_test.go b/google/resource_compute_global_forwarding_rule_generated_test.go index 0087723e63c..dd4129639e0 100644 --- a/google/resource_compute_global_forwarding_rule_generated_test.go +++ b/google/resource_compute_global_forwarding_rule_generated_test.go @@ -52,20 +52,20 @@ func testAccComputeGlobalForwardingRule_globalForwardingRuleHttpExample(context return Nprintf(` resource "google_compute_global_forwarding_rule" "default" { name = "global-rule%{random_suffix}" - target = "${google_compute_target_http_proxy.default.self_link}" + target = google_compute_target_http_proxy.default.self_link port_range = "80" } resource "google_compute_target_http_proxy" "default" { name = "target-proxy%{random_suffix}" description = "a description" - url_map = "${google_compute_url_map.default.self_link}" + url_map = google_compute_url_map.default.self_link } resource "google_compute_url_map" "default" { name = "url-map-target-proxy%{random_suffix}" description = "a description" - default_service = "${google_compute_backend_service.default.self_link}" + default_service = google_compute_backend_service.default.self_link host_rule { hosts = ["mysite.com"] @@ -74,11 +74,11 @@ resource "google_compute_url_map" "default" { path_matcher { name = "allpaths" - default_service = "${google_compute_backend_service.default.self_link}" + default_service = google_compute_backend_service.default.self_link path_rule { paths = ["/*"] - service = "${google_compute_backend_service.default.self_link}" + service = google_compute_backend_service.default.self_link } } } @@ -89,7 +89,7 @@ resource "google_compute_backend_service" "default" { protocol = "HTTP" timeout_sec = 10 - health_checks = ["${google_compute_http_health_check.default.self_link}"] + health_checks = [google_compute_http_health_check.default.self_link] } resource "google_compute_http_health_check" "default" { diff --git a/google/resource_compute_health_check.go b/google/resource_compute_health_check.go index f803fa78fef..0a98919b385 100644 --- a/google/resource_compute_health_check.go +++ b/google/resource_compute_health_check.go @@ -175,18 +175,21 @@ consecutive successes. The default value is 2.`, Description: `The value of the host header in the HTTP2 health check request.
If left empty (default value), the public IP on behalf of which this health check is performed will be used.`, + AtLeastOneOf: []string{"http2_health_check.0.host", "http2_health_check.0.request_path", "http2_health_check.0.response", "http2_health_check.0.port", "http2_health_check.0.port_name", "http2_health_check.0.proxy_header", "http2_health_check.0.port_specification"}, }, "port": { Type: schema.TypeInt, Optional: true, Description: `The TCP port number for the HTTP2 health check request. The default value is 443.`, + AtLeastOneOf: []string{"http2_health_check.0.host", "http2_health_check.0.request_path", "http2_health_check.0.response", "http2_health_check.0.port", "http2_health_check.0.port_name", "http2_health_check.0.proxy_header", "http2_health_check.0.port_specification"}, }, "port_name": { Type: schema.TypeString, Optional: true, Description: `Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence.`, + AtLeastOneOf: []string{"http2_health_check.0.host", "http2_health_check.0.request_path", "http2_health_check.0.response", "http2_health_check.0.port", "http2_health_check.0.port_name", "http2_health_check.0.proxy_header", "http2_health_check.0.port_specification"}, }, "port_specification": { Type: schema.TypeString, @@ -206,6 +209,7 @@ following values: If not specified, HTTP2 health check follows behavior specified in 'port' and 'portName' fields.`, + AtLeastOneOf: []string{"http2_health_check.0.host", "http2_health_check.0.request_path", "http2_health_check.0.response", "http2_health_check.0.port", "http2_health_check.0.port_name", "http2_health_check.0.proxy_header", "http2_health_check.0.port_specification"}, }, "proxy_header": { Type: schema.TypeString, @@ -213,14 +217,16 @@ If not specified, HTTP2 health check follows behavior specified in 'port' and ValidateFunc: validation.StringInSlice([]string{"NONE", "PROXY_V1", ""}, false), Description: `Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.`, - Default: "NONE", + Default: "NONE", + AtLeastOneOf: []string{"http2_health_check.0.host", "http2_health_check.0.request_path", "http2_health_check.0.response", "http2_health_check.0.port", "http2_health_check.0.port_name", "http2_health_check.0.proxy_header", "http2_health_check.0.port_specification"}, }, "request_path": { Type: schema.TypeString, Optional: true, Description: `The request path of the HTTP2 health check request. The default value is /.`, - Default: "/", + Default: "/", + AtLeastOneOf: []string{"http2_health_check.0.host", "http2_health_check.0.request_path", "http2_health_check.0.response", "http2_health_check.0.port", "http2_health_check.0.port_name", "http2_health_check.0.proxy_header", "http2_health_check.0.port_specification"}, }, "response": { Type: schema.TypeString, @@ -228,10 +234,11 @@ The default value is /.`, Description: `The bytes to match against the beginning of the response data. If left empty (the default value), any response will indicate health. 
The response data can only be ASCII.`, + AtLeastOneOf: []string{"http2_health_check.0.host", "http2_health_check.0.request_path", "http2_health_check.0.response", "http2_health_check.0.port", "http2_health_check.0.port_name", "http2_health_check.0.proxy_header", "http2_health_check.0.port_specification"}, }, }, }, - ConflictsWith: []string{"http_health_check", "https_health_check", "tcp_health_check", "ssl_health_check"}, + ExactlyOneOf: []string{"http_health_check", "https_health_check", "http2_health_check", "tcp_health_check", "ssl_health_check"}, }, "http_health_check": { Type: schema.TypeList, @@ -247,18 +254,21 @@ can only be ASCII.`, Description: `The value of the host header in the HTTP health check request. If left empty (default value), the public IP on behalf of which this health check is performed will be used.`, + AtLeastOneOf: []string{"http_health_check.0.host", "http_health_check.0.request_path", "http_health_check.0.response", "http_health_check.0.port", "http_health_check.0.port_name", "http_health_check.0.proxy_header", "http_health_check.0.port_specification"}, }, "port": { Type: schema.TypeInt, Optional: true, Description: `The TCP port number for the HTTP health check request. The default value is 80.`, + AtLeastOneOf: []string{"http_health_check.0.host", "http_health_check.0.request_path", "http_health_check.0.response", "http_health_check.0.port", "http_health_check.0.port_name", "http_health_check.0.proxy_header", "http_health_check.0.port_specification"}, }, "port_name": { Type: schema.TypeString, Optional: true, Description: `Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence.`, + AtLeastOneOf: []string{"http_health_check.0.host", "http_health_check.0.request_path", "http_health_check.0.response", "http_health_check.0.port", "http_health_check.0.port_name", "http_health_check.0.proxy_header", "http_health_check.0.port_specification"}, }, "port_specification": { Type: schema.TypeString, @@ -278,6 +288,7 @@ following values: If not specified, HTTP health check follows behavior specified in 'port' and 'portName' fields.`, + AtLeastOneOf: []string{"http_health_check.0.host", "http_health_check.0.request_path", "http_health_check.0.response", "http_health_check.0.port", "http_health_check.0.port_name", "http_health_check.0.proxy_header", "http_health_check.0.port_specification"}, }, "proxy_header": { Type: schema.TypeString, @@ -285,14 +296,16 @@ If not specified, HTTP health check follows behavior specified in 'port' and ValidateFunc: validation.StringInSlice([]string{"NONE", "PROXY_V1", ""}, false), Description: `Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.`, - Default: "NONE", + Default: "NONE", + AtLeastOneOf: []string{"http_health_check.0.host", "http_health_check.0.request_path", "http_health_check.0.response", "http_health_check.0.port", "http_health_check.0.port_name", "http_health_check.0.proxy_header", "http_health_check.0.port_specification"}, }, "request_path": { Type: schema.TypeString, Optional: true, Description: `The request path of the HTTP health check request. 
The default value is /.`, - Default: "/", + Default: "/", + AtLeastOneOf: []string{"http_health_check.0.host", "http_health_check.0.request_path", "http_health_check.0.response", "http_health_check.0.port", "http_health_check.0.port_name", "http_health_check.0.proxy_header", "http_health_check.0.port_specification"}, }, "response": { Type: schema.TypeString, @@ -300,10 +313,11 @@ The default value is /.`, Description: `The bytes to match against the beginning of the response data. If left empty (the default value), any response will indicate health. The response data can only be ASCII.`, + AtLeastOneOf: []string{"http_health_check.0.host", "http_health_check.0.request_path", "http_health_check.0.response", "http_health_check.0.port", "http_health_check.0.port_name", "http_health_check.0.proxy_header", "http_health_check.0.port_specification"}, }, }, }, - ConflictsWith: []string{"https_health_check", "tcp_health_check", "ssl_health_check", "http2_health_check"}, + ExactlyOneOf: []string{"http_health_check", "https_health_check", "http2_health_check", "tcp_health_check", "ssl_health_check"}, }, "https_health_check": { Type: schema.TypeList, @@ -319,18 +333,21 @@ can only be ASCII.`, Description: `The value of the host header in the HTTPS health check request. If left empty (default value), the public IP on behalf of which this health check is performed will be used.`, + AtLeastOneOf: []string{"https_health_check.0.host", "https_health_check.0.request_path", "https_health_check.0.response", "https_health_check.0.port", "https_health_check.0.port_name", "https_health_check.0.proxy_header", "https_health_check.0.port_specification"}, }, "port": { Type: schema.TypeInt, Optional: true, Description: `The TCP port number for the HTTPS health check request. The default value is 443.`, + AtLeastOneOf: []string{"https_health_check.0.host", "https_health_check.0.request_path", "https_health_check.0.response", "https_health_check.0.port", "https_health_check.0.port_name", "https_health_check.0.proxy_header", "https_health_check.0.port_specification"}, }, "port_name": { Type: schema.TypeString, Optional: true, Description: `Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence.`, + AtLeastOneOf: []string{"https_health_check.0.host", "https_health_check.0.request_path", "https_health_check.0.response", "https_health_check.0.port", "https_health_check.0.port_name", "https_health_check.0.proxy_header", "https_health_check.0.port_specification"}, }, "port_specification": { Type: schema.TypeString, @@ -350,6 +367,7 @@ following values: If not specified, HTTPS health check follows behavior specified in 'port' and 'portName' fields.`, + AtLeastOneOf: []string{"https_health_check.0.host", "https_health_check.0.request_path", "https_health_check.0.response", "https_health_check.0.port", "https_health_check.0.port_name", "https_health_check.0.proxy_header", "https_health_check.0.port_specification"}, }, "proxy_header": { Type: schema.TypeString, @@ -357,14 +375,16 @@ If not specified, HTTPS health check follows behavior specified in 'port' and ValidateFunc: validation.StringInSlice([]string{"NONE", "PROXY_V1", ""}, false), Description: `Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. 
The default is NONE.`, - Default: "NONE", + Default: "NONE", + AtLeastOneOf: []string{"https_health_check.0.host", "https_health_check.0.request_path", "https_health_check.0.response", "https_health_check.0.port", "https_health_check.0.port_name", "https_health_check.0.proxy_header", "https_health_check.0.port_specification"}, }, "request_path": { Type: schema.TypeString, Optional: true, Description: `The request path of the HTTPS health check request. The default value is /.`, - Default: "/", + Default: "/", + AtLeastOneOf: []string{"https_health_check.0.host", "https_health_check.0.request_path", "https_health_check.0.response", "https_health_check.0.port", "https_health_check.0.port_name", "https_health_check.0.proxy_header", "https_health_check.0.port_specification"}, }, "response": { Type: schema.TypeString, @@ -372,10 +392,11 @@ The default value is /.`, Description: `The bytes to match against the beginning of the response data. If left empty (the default value), any response will indicate health. The response data can only be ASCII.`, + AtLeastOneOf: []string{"https_health_check.0.host", "https_health_check.0.request_path", "https_health_check.0.response", "https_health_check.0.port", "https_health_check.0.port_name", "https_health_check.0.proxy_header", "https_health_check.0.port_specification"}, }, }, }, - ConflictsWith: []string{"http_health_check", "tcp_health_check", "ssl_health_check", "http2_health_check"}, + ExactlyOneOf: []string{"http_health_check", "https_health_check", "http2_health_check", "tcp_health_check", "ssl_health_check"}, }, "ssl_health_check": { Type: schema.TypeList, @@ -390,12 +411,14 @@ can only be ASCII.`, Optional: true, Description: `The TCP port number for the SSL health check request. The default value is 443.`, + AtLeastOneOf: []string{"ssl_health_check.0.request", "ssl_health_check.0.response", "ssl_health_check.0.port", "ssl_health_check.0.port_name", "ssl_health_check.0.proxy_header", "ssl_health_check.0.port_specification"}, }, "port_name": { Type: schema.TypeString, Optional: true, Description: `Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence.`, + AtLeastOneOf: []string{"ssl_health_check.0.request", "ssl_health_check.0.response", "ssl_health_check.0.port", "ssl_health_check.0.port_name", "ssl_health_check.0.proxy_header", "ssl_health_check.0.port_specification"}, }, "port_specification": { Type: schema.TypeString, @@ -415,6 +438,7 @@ following values: If not specified, SSL health check follows behavior specified in 'port' and 'portName' fields.`, + AtLeastOneOf: []string{"ssl_health_check.0.request", "ssl_health_check.0.response", "ssl_health_check.0.port", "ssl_health_check.0.port_name", "ssl_health_check.0.proxy_header", "ssl_health_check.0.port_specification"}, }, "proxy_header": { Type: schema.TypeString, @@ -422,7 +446,8 @@ If not specified, SSL health check follows behavior specified in 'port' and ValidateFunc: validation.StringInSlice([]string{"NONE", "PROXY_V1", ""}, false), Description: `Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.`, - Default: "NONE", + Default: "NONE", + AtLeastOneOf: []string{"ssl_health_check.0.request", "ssl_health_check.0.response", "ssl_health_check.0.port", "ssl_health_check.0.port_name", "ssl_health_check.0.proxy_header", "ssl_health_check.0.port_specification"}, }, "request": { Type: schema.TypeString, @@ -431,6 +456,7 @@ backend, either NONE or PROXY_V1. 
The default is NONE.`, established (default value is empty). If both request and response are empty, the connection establishment alone will indicate health. The request data can only be ASCII.`, + AtLeastOneOf: []string{"ssl_health_check.0.request", "ssl_health_check.0.response", "ssl_health_check.0.port", "ssl_health_check.0.port_name", "ssl_health_check.0.proxy_header", "ssl_health_check.0.port_specification"}, }, "response": { Type: schema.TypeString, @@ -438,10 +464,11 @@ data can only be ASCII.`, Description: `The bytes to match against the beginning of the response data. If left empty (the default value), any response will indicate health. The response data can only be ASCII.`, + AtLeastOneOf: []string{"ssl_health_check.0.request", "ssl_health_check.0.response", "ssl_health_check.0.port", "ssl_health_check.0.port_name", "ssl_health_check.0.proxy_header", "ssl_health_check.0.port_specification"}, }, }, }, - ConflictsWith: []string{"http_health_check", "https_health_check", "tcp_health_check", "http2_health_check"}, + ExactlyOneOf: []string{"http_health_check", "https_health_check", "http2_health_check", "tcp_health_check", "ssl_health_check"}, }, "tcp_health_check": { Type: schema.TypeList, @@ -456,12 +483,14 @@ can only be ASCII.`, Optional: true, Description: `The TCP port number for the TCP health check request. The default value is 443.`, + AtLeastOneOf: []string{"tcp_health_check.0.request", "tcp_health_check.0.response", "tcp_health_check.0.port", "tcp_health_check.0.port_name", "tcp_health_check.0.proxy_header", "tcp_health_check.0.port_specification"}, }, "port_name": { Type: schema.TypeString, Optional: true, Description: `Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence.`, + AtLeastOneOf: []string{"tcp_health_check.0.request", "tcp_health_check.0.response", "tcp_health_check.0.port", "tcp_health_check.0.port_name", "tcp_health_check.0.proxy_header", "tcp_health_check.0.port_specification"}, }, "port_specification": { Type: schema.TypeString, @@ -481,6 +510,7 @@ following values: If not specified, TCP health check follows behavior specified in 'port' and 'portName' fields.`, + AtLeastOneOf: []string{"tcp_health_check.0.request", "tcp_health_check.0.response", "tcp_health_check.0.port", "tcp_health_check.0.port_name", "tcp_health_check.0.proxy_header", "tcp_health_check.0.port_specification"}, }, "proxy_header": { Type: schema.TypeString, @@ -488,7 +518,8 @@ If not specified, TCP health check follows behavior specified in 'port' and ValidateFunc: validation.StringInSlice([]string{"NONE", "PROXY_V1", ""}, false), Description: `Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.`, - Default: "NONE", + Default: "NONE", + AtLeastOneOf: []string{"tcp_health_check.0.request", "tcp_health_check.0.response", "tcp_health_check.0.port", "tcp_health_check.0.port_name", "tcp_health_check.0.proxy_header", "tcp_health_check.0.port_specification"}, }, "request": { Type: schema.TypeString, @@ -497,6 +528,7 @@ backend, either NONE or PROXY_V1. The default is NONE.`, established (default value is empty). If both request and response are empty, the connection establishment alone will indicate health. 
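Taken together, the constraints mean a 3.0.0 configuration must contain exactly one protocol block and set at least one field inside it. A minimal valid config in the style of the generated tests below (Nprintf and the context map are the suite's existing helpers; this particular function is illustrative, not part of the patch):

func testAccComputeHealthCheck_minimalExample(context map[string]interface{}) string {
	return Nprintf(`
resource "google_compute_health_check" "https-health-check" {
  name = "https-health-check%{random_suffix}"

  # exactly one *_health_check block, with at least one field set inside it
  https_health_check {
    port = "443"
  }
}
`, context)
}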
The request data can only be ASCII.`, + AtLeastOneOf: []string{"tcp_health_check.0.request", "tcp_health_check.0.response", "tcp_health_check.0.port", "tcp_health_check.0.port_name", "tcp_health_check.0.proxy_header", "tcp_health_check.0.port_specification"}, }, "response": { Type: schema.TypeString, @@ -504,10 +536,11 @@ data can only be ASCII.`, Description: `The bytes to match against the beginning of the response data. If left empty (the default value), any response will indicate health. The response data can only be ASCII.`, + AtLeastOneOf: []string{"tcp_health_check.0.request", "tcp_health_check.0.response", "tcp_health_check.0.port", "tcp_health_check.0.port_name", "tcp_health_check.0.proxy_header", "tcp_health_check.0.port_specification"}, }, }, }, - ConflictsWith: []string{"http_health_check", "https_health_check", "ssl_health_check", "http2_health_check"}, + ExactlyOneOf: []string{"http_health_check", "https_health_check", "http2_health_check", "tcp_health_check", "ssl_health_check"}, }, "timeout_sec": { Type: schema.TypeInt, @@ -640,7 +673,7 @@ func resourceComputeHealthCheckCreate(d *schema.ResourceData, meta interface{}) } // Store the ID now - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/global/healthChecks/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -894,7 +927,7 @@ func resourceComputeHealthCheckImport(d *schema.ResourceData, meta interface{}) } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/global/healthChecks/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/google/resource_compute_health_check_generated_test.go b/google/resource_compute_health_check_generated_test.go index 32e8743b630..eecd8f2368b 100644 --- a/google/resource_compute_health_check_generated_test.go +++ b/google/resource_compute_health_check_generated_test.go @@ -51,14 +51,14 @@ func TestAccComputeHealthCheck_healthCheckTcpExample(t *testing.T) { func testAccComputeHealthCheck_healthCheckTcpExample(context map[string]interface{}) string { return Nprintf(` resource "google_compute_health_check" "tcp-health-check" { - name = "tcp-health-check%{random_suffix}" + name = "tcp-health-check%{random_suffix}" - timeout_sec = 1 - check_interval_sec = 1 + timeout_sec = 1 + check_interval_sec = 1 - tcp_health_check { - port = "80" - } + tcp_health_check { + port = "80" + } } `, context) } @@ -90,7 +90,7 @@ func TestAccComputeHealthCheck_healthCheckTcpFullExample(t *testing.T) { func testAccComputeHealthCheck_healthCheckTcpFullExample(context map[string]interface{}) string { return Nprintf(` resource "google_compute_health_check" "tcp-health-check" { - name = "tcp-health-check%{random_suffix}" + name = "tcp-health-check%{random_suffix}" description = "Health check via tcp" timeout_sec = 1 @@ -99,11 +99,11 @@ resource "google_compute_health_check" "tcp-health-check" { unhealthy_threshold = 5 tcp_health_check { - port_name = "health-check-port" + port_name = "health-check-port" port_specification = "USE_NAMED_PORT" - request = "ARE YOU HEALTHY?" - proxy_header = "NONE" - response = "I AM HEALTHY" + request = "ARE YOU HEALTHY?" 
+ proxy_header = "NONE" + response = "I AM HEALTHY" } } `, context) @@ -136,14 +136,14 @@ func TestAccComputeHealthCheck_healthCheckSslExample(t *testing.T) { func testAccComputeHealthCheck_healthCheckSslExample(context map[string]interface{}) string { return Nprintf(` resource "google_compute_health_check" "ssl-health-check" { - name = "ssl-health-check%{random_suffix}" + name = "ssl-health-check%{random_suffix}" - timeout_sec = 1 - check_interval_sec = 1 + timeout_sec = 1 + check_interval_sec = 1 - ssl_health_check { - port = "443" - } + ssl_health_check { + port = "443" + } } `, context) } @@ -175,7 +175,7 @@ func TestAccComputeHealthCheck_healthCheckSslFullExample(t *testing.T) { func testAccComputeHealthCheck_healthCheckSslFullExample(context map[string]interface{}) string { return Nprintf(` resource "google_compute_health_check" "ssl-health-check" { - name = "ssl-health-check%{random_suffix}" + name = "ssl-health-check%{random_suffix}" description = "Health check via ssl" timeout_sec = 1 @@ -184,11 +184,11 @@ resource "google_compute_health_check" "ssl-health-check" { unhealthy_threshold = 5 ssl_health_check { - port_name = "health-check-port" + port_name = "health-check-port" port_specification = "USE_NAMED_PORT" - request = "ARE YOU HEALTHY?" - proxy_header = "NONE" - response = "I AM HEALTHY" + request = "ARE YOU HEALTHY?" + proxy_header = "NONE" + response = "I AM HEALTHY" } } `, context) @@ -221,14 +221,14 @@ func TestAccComputeHealthCheck_healthCheckHttpExample(t *testing.T) { func testAccComputeHealthCheck_healthCheckHttpExample(context map[string]interface{}) string { return Nprintf(` resource "google_compute_health_check" "http-health-check" { - name = "http-health-check%{random_suffix}" + name = "http-health-check%{random_suffix}" - timeout_sec = 1 - check_interval_sec = 1 + timeout_sec = 1 + check_interval_sec = 1 - http_health_check { - port = 80 - } + http_health_check { + port = 80 + } } `, context) } @@ -260,7 +260,7 @@ func TestAccComputeHealthCheck_healthCheckHttpFullExample(t *testing.T) { func testAccComputeHealthCheck_healthCheckHttpFullExample(context map[string]interface{}) string { return Nprintf(` resource "google_compute_health_check" "http-health-check" { - name = "http-health-check%{random_suffix}" + name = "http-health-check%{random_suffix}" description = "Health check via http" timeout_sec = 1 @@ -269,12 +269,12 @@ resource "google_compute_health_check" "http-health-check" { unhealthy_threshold = 5 http_health_check { - port_name = "health-check-port" + port_name = "health-check-port" port_specification = "USE_NAMED_PORT" - host = "1.2.3.4" - request_path = "/mypath" - proxy_header = "NONE" - response = "I AM HEALTHY" + host = "1.2.3.4" + request_path = "/mypath" + proxy_header = "NONE" + response = "I AM HEALTHY" } } `, context) @@ -307,14 +307,14 @@ func TestAccComputeHealthCheck_healthCheckHttpsExample(t *testing.T) { func testAccComputeHealthCheck_healthCheckHttpsExample(context map[string]interface{}) string { return Nprintf(` resource "google_compute_health_check" "https-health-check" { - name = "https-health-check%{random_suffix}" + name = "https-health-check%{random_suffix}" - timeout_sec = 1 - check_interval_sec = 1 + timeout_sec = 1 + check_interval_sec = 1 - https_health_check { - port = "443" - } + https_health_check { + port = "443" + } } `, context) } @@ -346,7 +346,7 @@ func TestAccComputeHealthCheck_healthCheckHttpsFullExample(t *testing.T) { func testAccComputeHealthCheck_healthCheckHttpsFullExample(context map[string]interface{}) string 
{ return Nprintf(` resource "google_compute_health_check" "https-health-check" { - name = "https-health-check%{random_suffix}" + name = "https-health-check%{random_suffix}" description = "Health check via https" timeout_sec = 1 @@ -355,12 +355,12 @@ resource "google_compute_health_check" "https-health-check" { unhealthy_threshold = 5 https_health_check { - port_name = "health-check-port" + port_name = "health-check-port" port_specification = "USE_NAMED_PORT" - host = "1.2.3.4" - request_path = "/mypath" - proxy_header = "NONE" - response = "I AM HEALTHY" + host = "1.2.3.4" + request_path = "/mypath" + proxy_header = "NONE" + response = "I AM HEALTHY" } } `, context) @@ -393,14 +393,14 @@ func TestAccComputeHealthCheck_healthCheckHttp2Example(t *testing.T) { func testAccComputeHealthCheck_healthCheckHttp2Example(context map[string]interface{}) string { return Nprintf(` resource "google_compute_health_check" "http2-health-check" { - name = "http2-health-check%{random_suffix}" + name = "http2-health-check%{random_suffix}" - timeout_sec = 1 - check_interval_sec = 1 + timeout_sec = 1 + check_interval_sec = 1 - http2_health_check { - port = "443" - } + http2_health_check { + port = "443" + } } `, context) } @@ -432,7 +432,7 @@ func TestAccComputeHealthCheck_healthCheckHttp2FullExample(t *testing.T) { func testAccComputeHealthCheck_healthCheckHttp2FullExample(context map[string]interface{}) string { return Nprintf(` resource "google_compute_health_check" "http2-health-check" { - name = "http2-health-check%{random_suffix}" + name = "http2-health-check%{random_suffix}" description = "Health check via http2" timeout_sec = 1 @@ -441,12 +441,12 @@ resource "google_compute_health_check" "http2-health-check" { unhealthy_threshold = 5 http2_health_check { - port_name = "health-check-port" + port_name = "health-check-port" port_specification = "USE_NAMED_PORT" - host = "1.2.3.4" - request_path = "/mypath" - proxy_header = "NONE" - response = "I AM HEALTHY" + host = "1.2.3.4" + request_path = "/mypath" + proxy_header = "NONE" + response = "I AM HEALTHY" } } `, context) diff --git a/google/resource_compute_health_check_test.go b/google/resource_compute_health_check_test.go index f91419cb960..6fa1f5f2cf0 100644 --- a/google/resource_compute_health_check_test.go +++ b/google/resource_compute_health_check_test.go @@ -164,6 +164,7 @@ resource "google_compute_health_check" "foobar" { timeout_sec = 2 unhealthy_threshold = 3 tcp_health_check { + port = 443 } } `, hckName) @@ -326,8 +327,10 @@ resource "google_compute_health_check" "foobar" { unhealthy_threshold = 3 tcp_health_check { + port = 443 } ssl_health_check { + port = 443 } } `, hckName) diff --git a/google/resource_compute_http_health_check.go b/google/resource_compute_http_health_check.go index 2e331dfcdae..47b7921bcf0 100644 --- a/google/resource_compute_http_health_check.go +++ b/google/resource_compute_http_health_check.go @@ -205,7 +205,7 @@ func resourceComputeHttpHealthCheckCreate(d *schema.ResourceData, meta interface } // Store the ID now - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/global/httpHealthChecks/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -433,7 +433,7 @@ func resourceComputeHttpHealthCheckImport(d *schema.ResourceData, meta interface } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/global/httpHealthChecks/{{name}}") if err != nil { 
return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/google/resource_compute_http_health_check_test.go b/google/resource_compute_http_health_check_test.go index d1af7d14c7c..cd18b78846c 100644 --- a/google/resource_compute_http_health_check_test.go +++ b/google/resource_compute_http_health_check_test.go @@ -55,19 +55,19 @@ func testAccCheckComputeHttpHealthCheckExists(n string, healthCheck *compute.Htt return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") + if rs.Primary.Attributes["name"] == "" { + return fmt.Errorf("No name is set") } config := testAccProvider.Meta().(*Config) found, err := config.clientCompute.HttpHealthChecks.Get( - config.Project, rs.Primary.ID).Do() + config.Project, rs.Primary.Attributes["name"]).Do() if err != nil { return err } - if found.Name != rs.Primary.ID { + if found.Name != rs.Primary.Attributes["name"] { return fmt.Errorf("HttpHealthCheck not found") } diff --git a/google/resource_compute_https_health_check.go b/google/resource_compute_https_health_check.go index a90603598e3..e9b97eccd82 100644 --- a/google/resource_compute_https_health_check.go +++ b/google/resource_compute_https_health_check.go @@ -205,7 +205,7 @@ func resourceComputeHttpsHealthCheckCreate(d *schema.ResourceData, meta interfac } // Store the ID now - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/global/httpsHealthChecks/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -433,7 +433,7 @@ func resourceComputeHttpsHealthCheckImport(d *schema.ResourceData, meta interfac } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/global/httpsHealthChecks/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/google/resource_compute_image.go b/google/resource_compute_image.go index f491a2be6bb..686092d1f3b 100644 --- a/google/resource_compute_image.go +++ b/google/resource_compute_image.go @@ -189,9 +189,9 @@ func computeImageGuestOsFeaturesSchema() *schema.Resource { Schema: map[string]*schema.Schema{ "type": { Type: schema.TypeString, - Optional: true, + Required: true, ForceNew: true, - ValidateFunc: validation.StringInSlice([]string{"MULTI_IP_SUBNET", "SECURE_BOOT", "UEFI_COMPATIBLE", "VIRTIO_SCSI_MULTIQUEUE", "WINDOWS", ""}, false), + ValidateFunc: validation.StringInSlice([]string{"MULTI_IP_SUBNET", "SECURE_BOOT", "UEFI_COMPATIBLE", "VIRTIO_SCSI_MULTIQUEUE", "WINDOWS"}, false), Description: `The type of supported feature. 
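The replaceVars changes here and in the health check resources above move resource IDs from the bare name to the full projects/{{project}}/global/... path. One consequence shows up in the test edits: d.Id() no longer equals the API name, so the existence checks read the name attribute instead of rs.Primary.ID before calling the compute API. A stand-in helper, not the provider's code, that mirrors the new template for HTTP health checks:

package google

import "fmt"

// Sketch of the new ID layout: with create and import both storing the full
// path, the ID is self-link shaped and unambiguous across projects, but it
// can no longer be passed directly as the resource name to API calls.
func httpHealthCheckID(project, name string) string {
	return fmt.Sprintf("projects/%s/global/httpHealthChecks/%s", project, name)
}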
Read [Enabling guest operating system features](https://cloud.google.com/compute/docs/images/create-delete-deprecate-private-images#guest-os-features) to see a list of available options.`, }, }, @@ -279,7 +279,7 @@ func resourceComputeImageCreate(d *schema.ResourceData, meta interface{}) error } // Store the ID now - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/global/images/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -475,7 +475,7 @@ func resourceComputeImageImport(d *schema.ResourceData, meta interface{}) ([]*sc } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/global/images/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/google/resource_compute_image_test.go b/google/resource_compute_image_test.go index 59a956375e1..336a6eb931b 100644 --- a/google/resource_compute_image_test.go +++ b/google/resource_compute_image_test.go @@ -107,19 +107,19 @@ func testAccCheckComputeImageExists(n string, image *compute.Image) resource.Tes return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") + if rs.Primary.Attributes["name"] == "" { + return fmt.Errorf("No name is set") } config := testAccProvider.Meta().(*Config) found, err := config.clientCompute.Images.Get( - config.Project, rs.Primary.ID).Do() + config.Project, rs.Primary.Attributes["name"]).Do() if err != nil { return err } - if found.Name != rs.Primary.ID { + if found.Name != rs.Primary.Attributes["name"] { return fmt.Errorf("Image not found") } diff --git a/google/resource_compute_instance.go b/google/resource_compute_instance.go index 0eb67fba119..a2354137ed4 100644 --- a/google/resource_compute_instance.go +++ b/google/resource_compute_instance.go @@ -7,7 +7,6 @@ import ( "fmt" "log" "strings" - "time" "github.com/hashicorp/errwrap" @@ -19,6 +18,44 @@ import ( "google.golang.org/api/compute/v1" ) +var ( + bootDiskKeys = []string{ + "boot_disk.0.auto_delete", + "boot_disk.0.device_name", + "boot_disk.0.disk_encryption_key_raw", + "boot_disk.0.kms_key_self_link", + "boot_disk.0.initialize_params", + "boot_disk.0.mode", + "boot_disk.0.source", + } + + initializeParamsKeys = []string{ + "boot_disk.0.initialize_params.0.size", + "boot_disk.0.initialize_params.0.type", + "boot_disk.0.initialize_params.0.image", + "boot_disk.0.initialize_params.0.labels", + } + + accessConfigKeys = []string{ + "network_interface.%d.access_config.%d.nat_ip", + "network_interface.%d.access_config.%d.network_tier", + "network_interface.%d.access_config.%d.public_ptr_domain_name", + } + + schedulingKeys = []string{ + "scheduling.0.on_host_maintenance", + "scheduling.0.automatic_restart", + "scheduling.0.preemptible", + "scheduling.0.node_affinities", + } + + shieldedInstanceConfigKeys = []string{ + "shielded_instance_config.0.enable_secure_boot", + "shielded_instance_config.0.enable_vtpm", + "shielded_instance_config.0.enable_integrity_monitoring", + } +) + func resourceComputeInstance() *schema.Resource { return &schema.Resource{ Create: resourceComputeInstanceCreate, @@ -50,24 +87,28 @@ func resourceComputeInstance() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "auto_delete": { - Type: schema.TypeBool, - Optional: true, - Default: true, - ForceNew: true, + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: bootDiskKeys, + Default: true, + 
ForceNew: true, }, "device_name": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: bootDiskKeys, + Computed: true, + ForceNew: true, }, "disk_encryption_key_raw": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Sensitive: true, + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: bootDiskKeys, + ForceNew: true, + ConflictsWith: []string{"boot_disk.0.kms_key_self_link"}, + Sensitive: true, }, "disk_encryption_key_sha256": { @@ -78,6 +119,7 @@ func resourceComputeInstance() *schema.Resource { "kms_key_self_link": { Type: schema.TypeString, Optional: true, + AtLeastOneOf: bootDiskKeys, ForceNew: true, ConflictsWith: []string{"boot_disk.0.disk_encryption_key_raw"}, DiffSuppressFunc: compareSelfLinkRelativePaths, @@ -85,16 +127,18 @@ func resourceComputeInstance() *schema.Resource { }, "initialize_params": { - Type: schema.TypeList, - Optional: true, - Computed: true, - ForceNew: true, - MaxItems: 1, + Type: schema.TypeList, + Optional: true, + AtLeastOneOf: bootDiskKeys, + Computed: true, + ForceNew: true, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "size": { Type: schema.TypeInt, Optional: true, + AtLeastOneOf: initializeParamsKeys, Computed: true, ForceNew: true, ValidateFunc: validation.IntAtLeast(1), @@ -103,6 +147,7 @@ func resourceComputeInstance() *schema.Resource { "type": { Type: schema.TypeString, Optional: true, + AtLeastOneOf: initializeParamsKeys, Computed: true, ForceNew: true, ValidateFunc: validation.StringInSlice([]string{"pd-standard", "pd-ssd"}, false), @@ -111,16 +156,18 @@ func resourceComputeInstance() *schema.Resource { "image": { Type: schema.TypeString, Optional: true, + AtLeastOneOf: initializeParamsKeys, Computed: true, ForceNew: true, DiffSuppressFunc: diskImageDiffSuppress, }, "labels": { - Type: schema.TypeMap, - Optional: true, - Computed: true, - ForceNew: true, + Type: schema.TypeMap, + Optional: true, + AtLeastOneOf: initializeParamsKeys, + Computed: true, + ForceNew: true, }, }, }, @@ -129,6 +176,7 @@ func resourceComputeInstance() *schema.Resource { "mode": { Type: schema.TypeString, Optional: true, + AtLeastOneOf: bootDiskKeys, ForceNew: true, Default: "READ_WRITE", ValidateFunc: validation.StringInSlice([]string{"READ_WRITE", "READ_ONLY"}, false), @@ -137,6 +185,7 @@ func resourceComputeInstance() *schema.Resource { "source": { Type: schema.TypeString, Optional: true, + AtLeastOneOf: bootDiskKeys, Computed: true, ForceNew: true, ConflictsWith: []string{"boot_disk.initialize_params"}, @@ -216,12 +265,6 @@ func resourceComputeInstance() *schema.Resource { ValidateFunc: validation.StringInSlice([]string{"PREMIUM", "STANDARD"}, false), }, - "assigned_nat_ip": { - Type: schema.TypeString, - Computed: true, - Removed: "Use network_interface.access_config.nat_ip instead.", - }, - "public_ptr_domain_name": { Type: schema.TypeString, Optional: true, @@ -247,14 +290,6 @@ func resourceComputeInstance() *schema.Resource { }, }, }, - - "address": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - Removed: "Please use network_ip", - }, }, }, }, @@ -328,72 +363,6 @@ func resourceComputeInstance() *schema.Resource { Default: false, }, - "disk": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Removed: "Use boot_disk, scratch_disk, and attached_disk instead", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - // TODO(mitchellh): one of image or disk is required 
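Rather than repeating each AtLeastOneOf slice literally the way the health check schema does, the instance resource hoists the sibling paths into package vars (bootDiskKeys, initializeParamsKeys, and so on) and references them from every field, so the list stays in sync when a field is added or removed. A trimmed sketch of the same pattern with stand-in names:

package google

import "github.com/hashicorp/terraform-plugin-sdk/helper/schema"

// Sketch of the shared key-list refactor above; the var is declared once and
// referenced by every sibling field of the boot_disk block.
var exampleBootDiskKeys = []string{
	"boot_disk.0.auto_delete",
	"boot_disk.0.source",
}

func exampleBootDiskField() *schema.Schema {
	return &schema.Schema{
		Type:         schema.TypeBool,
		Optional:     true,
		Default:      true,
		ForceNew:     true,
		AtLeastOneOf: exampleBootDiskKeys, // one slice, shared by all siblings
	}
}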
- - "disk": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "image": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "type": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "scratch": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - }, - - "auto_delete": { - Type: schema.TypeBool, - Optional: true, - Default: true, - ForceNew: true, - }, - - "size": { - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - }, - - "device_name": { - Type: schema.TypeString, - Optional: true, - }, - - "disk_encryption_key_raw": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Sensitive: true, - }, - - "disk_encryption_key_sha256": { - Type: schema.TypeString, - Computed: true, - }, - }, - }, - }, - "enable_display": { Type: schema.TypeBool, Optional: true, @@ -460,27 +429,31 @@ func resourceComputeInstance() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "on_host_maintenance": { - Type: schema.TypeString, - Optional: true, - Computed: true, + Type: schema.TypeString, + Optional: true, + Computed: true, + AtLeastOneOf: schedulingKeys, }, "automatic_restart": { - Type: schema.TypeBool, - Optional: true, - Default: true, + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: schedulingKeys, + Default: true, }, "preemptible": { - Type: schema.TypeBool, - Optional: true, - Default: false, - ForceNew: true, + Type: schema.TypeBool, + Optional: true, + Default: false, + AtLeastOneOf: schedulingKeys, + ForceNew: true, }, "node_affinities": { Type: schema.TypeSet, Optional: true, + AtLeastOneOf: schedulingKeys, ForceNew: true, Elem: instanceSchedulingNodeAffinitiesElemSchema(), DiffSuppressFunc: emptyOrDefaultStringSuppress(""), @@ -497,8 +470,7 @@ func resourceComputeInstance() *schema.Resource { Schema: map[string]*schema.Schema{ "interface": { Type: schema.TypeString, - Optional: true, - Default: "SCSI", + Required: true, ValidateFunc: validation.StringInSlice([]string{"SCSI", "NVME"}, false), }, }, @@ -543,21 +515,24 @@ func resourceComputeInstance() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "enable_secure_boot": { - Type: schema.TypeBool, - Optional: true, - Default: false, + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: shieldedInstanceConfigKeys, + Default: false, }, "enable_vtpm": { - Type: schema.TypeBool, - Optional: true, - Default: true, + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: shieldedInstanceConfigKeys, + Default: true, }, "enable_integrity_monitoring": { - Type: schema.TypeBool, - Optional: true, - Default: true, + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: shieldedInstanceConfigKeys, + Default: true, }, }, }, @@ -633,7 +608,7 @@ func getInstance(config *Config, d *schema.ResourceData) (*computeBeta.Instance, if err != nil { return nil, err } - instance, err := config.clientComputeBeta.Instances.Get(project, zone, d.Id()).Do() + instance, err := config.clientComputeBeta.Instances.Get(project, zone, d.Get("name").(string)).Do() if err != nil { return nil, handleNotFoundError(err, d, fmt.Sprintf("Instance %s", d.Get("name").(string))) } @@ -776,7 +751,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err } // Store the ID now - d.SetId(instance.Name) + d.SetId(fmt.Sprintf("projects/%s/zones/%s/instances/%s", project, z, instance.Name)) // Wait for the operation to complete waitErr := computeSharedOperationWaitTime(config.clientCompute, op, 
project, createTimeout, "instance to create") @@ -952,6 +927,8 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error } } + zone := GetResourceNameFromSelfLink(instance.Zone) + d.Set("service_account", flattenServiceAccounts(instance.ServiceAccounts)) d.Set("attached_disk", ads) d.Set("scratch_disk", scratchDisks) @@ -965,11 +942,11 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error d.Set("self_link", ConvertSelfLinkToV1(instance.SelfLink)) d.Set("instance_id", fmt.Sprintf("%d", instance.Id)) d.Set("project", project) - d.Set("zone", GetResourceNameFromSelfLink(instance.Zone)) + d.Set("zone", zone) d.Set("name", instance.Name) d.Set("description", instance.Description) d.Set("hostname", instance.Hostname) - d.SetId(instance.Name) + d.SetId(fmt.Sprintf("projects/%s/zones/%s/instances/%s", project, zone, instance.Name)) return nil } @@ -989,9 +966,9 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err // Use beta api directly in order to read network_interface.fingerprint without having to put it in the schema. // Change back to getInstance(config, d) once updating alias ips is GA. - instance, err := config.clientComputeBeta.Instances.Get(project, zone, d.Id()).Do() + instance, err := config.clientComputeBeta.Instances.Get(project, zone, d.Get("name").(string)).Do() if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Instance %s", d.Get("name").(string))) + return handleNotFoundError(err, d, fmt.Sprintf("Instance %s", instance.Name)) } // Enable partial mode for the resource since it is possible @@ -1013,14 +990,14 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err func() error { // retrieve up-to-date metadata from the API in case several updates hit simultaneously. instances // sometimes but not always share metadata fingerprints. 
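The metadata update around this point re-reads the instance on every attempt so that the write carries the latest fingerprint; a concurrent writer then surfaces as a retriable conflict rather than a silent overwrite. A self-contained sketch of that shape, with get and set standing in for the Instances.Get and SetMetadata calls and for the retry wrapper around them:

package google

import "fmt"

type exampleMetadata struct {
	Fingerprint string
	Items       map[string]string
}

// Sketch of the fingerprint-guarded update used in the retry closure nearby:
// fetch the current fingerprint, attempt the write with it, and retry from a
// fresh read if the write is rejected.
func exampleUpdateWithFingerprint(get func() (*exampleMetadata, error), set func(*exampleMetadata) error, items map[string]string, attempts int) error {
	for i := 0; i < attempts; i++ {
		current, err := get()
		if err != nil {
			return fmt.Errorf("Error retrieving metadata: %s", err)
		}
		if err := set(&exampleMetadata{Fingerprint: current.Fingerprint, Items: items}); err == nil {
			return nil
		}
	}
	return fmt.Errorf("metadata update still conflicting after %d attempts", attempts)
}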
- instance, err := config.clientComputeBeta.Instances.Get(project, zone, d.Id()).Do() + instance, err := config.clientComputeBeta.Instances.Get(project, zone, instance.Name).Do() if err != nil { return fmt.Errorf("Error retrieving metadata: %s", err) } metadataV1.Fingerprint = instance.Metadata.Fingerprint - op, err := config.clientCompute.Instances.SetMetadata(project, zone, d.Id(), metadataV1).Do() + op, err := config.clientCompute.Instances.SetMetadata(project, zone, instance.Name, metadataV1).Do() if err != nil { return fmt.Errorf("Error updating metadata: %s", err) } @@ -1048,7 +1025,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err return err } op, err := config.clientCompute.Instances.SetTags( - project, zone, d.Id(), tagsV1).Do() + project, zone, d.Get("name").(string), tagsV1).Do() if err != nil { return fmt.Errorf("Error updating tags: %s", err) } @@ -1066,7 +1043,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err labelFingerprint := d.Get("label_fingerprint").(string) req := compute.InstancesSetLabelsRequest{Labels: labels, LabelFingerprint: labelFingerprint} - op, err := config.clientCompute.Instances.SetLabels(project, zone, d.Id(), &req).Do() + op, err := config.clientCompute.Instances.SetLabels(project, zone, instance.Name, &req).Do() if err != nil { return fmt.Errorf("Error updating labels: %s", err) } @@ -1086,7 +1063,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err } op, err := config.clientComputeBeta.Instances.SetScheduling( - project, zone, d.Id(), scheduling).Do() + project, zone, instance.Name, scheduling).Do() if err != nil { return fmt.Errorf("Error updating scheduling policy: %s", err) } @@ -1127,7 +1104,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err // Delete any accessConfig that currently exists in instNetworkInterface for _, ac := range instNetworkInterface.AccessConfigs { op, err := config.clientCompute.Instances.DeleteAccessConfig( - project, zone, d.Id(), ac.Name, networkName).Do() + project, zone, instance.Name, ac.Name, networkName).Do() if err != nil { return fmt.Errorf("Error deleting old access_config: %s", err) } @@ -1152,7 +1129,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err } op, err := config.clientComputeBeta.Instances.AddAccessConfig( - project, zone, d.Id(), networkName, ac).Do() + project, zone, instance.Name, networkName, ac).Do() if err != nil { return fmt.Errorf("Error adding new access_config: %s", err) } @@ -1172,7 +1149,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err Fingerprint: instNetworkInterface.Fingerprint, ForceSendFields: []string{"AliasIpRanges"}, } - op, err := config.clientComputeBeta.Instances.UpdateNetworkInterface(project, zone, d.Id(), networkName, ni).Do() + op, err := config.clientComputeBeta.Instances.UpdateNetworkInterface(project, zone, instance.Name, networkName, ni).Do() if err != nil { return errwrap.Wrapf("Error removing alias_ip_range: {{err}}", err) } @@ -1186,7 +1163,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err ranges := d.Get(prefix + ".alias_ip_range").([]interface{}) if len(ranges) > 0 { if rereadFingerprint { - instance, err = config.clientComputeBeta.Instances.Get(project, zone, d.Id()).Do() + instance, err = config.clientComputeBeta.Instances.Get(project, zone, instance.Name).Do() if err != nil { return err } @@ -1196,7 +1173,7 @@ func 
resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err AliasIpRanges: expandAliasIpRanges(ranges), Fingerprint: instNetworkInterface.Fingerprint, } - op, err := config.clientComputeBeta.Instances.UpdateNetworkInterface(project, zone, d.Id(), networkName, ni).Do() + op, err := config.clientComputeBeta.Instances.UpdateNetworkInterface(project, zone, instance.Name, networkName, ni).Do() if err != nil { return errwrap.Wrapf("Error adding alias_ip_range: {{err}}", err) } @@ -1322,7 +1299,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err if d.HasChange("deletion_protection") { nDeletionProtection := d.Get("deletion_protection").(bool) - op, err := config.clientCompute.Instances.SetDeletionProtection(project, zone, d.Id()).DeletionProtection(nDeletionProtection).Do() + op, err := config.clientCompute.Instances.SetDeletionProtection(project, zone, d.Get("name").(string)).DeletionProtection(nDeletionProtection).Do() if err != nil { return fmt.Errorf("Error updating deletion protection flag: %s", err) } @@ -1441,7 +1418,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err if d.HasChange("shielded_instance_config") { shieldedVmConfig := expandShieldedVmConfigs(d) - op, err := config.clientComputeBeta.Instances.UpdateShieldedVmConfig(project, zone, d.Id(), shieldedVmConfig).Do() + op, err := config.clientComputeBeta.Instances.UpdateShieldedVmConfig(project, zone, instance.Name, shieldedVmConfig).Do() if err != nil { return fmt.Errorf("Error updating shielded vm config: %s", err) } @@ -1595,12 +1572,12 @@ func resourceComputeInstanceDelete(d *schema.ResourceData, meta interface{}) err if err != nil { return err } - log.Printf("[INFO] Requesting instance deletion: %s", d.Id()) + log.Printf("[INFO] Requesting instance deletion: %s", d.Get("name").(string)) if d.Get("deletion_protection").(bool) { - return fmt.Errorf("Cannot delete instance %s: instance Deletion Protection is enabled. Set deletion_protection to false for this resource and run \"terraform apply\" before attempting to delete it.", d.Id()) + return fmt.Errorf("Cannot delete instance %s: instance Deletion Protection is enabled. 
Set deletion_protection to false for this resource and run \"terraform apply\" before attempting to delete it.", d.Get("name").(string)) } else { - op, err := config.clientCompute.Instances.Delete(project, zone, d.Id()).Do() + op, err := config.clientCompute.Instances.Delete(project, zone, d.Get("name").(string)).Do() if err != nil { return fmt.Errorf("Error deleting instance: %s", err) } @@ -1625,7 +1602,8 @@ func resourceComputeInstanceImportState(d *schema.ResourceData, meta interface{} d.Set("project", parts[0]) d.Set("zone", parts[1]) - d.SetId(parts[2]) + d.Set("name", parts[2]) + d.SetId(fmt.Sprintf("projects/%s/zones/%s/instances/%s", parts[0], parts[1], parts[2])) return []*schema.ResourceData{d}, nil } diff --git a/google/resource_compute_instance_from_template.go b/google/resource_compute_instance_from_template.go index 4150af0a69d..423a73a3cca 100644 --- a/google/resource_compute_instance_from_template.go +++ b/google/resource_compute_instance_from_template.go @@ -153,7 +153,7 @@ func resourceComputeInstanceFromTemplateCreate(d *schema.ResourceData, meta inte } // Store the ID now - d.SetId(instance.Name) + d.SetId(fmt.Sprintf("projects/%s/zones/%s/instances/%s", project, z, instance.Name)) // Wait for the operation to complete waitErr := computeSharedOperationWaitTime(config.clientCompute, op, project, int(d.Timeout(schema.TimeoutCreate).Minutes()), "instance to create") diff --git a/google/resource_compute_instance_from_template_test.go b/google/resource_compute_instance_from_template_test.go index 84d5d77d064..283fa38e367 100644 --- a/google/resource_compute_instance_from_template_test.go +++ b/google/resource_compute_instance_from_template_test.go @@ -246,6 +246,7 @@ resource "google_compute_instance_template" "foobar" { disk_type = "local-ssd" type = "SCRATCH" interface = "NVME" + disk_size_gb = 375 } network_interface { @@ -447,6 +448,8 @@ resource "google_compute_instance_template" "template" { disk { type = "SCRATCH" + disk_type = "local-ssd" + disk_size_gb = 375 interface = "SCSI" auto_delete = true boot = false diff --git a/google/resource_compute_instance_group.go b/google/resource_compute_instance_group.go index 4ff3a889e2e..0a4748fff62 100644 --- a/google/resource_compute_instance_group.go +++ b/google/resource_compute_instance_group.go @@ -156,7 +156,7 @@ func resourceComputeInstanceGroupCreate(d *schema.ResourceData, meta interface{} } // It probably maybe worked, so store the ID now - d.SetId(fmt.Sprintf("%s/%s", zone, name)) + d.SetId(fmt.Sprintf("projects/%s/zones/%s/instanceGroups/%s", project, zone, name)) // Wait for the operation to complete err = computeOperationWait(config.clientCompute, op, project, "Creating InstanceGroup") @@ -379,18 +379,19 @@ func resourceComputeInstanceGroupDelete(d *schema.ResourceData, meta interface{} } func resourceComputeInstanceGroupImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - parts := strings.Split(d.Id(), "/") - if len(parts) == 2 { - d.Set("zone", parts[0]) - d.Set("name", parts[1]) - } else if len(parts) == 3 { - d.Set("project", parts[0]) - d.Set("zone", parts[1]) - d.Set("name", parts[2]) - d.SetId(parts[1] + "/" + parts[2]) - } else { - return nil, fmt.Errorf("Invalid compute instance group specifier. 
Expecting {zone}/{name} or {project}/{zone}/{name}") + config := meta.(*Config) + if err := parseImportId([]string{ + "projects/(?P[^/]+)/zones/(?P[^/]+)/instanceGroups/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/instanceGroups/{{name}}") + if err != nil { + return nil, err } + d.SetId(id) return []*schema.ResourceData{d}, nil } diff --git a/google/resource_compute_instance_group_manager.go b/google/resource_compute_instance_group_manager.go index 10b653737cf..f5068640bbd 100644 --- a/google/resource_compute_instance_group_manager.go +++ b/google/resource_compute_instance_group_manager.go @@ -40,15 +40,13 @@ func resourceComputeInstanceGroupManager() *schema.Resource { Type: schema.TypeString, Optional: true, Computed: true, - Deprecated: "This field will be replaced by `version.instance_template` in 3.0.0", - ConflictsWith: []string{"version"}, + Removed: "This field has been replaced by `version.instance_template`", DiffSuppressFunc: compareSelfLinkRelativePaths, }, "version": { Type: schema.TypeList, - Optional: true, - Computed: true, + Required: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "name": { @@ -145,21 +143,10 @@ func resourceComputeInstanceGroupManager() *schema.Resource { }, "update_strategy": { - Type: schema.TypeString, - Optional: true, - Default: "REPLACE", - Deprecated: "This field will be replaced by `update_policy` in 3.0.0", - ConflictsWith: []string{"update_policy"}, - ValidateFunc: validation.StringInSlice([]string{"RESTART", "NONE", "ROLLING_UPDATE", "REPLACE"}, false), - DiffSuppressFunc: func(key, old, new string, d *schema.ResourceData) bool { - if old == "REPLACE" && new == "RESTART" { - return true - } - if old == "RESTART" && new == "REPLACE" { - return true - } - return false - }, + Type: schema.TypeString, + Optional: true, + Default: "REPLACE", + Removed: "This field has been replaced by `update_policy`", }, "target_pools": { @@ -198,58 +185,6 @@ func resourceComputeInstanceGroupManager() *schema.Resource { }, }, - "rolling_update_policy": { - Computed: true, - Type: schema.TypeList, - Removed: "This field has been replaced by update_policy.", - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "minimal_action": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{"RESTART", "REPLACE"}, false), - }, - - "type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{"OPPORTUNISTIC", "PROACTIVE"}, false), - }, - - "max_surge_fixed": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - - "max_surge_percent": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(0, 100), - }, - - "max_unavailable_fixed": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - - "max_unavailable_percent": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(0, 100), - }, - - "min_ready_sec": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(0, 3600), - }, - }, - }, - }, "update_policy": { Computed: true, Type: schema.TypeList, @@ -359,7 +294,6 @@ func resourceComputeInstanceGroupManagerCreate(d *schema.ResourceData, meta inte Name: d.Get("name").(string), Description: d.Get("description").(string), BaseInstanceName: d.Get("base_instance_name").(string), - 
InstanceTemplate: d.Get("instance_template").(string), TargetSize: int64(d.Get("target_size").(int)), NamedPorts: getNamedPortsBeta(d.Get("named_port").(*schema.Set).List()), TargetPools: convertStringSet(d.Get("target_pools").(*schema.Set)), @@ -379,7 +313,7 @@ func resourceComputeInstanceGroupManagerCreate(d *schema.ResourceData, meta inte } // It probably maybe worked, so store the ID now - id, err := replaceVars(d, config, "{{project}}/{{zone}}/{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/instanceGroupManagers/{{name}}") if err != nil { return err } @@ -434,9 +368,6 @@ func flattenFixedOrPercent(fixedOrPercent *computeBeta.FixedOrPercent) []map[str func getManager(d *schema.ResourceData, meta interface{}) (*computeBeta.InstanceGroupManager, error) { config := meta.(*Config) - if err := parseImportId([]string{"(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config); err != nil { - return nil, err - } project, err := getProject(d, config) if err != nil { @@ -480,7 +411,6 @@ func resourceComputeInstanceGroupManagerRead(d *schema.ResourceData, meta interf } d.Set("base_instance_name", manager.BaseInstanceName) - d.Set("instance_template", ConvertSelfLinkToV1(manager.InstanceTemplate)) d.Set("name", manager.Name) d.Set("zone", GetResourceNameFromSelfLink(manager.Zone)) d.Set("description", manager.Description) @@ -496,14 +426,6 @@ func resourceComputeInstanceGroupManagerRead(d *schema.ResourceData, meta interf d.Set("instance_group", ConvertSelfLinkToV1(manager.InstanceGroup)) d.Set("self_link", ConvertSelfLinkToV1(manager.SelfLink)) - update_strategy, ok := d.GetOk("update_strategy") - if !ok { - update_strategy = "REPLACE" - } - d.Set("update_strategy", update_strategy.(string)) - - d.Set("rolling_update_policy", nil) - if err = d.Set("auto_healing_policies", flattenAutoHealingPolicies(manager.AutoHealingPolicies)); err != nil { return fmt.Errorf("Error setting auto_healing_policies in state: %s", err.Error()) } @@ -530,49 +452,9 @@ func resourceComputeInstanceGroupManagerRead(d *schema.ResourceData, meta interf return nil } -// Updates an instance group manager by applying the update strategy (REPLACE, RESTART) -// and rolling update policy (PROACTIVE, OPPORTUNISTIC). Updates performed by API -// are OPPORTUNISTIC by default. 
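The parseImportId calls being deleted from getManager, update, and delete were re-deriving project, zone, and name on every CRUD call; after this change only the importer parses user-supplied IDs, against an ordered pattern list whose named captures, (?P<project>[^/]+) and the like, become resource fields. A self-contained stand-in for what that matching does (parseImportId itself is the provider's helper):

package google

import (
	"fmt"
	"regexp"
)

// Sketch of import-ID matching: try each accepted layout in order, most
// specific first, and return the named captures of the first match.
func exampleMatchImportID(id string, patterns []string) (map[string]string, error) {
	for _, p := range patterns {
		re := regexp.MustCompile("^" + p + "$")
		match := re.FindStringSubmatch(id)
		if match == nil {
			continue
		}
		fields := map[string]string{}
		for i, name := range re.SubexpNames() {
			if i > 0 && name != "" {
				fields[name] = match[i]
			}
		}
		return fields, nil
	}
	return nil, fmt.Errorf("import id %q does not match any accepted format", id)
}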
-func performZoneUpdate(d *schema.ResourceData, config *Config, id string, updateStrategy string, project string, zone string) error { - if updateStrategy == "RESTART" || updateStrategy == "REPLACE" { - managedInstances, err := config.clientComputeBeta.InstanceGroupManagers.ListManagedInstances(project, zone, id).Do() - if err != nil { - return fmt.Errorf("Error getting instance group managers instances: %s", err) - } - - managedInstanceCount := len(managedInstances.ManagedInstances) - instances := make([]string, managedInstanceCount) - for i, v := range managedInstances.ManagedInstances { - instances[i] = v.Instance - } - - recreateInstances := &computeBeta.InstanceGroupManagersRecreateInstancesRequest{ - Instances: instances, - } - - op, err := config.clientComputeBeta.InstanceGroupManagers.RecreateInstances(project, zone, id, recreateInstances).Do() - if err != nil { - return fmt.Errorf("Error restarting instance group managers instances: %s", err) - } - - // Wait for the operation to complete - timeoutInMinutes := int(d.Timeout(schema.TimeoutUpdate).Minutes()) - err = computeSharedOperationWaitTime(config.clientCompute, op, project, timeoutInMinutes, "Restarting InstanceGroupManagers instances") - if err != nil { - return err - } - } - - return nil -} - func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - if err := parseImportId([]string{"(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config); err != nil { - return err - } - project, err := getProject(d, config) if err != nil { return err @@ -671,37 +553,6 @@ func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta inte d.SetPartial("target_size") } - // If instance_template changes then update - if d.HasChange("instance_template") { - d.Partial(true) - - name := d.Get("name").(string) - // Build the parameter - setInstanceTemplate := &computeBeta.InstanceGroupManagersSetInstanceTemplateRequest{ - InstanceTemplate: d.Get("instance_template").(string), - } - - op, err := config.clientComputeBeta.InstanceGroupManagers.SetInstanceTemplate(project, zone, name, setInstanceTemplate).Do() - - if err != nil { - return fmt.Errorf("Error updating InstanceGroupManager: %s", err) - } - - // Wait for the operation to complete - timeoutInMinutes := int(d.Timeout(schema.TimeoutUpdate).Minutes()) - err = computeSharedOperationWaitTime(config.clientCompute, op, project, timeoutInMinutes, "Updating InstanceGroupManager") - if err != nil { - return err - } - - updateStrategy := d.Get("update_strategy").(string) - err = performZoneUpdate(d, config, name, updateStrategy, project, zone) - if err != nil { - return err - } - d.SetPartial("instance_template") - } - d.Partial(false) return resourceComputeInstanceGroupManagerRead(d, meta) @@ -710,9 +561,6 @@ func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta inte func resourceComputeInstanceGroupManagerDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - if err := parseImportId([]string{"(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config); err != nil { - return err - } project, err := getProject(d, config) if err != nil { return err @@ -899,12 +747,12 @@ func flattenUpdatePolicy(updatePolicy *computeBeta.InstanceGroupManagerUpdatePol func resourceInstanceGroupManagerStateImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { d.Set("wait_for_instances", false) config := meta.(*Config) - 
if err := parseImportId([]string{"(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config); err != nil { + if err := parseImportId([]string{"projects/(?P[^/]+)/zones/(?P[^/]+)/instanceGroupManagers/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config); err != nil { return nil, err } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{project}}/{{zone}}/{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/instanceGroupManagers/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/google/resource_compute_instance_group_manager_test.go b/google/resource_compute_instance_group_manager_test.go index 63e96eb595a..f4c29d6b4ac 100644 --- a/google/resource_compute_instance_group_manager_test.go +++ b/google/resource_compute_instance_group_manager_test.go @@ -128,28 +128,6 @@ func TestAccInstanceGroupManager_updateLifecycle(t *testing.T) { }) } -func TestAccInstanceGroupManager_updateStrategy(t *testing.T) { - t.Parallel() - - igm := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckInstanceGroupManagerDestroy, - Steps: []resource.TestStep{ - { - Config: testAccInstanceGroupManager_updateStrategy(igm), - }, - { - ResourceName: "google_compute_instance_group_manager.igm-update-strategy", - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - func TestAccInstanceGroupManager_updatePolicy(t *testing.T) { t.Parallel() @@ -282,33 +260,6 @@ func TestAccInstanceGroupManager_autoHealingPolicies(t *testing.T) { }) } -func TestAccInstanceGroupManager_upgradeInstanceTemplate(t *testing.T) { - t.Parallel() - - igm := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckInstanceGroupManagerDestroy, - Steps: []resource.TestStep{ - { - Config: testAccInstanceGroupManager_upgradeInstanceTemplate1(igm), - }, - { - ResourceName: "google_compute_instance_group_manager.igm-instance-template-upgrade", - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccInstanceGroupManager_upgradeInstanceTemplate2(igm), - PlanOnly: true, - ExpectNonEmptyPlan: false, - }, - }, - }) -} - func testAccCheckInstanceGroupManagerDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) @@ -627,52 +578,6 @@ func testAccInstanceGroupManager_updateLifecycle(tag, igm string) string { }`, tag, igm) } -func testAccInstanceGroupManager_updateStrategy(igm string) string { - return fmt.Sprintf(` - data "google_compute_image" "my_image" { - family = "debian-9" - project = "debian-cloud" - } - - resource "google_compute_instance_template" "igm-update-strategy" { - machine_type = "n1-standard-1" - can_ip_forward = false - tags = ["terraform-testing"] - - disk { - source_image = "${data.google_compute_image.my_image.self_link}" - auto_delete = true - boot = true - } - - network_interface { - network = "default" - } - - service_account { - scopes = ["userinfo-email", "compute-ro", "storage-ro"] - } - - lifecycle { - create_before_destroy = true - } - } - - resource "google_compute_instance_group_manager" "igm-update-strategy" { - description = "Terraform test instance group manager" - name = "%s" - instance_template = 
"${google_compute_instance_template.igm-update-strategy.self_link}" - base_instance_name = "igm-update-strategy" - zone = "us-central1-c" - target_size = 2 - update_strategy = "REPLACE" - named_port { - name = "customhttp" - port = 8080 - } - }`, igm) -} - func testAccInstanceGroupManager_rollingUpdatePolicy(igm string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { @@ -1103,90 +1008,3 @@ resource "google_compute_instance_group_manager" "igm-basic" { } `, primaryTemplate, canaryTemplate, igm) } -func testAccInstanceGroupManager_upgradeInstanceTemplate1(igm string) string { - return fmt.Sprintf(` -data "google_compute_image" "my_image" { - family = "debian-9" - project = "debian-cloud" -} - -resource "google_compute_instance_template" "igm-instance-template-upgrade" { - machine_type = "n1-standard-1" - can_ip_forward = false - tags = ["terraform-testing"] - - disk { - source_image = "${data.google_compute_image.my_image.self_link}" - auto_delete = true - boot = true - } - - network_interface { - network = "default" - } - - lifecycle { - create_before_destroy = true - } -} - -resource "google_compute_instance_group_manager" "igm-instance-template-upgrade" { - description = "Terraform test instance group manager" - name = "%s" - - instance_template = "${google_compute_instance_template.igm-instance-template-upgrade.self_link}" - - target_size = 3 - base_instance_name = "igm-instance-template-upgrade" - - named_port { - name = "customhttp" - port = 8080 - } -}`, igm) -} - -func testAccInstanceGroupManager_upgradeInstanceTemplate2(igm string) string { - return fmt.Sprintf(` -data "google_compute_image" "my_image" { - family = "debian-9" - project = "debian-cloud" -} - -resource "google_compute_instance_template" "igm-instance-template-upgrade" { - machine_type = "n1-standard-1" - can_ip_forward = false - tags = ["terraform-testing"] - - disk { - source_image = "${data.google_compute_image.my_image.self_link}" - auto_delete = true - boot = true - } - - network_interface { - network = "default" - } - - lifecycle { - create_before_destroy = true - } -} - -resource "google_compute_instance_group_manager" "igm-instance-template-upgrade" { - description = "Terraform test instance group manager" - name = "%s" - - version { - instance_template = "${google_compute_instance_template.igm-instance-template-upgrade.self_link}" - } - - target_size = 3 - base_instance_name = "igm-instance-template-upgrade" - - named_port { - name = "customhttp" - port = 8080 - } -}`, igm) -} diff --git a/google/resource_compute_instance_group_test.go b/google/resource_compute_instance_group_test.go index 928ecc50a32..a7f6c2edf41 100644 --- a/google/resource_compute_instance_group_test.go +++ b/google/resource_compute_instance_group_test.go @@ -302,7 +302,7 @@ func testAccComputeInstanceGroup_hasCorrectNetwork(nInstanceGroup string, nNetwo return fmt.Errorf("No ID is set") } network, err := config.clientCompute.Networks.Get( - config.Project, rsNetwork.Primary.ID).Do() + config.Project, rsNetwork.Primary.Attributes["name"]).Do() if err != nil { return err } diff --git a/google/resource_compute_instance_template.go b/google/resource_compute_instance_template.go index 3143f056ac3..5a86f949704 100644 --- a/google/resource_compute_instance_template.go +++ b/google/resource_compute_instance_template.go @@ -3,25 +3,46 @@ package google import ( "fmt" "reflect" + "strings" "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/helper/customdiff" 
"github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + computeBeta "google.golang.org/api/compute/v0.beta" ) +var ( + schedulingInstTemplateKeys = []string{ + "scheduling.0.on_host_maintenance", + "scheduling.0.automatic_restart", + "scheduling.0.preemptible", + "scheduling.0.node_affinities", + } + + shieldedInstanceTemplateConfigKeys = []string{ + "shielded_instance_config.0.enable_secure_boot", + "shielded_instance_config.0.enable_vtpm", + "shielded_instance_config.0.enable_integrity_monitoring", + } +) + func resourceComputeInstanceTemplate() *schema.Resource { return &schema.Resource{ Create: resourceComputeInstanceTemplateCreate, Read: resourceComputeInstanceTemplateRead, Delete: resourceComputeInstanceTemplateDelete, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + State: resourceComputeInstanceTemplateImportState, }, SchemaVersion: 1, - CustomizeDiff: resourceComputeInstanceTemplateSourceImageCustomizeDiff, - MigrateState: resourceComputeInstanceTemplateMigrateState, + CustomizeDiff: customdiff.All( + resourceComputeInstanceTemplateSourceImageCustomizeDiff, + resourceComputeInstanceTemplateScratchDiskCustomizeDiff, + ), + MigrateState: resourceComputeInstanceTemplateMigrateState, // A compute instance template is more or less a subset of a compute // instance. Please attempt to maintain consistency with the @@ -151,7 +172,7 @@ func resourceComputeInstanceTemplate() *schema.Resource { Schema: map[string]*schema.Schema{ "kms_key_self_link": { Type: schema.TypeString, - Optional: true, + Required: true, ForceNew: true, DiffSuppressFunc: compareSelfLinkRelativePaths, }, @@ -168,13 +189,6 @@ func resourceComputeInstanceTemplate() *schema.Resource { ForceNew: true, }, - "automatic_restart": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Removed: "Use 'scheduling.automatic_restart' instead.", - }, - "can_ip_forward": { Type: schema.TypeBool, Optional: true, @@ -270,11 +284,6 @@ func resourceComputeInstanceTemplate() *schema.Resource { Computed: true, ValidateFunc: validation.StringInSlice([]string{"PREMIUM", "STANDARD"}, false), }, - "assigned_nat_ip": { - Type: schema.TypeString, - Computed: true, - Removed: "Use network_interface.access_config.nat_ip instead.", - }, }, }, }, @@ -299,24 +308,10 @@ func resourceComputeInstanceTemplate() *schema.Resource { }, }, }, - - "address": { - Type: schema.TypeString, - Computed: true, - Optional: true, - Removed: "Please use network_ip", - }, }, }, }, - "on_host_maintenance": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Removed: "Use 'scheduling.on_host_maintenance' instead.", - }, - "project": { Type: schema.TypeString, Optional: true, @@ -340,29 +335,33 @@ func resourceComputeInstanceTemplate() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "preemptible": { - Type: schema.TypeBool, - Optional: true, - Default: false, - ForceNew: true, + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: schedulingInstTemplateKeys, + Default: false, + ForceNew: true, }, "automatic_restart": { - Type: schema.TypeBool, - Optional: true, - Default: true, - ForceNew: true, + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: schedulingInstTemplateKeys, + Default: true, + ForceNew: true, }, "on_host_maintenance": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, + Type: schema.TypeString, + Optional: 
true, + Computed: true, + AtLeastOneOf: schedulingInstTemplateKeys, + ForceNew: true, }, "node_affinities": { Type: schema.TypeSet, Optional: true, + AtLeastOneOf: schedulingInstTemplateKeys, ForceNew: true, Elem: instanceSchedulingNodeAffinitiesElemSchema(), DiffSuppressFunc: emptyOrDefaultStringSuppress(""), @@ -418,24 +417,27 @@ func resourceComputeInstanceTemplate() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "enable_secure_boot": { - Type: schema.TypeBool, - Optional: true, - Default: false, - ForceNew: true, + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: shieldedInstanceTemplateConfigKeys, + Default: false, + ForceNew: true, }, "enable_vtpm": { - Type: schema.TypeBool, - Optional: true, - Default: true, - ForceNew: true, + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: shieldedInstanceTemplateConfigKeys, + Default: true, + ForceNew: true, }, "enable_integrity_monitoring": { - Type: schema.TypeBool, - Optional: true, - Default: true, - ForceNew: true, + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: shieldedInstanceTemplateConfigKeys, + Default: true, + ForceNew: true, }, }, }, @@ -548,6 +550,34 @@ func resourceComputeInstanceTemplateSourceImageCustomizeDiff(diff *schema.Resour return nil } +func resourceComputeInstanceTemplateScratchDiskCustomizeDiff(diff *schema.ResourceDiff, meta interface{}) error { + // separate func to allow unit testing + return resourceComputeInstanceTemplateScratchDiskCustomizeDiffFunc(diff) +} + +func resourceComputeInstanceTemplateScratchDiskCustomizeDiffFunc(diff TerraformResourceDiff) error { + numDisks := diff.Get("disk.#").(int) + for i := 0; i < numDisks; i++ { + // misspelled on purpose, type is a special symbol + typee := diff.Get(fmt.Sprintf("disk.%d.type", i)).(string) + diskType := diff.Get(fmt.Sprintf("disk.%d.disk_type", i)).(string) + if typee == "SCRATCH" && diskType != "local-ssd" { + return fmt.Errorf("SCRATCH disks must have a disk_type of local-ssd. disk %d has disk_type %s", i, diskType) + } + + if diskType == "local-ssd" && typee != "SCRATCH" { + return fmt.Errorf("disks with a disk_type of local-ssd must be SCRATCH disks. 
disk %d is a %s disk", i, typee) + } + + diskSize := diff.Get(fmt.Sprintf("disk.%d.disk_size_gb", i)).(int) + if typee == "SCRATCH" && diskSize != 375 { + return fmt.Errorf("SCRATCH disks must be exactly 375GB, disk %d is %d", i, diskSize) + } + } + + return nil +} + func buildDisks(d *schema.ResourceData, config *Config) ([]*computeBeta.AttachedDisk, error) { project, err := getProject(d, config) if err != nil { @@ -737,7 +767,7 @@ func resourceComputeInstanceTemplateCreate(d *schema.ResourceData, meta interfac } // Store the ID now - d.SetId(instanceTemplate.Name) + d.SetId(fmt.Sprintf("projects/%s/global/instanceTemplates/%s", project, instanceTemplate.Name)) err = computeSharedOperationWait(config.clientCompute, op, project, "Creating Instance Template") if err != nil { @@ -959,7 +989,8 @@ func resourceComputeInstanceTemplateRead(d *schema.ResourceData, meta interface{ return err } - instanceTemplate, err := config.clientComputeBeta.InstanceTemplates.Get(project, d.Id()).Do() + splits := strings.Split(d.Id(), "/") + instanceTemplate, err := config.clientComputeBeta.InstanceTemplates.Get(project, splits[len(splits)-1]).Do() if err != nil { return handleNotFoundError(err, d, fmt.Sprintf("Instance Template %q", d.Get("name").(string))) } @@ -1092,8 +1123,9 @@ func resourceComputeInstanceTemplateDelete(d *schema.ResourceData, meta interfac return err } + splits := strings.Split(d.Id(), "/") op, err := config.clientCompute.InstanceTemplates.Delete( - project, d.Id()).Do() + project, splits[len(splits)-1]).Do() if err != nil { return fmt.Errorf("Error deleting instance template: %s", err) } @@ -1130,3 +1162,19 @@ func expandResourceComputeInstanceTemplateScheduling(d *schema.ResourceData, met } return expanded, nil } + +func resourceComputeInstanceTemplateImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*Config) + if err := parseImportId([]string{"projects/(?P[^/]+)/global/instanceTemplates/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := replaceVars(d, config, "projects/{{project}}/global/instanceTemplates/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} diff --git a/google/resource_compute_instance_template_test.go b/google/resource_compute_instance_template_test.go index ca453fbcb8f..43695f5cf11 100644 --- a/google/resource_compute_instance_template_test.go +++ b/google/resource_compute_instance_template_test.go @@ -161,6 +161,54 @@ func TestComputeInstanceTemplate_reorderDisks(t *testing.T) { } } +func TestComputeInstanceTemplate_scratchDiskSizeCustomizeDiff(t *testing.T) { + t.Parallel() + + cases := map[string]struct { + Typee string // misspelled on purpose, type is a special symbol + DiskType string + DiskSize int + ExpectError bool + }{ + "scratch disk correct size": { + Typee: "SCRATCH", + DiskType: "local-ssd", + DiskSize: 375, + ExpectError: false, + }, + "scratch disk incorrect size": { + Typee: "SCRATCH", + DiskType: "local-ssd", + DiskSize: 300, + ExpectError: true, + }, + "non-scratch disk": { + Typee: "PERSISTENT", + DiskType: "", + DiskSize: 300, + ExpectError: false, + }, + } + + for tn, tc := range cases { + d := &ResourceDiffMock{ + After: map[string]interface{}{ + "disk.#": 1, + "disk.0.type": tc.Typee, + "disk.0.disk_type": tc.DiskType, + "disk.0.disk_size_gb": tc.DiskSize, + }, + } + err := 
diff --git a/google/resource_compute_instance_template_test.go b/google/resource_compute_instance_template_test.go index ca453fbcb8f..43695f5cf11 100644 --- a/google/resource_compute_instance_template_test.go +++ b/google/resource_compute_instance_template_test.go @@ -161,6 +161,54 @@ func TestComputeInstanceTemplate_reorderDisks(t *testing.T) { } } +func TestComputeInstanceTemplate_scratchDiskSizeCustomizeDiff(t *testing.T) { + t.Parallel() + + cases := map[string]struct { + Typee string // misspelled on purpose, type is a special symbol + DiskType string + DiskSize int + ExpectError bool + }{ + "scratch disk correct size": { + Typee: "SCRATCH", + DiskType: "local-ssd", + DiskSize: 375, + ExpectError: false, + }, + "scratch disk incorrect size": { + Typee: "SCRATCH", + DiskType: "local-ssd", + DiskSize: 300, + ExpectError: true, + }, + "non-scratch disk": { + Typee: "PERSISTENT", + DiskType: "", + DiskSize: 300, + ExpectError: false, + }, + } + + for tn, tc := range cases { + d := &ResourceDiffMock{ + After: map[string]interface{}{ + "disk.#": 1, + "disk.0.type": tc.Typee, + "disk.0.disk_type": tc.DiskType, + "disk.0.disk_size_gb": tc.DiskSize, + }, + } + err := resourceComputeInstanceTemplateScratchDiskCustomizeDiffFunc(d) + if tc.ExpectError && err == nil { + t.Errorf("%s failed, expected error but was none", tn) + } + if !tc.ExpectError && err != nil { + t.Errorf("%s failed, found unexpected error: %s", tn, err) + } + } +} + func TestAccComputeInstanceTemplate_basic(t *testing.T) { t.Parallel() @@ -756,6 +804,21 @@ func TestAccComputeInstanceTemplate_enableDisplay(t *testing.T) { }) } +func TestAccComputeInstanceTemplate_invalidDiskType(t *testing.T) { + t.Parallel() + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceTemplate_invalidDiskType(), + ExpectError: regexp.MustCompile("SCRATCH disks must have a disk_type of local-ssd"), + }, + }, + }) +} + func testAccCheckComputeInstanceTemplateDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) @@ -764,8 +827,9 @@ func testAccCheckComputeInstanceTemplateDestroy(s *terraform.State) error { continue } + splits := strings.Split(rs.Primary.ID, "/") _, err := config.clientCompute.InstanceTemplates.Get( - config.Project, rs.Primary.ID).Do() + config.Project, splits[len(splits)-1]).Do() if err == nil { return fmt.Errorf("Instance template still exists") } @@ -802,13 +866,15 @@ func testAccCheckComputeInstanceTemplateExistsInProject(n, p string, instanceTem config := testAccProvider.Meta().(*Config) + splits := strings.Split(rs.Primary.ID, "/") + templateName := splits[len(splits)-1] found, err := config.clientCompute.InstanceTemplates.Get( - p, rs.Primary.ID).Do() + p, templateName).Do() if err != nil { return err } - if found.Name != rs.Primary.ID { + if found.Name != templateName { return fmt.Errorf("Instance template not found") } @@ -831,13 +897,15 @@ func testAccCheckComputeBetaInstanceTemplateExistsInProject(n, p string, instanc config := testAccProvider.Meta().(*Config) + splits := strings.Split(rs.Primary.ID, "/") + templateName := splits[len(splits)-1] found, err := config.clientComputeBeta.InstanceTemplates.Get( - p, rs.Primary.ID).Do() + p, templateName).Do() if err != nil { return err } - if found.Name != rs.Primary.ID { + if found.Name != templateName { return fmt.Errorf("Instance template not found") } @@ -1895,22 +1963,52 @@ data "google_compute_image" "my_image" { family = "centos-7" project = "gce-uefi-images" } - resource "google_compute_instance_template" "foobar" { name = "instancet-test-%s" machine_type = "n1-standard-1" can_ip_forward = false - disk { source_image = "${data.google_compute_image.my_image.self_link}" auto_delete = true boot = true } - network_interface { network = "default" } - enable_display = true }`, acctest.RandString(10)) } + +func testAccComputeInstanceTemplate_invalidDiskType() string { + return fmt.Sprintf(` +# Use this datasource instead of hardcoded values when https://github.com/hashicorp/terraform/issues/22679 +# is resolved. 
+# data "google_compute_image" "my_image" { +# family = "centos-7" +# project = "gce-uefi-images" +# } +resource "google_compute_instance_template" "foobar" { + name = "instancet-test-%s" + machine_type = "n1-standard-1" + can_ip_forward = false + disk { + source_image = "https://www.googleapis.com/compute/v1/projects/gce-uefi-images/global/images/centos-7-v20190729" + auto_delete = true + boot = true + } + disk { + auto_delete = true + disk_size_gb = 375 + type = "SCRATCH" + disk_type = "local-ssd" + } + disk { + source_image = "https://www.googleapis.com/compute/v1/projects/gce-uefi-images/global/images/centos-7-v20190729" + auto_delete = true + type = "SCRATCH" + } + network_interface { + network = "default" + } +}`, acctest.RandString(10)) +} diff --git a/google/resource_compute_instance_test.go b/google/resource_compute_instance_test.go index 9972b8a50e5..1811593048c 100644 --- a/google/resource_compute_instance_test.go +++ b/google/resource_compute_instance_test.go @@ -1236,7 +1236,7 @@ func testAccCheckComputeInstanceUpdateMachineType(n string) resource.TestCheckFu config := testAccProvider.Meta().(*Config) - op, err := config.clientCompute.Instances.Stop(config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() + op, err := config.clientCompute.Instances.Stop(config.Project, rs.Primary.Attributes["zone"], rs.Primary.Attributes["name"]).Do() if err != nil { return fmt.Errorf("Could not stop instance: %s", err) } @@ -1250,7 +1250,7 @@ func testAccCheckComputeInstanceUpdateMachineType(n string) resource.TestCheckFu } op, err = config.clientCompute.Instances.SetMachineType( - config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID, &machineType).Do() + config.Project, rs.Primary.Attributes["zone"], rs.Primary.Attributes["name"], &machineType).Do() if err != nil { return fmt.Errorf("Could not change machine type: %s", err) } @@ -1271,7 +1271,7 @@ func testAccCheckComputeInstanceDestroy(s *terraform.State) error { } _, err := config.clientCompute.Instances.Get( - config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() + config.Project, rs.Primary.Attributes["zone"], rs.Primary.Attributes["name"]).Do() if err == nil { return fmt.Errorf("Instance still exists") } @@ -1309,12 +1309,12 @@ func testAccCheckComputeInstanceExistsInProject(n, p string, instance *compute.I config := testAccProvider.Meta().(*Config) found, err := config.clientCompute.Instances.Get( - p, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() + p, rs.Primary.Attributes["zone"], rs.Primary.Attributes["name"]).Do() if err != nil { return err } - if found.Name != rs.Primary.ID { + if found.Name != rs.Primary.Attributes["name"] { return fmt.Errorf("Instance not found") } @@ -1338,12 +1338,12 @@ func testAccCheckComputeBetaInstanceExistsInProject(n, p string, instance *compu config := testAccProvider.Meta().(*Config) found, err := config.clientComputeBeta.Instances.Get( - p, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() + p, rs.Primary.Attributes["zone"], rs.Primary.Attributes["name"]).Do() if err != nil { return err } - if found.Name != rs.Primary.ID { + if found.Name != rs.Primary.Attributes["name"] { return fmt.Errorf("Instance not found") } diff --git a/google/resource_compute_interconnect_attachment.go b/google/resource_compute_interconnect_attachment.go index 7a5e6bee75a..1c9e5978bc8 100644 --- a/google/resource_compute_interconnect_attachment.go +++ b/google/resource_compute_interconnect_attachment.go @@ -309,7 +309,7 @@ func resourceComputeInterconnectAttachmentCreate(d 
*schema.ResourceData, meta in } // Store the ID now - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/interconnectAttachments/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -472,7 +472,7 @@ func resourceComputeInterconnectAttachmentImport(d *schema.ResourceData, meta in } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/interconnectAttachments/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/google/resource_compute_network.go b/google/resource_compute_network.go index 49ef0d3347f..4817029eddc 100644 --- a/google/resource_compute_network.go +++ b/google/resource_compute_network.go @@ -73,20 +73,6 @@ the user can explicitly connect subnetwork resources.`, ForceNew: true, Description: `An optional description of this resource. The resource must be recreated to modify this field.`, - }, - "ipv4_range": { - Type: schema.TypeString, - Optional: true, - Deprecated: "Legacy Networks are deprecated and you will no longer be able to create them using this field from Feb 1, 2020 onwards.", - ForceNew: true, - Description: `If this field is specified, a deprecated legacy network is created. -You will no longer be able to create a legacy network on Feb 1, 2020. -See the [legacy network docs](https://cloud.google.com/vpc/docs/legacy) -for more details. - -The range of internal addresses that are legal on this legacy network. -This range is a CIDR specification, for example: '192.168.0.0/16'. -The resource must be recreated to modify this field.`, }, "routing_mode": { Type: schema.TypeString, @@ -111,6 +97,11 @@ is selected by GCP.`, Optional: true, Default: false, }, + "ipv4_range": { + Type: schema.TypeString, + Computed: true, + Removed: "Legacy Networks are deprecated and you will no longer be able to create them using this field from Feb 1, 2020 onwards.", + }, "project": { Type: schema.TypeString, Optional: true, @@ -135,12 +126,6 @@ func resourceComputeNetworkCreate(d *schema.ResourceData, meta interface{}) erro } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { obj["description"] = descriptionProp } - IPv4RangeProp, err := expandComputeNetworkIpv4Range(d.Get("ipv4_range"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ipv4_range"); !isEmptyValue(reflect.ValueOf(IPv4RangeProp)) && (ok || !reflect.DeepEqual(v, IPv4RangeProp)) { - obj["IPv4Range"] = IPv4RangeProp - } nameProp, err := expandComputeNetworkName(d.Get("name"), d, config) if err != nil { return err @@ -150,7 +135,7 @@ func resourceComputeNetworkCreate(d *schema.ResourceData, meta interface{}) erro autoCreateSubnetworksProp, err := expandComputeNetworkAutoCreateSubnetworks(d.Get("auto_create_subnetworks"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("auto_create_subnetworks"); !isEmptyValue(reflect.ValueOf(autoCreateSubnetworksProp)) && (ok || !reflect.DeepEqual(v, autoCreateSubnetworksProp)) { + } else if v, ok := d.GetOkExists("auto_create_subnetworks"); ok || !reflect.DeepEqual(v, autoCreateSubnetworksProp) { obj["autoCreateSubnetworks"] = autoCreateSubnetworksProp } routingConfigProp, err := expandComputeNetworkRoutingConfig(nil, d, config) @@ -160,11 +145,6 @@ func resourceComputeNetworkCreate(d 
*schema.ResourceData, meta interface{}) erro obj["routingConfig"] = routingConfigProp } - obj, err = resourceComputeNetworkEncoder(d, meta, obj) - if err != nil { - return err - } - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/networks") if err != nil { return err @@ -181,7 +161,7 @@ func resourceComputeNetworkCreate(d *schema.ResourceData, meta interface{}) erro } // Store the ID now - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/global/networks/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -269,9 +249,6 @@ func resourceComputeNetworkRead(d *schema.ResourceData, meta interface{}) error if err := d.Set("gateway_ipv4", flattenComputeNetworkGatewayIpv4(res["gatewayIPv4"], d)); err != nil { return fmt.Errorf("Error reading Network: %s", err) } - if err := d.Set("ipv4_range", flattenComputeNetworkIpv4Range(res["IPv4Range"], d)); err != nil { - return fmt.Errorf("Error reading Network: %s", err) - } if err := d.Set("name", flattenComputeNetworkName(res["name"], d)); err != nil { return fmt.Errorf("Error reading Network: %s", err) } @@ -396,7 +373,7 @@ func resourceComputeNetworkImport(d *schema.ResourceData, meta interface{}) ([]* } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/global/networks/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -416,10 +393,6 @@ func flattenComputeNetworkGatewayIpv4(v interface{}, d *schema.ResourceData) int return v } -func flattenComputeNetworkIpv4Range(v interface{}, d *schema.ResourceData) interface{} { - return v -} - func flattenComputeNetworkName(v interface{}, d *schema.ResourceData) interface{} { return v } @@ -449,10 +422,6 @@ func expandComputeNetworkDescription(v interface{}, d TerraformResourceData, con return v, nil } -func expandComputeNetworkIpv4Range(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - func expandComputeNetworkName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } @@ -476,11 +445,3 @@ func expandComputeNetworkRoutingConfig(v interface{}, d TerraformResourceData, c func expandComputeNetworkRoutingConfigRoutingMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } - -func resourceComputeNetworkEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - if _, ok := d.GetOk("ipv4_range"); !ok { - obj["autoCreateSubnetworks"] = d.Get("auto_create_subnetworks") - } - - return obj, nil -} diff --git a/google/resource_compute_network_endpoint_group.go b/google/resource_compute_network_endpoint_group.go index a50fea189c3..2f266f544e7 100644 --- a/google/resource_compute_network_endpoint_group.go +++ b/google/resource_compute_network_endpoint_group.go @@ -183,7 +183,7 @@ func resourceComputeNetworkEndpointGroupCreate(d *schema.ResourceData, meta inte } // Store the ID now - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/networkEndpointGroups/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -313,7 +313,7 @@ func resourceComputeNetworkEndpointGroupImport(d *schema.ResourceData, meta inte } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{name}}") + 
id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/networkEndpointGroups/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/google/resource_compute_network_endpoint_group_generated_test.go b/google/resource_compute_network_endpoint_group_generated_test.go index 9fc60ecd6eb..7d2784dda78 100644 --- a/google/resource_compute_network_endpoint_group_generated_test.go +++ b/google/resource_compute_network_endpoint_group_generated_test.go @@ -52,14 +52,14 @@ func testAccComputeNetworkEndpointGroup_networkEndpointGroupExample(context map[ return Nprintf(` resource "google_compute_network_endpoint_group" "neg" { name = "my-lb-neg%{random_suffix}" - network = "${google_compute_network.default.self_link}" - subnetwork = "${google_compute_subnetwork.default.self_link}" + network = google_compute_network.default.self_link + subnetwork = google_compute_subnetwork.default.self_link default_port = "90" zone = "us-central1-a" } resource "google_compute_network" "default" { - name = "neg-network%{random_suffix}" + name = "neg-network%{random_suffix}" auto_create_subnetworks = false } @@ -67,7 +67,7 @@ resource "google_compute_subnetwork" "default" { name = "neg-subnetwork%{random_suffix}" ip_cidr_range = "10.0.0.0/16" region = "us-central1" - network = "${google_compute_network.default.self_link}" + network = google_compute_network.default.self_link } `, context) } diff --git a/google/resource_compute_network_peering.go b/google/resource_compute_network_peering.go index 593b3063201..a30316d8c0a 100644 --- a/google/resource_compute_network_peering.go +++ b/google/resource_compute_network_peering.go @@ -41,13 +41,12 @@ func resourceComputeNetworkPeering() *schema.Resource { DiffSuppressFunc: compareSelfLinkRelativePaths, }, // The API only accepts true as a value for exchange_subnet_routes or auto_create_routes (of which only one can be set in a valid request). - // Also, you can't set auto_create_routes if you use the networkPeering object. auto_create_routes is also deprecated + // Also, you can't set auto_create_routes if you use the networkPeering object. auto_create_routes is also removed "auto_create_routes": { - Type: schema.TypeBool, - Optional: true, - Deprecated: "auto_create_routes has been deprecated because it's redundant and not user-configurable. It can safely be removed from your config", - ForceNew: true, - Default: true, + Type: schema.TypeBool, + Optional: true, + Removed: "auto_create_routes has been removed because it's redundant and not user-configurable. 
It can safely be removed from your config", + ForceNew: true, }, "state": { Type: schema.TypeString, diff --git a/google/resource_compute_network_peering_test.go b/google/resource_compute_network_peering_test.go index 4de93ae9dbc..53727b81026 100644 --- a/google/resource_compute_network_peering_test.go +++ b/google/resource_compute_network_peering_test.go @@ -119,7 +119,6 @@ func testAccComputeNetworkPeering_basic() string { network = "${google_compute_network.network2.self_link}" peer_network = "${google_compute_network.network1.self_link}" name = "peering-test-2-%s" - auto_create_routes = true ` s = s + `}` diff --git a/google/resource_compute_network_test.go b/google/resource_compute_network_test.go index 4f1d6ccac59..ffea9c27cda 100644 --- a/google/resource_compute_network_test.go +++ b/google/resource_compute_network_test.go @@ -66,31 +66,6 @@ func TestAccComputeNetwork_customSubnet(t *testing.T) { }) } -func TestAccComputeNetwork_legacyNetwork(t *testing.T) { - t.Parallel() - - var network compute.Network - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeNetworkDestroy, - Steps: []resource.TestStep{ - { - Config: testAccComputeNetwork_legacyNetwork(), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeNetworkExists("google_compute_network.default", &network), - resource.TestCheckResourceAttrSet("google_compute_network.default", "ipv4_range"), - ), - }, - { - ResourceName: "google_compute_network.default", - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - func TestAccComputeNetwork_routingModeAndUpdate(t *testing.T) { t.Parallel() @@ -172,19 +147,19 @@ func testAccCheckComputeNetworkExists(n string, network *compute.Network) resour return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { + if rs.Primary.Attributes["name"] == "" { return fmt.Errorf("No ID is set") } config := testAccProvider.Meta().(*Config) found, err := config.clientCompute.Networks.Get( - config.Project, rs.Primary.ID).Do() + config.Project, rs.Primary.Attributes["name"]).Do() if err != nil { return err } - if found.Name != rs.Primary.ID { + if found.Name != rs.Primary.Attributes["name"] { return fmt.Errorf("Network not found") } @@ -275,15 +250,6 @@ resource "google_compute_network" "bar" { }`, acctest.RandString(10)) } -func testAccComputeNetwork_legacyNetwork() string { - return fmt.Sprintf(` -resource "google_compute_network" "default" { - name = "network-test-%s" - auto_create_subnetworks = false - ipv4_range = "10.0.0.0/16" -}`, acctest.RandString(10)) -} - func testAccComputeNetwork_custom_subnet() string { return fmt.Sprintf(` resource "google_compute_network" "baz" { diff --git a/google/resource_compute_node_group.go b/google/resource_compute_node_group.go index 18473441335..040a8bf68d2 100644 --- a/google/resource_compute_node_group.go +++ b/google/resource_compute_node_group.go @@ -145,7 +145,7 @@ func resourceComputeNodeGroupCreate(d *schema.ResourceData, meta interface{}) er } // Store the ID now - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/nodeGroups/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -320,7 +320,7 @@ func resourceComputeNodeGroupImport(d *schema.ResourceData, meta interface{}) ([ } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, 
"projects/{{project}}/zones/{{zone}}/nodeGroups/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/google/resource_compute_node_group_generated_test.go b/google/resource_compute_node_group_generated_test.go index c414cc40567..78af896ac47 100644 --- a/google/resource_compute_node_group_generated_test.go +++ b/google/resource_compute_node_group_generated_test.go @@ -55,18 +55,18 @@ data "google_compute_node_types" "central1a" { } resource "google_compute_node_template" "soletenant-tmpl" { - name = "soletenant-tmpl%{random_suffix}" - region = "us-central1" - node_type = "${data.google_compute_node_types.central1a.names[0]}" + name = "soletenant-tmpl%{random_suffix}" + region = "us-central1" + node_type = data.google_compute_node_types.central1a.names[0] } resource "google_compute_node_group" "nodes" { - name = "soletenant-group%{random_suffix}" - zone = "us-central1-a" + name = "soletenant-group%{random_suffix}" + zone = "us-central1-a" description = "example google_compute_node_group for Terraform Google Provider" - size = 1 - node_template = "${google_compute_node_template.soletenant-tmpl.self_link}" + size = 1 + node_template = google_compute_node_template.soletenant-tmpl.self_link } `, context) } diff --git a/google/resource_compute_node_template.go b/google/resource_compute_node_template.go index 84652a89a19..0916632ba23 100644 --- a/google/resource_compute_node_template.go +++ b/google/resource_compute_node_template.go @@ -80,16 +80,18 @@ be specified.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "cpus": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Number of virtual CPUs to use.`, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Number of virtual CPUs to use.`, + AtLeastOneOf: []string{"node_type_flexibility.0.cpus", "node_type_flexibility.0.memory"}, }, "memory": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Physical memory available to the node, defined in MB.`, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Physical memory available to the node, defined in MB.`, + AtLeastOneOf: []string{"node_type_flexibility.0.cpus", "node_type_flexibility.0.memory"}, }, "local_ssd": { Type: schema.TypeString, @@ -185,7 +187,7 @@ func resourceComputeNodeTemplateCreate(d *schema.ResourceData, meta interface{}) } // Store the ID now - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/nodeTemplates/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -312,7 +314,7 @@ func resourceComputeNodeTemplateImport(d *schema.ResourceData, meta interface{}) } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/nodeTemplates/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/google/resource_compute_node_template_generated_test.go b/google/resource_compute_node_template_generated_test.go index ae31dbddd40..7700dfae7b3 100644 --- a/google/resource_compute_node_template_generated_test.go +++ b/google/resource_compute_node_template_generated_test.go @@ -55,9 +55,9 @@ data "google_compute_node_types" "central1a" { } resource "google_compute_node_template" "template" { - name = "soletenant-tmpl%{random_suffix}" - region = "us-central1" - node_type = 
"${data.google_compute_node_types.central1a.names[0]}" + name = "soletenant-tmpl%{random_suffix}" + region = "us-central1" + node_type = data.google_compute_node_types.central1a.names[0] } `, context) } diff --git a/google/resource_compute_region_autoscaler.go b/google/resource_compute_region_autoscaler.go index c01c3fcda2f..363996e6bdc 100644 --- a/google/resource_compute_region_autoscaler.go +++ b/google/resource_compute_region_autoscaler.go @@ -277,7 +277,7 @@ func resourceComputeRegionAutoscalerCreate(d *schema.ResourceData, meta interfac } // Store the ID now - id, err := replaceVars(d, config, "{{region}}/{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/autoscalers/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -470,7 +470,7 @@ func resourceComputeRegionAutoscalerImport(d *schema.ResourceData, meta interfac } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{region}}/{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/autoscalers/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/google/resource_compute_region_autoscaler_generated_test.go b/google/resource_compute_region_autoscaler_generated_test.go index dd6282da39b..33fca4fab91 100644 --- a/google/resource_compute_region_autoscaler_generated_test.go +++ b/google/resource_compute_region_autoscaler_generated_test.go @@ -53,7 +53,7 @@ func testAccComputeRegionAutoscaler_regionAutoscalerBasicExample(context map[str resource "google_compute_region_autoscaler" "foobar" { name = "my-region-autoscaler%{random_suffix}" region = "us-central1" - target = "${google_compute_region_instance_group_manager.foobar.self_link}" + target = google_compute_region_instance_group_manager.foobar.self_link autoscaling_policy { max_replicas = 5 @@ -74,7 +74,7 @@ resource "google_compute_instance_template" "foobar" { tags = ["foo", "bar"] disk { - source_image = "${data.google_compute_image.debian_9.self_link}" + source_image = data.google_compute_image.debian_9.self_link } network_interface { @@ -99,17 +99,17 @@ resource "google_compute_region_instance_group_manager" "foobar" { region = "us-central1" version { - instance_template = "${google_compute_instance_template.foobar.self_link}" + instance_template = google_compute_instance_template.foobar.self_link name = "primary" } - target_pools = ["${google_compute_target_pool.foobar.self_link}"] + target_pools = [google_compute_target_pool.foobar.self_link] base_instance_name = "foobar" } data "google_compute_image" "debian_9" { - family = "debian-9" - project = "debian-cloud" + family = "debian-9" + project = "debian-cloud" } `, context) } diff --git a/google/resource_compute_region_backend_service.go b/google/resource_compute_region_backend_service.go index 1dace9cc30c..9fe2da9b99f 100644 --- a/google/resource_compute_region_backend_service.go +++ b/google/resource_compute_region_backend_service.go @@ -372,7 +372,7 @@ func resourceComputeRegionBackendServiceCreate(d *schema.ResourceData, meta inte } // Store the ID now - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/backendServices/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -626,7 +626,7 @@ func resourceComputeRegionBackendServiceImport(d *schema.ResourceData, meta inte } // Replace import id for the resource id - id, err := replaceVars(d, config, 
"{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/backendServices/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/google/resource_compute_region_backend_service_generated_test.go b/google/resource_compute_region_backend_service_generated_test.go index 011a9117a94..39ffdc766f3 100644 --- a/google/resource_compute_region_backend_service_generated_test.go +++ b/google/resource_compute_region_backend_service_generated_test.go @@ -53,7 +53,7 @@ func testAccComputeRegionBackendService_regionBackendServiceBasicExample(context resource "google_compute_region_backend_service" "default" { name = "region-backend-service%{random_suffix}" region = "us-central1" - health_checks = ["${google_compute_health_check.default.self_link}"] + health_checks = [google_compute_health_check.default.self_link] connection_draining_timeout_sec = 10 session_affinity = "CLIENT_IP" } diff --git a/google/resource_compute_region_backend_service_test.go b/google/resource_compute_region_backend_service_test.go index 54e28e410af..4eddc992666 100644 --- a/google/resource_compute_region_backend_service_test.go +++ b/google/resource_compute_region_backend_service_test.go @@ -165,6 +165,7 @@ resource "google_compute_health_check" "zero" { timeout_sec = 1 tcp_health_check { + port = 443 } } @@ -174,6 +175,7 @@ resource "google_compute_health_check" "one" { timeout_sec = 30 tcp_health_check { + port = 443 } } `, serviceName, checkOne, checkTwo) @@ -230,7 +232,7 @@ resource "google_compute_health_check" "default" { timeout_sec = 1 tcp_health_check { - + port = 443 } } `, serviceName, timeout, igName, itName, checkName) diff --git a/google/resource_compute_region_disk.go b/google/resource_compute_region_disk.go index ca6d628b0bd..d0ccf73010b 100644 --- a/google/resource_compute_region_disk.go +++ b/google/resource_compute_region_disk.go @@ -355,7 +355,7 @@ func resourceComputeRegionDiskCreate(d *schema.ResourceData, meta interface{}) e } // Store the ID now - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/disks/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -671,7 +671,7 @@ func resourceComputeRegionDiskImport(d *schema.ResourceData, meta interface{}) ( } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/disks/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/google/resource_compute_region_disk_generated_test.go b/google/resource_compute_region_disk_generated_test.go index 0e11ed01f96..587a65c0601 100644 --- a/google/resource_compute_region_disk_generated_test.go +++ b/google/resource_compute_region_disk_generated_test.go @@ -51,27 +51,27 @@ func TestAccComputeRegionDisk_regionDiskBasicExample(t *testing.T) { func testAccComputeRegionDisk_regionDiskBasicExample(context map[string]interface{}) string { return Nprintf(` resource "google_compute_region_disk" "regiondisk" { - name = "my-region-disk%{random_suffix}" - snapshot = "${google_compute_snapshot.snapdisk.self_link}" - type = "pd-ssd" - region = "us-central1" + name = "my-region-disk%{random_suffix}" + snapshot = google_compute_snapshot.snapdisk.self_link + type = "pd-ssd" + region = "us-central1" physical_block_size_bytes = 4096 replica_zones = ["us-central1-a", "us-central1-f"] } resource "google_compute_disk" "disk" 
{ - name = "my-disk%{random_suffix}" + name = "my-disk%{random_suffix}" image = "debian-cloud/debian-9" - size = 50 - type = "pd-ssd" - zone = "us-central1-a" + size = 50 + type = "pd-ssd" + zone = "us-central1-a" } resource "google_compute_snapshot" "snapdisk" { - name = "my-snapshot%{random_suffix}" - source_disk = "${google_compute_disk.disk.name}" - zone = "us-central1-a" + name = "my-snapshot%{random_suffix}" + source_disk = google_compute_disk.disk.name + zone = "us-central1-a" } `, context) } diff --git a/google/resource_compute_region_disk_test.go b/google/resource_compute_region_disk_test.go index cb1b2c498ef..9f7e737affd 100644 --- a/google/resource_compute_region_disk_test.go +++ b/google/resource_compute_region_disk_test.go @@ -183,19 +183,19 @@ func testAccCheckComputeRegionDiskExists(n string, disk *computeBeta.Disk) resou return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { + if rs.Primary.Attributes["name"] == "" { return fmt.Errorf("No ID is set") } config := testAccProvider.Meta().(*Config) found, err := config.clientComputeBeta.RegionDisks.Get( - p, rs.Primary.Attributes["region"], rs.Primary.ID).Do() + p, rs.Primary.Attributes["region"], rs.Primary.Attributes["name"]).Do() if err != nil { return err } - if found.Name != rs.Primary.ID { + if found.Name != rs.Primary.Attributes["name"] { return fmt.Errorf("RegionDisk not found") } diff --git a/google/resource_compute_region_instance_group_manager.go b/google/resource_compute_region_instance_group_manager.go index 984ce9fb0c3..2f4cc6d7232 100644 --- a/google/resource_compute_region_instance_group_manager.go +++ b/google/resource_compute_region_instance_group_manager.go @@ -3,7 +3,6 @@ package google import ( "fmt" "log" - "regexp" "strings" "time" @@ -15,11 +14,6 @@ import ( computeBeta "google.golang.org/api/compute/v0.beta" ) -var ( - regionInstanceGroupManagerIdRegex = regexp.MustCompile("^" + ProjectRegex + "/[a-z0-9-]+/[a-z0-9-]+$") - regionInstanceGroupManagerIdNameRegex = regexp.MustCompile("^[a-z0-9-]+$") -) - func resourceComputeRegionInstanceGroupManager() *schema.Resource { return &schema.Resource{ Create: resourceComputeRegionInstanceGroupManagerCreate, @@ -43,18 +37,14 @@ func resourceComputeRegionInstanceGroupManager() *schema.Resource { }, "instance_template": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Deprecated: "This field will be replaced by `version.instance_template` in 3.0.0", - ConflictsWith: []string{"version"}, - DiffSuppressFunc: compareSelfLinkRelativePaths, + Type: schema.TypeString, + Optional: true, + Removed: "This field has been replaced by `version.instance_template` in 3.0.0", }, "version": { Type: schema.TypeList, - Optional: true, - Computed: true, + Required: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "name": { @@ -150,10 +140,9 @@ func resourceComputeRegionInstanceGroupManager() *schema.Resource { }, "update_strategy": { - Type: schema.TypeString, - Deprecated: "This field will be replaced by `update_policy` in 3.0.0", - Optional: true, - ConflictsWith: []string{"update_policy"}, + Type: schema.TypeString, + Removed: "This field is removed.", + Optional: true, }, "target_pools": { @@ -212,65 +201,6 @@ func resourceComputeRegionInstanceGroupManager() *schema.Resource { }, }, - "rolling_update_policy": { - Type: schema.TypeList, - Computed: true, - Removed: "This field has been replaced by update_policy.", - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "minimal_action": { - 
Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{"RESTART", "REPLACE"}, false), - }, - - "type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{"OPPORTUNISTIC", "PROACTIVE"}, false), - }, - - "max_surge_fixed": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - - "max_surge_percent": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(0, 100), - }, - - "max_unavailable_fixed": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - - "max_unavailable_percent": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(0, 100), - }, - - "min_ready_sec": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(0, 3600), - }, - "instance_redistribution_type": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice([]string{"PROACTIVE", "NONE", ""}, false), - DiffSuppressFunc: emptyOrDefaultStringSuppress("PROACTIVE"), - }, - }, - }, - }, - "update_policy": { Type: schema.TypeList, Computed: true, @@ -353,7 +283,6 @@ func resourceComputeRegionInstanceGroupManagerCreate(d *schema.ResourceData, met Name: d.Get("name").(string), Description: d.Get("description").(string), BaseInstanceName: d.Get("base_instance_name").(string), - InstanceTemplate: d.Get("instance_template").(string), TargetSize: int64(d.Get("target_size").(int)), NamedPorts: getNamedPortsBeta(d.Get("named_port").(*schema.Set).List()), TargetPools: convertStringSet(d.Get("target_pools").(*schema.Set)), @@ -371,7 +300,11 @@ func resourceComputeRegionInstanceGroupManagerCreate(d *schema.ResourceData, met return fmt.Errorf("Error creating RegionInstanceGroupManager: %s", err) } - d.SetId(regionInstanceGroupManagerId{Project: project, Region: region, Name: manager.Name}.terraformId()) + id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/instanceGroupManagers/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) // Wait for the operation to complete timeoutInMinutes := int(d.Timeout(schema.TimeoutCreate).Minutes()) @@ -387,28 +320,20 @@ type getInstanceManagerFunc func(*schema.ResourceData, interface{}) (*computeBet func getRegionalManager(d *schema.ResourceData, meta interface{}) (*computeBeta.InstanceGroupManager, error) { config := meta.(*Config) - regionalID, err := parseRegionInstanceGroupManagerId(d.Id()) + project, err := getProject(d, config) if err != nil { return nil, err } - if regionalID.Project == "" { - regionalID.Project, err = getProject(d, config) - if err != nil { - return nil, err - } - } - - if regionalID.Region == "" { - regionalID.Region, err = getRegion(d, config) - if err != nil { - return nil, err - } + region, err := getRegion(d, config) + if err != nil { + return nil, err } - manager, err := config.clientComputeBeta.RegionInstanceGroupManagers.Get(regionalID.Project, regionalID.Region, regionalID.Name).Do() + name := d.Get("name").(string) + manager, err := config.clientComputeBeta.RegionInstanceGroupManagers.Get(project, region, name).Do() if err != nil { - return nil, handleNotFoundError(err, d, fmt.Sprintf("Region Instance Manager %q", regionalID.Name)) + return nil, handleNotFoundError(err, d, fmt.Sprintf("Region Instance Manager %q", name)) } return manager, nil @@ -441,23 +366,16 @@ func resourceComputeRegionInstanceGroupManagerRead(d *schema.ResourceData, meta return nil } - regionalID, err := 
parseRegionInstanceGroupManagerId(d.Id()) + project, err := getProject(d, config) if err != nil { return err } - if regionalID.Project == "" { - regionalID.Project, err = getProject(d, config) - if err != nil { - return err - } - } d.Set("base_instance_name", manager.BaseInstanceName) - d.Set("instance_template", ConvertSelfLinkToV1(manager.InstanceTemplate)) d.Set("name", manager.Name) d.Set("region", GetResourceNameFromSelfLink(manager.Region)) d.Set("description", manager.Description) - d.Set("project", regionalID.Project) + d.Set("project", project) d.Set("target_size", manager.TargetSize) if err := d.Set("target_pools", mapStringArr(manager.TargetPools, ConvertSelfLinkToV1)); err != nil { return fmt.Errorf("Error setting target_pools in state: %s", err.Error()) @@ -472,8 +390,6 @@ func resourceComputeRegionInstanceGroupManagerRead(d *schema.ResourceData, meta } d.Set("self_link", ConvertSelfLinkToV1(manager.SelfLink)) - d.Set("rolling_update_policy", nil) - if err := d.Set("auto_healing_policies", flattenAutoHealingPolicies(manager.AutoHealingPolicies)); err != nil { return fmt.Errorf("Error setting auto_healing_policies in state: %s", err.Error()) } @@ -595,30 +511,6 @@ func resourceComputeRegionInstanceGroupManagerUpdate(d *schema.ResourceData, met d.SetPartial("target_size") } - if d.HasChange("instance_template") { - d.Partial(true) - // Build the parameter - setInstanceTemplate := &computeBeta.RegionInstanceGroupManagersSetTemplateRequest{ - InstanceTemplate: d.Get("instance_template").(string), - } - - op, err := config.clientComputeBeta.RegionInstanceGroupManagers.SetInstanceTemplate( - project, region, d.Get("name").(string), setInstanceTemplate).Do() - - if err != nil { - return fmt.Errorf("Error updating RegionInstanceGroupManager: %s", err) - } - - // Wait for the operation to complete - timeoutInMinutes := int(d.Timeout(schema.TimeoutUpdate).Minutes()) - err = computeSharedOperationWaitTime(config.clientCompute, op, project, timeoutInMinutes, "Updating InstanceGroupManager") - if err != nil { - return err - } - - d.SetPartial("instance_template") - } - d.Partial(false) return resourceComputeRegionInstanceGroupManagerRead(d, meta) @@ -627,26 +519,19 @@ func resourceComputeRegionInstanceGroupManagerUpdate(d *schema.ResourceData, met func resourceComputeRegionInstanceGroupManagerDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - regionalID, err := parseRegionInstanceGroupManagerId(d.Id()) + project, err := getProject(d, config) if err != nil { return err } - if regionalID.Project == "" { - regionalID.Project, err = getProject(d, config) - if err != nil { - return err - } + region, err := getRegion(d, config) + if err != nil { + return err } - if regionalID.Region == "" { - regionalID.Region, err = getRegion(d, config) - if err != nil { - return err - } - } + name := d.Get("name").(string) - op, err := config.clientComputeBeta.RegionInstanceGroupManagers.Delete(regionalID.Project, regionalID.Region, regionalID.Name).Do() + op, err := config.clientComputeBeta.RegionInstanceGroupManagers.Delete(project, region, name).Do() if err != nil { return fmt.Errorf("Error deleting region instance group manager: %s", err) @@ -654,7 +539,7 @@ func resourceComputeRegionInstanceGroupManagerDelete(d *schema.ResourceData, met // Wait for the operation to complete timeoutInMinutes := int(d.Timeout(schema.TimeoutDelete).Minutes()) - err = computeSharedOperationWaitTime(config.clientCompute, op, regionalID.Project, timeoutInMinutes, "Deleting 
RegionInstanceGroupManager") + err = computeSharedOperationWaitTime(config.clientCompute, op, project, timeoutInMinutes, "Deleting RegionInstanceGroupManager") if err != nil { return fmt.Errorf("Error waiting for delete to complete: %s", err) } @@ -776,40 +661,17 @@ func hashZoneFromSelfLinkOrResourceName(value interface{}) int { func resourceRegionInstanceGroupManagerStateImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { d.Set("wait_for_instances", false) - regionalID, err := parseRegionInstanceGroupManagerId(d.Id()) - if err != nil { + config := meta.(*Config) + if err := parseImportId([]string{"projects/(?P[^/]+)/regions/(?P[^/]+)/instanceGroupManagers/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config); err != nil { return nil, err } - d.Set("project", regionalID.Project) - d.Set("region", regionalID.Region) - d.Set("name", regionalID.Name) - return []*schema.ResourceData{d}, nil -} -type regionInstanceGroupManagerId struct { - Project string - Region string - Name string -} - -func (r regionInstanceGroupManagerId) terraformId() string { - return fmt.Sprintf("%s/%s/%s", r.Project, r.Region, r.Name) -} - -func parseRegionInstanceGroupManagerId(id string) (*regionInstanceGroupManagerId, error) { - switch { - case regionInstanceGroupManagerIdRegex.MatchString(id): - parts := strings.Split(id, "/") - return ®ionInstanceGroupManagerId{ - Project: parts[0], - Region: parts[1], - Name: parts[2], - }, nil - case regionInstanceGroupManagerIdNameRegex.MatchString(id): - return ®ionInstanceGroupManagerId{ - Name: id, - }, nil - default: - return nil, fmt.Errorf("Invalid region instance group manager specifier. Expecting either {projectId}/{region}/{name} or {name}, where {projectId} and {region} will be derived from the provider.") + // Replace import id for the resource id + id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/instanceGroupManagers/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) } + d.SetId(id) + + return []*schema.ResourceData{d}, nil } diff --git a/google/resource_compute_region_instance_group_manager_test.go b/google/resource_compute_region_instance_group_manager_test.go index 51c398146a2..c0094fa1f82 100644 --- a/google/resource_compute_region_instance_group_manager_test.go +++ b/google/resource_compute_region_instance_group_manager_test.go @@ -129,28 +129,6 @@ func TestAccRegionInstanceGroupManager_updateLifecycle(t *testing.T) { }) } -func TestAccRegionInstanceGroupManager_updateStrategy(t *testing.T) { - t.Parallel() - - igm := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckInstanceGroupManagerDestroy, - Steps: []resource.TestStep{ - { - Config: testAccRegionInstanceGroupManager_updateStrategy(igm), - }, - { - ResourceName: "google_compute_region_instance_group_manager.igm-update-strategy", - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - func TestAccRegionInstanceGroupManager_rollingUpdatePolicy(t *testing.T) { t.Parallel() @@ -295,33 +273,6 @@ func TestAccRegionInstanceGroupManager_distributionPolicy(t *testing.T) { }) } -func TestAccRegionInstanceGroupManager_upgradeInstanceTemplate(t *testing.T) { - t.Parallel() - - igm := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: 
testAccProviders, - CheckDestroy: testAccCheckInstanceGroupManagerDestroy, - Steps: []resource.TestStep{ - { - Config: testAccRegionInstanceGroupManager_upgradeInstanceTemplate1(igm), - }, - { - ResourceName: "google_compute_region_instance_group_manager.igm-instance-template-upgrade", - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccRegionInstanceGroupManager_upgradeInstanceTemplate2(igm), - PlanOnly: true, - ExpectNonEmptyPlan: false, - }, - }, - }) -} - func testAccCheckRegionInstanceGroupManagerDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) @@ -329,18 +280,8 @@ func testAccCheckRegionInstanceGroupManagerDestroy(s *terraform.State) error { if rs.Type != "google_compute_region_instance_group_manager" { continue } - id, err := parseRegionInstanceGroupManagerId(rs.Primary.ID) - if err != nil { - return err - } - if id.Project == "" { - id.Project = config.Project - } - if id.Region == "" { - id.Region = rs.Primary.Attributes["region"] - } - _, err = config.clientCompute.RegionInstanceGroupManagers.Get( - id.Project, id.Region, id.Name).Do() + _, err := config.clientCompute.RegionInstanceGroupManagers.Get( + rs.Primary.Attributes["project"], rs.Primary.Attributes["region"], rs.Primary.Attributes["name"]).Do() if err == nil { return fmt.Errorf("RegionInstanceGroupManager still exists") } @@ -921,50 +862,6 @@ resource "google_compute_region_instance_group_manager" "igm-basic" { } `, template, igm, strings.Join(zones, "\",\"")) } -func testAccRegionInstanceGroupManager_updateStrategy(igm string) string { - return fmt.Sprintf(` -data "google_compute_image" "my_image" { - family = "debian-9" - project = "debian-cloud" -} - -resource "google_compute_instance_template" "igm-update-strategy" { - machine_type = "n1-standard-1" - can_ip_forward = false - tags = ["terraform-testing"] - - disk { - source_image = "${data.google_compute_image.my_image.self_link}" - auto_delete = true - boot = true - } - - network_interface { - network = "default" - } - - service_account { - scopes = ["userinfo-email", "compute-ro", "storage-ro"] - } - - lifecycle { - create_before_destroy = true - } -} - -resource "google_compute_region_instance_group_manager" "igm-update-strategy" { - description = "Terraform test instance group manager" - name = "%s" - instance_template = "${google_compute_instance_template.igm-update-strategy.self_link}" - base_instance_name = "rigm-update-strategy" - region = "us-central1" - target_size = 2 - named_port { - name = "customhttp" - port = 8080 - } -}`, igm) -} func testAccRegionInstanceGroupManager_rollingUpdatePolicy(igm string) string { return fmt.Sprintf(` @@ -1135,94 +1032,3 @@ resource "google_compute_region_instance_group_manager" "igm-rolling-update-poli } }`, igm) } -func testAccRegionInstanceGroupManager_upgradeInstanceTemplate1(igm string) string { - return fmt.Sprintf(` -data "google_compute_image" "my_image" { - family = "debian-9" - project = "debian-cloud" -} - -resource "google_compute_instance_template" "igm-instance-template-upgrade" { - machine_type = "n1-standard-1" - can_ip_forward = false - tags = ["terraform-testing"] - - disk { - source_image = "${data.google_compute_image.my_image.self_link}" - auto_delete = true - boot = true - } - - network_interface { - network = "default" - } - - lifecycle { - create_before_destroy = true - } -} - -resource "google_compute_region_instance_group_manager" "igm-instance-template-upgrade" { - description = "Terraform test instance group manager" - name = "%s" - - 
instance_template = "${google_compute_instance_template.igm-instance-template-upgrade.self_link}" - - region = "us-central1" - distribution_policy_zones = ["us-central1-a", "us-central1-f"] - target_size = 3 - base_instance_name = "igm-instance-template-upgrade" - - named_port { - name = "customhttp" - port = 8080 - } -}`, igm) -} - -func testAccRegionInstanceGroupManager_upgradeInstanceTemplate2(igm string) string { - return fmt.Sprintf(` -data "google_compute_image" "my_image" { - family = "debian-9" - project = "debian-cloud" -} - -resource "google_compute_instance_template" "igm-instance-template-upgrade" { - machine_type = "n1-standard-1" - can_ip_forward = false - tags = ["terraform-testing"] - - disk { - source_image = "${data.google_compute_image.my_image.self_link}" - auto_delete = true - boot = true - } - - network_interface { - network = "default" - } - - lifecycle { - create_before_destroy = true - } -} - -resource "google_compute_region_instance_group_manager" "igm-instance-template-upgrade" { - description = "Terraform test instance group manager" - name = "%s" - - version { - instance_template = "${google_compute_instance_template.igm-instance-template-upgrade.self_link}" - } - - region = "us-central1" - distribution_policy_zones = ["us-central1-a", "us-central1-f"] - target_size = 3 - base_instance_name = "igm-instance-template-upgrade" - - named_port { - name = "customhttp" - port = 8080 - } -}`, igm) -} diff --git a/google/resource_compute_reservation.go b/google/resource_compute_reservation.go index ced27a50f35..6dd747104a7 100644 --- a/google/resource_compute_reservation.go +++ b/google/resource_compute_reservation.go @@ -259,7 +259,7 @@ func resourceComputeReservationCreate(d *schema.ResourceData, meta interface{}) } // Store the ID now - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/reservations/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -389,7 +389,7 @@ func resourceComputeReservationImport(d *schema.ResourceData, meta interface{}) } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/reservations/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/google/resource_compute_reservation_generated_test.go b/google/resource_compute_reservation_generated_test.go index 643dd418c51..8a8c9fa46cc 100644 --- a/google/resource_compute_reservation_generated_test.go +++ b/google/resource_compute_reservation_generated_test.go @@ -58,7 +58,7 @@ resource "google_compute_reservation" "gce_reservation" { count = 1 instance_properties { min_cpu_platform = "Intel Cascade Lake" - machine_type = "n2-standard-2" + machine_type = "n2-standard-2" } } } diff --git a/google/resource_compute_resource_policy.go b/google/resource_compute_resource_policy.go index 8b9b02c977e..d27320e6a64 100644 --- a/google/resource_compute_resource_policy.go +++ b/google/resource_compute_resource_policy.go @@ -102,6 +102,7 @@ both 13:00-5 and 08:00 are valid.`, }, }, }, + ExactlyOneOf: []string{"snapshot_schedule_policy.0.schedule.0.hourly_schedule", "snapshot_schedule_policy.0.schedule.0.daily_schedule", "snapshot_schedule_policy.0.schedule.0.weekly_schedule"}, }, "hourly_schedule": { Type: schema.TypeList, @@ -127,6 +128,7 @@ where HH : [00-23] and MM : [00-00] GMT.`, }, }, }, + ExactlyOneOf: 
[]string{"snapshot_schedule_policy.0.schedule.0.hourly_schedule", "snapshot_schedule_policy.0.schedule.0.daily_schedule", "snapshot_schedule_policy.0.schedule.0.weekly_schedule"}, }, "weekly_schedule": { Type: schema.TypeList, @@ -148,6 +150,7 @@ where HH : [00-23] and MM : [00-00] GMT.`, }, }, }, + ExactlyOneOf: []string{"snapshot_schedule_policy.0.schedule.0.hourly_schedule", "snapshot_schedule_policy.0.schedule.0.daily_schedule", "snapshot_schedule_policy.0.schedule.0.weekly_schedule"}, }, }, }, @@ -188,17 +191,19 @@ Valid options are KEEP_AUTO_SNAPSHOTS and APPLY_RETENTION_POLICY`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "guest_flush": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Whether to perform a 'guest aware' snapshot.`, + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Whether to perform a 'guest aware' snapshot.`, + AtLeastOneOf: []string{"snapshot_schedule_policy.0.snapshot_properties.0.labels", "snapshot_schedule_policy.0.snapshot_properties.0.storage_locations", "snapshot_schedule_policy.0.snapshot_properties.0.guest_flush"}, }, "labels": { - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - Description: `A set of key-value pairs.`, - Elem: &schema.Schema{Type: schema.TypeString}, + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: `A set of key-value pairs.`, + Elem: &schema.Schema{Type: schema.TypeString}, + AtLeastOneOf: []string{"snapshot_schedule_policy.0.snapshot_properties.0.labels", "snapshot_schedule_policy.0.snapshot_properties.0.storage_locations", "snapshot_schedule_policy.0.snapshot_properties.0.guest_flush"}, }, "storage_locations": { Type: schema.TypeSet, @@ -209,7 +214,8 @@ Valid options are KEEP_AUTO_SNAPSHOTS and APPLY_RETENTION_POLICY`, Elem: &schema.Schema{ Type: schema.TypeString, }, - Set: schema.HashString, + Set: schema.HashString, + AtLeastOneOf: []string{"snapshot_schedule_policy.0.snapshot_properties.0.labels", "snapshot_schedule_policy.0.snapshot_properties.0.storage_locations", "snapshot_schedule_policy.0.snapshot_properties.0.guest_flush"}, }, }, }, @@ -291,7 +297,7 @@ func resourceComputeResourcePolicyCreate(d *schema.ResourceData, meta interface{ } // Store the ID now - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/resourcePolicies/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -406,7 +412,7 @@ func resourceComputeResourcePolicyImport(d *schema.ResourceData, meta interface{ } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/resourcePolicies/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/google/resource_compute_resource_policy_generated_test.go b/google/resource_compute_resource_policy_generated_test.go index 8b8e59f84cb..92d0648b4aa 100644 --- a/google/resource_compute_resource_policy_generated_test.go +++ b/google/resource_compute_resource_policy_generated_test.go @@ -51,13 +51,13 @@ func TestAccComputeResourcePolicy_resourcePolicyBasicExample(t *testing.T) { func testAccComputeResourcePolicy_resourcePolicyBasicExample(context map[string]interface{}) string { return Nprintf(` resource "google_compute_resource_policy" "foo" { - name = "policy%{random_suffix}" + name = "policy%{random_suffix}" region = "us-central1" snapshot_schedule_policy { schedule { 
daily_schedule { days_in_cycle = 1 - start_time = "04:00" + start_time = "04:00" } } } @@ -92,17 +92,17 @@ func TestAccComputeResourcePolicy_resourcePolicyFullExample(t *testing.T) { func testAccComputeResourcePolicy_resourcePolicyFullExample(context map[string]interface{}) string { return Nprintf(` resource "google_compute_resource_policy" "bar" { - name = "policy%{random_suffix}" + name = "policy%{random_suffix}" region = "us-central1" snapshot_schedule_policy { schedule { hourly_schedule { hours_in_cycle = 20 - start_time = "23:00" + start_time = "23:00" } } retention_policy { - max_retention_days = 10 + max_retention_days = 10 on_source_disk_delete = "KEEP_AUTO_SNAPSHOTS" } snapshot_properties { @@ -110,7 +110,7 @@ resource "google_compute_resource_policy" "bar" { my_label = "value" } storage_locations = ["us"] - guest_flush = true + guest_flush = true } } } diff --git a/google/resource_compute_route.go b/google/resource_compute_route.go index 11eedf4db9f..eafc258a5c6 100644 --- a/google/resource_compute_route.go +++ b/google/resource_compute_route.go @@ -87,6 +87,7 @@ partial valid URL: * 'projects/project/global/gateways/default-internet-gateway' * 'global/gateways/default-internet-gateway' * The string 'default-internet-gateway'.`, + ExactlyOneOf: []string{"next_hop_gateway", "next_hop_instance", "next_hop_ip", "next_hop_vpn_tunnel", "next_hop_ilb"}, }, "next_hop_instance": { Type: schema.TypeString, @@ -99,13 +100,15 @@ You can specify this as a full or partial URL. For example: * 'projects/project/zones/zone/instances/instance' * 'zones/zone/instances/instance' * Just the instance name, with the zone in 'next_hop_instance_zone'.`, + ExactlyOneOf: []string{"next_hop_gateway", "next_hop_instance", "next_hop_ip", "next_hop_vpn_tunnel", "next_hop_ilb"}, }, "next_hop_ip": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `Network IP address of an instance that should handle matching packets.`, + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Network IP address of an instance that should handle matching packets.`, + ExactlyOneOf: []string{"next_hop_gateway", "next_hop_instance", "next_hop_ip", "next_hop_vpn_tunnel", "next_hop_ilb"}, }, "next_hop_vpn_tunnel": { Type: schema.TypeString, @@ -113,6 +116,7 @@ You can specify this as a full or partial URL. 
For example: ForceNew: true, DiffSuppressFunc: compareSelfLinkOrResourceName, Description: `URL to a VpnTunnel that should handle matching packets.`, + ExactlyOneOf: []string{"next_hop_gateway", "next_hop_instance", "next_hop_ip", "next_hop_vpn_tunnel", "next_hop_ilb"}, }, "priority": { Type: schema.TypeInt, @@ -242,7 +246,7 @@ func resourceComputeRouteCreate(d *schema.ResourceData, meta interface{}) error } // Store the ID now - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/global/routes/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -392,7 +396,7 @@ func resourceComputeRouteImport(d *schema.ResourceData, meta interface{}) ([]*sc } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/global/routes/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -493,11 +497,7 @@ func expandComputeRouteTags(v interface{}, d TerraformResourceData, config *Conf func expandComputeRouteNextHopGateway(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { if v == "default-internet-gateway" { - project, err := getProject(d, config) - if err != nil { - return nil, err - } - return fmt.Sprintf("projects/%s/global/gateways/default-internet-gateway", project), nil + return replaceVars(d, config, "projects/{{project}}/global/gateways/default-internet-gateway") } else { return v, nil } diff --git a/google/resource_compute_route_generated_test.go b/google/resource_compute_route_generated_test.go index 5cb8d31c9d8..96cbb9f081b 100644 --- a/google/resource_compute_route_generated_test.go +++ b/google/resource_compute_route_generated_test.go @@ -53,7 +53,7 @@ func testAccComputeRoute_routeBasicExample(context map[string]interface{}) strin resource "google_compute_route" "default" { name = "network-route%{random_suffix}" dest_range = "15.0.0.0/24" - network = "${google_compute_network.default.name}" + network = google_compute_network.default.name next_hop_ip = "10.132.1.5" priority = 100 } diff --git a/google/resource_compute_router.go b/google/resource_compute_router.go index bd21cbcf2d7..6d574c89d12 100644 --- a/google/resource_compute_router.go +++ b/google/resource_compute_router.go @@ -111,17 +111,17 @@ ranges will be advertised in addition to any specified groups. Leave this field blank to advertise no custom IP ranges.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "description": { - Type: schema.TypeString, - Optional: true, - Description: `User-specified description for the IP range.`, - }, "range": { Type: schema.TypeString, - Optional: true, + Required: true, Description: `The IP range to advertise. 
The value must be a CIDR-formatted string.`, }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `User-specified description for the IP range.`, + }, }, }, }, @@ -218,7 +218,7 @@ func resourceComputeRouterCreate(d *schema.ResourceData, meta interface{}) error } // Store the ID now - id, err := replaceVars(d, config, "{{region}}/{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/routers/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -407,7 +407,7 @@ func resourceComputeRouterImport(d *schema.ResourceData, meta interface{}) ([]*s } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{region}}/{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/routers/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/google/resource_compute_router_generated_test.go b/google/resource_compute_router_generated_test.go index e41fd7b7420..93375926bfa 100644 --- a/google/resource_compute_router_generated_test.go +++ b/google/resource_compute_router_generated_test.go @@ -52,7 +52,7 @@ func testAccComputeRouter_routerBasicExample(context map[string]interface{}) str return Nprintf(` resource "google_compute_router" "foobar" { name = "my-router%{random_suffix}" - network = "${google_compute_network.foobar.name}" + network = google_compute_network.foobar.name bgp { asn = 64514 advertise_mode = "CUSTOM" @@ -67,7 +67,7 @@ resource "google_compute_router" "foobar" { } resource "google_compute_network" "foobar" { - name = "my-network%{random_suffix}" + name = "my-network%{random_suffix}" auto_create_subnetworks = false } `, context) diff --git a/google/resource_compute_router_peer.go b/google/resource_compute_router_peer.go index 88ec144407c..0201ca16d50 100644 --- a/google/resource_compute_router_peer.go +++ b/google/resource_compute_router_peer.go @@ -85,7 +85,7 @@ func resourceComputeRouterPeer() *schema.Resource { }, "range": { Type: schema.TypeString, - Optional: true, + Required: true, }, }, }, diff --git a/google/resource_compute_security_policy.go b/google/resource_compute_security_policy.go index 566c05bd084..6f560a7f8df 100644 --- a/google/resource_compute_security_policy.go +++ b/google/resource_compute_security_policy.go @@ -19,7 +19,7 @@ func resourceComputeSecurityPolicy() *schema.Resource { Update: resourceComputeSecurityPolicyUpdate, Delete: resourceComputeSecurityPolicyDelete, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + State: resourceSecurityPolicyStateImporter, }, Timeouts: &schema.ResourceTimeout{ @@ -148,7 +148,11 @@ func resourceComputeSecurityPolicyCreate(d *schema.ResourceData, meta interface{ return errwrap.Wrapf("Error creating SecurityPolicy: {{err}}", err) } - d.SetId(securityPolicy.Name) + id, err := replaceVars(d, config, "projects/{{project}}/global/securityPolicies/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) err = computeSharedOperationWaitTime(config.clientCompute, op, project, int(d.Timeout(schema.TimeoutCreate).Minutes()), fmt.Sprintf("Creating SecurityPolicy %q", sp)) if err != nil { @@ -166,7 +170,8 @@ func resourceComputeSecurityPolicyRead(d *schema.ResourceData, meta interface{}) return err } - securityPolicy, err := config.clientComputeBeta.SecurityPolicies.Get(project, d.Id()).Do() + sp := d.Get("name").(string) + securityPolicy, err := 
config.clientComputeBeta.SecurityPolicies.Get(project, sp).Do() if err != nil { return handleNotFoundError(err, d, fmt.Sprintf("SecurityPolicy %q", d.Id())) } @@ -191,7 +196,7 @@ func resourceComputeSecurityPolicyUpdate(d *schema.ResourceData, meta interface{ return err } - sp := d.Id() + sp := d.Get("name").(string) if d.HasChange("description") { securityPolicy := &compute.SecurityPolicy{ @@ -282,7 +287,7 @@ func resourceComputeSecurityPolicyDelete(d *schema.ResourceData, meta interface{ } // Delete the SecurityPolicy - op, err := config.clientComputeBeta.SecurityPolicies.Delete(project, d.Id()).Do() + op, err := config.clientComputeBeta.SecurityPolicies.Delete(project, d.Get("name").(string)).Do() if err != nil { return errwrap.Wrapf("Error deleting SecurityPolicy: {{err}}", err) } @@ -363,3 +368,19 @@ func flattenSecurityPolicyRules(rules []*compute.SecurityPolicyRule) []map[strin } return rulesSchema } + +func resourceSecurityPolicyStateImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*Config) + if err := parseImportId([]string{"projects/(?P<project>[^/]+)/global/securityPolicies/(?P<name>[^/]+)", "(?P<project>[^/]+)/(?P<name>[^/]+)", "(?P<name>[^/]+)"}, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := replaceVars(d, config, "projects/{{project}}/global/securityPolicies/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} diff --git a/google/resource_compute_security_policy_test.go b/google/resource_compute_security_policy_test.go index 0cc9124f0f1..69700ee37b4 100644 --- a/google/resource_compute_security_policy_test.go +++ b/google/resource_compute_security_policy_test.go @@ -101,7 +101,7 @@ func testAccCheckComputeSecurityPolicyDestroy(s *terraform.State) error { continue } - pol := rs.Primary.ID + pol := rs.Primary.Attributes["name"] _, err := config.clientComputeBeta.SecurityPolicies.Get(config.Project, pol).Do() if err == nil { diff --git a/google/resource_compute_snapshot.go b/google/resource_compute_snapshot.go index eea0538102d..6441effa22a 100644 --- a/google/resource_compute_snapshot.go +++ b/google/resource_compute_snapshot.go @@ -85,7 +85,7 @@ source snapshot is protected by a customer-supplied encryption key.`, Schema: map[string]*schema.Schema{ "raw_key": { Type: schema.TypeString, - Optional: true, + Required: true, ForceNew: true, Description: `Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to either encrypt or decrypt this resource.`, @@ -173,32 +173,6 @@ creation/deletion.`, Type: schema.TypeString, Computed: true, }, - - "snapshot_encryption_key_raw": { - Type: schema.TypeString, - Optional: true, - Sensitive: true, - Removed: "Use snapshot_encryption_key.raw_key instead.", - }, - - "snapshot_encryption_key_sha256": { - Type: schema.TypeString, - Computed: true, - Removed: "Use snapshot_encryption_key.sha256 instead.", - }, - - "source_disk_encryption_key_raw": { - Type: schema.TypeString, - Optional: true, - Sensitive: true, - Removed: "Use source_disk_encryption_key.raw_key instead.", - }, - - "source_disk_encryption_key_sha256": { - Type: schema.TypeString, - Computed: true, - Removed: "Use source_disk_encryption_key.sha256 instead.", - }, "project": { Type: schema.TypeString, Optional: true, @@ -282,7 +256,7 @@ func resourceComputeSnapshotCreate(d *schema.ResourceData, meta interface{}) err } // Store the ID now - id, err := replaceVars(d, config, "{{name}}") 
+ id, err := replaceVars(d, config, "projects/{{project}}/global/snapshots/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -490,7 +464,7 @@ func resourceComputeSnapshotImport(d *schema.ResourceData, meta interface{}) ([] } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/global/snapshots/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/google/resource_compute_snapshot_generated_test.go b/google/resource_compute_snapshot_generated_test.go index d0c78b17273..c121e275733 100644 --- a/google/resource_compute_snapshot_generated_test.go +++ b/google/resource_compute_snapshot_generated_test.go @@ -52,25 +52,25 @@ func TestAccComputeSnapshot_snapshotBasicExample(t *testing.T) { func testAccComputeSnapshot_snapshotBasicExample(context map[string]interface{}) string { return Nprintf(` resource "google_compute_snapshot" "snapshot" { - name = "my-snapshot%{random_suffix}" - source_disk = "${google_compute_disk.persistent.name}" - zone = "us-central1-a" - labels = { - my_label = "value" - } + name = "my-snapshot%{random_suffix}" + source_disk = google_compute_disk.persistent.name + zone = "us-central1-a" + labels = { + my_label = "value" + } } data "google_compute_image" "debian" { - family = "debian-9" - project = "debian-cloud" + family = "debian-9" + project = "debian-cloud" } resource "google_compute_disk" "persistent" { - name = "debian-disk%{random_suffix}" - image = "${data.google_compute_image.debian.self_link}" - size = 10 - type = "pd-ssd" - zone = "us-central1-a" + name = "debian-disk%{random_suffix}" + image = data.google_compute_image.debian.self_link + size = 10 + type = "pd-ssd" + zone = "us-central1-a" } `, context) } diff --git a/google/resource_compute_ssl_certificate.go b/google/resource_compute_ssl_certificate.go index 8761e663ca4..8e85de5ba7a 100644 --- a/google/resource_compute_ssl_certificate.go +++ b/google/resource_compute_ssl_certificate.go @@ -168,7 +168,7 @@ func resourceComputeSslCertificateCreate(d *schema.ResourceData, meta interface{ } // Store the ID now - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/global/sslCertificates/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -288,7 +288,7 @@ func resourceComputeSslCertificateImport(d *schema.ResourceData, meta interface{ } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/global/sslCertificates/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/google/resource_compute_ssl_certificate_generated_test.go b/google/resource_compute_ssl_certificate_generated_test.go index 253dfda2127..89857d16163 100644 --- a/google/resource_compute_ssl_certificate_generated_test.go +++ b/google/resource_compute_ssl_certificate_generated_test.go @@ -54,8 +54,8 @@ func testAccComputeSslCertificate_sslCertificateBasicExample(context map[string] resource "google_compute_ssl_certificate" "default" { name_prefix = "my-certificate-" description = "a description" - private_key = "${file("test-fixtures/ssl_cert/test.key")}" - certificate = "${file("test-fixtures/ssl_cert/test.crt")}" + private_key = file("test-fixtures/ssl_cert/test.key") + certificate = file("test-fixtures/ssl_cert/test.crt") lifecycle { create_before_destroy = true @@ 
-95,9 +95,9 @@ func testAccComputeSslCertificate_sslCertificateRandomProviderExample(context ma resource "google_compute_ssl_certificate" "default" { # The name will contain 8 random hex digits, # e.g. "my-certificate-48ab27cd2a" - name = "${random_id.certificate.hex}" - private_key = "${file("test-fixtures/ssl_cert/test.key")}" - certificate = "${file("test-fixtures/ssl_cert/test.crt")}" + name = random_id.certificate.hex + private_key = file("test-fixtures/ssl_cert/test.key") + certificate = file("test-fixtures/ssl_cert/test.crt") lifecycle { create_before_destroy = true @@ -110,8 +110,8 @@ resource "random_id" "certificate" { # For security, do not expose raw certificate values in the output keepers = { - private_key = "${base64sha256(file("test-fixtures/ssl_cert/test.key"))}" - certificate = "${base64sha256(file("test-fixtures/ssl_cert/test.crt"))}" + private_key = filebase64sha256("test-fixtures/ssl_cert/test.key") + certificate = filebase64sha256("test-fixtures/ssl_cert/test.crt") } } `, context) @@ -156,8 +156,8 @@ func testAccComputeSslCertificate_sslCertificateTargetHttpsProxiesExample(contex resource "google_compute_ssl_certificate" "default" { name_prefix = "my-certificate-" - private_key = "${file("test-fixtures/ssl_cert/test.key")}" - certificate = "${file("test-fixtures/ssl_cert/test.crt")}" + private_key = file("test-fixtures/ssl_cert/test.key") + certificate = file("test-fixtures/ssl_cert/test.crt") lifecycle { create_before_destroy = true @@ -166,15 +166,15 @@ resource "google_compute_ssl_certificate" "default" { resource "google_compute_target_https_proxy" "default" { name = "test-proxy%{random_suffix}" - url_map = "${google_compute_url_map.default.self_link}" - ssl_certificates = ["${google_compute_ssl_certificate.default.self_link}"] + url_map = google_compute_url_map.default.self_link + ssl_certificates = [google_compute_ssl_certificate.default.self_link] } resource "google_compute_url_map" "default" { name = "url-map%{random_suffix}" description = "a description" - default_service = "${google_compute_backend_service.default.self_link}" + default_service = google_compute_backend_service.default.self_link host_rule { hosts = ["mysite.com"] @@ -183,11 +183,11 @@ resource "google_compute_url_map" "default" { path_matcher { name = "allpaths" - default_service = "${google_compute_backend_service.default.self_link}" + default_service = google_compute_backend_service.default.self_link path_rule { paths = ["/*"] - service = "${google_compute_backend_service.default.self_link}" + service = google_compute_backend_service.default.self_link } } } @@ -198,7 +198,7 @@ resource "google_compute_backend_service" "default" { protocol = "HTTP" timeout_sec = 10 - health_checks = ["${google_compute_http_health_check.default.self_link}"] + health_checks = [google_compute_http_health_check.default.self_link] } resource "google_compute_http_health_check" "default" { diff --git a/google/resource_compute_ssl_certificate_test.go b/google/resource_compute_ssl_certificate_test.go index 20cde1201f0..4970fef84f1 100644 --- a/google/resource_compute_ssl_certificate_test.go +++ b/google/resource_compute_ssl_certificate_test.go @@ -45,14 +45,16 @@ func testAccCheckComputeSslCertificateExists(n string) resource.TestCheckFunc { } config := testAccProvider.Meta().(*Config) + // We don't specify a name, but it is saved during create + name := rs.Primary.Attributes["name"] found, err := config.clientCompute.SslCertificates.Get( - config.Project, rs.Primary.ID).Do() + config.Project, name).Do() if err != nil 
{ return err } - if found.Name != rs.Primary.ID { + if found.Name != name { return fmt.Errorf("Certificate not found") } diff --git a/google/resource_compute_ssl_policy.go b/google/resource_compute_ssl_policy.go index 9b161a14eee..7493fa7d1c3 100644 --- a/google/resource_compute_ssl_policy.go +++ b/google/resource_compute_ssl_policy.go @@ -213,7 +213,7 @@ func resourceComputeSslPolicyCreate(d *schema.ResourceData, meta interface{}) er } // Store the ID now - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/global/sslPolicies/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -404,7 +404,7 @@ func resourceComputeSslPolicyImport(d *schema.ResourceData, meta interface{}) ([ } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/global/sslPolicies/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/google/resource_compute_ssl_policy_test.go b/google/resource_compute_ssl_policy_test.go index 401a293de67..b63b1e57fc7 100644 --- a/google/resource_compute_ssl_policy_test.go +++ b/google/resource_compute_ssl_policy_test.go @@ -177,7 +177,7 @@ func testAccCheckComputeSslPolicyExists(n string, sslPolicy *compute.SslPolicy) return fmt.Errorf("Error Reading SSL Policy %s: %s", name, err) } - if found.Name != rs.Primary.ID { + if found.Name != name { return fmt.Errorf("SSL Policy not found") } diff --git a/google/resource_compute_subnetwork.go b/google/resource_compute_subnetwork.go index fbc95876daf..76418b02eee 100644 --- a/google/resource_compute_subnetwork.go +++ b/google/resource_compute_subnetwork.go @@ -19,7 +19,6 @@ import ( "log" "net" "reflect" - "strings" "time" "github.com/apparentlymart/go-cidr/cidr" @@ -49,13 +48,6 @@ func isShrinkageIpCidr(old, new, _ interface{}) bool { return true } -func splitSubnetID(id string) (region string, name string) { - parts := strings.Split(id, "/") - region = parts[0] - name = parts[1] - return -} - func resourceComputeSubnetwork() *schema.Resource { return &schema.Resource{ Create: resourceComputeSubnetworkCreate, @@ -117,19 +109,12 @@ Only networks that are in the distributed mode can have subnetworks.`, you create the resource. This field can be set only at resource creation time.`, }, - "enable_flow_logs": { - Type: schema.TypeBool, - Computed: true, - Optional: true, - Deprecated: "This field is being removed in favor of log_config. If log_config is present, flow logs are enabled.", - Description: `Whether to enable flow logging for this subnetwork.`, - }, "log_config": { Type: schema.TypeList, - Computed: true, Optional: true, Description: `Denotes the logging options for the subnetwork flow logs. If logging is enabled -logs will be exported to Stackdriver.`, +logs will be exported to Stackdriver. This field cannot be set if the 'purpose' of this +subnetwork is 'INTERNAL_HTTPS_LOAD_BALANCER'`, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -143,7 +128,8 @@ interval time will reduce the amount of generated flow logs for long lasting connections. Default is an interval of 5 seconds per connection. 
Possible values are INTERVAL_5_SEC, INTERVAL_30_SEC, INTERVAL_1_MIN, INTERVAL_5_MIN, INTERVAL_10_MIN, INTERVAL_15_MIN`, - Default: "INTERVAL_5_SEC", + Default: "INTERVAL_5_SEC", + AtLeastOneOf: []string{"log_config.0.aggregation_interval", "log_config.0.flow_sampling", "log_config.0.metadata"}, }, "flow_sampling": { Type: schema.TypeFloat, @@ -153,7 +139,8 @@ The value of the field must be in [0, 1]. Set the sampling rate of VPC flow logs within the subnetwork where 1.0 means all collected logs are reported and 0.0 means no logs are reported. Default is 0.5 which means half of all collected logs are reported.`, - Default: 0.5, + Default: 0.5, + AtLeastOneOf: []string{"log_config.0.aggregation_interval", "log_config.0.flow_sampling", "log_config.0.metadata"}, }, "metadata": { Type: schema.TypeString, @@ -162,7 +149,8 @@ half of all collected logs are reported.`, Description: `Can only be specified if VPC flow logging for this subnetwork is enabled. Configures whether metadata fields should be added to the reported VPC flow logs. Default is 'INCLUDE_ALL_METADATA'.`, - Default: "INCLUDE_ALL_METADATA", + Default: "INCLUDE_ALL_METADATA", + AtLeastOneOf: []string{"log_config.0.aggregation_interval", "log_config.0.flow_sampling", "log_config.0.metadata"}, }, }, }, @@ -233,6 +221,12 @@ updates of this resource.`, Description: `The gateway address for default routes to reach destination addresses outside this subnetwork.`, }, + "enable_flow_logs": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + Removed: "This field is being removed in favor of log_config. If log_config is present, flow logs are enabled. Please remove this field", + }, "project": { Type: schema.TypeString, Optional: true, @@ -317,12 +311,6 @@ func resourceComputeSubnetworkCreate(d *schema.ResourceData, meta interface{}) e } else if v, ok := d.GetOkExists("network"); !isEmptyValue(reflect.ValueOf(networkProp)) && (ok || !reflect.DeepEqual(v, networkProp)) { obj["network"] = networkProp } - enableFlowLogsProp, err := expandComputeSubnetworkEnableFlowLogs(d.Get("enable_flow_logs"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_flow_logs"); ok || !reflect.DeepEqual(v, enableFlowLogsProp) { - obj["enableFlowLogs"] = enableFlowLogsProp - } fingerprintProp, err := expandComputeSubnetworkFingerprint(d.Get("fingerprint"), d, config) if err != nil { return err @@ -350,7 +338,7 @@ func resourceComputeSubnetworkCreate(d *schema.ResourceData, meta interface{}) e logConfigProp, err := expandComputeSubnetworkLogConfig(d.Get("log_config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("log_config"); !isEmptyValue(reflect.ValueOf(logConfigProp)) && (ok || !reflect.DeepEqual(v, logConfigProp)) { + } else if v, ok := d.GetOkExists("log_config"); ok || !reflect.DeepEqual(v, logConfigProp) { obj["logConfig"] = logConfigProp } @@ -436,9 +424,6 @@ func resourceComputeSubnetworkRead(d *schema.ResourceData, meta interface{}) err if err := d.Set("network", flattenComputeSubnetworkNetwork(res["network"], d)); err != nil { return fmt.Errorf("Error reading Subnetwork: %s", err) } - if err := d.Set("enable_flow_logs", flattenComputeSubnetworkEnableFlowLogs(res["enableFlowLogs"], d)); err != nil { - return fmt.Errorf("Error reading Subnetwork: %s", err) - } if err := d.Set("fingerprint", flattenComputeSubnetworkFingerprint(res["fingerprint"], d)); err != nil { return fmt.Errorf("Error reading Subnetwork: %s", err) } @@ -506,57 +491,6 @@ func resourceComputeSubnetworkUpdate(d 
*schema.ResourceData, meta interface{}) e d.SetPartial("ip_cidr_range") } - if d.HasChange("enable_flow_logs") { - obj := make(map[string]interface{}) - - getUrl, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/subnetworks/{{name}}") - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - getRes, err := sendRequest(config, "GET", project, getUrl, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeSubnetwork %q", d.Id())) - } - - obj["fingerprint"] = getRes["fingerprint"] - - enableFlowLogsProp, err := expandComputeSubnetworkEnableFlowLogs(d.Get("enable_flow_logs"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_flow_logs"); ok || !reflect.DeepEqual(v, enableFlowLogsProp) { - obj["enableFlowLogs"] = enableFlowLogsProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/subnetworks/{{name}}") - if err != nil { - return err - } - res, err := sendRequestWithTimeout(config, "PATCH", project, url, obj, d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("Error updating Subnetwork %q: %s", d.Id(), err) - } - - op := &compute.Operation{} - err = Convert(res, op) - if err != nil { - return err - } - - err = computeOperationWaitTime( - config.clientCompute, op, project, "Updating Subnetwork", - int(d.Timeout(schema.TimeoutUpdate).Minutes())) - - if err != nil { - return err - } - - d.SetPartial("enable_flow_logs") - } if d.HasChange("secondary_ip_range") { obj := make(map[string]interface{}) @@ -665,7 +599,7 @@ func resourceComputeSubnetworkUpdate(d *schema.ResourceData, meta interface{}) e logConfigProp, err := expandComputeSubnetworkLogConfig(d.Get("log_config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("log_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, logConfigProp)) { + } else if v, ok := d.GetOkExists("log_config"); ok || !reflect.DeepEqual(v, logConfigProp) { obj["logConfig"] = logConfigProp } @@ -787,10 +721,6 @@ func flattenComputeSubnetworkNetwork(v interface{}, d *schema.ResourceData) inte return ConvertSelfLinkToV1(v.(string)) } -func flattenComputeSubnetworkEnableFlowLogs(v interface{}, d *schema.ResourceData) interface{} { - return v -} - func flattenComputeSubnetworkFingerprint(v interface{}, d *schema.ResourceData) interface{} { return v } @@ -874,10 +804,6 @@ func expandComputeSubnetworkNetwork(v interface{}, d TerraformResourceData, conf return f.RelativeLink(), nil } -func expandComputeSubnetworkEnableFlowLogs(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - func expandComputeSubnetworkFingerprint(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } @@ -933,21 +859,27 @@ func expandComputeSubnetworkRegion(v interface{}, d TerraformResourceData, confi func expandComputeSubnetworkLogConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { l := v.([]interface{}) + transformed := make(map[string]interface{}) if len(l) == 0 || l[0] == nil { - return nil, nil + purpose, ok := d.GetOkExists("purpose") + + if ok && purpose.(string) == "INTERNAL_HTTPS_LOAD_BALANCER" { + // Subnetworks for L7ILB do not accept any values for logConfig + return nil, nil + } + // send enable = false to ensure logging is disabled if there is no config + transformed["enable"] = false + return 
transformed, nil } + raw := l[0] original := raw.(map[string]interface{}) - v, ok := d.GetOkExists("enable_flow_logs") - - transformed := make(map[string]interface{}) - if !ok || v.(bool) { - transformed["enable"] = true - transformed["aggregationInterval"] = original["aggregation_interval"] - transformed["flowSampling"] = original["flow_sampling"] - transformed["metadata"] = original["metadata"] - } + // The log_config block is specified, so logging should be enabled + transformed["enable"] = true + transformed["aggregationInterval"] = original["aggregation_interval"] + transformed["flowSampling"] = original["flow_sampling"] + transformed["metadata"] = original["metadata"] return transformed, nil } diff --git a/google/resource_compute_subnetwork_generated_test.go b/google/resource_compute_subnetwork_generated_test.go index dc99e0f8050..fcefdc999c3 100644 --- a/google/resource_compute_subnetwork_generated_test.go +++ b/google/resource_compute_subnetwork_generated_test.go @@ -54,7 +54,7 @@ resource "google_compute_subnetwork" "network-with-private-secondary-ip-ranges" name = "test-subnetwork%{random_suffix}" ip_cidr_range = "10.2.0.0/16" region = "us-central1" - network = "${google_compute_network.custom-test.self_link}" + network = google_compute_network.custom-test.self_link secondary_ip_range { range_name = "tf-test-secondary-range-update1" ip_cidr_range = "192.168.10.0/24" @@ -98,7 +98,7 @@ resource "google_compute_subnetwork" "subnet-with-logging" { name = "log-test-subnetwork%{random_suffix}" ip_cidr_range = "10.2.0.0/16" region = "us-central1" - network = "${google_compute_network.custom-test.self_link}" + network = google_compute_network.custom-test.self_link log_config { aggregation_interval = "INTERVAL_10_MIN" diff --git a/google/resource_compute_subnetwork_test.go b/google/resource_compute_subnetwork_test.go index 95db1986404..bf86bacec48 100644 --- a/google/resource_compute_subnetwork_test.go +++ b/google/resource_compute_subnetwork_test.go @@ -318,8 +318,8 @@ func testAccCheckComputeSubnetworkExists(n string, subnetwork *compute.Subnetwor } config := testAccProvider.Meta().(*Config) - - region, subnet_name := splitSubnetID(rs.Primary.ID) + region := rs.Primary.Attributes["region"] + subnet_name := rs.Primary.Attributes["name"] found, err := config.clientCompute.Subnetworks.Get( config.Project, region, subnet_name).Do() @@ -549,7 +549,6 @@ resource "google_compute_subnetwork" "network-with-flow-logs" { ip_cidr_range = "10.0.0.0/16" region = "us-central1" network = "${google_compute_network.custom-test.self_link}" - enable_flow_logs = true log_config { aggregation_interval = "INTERVAL_5_SEC" flow_sampling = 0.5 @@ -571,7 +570,6 @@ resource "google_compute_subnetwork" "network-with-flow-logs" { ip_cidr_range = "10.0.0.0/16" region = "us-central1" network = "${google_compute_network.custom-test.self_link}" - enable_flow_logs = true log_config { aggregation_interval = "INTERVAL_30_SEC" flow_sampling = 0.8 @@ -593,7 +591,6 @@ resource "google_compute_subnetwork" "network-with-flow-logs" { ip_cidr_range = "10.0.0.0/16" region = "us-central1" network = "${google_compute_network.custom-test.self_link}" - enable_flow_logs = false } `, cnName, subnetworkName) } @@ -610,7 +607,6 @@ resource "google_compute_subnetwork" "network-with-flow-logs" { ip_cidr_range = "10.0.0.0/16" region = "us-central1" network = "${google_compute_network.custom-test.self_link}" - enable_flow_logs = true log_config { aggregation_interval = "INTERVAL_30_SEC" flow_sampling = 0.6 @@ -653,7 +649,6 @@ resource 
"google_compute_subnetwork" "network-with-flow-logs" { ip_cidr_range = "10.0.0.0/16" region = "us-central1" network = "${google_compute_network.custom-test.self_link}" - enable_flow_logs = true log_config { aggregation_interval = "INTERVAL_30_SEC" flow_sampling = 0.8 diff --git a/google/resource_compute_target_http_proxy.go b/google/resource_compute_target_http_proxy.go index f8f6dedf974..587ac5da95b 100644 --- a/google/resource_compute_target_http_proxy.go +++ b/google/resource_compute_target_http_proxy.go @@ -131,7 +131,7 @@ func resourceComputeTargetHttpProxyCreate(d *schema.ResourceData, meta interface } // Store the ID now - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/global/targetHttpProxies/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -302,7 +302,7 @@ func resourceComputeTargetHttpProxyImport(d *schema.ResourceData, meta interface } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/global/targetHttpProxies/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/google/resource_compute_target_http_proxy_generated_test.go b/google/resource_compute_target_http_proxy_generated_test.go index 6a2db4217a0..b3ff0d4dca6 100644 --- a/google/resource_compute_target_http_proxy_generated_test.go +++ b/google/resource_compute_target_http_proxy_generated_test.go @@ -51,13 +51,13 @@ func TestAccComputeTargetHttpProxy_targetHttpProxyBasicExample(t *testing.T) { func testAccComputeTargetHttpProxy_targetHttpProxyBasicExample(context map[string]interface{}) string { return Nprintf(` resource "google_compute_target_http_proxy" "default" { - name = "test-proxy%{random_suffix}" - url_map = "${google_compute_url_map.default.self_link}" + name = "test-proxy%{random_suffix}" + url_map = google_compute_url_map.default.self_link } resource "google_compute_url_map" "default" { - name = "url-map%{random_suffix}" - default_service = "${google_compute_backend_service.default.self_link}" + name = "url-map%{random_suffix}" + default_service = google_compute_backend_service.default.self_link host_rule { hosts = ["mysite.com"] @@ -66,11 +66,11 @@ resource "google_compute_url_map" "default" { path_matcher { name = "allpaths" - default_service = "${google_compute_backend_service.default.self_link}" + default_service = google_compute_backend_service.default.self_link path_rule { paths = ["/*"] - service = "${google_compute_backend_service.default.self_link}" + service = google_compute_backend_service.default.self_link } } } @@ -81,7 +81,7 @@ resource "google_compute_backend_service" "default" { protocol = "HTTP" timeout_sec = 10 - health_checks = ["${google_compute_http_health_check.default.self_link}"] + health_checks = [google_compute_http_health_check.default.self_link] } resource "google_compute_http_health_check" "default" { diff --git a/google/resource_compute_target_http_proxy_test.go b/google/resource_compute_target_http_proxy_test.go index 194b5c8f46c..e8be2e57a0e 100644 --- a/google/resource_compute_target_http_proxy_test.go +++ b/google/resource_compute_target_http_proxy_test.go @@ -54,14 +54,15 @@ func testAccCheckComputeTargetHttpProxyExists(n string) resource.TestCheckFunc { } config := testAccProvider.Meta().(*Config) + name := rs.Primary.Attributes["name"] found, err := config.clientCompute.TargetHttpProxies.Get( - config.Project, rs.Primary.ID).Do() + config.Project, 
name).Do() if err != nil { return err } - if found.Name != rs.Primary.ID { + if found.Name != name { return fmt.Errorf("TargetHttpProxy not found") } diff --git a/google/resource_compute_target_https_proxy.go b/google/resource_compute_target_https_proxy.go index f535993d0e2..65ab3dd44e9 100644 --- a/google/resource_compute_target_https_proxy.go +++ b/google/resource_compute_target_https_proxy.go @@ -181,7 +181,7 @@ func resourceComputeTargetHttpsProxyCreate(d *schema.ResourceData, meta interfac } // Store the ID now - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/global/targetHttpsProxies/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -466,7 +466,7 @@ func resourceComputeTargetHttpsProxyImport(d *schema.ResourceData, meta interfac } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/global/targetHttpsProxies/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/google/resource_compute_target_https_proxy_generated_test.go b/google/resource_compute_target_https_proxy_generated_test.go index 7332375e825..88c16987c1a 100644 --- a/google/resource_compute_target_https_proxy_generated_test.go +++ b/google/resource_compute_target_https_proxy_generated_test.go @@ -52,21 +52,21 @@ func testAccComputeTargetHttpsProxy_targetHttpsProxyBasicExample(context map[str return Nprintf(` resource "google_compute_target_https_proxy" "default" { name = "test-proxy%{random_suffix}" - url_map = "${google_compute_url_map.default.self_link}" - ssl_certificates = ["${google_compute_ssl_certificate.default.self_link}"] + url_map = google_compute_url_map.default.self_link + ssl_certificates = [google_compute_ssl_certificate.default.self_link] } resource "google_compute_ssl_certificate" "default" { name = "my-certificate%{random_suffix}" - private_key = "${file("test-fixtures/ssl_cert/test.key")}" - certificate = "${file("test-fixtures/ssl_cert/test.crt")}" + private_key = file("test-fixtures/ssl_cert/test.key") + certificate = file("test-fixtures/ssl_cert/test.crt") } resource "google_compute_url_map" "default" { name = "url-map%{random_suffix}" description = "a description" - default_service = "${google_compute_backend_service.default.self_link}" + default_service = google_compute_backend_service.default.self_link host_rule { hosts = ["mysite.com"] @@ -75,11 +75,11 @@ resource "google_compute_url_map" "default" { path_matcher { name = "allpaths" - default_service = "${google_compute_backend_service.default.self_link}" + default_service = google_compute_backend_service.default.self_link path_rule { paths = ["/*"] - service = "${google_compute_backend_service.default.self_link}" + service = google_compute_backend_service.default.self_link } } } @@ -90,7 +90,7 @@ resource "google_compute_backend_service" "default" { protocol = "HTTP" timeout_sec = 10 - health_checks = ["${google_compute_http_health_check.default.self_link}"] + health_checks = [google_compute_http_health_check.default.self_link] } resource "google_compute_http_health_check" "default" { diff --git a/google/resource_compute_target_https_proxy_test.go b/google/resource_compute_target_https_proxy_test.go index 4529b86b529..250645783f9 100644 --- a/google/resource_compute_target_https_proxy_test.go +++ b/google/resource_compute_target_https_proxy_test.go @@ -61,14 +61,15 @@ func testAccCheckComputeTargetHttpsProxyExists(n 
string, proxy *compute.TargetHt } config := testAccProvider.Meta().(*Config) + name := rs.Primary.Attributes["name"] found, err := config.clientCompute.TargetHttpsProxies.Get( - config.Project, rs.Primary.ID).Do() + config.Project, name).Do() if err != nil { return err } - if found.Name != rs.Primary.ID { + if found.Name != name { return fmt.Errorf("TargetHttpsProxy not found") } diff --git a/google/resource_compute_target_instance.go b/google/resource_compute_target_instance.go index 57af1dea2e9..f52dbca987c 100644 --- a/google/resource_compute_target_instance.go +++ b/google/resource_compute_target_instance.go @@ -159,7 +159,7 @@ func resourceComputeTargetInstanceCreate(d *schema.ResourceData, meta interface{ } // Store the ID now - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/targetInstances/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -283,7 +283,7 @@ func resourceComputeTargetInstanceImport(d *schema.ResourceData, meta interface{ } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/targetInstances/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/google/resource_compute_target_instance_generated_test.go b/google/resource_compute_target_instance_generated_test.go index d539a13f3e8..723a4555fa7 100644 --- a/google/resource_compute_target_instance_generated_test.go +++ b/google/resource_compute_target_instance_generated_test.go @@ -51,8 +51,8 @@ func TestAccComputeTargetInstance_targetInstanceBasicExample(t *testing.T) { func testAccComputeTargetInstance_targetInstanceBasicExample(context map[string]interface{}) string { return Nprintf(` resource "google_compute_target_instance" "default" { - name = "target%{random_suffix}" - instance = "${google_compute_instance.target-vm.self_link}" + name = "target%{random_suffix}" + instance = google_compute_instance.target-vm.self_link } data "google_compute_image" "vmimage" { @@ -66,8 +66,8 @@ resource "google_compute_instance" "target-vm" { zone = "us-central1-a" boot_disk { - initialize_params{ - image = "${data.google_compute_image.vmimage.self_link}" + initialize_params { + image = data.google_compute_image.vmimage.self_link } } diff --git a/google/resource_compute_target_pool.go b/google/resource_compute_target_pool.go index 9596bd944c1..a51a05dbf6b 100644 --- a/google/resource_compute_target_pool.go +++ b/google/resource_compute_target_pool.go @@ -20,7 +20,7 @@ func resourceComputeTargetPool() *schema.Resource { Delete: resourceComputeTargetPoolDelete, Update: resourceComputeTargetPoolUpdate, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + State: resourceTargetPoolStateImporter, }, Schema: map[string]*schema.Schema{ @@ -207,7 +207,11 @@ func resourceComputeTargetPoolCreate(d *schema.ResourceData, meta interface{}) e } // It probably maybe worked, so store the ID now - d.SetId(tpool.Name) + id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/targetPools/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) err = computeOperationWait(config.clientCompute, op, project, "Creating Target Pool") if err != nil { @@ -229,6 +233,8 @@ func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) e return err } + name := d.Get("name").(string) + d.Partial(true) if 
d.HasChange("health_checks") { @@ -251,7 +257,7 @@ func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) e removeReq.HealthChecks[i] = &compute.HealthCheckReference{HealthCheck: v} } op, err := config.clientCompute.TargetPools.RemoveHealthCheck( - project, region, d.Id(), removeReq).Do() + project, region, name, removeReq).Do() if err != nil { return fmt.Errorf("Error updating health_check: %s", err) } @@ -267,7 +273,7 @@ func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) e addReq.HealthChecks[i] = &compute.HealthCheckReference{HealthCheck: v} } op, err = config.clientCompute.TargetPools.AddHealthCheck( - project, region, d.Id(), addReq).Do() + project, region, name, addReq).Do() if err != nil { return fmt.Errorf("Error updating health_check: %s", err) } @@ -301,7 +307,7 @@ func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) e addReq.Instances[i] = &compute.InstanceReference{Instance: v} } op, err := config.clientCompute.TargetPools.AddInstance( - project, region, d.Id(), addReq).Do() + project, region, name, addReq).Do() if err != nil { return fmt.Errorf("Error updating instances: %s", err) } @@ -317,7 +323,7 @@ func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) e removeReq.Instances[i] = &compute.InstanceReference{Instance: v} } op, err = config.clientCompute.TargetPools.RemoveInstance( - project, region, d.Id(), removeReq).Do() + project, region, name, removeReq).Do() if err != nil { return fmt.Errorf("Error updating instances: %s", err) } @@ -334,7 +340,7 @@ func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) e Target: bpool_name, } op, err := config.clientCompute.TargetPools.SetBackup( - project, region, d.Id(), tref).Do() + project, region, name, tref).Do() if err != nil { return fmt.Errorf("Error updating backup_pool: %s", err) } @@ -375,7 +381,7 @@ func resourceComputeTargetPoolRead(d *schema.ResourceData, meta interface{}) err } tpool, err := config.clientCompute.TargetPools.Get( - project, region, d.Id()).Do() + project, region, d.Get("name").(string)).Do() if err != nil { return handleNotFoundError(err, d, fmt.Sprintf("Target Pool %q", d.Get("name").(string))) } @@ -412,7 +418,7 @@ func resourceComputeTargetPoolDelete(d *schema.ResourceData, meta interface{}) e // Delete the TargetPool op, err := config.clientCompute.TargetPools.Delete( - project, region, d.Id()).Do() + project, region, d.Get("name").(string)).Do() if err != nil { return fmt.Errorf("Error deleting TargetPool: %s", err) } @@ -424,3 +430,24 @@ func resourceComputeTargetPoolDelete(d *schema.ResourceData, meta interface{}) e d.SetId("") return nil } + +func resourceTargetPoolStateImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*Config) + if err := parseImportId([]string{ + "projects/(?P[^/]+)/regions/(?P[^/]+)/targetPools/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/targetPools/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} diff --git a/google/resource_compute_target_pool_test.go b/google/resource_compute_target_pool_test.go index a297d6c2f2b..7a0eafd2e14 100644 --- a/google/resource_compute_target_pool_test.go +++ 
b/google/resource_compute_target_pool_test.go @@ -89,7 +89,7 @@ func testAccCheckComputeTargetPoolDestroy(s *terraform.State) error { } _, err := config.clientCompute.TargetPools.Get( - config.Project, config.Region, rs.Primary.ID).Do() + config.Project, config.Region, rs.Primary.Attributes["name"]).Do() if err == nil { return fmt.Errorf("TargetPool still exists") } @@ -112,12 +112,12 @@ func testAccCheckComputeTargetPoolExists(n string) resource.TestCheckFunc { config := testAccProvider.Meta().(*Config) found, err := config.clientCompute.TargetPools.Get( - config.Project, config.Region, rs.Primary.ID).Do() + config.Project, config.Region, rs.Primary.Attributes["name"]).Do() if err != nil { return err } - if found.Name != rs.Primary.ID { + if found.Name != rs.Primary.Attributes["name"] { return fmt.Errorf("TargetPool not found") } @@ -139,7 +139,7 @@ func testAccCheckComputeTargetPoolHealthCheck(targetPool, healthCheck string) re hcLink := healthCheckRes.Primary.Attributes["self_link"] if targetPoolRes.Primary.Attributes["health_checks.0"] != hcLink { - return fmt.Errorf("Health check not set up. Expected %q", hcLink) + return fmt.Errorf("Health check not set up. Expected %q to equal %q", targetPoolRes.Primary.Attributes["health_checks.0"], hcLink) } return nil diff --git a/google/resource_compute_target_ssl_proxy.go b/google/resource_compute_target_ssl_proxy.go index 2959d68da2a..f2921b9f8ad 100644 --- a/google/resource_compute_target_ssl_proxy.go +++ b/google/resource_compute_target_ssl_proxy.go @@ -177,7 +177,7 @@ func resourceComputeTargetSslProxyCreate(d *schema.ResourceData, meta interface{ } // Store the ID now - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/global/targetSslProxies/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -462,7 +462,7 @@ func resourceComputeTargetSslProxyImport(d *schema.ResourceData, meta interface{ } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/global/targetSslProxies/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/google/resource_compute_target_ssl_proxy_generated_test.go b/google/resource_compute_target_ssl_proxy_generated_test.go index e090ee3985d..a0aadaf74b2 100644 --- a/google/resource_compute_target_ssl_proxy_generated_test.go +++ b/google/resource_compute_target_ssl_proxy_generated_test.go @@ -52,20 +52,20 @@ func testAccComputeTargetSslProxy_targetSslProxyBasicExample(context map[string] return Nprintf(` resource "google_compute_target_ssl_proxy" "default" { name = "test-proxy%{random_suffix}" - backend_service = "${google_compute_backend_service.default.self_link}" - ssl_certificates = ["${google_compute_ssl_certificate.default.self_link}"] + backend_service = google_compute_backend_service.default.self_link + ssl_certificates = [google_compute_ssl_certificate.default.self_link] } resource "google_compute_ssl_certificate" "default" { name = "default-cert%{random_suffix}" - private_key = "${file("test-fixtures/ssl_cert/test.key")}" - certificate = "${file("test-fixtures/ssl_cert/test.crt")}" + private_key = file("test-fixtures/ssl_cert/test.key") + certificate = file("test-fixtures/ssl_cert/test.crt") } resource "google_compute_backend_service" "default" { name = "backend-service%{random_suffix}" protocol = "SSL" - health_checks = ["${google_compute_health_check.default.self_link}"] + health_checks = 
[google_compute_health_check.default.self_link] } resource "google_compute_health_check" "default" { diff --git a/google/resource_compute_target_ssl_proxy_test.go b/google/resource_compute_target_ssl_proxy_test.go index 55c55dbebb8..b2e8c5f5303 100644 --- a/google/resource_compute_target_ssl_proxy_test.go +++ b/google/resource_compute_target_ssl_proxy_test.go @@ -53,14 +53,15 @@ func testAccCheckComputeTargetSslProxy(n, proxyHeader, sslCert string) resource. } config := testAccProvider.Meta().(*Config) + name := rs.Primary.Attributes["name"] found, err := config.clientCompute.TargetSslProxies.Get( - config.Project, rs.Primary.ID).Do() + config.Project, name).Do() if err != nil { return err } - if found.Name != rs.Primary.ID { + if found.Name != name { return fmt.Errorf("TargetSslProxy not found") } diff --git a/google/resource_compute_target_tcp_proxy.go b/google/resource_compute_target_tcp_proxy.go index c8235c5b2dc..a21329c0a24 100644 --- a/google/resource_compute_target_tcp_proxy.go +++ b/google/resource_compute_target_tcp_proxy.go @@ -145,7 +145,7 @@ func resourceComputeTargetTcpProxyCreate(d *schema.ResourceData, meta interface{ } // Store the ID now - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/global/targetTcpProxies/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -354,7 +354,7 @@ func resourceComputeTargetTcpProxyImport(d *schema.ResourceData, meta interface{ } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/global/targetTcpProxies/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/google/resource_compute_target_tcp_proxy_generated_test.go b/google/resource_compute_target_tcp_proxy_generated_test.go index 4662ed0aa20..f1c855c8d4b 100644 --- a/google/resource_compute_target_tcp_proxy_generated_test.go +++ b/google/resource_compute_target_tcp_proxy_generated_test.go @@ -52,15 +52,15 @@ func testAccComputeTargetTcpProxy_targetTcpProxyBasicExample(context map[string] return Nprintf(` resource "google_compute_target_tcp_proxy" "default" { name = "test-proxy%{random_suffix}" - backend_service = "${google_compute_backend_service.default.self_link}" + backend_service = google_compute_backend_service.default.self_link } resource "google_compute_backend_service" "default" { - name = "backend-service%{random_suffix}" - protocol = "TCP" - timeout_sec = 10 + name = "backend-service%{random_suffix}" + protocol = "TCP" + timeout_sec = 10 - health_checks = ["${google_compute_health_check.default.self_link}"] + health_checks = [google_compute_health_check.default.self_link] } resource "google_compute_health_check" "default" { diff --git a/google/resource_compute_target_tcp_proxy_test.go b/google/resource_compute_target_tcp_proxy_test.go index 7214981fd8f..a9b744350ff 100644 --- a/google/resource_compute_target_tcp_proxy_test.go +++ b/google/resource_compute_target_tcp_proxy_test.go @@ -51,14 +51,15 @@ func testAccCheckComputeTargetTcpProxyExists(n string) resource.TestCheckFunc { } config := testAccProvider.Meta().(*Config) + name := rs.Primary.Attributes["name"] found, err := config.clientCompute.TargetTcpProxies.Get( - config.Project, rs.Primary.ID).Do() + config.Project, name).Do() if err != nil { return err } - if found.Name != rs.Primary.ID { + if found.Name != name { return fmt.Errorf("TargetTcpProxy not found") } diff --git 
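With full paths stored as IDs, the exists/destroy checks above switch from rs.Primary.ID to the name attribute when calling the API. A hedged sketch of that lookup factored into a standalone helper; the function name resourceAPIName is illustrative, and the example assumes the SDK's terraform.NewState helper:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/terraform"
)

// resourceAPIName reads the API-side resource name out of Terraform state.
// Under 3.0.0-style IDs, rs.Primary.ID is a full path such as
// "projects/p/global/targetTcpProxies/name", so tests use the name attribute.
func resourceAPIName(s *terraform.State, addr string) (string, error) {
	rs, ok := s.RootModule().Resources[addr]
	if !ok {
		return "", fmt.Errorf("resource %q not found in state", addr)
	}
	if name := rs.Primary.Attributes["name"]; name != "" {
		return name, nil
	}
	return "", fmt.Errorf("resource %q has no name attribute in state", addr)
}

func main() {
	s := terraform.NewState()
	_, err := resourceAPIName(s, "google_compute_target_pool.foobar")
	fmt.Println(err) // the not-found branch, since the state is empty
}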
a/google/resource_compute_url_map.go b/google/resource_compute_url_map.go index a46e856fd35..3cfb7bf4817 100644 --- a/google/resource_compute_url_map.go +++ b/google/resource_compute_url_map.go @@ -282,7 +282,7 @@ func resourceComputeUrlMapCreate(d *schema.ResourceData, meta interface{}) error } // Store the ID now - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/global/urlMaps/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -495,7 +495,7 @@ func resourceComputeUrlMapImport(d *schema.ResourceData, meta interface{}) ([]*s } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/global/urlMaps/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -681,8 +681,6 @@ func flattenComputeUrlMapTestService(v interface{}, d *schema.ResourceData) inte return ConvertSelfLinkToV1(v.(string)) } -// ResourceRef only supports 1 type and UrlMap has references to a BackendBucket or BackendService. Just read the self_link string -// instead of extracting the name and making a self_link out of it. func expandComputeUrlMapDefaultService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } @@ -792,8 +790,6 @@ func expandComputeUrlMapPathMatcher(v interface{}, d TerraformResourceData, conf return req, nil } -// ResourceRef only supports 1 type and UrlMap has references to a BackendBucket or BackendService. Just read the self_link string -// instead of extracting the name and making a self_link out of it. func expandComputeUrlMapPathMatcherDefaultService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } @@ -840,8 +836,6 @@ func expandComputeUrlMapPathMatcherPathRulePaths(v interface{}, d TerraformResou return v, nil } -// ResourceRef only supports 1 type and UrlMap has references to a BackendBucket or BackendService. Just read the self_link string -// instead of extracting the name and making a self_link out of it. func expandComputeUrlMapPathMatcherPathRuleService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } @@ -901,8 +895,6 @@ func expandComputeUrlMapTestPath(v interface{}, d TerraformResourceData, config return v, nil } -// ResourceRef only supports 1 type and UrlMap has references to a BackendBucket or BackendService. Just read the self_link string -// instead of extracting the name and making a self_link out of it. 
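The flatteners above still normalize whatever the API returns through ConvertSelfLinkToV1, while the matching expanders pass the configured value through untouched, since (per the comments being dropped) a service may legally reference either a BackendBucket or a BackendService. Roughly, that normalization amounts to rewriting the version segment of a compute self_link, as in this illustrative sketch (the regex and function name are assumptions, not the provider's exact implementation):

package main

import (
	"fmt"
	"regexp"
)

// toV1SelfLink rewrites the API version segment of a compute self_link to v1
// so that beta- and v1-sourced links compare equal in state.
func toV1SelfLink(link string) string {
	re := regexp.MustCompile(`/compute/[a-z0-9]+/projects/`)
	return re.ReplaceAllString(link, "/compute/v1/projects/")
}

func main() {
	// Prints: https://www.googleapis.com/compute/v1/projects/p/global/backendServices/home
	fmt.Println(toV1SelfLink("https://www.googleapis.com/compute/beta/projects/p/global/backendServices/home"))
}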
func expandComputeUrlMapTestService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } diff --git a/google/resource_compute_url_map_generated_test.go b/google/resource_compute_url_map_generated_test.go index 6e04caadaff..6e841894015 100644 --- a/google/resource_compute_url_map_generated_test.go +++ b/google/resource_compute_url_map_generated_test.go @@ -54,7 +54,7 @@ resource "google_compute_url_map" "urlmap" { name = "urlmap%{random_suffix}" description = "a description" - default_service = "${google_compute_backend_service.home.self_link}" + default_service = google_compute_backend_service.home.self_link host_rule { hosts = ["mysite.com"] @@ -63,26 +63,26 @@ resource "google_compute_url_map" "urlmap" { path_matcher { name = "allpaths" - default_service = "${google_compute_backend_service.home.self_link}" + default_service = google_compute_backend_service.home.self_link path_rule { paths = ["/home"] - service = "${google_compute_backend_service.home.self_link}" + service = google_compute_backend_service.home.self_link } path_rule { paths = ["/login"] - service = "${google_compute_backend_service.login.self_link}" + service = google_compute_backend_service.login.self_link } path_rule { paths = ["/static"] - service = "${google_compute_backend_bucket.static.self_link}" + service = google_compute_backend_bucket.static.self_link } } test { - service = "${google_compute_backend_service.home.self_link}" + service = google_compute_backend_service.home.self_link host = "hi.com" path = "/home" } @@ -94,7 +94,7 @@ resource "google_compute_backend_service" "login" { protocol = "HTTP" timeout_sec = 10 - health_checks = ["${google_compute_http_health_check.default.self_link}"] + health_checks = [google_compute_http_health_check.default.self_link] } resource "google_compute_backend_service" "home" { @@ -103,7 +103,7 @@ resource "google_compute_backend_service" "home" { protocol = "HTTP" timeout_sec = 10 - health_checks = ["${google_compute_http_health_check.default.self_link}"] + health_checks = [google_compute_http_health_check.default.self_link] } resource "google_compute_http_health_check" "default" { @@ -115,7 +115,7 @@ resource "google_compute_http_health_check" "default" { resource "google_compute_backend_bucket" "static" { name = "static-asset-backend-bucket%{random_suffix}" - bucket_name = "${google_storage_bucket.static.name}" + bucket_name = google_storage_bucket.static.name enable_cdn = true } diff --git a/google/resource_compute_url_map_test.go b/google/resource_compute_url_map_test.go index 9251aabdc0d..82e5d8b087e 100644 --- a/google/resource_compute_url_map_test.go +++ b/google/resource_compute_url_map_test.go @@ -107,14 +107,15 @@ func testAccCheckComputeUrlMapExists(n string) resource.TestCheckFunc { } config := testAccProvider.Meta().(*Config) + name := rs.Primary.Attributes["name"] found, err := config.clientCompute.UrlMaps.Get( - config.Project, rs.Primary.ID).Do() + config.Project, name).Do() if err != nil { return err } - if found.Name != rs.Primary.ID { + if found.Name != name { return fmt.Errorf("Url map not found") } return nil diff --git a/google/resource_compute_vpn_gateway.go b/google/resource_compute_vpn_gateway.go index e52e342ebfe..74dd4be75b2 100644 --- a/google/resource_compute_vpn_gateway.go +++ b/google/resource_compute_vpn_gateway.go @@ -143,7 +143,7 @@ func resourceComputeVpnGatewayCreate(d *schema.ResourceData, meta interface{}) e } // Store the ID now - id, err := replaceVars(d, config, "{{name}}") + id, err := 
replaceVars(d, config, "projects/{{project}}/regions/{{region}}/targetVpnGateways/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -267,7 +267,7 @@ func resourceComputeVpnGatewayImport(d *schema.ResourceData, meta interface{}) ( } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/targetVpnGateways/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/google/resource_compute_vpn_gateway_generated_test.go b/google/resource_compute_vpn_gateway_generated_test.go index ae0d0fa4e7a..7963e496107 100644 --- a/google/resource_compute_vpn_gateway_generated_test.go +++ b/google/resource_compute_vpn_gateway_generated_test.go @@ -52,38 +52,38 @@ func testAccComputeVpnGateway_targetVpnGatewayBasicExample(context map[string]in return Nprintf(` resource "google_compute_vpn_gateway" "target_gateway" { name = "vpn1%{random_suffix}" - network = "${google_compute_network.network1.self_link}" + network = google_compute_network.network1.self_link } resource "google_compute_network" "network1" { - name = "network1%{random_suffix}" + name = "network1%{random_suffix}" } resource "google_compute_address" "vpn_static_ip" { - name = "vpn-static-ip%{random_suffix}" + name = "vpn-static-ip%{random_suffix}" } resource "google_compute_forwarding_rule" "fr_esp" { name = "fr-esp%{random_suffix}" ip_protocol = "ESP" - ip_address = "${google_compute_address.vpn_static_ip.address}" - target = "${google_compute_vpn_gateway.target_gateway.self_link}" + ip_address = google_compute_address.vpn_static_ip.address + target = google_compute_vpn_gateway.target_gateway.self_link } resource "google_compute_forwarding_rule" "fr_udp500" { name = "fr-udp500%{random_suffix}" ip_protocol = "UDP" port_range = "500" - ip_address = "${google_compute_address.vpn_static_ip.address}" - target = "${google_compute_vpn_gateway.target_gateway.self_link}" + ip_address = google_compute_address.vpn_static_ip.address + target = google_compute_vpn_gateway.target_gateway.self_link } resource "google_compute_forwarding_rule" "fr_udp4500" { name = "fr-udp4500%{random_suffix}" ip_protocol = "UDP" port_range = "4500" - ip_address = "${google_compute_address.vpn_static_ip.address}" - target = "${google_compute_vpn_gateway.target_gateway.self_link}" + ip_address = google_compute_address.vpn_static_ip.address + target = google_compute_vpn_gateway.target_gateway.self_link } resource "google_compute_vpn_tunnel" "tunnel1" { @@ -91,22 +91,22 @@ resource "google_compute_vpn_tunnel" "tunnel1" { peer_ip = "15.0.0.120" shared_secret = "a secret message" - target_vpn_gateway = "${google_compute_vpn_gateway.target_gateway.self_link}" + target_vpn_gateway = google_compute_vpn_gateway.target_gateway.self_link depends_on = [ - "google_compute_forwarding_rule.fr_esp", - "google_compute_forwarding_rule.fr_udp500", - "google_compute_forwarding_rule.fr_udp4500", + google_compute_forwarding_rule.fr_esp, + google_compute_forwarding_rule.fr_udp500, + google_compute_forwarding_rule.fr_udp4500, ] } resource "google_compute_route" "route1" { name = "route1%{random_suffix}" - network = "${google_compute_network.network1.name}" + network = google_compute_network.network1.name dest_range = "15.0.0.0/24" priority = 1000 - next_hop_vpn_tunnel = "${google_compute_vpn_tunnel.tunnel1.self_link}" + next_hop_vpn_tunnel = google_compute_vpn_tunnel.tunnel1.self_link } `, context) } diff --git 
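Note that the new templates use the targetVpnGateways collection even though the Terraform type is google_compute_vpn_gateway; the underlying GCE API resource is TargetVpnGateway. A sketch of the canonical ID this produces, assuming the path layout in the template above (the helper name is illustrative):

package main

import "fmt"

// vpnGatewayID builds the canonical resource ID matching the
// "projects/{{project}}/regions/{{region}}/targetVpnGateways/{{name}}"
// template used in the create and import paths above.
func vpnGatewayID(project, region, name string) string {
	return fmt.Sprintf("projects/%s/regions/%s/targetVpnGateways/%s", project, region, name)
}

func main() {
	// Prints: projects/my-proj/regions/us-central1/targetVpnGateways/vpn1
	fmt.Println(vpnGatewayID("my-proj", "us-central1", "vpn1"))
}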
a/google/resource_compute_vpn_tunnel.go b/google/resource_compute_vpn_tunnel.go index f8082e9f7d0..43c055e5bf8 100644 --- a/google/resource_compute_vpn_tunnel.go +++ b/google/resource_compute_vpn_tunnel.go @@ -359,7 +359,7 @@ func resourceComputeVpnTunnelCreate(d *schema.ResourceData, meta interface{}) er } // Store the ID now - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/vpnTunnels/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -504,7 +504,7 @@ func resourceComputeVpnTunnelImport(d *schema.ResourceData, meta interface{}) ([ } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/vpnTunnels/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/google/resource_compute_vpn_tunnel_generated_test.go b/google/resource_compute_vpn_tunnel_generated_test.go index 591cf2675a2..91b9220ebea 100644 --- a/google/resource_compute_vpn_tunnel_generated_test.go +++ b/google/resource_compute_vpn_tunnel_generated_test.go @@ -56,58 +56,58 @@ resource "google_compute_vpn_tunnel" "tunnel1" { peer_ip = "15.0.0.120" shared_secret = "a secret message" - target_vpn_gateway = "${google_compute_vpn_gateway.target_gateway.self_link}" + target_vpn_gateway = google_compute_vpn_gateway.target_gateway.self_link depends_on = [ - "google_compute_forwarding_rule.fr_esp", - "google_compute_forwarding_rule.fr_udp500", - "google_compute_forwarding_rule.fr_udp4500", + google_compute_forwarding_rule.fr_esp, + google_compute_forwarding_rule.fr_udp500, + google_compute_forwarding_rule.fr_udp4500, ] } resource "google_compute_vpn_gateway" "target_gateway" { name = "vpn1%{random_suffix}" - network = "${google_compute_network.network1.self_link}" + network = google_compute_network.network1.self_link } resource "google_compute_network" "network1" { - name = "network1%{random_suffix}" + name = "network1%{random_suffix}" } resource "google_compute_address" "vpn_static_ip" { - name = "vpn-static-ip%{random_suffix}" + name = "vpn-static-ip%{random_suffix}" } resource "google_compute_forwarding_rule" "fr_esp" { name = "fr-esp%{random_suffix}" ip_protocol = "ESP" - ip_address = "${google_compute_address.vpn_static_ip.address}" - target = "${google_compute_vpn_gateway.target_gateway.self_link}" + ip_address = google_compute_address.vpn_static_ip.address + target = google_compute_vpn_gateway.target_gateway.self_link } resource "google_compute_forwarding_rule" "fr_udp500" { name = "fr-udp500%{random_suffix}" ip_protocol = "UDP" port_range = "500" - ip_address = "${google_compute_address.vpn_static_ip.address}" - target = "${google_compute_vpn_gateway.target_gateway.self_link}" + ip_address = google_compute_address.vpn_static_ip.address + target = google_compute_vpn_gateway.target_gateway.self_link } resource "google_compute_forwarding_rule" "fr_udp4500" { name = "fr-udp4500%{random_suffix}" ip_protocol = "UDP" port_range = "4500" - ip_address = "${google_compute_address.vpn_static_ip.address}" - target = "${google_compute_vpn_gateway.target_gateway.self_link}" + ip_address = google_compute_address.vpn_static_ip.address + target = google_compute_vpn_gateway.target_gateway.self_link } resource "google_compute_route" "route1" { name = "route1%{random_suffix}" - network = "${google_compute_network.network1.name}" + network = google_compute_network.network1.name dest_range = 
"15.0.0.0/24" priority = 1000 - next_hop_vpn_tunnel = "${google_compute_vpn_tunnel.tunnel1.self_link}" + next_hop_vpn_tunnel = google_compute_vpn_tunnel.tunnel1.self_link } `, context) } diff --git a/google/resource_container_analysis_note.go b/google/resource_container_analysis_note.go index c1ca33a9cdc..58275e96ed1 100644 --- a/google/resource_container_analysis_note.go +++ b/google/resource_container_analysis_note.go @@ -137,7 +137,7 @@ func resourceContainerAnalysisNoteCreate(d *schema.ResourceData, meta interface{ } // Store the ID now - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/notes/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -273,7 +273,7 @@ func resourceContainerAnalysisNoteImport(d *schema.ResourceData, meta interface{ } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/notes/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/google/resource_container_cluster.go b/google/resource_container_cluster.go index f5518e6f4a3..d24455793e3 100644 --- a/google/resource_container_cluster.go +++ b/google/resource_container_cluster.go @@ -23,7 +23,10 @@ var ( networkConfig = &schema.Resource{ Schema: map[string]*schema.Schema{ "cidr_blocks": { - Type: schema.TypeSet, + Type: schema.TypeSet, + // Despite being the only entry in a nested block, this should be kept + // Optional. Expressing the parent with no entries and omitting the + // parent entirely are semantically different. Optional: true, Elem: cidrBlockConfig, }, @@ -43,9 +46,14 @@ var ( }, } - ipAllocationSubnetFields = []string{"ip_allocation_policy.0.create_subnetwork", "ip_allocation_policy.0.subnetwork_name"} - ipAllocationCidrBlockFields = []string{"ip_allocation_policy.0.cluster_ipv4_cidr_block", "ip_allocation_policy.0.services_ipv4_cidr_block", "ip_allocation_policy.0.node_ipv4_cidr_block"} + ipAllocationCidrBlockFields = []string{"ip_allocation_policy.0.cluster_ipv4_cidr_block", "ip_allocation_policy.0.services_ipv4_cidr_block"} ipAllocationRangeFields = []string{"ip_allocation_policy.0.cluster_secondary_range_name", "ip_allocation_policy.0.services_secondary_range_name"} + + addonsConfigKeys = []string{ + "addons_config.0.http_load_balancing", + "addons_config.0.horizontal_pod_autoscaling", + "addons_config.0.network_policy_config", + } ) func resourceContainerCluster() *schema.Resource { @@ -56,7 +64,6 @@ func resourceContainerCluster() *schema.Resource { Delete: resourceContainerClusterDelete, CustomizeDiff: customdiff.All( - resourceContainerClusterIpAllocationCustomizeDiff, resourceNodeConfigEmptyGuestAccelerator, containerClusterPrivateClusterConfigCustomDiff, ), @@ -103,29 +110,22 @@ func resourceContainerCluster() *schema.Resource { }, "location": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ConflictsWith: []string{"zone", "region"}, + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, }, "region": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Deprecated: "Use location instead", - ConflictsWith: []string{"zone", "location"}, + Type: schema.TypeString, + Optional: true, + Removed: "Use location instead", }, "zone": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Deprecated: "Use location instead", - ConflictsWith: []string{"region", 
"location"}, + Type: schema.TypeString, + Optional: true, + Removed: "Use location instead", }, "node_locations": { @@ -136,11 +136,10 @@ func resourceContainerCluster() *schema.Resource { }, "additional_zones": { - Type: schema.TypeSet, - Optional: true, - Computed: true, - Deprecated: "Use node_locations instead", - Elem: &schema.Schema{Type: schema.TypeString}, + Type: schema.TypeSet, + Optional: true, + Removed: "Use node_locations instead", + Elem: &schema.Schema{Type: schema.TypeString}, }, "addons_config": { @@ -151,59 +150,60 @@ func resourceContainerCluster() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "http_load_balancing": { - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: addonsConfigKeys, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "disabled": { Type: schema.TypeBool, - Optional: true, + Required: true, }, }, }, }, "horizontal_pod_autoscaling": { - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: addonsConfigKeys, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "disabled": { Type: schema.TypeBool, - Optional: true, + Required: true, }, }, }, }, "kubernetes_dashboard": { - Type: schema.TypeList, - Optional: true, - Computed: true, - Deprecated: "The Kubernetes Dashboard addon is deprecated for clusters on GKE.", - MaxItems: 1, + Type: schema.TypeList, + Optional: true, + Removed: "The Kubernetes Dashboard addon is removed for clusters on GKE.", + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "disabled": { Type: schema.TypeBool, Optional: true, - Default: true, }, }, }, }, "network_policy_config": { - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: addonsConfigKeys, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "disabled": { Type: schema.TypeBool, - Optional: true, + Required: true, }, }, }, @@ -248,11 +248,12 @@ func resourceContainerCluster() *schema.Resource { }, "cluster_ipv4_cidr": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: orEmpty(validateRFC1918Network(8, 32)), + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: orEmpty(validateRFC1918Network(8, 32)), + ConflictsWith: []string{"ip_allocation_policy"}, }, "description": { @@ -298,7 +299,7 @@ func resourceContainerCluster() *schema.Resource { "logging_service": { Type: schema.TypeString, Optional: true, - Computed: true, + Default: "logging.googleapis.com/kubernetes", ValidateFunc: validation.StringInSlice([]string{"logging.googleapis.com", "logging.googleapis.com/kubernetes", "none"}, false), }, @@ -340,14 +341,16 @@ func resourceContainerCluster() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "password": { - Type: schema.TypeString, - Optional: true, - Sensitive: true, + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: []string{"master_auth.0.password", "master_auth.0.username", "master_auth.0.client_certificate_config"}, + Sensitive: true, }, "username": { - Type: schema.TypeString, - Optional: true, + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: []string{"master_auth.0.password", "master_auth.0.username", 
"master_auth.0.client_certificate_config"}, }, // Ideally, this would be Optional (and not Computed). @@ -355,11 +358,12 @@ func resourceContainerCluster() *schema.Resource { // though, being unset was considered identical to set // and the issue_client_certificate value being true. "client_certificate_config": { - Type: schema.TypeList, - MaxItems: 1, - Optional: true, - Computed: true, - ForceNew: true, + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Computed: true, + AtLeastOneOf: []string{"master_auth.0.password", "master_auth.0.username", "master_auth.0.client_certificate_config"}, + ForceNew: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "issue_client_certificate": { @@ -405,7 +409,7 @@ func resourceContainerCluster() *schema.Resource { "monitoring_service": { Type: schema.TypeString, Optional: true, - Computed: true, + Default: "monitoring.googleapis.com/kubernetes", ValidateFunc: validation.StringInSlice([]string{"monitoring.googleapis.com", "monitoring.googleapis.com/kubernetes", "none"}, false), }, @@ -426,8 +430,7 @@ func resourceContainerCluster() *schema.Resource { Schema: map[string]*schema.Schema{ "enabled": { Type: schema.TypeBool, - Optional: true, - Default: false, + Required: true, }, "provider": { Type: schema.TypeString, @@ -511,41 +514,13 @@ func resourceContainerCluster() *schema.Resource { }, "ip_allocation_policy": { - Type: schema.TypeList, - MaxItems: 1, - ForceNew: true, - Optional: true, - Computed: true, - ConfigMode: schema.SchemaConfigModeAttr, + Type: schema.TypeList, + MaxItems: 1, + ForceNew: true, + Optional: true, + ConflictsWith: []string{"cluster_ipv4_cidr"}, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "use_ip_aliases": { - Type: schema.TypeBool, - Deprecated: "This field is being removed in 3.0.0. If set to true, remove it from your config. If false, remove i.", - Optional: true, - Default: true, - ForceNew: true, - }, - - // GKE creates subnetwork automatically - "create_subnetwork": { - Type: schema.TypeBool, - Deprecated: "This field is being removed in 3.0.0. Define an explicit google_compute_subnetwork and use subnetwork instead.", - Computed: true, - Optional: true, - ForceNew: true, - ConflictsWith: ipAllocationRangeFields, - }, - - "subnetwork_name": { - Type: schema.TypeString, - Deprecated: "This field is being removed in 3.0.0. Define an explicit google_compute_subnetwork and use subnetwork instead.", - Computed: true, - Optional: true, - ForceNew: true, - ConflictsWith: ipAllocationRangeFields, - }, - // GKE creates/deletes secondary ranges in VPC "cluster_ipv4_cidr_block": { Type: schema.TypeString, @@ -555,6 +530,7 @@ func resourceContainerCluster() *schema.Resource { ConflictsWith: ipAllocationRangeFields, DiffSuppressFunc: cidrOrSizeDiffSuppress, }, + "services_ipv4_cidr_block": { Type: schema.TypeString, Optional: true, @@ -563,15 +539,6 @@ func resourceContainerCluster() *schema.Resource { ConflictsWith: ipAllocationRangeFields, DiffSuppressFunc: cidrOrSizeDiffSuppress, }, - "node_ipv4_cidr_block": { - Type: schema.TypeString, - Deprecated: "This field is being removed in 3.0.0. 
Define an explicit google_compute_subnetwork and use subnetwork instead.", - Computed: true, - Optional: true, - ForceNew: true, - ConflictsWith: ipAllocationRangeFields, - DiffSuppressFunc: cidrOrSizeDiffSuppress, - }, // User manages secondary ranges manually "cluster_secondary_range_name": { @@ -579,14 +546,44 @@ func resourceContainerCluster() *schema.Resource { Optional: true, Computed: true, ForceNew: true, - ConflictsWith: append(ipAllocationSubnetFields, ipAllocationCidrBlockFields...), + ConflictsWith: ipAllocationCidrBlockFields, }, + "services_secondary_range_name": { Type: schema.TypeString, Optional: true, Computed: true, ForceNew: true, - ConflictsWith: append(ipAllocationSubnetFields, ipAllocationCidrBlockFields...), + ConflictsWith: ipAllocationCidrBlockFields, + }, + + "use_ip_aliases": { + Type: schema.TypeBool, + Removed: "This field is removed as of 3.0.0. If previously set to true, remove it from your config. If false, remove it.", + Computed: true, + Optional: true, + }, + + // GKE creates subnetwork automatically + "create_subnetwork": { + Type: schema.TypeBool, + Removed: "This field is removed as of 3.0.0. Define an explicit google_compute_subnetwork and use subnetwork instead.", + Computed: true, + Optional: true, + }, + + "subnetwork_name": { + Type: schema.TypeString, + Removed: "This field is removed as of 3.0.0. Define an explicit google_compute_subnetwork and use subnetwork instead.", + Computed: true, + Optional: true, + }, + + "node_ipv4_cidr_block": { + Type: schema.TypeString, + Removed: "This field is removed as of 3.0.0. Define an explicit google_compute_subnetwork and use subnetwork instead.", + Computed: true, + Optional: true, }, }, }, @@ -607,7 +604,7 @@ func resourceContainerCluster() *schema.Resource { Schema: map[string]*schema.Schema{ "enable_private_endpoint": { Type: schema.TypeBool, - Optional: true, + Required: true, ForceNew: true, DiffSuppressFunc: containerClusterPrivateClusterConfigSuppress, }, @@ -714,36 +711,6 @@ func resourceNodeConfigEmptyGuestAccelerator(diff *schema.ResourceDiff, meta int return nil } -func resourceContainerClusterIpAllocationCustomizeDiff(diff *schema.ResourceDiff, meta interface{}) error { - // separate func to allow unit testing - return resourceContainerClusterIpAllocationCustomizeDiffFunc(diff) -} - -func resourceContainerClusterIpAllocationCustomizeDiffFunc(diff TerraformResourceDiff) error { - o, n := diff.GetChange("ip_allocation_policy") - - oList := o.([]interface{}) - nList := n.([]interface{}) - if len(oList) > 0 || len(nList) == 0 { - // we only care about going from unset to set, so return early if the field was set before - // or is unset now - return nil - } - - // Unset is equivalent to a block where all the values are zero - // This might change if use_ip_aliases ends up defaulting to true server-side. - // The console says it will eventually, but it's unclear whether that's in the API - // too or just client code. - polMap := nList[0].(map[string]interface{}) - for _, v := range polMap { - if !isEmptyValue(reflect.ValueOf(v)) { - // found a non-empty value, so continue with the diff as it was - return nil - } - } - return diff.Clear("ip_allocation_policy") -} - func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) @@ -757,14 +724,6 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er return err } - // When parsing a subnetwork by name, we expect region or zone to be set. 
- // Users may have set location to either value, so set that value. - if isZone(location) { - d.Set("zone", location) - } else { - d.Set("region", location) - } - clusterName := d.Get("name").(string) cluster := &containerBeta.Cluster{ @@ -808,20 +767,7 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er if v, ok := d.GetOk("node_locations"); ok { locationsSet := v.(*schema.Set) if locationsSet.Contains(location) { - return fmt.Errorf("when using a multi-zonal cluster, additional_zones should not contain the original 'zone'") - } - - // GKE requires a full list of node locations - // but when using a multi-zonal cluster our schema only asks for the - // additional zones, so append the cluster location if it's a zone - if isZone(location) { - locationsSet.Add(location) - } - cluster.Locations = convertStringSet(locationsSet) - } else if v, ok := d.GetOk("additional_zones"); ok { - locationsSet := v.(*schema.Set) - if locationsSet.Contains(location) { - return fmt.Errorf("when using a multi-zonal cluster, additional_zones should not contain the original 'zone'") + return fmt.Errorf("when using a multi-zonal cluster, node_locations should not contain the original 'zone'") } // GKE requires a full list of node locations @@ -892,7 +838,7 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er return err } - d.SetId(clusterName) + d.SetId(containerClusterFullName(project, location, clusterName)) // Wait until it's created timeoutInMinutes := int(d.Timeout(schema.TimeoutCreate).Minutes()) @@ -972,16 +918,10 @@ func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) erro } d.Set("location", cluster.Location) - if isZone(cluster.Location) { - d.Set("zone", cluster.Location) - } else { - d.Set("region", cluster.Location) - } locations := schema.NewSet(schema.HashString, convertStringArrToInterface(cluster.Locations)) locations.Remove(cluster.Zone) // Remove the original zone since we only store additional zones d.Set("node_locations", locations) - d.Set("additional_zones", locations) d.Set("endpoint", cluster.Endpoint) if err := d.Set("maintenance_policy", flattenMaintenancePolicy(cluster.MaintenancePolicy)); err != nil { @@ -1150,57 +1090,7 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er d.SetPartial("maintenance_policy") } - // we can only ever see a change to one of additional_zones and node_locations; because - // thy conflict with each other and are each computed, Terraform will suppress the diff - // on one of them even when migrating from one to the other. - if d.HasChange("additional_zones") { - azSetOldI, azSetNewI := d.GetChange("additional_zones") - azSetNew := azSetNewI.(*schema.Set) - azSetOld := azSetOldI.(*schema.Set) - if azSetNew.Contains(location) { - return fmt.Errorf("additional_zones should not contain the original 'zone'") - } - // Since we can't add & remove zones in the same request, first add all the - // zones, then remove the ones we aren't using anymore. - azSet := azSetOld.Union(azSetNew) - - if isZone(location) { - azSet.Add(location) - } - - req := &containerBeta.UpdateClusterRequest{ - Update: &containerBeta.ClusterUpdate{ - DesiredLocations: convertStringSet(azSet), - }, - } - - updateF := updateFunc(req, "updating GKE cluster node locations") - // Call update serially. 
- if err := lockedCall(lockKey, updateF); err != nil { - return err - } - - if isZone(location) { - azSetNew.Add(location) - } - if !azSet.Equal(azSetNew) { - req = &containerBeta.UpdateClusterRequest{ - Update: &containerBeta.ClusterUpdate{ - DesiredLocations: convertStringSet(azSetNew), - }, - } - - updateF := updateFunc(req, "updating GKE cluster node locations") - // Call update serially. - if err := lockedCall(lockKey, updateF); err != nil { - return err - } - } - - log.Printf("[INFO] GKE cluster %s node locations have been updated to %v", d.Id(), azSet.List()) - - d.SetPartial("additional_zones") - } else if d.HasChange("node_locations") { + if d.HasChange("node_locations") { azSetOldI, azSetNewI := d.GetChange("node_locations") azSetNew := azSetNewI.(*schema.Set) azSetOld := azSetOldI.(*schema.Set) @@ -1693,14 +1583,6 @@ func expandClusterAddonsConfig(configured interface{}) *containerBeta.AddonsConf } } - if v, ok := config["kubernetes_dashboard"]; ok && len(v.([]interface{})) > 0 { - addon := v.([]interface{})[0].(map[string]interface{}) - ac.KubernetesDashboard = &containerBeta.KubernetesDashboard{ - Disabled: addon["disabled"].(bool), - ForceSendFields: []string{"Disabled"}, - } - } - if v, ok := config["network_policy_config"]; ok && len(v.([]interface{})) > 0 { addon := v.([]interface{})[0].(map[string]interface{}) ac.NetworkPolicyConfig = &containerBeta.NetworkPolicyConfig{ @@ -1715,20 +1597,17 @@ func expandClusterAddonsConfig(configured interface{}) *containerBeta.AddonsConf func expandIPAllocationPolicy(configured interface{}) *containerBeta.IPAllocationPolicy { l := configured.([]interface{}) if len(l) == 0 || l[0] == nil { - return nil + return &containerBeta.IPAllocationPolicy{ + UseIpAliases: false, + ForceSendFields: []string{"UseIpAliases"}, + } } config := l[0].(map[string]interface{}) - return &containerBeta.IPAllocationPolicy{ - UseIpAliases: config["use_ip_aliases"].(bool), - - CreateSubnetwork: config["create_subnetwork"].(bool), - SubnetworkName: config["subnetwork_name"].(string), - + UseIpAliases: true, ClusterIpv4CidrBlock: config["cluster_ipv4_cidr_block"].(string), ServicesIpv4CidrBlock: config["services_ipv4_cidr_block"].(string), - NodeIpv4CidrBlock: config["node_ipv4_cidr_block"].(string), ClusterSecondaryRangeName: config["cluster_secondary_range_name"].(string), ServicesSecondaryRangeName: config["services_secondary_range_name"].(string), @@ -1921,13 +1800,6 @@ func flattenClusterAddonsConfig(c *containerBeta.AddonsConfig) []map[string]inte }, } } - if c.KubernetesDashboard != nil { - result["kubernetes_dashboard"] = []map[string]interface{}{ - { - "disabled": c.KubernetesDashboard.Disabled, - }, - } - } if c.NetworkPolicyConfig != nil { result["network_policy_config"] = []map[string]interface{}{ { @@ -1969,33 +1841,16 @@ func flattenPrivateClusterConfig(c *containerBeta.PrivateClusterConfig) []map[st } func flattenIPAllocationPolicy(c *containerBeta.Cluster, d *schema.ResourceData, config *Config) []map[string]interface{} { - if c == nil || c.IpAllocationPolicy == nil { + // If IP aliasing isn't enabled, none of the values in this block can be set. 
+ if c == nil || c.IpAllocationPolicy == nil || c.IpAllocationPolicy.UseIpAliases == false { return nil } - nodeCidrBlock := "" - if c.Subnetwork != "" { - subnetwork, err := ParseSubnetworkFieldValue(c.Subnetwork, d, config) - if err == nil { - sn, err := config.clientCompute.Subnetworks.Get(subnetwork.Project, subnetwork.Region, subnetwork.Name).Do() - if err == nil { - nodeCidrBlock = sn.IpCidrRange - } - } else { - log.Printf("[WARN] Unable to parse subnetwork name, got error while trying to get new subnetwork: %s", err) - } - } + p := c.IpAllocationPolicy return []map[string]interface{}{ { - "use_ip_aliases": p.UseIpAliases, - - "create_subnetwork": p.CreateSubnetwork, - "subnetwork_name": p.SubnetworkName, - - "cluster_ipv4_cidr_block": p.ClusterIpv4CidrBlock, - "services_ipv4_cidr_block": p.ServicesIpv4CidrBlock, - "node_ipv4_cidr_block": nodeCidrBlock, - + "cluster_ipv4_cidr_block": p.ClusterIpv4CidrBlock, + "services_ipv4_cidr_block": p.ServicesIpv4CidrBlock, "cluster_secondary_range_name": p.ClusterSecondaryRangeName, "services_secondary_range_name": p.ServicesSecondaryRangeName, }, @@ -2071,42 +1926,28 @@ func flattenMasterAuthorizedNetworksConfig(c *containerBeta.MasterAuthorizedNetw func resourceContainerClusterStateImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { config := meta.(*Config) - parts := strings.Split(d.Id(), "/") - var project, location, clusterName string - switch len(parts) { - case 2: - location = parts[0] - clusterName = parts[1] - case 3: - project = parts[0] - location = parts[1] - clusterName = parts[2] - default: - return nil, fmt.Errorf("Invalid container cluster specifier. Expecting {location}/{name} or {project}/{location}/{name}") - } - - if len(project) == 0 { - var err error - project, err = getProject(d, config) - if err != nil { - return nil, err - } + if err := parseImportId([]string{"projects/(?P<project>[^/]+)/locations/(?P<location>[^/]+)/clusters/(?P<name>[^/]+)", "(?P<project>[^/]+)/(?P<location>[^/]+)/(?P<name>[^/]+)", "(?P<location>[^/]+)/(?P<name>[^/]+)"}, d, config); err != nil { + return nil, err + } + project, err := getProject(d, config) + if err != nil { + return nil, err } - d.Set("project", project) - d.Set("location", location) - if isZone(location) { - d.Set("zone", location) - } else { - d.Set("region", location) + location, err := getLocation(d, config) + if err != nil { + return nil, err } - d.Set("name", clusterName) - d.SetId(clusterName) + clusterName := d.Get("name").(string) + + d.Set("location", location) if err := waitForContainerClusterReady(config, project, location, clusterName, d.Timeout(schema.TimeoutCreate)); err != nil { return nil, err } + d.SetId(containerClusterFullName(project, location, clusterName)) + return []*schema.ResourceData{d}, nil } diff --git a/google/resource_container_cluster_test.go b/google/resource_container_cluster_test.go index 01b65c9c2a5..f0739e1a316 100644 --- a/google/resource_container_cluster_test.go +++ b/google/resource_container_cluster_test.go @@ -12,69 +12,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/terraform" ) -func TestContainerClusterIpAllocationCustomizeDiff(t *testing.T) { - t.Parallel() - - cases := map[string]struct { - BeforePolicy []interface{} - AfterPolicy []interface{} - ExpectDiffCleared bool - }{ - "empty to false value": { - BeforePolicy: []interface{}{}, - AfterPolicy: []interface{}{ - map[string]interface{}{ - "use_ip_aliases": false, - }, - }, - ExpectDiffCleared: true, - }, - "empty to true value": { - BeforePolicy: []interface{}{}, - AfterPolicy: []interface{}{ -
map[string]interface{}{ - "use_ip_aliases": true, - }, - }, - ExpectDiffCleared: false, - }, - "empty to empty": { - BeforePolicy: []interface{}{}, - AfterPolicy: []interface{}{}, - ExpectDiffCleared: false, - }, - "non-empty to non-empty": { - BeforePolicy: []interface{}{ - map[string]interface{}{ - "use_ip_aliases": false, - }, - }, - AfterPolicy: []interface{}{ - map[string]interface{}{ - "use_ip_aliases": false, - }, - }, - }, - } - - for tn, tc := range cases { - d := &ResourceDiffMock{ - Before: map[string]interface{}{ - "ip_allocation_policy": tc.BeforePolicy, - }, - After: map[string]interface{}{ - "ip_allocation_policy": tc.AfterPolicy, - }, - } - if err := resourceContainerClusterIpAllocationCustomizeDiffFunc(d); err != nil { - t.Errorf("%s failed, error calculating diff: %s", tn, err) - } - if _, ok := d.Cleared["ip_allocation_policy"]; ok != tc.ExpectDiffCleared { - t.Errorf("%s failed, expected cleared to be %v, was %v", tn, tc.ExpectDiffCleared, ok) - } - } -} - func TestAccContainerCluster_basic(t *testing.T) { t.Parallel() @@ -91,16 +28,21 @@ func TestAccContainerCluster_basic(t *testing.T) { ), }, { - ResourceName: "google_container_cluster.primary", - ImportStateIdPrefix: "us-central1-a/", - ImportState: true, - ImportStateVerify: true, + ResourceName: "google_container_cluster.primary", + ImportStateId: fmt.Sprintf("us-central1-a/%s", clusterName), + ImportState: true, + ImportStateVerify: true, }, { - ResourceName: "google_container_cluster.primary", - ImportStateIdPrefix: fmt.Sprintf("%s/us-central1-a/", getTestProjectFromEnv()), - ImportState: true, - ImportStateVerify: true, + ResourceName: "google_container_cluster.primary", + ImportStateId: fmt.Sprintf("%s/us-central1-a/%s", getTestProjectFromEnv(), clusterName), + ImportState: true, + ImportStateVerify: true, + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, }, }, }) @@ -124,7 +66,6 @@ func TestAccContainerCluster_misc(t *testing.T) { }, { ResourceName: "google_container_cluster.primary", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{"remove_default_node_pool"}, @@ -134,7 +75,6 @@ func TestAccContainerCluster_misc(t *testing.T) { }, { ResourceName: "google_container_cluster.primary", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{"remove_default_node_pool"}, @@ -157,19 +97,17 @@ func TestAccContainerCluster_withAddons(t *testing.T) { Config: testAccContainerCluster_withAddons(clusterName), }, { - ResourceName: "google_container_cluster.primary", - ImportStateIdPrefix: "us-central1-a/", - ImportState: true, - ImportStateVerify: true, + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, }, { Config: testAccContainerCluster_updateAddons(clusterName), }, { - ResourceName: "google_container_cluster.primary", - ImportStateIdPrefix: "us-central1-a/", - ImportState: true, - ImportStateVerify: true, + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, }, }, }) @@ -189,10 +127,9 @@ func TestAccContainerCluster_withMasterAuthConfig(t *testing.T) { Config: testAccContainerCluster_withMasterAuth(clusterName), }, { - ResourceName: "google_container_cluster.with_master_auth", - ImportStateIdPrefix: "us-central1-a/", - ImportState: true, - ImportStateVerify: true, + ResourceName: "google_container_cluster.with_master_auth", + 
ImportState: true, + ImportStateVerify: true, }, { Config: testAccContainerCluster_updateMasterAuth(clusterName), @@ -202,10 +139,9 @@ func TestAccContainerCluster_withMasterAuthConfig(t *testing.T) { ), }, { - ResourceName: "google_container_cluster.with_master_auth", - ImportStateIdPrefix: "us-central1-a/", - ImportState: true, - ImportStateVerify: true, + ResourceName: "google_container_cluster.with_master_auth", + ImportState: true, + ImportStateVerify: true, }, { Config: testAccContainerCluster_disableMasterAuth(clusterName), @@ -215,10 +151,9 @@ func TestAccContainerCluster_withMasterAuthConfig(t *testing.T) { ), }, { - ResourceName: "google_container_cluster.with_master_auth", - ImportStateIdPrefix: "us-central1-a/", - ImportState: true, - ImportStateVerify: true, + ResourceName: "google_container_cluster.with_master_auth", + ImportState: true, + ImportStateVerify: true, }, { Config: testAccContainerCluster_updateMasterAuth(clusterName), @@ -228,10 +163,9 @@ func TestAccContainerCluster_withMasterAuthConfig(t *testing.T) { ), }, { - ResourceName: "google_container_cluster.with_master_auth", - ImportStateIdPrefix: "us-central1-a/", - ImportState: true, - ImportStateVerify: true, + ResourceName: "google_container_cluster.with_master_auth", + ImportState: true, + ImportStateVerify: true, }, }, }) @@ -252,10 +186,9 @@ func TestAccContainerCluster_withMasterAuthConfig_NoCert(t *testing.T) { ), }, { - ResourceName: "google_container_cluster.with_master_auth_no_cert", - ImportStateIdPrefix: "us-central1-a/", - ImportState: true, - ImportStateVerify: true, + ResourceName: "google_container_cluster.with_master_auth_no_cert", + ImportState: true, + ImportStateVerify: true, }, }, }) @@ -280,7 +213,6 @@ func TestAccContainerCluster_withNetworkPolicyEnabled(t *testing.T) { }, { ResourceName: "google_container_cluster.with_network_policy_enabled", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{"remove_default_node_pool"}, @@ -294,7 +226,6 @@ func TestAccContainerCluster_withNetworkPolicyEnabled(t *testing.T) { }, { ResourceName: "google_container_cluster.with_network_policy_enabled", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{"remove_default_node_pool"}, @@ -308,7 +239,6 @@ func TestAccContainerCluster_withNetworkPolicyEnabled(t *testing.T) { }, { ResourceName: "google_container_cluster.with_network_policy_enabled", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{"remove_default_node_pool"}, @@ -322,7 +252,6 @@ func TestAccContainerCluster_withNetworkPolicyEnabled(t *testing.T) { }, { ResourceName: "google_container_cluster.with_network_policy_enabled", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{"remove_default_node_pool"}, @@ -363,19 +292,17 @@ func TestAccContainerCluster_withMasterAuthorizedNetworksConfig(t *testing.T) { ), }, { - ResourceName: "google_container_cluster.with_master_authorized_networks", - ImportState: true, - ImportStateVerify: true, - ImportStateIdPrefix: "us-central1-a/", + ResourceName: "google_container_cluster.with_master_authorized_networks", + ImportState: true, + ImportStateVerify: true, }, { Config: testAccContainerCluster_withMasterAuthorizedNetworksConfig(clusterName, []string{"10.0.0.0/8", "8.8.8.8/32"}, ""), }, { - ResourceName: 
"google_container_cluster.with_master_authorized_networks", - ImportState: true, - ImportStateVerify: true, - ImportStateIdPrefix: "us-central1-a/", + ResourceName: "google_container_cluster.with_master_authorized_networks", + ImportState: true, + ImportStateVerify: true, }, { Config: testAccContainerCluster_withMasterAuthorizedNetworksConfig(clusterName, []string{}, ""), @@ -385,19 +312,17 @@ func TestAccContainerCluster_withMasterAuthorizedNetworksConfig(t *testing.T) { ), }, { - ResourceName: "google_container_cluster.with_master_authorized_networks", - ImportStateIdPrefix: "us-central1-a/", - ImportState: true, - ImportStateVerify: true, + ResourceName: "google_container_cluster.with_master_authorized_networks", + ImportState: true, + ImportStateVerify: true, }, { Config: testAccContainerCluster_removeMasterAuthorizedNetworksConfig(clusterName), }, { - ResourceName: "google_container_cluster.with_master_authorized_networks", - ImportStateIdPrefix: "us-central1-a/", - ImportState: true, - ImportStateVerify: true, + ResourceName: "google_container_cluster.with_master_authorized_networks", + ImportState: true, + ImportStateVerify: true, }, }, }) @@ -417,10 +342,9 @@ func TestAccContainerCluster_regional(t *testing.T) { Config: testAccContainerCluster_regional(clusterName), }, { - ResourceName: "google_container_cluster.regional", - ImportStateIdPrefix: "us-central1/", - ImportState: true, - ImportStateVerify: true, + ResourceName: "google_container_cluster.regional", + ImportState: true, + ImportStateVerify: true, }, }, }) @@ -441,16 +365,15 @@ func TestAccContainerCluster_regionalWithNodePool(t *testing.T) { Config: testAccContainerCluster_regionalWithNodePool(clusterName, npName), }, { - ResourceName: "google_container_cluster.regional", - ImportStateIdPrefix: "us-central1/", - ImportState: true, - ImportStateVerify: true, + ResourceName: "google_container_cluster.regional", + ImportState: true, + ImportStateVerify: true, }, }, }) } -func TestAccContainerCluster_regionalWithAdditionalZones(t *testing.T) { +func TestAccContainerCluster_regionalWithNodeLocations(t *testing.T) { t.Parallel() clusterName := fmt.Sprintf("cluster-test-%s", acctest.RandString(10)) @@ -461,22 +384,20 @@ func TestAccContainerCluster_regionalWithAdditionalZones(t *testing.T) { CheckDestroy: testAccCheckContainerClusterDestroy, Steps: []resource.TestStep{ { - Config: testAccContainerCluster_regionalAdditionalZones(clusterName), + Config: testAccContainerCluster_regionalNodeLocations(clusterName), }, { - ResourceName: "google_container_cluster.with_additional_zones", - ImportStateIdPrefix: "us-central1/", - ImportState: true, - ImportStateVerify: true, + ResourceName: "google_container_cluster.with_node_locations", + ImportState: true, + ImportStateVerify: true, }, { - Config: testAccContainerCluster_regionalUpdateAdditionalZones(clusterName), + Config: testAccContainerCluster_regionalUpdateNodeLocations(clusterName), }, { - ResourceName: "google_container_cluster.with_additional_zones", - ImportStateIdPrefix: "us-central1/", - ImportState: true, - ImportStateVerify: true, + ResourceName: "google_container_cluster.with_node_locations", + ImportState: true, + ImportStateVerify: true, }, }, }) @@ -496,10 +417,9 @@ func TestAccContainerCluster_withPrivateClusterConfig(t *testing.T) { Config: testAccContainerCluster_withPrivateClusterConfig(clusterName), }, { - ResourceName: "google_container_cluster.with_private_cluster", - ImportStateIdPrefix: "us-central1-a/", - ImportState: true, - ImportStateVerify: true, + 
ResourceName: "google_container_cluster.with_private_cluster", + ImportState: true, + ImportStateVerify: true, }, }, }) @@ -538,7 +458,6 @@ func TestAccContainerCluster_withVersion(t *testing.T) { }, { ResourceName: "google_container_cluster.with_version", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{"min_master_version"}, @@ -562,7 +481,6 @@ func TestAccContainerCluster_updateVersion(t *testing.T) { }, { ResourceName: "google_container_cluster.with_version", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{"min_master_version"}, @@ -572,7 +490,6 @@ func TestAccContainerCluster_updateVersion(t *testing.T) { }, { ResourceName: "google_container_cluster.with_version", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{"min_master_version"}, @@ -595,19 +512,17 @@ func TestAccContainerCluster_withNodeConfig(t *testing.T) { Config: testAccContainerCluster_withNodeConfig(clusterName), }, { - ResourceName: "google_container_cluster.with_node_config", - ImportStateIdPrefix: "us-central1-f/", - ImportState: true, - ImportStateVerify: true, + ResourceName: "google_container_cluster.with_node_config", + ImportState: true, + ImportStateVerify: true, }, { Config: testAccContainerCluster_withNodeConfigUpdate(clusterName), }, { - ResourceName: "google_container_cluster.with_node_config", - ImportStateIdPrefix: "us-central1-f/", - ImportState: true, - ImportStateVerify: true, + ResourceName: "google_container_cluster.with_node_config", + ImportState: true, + ImportStateVerify: true, }, }, }) @@ -625,10 +540,9 @@ func TestAccContainerCluster_withNodeConfigScopeAlias(t *testing.T) { Config: testAccContainerCluster_withNodeConfigScopeAlias(), }, { - ResourceName: "google_container_cluster.with_node_config_scope_alias", - ImportStateIdPrefix: "us-central1-f/", - ImportState: true, - ImportStateVerify: true, + ResourceName: "google_container_cluster.with_node_config_scope_alias", + ImportState: true, + ImportStateVerify: true, }, }, }) @@ -648,10 +562,9 @@ func TestAccContainerCluster_withNodeConfigShieldedInstanceConfig(t *testing.T) Config: testAccContainerCluster_withNodeConfigShieldedInstanceConfig(clusterName), }, { - ResourceName: "google_container_cluster.with_node_config", - ImportStateIdPrefix: "us-central1-f/", - ImportState: true, - ImportStateVerify: true, + ResourceName: "google_container_cluster.with_node_config", + ImportState: true, + ImportStateVerify: true, }, }, }) @@ -669,16 +582,14 @@ func TestAccContainerCluster_network(t *testing.T) { Config: testAccContainerCluster_networkRef(), }, { - ResourceName: "google_container_cluster.with_net_ref_by_url", - ImportStateIdPrefix: "us-central1-a/", - ImportState: true, - ImportStateVerify: true, + ResourceName: "google_container_cluster.with_net_ref_by_url", + ImportState: true, + ImportStateVerify: true, }, { - ResourceName: "google_container_cluster.with_net_ref_by_name", - ImportStateIdPrefix: "us-central1-a/", - ImportState: true, - ImportStateVerify: true, + ResourceName: "google_container_cluster.with_net_ref_by_name", + ImportState: true, + ImportStateVerify: true, }, }, }) @@ -696,10 +607,9 @@ func TestAccContainerCluster_backend(t *testing.T) { Config: testAccContainerCluster_backendRef(), }, { - ResourceName: "google_container_cluster.primary", - ImportStateIdPrefix: "us-central1-a/", - ImportState: true, - ImportStateVerify: 
true, + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, }, }, }) @@ -720,10 +630,9 @@ func TestAccContainerCluster_withNodePoolBasic(t *testing.T) { Config: testAccContainerCluster_withNodePoolBasic(clusterName, npName), }, { - ResourceName: "google_container_cluster.with_node_pool", - ImportStateIdPrefix: "us-central1-a/", - ImportState: true, - ImportStateVerify: true, + ResourceName: "google_container_cluster.with_node_pool", + ImportState: true, + ImportStateVerify: true, }, }, }) @@ -745,7 +654,6 @@ func TestAccContainerCluster_withNodePoolUpdateVersion(t *testing.T) { }, { ResourceName: "google_container_cluster.with_node_pool", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{"min_master_version"}, @@ -755,7 +663,6 @@ func TestAccContainerCluster_withNodePoolUpdateVersion(t *testing.T) { }, { ResourceName: "google_container_cluster.with_node_pool", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{"min_master_version"}, @@ -775,16 +682,15 @@ func TestAccContainerCluster_withNodePoolResize(t *testing.T) { CheckDestroy: testAccCheckContainerClusterDestroy, Steps: []resource.TestStep{ { - Config: testAccContainerCluster_withNodePoolAdditionalZones(clusterName, npName), + Config: testAccContainerCluster_withNodePoolNodeLocations(clusterName, npName), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.node_count", "2"), ), }, { - ResourceName: "google_container_cluster.with_node_pool", - ImportStateIdPrefix: "us-central1-a/", - ImportState: true, - ImportStateVerify: true, + ResourceName: "google_container_cluster.with_node_pool", + ImportState: true, + ImportStateVerify: true, }, { Config: testAccContainerCluster_withNodePoolResize(clusterName, npName), @@ -793,10 +699,9 @@ func TestAccContainerCluster_withNodePoolResize(t *testing.T) { ), }, { - ResourceName: "google_container_cluster.with_node_pool", - ImportStateIdPrefix: "us-central1-a/", - ImportState: true, - ImportStateVerify: true, + ResourceName: "google_container_cluster.with_node_pool", + ImportState: true, + ImportStateVerify: true, }, }, }) @@ -821,10 +726,9 @@ func TestAccContainerCluster_withNodePoolAutoscaling(t *testing.T) { ), }, { - ResourceName: "google_container_cluster.with_node_pool", - ImportStateIdPrefix: "us-central1-a/", - ImportState: true, - ImportStateVerify: true, + ResourceName: "google_container_cluster.with_node_pool", + ImportState: true, + ImportStateVerify: true, }, { Config: testAccContainerCluster_withNodePoolUpdateAutoscaling(clusterName, npName), @@ -834,10 +738,9 @@ func TestAccContainerCluster_withNodePoolAutoscaling(t *testing.T) { ), }, { - ResourceName: "google_container_cluster.with_node_pool", - ImportStateIdPrefix: "us-central1-a/", - ImportState: true, - ImportStateVerify: true, + ResourceName: "google_container_cluster.with_node_pool", + ImportState: true, + ImportStateVerify: true, }, { Config: testAccContainerCluster_withNodePoolBasic(clusterName, npName), @@ -847,10 +750,9 @@ func TestAccContainerCluster_withNodePoolAutoscaling(t *testing.T) { ), }, { - ResourceName: "google_container_cluster.with_node_pool", - ImportStateIdPrefix: "us-central1-a/", - ImportState: true, - ImportStateVerify: true, + ResourceName: "google_container_cluster.with_node_pool", + ImportState: true, + ImportStateVerify: true, }, }, }) @@ -869,7 
+771,6 @@ func TestAccContainerCluster_withNodePoolNamePrefix(t *testing.T) { }, { ResourceName: "google_container_cluster.with_node_pool_name_prefix", - ImportStateIdPrefix: "us-central1-a/", ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{"node_pool.0.name_prefix"}, @@ -890,10 +791,9 @@ func TestAccContainerCluster_withNodePoolMultiple(t *testing.T) { Config: testAccContainerCluster_withNodePoolMultiple(), }, { - ResourceName: "google_container_cluster.with_node_pool_multiple", - ImportStateIdPrefix: "us-central1-a/", - ImportState: true, - ImportStateVerify: true, + ResourceName: "google_container_cluster.with_node_pool_multiple", + ImportState: true, + ImportStateVerify: true, }, }, }) @@ -927,10 +827,9 @@ func TestAccContainerCluster_withNodePoolNodeConfig(t *testing.T) { Config: testAccContainerCluster_withNodePoolNodeConfig(), }, { - ResourceName: "google_container_cluster.with_node_pool_node_config", - ImportStateIdPrefix: "us-central1-a/", - ImportState: true, - ImportStateVerify: true, + ResourceName: "google_container_cluster.with_node_pool_node_config", + ImportState: true, + ImportStateVerify: true, }, }, }) @@ -950,10 +849,9 @@ func TestAccContainerCluster_withMaintenanceWindow(t *testing.T) { Config: testAccContainerCluster_withMaintenanceWindow(clusterName, "03:00"), }, { - ResourceName: resourceName, - ImportStateIdPrefix: "us-central1-a/", - ImportState: true, - ImportStateVerify: true, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, }, { Config: testAccContainerCluster_withMaintenanceWindow(clusterName, ""), @@ -963,10 +861,9 @@ func TestAccContainerCluster_withMaintenanceWindow(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportStateIdPrefix: "us-central1-a/", - ImportState: true, - ImportStateVerify: true, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, // maintenance_policy.# = 0 is equivalent to no maintenance policy at all, // but will still cause an import diff ImportStateVerifyIgnore: []string{"maintenance_policy.#"}, @@ -988,10 +885,9 @@ func TestAccContainerCluster_withIPAllocationPolicy_existingSecondaryRanges(t *t Config: testAccContainerCluster_withIPAllocationPolicy_existingSecondaryRanges(cluster), }, { - ResourceName: "google_container_cluster.with_ip_allocation_policy", - ImportStateIdPrefix: "us-central1-a/", - ImportState: true, - ImportStateVerify: true, + ResourceName: "google_container_cluster.with_ip_allocation_policy", + ImportState: true, + ImportStateVerify: true, }, }, }) @@ -1010,10 +906,9 @@ func TestAccContainerCluster_withIPAllocationPolicy_specificIPRanges(t *testing. 
Config: testAccContainerCluster_withIPAllocationPolicy_specificIPRanges(cluster), }, { - ResourceName: "google_container_cluster.with_ip_allocation_policy", - ImportStateIdPrefix: "us-central1-a/", - ImportState: true, - ImportStateVerify: true, + ResourceName: "google_container_cluster.with_ip_allocation_policy", + ImportState: true, + ImportStateVerify: true, }, }, }) @@ -1032,76 +927,9 @@ func TestAccContainerCluster_withIPAllocationPolicy_specificSizes(t *testing.T) Config: testAccContainerCluster_withIPAllocationPolicy_specificSizes(cluster), }, { - ResourceName: "google_container_cluster.with_ip_allocation_policy", - ImportStateIdPrefix: "us-central1-a/", - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccContainerCluster_withIPAllocationPolicy_createSubnetwork(t *testing.T) { - t.Parallel() - - cluster := fmt.Sprintf("cluster-test-%s", acctest.RandString(10)) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckContainerClusterDestroy, - Steps: []resource.TestStep{ - { - Config: testAccContainerCluster_withIPAllocationPolicy_createSubnetwork(cluster), - }, - { - ResourceName: "google_container_cluster.with_ip_allocation_policy", - ImportStateIdPrefix: "us-central1-a/", - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccContainerCluster_withIPAllocationPolicy_createSubnetworkUpdated(cluster), - PlanOnly: true, - ExpectNonEmptyPlan: false, - }, - { - ResourceName: "google_container_cluster.with_ip_allocation_policy", - ImportStateIdPrefix: "us-central1-a/", - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -// This test will intentionally perform a recreate. Without attr syntax, there's -// no way to go from allocation policy set -> unset without one. 
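// Illustrative sketch, not part of this patch: with the cluster's location now
// embedded in the resource ID, every import step in this file drops its
// ImportStateIdPrefix and reduces to the same three-field form. Assuming the
// terraform-plugin-sdk helper/resource package, a hypothetical step looks like:
var exampleImportStep = resource.TestStep{
	ResourceName:      "google_container_cluster.example", // hypothetical resource address
	ImportState:       true,
	ImportStateVerify: true, // no ImportStateIdPrefix needed; the ID carries the location
}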
-func TestAccContainerCluster_withIPAllocationPolicy_explicitEmpty(t *testing.T) { - t.Parallel() - - cluster := fmt.Sprintf("cluster-test-%s", acctest.RandString(10)) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckContainerClusterDestroy, - Steps: []resource.TestStep{ - { - Config: testAccContainerCluster_withIPAllocationPolicy_createSubnetwork(cluster), - }, - { - ResourceName: "google_container_cluster.with_ip_allocation_policy", - ImportStateIdPrefix: "us-central1-a/", - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccContainerCluster_withIPAllocationPolicy_explicitEmpty(cluster), - }, - { - ResourceName: "google_container_cluster.with_ip_allocation_policy", - ImportStateIdPrefix: "us-central1-a/", - ImportState: true, - ImportStateVerify: true, + ResourceName: "google_container_cluster.with_ip_allocation_policy", + ImportState: true, + ImportStateVerify: true, }, }, }) @@ -1126,10 +954,9 @@ func TestAccContainerCluster_errorCleanDanglingCluster(t *testing.T) { Config: initConfig, }, { - ResourceName: "google_container_cluster.cidr_error_preempt", - ImportStateIdPrefix: "us-central1-a/", - ImportState: true, - ImportStateVerify: true, + ResourceName: "google_container_cluster.cidr_error_preempt", + ImportState: true, + ImportStateVerify: true, }, { Config: overlapConfig, @@ -1178,10 +1005,9 @@ func TestAccContainerCluster_withMasterAuthorizedNetworksDisabled(t *testing.T) ), }, { - ResourceName: "google_container_cluster.with_private_cluster", - ImportStateIdPrefix: "us-central1-a/", - ImportState: true, - ImportStateVerify: true, + ResourceName: "google_container_cluster.with_private_cluster", + ImportState: true, + ImportStateVerify: true, }, }, }) @@ -1198,7 +1024,7 @@ func testAccContainerCluster_masterAuthorizedNetworksDisabled(resource_name stri attributes := rs.Primary.Attributes cluster, err := config.clientContainer.Projects.Zones.Clusters.Get( - config.Project, attributes["zone"], attributes["name"]).Do() + config.Project, attributes["location"], attributes["name"]).Do() if err != nil { return err } @@ -1221,7 +1047,7 @@ func testAccCheckContainerClusterDestroy(s *terraform.State) error { attributes := rs.Primary.Attributes _, err := config.clientContainer.Projects.Zones.Clusters.Get( - config.Project, attributes["zone"], attributes["name"]).Do() + config.Project, attributes["location"], attributes["name"]).Do() if err == nil { return fmt.Errorf("Cluster still exists") } @@ -1334,13 +1160,12 @@ func testAccContainerCluster_misc(name string) string { return fmt.Sprintf(` resource "google_container_cluster" "primary" { name = "%s" - zone = "us-central1-a" + location = "us-central1-a" initial_node_count = 1 remove_default_node_pool = true - # This uses zone/additional_zones over location/node_locations to ensure we can update from old -> new - additional_zones = [ + node_locations = [ "us-central1-b", "us-central1-c" ] @@ -1348,8 +1173,8 @@ resource "google_container_cluster" "primary" { enable_kubernetes_alpha = true enable_legacy_abac = true - logging_service = "logging.googleapis.com/kubernetes" - monitoring_service = "monitoring.googleapis.com/kubernetes" + logging_service = "logging.googleapis.com" + monitoring_service = "monitoring.googleapis.com" resource_labels = { created-by = "terraform" @@ -1396,7 +1221,6 @@ resource "google_container_cluster" "primary" { addons_config { http_load_balancing { disabled = true } horizontal_pod_autoscaling { disabled = 
true } - kubernetes_dashboard { disabled = true } network_policy_config { disabled = true } } }`, clusterName) @@ -1411,7 +1235,6 @@ resource "google_container_cluster" "primary" { addons_config { http_load_balancing { disabled = false } - kubernetes_dashboard { disabled = false } horizontal_pod_autoscaling { disabled = false } network_policy_config { disabled = false } } @@ -1421,8 +1244,8 @@ resource "google_container_cluster" "primary" { func testAccContainerCluster_withMasterAuth(clusterName string) string { return fmt.Sprintf(` resource "google_container_cluster" "with_master_auth" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" initial_node_count = 3 master_auth { @@ -1435,8 +1258,8 @@ resource "google_container_cluster" "with_master_auth" { func testAccContainerCluster_updateMasterAuth(clusterName string) string { return fmt.Sprintf(` resource "google_container_cluster" "with_master_auth" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" initial_node_count = 3 master_auth { @@ -1449,8 +1272,8 @@ resource "google_container_cluster" "with_master_auth" { func testAccContainerCluster_disableMasterAuth(clusterName string) string { return fmt.Sprintf(` resource "google_container_cluster" "with_master_auth" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" initial_node_count = 3 master_auth { @@ -1463,8 +1286,8 @@ resource "google_container_cluster" "with_master_auth" { func testAccContainerCluster_withMasterAuthNoCert() string { return fmt.Sprintf(` resource "google_container_cluster" "with_master_auth_no_cert" { - name = "cluster-test-%s" - zone = "us-central1-a" + name = "cluster-test-%s" + location = "us-central1-a" initial_node_count = 3 master_auth { @@ -1480,8 +1303,8 @@ resource "google_container_cluster" "with_master_auth_no_cert" { func testAccContainerCluster_withNetworkPolicyEnabled(clusterName string) string { return fmt.Sprintf(` resource "google_container_cluster" "with_network_policy_enabled" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" initial_node_count = 1 remove_default_node_pool = true @@ -1501,8 +1324,8 @@ resource "google_container_cluster" "with_network_policy_enabled" { func testAccContainerCluster_removeNetworkPolicy(clusterName string) string { return fmt.Sprintf(` resource "google_container_cluster" "with_network_policy_enabled" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" initial_node_count = 1 remove_default_node_pool = true }`, clusterName) @@ -1511,8 +1334,8 @@ resource "google_container_cluster" "with_network_policy_enabled" { func testAccContainerCluster_withNetworkPolicyDisabled(clusterName string) string { return fmt.Sprintf(` resource "google_container_cluster" "with_network_policy_enabled" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" initial_node_count = 1 remove_default_node_pool = true @@ -1523,8 +1346,8 @@ resource "google_container_cluster" "with_network_policy_enabled" { func testAccContainerCluster_withNetworkPolicyConfigDisabled(clusterName string) string { return fmt.Sprintf(` resource "google_container_cluster" "with_network_policy_enabled" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" initial_node_count = 1 remove_default_node_pool = true @@ -1586,8 +1409,8 @@ resource "google_container_cluster" "regional" { func testAccContainerCluster_regionalWithNodePool(cluster, nodePool string) 
string { return fmt.Sprintf(` resource "google_container_cluster" "regional" { - name = "%s" - region = "us-central1" + name = "%s" + location = "us-central1" node_pool { name = "%s" @@ -1595,24 +1418,23 @@ resource "google_container_cluster" "regional" { }`, cluster, nodePool) } -// This uses region/additional_zones over location/node_locations to ensure we can update from old -> new -func testAccContainerCluster_regionalAdditionalZones(clusterName string) string { +func testAccContainerCluster_regionalNodeLocations(clusterName string) string { return fmt.Sprintf(` -resource "google_container_cluster" "with_additional_zones" { - name = "%s" - region = "us-central1" +resource "google_container_cluster" "with_node_locations" { + name = "%s" + location = "us-central1" initial_node_count = 1 - additional_zones = [ + node_locations = [ "us-central1-f", "us-central1-c", ] }`, clusterName) } -func testAccContainerCluster_regionalUpdateAdditionalZones(clusterName string) string { +func testAccContainerCluster_regionalUpdateNodeLocations(clusterName string) string { return fmt.Sprintf(` -resource "google_container_cluster" "with_additional_zones" { +resource "google_container_cluster" "with_node_locations" { name = "%s" location = "us-central1" initial_node_count = 1 @@ -1627,12 +1449,12 @@ resource "google_container_cluster" "with_additional_zones" { func testAccContainerCluster_withVersion(clusterName string) string { return fmt.Sprintf(` data "google_container_engine_versions" "central1a" { - zone = "us-central1-a" + location = "us-central1-a" } resource "google_container_cluster" "with_version" { - name = "cluster-test-%s" - zone = "us-central1-a" + name = "cluster-test-%s" + location = "us-central1-a" min_master_version = "${data.google_container_engine_versions.central1a.latest_master_version}" initial_node_count = 1 }`, clusterName) @@ -1641,12 +1463,12 @@ resource "google_container_cluster" "with_version" { func testAccContainerCluster_withLowerVersion(clusterName string) string { return fmt.Sprintf(` data "google_container_engine_versions" "central1a" { - zone = "us-central1-a" + location = "us-central1-a" } resource "google_container_cluster" "with_version" { - name = "cluster-test-%s" - zone = "us-central1-a" + name = "cluster-test-%s" + location = "us-central1-a" min_master_version = "${data.google_container_engine_versions.central1a.valid_master_versions.2}" initial_node_count = 1 }`, clusterName) @@ -1655,12 +1477,12 @@ resource "google_container_cluster" "with_version" { func testAccContainerCluster_updateVersion(clusterName string) string { return fmt.Sprintf(` data "google_container_engine_versions" "central1a" { - zone = "us-central1-a" + location = "us-central1-a" } resource "google_container_cluster" "with_version" { - name = "cluster-test-%s" - zone = "us-central1-a" + name = "cluster-test-%s" + location = "us-central1-a" min_master_version = "${data.google_container_engine_versions.central1a.valid_master_versions.1}" node_version = "${data.google_container_engine_versions.central1a.valid_node_versions.1}" initial_node_count = 1 @@ -1670,8 +1492,8 @@ resource "google_container_cluster" "with_version" { func testAccContainerCluster_withNodeConfig(clusterName string) string { return fmt.Sprintf(` resource "google_container_cluster" "with_node_config" { - name = "%s" - zone = "us-central1-f" + name = "%s" + location = "us-central1-f" initial_node_count = 1 node_config { @@ -1718,8 +1540,8 @@ resource "google_container_cluster" "with_node_config" { func 
testAccContainerCluster_withNodeConfigUpdate(clusterName string) string { return fmt.Sprintf(` resource "google_container_cluster" "with_node_config" { - name = "%s" - zone = "us-central1-f" + name = "%s" + location = "us-central1-f" initial_node_count = 1 node_config { @@ -1766,8 +1588,8 @@ resource "google_container_cluster" "with_node_config" { func testAccContainerCluster_withNodeConfigScopeAlias() string { return fmt.Sprintf(` resource "google_container_cluster" "with_node_config_scope_alias" { - name = "cluster-test-%s" - zone = "us-central1-f" + name = "cluster-test-%s" + location = "us-central1-f" initial_node_count = 1 node_config { @@ -1782,7 +1604,7 @@ func testAccContainerCluster_withNodeConfigShieldedInstanceConfig(clusterName st return fmt.Sprintf(` resource "google_container_cluster" "with_node_config" { name = "%s" - zone = "us-central1-f" + location = "us-central1-f" initial_node_count = 1 node_config { @@ -1827,16 +1649,16 @@ resource "google_compute_network" "container_network" { } resource "google_container_cluster" "with_net_ref_by_url" { - name = "cluster-test-%s" - zone = "us-central1-a" + name = "cluster-test-%s" + location = "us-central1-a" initial_node_count = 1 network = "${google_compute_network.container_network.self_link}" } resource "google_container_cluster" "with_net_ref_by_name" { - name = "cluster-test-%s" - zone = "us-central1-a" + name = "cluster-test-%s" + location = "us-central1-a" initial_node_count = 1 network = "${google_compute_network.container_network.name}" @@ -1866,10 +1688,10 @@ resource "google_compute_http_health_check" "default" { resource "google_container_cluster" "primary" { name = "terraform-test-%s" - zone = "us-central1-a" + location = "us-central1-a" initial_node_count = 3 - additional_zones = [ + node_locations = [ "us-central1-b", "us-central1-c", ] @@ -1889,8 +1711,8 @@ resource "google_container_cluster" "primary" { func testAccContainerCluster_withNodePoolBasic(cluster, nodePool string) string { return fmt.Sprintf(` resource "google_container_cluster" "with_node_pool" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" node_pool { name = "%s" @@ -1902,12 +1724,12 @@ resource "google_container_cluster" "with_node_pool" { func testAccContainerCluster_withNodePoolLowerVersion(cluster, nodePool string) string { return fmt.Sprintf(` data "google_container_engine_versions" "central1a" { - zone = "us-central1-a" + location = "us-central1-a" } resource "google_container_cluster" "with_node_pool" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" min_master_version = "${data.google_container_engine_versions.central1a.valid_master_versions.1}" @@ -1922,12 +1744,12 @@ resource "google_container_cluster" "with_node_pool" { func testAccContainerCluster_withNodePoolUpdateVersion(cluster, nodePool string) string { return fmt.Sprintf(` data "google_container_engine_versions" "central1a" { - zone = "us-central1-a" + location = "us-central1-a" } resource "google_container_cluster" "with_node_pool" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" min_master_version = "${data.google_container_engine_versions.central1a.valid_master_versions.1}" @@ -1939,13 +1761,13 @@ resource "google_container_cluster" "with_node_pool" { }`, cluster, nodePool) } -func testAccContainerCluster_withNodePoolAdditionalZones(cluster, nodePool string) string { +func testAccContainerCluster_withNodePoolNodeLocations(cluster, nodePool string) string { return fmt.Sprintf(` 
resource "google_container_cluster" "with_node_pool" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" - additional_zones = [ + node_locations = [ "us-central1-b", "us-central1-c" ] @@ -1960,10 +1782,10 @@ resource "google_container_cluster" "with_node_pool" { func testAccContainerCluster_withNodePoolResize(cluster, nodePool string) string { return fmt.Sprintf(` resource "google_container_cluster" "with_node_pool" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" - additional_zones = [ + node_locations = [ "us-central1-b", "us-central1-c" ] @@ -1978,8 +1800,8 @@ resource "google_container_cluster" "with_node_pool" { func testAccContainerCluster_withNodePoolAutoscaling(cluster, np string) string { return fmt.Sprintf(` resource "google_container_cluster" "with_node_pool" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" node_pool { name = "%s" @@ -1995,8 +1817,8 @@ resource "google_container_cluster" "with_node_pool" { func testAccContainerCluster_withNodePoolUpdateAutoscaling(cluster, np string) string { return fmt.Sprintf(` resource "google_container_cluster" "with_node_pool" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" node_pool { name = "%s" @@ -2012,8 +1834,8 @@ resource "google_container_cluster" "with_node_pool" { func testAccContainerCluster_withNodePoolNamePrefix() string { return fmt.Sprintf(` resource "google_container_cluster" "with_node_pool_name_prefix" { - name = "tf-cluster-nodepool-test-%s" - zone = "us-central1-a" + name = "tf-cluster-nodepool-test-%s" + location = "us-central1-a" node_pool { name_prefix = "tf-np-test" @@ -2025,8 +1847,8 @@ resource "google_container_cluster" "with_node_pool_name_prefix" { func testAccContainerCluster_withNodePoolMultiple() string { return fmt.Sprintf(` resource "google_container_cluster" "with_node_pool_multiple" { - name = "tf-cluster-nodepool-test-%s" - zone = "us-central1-a" + name = "tf-cluster-nodepool-test-%s" + location = "us-central1-a" node_pool { name = "tf-cluster-nodepool-test-%s" @@ -2043,8 +1865,8 @@ resource "google_container_cluster" "with_node_pool_multiple" { func testAccContainerCluster_withNodePoolConflictingNameFields() string { return fmt.Sprintf(` resource "google_container_cluster" "with_node_pool_multiple" { - name = "tf-cluster-nodepool-test-%s" - zone = "us-central1-a" + name = "tf-cluster-nodepool-test-%s" + location = "us-central1-a" node_pool { # ERROR: name and name_prefix cannot be both specified @@ -2059,8 +1881,8 @@ func testAccContainerCluster_withNodePoolNodeConfig() string { testId := acctest.RandString(10) return fmt.Sprintf(` resource "google_container_cluster" "with_node_pool_node_config" { - name = "tf-cluster-nodepool-test-%s" - zone = "us-central1-a" + name = "tf-cluster-nodepool-test-%s" + location = "us-central1-a" node_pool { name = "tf-cluster-nodepool-test-%s" node_count = 2 @@ -2104,8 +1926,8 @@ func testAccContainerCluster_withMaintenanceWindow(clusterName string, startTime return fmt.Sprintf(` resource "google_container_cluster" "with_maintenance_window" { - name = "cluster-test-%s" - zone = "us-central1-a" + name = "cluster-test-%s" + location = "us-central1-a" initial_node_count = 1 %s @@ -2120,31 +1942,31 @@ resource "google_compute_network" "container_network" { } resource "google_compute_subnetwork" "container_subnetwork" { - name = "${google_compute_network.container_network.name}" - network = "${google_compute_network.container_network.name}" + 
name = "${google_compute_network.container_network.name}" + network = "${google_compute_network.container_network.name}" + region = "us-central1" + ip_cidr_range = "10.0.0.0/24" - region = "us-central1" secondary_ip_range { - range_name = "pods" + range_name = "pods" ip_cidr_range = "10.1.0.0/16" } secondary_ip_range { - range_name = "services" + range_name = "services" ip_cidr_range = "10.2.0.0/20" } } resource "google_container_cluster" "with_ip_allocation_policy" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" network = "${google_compute_network.container_network.name}" subnetwork = "${google_compute_subnetwork.container_subnetwork.name}" initial_node_count = 1 ip_allocation_policy { - use_ip_aliases = true cluster_secondary_range_name = "pods" services_secondary_range_name = "services" } @@ -2158,18 +1980,24 @@ resource "google_compute_network" "container_network" { auto_create_subnetworks = false } +resource "google_compute_subnetwork" "container_subnetwork" { + name = "${google_compute_network.container_network.name}" + network = "${google_compute_network.container_network.name}" + region = "us-central1" + + ip_cidr_range = "10.2.0.0/16" +} + resource "google_container_cluster" "with_ip_allocation_policy" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" network = "${google_compute_network.container_network.name}" + subnetwork = "${google_compute_subnetwork.container_subnetwork.name}" initial_node_count = 1 ip_allocation_policy { - use_ip_aliases = true - create_subnetwork = true cluster_ipv4_cidr_block = "10.0.0.0/16" services_ipv4_cidr_block = "10.1.0.0/16" - node_ipv4_cidr_block = "10.2.0.0/16" } }`, cluster, cluster) } @@ -2182,92 +2010,24 @@ resource "google_compute_network" "container_network" { } resource "google_compute_subnetwork" "container_subnetwork" { - name = "${google_compute_network.container_network.name}" - network = "${google_compute_network.container_network.name}" - ip_cidr_range = "10.0.0.0/24" - region = "us-central1" + name = "${google_compute_network.container_network.name}" + network = "${google_compute_network.container_network.name}" + region = "us-central1" + + ip_cidr_range = "10.2.0.0/16" } resource "google_container_cluster" "with_ip_allocation_policy" { - name = "%s" - zone = "us-central1-a" - - network = "${google_compute_network.container_network.name}" + name = "%s" + location = "us-central1-a" + network = "${google_compute_network.container_network.name}" + subnetwork = "${google_compute_subnetwork.container_subnetwork.name}" initial_node_count = 1 ip_allocation_policy { - use_ip_aliases = true - create_subnetwork = true - subnetwork_name = "tf-test-%s" cluster_ipv4_cidr_block = "/16" services_ipv4_cidr_block = "/22" - node_ipv4_cidr_block = "/22" } -}`, cluster, cluster, cluster) -} - -func testAccContainerCluster_withIPAllocationPolicy_createSubnetwork(cluster string) string { - return fmt.Sprintf(` -resource "google_compute_network" "container_network" { - name = "%s-network" - auto_create_subnetworks = false -} - -resource "google_container_cluster" "with_ip_allocation_policy" { - name = "%s" - location = "us-central1-a" - network = "${google_compute_network.container_network.name}" - - initial_node_count = 1 - - ip_allocation_policy { - use_ip_aliases = true - create_subnetwork = true - subnetwork_name = "%s-subnet" - cluster_ipv4_cidr_block = "10.0.0.0/16" - services_ipv4_cidr_block = "10.1.0.0/16" - node_ipv4_cidr_block = "10.2.0.0/16" - } -}`, cluster, cluster, cluster) 
-} - -func testAccContainerCluster_withIPAllocationPolicy_createSubnetworkUpdated(cluster string) string { - return fmt.Sprintf(` -resource "google_compute_network" "container_network" { - name = "%s-network" - auto_create_subnetworks = false -} - -resource "google_container_cluster" "with_ip_allocation_policy" { - name = "%s" - location = "us-central1-a" - network = "${google_compute_network.container_network.name}" - subnetwork = "%s-subnet" - - initial_node_count = 1 - - ip_allocation_policy { - use_ip_aliases = true - cluster_ipv4_cidr_block = "10.0.0.0/16" - services_ipv4_cidr_block = "10.1.0.0/16" - } -}`, cluster, cluster, cluster) -} - -func testAccContainerCluster_withIPAllocationPolicy_explicitEmpty(cluster string) string { - return fmt.Sprintf(` -resource "google_compute_network" "container_network" { - name = "%s-network" - auto_create_subnetworks = false -} - -resource "google_container_cluster" "with_ip_allocation_policy" { - name = "%s" - zone = "us-central1-a" - - initial_node_count = 1 - - ip_allocation_policy = [] }`, cluster, cluster) } @@ -2282,7 +2042,7 @@ resource "google_compute_subnetwork" "container_subnetwork" { name = "${google_compute_network.container_network.name}" network = "${google_compute_network.container_network.name}" ip_cidr_range = "10.0.36.0/24" - region = "us-central1" + region = "us-central1" private_ip_google_access = true secondary_ip_range { @@ -2297,8 +2057,8 @@ resource "google_compute_subnetwork" "container_subnetwork" { } resource "google_container_cluster" "with_private_cluster" { - name = "cluster-test-%s" - zone = "us-central1-a" + name = "cluster-test-%s" + location = "us-central1-a" initial_node_count = 1 network = "${google_compute_network.container_network.name}" @@ -2327,7 +2087,7 @@ resource "google_compute_subnetwork" "container_subnetwork" { name = "${google_compute_network.container_network.name}" network = "${google_compute_network.container_network.name}" ip_cidr_range = "10.0.36.0/24" - region = "us-central1" + region = "us-central1" private_ip_google_access = true secondary_ip_range { @@ -2342,8 +2102,8 @@ resource "google_compute_subnetwork" "container_subnetwork" { } resource "google_container_cluster" "with_private_cluster" { - name = "cluster-test-%s" - zone = "us-central1-a" + name = "cluster-test-%s" + location = "us-central1-a" initial_node_count = 1 network = "${google_compute_network.container_network.name}" @@ -2376,8 +2136,8 @@ resource "google_compute_subnetwork" "container_subnetwork" { } resource "google_container_cluster" "cidr_error_preempt" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" network = "${google_compute_network.container_network.name}" subnetwork = "${google_compute_subnetwork.container_subnetwork.name}" @@ -2397,8 +2157,8 @@ func testAccContainerCluster_withCIDROverlap(initConfig, secondCluster string) s %s resource "google_container_cluster" "cidr_error_overlap" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" network = "${google_compute_network.container_network.name}" subnetwork = "${google_compute_subnetwork.container_subnetwork.name}" @@ -2416,8 +2176,8 @@ resource "google_container_cluster" "cidr_error_overlap" { func testAccContainerCluster_withInvalidLocation(location string) string { return fmt.Sprintf(` resource "google_container_cluster" "with_resource_labels" { - name = "invalid-gke-cluster" - zone = "%s" + name = "invalid-gke-cluster" + location = "%s" initial_node_count = 1 } `, location) @@ -2426,45 +2186,47 @@ 
resource "google_container_cluster" "with_resource_labels" { func testAccContainerCluster_withMasterAuthorizedNetworksDisabled(clusterName string) string { return fmt.Sprintf(` resource "google_compute_network" "container_network" { - name = "container-net-%s" - auto_create_subnetworks = false + name = "container-net-%s" + auto_create_subnetworks = false } resource "google_compute_subnetwork" "container_subnetwork" { - name = "${google_compute_network.container_network.name}" - network = "${google_compute_network.container_network.name}" - ip_cidr_range = "10.0.36.0/24" - region = "us-central1" - private_ip_google_access = true - - secondary_ip_range { - range_name = "pod" - ip_cidr_range = "10.0.0.0/19" - } + name = google_compute_network.container_network.name + network = google_compute_network.container_network.name + ip_cidr_range = "10.0.36.0/24" + region = "us-central1" + private_ip_google_access = true + + secondary_ip_range { + range_name = "pod" + ip_cidr_range = "10.0.0.0/19" + } - secondary_ip_range { - range_name = "svc" - ip_cidr_range = "10.0.32.0/22" - } + secondary_ip_range { + range_name = "svc" + ip_cidr_range = "10.0.32.0/22" + } } resource "google_container_cluster" "with_private_cluster" { - name = "cluster-test-%s" - zone = "us-central1-a" - initial_node_count = 1 + name = "cluster-test-%s" + location = "us-central1-a" + initial_node_count = 1 - network = "${google_compute_network.container_network.name}" - subnetwork = "${google_compute_subnetwork.container_subnetwork.name}" + network = google_compute_network.container_network.name + subnetwork = google_compute_subnetwork.container_subnetwork.name - private_cluster_config { - enable_private_endpoint = false - enable_private_nodes = true - master_ipv4_cidr_block = "10.42.0.0/28" - } + private_cluster_config { + enable_private_endpoint = false + enable_private_nodes = true + master_ipv4_cidr_block = "10.42.0.0/28" + } - ip_allocation_policy { - cluster_secondary_range_name = "${google_compute_subnetwork.container_subnetwork.secondary_ip_range.0.range_name}" - services_secondary_range_name = "${google_compute_subnetwork.container_subnetwork.secondary_ip_range.1.range_name}" - } -}`, clusterName, clusterName) + ip_allocation_policy { + cluster_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[0].range_name + services_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[1].range_name + } +} + +`, clusterName, clusterName) } diff --git a/google/resource_container_node_pool.go b/google/resource_container_node_pool.go index 00679fdc9cd..8dd913224bf 100644 --- a/google/resource_container_node_pool.go +++ b/google/resource_container_node_pool.go @@ -53,18 +53,14 @@ func resourceContainerNodePool() *schema.Resource { ForceNew: true, }, "zone": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Deprecated: "use location instead", - ForceNew: true, + Type: schema.TypeString, + Optional: true, + Removed: "use location instead", }, "region": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Deprecated: "use location instead", - ForceNew: true, + Type: schema.TypeString, + Optional: true, + Removed: "use location instead", }, "location": { Type: schema.TypeString, @@ -260,7 +256,7 @@ func resourceContainerNodePoolCreate(d *schema.ResourceData, meta interface{}) e } timeout -= time.Since(startTime) - d.SetId(fmt.Sprintf("%s/%s/%s", nodePoolInfo.location, nodePoolInfo.cluster, nodePool.Name)) + 
d.SetId(fmt.Sprintf("projects/%s/locations/%s/clusters/%s/nodePools/%s", nodePoolInfo.project, nodePoolInfo.location, nodePoolInfo.cluster, nodePool.Name)) waitErr := containerOperationWait(config, operation, nodePoolInfo.project, @@ -314,12 +310,6 @@ func resourceContainerNodePoolRead(d *schema.ResourceData, meta interface{}) err d.Set(k, v) } - if isZone(nodePoolInfo.location) { - d.Set("zone", nodePoolInfo.location) - } else { - d.Set("region", nodePoolInfo.location) - } - d.Set("location", nodePoolInfo.location) d.Set("project", nodePoolInfo.project) @@ -415,39 +405,16 @@ func resourceContainerNodePoolExists(d *schema.ResourceData, meta interface{}) ( } func resourceContainerNodePoolStateImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - parts := strings.Split(d.Id(), "/") - - switch len(parts) { - case 3: - location := parts[0] - if isZone(location) { - d.Set("zone", location) - } else { - d.Set("region", location) - } - - d.Set("location", location) - d.Set("cluster", parts[1]) - d.Set("name", parts[2]) - case 4: - d.Set("project", parts[0]) - - location := parts[1] - if isZone(location) { - d.Set("zone", location) - } else { - d.Set("region", location) - } - - d.Set("location", location) - d.Set("cluster", parts[2]) - d.Set("name", parts[3]) + config := meta.(*Config) + if err := parseImportId([]string{"projects/(?P<project>[^/]+)/locations/(?P<location>[^/]+)/clusters/(?P<cluster>[^/]+)/nodePools/(?P<name>[^/]+)", "(?P<project>[^/]+)/(?P<location>[^/]+)/(?P<cluster>[^/]+)/(?P<name>[^/]+)", "(?P<location>[^/]+)/(?P<cluster>[^/]+)/(?P<name>[^/]+)"}, d, config); err != nil { + return nil, err + } - // override the inputted ID with the <location>/<cluster>/<name> format - d.SetId(strings.Join(parts[1:], "/")) - default: - return nil, fmt.Errorf("Invalid container cluster specifier. Expecting {location}/{cluster}/{name} or {project}/{location}/{cluster}/{name}") + id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/clusters/{{cluster}}/nodePools/{{name}}") + if err != nil { + return nil, err } + d.SetId(id) return []*schema.ResourceData{d}, nil } @@ -763,5 +730,6 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node func getNodePoolName(id string) string { // name can be specified with name, name_prefix, or neither, so read it from the id.
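// Standalone sketch of the import flow above; nodePoolIdFormats and
// canonicalNodePoolId are invented names, and fmt/regexp are assumed to be
// imported. parseImportId tries each accepted ID format and sets the captured
// fields, then replaceVars rebuilds the canonical ID; the equivalent logic is:
var nodePoolIdFormats = []*regexp.Regexp{
	regexp.MustCompile(`^projects/(?P<project>[^/]+)/locations/(?P<location>[^/]+)/clusters/(?P<cluster>[^/]+)/nodePools/(?P<name>[^/]+)$`),
	regexp.MustCompile(`^(?P<project>[^/]+)/(?P<location>[^/]+)/(?P<cluster>[^/]+)/(?P<name>[^/]+)$`),
	regexp.MustCompile(`^(?P<location>[^/]+)/(?P<cluster>[^/]+)/(?P<name>[^/]+)$`),
}

func canonicalNodePoolId(id, defaultProject string) (string, error) {
	for _, re := range nodePoolIdFormats {
		m := re.FindStringSubmatch(id)
		if m == nil {
			continue
		}
		vals := map[string]string{"project": defaultProject} // fall back to the provider project
		for i, k := range re.SubexpNames() {
			if k != "" && m[i] != "" {
				vals[k] = m[i]
			}
		}
		return fmt.Sprintf("projects/%s/locations/%s/clusters/%s/nodePools/%s",
			vals["project"], vals["location"], vals["cluster"], vals["name"]), nil
	}
	return "", fmt.Errorf("invalid node pool id %q", id)
}

// e.g. canonicalNodePoolId("us-central1-a/c1/np1", "my-proj") returns
// "projects/my-proj/locations/us-central1-a/clusters/c1/nodePools/np1".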
- return strings.Split(id, "/")[2] + splits := strings.Split(id, "/") + return splits[len(splits)-1] } diff --git a/google/resource_container_node_pool_test.go b/google/resource_container_node_pool_test.go index 548c512af8d..bf4c7d96d45 100644 --- a/google/resource_container_node_pool_test.go +++ b/google/resource_container_node_pool_test.go @@ -546,17 +546,17 @@ func testAccCheckContainerNodePoolDestroy(s *terraform.State) error { } attributes := rs.Primary.Attributes - zone := attributes["zone"] + location := attributes["location"] var err error - if zone != "" { + if location != "" { _, err = config.clientContainer.Projects.Zones.Clusters.NodePools.Get( - config.Project, attributes["zone"], attributes["cluster"], attributes["name"]).Do() + config.Project, attributes["location"], attributes["cluster"], attributes["name"]).Do() } else { name := fmt.Sprintf( "projects/%s/locations/%s/clusters/%s/nodePools/%s", config.Project, - attributes["region"], + attributes["location"], attributes["cluster"], attributes["name"], ) @@ -613,8 +613,8 @@ resource "google_compute_subnetwork" "container_subnetwork" { } resource "google_container_cluster" "cluster" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" initial_node_count = 3 network = "${google_compute_network.container_network.name}" @@ -635,8 +635,8 @@ resource "google_container_cluster" "cluster" { } resource "google_container_node_pool" "np" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" cluster = "${google_container_cluster.cluster.name}" max_pods_per_node = 30 initial_node_count = 2 @@ -662,13 +662,13 @@ resource "google_container_node_pool" "np" { func testAccContainerNodePool_namePrefix(cluster, np string) string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" initial_node_count = 3 } resource "google_container_node_pool" "np" { name_prefix = "%s" - zone = "us-central1-a" + location = "us-central1-a" cluster = "${google_container_cluster.cluster.name}" initial_node_count = 2 }`, cluster, np) @@ -677,13 +677,13 @@ resource "google_container_node_pool" "np" { func testAccContainerNodePool_noName(cluster string) string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" initial_node_count = 3 } resource "google_container_node_pool" "np" { - zone = "us-central1-a" + location = "us-central1-a" cluster = "${google_container_cluster.cluster.name}" initial_node_count = 2 }`, cluster) @@ -692,14 +692,14 @@ resource "google_container_node_pool" "np" { func testAccContainerNodePool_regionalAutoscaling(cluster, np string) string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { - name = "%s" - region = "us-central1" + name = "%s" + location = "us-central1" initial_node_count = 3 } resource "google_container_node_pool" "np" { - name = "%s" - region = "us-central1" + name = "%s" + location = "us-central1" cluster = "${google_container_cluster.cluster.name}" initial_node_count = 2 autoscaling { @@ -712,14 +712,14 @@ resource "google_container_node_pool" "np" { func testAccContainerNodePool_autoscaling(cluster, np string) string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" initial_node_count = 3 } resource "google_container_node_pool" "np" { - name = "%s" 
- zone = "us-central1-a" + name = "%s" + location = "us-central1-a" cluster = "${google_container_cluster.cluster.name}" initial_node_count = 2 autoscaling { @@ -732,14 +732,14 @@ resource "google_container_node_pool" "np" { func testAccContainerNodePool_updateAutoscaling(cluster, np string) string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" initial_node_count = 3 } resource "google_container_node_pool" "np" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" cluster = "${google_container_cluster.cluster.name}" initial_node_count = 2 autoscaling { @@ -749,23 +749,22 @@ resource "google_container_node_pool" "np" { }`, cluster, np) } -// This uses zone/additional_zones over location/node_locations to ensure we can update from old -> new func testAccContainerNodePool_additionalZones(cluster, nodePool string) string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" initial_node_count = 1 - additional_zones = [ + node_locations = [ "us-central1-b", "us-central1-c" ] } resource "google_container_node_pool" "np" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" cluster = "${google_container_cluster.cluster.name}" node_count = 2 }`, cluster, nodePool) @@ -796,13 +795,13 @@ func testAccContainerNodePool_withManagement(cluster, nodePool, management strin return fmt.Sprintf(` resource "google_container_cluster" "cluster" { name = "%s" - zone = "us-central1-a" + location = "us-central1-a" initial_node_count = 1 } resource "google_container_node_pool" "np_with_management" { name = "%s" - zone = "us-central1-a" + location = "us-central1-a" cluster = "${google_container_cluster.cluster.name}" initial_node_count = 1 @@ -819,13 +818,13 @@ resource "google_container_node_pool" "np_with_management" { func testAccContainerNodePool_withNodeConfig(cluster, nodePool string) string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" initial_node_count = 1 } resource "google_container_node_pool" "np_with_node_config" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" cluster = "${google_container_cluster.cluster.name}" initial_node_count = 1 node_config { @@ -861,13 +860,13 @@ resource "google_container_node_pool" "np_with_node_config" { func testAccContainerNodePool_withNodeConfigUpdate(cluster, nodePool string) string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" initial_node_count = 1 } resource "google_container_node_pool" "np_with_node_config" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" cluster = "${google_container_cluster.cluster.name}" initial_node_count = 1 node_config { @@ -903,20 +902,20 @@ resource "google_container_node_pool" "np_with_node_config" { func testAccContainerNodePool_withGPU() string { return fmt.Sprintf(` data "google_container_engine_versions" "central1c" { - zone = "us-central1-c" + location = "us-central1-c" } resource "google_container_cluster" "cluster" { - name = "tf-cluster-nodepool-test-%s" - zone = "us-central1-c" + name = "tf-cluster-nodepool-test-%s" + location = "us-central1-c" initial_node_count = 1 node_version = 
"${data.google_container_engine_versions.central1c.latest_node_version}" min_master_version = "${data.google_container_engine_versions.central1c.latest_master_version}" } resource "google_container_node_pool" "np_with_gpu" { - name = "tf-nodepool-test-%s" - zone = "us-central1-c" + name = "tf-nodepool-test-%s" + location = "us-central1-c" cluster = "${google_container_cluster.cluster.name}" initial_node_count = 1 @@ -949,13 +948,13 @@ resource "google_container_node_pool" "np_with_gpu" { func testAccContainerNodePool_withNodeConfigScopeAlias() string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { - name = "tf-cluster-nodepool-test-%s" - zone = "us-central1-a" + name = "tf-cluster-nodepool-test-%s" + location = "us-central1-a" initial_node_count = 1 } resource "google_container_node_pool" "np_with_node_config_scope_alias" { name = "tf-nodepool-test-%s" - zone = "us-central1-a" + location = "us-central1-a" cluster = "${google_container_cluster.cluster.name}" initial_node_count = 1 node_config { @@ -969,19 +968,19 @@ resource "google_container_node_pool" "np_with_node_config_scope_alias" { func testAccContainerNodePool_version(cluster, np string) string { return fmt.Sprintf(` data "google_container_engine_versions" "central1a" { - zone = "us-central1-a" + location = "us-central1-a" } resource "google_container_cluster" "cluster" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" initial_node_count = 1 min_master_version = "${data.google_container_engine_versions.central1a.latest_master_version}" } resource "google_container_node_pool" "np" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" cluster = "${google_container_cluster.cluster.name}" initial_node_count = 1 @@ -992,19 +991,19 @@ resource "google_container_node_pool" "np" { func testAccContainerNodePool_updateVersion(cluster, np string) string { return fmt.Sprintf(` data "google_container_engine_versions" "central1a" { - zone = "us-central1-a" + location = "us-central1-a" } resource "google_container_cluster" "cluster" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" initial_node_count = 1 min_master_version = "${data.google_container_engine_versions.central1a.latest_master_version}" } resource "google_container_node_pool" "np" { - name = "%s" - zone = "us-central1-a" + name = "%s" + location = "us-central1-a" cluster = "${google_container_cluster.cluster.name}" initial_node_count = 1 @@ -1016,13 +1015,13 @@ func testAccContainerNodePool_012_ConfigModeAttr1(cluster, np string) string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { name = "%s" - zone = "us-central1-f" + location = "us-central1-f" initial_node_count = 3 } resource "google_container_node_pool" "np" { name = "%s" - zone = "us-central1-f" + location = "us-central1-f" cluster = "${google_container_cluster.cluster.name}" initial_node_count = 1 @@ -1039,13 +1038,13 @@ func testAccContainerNodePool_012_ConfigModeAttr2(cluster, np string) string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { name = "%s" - zone = "us-central1-f" + location = "us-central1-f" initial_node_count = 3 } resource "google_container_node_pool" "np" { name = "%s" - zone = "us-central1-f" + location = "us-central1-f" cluster = "${google_container_cluster.cluster.name}" initial_node_count = 1 @@ -1059,13 +1058,13 @@ func testAccContainerNodePool_EmptyGuestAccelerator(cluster, np string) string { return fmt.Sprintf(` resource 
"google_container_cluster" "cluster" { name = "%s" - zone = "us-central1-f" + location = "us-central1-f" initial_node_count = 3 } resource "google_container_node_pool" "np" { name = "%s" - zone = "us-central1-f" + location = "us-central1-f" cluster = "${google_container_cluster.cluster.name}" initial_node_count = 1 @@ -1082,13 +1081,13 @@ func testAccContainerNodePool_PartialEmptyGuestAccelerator(cluster, np string, c return fmt.Sprintf(` resource "google_container_cluster" "cluster" { name = "%s" - zone = "us-central1-f" + location = "us-central1-f" initial_node_count = 3 } resource "google_container_node_pool" "np" { name = "%s" - zone = "us-central1-f" + location = "us-central1-f" cluster = "${google_container_cluster.cluster.name}" initial_node_count = 1 @@ -1110,13 +1109,13 @@ func testAccContainerNodePool_PartialEmptyGuestAccelerator2(cluster, np string) return fmt.Sprintf(` resource "google_container_cluster" "cluster" { name = "%s" - zone = "us-central1-f" + location = "us-central1-f" initial_node_count = 3 } resource "google_container_node_pool" "np" { name = "%s" - zone = "us-central1-f" + location = "us-central1-f" cluster = "${google_container_cluster.cluster.name}" initial_node_count = 1 diff --git a/google/resource_dataproc_cluster.go b/google/resource_dataproc_cluster.go index 46a1c694a31..e4db30de6f3 100644 --- a/google/resource_dataproc_cluster.go +++ b/google/resource_dataproc_cluster.go @@ -15,7 +15,43 @@ import ( dataproc "google.golang.org/api/dataproc/v1beta2" ) -var resolveDataprocImageVersion = regexp.MustCompile(`(?P[^\s.-]+)\.(?P[^\s.-]+)(?:\.(?P[^\s.-]+))?(?:\-(?P[^\s.-]+))?`) +var ( + resolveDataprocImageVersion = regexp.MustCompile(`(?P[^\s.-]+)\.(?P[^\s.-]+)(?:\.(?P[^\s.-]+))?(?:\-(?P[^\s.-]+))?`) + + gceClusterConfigKeys = []string{ + "cluster_config.0.gce_cluster_config.0.zone", + "cluster_config.0.gce_cluster_config.0.network", + "cluster_config.0.gce_cluster_config.0.subnetwork", + "cluster_config.0.gce_cluster_config.0.tags", + "cluster_config.0.gce_cluster_config.0.service_account", + "cluster_config.0.gce_cluster_config.0.service_account_scopes", + "cluster_config.0.gce_cluster_config.0.internal_ip_only", + "cluster_config.0.gce_cluster_config.0.metadata", + } + + preemptibleWorkerDiskConfigKeys = []string{ + "cluster_config.0.preemptible_worker_config.0.disk_config.0.num_local_ssds", + "cluster_config.0.preemptible_worker_config.0.disk_config.0.boot_disk_size_gb", + "cluster_config.0.preemptible_worker_config.0.disk_config.0.boot_disk_type", + } + + clusterSoftwareConfigKeys = []string{ + "cluster_config.0.software_config.0.image_version", + "cluster_config.0.software_config.0.override_properties", + "cluster_config.0.software_config.0.optional_components", + } + + clusterConfigKeys = []string{ + "cluster_config.0.staging_bucket", + "cluster_config.0.gce_cluster_config", + "cluster_config.0.master_config", + "cluster_config.0.worker_config", + "cluster_config.0.preemptible_worker_config", + "cluster_config.0.software_config", + "cluster_config.0.initialization_action", + "cluster_config.0.encryption_config", + } +) func resourceDataprocCluster() *schema.Resource { return &schema.Resource{ @@ -90,18 +126,11 @@ func resourceDataprocCluster() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "delete_autogen_bucket": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Removed: "If you need a bucket that can be deleted, please create" + - "a new one and set the `staging_bucket` field", - }, - "staging_bucket": { - 
Type: schema.TypeString, - Optional: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: clusterConfigKeys, + ForceNew: true, }, // If the user does not specify a staging bucket, GCP will allocate one automatically. // The staging_bucket field provides a way for the user to supply their own @@ -114,24 +143,27 @@ func resourceDataprocCluster() *schema.Resource { }, "gce_cluster_config": { - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, + Type: schema.TypeList, + Optional: true, + AtLeastOneOf: clusterConfigKeys, + Computed: true, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "zone": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + Computed: true, + AtLeastOneOf: gceClusterConfigKeys, + ForceNew: true, }, "network": { Type: schema.TypeString, Optional: true, Computed: true, + AtLeastOneOf: gceClusterConfigKeys, ForceNew: true, ConflictsWith: []string{"cluster_config.0.gce_cluster_config.0.subnetwork"}, DiffSuppressFunc: compareSelfLinkOrResourceName, @@ -140,29 +172,33 @@ func resourceDataprocCluster() *schema.Resource { "subnetwork": { Type: schema.TypeString, Optional: true, + AtLeastOneOf: gceClusterConfigKeys, ForceNew: true, ConflictsWith: []string{"cluster_config.0.gce_cluster_config.0.network"}, DiffSuppressFunc: compareSelfLinkOrResourceName, }, "tags": { - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, + Type: schema.TypeSet, + Optional: true, + AtLeastOneOf: gceClusterConfigKeys, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, }, "service_account": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: gceClusterConfigKeys, + ForceNew: true, }, "service_account_scopes": { - Type: schema.TypeSet, - Optional: true, - Computed: true, - ForceNew: true, + Type: schema.TypeSet, + Optional: true, + Computed: true, + AtLeastOneOf: gceClusterConfigKeys, + ForceNew: true, Elem: &schema.Schema{ Type: schema.TypeString, StateFunc: func(v interface{}) string { @@ -173,36 +209,43 @@ func resourceDataprocCluster() *schema.Resource { }, "internal_ip_only": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Default: false, + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: gceClusterConfigKeys, + ForceNew: true, + Default: false, }, "metadata": { - Type: schema.TypeMap, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - ForceNew: true, + Type: schema.TypeMap, + Optional: true, + AtLeastOneOf: gceClusterConfigKeys, + Elem: &schema.Schema{Type: schema.TypeString}, + ForceNew: true, }, }, }, }, - "master_config": instanceConfigSchema(), - "worker_config": instanceConfigSchema(), + "master_config": instanceConfigSchema("master_config"), + "worker_config": instanceConfigSchema("worker_config"), // preemptible_worker_config has a slightly different config "preemptible_worker_config": { - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, + Type: schema.TypeList, + Optional: true, + AtLeastOneOf: clusterConfigKeys, + Computed: true, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "num_instances": { Type: schema.TypeInt, Optional: true, Computed: true, + AtLeastOneOf: []string{ + "cluster_config.0.preemptible_worker_config.0.num_instances", + "cluster_config.0.preemptible_worker_config.0.disk_config", + 
}, }, // API does not honour this if set ... @@ -212,21 +255,27 @@ func resourceDataprocCluster() *schema.Resource { Type: schema.TypeList, Optional: true, Computed: true, + AtLeastOneOf: []string{ + "cluster_config.0.preemptible_worker_config.0.num_instances", + "cluster_config.0.preemptible_worker_config.0.disk_config", + }, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "num_local_ssds": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - ForceNew: true, + Type: schema.TypeInt, + Optional: true, + Computed: true, + AtLeastOneOf: preemptibleWorkerDiskConfigKeys, + ForceNew: true, }, "boot_disk_size_gb": { Type: schema.TypeInt, Optional: true, Computed: true, + AtLeastOneOf: preemptibleWorkerDiskConfigKeys, ForceNew: true, ValidateFunc: validation.IntAtLeast(10), }, @@ -234,6 +283,7 @@ func resourceDataprocCluster() *schema.Resource { "boot_disk_type": { Type: schema.TypeString, Optional: true, + AtLeastOneOf: preemptibleWorkerDiskConfigKeys, ForceNew: true, ValidateFunc: validation.StringInSlice([]string{"pd-standard", "pd-ssd", ""}, false), Default: "pd-standard", @@ -252,10 +302,11 @@ func resourceDataprocCluster() *schema.Resource { }, "software_config": { - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, + Type: schema.TypeList, + Optional: true, + AtLeastOneOf: clusterConfigKeys, + Computed: true, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -263,15 +314,17 @@ func resourceDataprocCluster() *schema.Resource { Type: schema.TypeString, Optional: true, Computed: true, + AtLeastOneOf: clusterSoftwareConfigKeys, ForceNew: true, DiffSuppressFunc: dataprocImageVersionDiffSuppress, }, "override_properties": { - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, + Type: schema.TypeMap, + Optional: true, + AtLeastOneOf: clusterSoftwareConfigKeys, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, }, "properties": { @@ -289,8 +342,9 @@ func resourceDataprocCluster() *schema.Resource { // is overridden, this will be empty. "optional_components": { - Type: schema.TypeSet, - Optional: true, + Type: schema.TypeSet, + Optional: true, + AtLeastOneOf: clusterSoftwareConfigKeys, Elem: &schema.Schema{ Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"COMPONENT_UNSPECIFIED", "ANACONDA", "DRUID", "HIVE_WEBHCAT", @@ -302,9 +356,10 @@ func resourceDataprocCluster() *schema.Resource { }, "initialization_action": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, + Type: schema.TypeList, + Optional: true, + AtLeastOneOf: clusterConfigKeys, + ForceNew: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "script": { @@ -323,9 +378,10 @@ func resourceDataprocCluster() *schema.Resource { }, }, "encryption_config": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, + Type: schema.TypeList, + Optional: true, + AtLeastOneOf: clusterConfigKeys, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "kms_key_name": { @@ -342,39 +398,52 @@ func resourceDataprocCluster() *schema.Resource { } } -func instanceConfigSchema() *schema.Schema { +func instanceConfigSchema(parent string) *schema.Schema { + var instanceConfigKeys = []string{ + "cluster_config.0." + parent + ".0.num_instances", + "cluster_config.0." + parent + ".0.image_uri", + "cluster_config.0." + parent + ".0.machine_type", + "cluster_config.0." + parent + ".0.disk_config", + "cluster_config.0." 
+ parent + ".0.accelerators", + } + return &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: clusterConfigKeys, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "num_instances": { - Type: schema.TypeInt, - Optional: true, - Computed: true, + Type: schema.TypeInt, + Optional: true, + Computed: true, + AtLeastOneOf: instanceConfigKeys, }, "image_uri": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + Computed: true, + AtLeastOneOf: instanceConfigKeys, + ForceNew: true, }, "machine_type": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + Computed: true, + AtLeastOneOf: instanceConfigKeys, + ForceNew: true, }, "disk_config": { - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: instanceConfigKeys, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -382,20 +451,35 @@ func instanceConfigSchema() *schema.Schema { Type: schema.TypeInt, Optional: true, Computed: true, + AtLeastOneOf: []string{ + "cluster_config.0." + parent + ".0.disk_config.0.num_local_ssds", + "cluster_config.0." + parent + ".0.disk_config.0.boot_disk_size_gb", + "cluster_config.0." + parent + ".0.disk_config.0.boot_disk_type", + }, ForceNew: true, }, "boot_disk_size_gb": { - Type: schema.TypeInt, - Optional: true, - Computed: true, + Type: schema.TypeInt, + Optional: true, + Computed: true, + AtLeastOneOf: []string{ + "cluster_config.0." + parent + ".0.disk_config.0.num_local_ssds", + "cluster_config.0." + parent + ".0.disk_config.0.boot_disk_size_gb", + "cluster_config.0." + parent + ".0.disk_config.0.boot_disk_type", + }, ForceNew: true, ValidateFunc: validation.IntAtLeast(10), }, "boot_disk_type": { - Type: schema.TypeString, - Optional: true, + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: []string{ + "cluster_config.0." + parent + ".0.disk_config.0.num_local_ssds", + "cluster_config.0." + parent + ".0.disk_config.0.boot_disk_size_gb", + "cluster_config.0." 
+ parent + ".0.disk_config.0.boot_disk_type", + }, ForceNew: true, ValidateFunc: validation.StringInSlice([]string{"pd-standard", "pd-ssd", ""}, false), Default: "pd-standard", @@ -406,10 +490,11 @@ func instanceConfigSchema() *schema.Schema { // Note: preemptible workers don't support accelerators "accelerators": { - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Elem: acceleratorsSchema(), + Type: schema.TypeSet, + Optional: true, + AtLeastOneOf: instanceConfigKeys, + ForceNew: true, + Elem: acceleratorsSchema(), }, "instance_names": { @@ -477,7 +562,7 @@ func resourceDataprocClusterCreate(d *schema.ResourceData, meta interface{}) err return fmt.Errorf("Error creating Dataproc cluster: %s", err) } - d.SetId(cluster.ClusterName) + d.SetId(fmt.Sprintf("projects/%s/regions/%s/clusters/%s", project, region, cluster.ClusterName)) // Wait until it's created timeoutInMinutes := int(d.Timeout(schema.TimeoutCreate).Minutes()) diff --git a/google/resource_dataproc_cluster_test.go b/google/resource_dataproc_cluster_test.go index a5c29afb9a5..3981c6f6e88 100644 --- a/google/resource_dataproc_cluster_test.go +++ b/google/resource_dataproc_cluster_test.go @@ -6,6 +6,7 @@ import ( "reflect" "regexp" "strconv" + "strings" "testing" "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" @@ -623,8 +624,10 @@ func testAccCheckDataprocClusterDestroy() resource.TestCheckFunc { return err } + parts := strings.Split(rs.Primary.ID, "/") + clusterId := parts[len(parts)-1] _, err = config.clientDataprocBeta.Projects.Regions.Clusters.Get( - project, attributes["region"], rs.Primary.ID).Do() + project, attributes["region"], clusterId).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == http.StatusNotFound { @@ -779,14 +782,16 @@ func testAccCheckDataprocClusterExists(n string, cluster *dataproc.Cluster) reso return err } + parts := strings.Split(rs.Primary.ID, "/") + clusterId := parts[len(parts)-1] found, err := config.clientDataprocBeta.Projects.Regions.Clusters.Get( - project, rs.Primary.Attributes["region"], rs.Primary.ID).Do() + project, rs.Primary.Attributes["region"], clusterId).Do() if err != nil { return err } - if found.ClusterName != rs.Primary.ID { - return fmt.Errorf("Dataproc cluster %s not found, found %s instead", rs.Primary.ID, cluster.ClusterName) + if found.ClusterName != clusterId { + return fmt.Errorf("Dataproc cluster %s not found, found %s instead", clusterId, cluster.ClusterName) } *cluster = *found diff --git a/google/resource_dataproc_job.go b/google/resource_dataproc_job.go index ffc9658738f..e4ced4e41b6 100644 --- a/google/resource_dataproc_job.go +++ b/google/resource_dataproc_job.go @@ -3,6 +3,7 @@ package google import ( "fmt" "log" + "strings" "time" "github.com/hashicorp/terraform-plugin-sdk/helper/schema" @@ -148,7 +149,7 @@ func resourceDataprocJob() *schema.Resource { "max_failures_per_hour": { Type: schema.TypeInt, Description: "Maximum number of times per hour a driver may be restarted as a result of driver terminating with non-zero code before job is reported failed.", - Optional: true, + Required: true, ForceNew: true, ValidateFunc: validation.IntAtMost(10), }, @@ -181,7 +182,6 @@ func resourceDataprocJobCreate(d *schema.ResourceData, meta interface{}) error { return err } - jobConfCount := 0 clusterName := d.Get("placement.0.cluster_name").(string) region := d.Get("region").(string) @@ -204,52 +204,42 @@ func resourceDataprocJobCreate(d *schema.ResourceData, meta interface{}) error { } if v, ok := d.GetOk("pyspark_config"); ok { - 
jobConfCount++ config := extractFirstMapConfig(v.([]interface{})) submitReq.Job.PysparkJob = expandPySparkJob(config) } if v, ok := d.GetOk("spark_config"); ok { - jobConfCount++ config := extractFirstMapConfig(v.([]interface{})) submitReq.Job.SparkJob = expandSparkJob(config) } if v, ok := d.GetOk("hadoop_config"); ok { - jobConfCount++ config := extractFirstMapConfig(v.([]interface{})) submitReq.Job.HadoopJob = expandHadoopJob(config) } if v, ok := d.GetOk("hive_config"); ok { - jobConfCount++ config := extractFirstMapConfig(v.([]interface{})) submitReq.Job.HiveJob = expandHiveJob(config) } if v, ok := d.GetOk("pig_config"); ok { - jobConfCount++ config := extractFirstMapConfig(v.([]interface{})) submitReq.Job.PigJob = expandPigJob(config) } if v, ok := d.GetOk("sparksql_config"); ok { - jobConfCount++ config := extractFirstMapConfig(v.([]interface{})) submitReq.Job.SparkSqlJob = expandSparkSqlJob(config) } - if jobConfCount != 1 { - return fmt.Errorf("You must define and configure exactly one xxx_config block") - } - // Submit the job job, err := config.clientDataproc.Projects.Regions.Jobs.Submit( project, region, submitReq).Do() if err != nil { return err } - d.SetId(job.Reference.JobId) + d.SetId(fmt.Sprintf("projects/%s/regions/%s/jobs/%s", project, region, job.Reference.JobId)) timeoutInMinutes := int(d.Timeout(schema.TimeoutCreate).Minutes()) waitErr := dataprocJobOperationWait(config, region, project, job.Reference.JobId, @@ -271,10 +261,12 @@ func resourceDataprocJobRead(d *schema.ResourceData, meta interface{}) error { return err } + parts := strings.Split(d.Id(), "/") + jobId := parts[len(parts)-1] job, err := config.clientDataproc.Projects.Regions.Jobs.Get( - project, region, d.Id()).Do() + project, region, jobId).Do() if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Dataproc Job %q", d.Id())) + return handleNotFoundError(err, d, fmt.Sprintf("Dataproc Job %q", jobId)) } d.Set("force_delete", d.Get("force_delete")) @@ -320,15 +312,17 @@ func resourceDataprocJobDelete(d *schema.ResourceData, meta interface{}) error { forceDelete := d.Get("force_delete").(bool) timeoutInMinutes := int(d.Timeout(schema.TimeoutDelete).Minutes()) + parts := strings.Split(d.Id(), "/") + jobId := parts[len(parts)-1] if forceDelete { log.Printf("[DEBUG] Attempting to first cancel Dataproc job %s if it's still running ...", d.Id()) // ignore error if we get one - job may be finished already and not need to // be cancelled. 
We do however wait for the state to be one that is // at least not active - _, _ = config.clientDataproc.Projects.Regions.Jobs.Cancel(project, region, d.Id(), &dataproc.CancelJobRequest{}).Do() + _, _ = config.clientDataproc.Projects.Regions.Jobs.Cancel(project, region, jobId, &dataproc.CancelJobRequest{}).Do() - waitErr := dataprocJobOperationWait(config, region, project, d.Id(), + waitErr := dataprocJobOperationWait(config, region, project, jobId, "Cancelling Dataproc job", timeoutInMinutes, 1) if waitErr != nil { return waitErr @@ -338,12 +332,12 @@ func resourceDataprocJobDelete(d *schema.ResourceData, meta interface{}) error { log.Printf("[DEBUG] Deleting Dataproc job %s", d.Id()) _, err = config.clientDataproc.Projects.Regions.Jobs.Delete( - project, region, d.Id()).Do() + project, region, jobId).Do() if err != nil { return err } - waitErr := dataprocDeleteOperationWait(config, region, project, d.Id(), + waitErr := dataprocDeleteOperationWait(config, region, project, jobId, "Deleting Dataproc job", timeoutInMinutes, 1) if waitErr != nil { return waitErr @@ -368,7 +362,7 @@ var loggingConfig = &schema.Schema{ "driver_log_levels": { Type: schema.TypeMap, Description: "Optional. The per-package log levels for the driver. This may include 'root' package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'.", - Optional: true, + Required: true, ForceNew: true, Elem: &schema.Schema{Type: schema.TypeString}, }, @@ -377,11 +371,11 @@ var loggingConfig = &schema.Schema{ } var pySparkSchema = &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - MaxItems: 1, - ConflictsWith: []string{"spark_config", "hadoop_config", "hive_config", "pig_config", "sparksql_config"}, + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + ExactlyOneOf: []string{"pyspark_config", "spark_config", "hadoop_config", "hive_config", "pig_config", "sparksql_config"}, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "main_python_file_uri": { @@ -494,26 +488,26 @@ func expandPySparkJob(config map[string]interface{}) *dataproc.PySparkJob { // ---- Spark Job ---- var sparkSchema = &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - MaxItems: 1, - ConflictsWith: []string{"pyspark_config", "hadoop_config", "hive_config", "pig_config", "sparksql_config"}, + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + ExactlyOneOf: []string{"pyspark_config", "spark_config", "hadoop_config", "hive_config", "pig_config", "sparksql_config"}, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ // main driver: can be only one of the class | jar_file "main_class": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ConflictsWith: []string{"spark_config.0.main_jar_file_uri"}, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ExactlyOneOf: []string{"spark_config.0.main_class", "spark_config.0.main_jar_file_uri"}, }, "main_jar_file_uri": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ConflictsWith: []string{"spark_config.0.main_class"}, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ExactlyOneOf: []string{"spark_config.0.main_jar_file_uri", "spark_config.0.main_class"}, }, "args": { @@ -607,26 +601,26 @@ func expandSparkJob(config map[string]interface{}) *dataproc.SparkJob { // ---- Hadoop Job ---- var hadoopSchema = &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - MaxItems: 1, - 
ConflictsWith: []string{"spark_config", "pyspark_config", "hive_config", "pig_config", "sparksql_config"}, + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + ExactlyOneOf: []string{"spark_config", "pyspark_config", "hadoop_config", "hive_config", "pig_config", "sparksql_config"}, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ // main driver: can be only one of the main_class | main_jar_file_uri "main_class": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ConflictsWith: []string{"hadoop_config.0.main_jar_file_uri"}, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ExactlyOneOf: []string{"hadoop_config.0.main_jar_file_uri", "hadoop_config.0.main_class"}, }, "main_jar_file_uri": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ConflictsWith: []string{"hadoop_config.0.main_class"}, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ExactlyOneOf: []string{"hadoop_config.0.main_jar_file_uri", "hadoop_config.0.main_class"}, }, "args": { @@ -720,27 +714,27 @@ func expandHadoopJob(config map[string]interface{}) *dataproc.HadoopJob { // ---- Hive Job ---- var hiveSchema = &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - MaxItems: 1, - ConflictsWith: []string{"spark_config", "pyspark_config", "hadoop_config", "pig_config", "sparksql_config"}, + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + ExactlyOneOf: []string{"spark_config", "pyspark_config", "hadoop_config", "hive_config", "pig_config", "sparksql_config"}, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ // main query: can be only one of query_list | query_file_uri "query_list": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - ConflictsWith: []string{"hive_config.0.query_file_uri"}, + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + ExactlyOneOf: []string{"hive_config.0.query_file_uri", "hive_config.0.query_list"}, }, "query_file_uri": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ConflictsWith: []string{"hive_config.0.query_list"}, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ExactlyOneOf: []string{"hive_config.0.query_file_uri", "hive_config.0.query_list"}, }, "continue_on_failure": { @@ -819,27 +813,27 @@ func expandHiveJob(config map[string]interface{}) *dataproc.HiveJob { // ---- Pig Job ---- var pigSchema = &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - MaxItems: 1, - ConflictsWith: []string{"spark_config", "pyspark_config", "hadoop_config", "hive_config", "sparksql_config"}, + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + ExactlyOneOf: []string{"spark_config", "pyspark_config", "hadoop_config", "hive_config", "pig_config", "sparksql_config"}, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ // main query: can be only one of query_list | query_file_uri "query_list": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - ConflictsWith: []string{"pig_config.0.query_file_uri"}, + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + ExactlyOneOf: []string{"pig_config.0.query_file_uri", "pig_config.0.query_list"}, }, "query_file_uri": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ConflictsWith: 
[]string{"pig_config.0.query_list"}, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ExactlyOneOf: []string{"pig_config.0.query_file_uri", "pig_config.0.query_list"}, }, "continue_on_failure": { @@ -921,27 +915,27 @@ func expandPigJob(config map[string]interface{}) *dataproc.PigJob { // ---- Spark SQL Job ---- var sparkSqlSchema = &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - MaxItems: 1, - ConflictsWith: []string{"spark_config", "pyspark_config", "hadoop_config", "hive_config", "pig_config"}, + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + ExactlyOneOf: []string{"spark_config", "pyspark_config", "hadoop_config", "hive_config", "pig_config", "sparksql_config"}, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ // main query: can be only one of query_list | query_file_uri "query_list": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - ConflictsWith: []string{"pig_config.0.query_file_uri"}, + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + ExactlyOneOf: []string{"sparksql_config.0.query_file_uri", "sparksql_config.0.query_list"}, }, "query_file_uri": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ConflictsWith: []string{"pig_config.0.query_list"}, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ExactlyOneOf: []string{"sparksql_config.0.query_file_uri", "sparksql_config.0.query_list"}, }, "script_variables": { diff --git a/google/resource_dataproc_job_test.go b/google/resource_dataproc_job_test.go index fcaa33bce42..b9f8bb04dff 100644 --- a/google/resource_dataproc_job_test.go +++ b/google/resource_dataproc_job_test.go @@ -7,7 +7,7 @@ import ( "strings" "testing" - "regexp" + // "regexp" "github.com/hashicorp/errwrap" "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" @@ -22,21 +22,22 @@ type jobTestField struct { gcp_attr interface{} } -func TestAccDataprocJob_failForMissingJobConfig(t *testing.T) { - t.Parallel() - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDataprocJobDestroy, - Steps: []resource.TestStep{ - { - Config: testAccDataprocJob_missingJobConf(), - ExpectError: regexp.MustCompile("You must define and configure exactly one xxx_config block"), - }, - }, - }) -} +// TODO (mbang): Test `ExactlyOneOf` here +// func TestAccDataprocJob_failForMissingJobConfig(t *testing.T) { +// t.Parallel() + +// resource.Test(t, resource.TestCase{ +// PreCheck: func() { testAccPreCheck(t) }, +// Providers: testAccProviders, +// CheckDestroy: testAccCheckDataprocJobDestroy, +// Steps: []resource.TestStep{ +// { +// Config: testAccDataprocJob_missingJobConf(), +// ExpectError: regexp.MustCompile("You must define and configure exactly one xxx_config block"), +// }, +// }, +// }) +// } func TestAccDataprocJob_updatable(t *testing.T) { t.Parallel() @@ -283,8 +284,10 @@ func testAccCheckDataprocJobDestroy(s *terraform.State) error { return err } + parts := strings.Split(rs.Primary.ID, "/") + job_id := parts[len(parts)-1] _, err = config.clientDataproc.Projects.Regions.Jobs.Get( - project, attributes["region"], rs.Primary.ID).Do() + project, attributes["region"], job_id).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { return nil @@ -367,7 +370,8 @@ func testAccCheckDataprocJobExists(n string, job *dataproc.Job) 
resource.TestChe } config := testAccProvider.Meta().(*Config) - jobId := s.RootModule().Resources[n].Primary.ID + parts := strings.Split(s.RootModule().Resources[n].Primary.ID, "/") + jobId := parts[len(parts)-1] project, err := getTestProject(s.RootModule().Resources[n].Primary, config) if err != nil { return err @@ -469,16 +473,17 @@ func testAccCheckDataprocJobAttrMatch(n, jobType string, job *dataproc.Job) reso } } -func testAccDataprocJob_missingJobConf() string { - return ` -resource "google_dataproc_job" "missing_config" { - placement { - cluster_name = "na" - } - - force_delete = true -}` -} +// TODO (mbang): Test `ExactlyOneOf` here +// func testAccDataprocJob_missingJobConf() string { +// return ` +// resource "google_dataproc_job" "missing_config" { +// placement { +// cluster_name = "na" +// } + +// force_delete = true +// }` +// } var singleNodeClusterConfig = ` resource "google_dataproc_cluster" "basic" { diff --git a/google/resource_dns_managed_zone.go b/google/resource_dns_managed_zone.go index 3b7740392dc..67473e8a7f1 100644 --- a/google/resource_dns_managed_zone.go +++ b/google/resource_dns_managed_zone.go @@ -116,13 +116,15 @@ to sign all other types of resource record sets.`, }, }, }, + AtLeastOneOf: []string{"dnssec_config.0.kind", "dnssec_config.0.non_existence", "dnssec_config.0.state", "dnssec_config.0.default_key_specs"}, }, "kind": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Identifies what kind of resource this is`, - Default: "dns#managedZoneDnsSecConfig", + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Identifies what kind of resource this is`, + Default: "dns#managedZoneDnsSecConfig", + AtLeastOneOf: []string{"dnssec_config.0.kind", "dnssec_config.0.non_existence", "dnssec_config.0.state", "dnssec_config.0.default_key_specs"}, }, "non_existence": { Type: schema.TypeString, @@ -131,6 +133,7 @@ to sign all other types of resource record sets.`, ForceNew: true, ValidateFunc: validation.StringInSlice([]string{"nsec", "nsec3", ""}, false), Description: `Specifies the mechanism used to provide authenticated denial-of-existence responses.`, + AtLeastOneOf: []string{"dnssec_config.0.kind", "dnssec_config.0.non_existence", "dnssec_config.0.state", "dnssec_config.0.default_key_specs"}, }, "state": { Type: schema.TypeString, @@ -138,6 +141,7 @@ to sign all other types of resource record sets.`, ForceNew: true, ValidateFunc: validation.StringInSlice([]string{"off", "on", "transfer", ""}, false), Description: `Specifies whether DNSSEC is enabled, and what mode it is in`, + AtLeastOneOf: []string{"dnssec_config.0.kind", "dnssec_config.0.non_existence", "dnssec_config.0.state", "dnssec_config.0.default_key_specs"}, }, }, }, @@ -158,7 +162,7 @@ resources that the zone is visible from.`, Schema: map[string]*schema.Schema{ "networks": { Type: schema.TypeSet, - Optional: true, + Required: true, Description: `The list of VPC networks that can see this zone. Until the provider updates to use the Terraform 0.12 SDK in a future release, you may experience issues with this resource while updating. 
If you've defined a 'networks' block and add another 'networks' block while keeping the old block, Terraform will see an incorrect diff @@ -216,7 +220,7 @@ func dnsManagedZonePrivateVisibilityConfigNetworksSchema() *schema.Resource { Schema: map[string]*schema.Schema{ "network_url": { Type: schema.TypeString, - Optional: true, + Required: true, DiffSuppressFunc: compareSelfLinkOrResourceName, Description: `The fully qualified URL of the VPC network to bind to. This should be formatted like @@ -289,7 +293,7 @@ func resourceDNSManagedZoneCreate(d *schema.ResourceData, meta interface{}) erro } // Store the ID now - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/managedZones/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -436,7 +440,7 @@ func resourceDNSManagedZoneImport(d *schema.ResourceData, meta interface{}) ([]* } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/managedZones/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/google/resource_dns_managed_zone_generated_test.go b/google/resource_dns_managed_zone_generated_test.go index 6ddcf12b526..248009736a9 100644 --- a/google/resource_dns_managed_zone_generated_test.go +++ b/google/resource_dns_managed_zone_generated_test.go @@ -51,8 +51,8 @@ func TestAccDNSManagedZone_dnsManagedZoneBasicExample(t *testing.T) { func testAccDNSManagedZone_dnsManagedZoneBasicExample(context map[string]interface{}) string { return Nprintf(` resource "google_dns_managed_zone" "example-zone" { - name = "example-zone" - dns_name = "example-${random_id.rnd.hex}.com." + name = "example-zone" + dns_name = "example-${random_id.rnd.hex}.com." description = "Example DNS zone" labels = { foo = "bar" @@ -92,8 +92,8 @@ func TestAccDNSManagedZone_dnsManagedZonePrivateExample(t *testing.T) { func testAccDNSManagedZone_dnsManagedZonePrivateExample(context map[string]interface{}) string { return Nprintf(` resource "google_dns_managed_zone" "private-zone" { - name = "private-zone%{random_suffix}" - dns_name = "private.example.com." + name = "private-zone%{random_suffix}" + dns_name = "private.example.com." 
description = "Example private DNS zone" labels = { foo = "bar" @@ -103,21 +103,21 @@ resource "google_dns_managed_zone" "private-zone" { private_visibility_config { networks { - network_url = "${google_compute_network.network-1.self_link}" + network_url = google_compute_network.network-1.self_link } networks { - network_url = "${google_compute_network.network-2.self_link}" + network_url = google_compute_network.network-2.self_link } } } resource "google_compute_network" "network-1" { - name = "network-1%{random_suffix}" + name = "network-1%{random_suffix}" auto_create_subnetworks = false } resource "google_compute_network" "network-2" { - name = "network-2%{random_suffix}" + name = "network-2%{random_suffix}" auto_create_subnetworks = false } `, context) diff --git a/google/resource_endpoints_service.go b/google/resource_endpoints_service.go index 0411a050c70..b1fd2458415 100644 --- a/google/resource_endpoints_service.go +++ b/google/resource_endpoints_service.go @@ -35,12 +35,6 @@ func resourceEndpointsService() *schema.Resource { Type: schema.TypeString, Optional: true, }, - "protoc_output": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Removed: "Please use protoc_output_base64 instead.", - }, "protoc_output_base64": { Type: schema.TypeString, Optional: true, diff --git a/google/resource_filestore_instance.go b/google/resource_filestore_instance.go index 2ee792a7ba3..78fc6c9654f 100644 --- a/google/resource_filestore_instance.go +++ b/google/resource_filestore_instance.go @@ -212,7 +212,7 @@ func resourceFilestoreInstanceCreate(d *schema.ResourceData, meta interface{}) e } // Store the ID now - id, err := replaceVars(d, config, "{{project}}/{{zone}}/{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/locations/{{zone}}/instances/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -394,7 +394,7 @@ func resourceFilestoreInstanceImport(d *schema.ResourceData, meta interface{}) ( } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{project}}/{{zone}}/{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/locations/{{zone}}/instances/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/google/resource_firestore_index_generated_test.go b/google/resource_firestore_index_generated_test.go index 1b4a8d892b2..ff0a506c1c5 100644 --- a/google/resource_firestore_index_generated_test.go +++ b/google/resource_firestore_index_generated_test.go @@ -53,7 +53,7 @@ func TestAccFirestoreIndex_firestoreIndexBasicExample(t *testing.T) { func testAccFirestoreIndex_firestoreIndexBasicExample(context map[string]interface{}) string { return Nprintf(` resource "google_firestore_index" "my-index" { - project = "%{project_id}" + project = "%{project_id}" collection = "chatrooms" diff --git a/google/resource_google_folder_organization_policy.go b/google/resource_google_folder_organization_policy.go index 275c6dc1273..6bed8a291b2 100644 --- a/google/resource_google_folder_organization_policy.go +++ b/google/resource_google_folder_organization_policy.go @@ -43,8 +43,8 @@ func resourceFolderOrgPolicyImporter(d *schema.ResourceData, meta interface{}) ( config := meta.(*Config) if err := parseImportId([]string{ - "folders/(?P[^/]+):constraints/(?P[^/]+)", - "(?P[^/]+):(?P[^/]+)"}, + "folders/(?P[^/]+)/constraints/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)"}, d, config); err != nil { return nil, err } @@ -59,7 +59,7 @@ func resourceFolderOrgPolicyImporter(d 
*schema.ResourceData, meta interface{}) ( } func resourceGoogleFolderOrganizationPolicyCreate(d *schema.ResourceData, meta interface{}) error { - d.SetId(fmt.Sprintf("%s:%s", d.Get("folder"), d.Get("constraint"))) + d.SetId(fmt.Sprintf("%s/%s", d.Get("folder"), d.Get("constraint"))) if isOrganizationPolicyUnset(d) { return resourceGoogleFolderOrganizationPolicyDelete(d, meta) diff --git a/google/resource_google_organization_policy.go b/google/resource_google_organization_policy.go index b322d9e9425..66a4c72ec32 100644 --- a/google/resource_google_organization_policy.go +++ b/google/resource_google_organization_policy.go @@ -17,10 +17,10 @@ var schemaOrganizationPolicy = map[string]*schema.Schema{ DiffSuppressFunc: compareSelfLinkOrResourceName, }, "boolean_policy": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - ConflictsWith: []string{"list_policy", "restore_policy"}, + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ExactlyOneOf: []string{"list_policy", "boolean_policy", "restore_policy"}, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "enforced": { @@ -31,10 +31,10 @@ var schemaOrganizationPolicy = map[string]*schema.Schema{ }, }, "list_policy": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - ConflictsWith: []string{"boolean_policy", "restore_policy"}, + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ExactlyOneOf: []string{"list_policy", "boolean_policy", "restore_policy"}, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "allow": { @@ -45,37 +45,40 @@ var schemaOrganizationPolicy = map[string]*schema.Schema{ Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "all": { - Type: schema.TypeBool, - Optional: true, - Default: false, - ConflictsWith: []string{"list_policy.0.allow.0.values"}, + Type: schema.TypeBool, + Optional: true, + Default: false, + ExactlyOneOf: []string{"list_policy.0.allow.0.all", "list_policy.0.allow.0.values"}, }, "values": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, + Type: schema.TypeSet, + Optional: true, + ExactlyOneOf: []string{"list_policy.0.allow.0.all", "list_policy.0.allow.0.values"}, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, }, }, }, }, "deny": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{"list_policy.0.allow"}, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "all": { - Type: schema.TypeBool, - Optional: true, - Default: false, - ConflictsWith: []string{"list_policy.0.deny.0.values"}, + Type: schema.TypeBool, + Optional: true, + Default: false, + ExactlyOneOf: []string{"list_policy.0.deny.0.all", "list_policy.0.deny.0.values"}, }, "values": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, + Type: schema.TypeSet, + Optional: true, + ExactlyOneOf: []string{"list_policy.0.deny.0.all", "list_policy.0.deny.0.values"}, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, }, }, }, @@ -106,10 +109,10 @@ var schemaOrganizationPolicy = map[string]*schema.Schema{ Computed: true, }, "restore_policy": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - ConflictsWith: []string{"boolean_policy", "list_policy"}, + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ExactlyOneOf: []string{"restore_policy", "boolean_policy", "list_policy"}, Elem: &schema.Resource{ Schema: 
map[string]*schema.Schema{ "default": { @@ -152,7 +155,7 @@ func resourceGoogleOrganizationPolicy() *schema.Resource { } func resourceGoogleOrganizationPolicyCreate(d *schema.ResourceData, meta interface{}) error { - d.SetId(fmt.Sprintf("%s:%s", d.Get("org_id"), d.Get("constraint").(string))) + d.SetId(fmt.Sprintf("%s/%s", d.Get("org_id"), d.Get("constraint").(string))) if isOrganizationPolicyUnset(d) { return resourceGoogleOrganizationPolicyDelete(d, meta) @@ -221,9 +224,9 @@ func resourceGoogleOrganizationPolicyDelete(d *schema.ResourceData, meta interfa } func resourceGoogleOrganizationPolicyImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - parts := strings.Split(d.Id(), ":") + parts := strings.SplitN(d.Id(), "/", 2) if len(parts) != 2 { - return nil, fmt.Errorf("Invalid id format. Expecting {org_id}:{constraint}, got '%s' instead.", d.Id()) + return nil, fmt.Errorf("Invalid id format. Expecting {org_id}/{constraint}, got '%s' instead.", d.Id()) } d.Set("org_id", parts[0]) diff --git a/google/resource_google_project.go b/google/resource_google_project.go index 5e03f413f2f..d6655836d17 100644 --- a/google/resource_google_project.go +++ b/google/resource_google_project.go @@ -1,6 +1,7 @@ package google import ( + "context" "fmt" "log" "regexp" @@ -13,6 +14,7 @@ import ( "google.golang.org/api/cloudbilling/v1" "google.golang.org/api/cloudresourcemanager/v1" "google.golang.org/api/googleapi" + "google.golang.org/api/serviceusage/v1" ) // resourceGoogleProject returns a *schema.Resource that allows a customer @@ -72,17 +74,6 @@ func resourceGoogleProject() *schema.Resource { Computed: true, StateFunc: parseFolderId, }, - "policy_data": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Removed: "Use the 'google_project_iam_policy' resource to define policies for a Google Project", - }, - "policy_etag": { - Type: schema.TypeString, - Computed: true, - Removed: "Use the the 'google_project_iam_policy' resource to define policies for a Google Project", - }, "number": { Type: schema.TypeString, Computed: true, @@ -96,109 +87,6 @@ func resourceGoogleProject() *schema.Resource { Optional: true, Elem: &schema.Schema{Type: schema.TypeString}, }, - "app_engine": { - Type: schema.TypeList, - Elem: appEngineResource(), - Computed: true, - Removed: "This field has been removed. Use the google_app_engine_application resource instead.", - }, - }, - } -} - -func appEngineResource() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "auth_domain": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Removed: "This field has been removed. Use the google_app_engine_application resource instead.", - }, - "location_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Removed: "This field has been removed. Use the google_app_engine_application resource instead.", - }, - "serving_status": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Removed: "This field has been removed. Use the google_app_engine_application resource instead.", - }, - "feature_settings": { - Type: schema.TypeList, - Optional: true, - Computed: true, - Removed: "This field has been removed. Use the google_app_engine_application resource instead.", - Elem: appEngineFeatureSettingsResource(), - }, - "name": { - Type: schema.TypeString, - Computed: true, - Removed: "This field has been removed. 
Use the google_app_engine_application resource instead.", - }, - "url_dispatch_rule": { - Type: schema.TypeList, - Computed: true, - Removed: "This field has been removed. Use the google_app_engine_application resource instead.", - Elem: appEngineURLDispatchRuleResource(), - }, - "code_bucket": { - Type: schema.TypeString, - Computed: true, - Removed: "This field has been removed. Use the google_app_engine_application resource instead.", - }, - "default_hostname": { - Type: schema.TypeString, - Computed: true, - Removed: "This field has been removed. Use the google_app_engine_application resource instead.", - }, - "default_bucket": { - Type: schema.TypeString, - Computed: true, - Removed: "This field has been removed. Use the google_app_engine_application resource instead.", - }, - "gcr_domain": { - Type: schema.TypeString, - Computed: true, - Removed: "This field has been removed. Use the google_app_engine_application resource instead.", - }, - }, - } -} - -func appEngineURLDispatchRuleResource() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "domain": { - Type: schema.TypeString, - Computed: true, - Removed: "This field has been removed. Use the google_app_engine_application resource instead.", - }, - "path": { - Type: schema.TypeString, - Computed: true, - Removed: "This field has been removed. Use the google_app_engine_application resource instead.", - }, - "service": { - Type: schema.TypeString, - Computed: true, - Removed: "This field has been removed. Use the google_app_engine_application resource instead.", - }, - }, - } -} - -func appEngineFeatureSettingsResource() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "split_health_checks": { - Type: schema.TypeBool, - Optional: true, - Removed: "This field has been removed. 
Use the google_app_engine_application resource instead.", - }, }, } } @@ -236,7 +124,7 @@ func resourceGoogleProjectCreate(d *schema.ResourceData, meta interface{}) error project.ProjectId, project.Name, err) } - d.SetId(pid) + d.SetId(fmt.Sprintf("projects/%s", pid)) // Wait for the operation to complete opAsMap, err := ConvertToMap(op) @@ -287,7 +175,8 @@ func resourceGoogleProjectCreate(d *schema.ResourceData, meta interface{}) error func resourceGoogleProjectRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - pid := d.Id() + parts := strings.Split(d.Id(), "/") + pid := parts[len(parts)-1] p, err := readGoogleProject(d, config) if err != nil { @@ -306,10 +195,6 @@ func resourceGoogleProjectRead(d *schema.ResourceData, meta interface{}) error { d.Set("name", p.Name) d.Set("labels", p.Labels) - // We get app_engine.#: "" => "" without this set - // Remove when app_engine field is removed from schema completely - d.Set("app_engine", nil) - if p.Parent != nil { switch p.Parent.Type { case "organization": @@ -387,7 +272,8 @@ func parseFolderId(v interface{}) string { func resourceGoogleProjectUpdate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - pid := d.Id() + parts := strings.Split(d.Id(), "/") + pid := parts[len(parts)-1] project_name := d.Get("name").(string) // Read the project @@ -464,7 +350,8 @@ func resourceGoogleProjectDelete(d *schema.ResourceData, meta interface{}) error config := meta.(*Config) // Only delete projects if skip_delete isn't set if !d.Get("skip_delete").(bool) { - pid := d.Id() + parts := strings.Split(d.Id(), "/") + pid := parts[len(parts)-1] if err := retryTimeDuration(func() error { _, delErr := config.clientResourceManager.Projects.Delete(pid).Do() return delErr @@ -477,7 +364,8 @@ func resourceGoogleProjectDelete(d *schema.ResourceData, meta interface{}) error } func resourceProjectImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - pid := d.Id() + parts := strings.Split(d.Id(), "/") + pid := parts[len(parts)-1] // Prevent importing via project number, this will cause issues later matched, err := regexp.MatchString("^\\d+$", pid) if err != nil { @@ -488,6 +376,9 @@ func resourceProjectImportState(d *schema.ResourceData, meta interface{}) ([]*sc return nil, fmt.Errorf("Error importing project %q, please use project_id", pid) } + // Ensure the id format includes projects/ + d.SetId(fmt.Sprintf("projects/%s", pid)) + // Explicitly set to default as a workaround for `ImportStateVerify` tests, and so that users // don't see a diff immediately after import. d.Set("auto_create_network", true) @@ -530,7 +421,8 @@ func forceDeleteComputeNetwork(d *schema.ResourceData, config *Config, projectId } func updateProjectBillingAccount(d *schema.ResourceData, config *Config) error { - pid := d.Id() + parts := strings.Split(d.Id(), "/") + pid := parts[len(parts)-1] name := d.Get("billing_account").(string) ba := &cloudbilling.ProjectBillingInfo{} // If we're unlinking an existing billing account, an empty request does that, not an empty-string billing account. 
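// A minimal sketch of the ID-parsing pattern that recurs throughout this
// branch: resources move to fully-qualified IDs ("projects/{{project}}",
// "projects/{{project}}/regions/{{region}}/clusters/{{name}}", ...), while
// Read/Update/Delete keep accepting legacy bare IDs by taking the trailing
// segment. The lastIdPart helper name is ours for illustration, not the
// provider's.
package main

import (
	"fmt"
	"strings"
)

// lastIdPart returns the trailing segment of a fully-qualified resource ID.
// A legacy ID with no slashes comes back unchanged, which is what lets the
// migrated funcs above handle state written by older provider versions.
func lastIdPart(id string) string {
	parts := strings.Split(id, "/")
	return parts[len(parts)-1]
}

func main() {
	fmt.Println(lastIdPart("projects/my-proj"))                // "my-proj"
	fmt.Println(lastIdPart("projects/p/regions/r/clusters/c")) // "c"
	fmt.Println(lastIdPart("legacy-id"))                       // unchanged: "legacy-id"
}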
@@ -577,8 +469,10 @@ func deleteComputeNetwork(project, network string, config *Config) error { func readGoogleProject(d *schema.ResourceData, config *Config) (*cloudresourcemanager.Project, error) { var p *cloudresourcemanager.Project // Read the project + parts := strings.Split(d.Id(), "/") + pid := parts[len(parts)-1] err := retryTimeDuration(func() (reqErr error) { - p, reqErr = config.clientResourceManager.Projects.Get(d.Id()).Do() + p, reqErr = config.clientResourceManager.Projects.Get(pid).Do() return reqErr }, d.Timeout(schema.TimeoutRead)) return p, err @@ -610,6 +504,76 @@ func enableServiceUsageProjectServices(services []string, project string, config return waitForServiceUsageEnabledServices(services, project, config, timeout) } +func doEnableServicesRequest(services []string, project string, config *Config, timeout time.Duration) error { + var op *serviceusage.Operation + + err := retryTimeDuration(func() error { + var rerr error + if len(services) == 1 { + // BatchEnable returns an error for a single item, so just enable + // using service endpoint. + name := fmt.Sprintf("projects/%s/services/%s", project, services[0]) + req := &serviceusage.EnableServiceRequest{} + op, rerr = config.clientServiceUsage.Services.Enable(name, req).Do() + } else { + // Batch enable for multiple services. + name := fmt.Sprintf("projects/%s", project) + req := &serviceusage.BatchEnableServicesRequest{ServiceIds: services} + op, rerr = config.clientServiceUsage.Services.BatchEnable(name, req).Do() + } + return handleServiceUsageRetryableError(rerr) + }, timeout) + if err != nil { + return errwrap.Wrapf("failed to send enable services request: {{err}}", err) + } + // Poll for the API to return + waitErr := serviceUsageOperationWait(config, op, fmt.Sprintf("Enable Project %q Services: %+v", project, services)) + if waitErr != nil { + return waitErr + } + return nil +} + +// Retrieve a project's services from the API +func listCurrentlyEnabledServices(project string, config *Config, timeout time.Duration) (map[string]struct{}, error) { + // Verify project for services still exists + p, err := config.clientResourceManager.Projects.Get(project).Do() + if err != nil { + return nil, err + } + if p.LifecycleState == "DELETE_REQUESTED" { + // Construct a 404 error for handleNotFoundError + return nil, &googleapi.Error{ + Code: 404, + Message: "Project deletion was requested", + } + } + + log.Printf("[DEBUG] Listing enabled services for project %s", project) + apiServices := make(map[string]struct{}) + err = retryTimeDuration(func() error { + ctx := context.Background() + return config.clientServiceUsage.Services. + List(fmt.Sprintf("projects/%s", project)). + Fields("services/name,nextPageToken"). + Filter("state:ENABLED"). + Pages(ctx, func(r *serviceusage.ListServicesResponse) error { + for _, v := range r.Services { + // services are returned as "projects/PROJECT/services/NAME" + name := GetResourceNameFromSelfLink(v.Name) + if _, ok := ignoredProjectServicesSet[name]; !ok { + apiServices[name] = struct{}{} + } + } + return nil + }) + }, timeout) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Failed to list enabled services for project %s: {{err}}", project), err) + } + return apiServices, nil +} + // waitForServiceUsageEnabledServices doesn't resend enable requests - it just // waits for service enablement status to propagate. Essentially, it waits until // all services show up as enabled when listing services on the project. 
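// A usage sketch of the Service Usage list call added above: a server-side
// Filter plus a sparse Fields mask keeps pages small, and Pages drives the
// nextPageToken loop for us. Assumes Application Default Credentials and a
// google-api-go-client recent enough to have serviceusage.NewService;
// "my-project" is a placeholder project ID.
package main

import (
	"context"
	"fmt"
	"log"

	serviceusage "google.golang.org/api/serviceusage/v1"
)

func main() {
	ctx := context.Background()
	svc, err := serviceusage.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	err = svc.Services.List("projects/my-project").
		Filter("state:ENABLED").
		Fields("services/name,nextPageToken").
		Pages(ctx, func(r *serviceusage.ListServicesResponse) error {
			for _, s := range r.Services {
				// names come back as "projects/PROJECT/services/NAME"
				fmt.Println(s.Name)
			}
			return nil
		})
	if err != nil {
		log.Fatal(err)
	}
}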
diff --git a/google/resource_google_project_iam_policy.go b/google/resource_google_project_iam_policy.go index 5b3c9bab4f6..f44d707fb63 100644 --- a/google/resource_google_project_iam_policy.go +++ b/google/resource_google_project_iam_policy.go @@ -35,21 +35,6 @@ func resourceGoogleProjectIamPolicy() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "authoritative": { - Removed: "The authoritative field was removed. To ignore changes not managed by Terraform, use google_project_iam_binding and google_project_iam_member instead. See https://www.terraform.io/docs/providers/google/r/google_project_iam.html for more information.", - Type: schema.TypeBool, - Optional: true, - }, - "restore_policy": { - Removed: "This field was removed alongside the authoritative field. To ignore changes not managed by Terraform, use google_project_iam_binding and google_project_iam_member instead. See https://www.terraform.io/docs/providers/google/r/google_project_iam.html for more information.", - Type: schema.TypeString, - Computed: true, - }, - "disable_project": { - Removed: "This field was removed alongside the authoritative field. Use lifecycle.prevent_destroy instead.", - Type: schema.TypeBool, - Optional: true, - }, }, } } diff --git a/google/resource_google_project_service.go b/google/resource_google_project_service.go index 86c06e6f07f..fe1a79ccf6b 100644 --- a/google/resource_google_project_service.go +++ b/google/resource_google_project_service.go @@ -10,12 +10,15 @@ import ( "google.golang.org/api/serviceusage/v1" ) -var ignoredProjectServices = []string{"dataproc-control.googleapis.com", "source.googleapis.com", "stackdriverprovisioning.googleapis.com"} - // These services can only be enabled as a side-effect of enabling other services, // so don't bother storing them in the config or using them for diffing. +var ignoredProjectServices = []string{"dataproc-control.googleapis.com", "source.googleapis.com", "stackdriverprovisioning.googleapis.com"} var ignoredProjectServicesSet = golangSetFromStringSlice(ignoredProjectServices) +// Services that can't be user-specified but are otherwise valid. Renamed +// services should be added to this set during major releases. +var bannedProjectServices = []string{"bigquery-json.googleapis.com"} + // Service Renames // we expect when a service is renamed: // - both service names will continue to be able to be set @@ -41,7 +44,7 @@ var ignoredProjectServicesSet = golangSetFromStringSlice(ignoredProjectServices) // upon removal, we should disallow the old name from being used even if it's // not gone from the underlying API yet var renamedServices = map[string]string{ - "bigquery-json.googleapis.com": "bigquery.googleapis.com", // DEPRECATED FOR 3.0.0 + "bigquery-json.googleapis.com": "bigquery.googleapis.com", // DEPRECATED FOR 4.0.0. Originally for 3.0.0, but the migration did not happen server-side yet. 
} // renamedServices in reverse (new -> old) @@ -50,6 +53,8 @@ var renamedServicesByNewServiceNames = reverseStringMap(renamedServices) // renamedServices expressed as both old -> new and new -> old var renamedServicesByOldAndNewServiceNames = mergeStringMaps(renamedServices, renamedServicesByNewServiceNames) +const maxServiceUsageBatchSize = 20 + func resourceGoogleProjectService() *schema.Resource { return &schema.Resource{ Create: resourceGoogleProjectServiceCreate, @@ -73,7 +78,7 @@ func resourceGoogleProjectService() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: StringNotInSlice(ignoredProjectServices, false), + ValidateFunc: StringNotInSlice(append(ignoredProjectServices, bannedProjectServices...), false), }, "project": { Type: schema.TypeString, diff --git a/google/resource_google_project_service_test.go b/google/resource_google_project_service_test.go index 6d8dfd81ff3..08e1484c448 100644 --- a/google/resource_google_project_service_test.go +++ b/google/resource_google_project_service_test.go @@ -142,7 +142,7 @@ func TestAccProjectService_renamedService(t *testing.T) { Providers: testAccProviders, Steps: []resource.TestStep{ { - Config: testAccProjectService_single("bigquery-json.googleapis.com", pid, pname, org), + Config: testAccProjectService_single("bigquery.googleapis.com", pid, pname, org), }, { ResourceName: "google_project_service.test", diff --git a/google/resource_google_project_services.go b/google/resource_google_project_services.go deleted file mode 100644 index 30c8a0eb11d..00000000000 --- a/google/resource_google_project_services.go +++ /dev/null @@ -1,335 +0,0 @@ -package google - -import ( - "context" - "fmt" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "google.golang.org/api/googleapi" - "google.golang.org/api/serviceusage/v1" - "log" - "strings" - "time" -) - -const maxServiceUsageBatchSize = 20 - -func resourceGoogleProjectServices() *schema.Resource { - return &schema.Resource{ - Create: resourceGoogleProjectServicesCreateUpdate, - Read: resourceGoogleProjectServicesRead, - Update: resourceGoogleProjectServicesCreateUpdate, - Delete: resourceGoogleProjectServicesDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - DeprecationMessage: "google_project_services is deprecated - many users reported " + - "issues with dependent services that were not resolvable. Please use google_project_service or the " + - "https://github.com/terraform-google-modules/terraform-google-project-factory/tree/master/modules/project_services" + - " module. It's recommended that you use a provider version of 2.13.0 or higher when you migrate so that requests are" + - " batched to the API, reducing the request rate. 
This resource will be removed in version 3.0.0.", - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Read: schema.DefaultTimeout(10 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - "services": { - Type: schema.TypeSet, - Required: true, - Set: schema.HashString, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: StringNotInSlice(ignoredProjectServices, false), - }, - }, - "disable_on_destroy": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - }, - } -} - -func resourceGoogleProjectServicesCreateUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - // Get services from config - services, err := expandServiceUsageProjectServicesServices(d.Get("services"), d, config) - if err != nil { - return err - } - - log.Printf("[DEBUG]: Enabling Project Services for %s: %+v", d.Id(), services) - if err := setServiceUsageProjectEnabledServices(services, project, d, config); err != nil { - return fmt.Errorf("Error authoritatively enabling Project %s Services: %v", project, err) - } - log.Printf("[DEBUG]: Finished enabling Project Services for %s: %+v", d.Id(), services) - - d.SetId(project) - return resourceGoogleProjectServicesRead(d, meta) -} - -func resourceGoogleProjectServicesRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - enabledSet, err := listCurrentlyEnabledServices(d.Id(), config, d.Timeout(schema.TimeoutRead)) - if err != nil { - return err - } - - // use old services to set the correct renamed service names in state - s, _ := expandServiceUsageProjectServicesServices(d.Get("services"), d, config) - log.Printf("[DEBUG] Saw services in state on Read: %s ", s) - sset := golangSetFromStringSlice(s) - for ov, nv := range renamedServices { - _, ook := sset[ov] - _, nok := sset[nv] - - // preserve the values set in prior state if they're identical. If none - // were set, we delete the new value if it exists. By doing that that - // we only store the old value if the service is enabled, and no value - // if it isn't. 
- if ook && nok { - continue - } else if ook { - delete(enabledSet, nv) - } else if nok { - delete(enabledSet, ov) - } else { - delete(enabledSet, nv) - } - } - - services := stringSliceFromGolangSet(enabledSet) - - d.Set("project", d.Id()) - d.Set("services", flattenServiceUsageProjectServicesServices(services, d)) - - return nil -} - -func resourceGoogleProjectServicesDelete(d *schema.ResourceData, meta interface{}) error { - if disable := d.Get("disable_on_destroy"); !(disable.(bool)) { - log.Printf("[WARN] Project Services disable_on_destroy set to false, skip disabling services for %s.", d.Id()) - d.SetId("") - return nil - } - - config := meta.(*Config) - - // Get services from config - services, err := expandServiceUsageProjectServicesServices(d.Get("services"), d, config) - if err != nil { - return err - } - project := d.Id() - - log.Printf("[DEBUG]: Disabling Project Services %s: %+v", project, services) - for _, s := range services { - if err := disableServiceUsageProjectService(s, project, d, config, true); err != nil { - return fmt.Errorf("Unable to destroy google_project_services for %s: %s", d.Id(), err) - } - } - log.Printf("[DEBUG] Finished disabling Project Services %s: %+v", project, services) - - d.SetId("") - return nil -} - -// *Authoritatively* sets enabled services. -func setServiceUsageProjectEnabledServices(services []string, project string, d *schema.ResourceData, config *Config) error { - currentlyEnabled, err := listCurrentlyEnabledServices(project, config, d.Timeout(schema.TimeoutRead)) - if err != nil { - return err - } - - toEnable := map[string]struct{}{} - for _, srv := range services { - // We don't have to enable a service if it's already enabled. - if _, ok := currentlyEnabled[srv]; !ok { - toEnable[srv] = struct{}{} - } - } - - if len(toEnable) > 0 { - log.Printf("[DEBUG] Enabling services: %s", toEnable) - if err := BatchRequestEnableServices(toEnable, project, d, config); err != nil { - return fmt.Errorf("unable to enable Project Services %s (%+v): %s", project, services, err) - } - } else { - log.Printf("[DEBUG] No services to enable.") - } - - srvSet := golangSetFromStringSlice(services) - - srvSetWithRenames := map[string]struct{}{} - - // we'll always list both names for renamed services, so allow both forms if - // we see both. - for k := range srvSet { - srvSetWithRenames[k] = struct{}{} - if v, ok := renamedServicesByOldAndNewServiceNames[k]; ok { - srvSetWithRenames[v] = struct{}{} - } - } - - for srv := range currentlyEnabled { - // Disable any services that are currently enabled for project but are not - // in our list of acceptable services. - if _, ok := srvSetWithRenames[srv]; !ok { - // skip deleting services by their new names and prefer the old name. - if _, ok := renamedServicesByNewServiceNames[srv]; ok { - continue - } - - log.Printf("[DEBUG] Disabling project %s service %s", project, srv) - err := disableServiceUsageProjectService(srv, project, d, config, true) - if err != nil { - log.Printf("[DEBUG] Saw error %s deleting service %s", err, srv) - - // if we got the right error and the service is renamed, delete by the new name - if n, ok := renamedServices[srv]; ok && strings.Contains(err.Error(), "not found or permission denied.") { - log.Printf("[DEBUG] Failed to delete service %s, it doesn't exist. 
Trying %s", srv, n) - err = disableServiceUsageProjectService(n, project, d, config, true) - if err == nil { - return nil - } - } - - return fmt.Errorf("unable to disable unwanted Project Service %s %s): %s", project, srv, err) - } - } - } - return nil -} - -func doEnableServicesRequest(services []string, project string, config *Config, timeout time.Duration) error { - var op *serviceusage.Operation - - err := retryTimeDuration(func() error { - var rerr error - if len(services) == 1 { - // BatchEnable returns an error for a single item, so just enable - // using service endpoint. - name := fmt.Sprintf("projects/%s/services/%s", project, services[0]) - req := &serviceusage.EnableServiceRequest{} - op, rerr = config.clientServiceUsage.Services.Enable(name, req).Do() - } else { - // Batch enable for multiple services. - name := fmt.Sprintf("projects/%s", project) - req := &serviceusage.BatchEnableServicesRequest{ServiceIds: services} - op, rerr = config.clientServiceUsage.Services.BatchEnable(name, req).Do() - } - return handleServiceUsageRetryableError(rerr) - }, timeout) - if err != nil { - return errwrap.Wrapf("failed to send enable services request: {{err}}", err) - } - - // Poll for the API to return - waitErr := serviceUsageOperationWait(config, op, fmt.Sprintf("Enable Project %q Services: %+v", project, services)) - if waitErr != nil { - return waitErr - } - return nil -} - -func handleServiceUsageRetryableError(err error) error { - if err == nil { - return nil - } - if gerr, ok := err.(*googleapi.Error); ok { - if (gerr.Code == 400 || gerr.Code == 412) && gerr.Message == "Precondition check failed." { - return &googleapi.Error{ - Code: 503, - Message: "api returned \"precondition failed\" while enabling service", - } - } - } - return err -} - -func flattenServiceUsageProjectServicesServices(v interface{}, d *schema.ResourceData) interface{} { - if v == nil { - return v - } - if strV, ok := v.([]string); ok { - v = convertStringArrToInterface(strV) - } - return schema.NewSet(schema.HashString, v.([]interface{})) -} - -func expandServiceUsageProjectServicesServices(v interface{}, d TerraformResourceData, config *Config) ([]string, error) { - if v == nil { - return nil, nil - } - return convertStringArr(v.(*schema.Set).List()), nil -} - -// Retrieve a project's services from the API -// if a service has been renamed, this function will list both the old and new -// forms of the service. LIST responses are expected to return only the old or -// new form, but we'll always return both. -func listCurrentlyEnabledServices(project string, config *Config, timeout time.Duration) (map[string]struct{}, error) { - // Verify project for services still exists - p, err := config.clientResourceManager.Projects.Get(project).Do() - if err != nil { - return nil, err - } - if p.LifecycleState == "DELETE_REQUESTED" { - // Construct a 404 error for handleNotFoundError - return nil, &googleapi.Error{ - Code: 404, - Message: "Project deletion was requested", - } - } - - log.Printf("[DEBUG] Listing enabled services for project %s", project) - apiServices := make(map[string]struct{}) - err = retryTimeDuration(func() error { - ctx := context.Background() - return config.clientServiceUsage.Services. - List(fmt.Sprintf("projects/%s", project)). - Fields("services/name,nextPageToken"). - Filter("state:ENABLED"). 
- Pages(ctx, func(r *serviceusage.ListServicesResponse) error { - for _, v := range r.Services { - // services are returned as "projects/{{project}}/services/{{name}}" - name := GetResourceNameFromSelfLink(v.Name) - - // if name not in ignoredProjectServicesSet - if _, ok := ignoredProjectServicesSet[name]; !ok { - apiServices[name] = struct{}{} - - // if a service has been renamed, set both. We'll deal - // with setting the right values later. - if v, ok := renamedServicesByOldAndNewServiceNames[name]; ok { - log.Printf("[DEBUG] Adding service alias for %s to enabled services: %s", name, v) - apiServices[v] = struct{}{} - } - } - } - return nil - }) - }, timeout) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Failed to list enabled services for project %s: {{err}}", project), err) - } - return apiServices, nil -} diff --git a/google/resource_google_project_services_test.go b/google/resource_google_project_services_test.go deleted file mode 100644 index 3b351a1cef0..00000000000 --- a/google/resource_google_project_services_test.go +++ /dev/null @@ -1,440 +0,0 @@ -package google - -import ( - "bytes" - "fmt" - "reflect" - "sort" - "testing" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" -) - -// Test that services can be enabled and disabled on a project -func TestAccProjectServices_basic(t *testing.T) { - t.Parallel() - - org := getTestOrgFromEnv(t) - pid := "terraform-" + acctest.RandString(10) - services1 := []string{"logging.googleapis.com", "cloudresourcemanager.googleapis.com"} - services2 := []string{"cloudresourcemanager.googleapis.com"} - oobService := "logging.googleapis.com" - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - // Create a new project with some services - { - Config: testAccProjectAssociateServicesBasic(services1, pid, pname, org), - Check: resource.ComposeTestCheckFunc( - testProjectServicesMatch(services1, pid), - ), - }, - // Update services to remove one - { - Config: testAccProjectAssociateServicesBasic(services2, pid, pname, org), - Check: resource.ComposeTestCheckFunc( - testProjectServicesMatch(services2, pid), - ), - }, - // Add a service out-of-band and ensure it is removed - { - PreConfig: func() { - config := testAccProvider.Meta().(*Config) - if err := enableServiceUsageProjectServices([]string{oobService}, pid, config, time.Minute*20); err != nil { - t.Fatalf("Error enabling %q: %v", oobService, err) - } - }, - Config: testAccProjectAssociateServicesBasic(services2, pid, pname, org), - Check: resource.ComposeTestCheckFunc( - testProjectServicesMatch(services2, pid), - ), - }, - { - ResourceName: "google_project_services.acceptance", - ImportState: true, - ImportStateId: pid, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"disable_on_destroy"}, - }, - }, - }) -} - -// Test that services are authoritative when a project has existing -// services not represented in config -func TestAccProjectServices_authoritative(t *testing.T) { - t.Parallel() - - org := getTestOrgFromEnv(t) - pid := "terraform-" + acctest.RandString(10) - services := []string{"cloudresourcemanager.googleapis.com"} - oobService := "logging.googleapis.com" - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - // Create a new project with no 
services - { - Config: testAccProject_create(pid, pname, org), - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleProjectExists("google_project.acceptance", pid), - ), - }, - // Add a service out-of-band, then apply a config that creates a service. - // It should remove the out-of-band service. - { - PreConfig: func() { - config := testAccProvider.Meta().(*Config) - if err := enableServiceUsageProjectServices([]string{oobService}, pid, config, time.Minute*20); err != nil { - t.Fatalf("Error enabling %q: %v", oobService, err) - } - }, - Config: testAccProjectAssociateServicesBasic(services, pid, pname, org), - Check: resource.ComposeTestCheckFunc( - testProjectServicesMatch(services, pid), - ), - }, - }, - }) -} - -// Test that services are authoritative when a project has existing -// services, some which are represented in the config and others -// that are not -func TestAccProjectServices_authoritative2(t *testing.T) { - t.Parallel() - - org := getTestOrgFromEnv(t) - pid := "terraform-" + acctest.RandString(10) - oobServices := []string{"logging.googleapis.com", "cloudresourcemanager.googleapis.com"} - services := []string{"logging.googleapis.com"} - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - // Create a new project with no services - { - Config: testAccProject_create(pid, pname, org), - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleProjectExists("google_project.acceptance", pid), - ), - }, - // Add a service out-of-band, then apply a config that creates a service. - // It should remove the out-of-band service. - { - PreConfig: func() { - config := testAccProvider.Meta().(*Config) - for _, s := range oobServices { - if err := enableServiceUsageProjectServices([]string{s}, pid, config, time.Minute*20); err != nil { - t.Fatalf("Error enabling %q: %v", s, err) - } - } - }, - Config: testAccProjectAssociateServicesBasic(services, pid, pname, org), - Check: resource.ComposeTestCheckFunc( - testProjectServicesMatch(services, pid), - ), - }, - }, - }) -} - -// Test that services that can't be enabled on their own (such as dataproc-control.googleapis.com) -// don't end up causing diffs when they are enabled as a side-effect of a different service's -// enablement. 
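
The "authoritative" behavior the steps above exercise reduces to set arithmetic between the configured services and the services the API reports as enabled. A minimal sketch of that reconciliation, using the hypothetical helper name computeServiceDiff; the resource performs the equivalent inside setServiceUsageProjectEnabledServices, plus the renamed-service aliasing shown earlier.

package main

import "fmt"

// computeServiceDiff returns the services to enable (configured but not yet
// enabled) and to disable (enabled but absent from the configuration).
func computeServiceDiff(configured, enabled map[string]struct{}) (toEnable, toDisable []string) {
	for s := range configured {
		if _, ok := enabled[s]; !ok {
			toEnable = append(toEnable, s)
		}
	}
	for s := range enabled {
		if _, ok := configured[s]; !ok {
			toDisable = append(toDisable, s)
		}
	}
	return toEnable, toDisable
}

func main() {
	configured := map[string]struct{}{"cloudresourcemanager.googleapis.com": {}}
	enabled := map[string]struct{}{
		"logging.googleapis.com":              {},
		"cloudresourcemanager.googleapis.com": {},
	}
	toEnable, toDisable := computeServiceDiff(configured, enabled)
	fmt.Println(toEnable, toDisable) // [] [logging.googleapis.com]
}
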
-func TestAccProjectServices_ignoreUnenablableServices(t *testing.T) { - t.Parallel() - - org := getTestOrgFromEnv(t) - billingId := getTestBillingAccountFromEnv(t) - pid := "terraform-" + acctest.RandString(10) - services := []string{ - "dataproc.googleapis.com", - // The following services are enabled as a side-effect of dataproc's enablement - "storage-component.googleapis.com", - "deploymentmanager.googleapis.com", - "replicapool.googleapis.com", - "replicapoolupdater.googleapis.com", - "resourceviews.googleapis.com", - "compute.googleapis.com", - "container.googleapis.com", - "containerregistry.googleapis.com", - "storage-api.googleapis.com", - "pubsub.googleapis.com", - "oslogin.googleapis.com", - "bigquery-json.googleapis.com", - "bigquerystorage.googleapis.com", - "iam.googleapis.com", - "iamcredentials.googleapis.com", - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccProjectAssociateServicesBasic_withBilling(services, pid, pname, org, billingId), - Check: resource.ComposeTestCheckFunc(testProjectServicesMatch(services, pid)), - }, - }, - }) -} - -func TestAccProjectServices_pagination(t *testing.T) { - t.Parallel() - - org := getTestOrgFromEnv(t) - billingId := getTestBillingAccountFromEnv(t) - pid := "terraform-" + acctest.RandString(10) - - // we need at least 50 services (doesn't matter what they are) to exercise the - // pagination handling code. - services := []string{ - "actions.googleapis.com", - "appengine.googleapis.com", - "appengineflex.googleapis.com", - "bigquery-json.googleapis.com", - "bigquerydatatransfer.googleapis.com", - "bigquerystorage.googleapis.com", - "bigtableadmin.googleapis.com", - "bigtabletableadmin.googleapis.com", - "cloudbuild.googleapis.com", - "clouderrorreporting.googleapis.com", - "cloudfunctions.googleapis.com", - "cloudiot.googleapis.com", - "cloudkms.googleapis.com", - "cloudmonitoring.googleapis.com", - "cloudresourcemanager.googleapis.com", - "cloudtrace.googleapis.com", - "compute.googleapis.com", - "container.googleapis.com", - "containerregistry.googleapis.com", - "dataflow.googleapis.com", - "dataproc.googleapis.com", - "datastore.googleapis.com", - "deploymentmanager.googleapis.com", - "dialogflow.googleapis.com", - "dns.googleapis.com", - "endpoints.googleapis.com", - "firebaserules.googleapis.com", - "firestore.googleapis.com", - "genomics.googleapis.com", - "iam.googleapis.com", - "iamcredentials.googleapis.com", - "language.googleapis.com", - "logging.googleapis.com", - "ml.googleapis.com", - "monitoring.googleapis.com", - "oslogin.googleapis.com", - "pubsub.googleapis.com", - "replicapool.googleapis.com", - "replicapoolupdater.googleapis.com", - "resourceviews.googleapis.com", - "runtimeconfig.googleapis.com", - "servicecontrol.googleapis.com", - "servicemanagement.googleapis.com", - "sourcerepo.googleapis.com", - "spanner.googleapis.com", - "speech.googleapis.com", - "sql-component.googleapis.com", - "storage-api.googleapis.com", - "storage-component.googleapis.com", - "storagetransfer.googleapis.com", - "testing.googleapis.com", - "toolresults.googleapis.com", - "translate.googleapis.com", - "videointelligence.googleapis.com", - "vision.googleapis.com", - "zync.googleapis.com", - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccProjectAssociateServicesBasic_withBilling(services, pid, pname, org, 
billingId), - Check: resource.ComposeTestCheckFunc( - testProjectServicesMatch(services, pid), - ), - }, - }, - }) -} - -func TestAccProjectServices_renamedServices(t *testing.T) { - t.Parallel() - - org := getTestOrgFromEnv(t) - pid := "terraform-" + acctest.RandString(10) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - // create new - Config: testAccProjectAssociateServicesBasic([]string{ - "bigquery.googleapis.com", - "bigquerystorage.googleapis.com", - "iam.googleapis.com", - "iamcredentials.googleapis.com", - "oslogin.googleapis.com", - }, pid, pname, org), - }, - { - // transition to old - Config: testAccProjectAssociateServicesBasic([]string{ - "bigquery-json.googleapis.com", - "bigquerystorage.googleapis.com", - "iam.googleapis.com", - "iamcredentials.googleapis.com", - "oslogin.googleapis.com", - }, pid, pname, org), - }, - { - // transition to new - Config: testAccProjectAssociateServicesBasic([]string{ - "bigquery.googleapis.com", - "bigquerystorage.googleapis.com", - "iam.googleapis.com", - "iamcredentials.googleapis.com", - "oslogin.googleapis.com", - }, pid, pname, org), - }, - { - // remove new - Config: testAccProjectAssociateServicesBasic([]string{ - "iam.googleapis.com", - "iamcredentials.googleapis.com", - "oslogin.googleapis.com", - }, pid, pname, org), - }, - { - // create both - Config: testAccProjectAssociateServicesBasic([]string{ - "bigquery.googleapis.com", - "bigquery-json.googleapis.com", - "bigquerystorage.googleapis.com", - "iam.googleapis.com", - "iamcredentials.googleapis.com", - "oslogin.googleapis.com", - }, pid, pname, org), - }, - { - // remove new - Config: testAccProjectAssociateServicesBasic([]string{ - "bigquery-json.googleapis.com", - "bigquerystorage.googleapis.com", - "iam.googleapis.com", - "iamcredentials.googleapis.com", - "oslogin.googleapis.com", - }, pid, pname, org), - }, - { - // import imports old - ResourceName: "google_project_services.acceptance", - ImportState: true, - ImportStateId: pid, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"disable_on_destroy"}, - }, - { - // transition to both - Config: testAccProjectAssociateServicesBasic([]string{ - "bigquery.googleapis.com", - "bigquery-json.googleapis.com", - "bigquerystorage.googleapis.com", - "iam.googleapis.com", - "iamcredentials.googleapis.com", - "oslogin.googleapis.com", - }, pid, pname, org), - }, - { - // remove both - Config: testAccProjectAssociateServicesBasic([]string{ - "iam.googleapis.com", - "iamcredentials.googleapis.com", - "oslogin.googleapis.com", - }, pid, pname, org), - }, - }, - }) -} - -func testAccProjectAssociateServicesBasic(services []string, pid, name, org string) string { - return fmt.Sprintf(` -resource "google_project" "acceptance" { - project_id = "%s" - name = "%s" - org_id = "%s" -} -resource "google_project_services" "acceptance" { - project = "${google_project.acceptance.project_id}" - services = [%s] - disable_on_destroy = true -} -`, pid, name, org, testStringsToString(services)) -} - -func testAccProjectAssociateServicesBasic_withBilling(services []string, pid, name, org, billing string) string { - return fmt.Sprintf(` -resource "google_project" "acceptance" { - project_id = "%s" - name = "%s" - org_id = "%s" - billing_account = "%s" -} -resource "google_project_services" "acceptance" { - project = "${google_project.acceptance.project_id}" - services = [%s] - disable_on_destroy = false -} -`, pid, name, org, billing, 
testStringsToString(services)) -} - -func testProjectServicesMatch(services []string, pid string) resource.TestCheckFunc { - return func(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - - currentlyEnabled, err := listCurrentlyEnabledServices(pid, config, time.Minute*10) - if err != nil { - return fmt.Errorf("Error listing services for project %q: %v", pid, err) - } - - servicesSet := golangSetFromStringSlice(services) - // add renamed service aliases because listCurrentlyEnabledServices will - // have both - for k := range servicesSet { - if v, ok := renamedServicesByOldAndNewServiceNames[k]; ok { - servicesSet[v] = struct{}{} - } - } - - services = stringSliceFromGolangSet(servicesSet) - - apiServices := stringSliceFromGolangSet(currentlyEnabled) - sort.Strings(services) - sort.Strings(apiServices) - if !reflect.DeepEqual(services, apiServices) { - return fmt.Errorf("Services in config (%v) do not exactly match services returned by API (%v)", services, apiServices) - } - - return nil - } -} - -func testStringsToString(s []string) string { - var b bytes.Buffer - for i, v := range s { - b.WriteString(fmt.Sprintf("\"%s\"", v)) - if i < len(s)-1 { - b.WriteString(",") - } - } - return b.String() -} diff --git a/google/resource_google_project_test.go b/google/resource_google_project_test.go index 39ac099fe82..6df0cbcc17c 100644 --- a/google/resource_google_project_test.go +++ b/google/resource_google_project_test.go @@ -200,8 +200,9 @@ func testAccCheckGoogleProjectExists(r, pid string) resource.TestCheckFunc { return fmt.Errorf("No ID is set") } - if rs.Primary.ID != pid { - return fmt.Errorf("Expected project %q to match ID %q in state", pid, rs.Primary.ID) + projectId := fmt.Sprintf("projects/%s", pid) + if rs.Primary.ID != projectId { + return fmt.Errorf("Expected project %q to match ID %q in state", projectId, rs.Primary.ID) } return nil diff --git a/google/resource_google_service_account.go b/google/resource_google_service_account.go index 60f57f69b98..e1ab14fad2b 100644 --- a/google/resource_google_service_account.go +++ b/google/resource_google_service_account.go @@ -51,11 +51,6 @@ func resourceGoogleServiceAccount() *schema.Resource { Optional: true, ForceNew: true, }, - "policy_data": { - Type: schema.TypeString, - Optional: true, - Removed: "Use the 'google_service_account_iam_policy' resource to define policies for a service account", - }, }, } } diff --git a/google/resource_iam_audit_config.go b/google/resource_iam_audit_config.go index 3dbbd32979c..bec5b1634d9 100644 --- a/google/resource_iam_audit_config.go +++ b/google/resource_iam_audit_config.go @@ -44,9 +44,9 @@ func ResourceIamAuditConfig(parentSpecificSchema map[string]*schema.Schema, newU func ResourceIamAuditConfigWithBatching(parentSpecificSchema map[string]*schema.Schema, newUpdaterFunc newResourceIamUpdaterFunc, resourceIdParser resourceIdParserFunc, enableBatching bool) *schema.Resource { return &schema.Resource{ - Create: resourceIamAuditConfigCreate(newUpdaterFunc, enableBatching), + Create: resourceIamAuditConfigCreateUpdate(newUpdaterFunc, enableBatching), Read: resourceIamAuditConfigRead(newUpdaterFunc), - Update: resourceIamAuditConfigUpdate(newUpdaterFunc, enableBatching), + Update: resourceIamAuditConfigCreateUpdate(newUpdaterFunc, enableBatching), Delete: resourceIamAuditConfigDelete(newUpdaterFunc, enableBatching), Schema: mergeSchemas(iamAuditConfigSchema, parentSpecificSchema), Importer: &schema.ResourceImporter{ @@ -55,34 +55,6 @@ func 
ResourceIamAuditConfigWithBatching(parentSpecificSchema map[string]*schema. } } -func resourceIamAuditConfigCreate(newUpdaterFunc newResourceIamUpdaterFunc, enableBatching bool) schema.CreateFunc { - return func(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - updater, err := newUpdaterFunc(d, config) - if err != nil { - return err - } - - ac := getResourceIamAuditConfig(d) - modifyF := func(ep *cloudresourcemanager.Policy) error { - ep.AuditConfigs = mergeAuditConfigs(append(ep.AuditConfigs, ac)) - return nil - } - - if enableBatching { - err = BatchRequestModifyIamPolicy(updater, modifyF, config, fmt.Sprintf( - "Add audit config for service %s on resource %q", ac.Service, updater.DescribeResource())) - } else { - err = iamPolicyReadModifyWrite(updater, modifyF) - } - if err != nil { - return err - } - d.SetId(updater.GetResourceId() + "/audit_config/" + ac.Service) - return resourceIamAuditConfigRead(newUpdaterFunc)(d, meta) - } -} - func resourceIamAuditConfigRead(newUpdaterFunc newResourceIamUpdaterFunc) schema.ReadFunc { return func(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) @@ -150,7 +122,7 @@ func iamAuditConfigImport(resourceIdParser resourceIdParserFunc) schema.StateFun } } -func resourceIamAuditConfigUpdate(newUpdaterFunc newResourceIamUpdaterFunc, enableBatching bool) schema.UpdateFunc { +func resourceIamAuditConfigCreateUpdate(newUpdaterFunc newResourceIamUpdaterFunc, enableBatching bool) func(*schema.ResourceData, interface{}) error { return func(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) updater, err := newUpdaterFunc(d, config) @@ -173,7 +145,7 @@ func resourceIamAuditConfigUpdate(newUpdaterFunc newResourceIamUpdaterFunc, enab if err != nil { return err } - + d.SetId(updater.GetResourceId() + "/audit_config/" + ac.Service) return resourceIamAuditConfigRead(newUpdaterFunc)(d, meta) } } diff --git a/google/resource_logging_metric.go b/google/resource_logging_metric.go index 51d67d7e90d..b977eb5fef8 100644 --- a/google/resource_logging_metric.go +++ b/google/resource_logging_metric.go @@ -145,7 +145,7 @@ describes the bucket boundaries used to create a histogram of the extracted valu Schema: map[string]*schema.Schema{ "bounds": { Type: schema.TypeList, - Optional: true, + Required: true, Description: `The values must be monotonically increasing.`, Elem: &schema.Schema{ Type: schema.TypeFloat, @@ -153,6 +153,7 @@ describes the bucket boundaries used to create a histogram of the extracted valu }, }, }, + AtLeastOneOf: []string{"bucket_options.0.linear_buckets", "bucket_options.0.exponential_buckets", "bucket_options.0.explicit_buckets"}, }, "exponential_buckets": { Type: schema.TypeList, @@ -163,22 +164,26 @@ the lower bound. 
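
The audit-config refactor above leans on a plugin SDK detail worth spelling out: schema.CreateFunc and schema.UpdateFunc share the underlying type func(*schema.ResourceData, interface{}) error, so one closure can back both entry points when create and update are the same read-modify-write. A sketch with illustrative names (resourceExampleCreateUpdate is not part of the patch):

package google

import "github.com/hashicorp/terraform-plugin-sdk/helper/schema"

// resourceExampleCreateUpdate returns a handler usable as both Create and
// Update, mirroring resourceIamAuditConfigCreateUpdate above.
func resourceExampleCreateUpdate() func(*schema.ResourceData, interface{}) error {
	return func(d *schema.ResourceData, meta interface{}) error {
		// ...read-modify-write the parent policy here, then set the ID so a
		// fresh create and an in-place update converge on the same state.
		d.SetId("parent/audit_config/service")
		return nil
	}
}

func exampleResource() *schema.Resource {
	return &schema.Resource{
		Create: resourceExampleCreateUpdate(),
		Update: resourceExampleCreateUpdate(),
	}
}
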
Each bucket represents a constant relative uncertainty on a spe Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "growth_factor": { - Type: schema.TypeInt, - Optional: true, - Description: `Must be greater than 1.`, + Type: schema.TypeInt, + Optional: true, + Description: `Must be greater than 1.`, + AtLeastOneOf: []string{"bucket_options.0.exponential_buckets.0.num_finite_buckets", "bucket_options.0.exponential_buckets.0.growth_factor", "bucket_options.0.exponential_buckets.0.scale"}, }, "num_finite_buckets": { - Type: schema.TypeInt, - Optional: true, - Description: `Must be greater than 0.`, + Type: schema.TypeInt, + Optional: true, + Description: `Must be greater than 0.`, + AtLeastOneOf: []string{"bucket_options.0.exponential_buckets.0.num_finite_buckets", "bucket_options.0.exponential_buckets.0.growth_factor", "bucket_options.0.exponential_buckets.0.scale"}, }, "scale": { - Type: schema.TypeFloat, - Optional: true, - Description: `Must be greater than 0.`, + Type: schema.TypeFloat, + Optional: true, + Description: `Must be greater than 0.`, + AtLeastOneOf: []string{"bucket_options.0.exponential_buckets.0.num_finite_buckets", "bucket_options.0.exponential_buckets.0.growth_factor", "bucket_options.0.exponential_buckets.0.scale"}, }, }, }, + AtLeastOneOf: []string{"bucket_options.0.linear_buckets", "bucket_options.0.exponential_buckets", "bucket_options.0.explicit_buckets"}, }, "linear_buckets": { Type: schema.TypeList, @@ -189,22 +194,26 @@ Each bucket represents a constant absolute uncertainty on the specific value in Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "num_finite_buckets": { - Type: schema.TypeInt, - Optional: true, - Description: `Must be greater than 0.`, + Type: schema.TypeInt, + Optional: true, + Description: `Must be greater than 0.`, + AtLeastOneOf: []string{"bucket_options.0.linear_buckets.0.num_finite_buckets", "bucket_options.0.linear_buckets.0.width", "bucket_options.0.linear_buckets.0.offset"}, }, "offset": { - Type: schema.TypeFloat, - Optional: true, - Description: `Lower bound of the first bucket.`, + Type: schema.TypeFloat, + Optional: true, + Description: `Lower bound of the first bucket.`, + AtLeastOneOf: []string{"bucket_options.0.linear_buckets.0.num_finite_buckets", "bucket_options.0.linear_buckets.0.width", "bucket_options.0.linear_buckets.0.offset"}, }, "width": { - Type: schema.TypeInt, - Optional: true, - Description: `Must be greater than 0.`, + Type: schema.TypeInt, + Optional: true, + Description: `Must be greater than 0.`, + AtLeastOneOf: []string{"bucket_options.0.linear_buckets.0.num_finite_buckets", "bucket_options.0.linear_buckets.0.width", "bucket_options.0.linear_buckets.0.offset"}, }, }, }, + AtLeastOneOf: []string{"bucket_options.0.linear_buckets", "bucket_options.0.exponential_buckets", "bucket_options.0.explicit_buckets"}, }, }, }, diff --git a/google/resource_logging_metric_generated_test.go b/google/resource_logging_metric_generated_test.go index 980b889e567..1e938a772f4 100644 --- a/google/resource_logging_metric_generated_test.go +++ b/google/resource_logging_metric_generated_test.go @@ -51,26 +51,28 @@ func TestAccLoggingMetric_loggingMetricBasicExample(t *testing.T) { func testAccLoggingMetric_loggingMetricBasicExample(context map[string]interface{}) string { return Nprintf(` resource "google_logging_metric" "logging_metric" { - name = "my-(custom)/metric%{random_suffix}" + name = "my-(custom)/metric%{random_suffix}" filter = "resource.type=gae_app AND severity>=ERROR" metric_descriptor { metric_kind = 
"DELTA" - value_type = "DISTRIBUTION" - unit = "1" + value_type = "DISTRIBUTION" + unit = "1" labels { - key = "mass" - value_type = "STRING" - description = "amount of matter" + key = "mass" + value_type = "STRING" + description = "amount of matter" } display_name = "My metric" } value_extractor = "EXTRACT(jsonPayload.request)" - label_extractors = { "mass": "EXTRACT(jsonPayload.request)" } + label_extractors = { + "mass" = "EXTRACT(jsonPayload.request)" + } bucket_options { linear_buckets { num_finite_buckets = 3 - width = 1 - offset = 1 + width = 1 + offset = 1 } } } @@ -104,11 +106,11 @@ func TestAccLoggingMetric_loggingMetricCounterBasicExample(t *testing.T) { func testAccLoggingMetric_loggingMetricCounterBasicExample(context map[string]interface{}) string { return Nprintf(` resource "google_logging_metric" "logging_metric" { - name = "my-(custom)/metric%{random_suffix}" + name = "my-(custom)/metric%{random_suffix}" filter = "resource.type=gae_app AND severity>=ERROR" metric_descriptor { metric_kind = "DELTA" - value_type = "INT64" + value_type = "INT64" } } `, context) @@ -141,18 +143,20 @@ func TestAccLoggingMetric_loggingMetricCounterLabelsExample(t *testing.T) { func testAccLoggingMetric_loggingMetricCounterLabelsExample(context map[string]interface{}) string { return Nprintf(` resource "google_logging_metric" "logging_metric" { - name = "my-(custom)/metric%{random_suffix}" + name = "my-(custom)/metric%{random_suffix}" filter = "resource.type=gae_app AND severity>=ERROR" metric_descriptor { metric_kind = "DELTA" - value_type = "INT64" + value_type = "INT64" labels { - key = "mass" - value_type = "STRING" - description = "amount of matter" + key = "mass" + value_type = "STRING" + description = "amount of matter" } } - label_extractors = { "mass": "EXTRACT(jsonPayload.request)" } + label_extractors = { + "mass" = "EXTRACT(jsonPayload.request)" + } } `, context) } diff --git a/google/resource_ml_engine_model.go b/google/resource_ml_engine_model.go index 1d0ee0dcda8..a746e61df34 100644 --- a/google/resource_ml_engine_model.go +++ b/google/resource_ml_engine_model.go @@ -56,7 +56,7 @@ prediction requests that do not specify a version.`, Schema: map[string]*schema.Schema{ "name": { Type: schema.TypeString, - Optional: true, + Required: true, ForceNew: true, Description: `The name specified for the version when it was created.`, }, @@ -172,7 +172,7 @@ func resourceMLEngineModelCreate(d *schema.ResourceData, meta interface{}) error } // Store the ID now - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/models/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -273,7 +273,7 @@ func resourceMLEngineModelImport(d *schema.ResourceData, meta interface{}) ([]*s } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/models/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/google/resource_ml_engine_model_generated_test.go b/google/resource_ml_engine_model_generated_test.go index 839cd06d7cd..e5c6c720aab 100644 --- a/google/resource_ml_engine_model_generated_test.go +++ b/google/resource_ml_engine_model_generated_test.go @@ -51,9 +51,9 @@ func TestAccMLEngineModel_mlModelBasicExample(t *testing.T) { func testAccMLEngineModel_mlModelBasicExample(context map[string]interface{}) string { return Nprintf(` resource "google_ml_engine_model" "default" { - name = 
"default%{random_suffix}" + name = "default%{random_suffix}" description = "My model" - regions = ["us-central1"] + regions = ["us-central1"] } `, context) } @@ -85,13 +85,13 @@ func TestAccMLEngineModel_mlModelFullExample(t *testing.T) { func testAccMLEngineModel_mlModelFullExample(context map[string]interface{}) string { return Nprintf(` resource "google_ml_engine_model" "default" { - name = "default%{random_suffix}" + name = "default%{random_suffix}" description = "My model" - regions = ["us-central1"] - labels = { + regions = ["us-central1"] + labels = { my_model = "foo" } - online_prediction_logging = true + online_prediction_logging = true online_prediction_console_logging = true } `, context) diff --git a/google/resource_monitoring_alert_policy.go b/google/resource_monitoring_alert_policy.go index bbd9656348a..e0d34eaf14a 100644 --- a/google/resource_monitoring_alert_policy.go +++ b/google/resource_monitoring_alert_policy.go @@ -631,13 +631,15 @@ limited to 512 Unicode characters.`, The content may not exceed 8,192 Unicode characters and may not exceed more than 10,240 bytes when encoded in UTF-8 format, whichever is smaller.`, + AtLeastOneOf: []string{"documentation.0.content", "documentation.0.mime_type"}, }, "mime_type": { Type: schema.TypeString, Optional: true, Description: `The format of the content field. Presently, only the value "text/markdown" is supported.`, - Default: "text/markdown", + Default: "text/markdown", + AtLeastOneOf: []string{"documentation.0.content", "documentation.0.mime_type"}, }, }, }, @@ -706,7 +708,7 @@ Its syntax is: projects/[PROJECT_ID]/alertPolicies/[ALERT_POLICY_ID]`, Elem: &schema.Schema{ Type: schema.TypeString, }, - Deprecated: "labels is removed as it was never used. See user_labels for the correct field", + Removed: "labels is removed as it was never used. 
See user_labels for the correct field", }, "project": { Type: schema.TypeString, diff --git a/google/resource_monitoring_group_generated_test.go b/google/resource_monitoring_group_generated_test.go index 5c5c04135c9..d5174a947f9 100644 --- a/google/resource_monitoring_group_generated_test.go +++ b/google/resource_monitoring_group_generated_test.go @@ -86,13 +86,13 @@ func testAccMonitoringGroup_monitoringGroupSubgroupExample(context map[string]in return Nprintf(` resource "google_monitoring_group" "parent" { display_name = "tf-test MonitoringSubGroup%{random_suffix}" - filter = "resource.metadata.region=\"europe-west2\"" + filter = "resource.metadata.region=\"europe-west2\"" } resource "google_monitoring_group" "subgroup" { display_name = "tf-test MonitoringSubGroup%{random_suffix}" - filter = "resource.metadata.region=\"europe-west2\"" - parent_name = "${google_monitoring_group.parent.name}" + filter = "resource.metadata.region=\"europe-west2\"" + parent_name = google_monitoring_group.parent.name } `, context) } diff --git a/google/resource_monitoring_notification_channel_generated_test.go b/google/resource_monitoring_notification_channel_generated_test.go index eac2c8089bb..e46e66fff03 100644 --- a/google/resource_monitoring_notification_channel_generated_test.go +++ b/google/resource_monitoring_notification_channel_generated_test.go @@ -52,7 +52,7 @@ func testAccMonitoringNotificationChannel_notificationChannelBasicExample(contex return Nprintf(` resource "google_monitoring_notification_channel" "basic" { display_name = "Test Notification Channel%{random_suffix}" - type = "email" + type = "email" labels = { email_address = "fake_email@blahblah.com" } diff --git a/google/resource_monitoring_uptime_check_config.go b/google/resource_monitoring_uptime_check_config.go index 3ec900bcce0..6f70c3a4ea1 100644 --- a/google/resource_monitoring_uptime_check_config.go +++ b/google/resource_monitoring_uptime_check_config.go @@ -62,7 +62,7 @@ func resourceMonitoringUptimeCheckConfig() *schema.Resource { Schema: map[string]*schema.Schema{ "content": { Type: schema.TypeString, - Optional: true, + Required: true, Description: `String or regex content to match (max 1024 bytes)`, }, }, @@ -84,45 +84,51 @@ func resourceMonitoringUptimeCheckConfig() *schema.Resource { Schema: map[string]*schema.Schema{ "password": { Type: schema.TypeString, - Optional: true, + Required: true, Description: `The password to authenticate.`, Sensitive: true, }, "username": { Type: schema.TypeString, - Optional: true, + Required: true, Description: `The username to authenticate.`, }, }, }, + AtLeastOneOf: []string{"http_check.0.auth_info", "http_check.0.port", "http_check.0.headers", "http_check.0.path", "http_check.0.use_ssl", "http_check.0.mask_headers"}, }, "headers": { - Type: schema.TypeMap, - Optional: true, - Description: `The list of headers to send as part of the uptime check request. If two headers have the same key and different values, they should be entered as a single header, with the value being a comma-separated list of all the desired values as described at https://www.w3.org/Protocols/rfc2616/rfc2616.txt (page 31). Entering two separate headers with the same key in a Create call will cause the first to be overwritten by the second. The maximum number of headers allowed is 100.`, - Elem: &schema.Schema{Type: schema.TypeString}, + Type: schema.TypeMap, + Optional: true, + Description: `The list of headers to send as part of the uptime check request. 
If two headers have the same key and different values, they should be entered as a single header, with the value being a comma-separated list of all the desired values as described at https://www.w3.org/Protocols/rfc2616/rfc2616.txt (page 31). Entering two separate headers with the same key in a Create call will cause the first to be overwritten by the second. The maximum number of headers allowed is 100.`, + Elem: &schema.Schema{Type: schema.TypeString}, + AtLeastOneOf: []string{"http_check.0.auth_info", "http_check.0.port", "http_check.0.headers", "http_check.0.path", "http_check.0.use_ssl", "http_check.0.mask_headers"}, }, "mask_headers": { - Type: schema.TypeBool, - Optional: true, - Description: `Boolean specifying whether to encrypt the header information. Encryption should be specified for any headers related to authentication that you do not wish to be seen when retrieving the configuration. The server will be responsible for encrypting the headers. On Get/List calls, if mask_headers is set to True then the headers will be obscured with ******.`, + Type: schema.TypeBool, + Optional: true, + Description: `Boolean specifying whether to encrypt the header information. Encryption should be specified for any headers related to authentication that you do not wish to be seen when retrieving the configuration. The server will be responsible for encrypting the headers. On Get/List calls, if mask_headers is set to True then the headers will be obscured with ******.`, + AtLeastOneOf: []string{"http_check.0.auth_info", "http_check.0.port", "http_check.0.headers", "http_check.0.path", "http_check.0.use_ssl", "http_check.0.mask_headers"}, }, "path": { - Type: schema.TypeString, - Optional: true, - Description: `The path to the page to run the check against. Will be combined with the host (specified within the MonitoredResource) and port to construct the full URL. Optional (defaults to "/").`, - Default: "/", + Type: schema.TypeString, + Optional: true, + Description: `The path to the page to run the check against. Will be combined with the host (specified within the MonitoredResource) and port to construct the full URL. Optional (defaults to "/").`, + Default: "/", + AtLeastOneOf: []string{"http_check.0.auth_info", "http_check.0.port", "http_check.0.headers", "http_check.0.path", "http_check.0.use_ssl", "http_check.0.mask_headers"}, }, "port": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - Description: `The port to the page to run the check against. Will be combined with host (specified within the MonitoredResource) and path to construct the full URL. Optional (defaults to 80 without SSL, or 443 with SSL).`, + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `The port to the page to run the check against. Will be combined with host (specified within the MonitoredResource) and path to construct the full URL. 
Optional (defaults to 80 without SSL, or 443 with SSL).`, + AtLeastOneOf: []string{"http_check.0.auth_info", "http_check.0.port", "http_check.0.headers", "http_check.0.path", "http_check.0.use_ssl", "http_check.0.mask_headers"}, }, "use_ssl": { - Type: schema.TypeBool, - Optional: true, - Description: `If true, use HTTPS instead of HTTP to run the check.`, + Type: schema.TypeBool, + Optional: true, + Description: `If true, use HTTPS instead of HTTP to run the check.`, + AtLeastOneOf: []string{"http_check.0.auth_info", "http_check.0.port", "http_check.0.headers", "http_check.0.path", "http_check.0.use_ssl", "http_check.0.mask_headers"}, }, "validate_ssl": { Type: schema.TypeBool, @@ -131,7 +137,7 @@ func resourceMonitoringUptimeCheckConfig() *schema.Resource { }, }, }, - ConflictsWith: []string{"tcp_check"}, + ExactlyOneOf: []string{"http_check", "tcp_check"}, }, "monitored_resource": { Type: schema.TypeList, @@ -156,7 +162,7 @@ func resourceMonitoringUptimeCheckConfig() *schema.Resource { }, }, }, - ConflictsWith: []string{"resource_group"}, + ExactlyOneOf: []string{"monitored_resource", "resource_group"}, }, "period": { Type: schema.TypeString, @@ -179,6 +185,7 @@ func resourceMonitoringUptimeCheckConfig() *schema.Resource { ForceNew: true, DiffSuppressFunc: compareSelfLinkOrResourceName, Description: `The group of resources being monitored. Should be the 'name' of a group`, + AtLeastOneOf: []string{"resource_group.0.resource_type", "resource_group.0.group_id"}, }, "resource_type": { Type: schema.TypeString, @@ -186,10 +193,11 @@ func resourceMonitoringUptimeCheckConfig() *schema.Resource { ForceNew: true, ValidateFunc: validation.StringInSlice([]string{"RESOURCE_TYPE_UNSPECIFIED", "INSTANCE", "AWS_ELB_LOAD_BALANCER", ""}, false), Description: `The resource type of the group members.`, + AtLeastOneOf: []string{"resource_group.0.resource_type", "resource_group.0.group_id"}, }, }, }, - ConflictsWith: []string{"monitored_resource"}, + ExactlyOneOf: []string{"monitored_resource", "resource_group"}, }, "selected_regions": { Type: schema.TypeList, @@ -213,7 +221,7 @@ func resourceMonitoringUptimeCheckConfig() *schema.Resource { }, }, }, - ConflictsWith: []string{"http_check"}, + ExactlyOneOf: []string{"http_check", "tcp_check"}, }, "name": { Type: schema.TypeString, @@ -226,42 +234,40 @@ func resourceMonitoringUptimeCheckConfig() *schema.Resource { Description: `The id of the uptime check`, }, "is_internal": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - Deprecated: "This field never worked, and will be removed in 3.0.0.", + Type: schema.TypeBool, + Optional: true, + Removed: "This field never worked, and will be removed in 3.0.0.", }, "internal_checkers": { - Type: schema.TypeList, - Optional: true, - Computed: true, - Deprecated: "This field never worked, and will be removed in 3.0.0.", + Type: schema.TypeList, + Optional: true, + Removed: "This field never worked, and will be removed in 3.0.0.", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "display_name": { - Type: schema.TypeString, - Optional: true, - Deprecated: "This field never worked, and will be removed in 3.0.0.", + Type: schema.TypeString, + Optional: true, + Removed: "This field never worked, and will be removed in 3.0.0.", }, "gcp_zone": { - Type: schema.TypeString, - Optional: true, - Deprecated: "This field never worked, and will be removed in 3.0.0.", + Type: schema.TypeString, + Optional: true, + Removed: "This field never worked, and will be removed in 3.0.0.", }, "name": { - Type: 
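
The ConflictsWith → ExactlyOneOf moves above, together with the AtLeastOneOf lists threaded through http_check, tighten validation from "these may not coexist" to "exactly one of these must be set" and "a present block must set at least one field". A condensed sketch of the pattern with shortened, illustrative field paths:

package google

import "github.com/hashicorp/terraform-plugin-sdk/helper/schema"

// Every optional field in the block names the same key list, so an empty
// http_check {} block is rejected at plan time rather than sent to the API.
var httpCheckKeys = []string{"http_check.0.path", "http_check.0.port"}

var uptimeCheckSketch = map[string]*schema.Schema{
	"http_check": {
		Type:     schema.TypeList,
		Optional: true,
		MaxItems: 1,
		Elem: &schema.Resource{
			Schema: map[string]*schema.Schema{
				"path": {Type: schema.TypeString, Optional: true, AtLeastOneOf: httpCheckKeys},
				"port": {Type: schema.TypeInt, Optional: true, AtLeastOneOf: httpCheckKeys},
			},
		},
		// Both siblings list both names: exactly one block must be present.
		ExactlyOneOf: []string{"http_check", "tcp_check"},
	},
	"tcp_check": {
		Type:     schema.TypeList,
		Optional: true,
		MaxItems: 1,
		Elem: &schema.Resource{
			Schema: map[string]*schema.Schema{
				"port": {Type: schema.TypeInt, Required: true},
			},
		},
		ExactlyOneOf: []string{"http_check", "tcp_check"},
	},
}
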
schema.TypeString, - Optional: true, - Deprecated: "This field never worked, and will be removed in 3.0.0.", + Type: schema.TypeString, + Optional: true, + Removed: "This field never worked, and will be removed in 3.0.0.", }, "network": { - Type: schema.TypeString, - Optional: true, - Deprecated: "This field never worked, and will be removed in 3.0.0.", + Type: schema.TypeString, + Optional: true, + Removed: "This field never worked, and will be removed in 3.0.0.", }, "peer_project_id": { - Type: schema.TypeString, - Optional: true, - Deprecated: "This field never worked, and will be removed in 3.0.0.", + Type: schema.TypeString, + Optional: true, + Removed: "This field never worked, and will be removed in 3.0.0.", }, }, }, @@ -387,18 +393,6 @@ func resourceMonitoringUptimeCheckConfigRead(d *schema.ResourceData, meta interf return handleNotFoundError(err, d, fmt.Sprintf("MonitoringUptimeCheckConfig %q", d.Id())) } - res, err = resourceMonitoringUptimeCheckConfigDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - // Decoding the object has resulted in it being gone. It may be marked deleted - log.Printf("[DEBUG] Removing MonitoringUptimeCheckConfig because it no longer exists.") - d.SetId("") - return nil - } - if err := d.Set("project", project); err != nil { return fmt.Errorf("Error reading UptimeCheckConfig: %s", err) } @@ -1029,8 +1023,3 @@ func expandMonitoringUptimeCheckConfigMonitoredResourceLabels(v interface{}, d T } return m, nil } - -func resourceMonitoringUptimeCheckConfigDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - d.Set("internal_checkers", nil) - return res, nil -} diff --git a/google/resource_monitoring_uptime_check_config_generated_test.go b/google/resource_monitoring_uptime_check_config_generated_test.go index ff0edde105d..49e5d803245 100644 --- a/google/resource_monitoring_uptime_check_config_generated_test.go +++ b/google/resource_monitoring_uptime_check_config_generated_test.go @@ -53,7 +53,7 @@ func testAccMonitoringUptimeCheckConfig_uptimeCheckConfigHttpExample(context map return Nprintf(` resource "google_monitoring_uptime_check_config" "http" { display_name = "http-uptime-check%{random_suffix}" - timeout = "60s" + timeout = "60s" http_check { path = "/some-path" @@ -64,7 +64,7 @@ resource "google_monitoring_uptime_check_config" "http" { type = "uptime_url" labels = { project_id = "%{project_id}" - host = "192.168.1.1" + host = "192.168.1.1" } } @@ -156,7 +156,7 @@ func testAccMonitoringUptimeCheckConfig_uptimeCheckTcpExample(context map[string return Nprintf(` resource "google_monitoring_uptime_check_config" "tcp_group" { display_name = "tcp-uptime-check%{random_suffix}" - timeout = "60s" + timeout = "60s" tcp_check { port = 888 @@ -164,14 +164,13 @@ resource "google_monitoring_uptime_check_config" "tcp_group" { resource_group { resource_type = "INSTANCE" - group_id = "${google_monitoring_group.check.name}" + group_id = google_monitoring_group.check.name } } - resource "google_monitoring_group" "check" { display_name = "uptime-check-group%{random_suffix}" - filter = "resource.metadata.name=has_substring(\"foo\")" + filter = "resource.metadata.name=has_substring(\"foo\")" } `, context) } diff --git a/google/resource_pubsub_subscription.go b/google/resource_pubsub_subscription.go index 4cb06dd4a28..2af183146ee 100644 --- a/google/resource_pubsub_subscription.go +++ b/google/resource_pubsub_subscription.go @@ -57,11 +57,10 @@ func resourcePubsubSubscription() 
*schema.Resource { Schema: map[string]*schema.Schema{ "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: comparePubsubSubscriptionBasename, - Description: `Name of the subscription.`, + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Name of the subscription.`, }, "topic": { Type: schema.TypeString, @@ -101,7 +100,7 @@ will eventually redeliver the message.`, A subscription is considered active as long as any connected subscriber is successfully consuming messages from the subscription or is issuing operations on the subscription. If expirationPolicy is not set, a default -policy with ttl of 31 days will be used. If it is set but left empty, the +policy with ttl of 31 days will be used. If it is set but ttl is "", the resource never expires. The minimum allowed value for expirationPolicy.ttl is 1 day.`, MaxItems: 1, @@ -109,7 +108,7 @@ is 1 day.`, Schema: map[string]*schema.Schema{ "ttl": { Type: schema.TypeString, - Optional: true, + Required: true, DiffSuppressFunc: comparePubsubSubscriptionExpirationPolicy, Description: `Specifies the "time-to-live" duration for an associated resource. The resource expires if it is not active for a period of ttl. @@ -625,26 +624,7 @@ func flattenPubsubSubscriptionExpirationPolicyTtl(v interface{}, d *schema.Resou } func expandPubsubSubscriptionName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - project, err := getProject(d, config) - if err != nil { - return "", err - } - - subscription := d.Get("name").(string) - - re := regexp.MustCompile("projects\\/(.*)\\/subscriptions\\/(.*)") - match := re.FindStringSubmatch(subscription) - if len(match) == 3 { - // We need to preserve the behavior where the user passes the subscription name already in the long form, - // however we need it to be stored as the short form since it's used for the replaceVars in the URL. - // The unintuitive behavior is that if the user provides the long form, we use the project from there, not the one - // specified on the resource or provider. 
- // TODO(drebes): consider deprecating the long form behavior for 3.0 - d.Set("project", match[1]) - d.Set("name", match[2]) - return subscription, nil - } - return fmt.Sprintf("projects/%s/subscriptions/%s", project, subscription), nil + return replaceVars(d, config, "projects/{{project}}/subscriptions/{{name}}") } func expandPubsubSubscriptionTopic(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { diff --git a/google/resource_pubsub_subscription_generated_test.go b/google/resource_pubsub_subscription_generated_test.go index 0b090b735e8..7304ec1c006 100644 --- a/google/resource_pubsub_subscription_generated_test.go +++ b/google/resource_pubsub_subscription_generated_test.go @@ -56,7 +56,7 @@ resource "google_pubsub_topic" "example" { resource "google_pubsub_subscription" "example" { name = "example-subscription%{random_suffix}" - topic = "${google_pubsub_topic.example.name}" + topic = google_pubsub_topic.example.name labels = { foo = "bar" @@ -64,7 +64,7 @@ resource "google_pubsub_subscription" "example" { # 20 minutes message_retention_duration = "1200s" - retain_acked_messages = true + retain_acked_messages = true ack_deadline_seconds = 20 diff --git a/google/resource_pubsub_subscription_test.go b/google/resource_pubsub_subscription_test.go index 71e3cef79e5..d7c7e2d8a2b 100644 --- a/google/resource_pubsub_subscription_test.go +++ b/google/resource_pubsub_subscription_test.go @@ -12,7 +12,7 @@ func TestAccPubsubSubscription_emptyTTL(t *testing.T) { t.Parallel() topic := fmt.Sprintf("tf-test-topic-%s", acctest.RandString(10)) - subscription := fmt.Sprintf("projects/%s/subscriptions/tf-test-sub-%s", getTestProjectFromEnv(), acctest.RandString(10)) + subscription := fmt.Sprintf("tf-test-sub-%s", acctest.RandString(10)) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -32,11 +32,11 @@ func TestAccPubsubSubscription_emptyTTL(t *testing.T) { }) } -func TestAccPubsubSubscription_fullName(t *testing.T) { +func TestAccPubsubSubscription_basic(t *testing.T) { t.Parallel() topic := fmt.Sprintf("tf-test-topic-%s", acctest.RandString(10)) - subscription := fmt.Sprintf("projects/%s/subscriptions/tf-test-sub-%s", getTestProjectFromEnv(), acctest.RandString(10)) + subscription := fmt.Sprintf("tf-test-sub-%s", acctest.RandString(10)) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -44,7 +44,7 @@ func TestAccPubsubSubscription_fullName(t *testing.T) { CheckDestroy: testAccCheckPubsubSubscriptionDestroy, Steps: []resource.TestStep{ { - Config: testAccPubsubSubscription_fullName(topic, subscription, "bar", 20), + Config: testAccPubsubSubscription_basic(topic, subscription, "bar", 20), }, { ResourceName: "google_pubsub_subscription.foo", @@ -69,28 +69,16 @@ func TestAccPubsubSubscription_update(t *testing.T) { CheckDestroy: testAccCheckPubsubSubscriptionDestroy, Steps: []resource.TestStep{ { - Config: testAccPubsubSubscription_fullName(topic, subscriptionLong, "bar", 20), + Config: testAccPubsubSubscription_basic(topic, subscriptionShort, "bar", 20), }, { ResourceName: "google_pubsub_subscription.foo", - ImportStateId: subscriptionLong, - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccPubsubSubscription_fullName(topic, subscriptionLong, "baz", 30), - Check: resource.TestCheckResourceAttr( - "google_pubsub_subscription.foo", "path", subscriptionLong, - ), - }, - { - ResourceName: "google_pubsub_subscription.foo", - ImportStateId: subscriptionLong, + ImportStateId: subscriptionShort, 
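
This is the same canonicalization applied throughout the patch (ML Engine models above; Redis, Source Repo, Spanner, and SQL databases below): expanders and resource IDs now always use the full "projects/{{project}}/..." form instead of preserving whatever shape the user supplied. A toy stand-in for the provider's replaceVars template rendering; renderID is illustrative, not the real helper.

package main

import (
	"fmt"
	"strings"
)

// renderID substitutes {{var}} placeholders, the way the new ID templates in
// this patch are filled from the resource's project/region/name attributes.
func renderID(tmpl string, vars map[string]string) string {
	out := tmpl
	for k, v := range vars {
		out = strings.ReplaceAll(out, "{{"+k+"}}", v)
	}
	return out
}

func main() {
	fmt.Println(renderID(
		"projects/{{project}}/subscriptions/{{name}}",
		map[string]string{"project": "my-proj", "name": "my-sub"},
	)) // projects/my-proj/subscriptions/my-sub
}
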
ImportState: true, ImportStateVerify: true, }, { - Config: testAccPubsubSubscription_fullName(topic, subscriptionShort, "baz", 30), + Config: testAccPubsubSubscription_basic(topic, subscriptionShort, "baz", 30), Check: resource.TestCheckResourceAttr( "google_pubsub_subscription.foo", "path", subscriptionLong, ), @@ -109,7 +97,7 @@ func TestAccPubsubSubscription_push(t *testing.T) { t.Parallel() topicFoo := fmt.Sprintf("tf-test-topic-foo-%s", acctest.RandString(10)) - subscription := fmt.Sprintf("projects/%s/subscriptions/tf-test-topic-foo-%s", getTestProjectFromEnv(), acctest.RandString(10)) + subscription := fmt.Sprintf("tf-test-topic-foo-%s", acctest.RandString(10)) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -142,7 +130,9 @@ resource "google_pubsub_subscription" "foo" { message_retention_duration = "1200s" retain_acked_messages = true ack_deadline_seconds = 20 - expiration_policy {} + expiration_policy { + ttl = "" + } } `, topic, subscription) } @@ -183,7 +173,7 @@ resource "google_pubsub_subscription" "foo" { `, topicFoo, subscription) } -func testAccPubsubSubscription_fullName(topic, subscription, label string, deadline int) string { +func testAccPubsubSubscription_basic(topic, subscription, label string, deadline int) string { return fmt.Sprintf(` resource "google_pubsub_topic" "foo" { name = "%s" diff --git a/google/resource_pubsub_topic_generated_test.go b/google/resource_pubsub_topic_generated_test.go index facd189f81b..a1f69477758 100644 --- a/google/resource_pubsub_topic_generated_test.go +++ b/google/resource_pubsub_topic_generated_test.go @@ -94,7 +94,6 @@ resource "google_pubsub_topic" "example" { "europe-west3", ] } - } `, context) } diff --git a/google/resource_redis_instance.go b/google/resource_redis_instance.go index 803a5e6bbe9..a3bba9c9974 100644 --- a/google/resource_redis_instance.go +++ b/google/resource_redis_instance.go @@ -273,7 +273,7 @@ func resourceRedisInstanceCreate(d *schema.ResourceData, meta interface{}) error } // Store the ID now - id, err := replaceVars(d, config, "{{project}}/{{region}}/{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/locations/{{region}}/instances/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -502,7 +502,7 @@ func resourceRedisInstanceImport(d *schema.ResourceData, meta interface{}) ([]*s } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{project}}/{{region}}/{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/locations/{{region}}/instances/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -629,17 +629,7 @@ func expandRedisInstanceLocationId(v interface{}, d TerraformResourceData, confi } func expandRedisInstanceName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - project, err := getProject(d, config) - if err != nil { - return nil, err - } - - region, err := getRegion(d, config) - if err != nil { - return nil, err - } - - return fmt.Sprintf("projects/%s/locations/%s/instances/%s", project, region, v.(string)), nil + return replaceVars(d, config, "projects/{{project}}/locations/{{region}}/instances/{{name}}") } func expandRedisInstanceMemorySizeGb(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { diff --git a/google/resource_redis_instance_generated_test.go b/google/resource_redis_instance_generated_test.go index a49d0dbc98d..04ffba2fcf9 100644 --- 
a/google/resource_redis_instance_generated_test.go +++ b/google/resource_redis_instance_generated_test.go @@ -93,7 +93,7 @@ resource "google_redis_instance" "cache" { location_id = "us-central1-a" alternative_location_id = "us-central1-f" - authorized_network = "${google_compute_network.auto-network.self_link}" + authorized_network = google_compute_network.auto-network.self_link redis_version = "REDIS_3_2" display_name = "Terraform Test Instance" diff --git a/google/resource_source_repo_repository.go b/google/resource_source_repo_repository.go index b5dad850b98..fc12212934c 100644 --- a/google/resource_source_repo_repository.go +++ b/google/resource_source_repo_repository.go @@ -95,7 +95,7 @@ func resourceSourceRepoRepositoryCreate(d *schema.ResourceData, meta interface{} } // Store the ID now - id, err := replaceVars(d, config, "{{project}}/{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/repos/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -176,7 +176,7 @@ func resourceSourceRepoRepositoryImport(d *schema.ResourceData, meta interface{} } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{project}}/{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/repos/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/google/resource_spanner_database.go b/google/resource_spanner_database.go index d13a2a73d37..7b0cbf7e72c 100644 --- a/google/resource_spanner_database.go +++ b/google/resource_spanner_database.go @@ -125,7 +125,7 @@ func resourceSpannerDatabaseCreate(d *schema.ResourceData, meta interface{}) err } // Store the ID now - id, err := replaceVars(d, config, "{{project}}/{{instance}}/{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/instances/{{instance}}/databases/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -229,7 +229,7 @@ func resourceSpannerDatabaseImport(d *schema.ResourceData, meta interface{}) ([] } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{project}}/{{instance}}/{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/instances/{{instance}}/databases/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/google/resource_spanner_database_generated_test.go b/google/resource_spanner_database_generated_test.go index 615a6859a6a..4c19a80e9dd 100644 --- a/google/resource_spanner_database_generated_test.go +++ b/google/resource_spanner_database_generated_test.go @@ -57,11 +57,11 @@ resource "google_spanner_instance" "main" { } resource "google_spanner_database" "database" { - instance = "${google_spanner_instance.main.name}" - name = "my-database%{random_suffix}" - ddl = [ + instance = google_spanner_instance.main.name + name = "my-database%{random_suffix}" + ddl = [ "CREATE TABLE t1 (t1 INT64 NOT NULL,) PRIMARY KEY(t1)", - "CREATE TABLE t2 (t2 INT64 NOT NULL,) PRIMARY KEY(t2)" + "CREATE TABLE t2 (t2 INT64 NOT NULL,) PRIMARY KEY(t2)", ] } `, context) diff --git a/google/resource_spanner_instance_generated_test.go b/google/resource_spanner_instance_generated_test.go index 294510442e5..7bc8a292a6a 100644 --- a/google/resource_spanner_instance_generated_test.go +++ b/google/resource_spanner_instance_generated_test.go @@ -51,9 +51,9 @@ func TestAccSpannerInstance_spannerInstanceBasicExample(t *testing.T) { func testAccSpannerInstance_spannerInstanceBasicExample(context map[string]interface{}) 
string { return Nprintf(` resource "google_spanner_instance" "example" { - config = "regional-us-central1" - display_name = "Test Spanner Instance" - num_nodes = 2 + config = "regional-us-central1" + display_name = "Test Spanner Instance" + num_nodes = 2 labels = { "foo" = "bar" } diff --git a/google/resource_sql_database.go b/google/resource_sql_database.go index bf30bd827cc..8084945d5b9 100644 --- a/google/resource_sql_database.go +++ b/google/resource_sql_database.go @@ -142,7 +142,7 @@ func resourceSQLDatabaseCreate(d *schema.ResourceData, meta interface{}) error { } // Store the ID now - id, err := replaceVars(d, config, "{{instance}}:{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/instances/{{instance}}/databases/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -332,14 +332,13 @@ func resourceSQLDatabaseImport(d *schema.ResourceData, meta interface{}) ([]*sch "instances/(?P[^/]+)/databases/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+):(?P[^/]+)", "(?P[^/]+)", }, d, config); err != nil { return nil, err } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{instance}}:{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/instances/{{instance}}/databases/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/google/resource_sql_database_generated_test.go b/google/resource_sql_database_generated_test.go index 4e898058d6f..13f8350b71a 100644 --- a/google/resource_sql_database_generated_test.go +++ b/google/resource_sql_database_generated_test.go @@ -51,16 +51,16 @@ func TestAccSQLDatabase_sqlDatabaseBasicExample(t *testing.T) { func testAccSQLDatabase_sqlDatabaseBasicExample(context map[string]interface{}) string { return Nprintf(` resource "google_sql_database" "database" { - name = "my-database%{random_suffix}" - instance = "${google_sql_database_instance.instance.name}" + name = "my-database%{random_suffix}" + instance = google_sql_database_instance.instance.name } resource "google_sql_database_instance" "instance" { - name = "my-database-instance%{random_suffix}" - region = "us-central" - settings { - tier = "D0" - } + name = "my-database-instance%{random_suffix}" + region = "us-central" + settings { + tier = "D0" + } } `, context) } diff --git a/google/resource_sql_database_instance.go b/google/resource_sql_database_instance.go index dbf55492eec..27217f7fae2 100644 --- a/google/resource_sql_database_instance.go +++ b/google/resource_sql_database_instance.go @@ -30,11 +30,55 @@ var sqlDatabaseAuthorizedNetWorkSchemaElem *schema.Resource = &schema.Resource{ }, "value": { Type: schema.TypeString, - Optional: true, + Required: true, }, }, } +var ( + backupConfigurationKeys = []string{ + "settings.0.backup_configuration.0.binary_log_enabled", + "settings.0.backup_configuration.0.enabled", + "settings.0.backup_configuration.0.start_time", + "settings.0.backup_configuration.0.location", + } + + ipConfigurationKeys = []string{ + "settings.0.ip_configuration.0.authorized_networks", + "settings.0.ip_configuration.0.ipv4_enabled", + "settings.0.ip_configuration.0.require_ssl", + "settings.0.ip_configuration.0.private_network", + } + + maintenanceWindowKeys = []string{ + "settings.0.maintenance_window.0.day", + "settings.0.maintenance_window.0.hour", + "settings.0.maintenance_window.0.update_track", + } + + serverCertsKeys = []string{ + "server_ca_cert.0.cert", + "server_ca_cert.0.common_name", + 
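
Dropping the legacy "{{instance}}:{{name}}" import form above leaves the usual import path: try a list of accepted ID regexes in order and take the named captures of the first match. A simplified stand-in, assuming the provider's parseImportId helper works roughly this way; matchImportID and its anchoring are illustrative.

package main

import (
	"fmt"
	"regexp"
)

// matchImportID tries each accepted pattern in order and returns the named
// captures of the first one that matches the whole import ID.
func matchImportID(id string, patterns []string) (map[string]string, error) {
	for _, p := range patterns {
		re := regexp.MustCompile("^" + p + "$")
		m := re.FindStringSubmatch(id)
		if m == nil {
			continue
		}
		vars := map[string]string{}
		for i, name := range re.SubexpNames() {
			if i > 0 && name != "" {
				vars[name] = m[i]
			}
		}
		return vars, nil
	}
	return nil, fmt.Errorf("no pattern matched %q", id)
}

func main() {
	vars, _ := matchImportID(
		"projects/p/instances/i/databases/d",
		[]string{"projects/(?P<project>[^/]+)/instances/(?P<instance>[^/]+)/databases/(?P<name>[^/]+)"},
	)
	fmt.Println(vars["instance"], vars["name"]) // i d
}
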
"server_ca_cert.0.create_time", + "server_ca_cert.0.expiration_time", + "server_ca_cert.0.sha1_fingerprint", + } + + replicaConfigurationKeys = []string{ + "replica_configuration.0.ca_certificate", + "replica_configuration.0.client_certificate", + "replica_configuration.0.client_key", + "replica_configuration.0.connect_retry_interval", + "replica_configuration.0.dump_file_path", + "replica_configuration.0.failover_target", + "replica_configuration.0.master_heartbeat_period", + "replica_configuration.0.password", + "replica_configuration.0.ssl_cipher", + "replica_configuration.0.username", + "replica_configuration.0.verify_server_certificate", + } +) + func resourceSqlDatabaseInstance() *schema.Resource { return &schema.Resource{ Create: resourceSqlDatabaseInstanceCreate, @@ -105,22 +149,26 @@ func resourceSqlDatabaseInstance() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "binary_log_enabled": { - Type: schema.TypeBool, - Optional: true, + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: backupConfigurationKeys, }, "enabled": { - Type: schema.TypeBool, - Optional: true, + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: backupConfigurationKeys, }, "start_time": { Type: schema.TypeString, Optional: true, // start_time is randomly assigned if not set - Computed: true, + Computed: true, + AtLeastOneOf: backupConfigurationKeys, }, "location": { - Type: schema.TypeString, - Optional: true, + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: backupConfigurationKeys, }, }, }, @@ -137,11 +185,11 @@ func resourceSqlDatabaseInstance() *schema.Resource { Schema: map[string]*schema.Schema{ "value": { Type: schema.TypeString, - Optional: true, + Required: true, }, "name": { Type: schema.TypeString, - Optional: true, + Required: true, }, }, }, @@ -172,26 +220,30 @@ func resourceSqlDatabaseInstance() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "authorized_networks": { - Type: schema.TypeSet, - Optional: true, - Set: schema.HashResource(sqlDatabaseAuthorizedNetWorkSchemaElem), - Elem: sqlDatabaseAuthorizedNetWorkSchemaElem, + Type: schema.TypeSet, + Optional: true, + Set: schema.HashResource(sqlDatabaseAuthorizedNetWorkSchemaElem), + Elem: sqlDatabaseAuthorizedNetWorkSchemaElem, + AtLeastOneOf: ipConfigurationKeys, }, "ipv4_enabled": { Type: schema.TypeBool, Optional: true, // Defaults differ between first and second gen instances - Computed: true, + Computed: true, + AtLeastOneOf: ipConfigurationKeys, }, "require_ssl": { - Type: schema.TypeBool, - Optional: true, + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: ipConfigurationKeys, }, "private_network": { Type: schema.TypeString, Optional: true, ValidateFunc: orEmpty(validateRegexp(privateNetworkLinkRegex)), DiffSuppressFunc: compareSelfLinkRelativePaths, + AtLeastOneOf: ipConfigurationKeys, }, }, }, @@ -204,12 +256,14 @@ func resourceSqlDatabaseInstance() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "follow_gae_application": { - Type: schema.TypeString, - Optional: true, + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: []string{"settings.0.location_preference.0.follow_gae_application", "settings.0.location_preference.0.zone"}, }, "zone": { - Type: schema.TypeString, - Optional: true, + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: []string{"settings.0.location_preference.0.follow_gae_application", "settings.0.location_preference.0.zone"}, }, }, }, @@ -224,15 +278,18 @@ func 
resourceSqlDatabaseInstance() *schema.Resource { Type: schema.TypeInt, Optional: true, ValidateFunc: validation.IntBetween(1, 7), + AtLeastOneOf: maintenanceWindowKeys, }, "hour": { Type: schema.TypeInt, Optional: true, ValidateFunc: validation.IntBetween(0, 23), + AtLeastOneOf: maintenanceWindowKeys, }, "update_track": { - Type: schema.TypeString, - Optional: true, + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: maintenanceWindowKeys, }, }, }, @@ -334,60 +391,71 @@ func resourceSqlDatabaseInstance() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "ca_certificate": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + AtLeastOneOf: replicaConfigurationKeys, }, "client_certificate": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + AtLeastOneOf: replicaConfigurationKeys, }, "client_key": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + AtLeastOneOf: replicaConfigurationKeys, }, "connect_retry_interval": { - Type: schema.TypeInt, - Optional: true, - ForceNew: true, + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + AtLeastOneOf: replicaConfigurationKeys, }, "dump_file_path": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + AtLeastOneOf: replicaConfigurationKeys, }, "failover_target": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + AtLeastOneOf: replicaConfigurationKeys, }, "master_heartbeat_period": { - Type: schema.TypeInt, - Optional: true, - ForceNew: true, + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + AtLeastOneOf: replicaConfigurationKeys, }, "password": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Sensitive: true, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Sensitive: true, + AtLeastOneOf: replicaConfigurationKeys, }, "ssl_cipher": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + AtLeastOneOf: replicaConfigurationKeys, }, "username": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + AtLeastOneOf: replicaConfigurationKeys, }, "verify_server_certificate": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + AtLeastOneOf: replicaConfigurationKeys, }, }, }, @@ -399,24 +467,29 @@ func resourceSqlDatabaseInstance() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "cert": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + AtLeastOneOf: serverCertsKeys, }, "common_name": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + AtLeastOneOf: serverCertsKeys, }, "create_time": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + AtLeastOneOf: serverCertsKeys, }, "expiration_time": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + AtLeastOneOf: serverCertsKeys, }, "sha1_fingerprint": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + 
AtLeastOneOf: serverCertsKeys, }, }, }, @@ -503,7 +576,11 @@ func resourceSqlDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}) return fmt.Errorf("Error, failed to create instance %s: %s", instance.Name, err) } - d.SetId(instance.Name) + id, err := replaceVars(d, config, "projects/{{project}}/instances/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) err = sqlAdminOperationWaitTime(config.clientSqlAdmin, op, project, "Create Instance", int(d.Timeout(schema.TimeoutCreate).Minutes())) if err != nil { @@ -708,7 +785,7 @@ func resourceSqlDatabaseInstanceRead(d *schema.ResourceData, meta interface{}) e var instance *sqladmin.DatabaseInstance err = retryTimeDuration(func() (rerr error) { - instance, rerr = config.clientSqlAdmin.Instances.Get(project, d.Id()).Do() + instance, rerr = config.clientSqlAdmin.Instances.Get(project, d.Get("name").(string)).Do() return rerr }, d.Timeout(schema.TimeoutRead), isSqlOperationInProgressError) if err != nil { @@ -843,7 +920,7 @@ func resourceSqlDatabaseInstanceImport(d *schema.ResourceData, meta interface{}) } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/instances/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/google/resource_sql_ssl_cert.go b/google/resource_sql_ssl_cert.go index edfbaba623f..5afce1f0a18 100644 --- a/google/resource_sql_ssl_cert.go +++ b/google/resource_sql_ssl_cert.go @@ -105,7 +105,7 @@ func resourceSqlSslCertCreate(d *schema.ResourceData, meta interface{}) error { } fingerprint := resp.ClientCert.CertInfo.Sha1Fingerprint - d.SetId(fmt.Sprintf("%s/%s", instance, fingerprint)) + d.SetId(fmt.Sprintf("projects/%s/instances/%s/sslCerts/%s", project, instance, fingerprint)) d.Set("sha1_fingerprint", fingerprint) // The private key is only returned on the initial insert so set it here. 
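The ID rewrites in the hunks above (source repo, Spanner, SQL database, SQL instance, and the sslCerts fingerprint ID here) all follow one pattern: every resource ID becomes a full relative resource name built from a `{{var}}` template. As a rough sketch of how such a template expands, assuming a simplified stand-in for the provider's `replaceVars` helper (the name `expandIDTemplate` and the plain `vars` map are hypothetical, not the provider's implementation):

```go
// Illustrative only: replaceVars in the provider reads values from
// schema.ResourceData and the provider Config; this stand-in takes a map.
package main

import (
	"fmt"
	"regexp"
)

var tmplVar = regexp.MustCompile(`\{\{(\w+)\}\}`)

// expandIDTemplate substitutes {{key}} placeholders from vars, failing
// loudly when a key is absent, mirroring the "Error constructing id" path.
func expandIDTemplate(template string, vars map[string]string) (string, error) {
	var missing []string
	id := tmplVar.ReplaceAllStringFunc(template, func(m string) string {
		key := tmplVar.FindStringSubmatch(m)[1]
		v, ok := vars[key]
		if !ok {
			missing = append(missing, key)
		}
		return v
	})
	if len(missing) > 0 {
		return "", fmt.Errorf("Error constructing id: missing %v", missing)
	}
	return id, nil
}

func main() {
	id, _ := expandIDTemplate(
		"projects/{{project}}/instances/{{instance}}/databases/{{name}}",
		map[string]string{"project": "my-proj", "instance": "main", "name": "db1"},
	)
	fmt.Println(id) // projects/my-proj/instances/main/databases/db1
}
```

Standardizing on the `projects/...` form means the stored ID matches one of the patterns each resource's import function already parses.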
@@ -148,7 +148,7 @@ func resourceSqlSslCertRead(d *schema.ResourceData, meta interface{}) error { d.Set("create_time", sslCerts.CreateTime) d.Set("expiration_time", sslCerts.ExpirationTime) - d.SetId(fmt.Sprintf("%s/%s", instance, fingerprint)) + d.SetId(fmt.Sprintf("projects/%s/instances/%s/sslCerts/%s", project, instance, fingerprint)) return nil } diff --git a/google/resource_storage_bucket.go b/google/resource_storage_bucket.go index 683cd690a24..c924fef9160 100644 --- a/google/resource_storage_bucket.go +++ b/google/resource_storage_bucket.go @@ -32,7 +32,8 @@ func resourceStorageBucket() *schema.Resource { State: resourceStorageBucketStateImporter, }, CustomizeDiff: customdiff.All( - customdiff.ForceNewIfChange("retention_policy.0.is_locked", isPolicyLocked)), + customdiff.ForceNewIfChange("retention_policy.0.is_locked", isPolicyLocked), + ), Schema: map[string]*schema.Schema{ "name": { @@ -82,13 +83,6 @@ func resourceStorageBucket() *schema.Resource { }, }, - "predefined_acl": { - Type: schema.TypeString, - Removed: "Please use resource \"storage_bucket_acl.predefined_acl\" instead.", - Optional: true, - ForceNew: true, - }, - "project": { Type: schema.TypeString, Optional: true, @@ -154,10 +148,9 @@ func resourceStorageBucket() *schema.Resource { Optional: true, }, "is_live": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - Deprecated: "Please use `with_state` instead", + Type: schema.TypeBool, + Optional: true, + Removed: "Please use `with_state` instead", }, "with_state": { Type: schema.TypeString, @@ -190,8 +183,7 @@ func resourceStorageBucket() *schema.Resource { Schema: map[string]*schema.Schema{ "enabled": { Type: schema.TypeBool, - Optional: true, - Default: false, + Required: true, }, }, }, @@ -204,12 +196,14 @@ func resourceStorageBucket() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "main_page_suffix": { - Type: schema.TypeString, - Optional: true, + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: []string{"website.0.not_found_page", "website.0.main_page_suffix"}, }, "not_found_page": { - Type: schema.TypeString, - Optional: true, + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: []string{"website.0.main_page_suffix", "website.0.not_found_page"}, }, }, }, @@ -886,10 +880,8 @@ func flattenBucketLifecycleRuleCondition(condition *storage.BucketLifecycleRuleC } else { if *condition.IsLive { ruleCondition["with_state"] = "LIVE" - ruleCondition["is_live"] = true } else { ruleCondition["with_state"] = "ARCHIVED" - ruleCondition["is_live"] = false } } return ruleCondition @@ -1052,18 +1044,9 @@ func expandStorageBucketLifecycleRuleCondition(v interface{}) (*storage.BucketLi transformed.IsLive = googleapi.Bool(true) case "ARCHIVED": transformed.IsLive = googleapi.Bool(false) - case "ANY": + case "ANY", "": // This is unnecessary, but set explicitly to nil for readability. transformed.IsLive = nil - case "": - // Support deprecated `is_live` behavior - // is_live was always read (ok always true) - // so it can only support LIVE/ARCHIVED. 
- // TODO: When removing is_live, combine this case with case "ANY" - if v, ok := condition["is_live"]; ok { - log.Printf("[WARN] using deprecated field `is_live` because with_state is empty") - transformed.IsLive = googleapi.Bool(v.(bool)) - } default: return nil, fmt.Errorf("unexpected value %q for condition.with_state", withStateV.(string)) } @@ -1119,32 +1102,8 @@ func resourceGCSBucketLifecycleRuleConditionHash(v interface{}) int { buf.WriteString(fmt.Sprintf("%s-", v.(string))) } - // Note that we are keeping the boolean notation from when is_live was - // the only field (i.e. not deprecated) in order to prevent a diff from - // hash key. - // There are three possible states for the actual condition - // and correspond to the following hash codes: - // - // 1. LIVE only: "true-" - // Applies for one of: - // with_state = "" && is_live = true - // with_state = "LIVE" - // - // 2. ARCHIVED only: "false-" - // Applies for one of: - // with_state = "" && is_live = false - // with_state = "ARCHIVED" - // - // 3. ANY (i.e. LIVE and ARCHIVED): "" - // Applies for one of: - // with_state = "ANY" - withStateV, withStateOk := m["with_state"] - if !withStateOk || withStateV.(string) == "" { - if isLiveV, ok := m["is_live"]; ok { - buf.WriteString(fmt.Sprintf("%t-", isLiveV.(bool))) - } - } else if withStateOk { + if withStateOk { switch withStateV.(string) { case "LIVE": buf.WriteString(fmt.Sprintf("%t-", true)) diff --git a/google/resource_storage_bucket_access_control_generated_test.go b/google/resource_storage_bucket_access_control_generated_test.go index ad8bd9e8688..be8d0a7ea2d 100644 --- a/google/resource_storage_bucket_access_control_generated_test.go +++ b/google/resource_storage_bucket_access_control_generated_test.go @@ -57,7 +57,7 @@ resource "google_storage_bucket_access_control" "public_rule" { } resource "google_storage_bucket" "bucket" { - name = "static-content-bucket%{random_suffix}" + name = "static-content-bucket%{random_suffix}" } `, context) } diff --git a/google/resource_storage_bucket_object.go b/google/resource_storage_bucket_object.go index 31a000ecdbb..96c0a6b7bfe 100644 --- a/google/resource_storage_bucket_object.go +++ b/google/resource_storage_bucket_object.go @@ -85,13 +85,6 @@ func resourceStorageBucketObject() *schema.Resource { Computed: true, }, - "predefined_acl": { - Type: schema.TypeString, - Removed: "Please use resource \"storage_object_acl.predefined_acl\" instead.", - Optional: true, - ForceNew: true, - }, - "source": { Type: schema.TypeString, Optional: true, diff --git a/google/resource_storage_bucket_test.go b/google/resource_storage_bucket_test.go index 94e2081522e..56663df3776 100644 --- a/google/resource_storage_bucket_test.go +++ b/google/resource_storage_bucket_test.go @@ -4,6 +4,8 @@ import ( "bytes" "fmt" "log" + "regexp" + "strings" "testing" "time" @@ -165,31 +167,12 @@ func TestAccStorageBucket_lifecycleRuleStateLive(t *testing.T) { Providers: testAccProviders, CheckDestroy: testAccStorageBucketDestroy, Steps: []resource.TestStep{ - { - Config: testAccStorageBucket_lifecycleRule_IsLiveTrue(bucketName), - Check: resource.ComposeTestCheckFunc( - testAccCheckStorageBucketExists( - "google_storage_bucket.bucket", bucketName, &bucket), - testAccCheckStorageBucketLifecycleConditionState(googleapi.Bool(true), &bucket), - resource.TestCheckResourceAttr( - "google_storage_bucket.bucket", attrPrefix+"is_live", "true"), - resource.TestCheckResourceAttr( - "google_storage_bucket.bucket", attrPrefix+"with_state", "LIVE"), - ), - }, - { - ResourceName: 
"google_storage_bucket.bucket", - ImportState: true, - ImportStateVerify: true, - }, { Config: testAccStorageBucket_lifecycleRule_withStateLive(bucketName), Check: resource.ComposeTestCheckFunc( testAccCheckStorageBucketExists( "google_storage_bucket.bucket", bucketName, &bucket), testAccCheckStorageBucketLifecycleConditionState(googleapi.Bool(true), &bucket), - resource.TestCheckResourceAttr( - "google_storage_bucket.bucket", attrPrefix+"is_live", "true"), resource.TestCheckResourceAttr( "google_storage_bucket.bucket", attrPrefix+"with_state", "LIVE"), ), @@ -226,28 +209,7 @@ func TestAccStorageBucket_lifecycleRuleStateArchived(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckStorageBucketExists( "google_storage_bucket.bucket", bucketName, &bucket), - testAccCheckStorageBucketLifecycleConditionState(googleapi.Bool(false), &bucket), - resource.TestCheckResourceAttr( - "google_storage_bucket.bucket", attrPrefix+"is_live", "false"), - resource.TestCheckResourceAttr( - "google_storage_bucket.bucket", attrPrefix+"with_state", "ARCHIVED"), - ), - }, - { - ResourceName: "google_storage_bucket.bucket", - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccStorageBucket_lifecycleRule_isLiveFalse(bucketName), - Check: resource.ComposeTestCheckFunc( - testAccCheckStorageBucketExists( - "google_storage_bucket.bucket", bucketName, &bucket), - testAccCheckStorageBucketLifecycleConditionState(googleapi.Bool(false), &bucket), - resource.TestCheckResourceAttr( - "google_storage_bucket.bucket", attrPrefix+"is_live", "false"), - resource.TestCheckResourceAttr( - "google_storage_bucket.bucket", attrPrefix+"with_state", "ARCHIVED"), + testAccCheckStorageBucketLifecycleConditionState(nil, &bucket), ), }, { @@ -261,8 +223,6 @@ func TestAccStorageBucket_lifecycleRuleStateArchived(t *testing.T) { testAccCheckStorageBucketExists( "google_storage_bucket.bucket", bucketName, &bucket), testAccCheckStorageBucketLifecycleConditionState(googleapi.Bool(false), &bucket), - resource.TestCheckResourceAttr( - "google_storage_bucket.bucket", attrPrefix+"is_live", "false"), resource.TestCheckResourceAttr( "google_storage_bucket.bucket", attrPrefix+"with_state", "ARCHIVED"), ), @@ -870,11 +830,27 @@ func TestAccStorageBucket_website(t *testing.T) { bucketSuffix := acctest.RandomWithPrefix("tf-website-test") + websiteKeys := []string{"website.0.main_page_suffix", "website.0.not_found_page"} + errMsg := fmt.Sprintf("one of `%s` must be specified", strings.Join(websiteKeys, ",")) + fullErr := fmt.Sprintf("config is invalid: 2 problems:\n\n- \"%s\": %s\n- \"%s\": %s", websiteKeys[0], errMsg, websiteKeys[1], errMsg) + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccStorageBucketDestroy, Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_websiteNoAttributes(bucketSuffix), + ExpectError: regexp.MustCompile(fullErr), + }, + { + Config: testAccStorageBucket_websiteOneAttribute(bucketSuffix), + }, + { + ResourceName: "google_storage_bucket.website", + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccStorageBucket_website(bucketSuffix), }, @@ -1302,24 +1278,6 @@ resource "google_storage_bucket" "bucket" { `, bucketName) } -func testAccStorageBucket_lifecycleRule_isLiveFalse(bucketName string) string { - return fmt.Sprintf(` -resource "google_storage_bucket" "bucket" { - name = "%s" - lifecycle_rule { - action { - type = "Delete" - } - - condition { - age = 10 - is_live = false - } - } -} -`, 
bucketName) -} - func testAccStorageBucket_lifecycleRule_withStateArchived(bucketName string) string { return fmt.Sprintf(` resource "google_storage_bucket" "bucket" { @@ -1338,24 +1296,6 @@ resource "google_storage_bucket" "bucket" { `, bucketName) } -func testAccStorageBucket_lifecycleRule_IsLiveTrue(bucketName string) string { - return fmt.Sprintf(` -resource "google_storage_bucket" "bucket" { - name = "%s" - lifecycle_rule { - action { - type = "Delete" - } - - condition { - age = 10 - is_live = true - } - } -} -`, bucketName) -} - func testAccStorageBucket_lifecycleRule_withStateLive(bucketName string) string { return fmt.Sprintf(` resource "google_storage_bucket" "bucket" { @@ -1421,17 +1361,14 @@ resource "google_project" "acceptance" { billing_account = "%{billing_account}" } -resource "google_project_services" "acceptance" { +resource "google_project_service" "acceptance" { project = "${google_project.acceptance.project_id}" - - services = [ - "cloudkms.googleapis.com", - ] + service = "cloudkms.googleapis.com" } resource "google_kms_key_ring" "key_ring" { name = "tf-test-%{random_suffix}" - project = "${google_project_services.acceptance.project}" + project = "${google_project_service.acceptance.project}" location = "us" } @@ -1501,3 +1438,29 @@ resource "google_storage_bucket" "bucket" { } `, bucketName) } + +func testAccStorageBucket_websiteNoAttributes(bucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "website" { + name = "%s.gcp.tfacc.hashicorptest.com" + location = "US" + storage_class = "MULTI_REGIONAL" + + website {} + } +`, bucketName) +} + +func testAccStorageBucket_websiteOneAttribute(bucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "website" { + name = "%s.gcp.tfacc.hashicorptest.com" + location = "US" + storage_class = "MULTI_REGIONAL" + + website { + main_page_suffix = "index.html" + } + } +`, bucketName) +} diff --git a/google/resource_storage_default_object_access_control_generated_test.go b/google/resource_storage_default_object_access_control_generated_test.go index e4334976aa1..4fe84c74afd 100644 --- a/google/resource_storage_default_object_access_control_generated_test.go +++ b/google/resource_storage_default_object_access_control_generated_test.go @@ -52,13 +52,13 @@ func TestAccStorageDefaultObjectAccessControl_storageDefaultObjectAccessControlP func testAccStorageDefaultObjectAccessControl_storageDefaultObjectAccessControlPublicExample(context map[string]interface{}) string { return Nprintf(` resource "google_storage_default_object_access_control" "public_rule" { - bucket = "${google_storage_bucket.bucket.name}" + bucket = google_storage_bucket.bucket.name role = "READER" entity = "allUsers" } resource "google_storage_bucket" "bucket" { - name = "static-content-bucket%{random_suffix}" + name = "static-content-bucket%{random_suffix}" } `, context) } diff --git a/google/resource_storage_object_access_control_generated_test.go b/google/resource_storage_object_access_control_generated_test.go index a3363e09bf6..b422ecc22ff 100644 --- a/google/resource_storage_object_access_control_generated_test.go +++ b/google/resource_storage_object_access_control_generated_test.go @@ -51,20 +51,20 @@ func TestAccStorageObjectAccessControl_storageObjectAccessControlPublicObjectExa func testAccStorageObjectAccessControl_storageObjectAccessControlPublicObjectExample(context map[string]interface{}) string { return Nprintf(` resource "google_storage_object_access_control" "public_rule" { - object = 
"${google_storage_bucket_object.object.output_name}" - bucket = "${google_storage_bucket.bucket.name}" + object = google_storage_bucket_object.object.output_name + bucket = google_storage_bucket.bucket.name role = "READER" entity = "allUsers" } resource "google_storage_bucket" "bucket" { - name = "static-content-bucket%{random_suffix}" + name = "static-content-bucket%{random_suffix}" } - resource "google_storage_bucket_object" "object" { - name = "public-object%{random_suffix}" - bucket = "${google_storage_bucket.bucket.name}" - source = "test-fixtures/header-logo.png" +resource "google_storage_bucket_object" "object" { + name = "public-object%{random_suffix}" + bucket = google_storage_bucket.bucket.name + source = "test-fixtures/header-logo.png" } `, context) } diff --git a/google/resource_storage_transfer_job.go b/google/resource_storage_transfer_job.go index cc8c18cea94..494e330b77d 100644 --- a/google/resource_storage_transfer_job.go +++ b/google/resource_storage_transfer_job.go @@ -11,6 +11,27 @@ import ( "time" ) +var ( + objectConditionsKeys = []string{ + "transfer_spec.0.object_conditions.0.min_time_elapsed_since_last_modification", + "transfer_spec.0.object_conditions.0.max_time_elapsed_since_last_modification", + "transfer_spec.0.object_conditions.0.include_prefixes", + "transfer_spec.0.object_conditions.0.exclude_prefixes", + } + + transferOptionsKeys = []string{ + "transfer_spec.0.transfer_options.0.overwrite_objects_already_existing_in_sink", + "transfer_spec.0.transfer_options.0.delete_objects_unique_in_sink", + "transfer_spec.0.transfer_options.0.delete_objects_from_source_after_transfer", + } + + transferSpecDataSourceKeys = []string{ + "transfer_spec.0.gcs_data_source", + "transfer_spec.0.aws_s3_data_source", + "transfer_spec.0.http_data_source", + } +) + func resourceStorageTransferJob() *schema.Resource { return &schema.Resource{ Create: resourceStorageTransferJobCreate, @@ -52,25 +73,25 @@ func resourceStorageTransferJob() *schema.Resource { Elem: gcsDataSchema(), }, "gcs_data_source": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: gcsDataSchema(), - ConflictsWith: []string{"transfer_spec.aws_s3_data_source", "transfer_spec.http_data_source"}, + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: gcsDataSchema(), + ExactlyOneOf: transferSpecDataSourceKeys, }, "aws_s3_data_source": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: awsS3DataSchema(), - ConflictsWith: []string{"transfer_spec.gcs_data_source", "transfer_spec.http_data_source"}, + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: awsS3DataSchema(), + ExactlyOneOf: transferSpecDataSourceKeys, }, "http_data_source": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: httpDataSchema(), - ConflictsWith: []string{"transfer_spec.aws_s3_data_source", "transfer_spec.gcs_data_source"}, + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: httpDataSchema(), + ExactlyOneOf: transferSpecDataSourceKeys, }, }, }, @@ -139,23 +160,27 @@ func objectConditionsSchema() *schema.Schema { Type: schema.TypeString, ValidateFunc: validateDuration(), Optional: true, + AtLeastOneOf: objectConditionsKeys, }, "max_time_elapsed_since_last_modification": { Type: schema.TypeString, ValidateFunc: validateDuration(), Optional: true, + AtLeastOneOf: objectConditionsKeys, }, "include_prefixes": { - Type: schema.TypeList, - Optional: true, + Type: schema.TypeList, + Optional: true, + AtLeastOneOf: objectConditionsKeys, Elem: &schema.Schema{ MaxItems: 
1000, Type: schema.TypeString, }, }, "exclude_prefixes": { - Type: schema.TypeList, - Optional: true, + Type: schema.TypeList, + Optional: true, + AtLeastOneOf: objectConditionsKeys, Elem: &schema.Schema{ MaxItems: 1000, Type: schema.TypeString, @@ -174,17 +199,20 @@ func transferOptionsSchema() *schema.Schema { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "overwrite_objects_already_existing_in_sink": { - Type: schema.TypeBool, - Optional: true, + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: transferOptionsKeys, }, "delete_objects_unique_in_sink": { Type: schema.TypeBool, Optional: true, + AtLeastOneOf: transferOptionsKeys, ConflictsWith: []string{"transfer_spec.transfer_options.delete_objects_from_source_after_transfer"}, }, "delete_objects_from_source_after_transfer": { Type: schema.TypeBool, Optional: true, + AtLeastOneOf: transferOptionsKeys, ConflictsWith: []string{"transfer_spec.transfer_options.delete_objects_unique_in_sink"}, }, }, diff --git a/google/resource_tpu_node.go b/google/resource_tpu_node.go index ee7795c6f95..5c2efb88554 100644 --- a/google/resource_tpu_node.go +++ b/google/resource_tpu_node.go @@ -148,11 +148,10 @@ used.`, Schema: map[string]*schema.Schema{ "preemptible": { Type: schema.TypeBool, - Optional: true, + Required: true, ForceNew: true, DiffSuppressFunc: compareTpuNodeSchedulingConfig, Description: `Defines whether the TPU instance is preemptible.`, - Default: false, }, }, }, @@ -265,7 +264,7 @@ func resourceTPUNodeCreate(d *schema.ResourceData, meta interface{}) error { } // Store the ID now - id, err := replaceVars(d, config, "{{project}}/{{zone}}/{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/locations/{{zone}}/nodes/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -431,7 +430,7 @@ func resourceTPUNodeImport(d *schema.ResourceData, meta interface{}) ([]*schema. 
} // Replace import id for the resource id - id, err := replaceVars(d, config, "{{project}}/{{zone}}/{{name}}") + id, err := replaceVars(d, config, "projects/{{project}}/locations/{{zone}}/nodes/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/google/resource_tpu_node_generated_test.go b/google/resource_tpu_node_generated_test.go index 776a296da60..590501b6f67 100644 --- a/google/resource_tpu_node_generated_test.go +++ b/google/resource_tpu_node_generated_test.go @@ -51,15 +51,17 @@ func TestAccTPUNode_tpuNodeBasicExample(t *testing.T) { func testAccTPUNode_tpuNodeBasicExample(context map[string]interface{}) string { return Nprintf(` -data "google_tpu_tensorflow_versions" "available" { } + +data "google_tpu_tensorflow_versions" "available" { +} resource "google_tpu_node" "tpu" { - name = "test-tpu%{random_suffix}" - zone = "us-central1-b" + name = "test-tpu%{random_suffix}" + zone = "us-central1-b" - accelerator_type = "v3-8" - tensorflow_version = "${data.google_tpu_tensorflow_versions.available.versions[0]}" - cidr_block = "10.2.0.0/29" + accelerator_type = "v3-8" + tensorflow_version = data.google_tpu_tensorflow_versions.available.versions[0] + cidr_block = "10.2.0.0/29" } `, context) } @@ -91,27 +93,29 @@ func TestAccTPUNode_tpuNodeFullExample(t *testing.T) { func testAccTPUNode_tpuNodeFullExample(context map[string]interface{}) string { return Nprintf(` -data "google_tpu_tensorflow_versions" "available" { } +data "google_tpu_tensorflow_versions" "available" { +} + resource "google_tpu_node" "tpu" { - name = "test-tpu%{random_suffix}" - zone = "us-central1-b" + name = "test-tpu%{random_suffix}" + zone = "us-central1-b" - accelerator_type = "v3-8" + accelerator_type = "v3-8" - cidr_block = "10.3.0.0/29" - tensorflow_version = "${data.google_tpu_tensorflow_versions.available.versions[0]}" + cidr_block = "10.3.0.0/29" + tensorflow_version = data.google_tpu_tensorflow_versions.available.versions[0] - description = "Terraform Google Provider test TPU" - network = "default" + description = "Terraform Google Provider test TPU" + network = "default" - labels = { - foo = "bar" - } + labels = { + foo = "bar" + } - scheduling_config { - preemptible = true - } + scheduling_config { + preemptible = true + } } `, context) } diff --git a/google/serviceusage_operation.go b/google/serviceusage_operation.go index 7eea8347d2a..7e4794fc49a 100644 --- a/google/serviceusage_operation.go +++ b/google/serviceusage_operation.go @@ -3,6 +3,7 @@ package google import ( "fmt" + "google.golang.org/api/googleapi" "google.golang.org/api/serviceusage/v1" ) @@ -37,3 +38,18 @@ func serviceUsageOperationWaitTime(config *Config, op *serviceusage.Operation, a } return OperationWait(w, activity, timeoutMinutes) } + +func handleServiceUsageRetryableError(err error) error { + if err == nil { + return nil + } + if gerr, ok := err.(*googleapi.Error); ok { + if (gerr.Code == 400 || gerr.Code == 412) && gerr.Message == "Precondition check failed." 
{ + return &googleapi.Error{ + Code: 503, + Message: "api returned \"precondition failed\" while enabling service", + } + } + } + return err +} diff --git a/google/test_utils.go b/google/test_utils.go index 3a9b611ad37..b00ea13672e 100644 --- a/google/test_utils.go +++ b/google/test_utils.go @@ -71,6 +71,10 @@ func (d *ResourceDiffMock) GetChange(key string) (interface{}, interface{}) { return d.Before[key], d.After[key] } +func (d *ResourceDiffMock) Get(key string) interface{} { + return d.After[key] +} + func (d *ResourceDiffMock) Clear(key string) error { if d.Cleared == nil { d.Cleared = map[string]struct{}{} diff --git a/google/utils.go b/google/utils.go index f7c2348bd3c..760d35e84ad 100644 --- a/google/utils.go +++ b/google/utils.go @@ -29,6 +29,7 @@ type TerraformResourceData interface { type TerraformResourceDiff interface { GetChange(string) (interface{}, interface{}) + Get(string) interface{} Clear(string) error } diff --git a/google/validation.go b/google/validation.go index 07b169799ad..3a59f006b4e 100644 --- a/google/validation.go +++ b/google/validation.go @@ -232,6 +232,14 @@ func validateNonNegativeDuration() schema.SchemaValidateFunc { } } +func validateIpAddress(i interface{}, val string) ([]string, []error) { + ip := net.ParseIP(i.(string)) + if ip == nil { + return nil, []error{fmt.Errorf("could not parse %q to IP address", val)} + } + return nil, nil +} + // StringNotInSlice returns a SchemaValidateFunc which tests if the provided value // is of type string and that it matches none of the element in the invalid slice. // if ignorecase is true, case is ignored. diff --git a/website/docs/d/datasource_client_config.html.markdown b/website/docs/d/datasource_client_config.html.markdown index 2627a0f8680..4c570679a35 100644 --- a/website/docs/d/datasource_client_config.html.markdown +++ b/website/docs/d/datasource_client_config.html.markdown @@ -14,29 +14,33 @@ Use this data source to access the configuration of the Google Cloud provider. 
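The `handleServiceUsageRetryableError` helper added in serviceusage_operation.go above rewrites 400/412 "Precondition check failed." responses into a 503 so that generic retry logic treats them as transient. A hedged usage sketch follows; the `enableService` stub and the bare retry loop are hypothetical, and only the helper itself comes from the patch:

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/api/googleapi"
)

// Copied from the serviceusage_operation.go hunk above.
func handleServiceUsageRetryableError(err error) error {
	if err == nil {
		return nil
	}
	if gerr, ok := err.(*googleapi.Error); ok {
		if (gerr.Code == 400 || gerr.Code == 412) && gerr.Message == "Precondition check failed." {
			return &googleapi.Error{
				Code:    503,
				Message: "api returned \"precondition failed\" while enabling service",
			}
		}
	}
	return err
}

// enableService is a hypothetical stand-in for the real Service Usage call;
// it fails twice with a 412 before succeeding.
func enableService(attempt int) error {
	if attempt < 2 {
		return &googleapi.Error{Code: 412, Message: "Precondition check failed."}
	}
	return nil
}

func main() {
	for attempt := 0; ; attempt++ {
		err := handleServiceUsageRetryableError(enableService(attempt))
		if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 503 {
			time.Sleep(100 * time.Millisecond) // the translated 503 is retried
			continue
		}
		fmt.Println("final result:", err)
		return
	}
}
```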
## Example Usage ```tf -data "google_client_config" "current" {} +data "google_client_config" "current" { +} output "project" { - value = "${data.google_client_config.current.project}" + value = data.google_client_config.current.project } ``` ## Example Usage: Configure Kubernetes provider with OAuth2 access token ```tf -data "google_client_config" "default" {} +data "google_client_config" "default" { +} data "google_container_cluster" "my_cluster" { - name = "my-cluster" - zone = "us-east1-a" + name = "my-cluster" + zone = "us-east1-a" } provider "kubernetes" { load_config_file = false - host = "https://${data.google_container_cluster.my_cluster.endpoint}" - token = "${data.google_client_config.default.access_token}" - cluster_ca_certificate = "${base64decode(data.google_container_cluster.my_cluster.master_auth.0.cluster_ca_certificate)}" + host = "https://${data.google_container_cluster.my_cluster.endpoint}" + token = data.google_client_config.default.access_token + cluster_ca_certificate = base64decode( + data.google_container_cluster.my_cluster.master_auth[0].cluster_ca_certificate, + ) } ``` diff --git a/website/docs/d/datasource_compute_address.html.markdown b/website/docs/d/datasource_compute_address.html.markdown index 91eec27df68..656b71cb4b6 100644 --- a/website/docs/d/datasource_compute_address.html.markdown +++ b/website/docs/d/datasource_compute_address.html.markdown @@ -24,9 +24,9 @@ resource "google_dns_record_set" "frontend" { type = "A" ttl = 300 - managed_zone = "${google_dns_managed_zone.prod.name}" + managed_zone = google_dns_managed_zone.prod.name - rrdatas = ["${data.google_compute_address.my_address.address}"] + rrdatas = [data.google_compute_address.my_address.address] } resource "google_dns_managed_zone" "prod" { diff --git a/website/docs/d/datasource_compute_global_address.html.markdown b/website/docs/d/datasource_compute_global_address.html.markdown index d985e2fe9cb..4f238efe603 100644 --- a/website/docs/d/datasource_compute_global_address.html.markdown +++ b/website/docs/d/datasource_compute_global_address.html.markdown @@ -24,9 +24,9 @@ resource "google_dns_record_set" "frontend" { type = "A" ttl = 300 - managed_zone = "${google_dns_managed_zone.prod.name}" + managed_zone = google_dns_managed_zone.prod.name - rrdatas = ["${data.google_compute_global_address.my_address.address}"] + rrdatas = [data.google_compute_global_address.my_address.address] } resource "google_dns_managed_zone" "prod" { diff --git a/website/docs/d/datasource_compute_image.html.markdown b/website/docs/d/datasource_compute_image.html.markdown index 53597c27c49..f30e03f6ecc 100644 --- a/website/docs/d/datasource_compute_image.html.markdown +++ b/website/docs/d/datasource_compute_image.html.markdown @@ -25,7 +25,7 @@ resource "google_compute_instance" "default" { boot_disk { initialize_params { - image = "${data.google_compute_image.my_image.self_link}" + image = data.google_compute_image.my_image.self_link } } } diff --git a/website/docs/d/datasource_compute_instance.html.markdown b/website/docs/d/datasource_compute_instance.html.markdown index 40ae8bd983f..45898b94404 100644 --- a/website/docs/d/datasource_compute_instance.html.markdown +++ b/website/docs/d/datasource_compute_instance.html.markdown @@ -19,8 +19,8 @@ and ```hcl data "google_compute_instance" "appserver" { - name = "primary-application-server" - zone = "us-central1-a" + name = "primary-application-server" + zone = "us-central1-a" } ``` diff --git a/website/docs/d/datasource_compute_lb_ip_ranges.html.markdown 
b/website/docs/d/datasource_compute_lb_ip_ranges.html.markdown index cce7318e020..a2c52ccc4e6 100644 --- a/website/docs/d/datasource_compute_lb_ip_ranges.html.markdown +++ b/website/docs/d/datasource_compute_lb_ip_ranges.html.markdown @@ -16,20 +16,21 @@ https://cloud.google.com/compute/docs/load-balancing/health-checks#health_check_ ## Example Usage ```tf -data "google_compute_lb_ip_ranges" "ranges" {} +data "google_compute_lb_ip_ranges" "ranges" { +} resource "google_compute_firewall" "lb" { name = "lb-firewall" - network = "${google_compute_network.main.name}" + network = google_compute_network.main.name allow { protocol = "tcp" ports = ["80"] } - source_ranges = ["${data.google_compute_lb_ip_ranges.ranges.network}"] + source_ranges = data.google_compute_lb_ip_ranges.ranges.network target_tags = [ - "InstanceBehindLoadBalancer" + "InstanceBehindLoadBalancer", ] } ``` diff --git a/website/docs/d/datasource_compute_region_instance_group.html.markdown b/website/docs/d/datasource_compute_region_instance_group.html.markdown index 592c220da5c..19b0090452f 100644 --- a/website/docs/d/datasource_compute_region_instance_group.html.markdown +++ b/website/docs/d/datasource_compute_region_instance_group.html.markdown @@ -12,29 +12,28 @@ description: |- Get a Compute Region Instance Group within GCE. For more information, see [the official documentation](https://cloud.google.com/compute/docs/instance-groups/distributing-instances-with-regional-instance-groups) and [API](https://cloud.google.com/compute/docs/reference/latest/regionInstanceGroups). -``` +```hcl data "google_compute_region_instance_group" "group" { - name = "instance-group-name" + name = "instance-group-name" } ``` The most common use of this datasource will be to fetch information about the instances inside regional managed instance groups, for instance: -``` +```hcl resource "google_compute_region_instance_group_manager" "foo" { - name = "some_name" + name = "some_name" ... - base_instance_name = "foo" + base_instance_name = "foo" ... - instance_template = "${google_compute_instance_template.foo.self_link}" - target_pools = ["${google_compute_target_pool.foo.self_link}"] + instance_template = google_compute_instance_template.foo.self_link + target_pools = [google_compute_target_pool.foo.self_link] ... } data "google_compute_region_instance_group" "data_source" { - self_link = "${google_compute_region_instance_group_manager.foo.instance_group}" + self_link = google_compute_region_instance_group_manager.foo.instance_group } - ``` ## Argument Reference diff --git a/website/docs/d/datasource_compute_ssl_certificate.html.markdown b/website/docs/d/datasource_compute_ssl_certificate.html.markdown index deddd521f72..daa3d6c7d4a 100644 --- a/website/docs/d/datasource_compute_ssl_certificate.html.markdown +++ b/website/docs/d/datasource_compute_ssl_certificate.html.markdown @@ -15,19 +15,19 @@ Get info about a Google Compute SSL Certificate from its name. 
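The schema hunks earlier in this patch (sql_database_instance, storage_transfer_job, and storage_bucket's `website` block) all introduce the same `AtLeastOneOf` pattern: each optional field in a nested block lists the complete set of sibling keys, so an empty block such as `website {}` now fails validation at plan time, as the new `testAccStorageBucket_websiteNoAttributes` test asserts. A minimal sketch of the pattern, assuming the v1 plugin SDK; the resource wrapper here is hypothetical, while the `website.0.*` key format matches the patch:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)

// Every optional sibling lists the full key set, so at least one of them
// must be present whenever the parent block is configured.
var websiteKeys = []string{
	"website.0.main_page_suffix",
	"website.0.not_found_page",
}

func exampleResource() *schema.Resource {
	return &schema.Resource{
		Schema: map[string]*schema.Schema{
			"website": {
				Type:     schema.TypeList,
				Optional: true,
				MaxItems: 1,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"main_page_suffix": {
							Type:         schema.TypeString,
							Optional:     true,
							AtLeastOneOf: websiteKeys,
						},
						"not_found_page": {
							Type:         schema.TypeString,
							Optional:     true,
							AtLeastOneOf: websiteKeys,
						},
					},
				},
			},
		},
	}
}

func main() {
	r := exampleResource()
	fmt.Println(len(r.Schema), "top-level key(s); website siblings:", websiteKeys)
}
```

Sharing one keys slice across the siblings, as the patch does with `backupConfigurationKeys` and friends, keeps the lists from drifting apart.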
```tf data "google_compute_ssl_certificate" "my_cert" { - name = "my-cert" + name = "my-cert" } output "certificate" { - value = "${data.google_compute_ssl_certificate.my_cert.certificate}" + value = data.google_compute_ssl_certificate.my_cert.certificate } output "certificate_id" { - value = "${data.google_compute_ssl_certificate.my_cert.certificate_id}" + value = data.google_compute_ssl_certificate.my_cert.certificate_id } output "self_link" { - value = "${data.google_compute_ssl_certificate.my_cert.self_link}" + value = data.google_compute_ssl_certificate.my_cert.self_link } ``` diff --git a/website/docs/d/datasource_google_client_openid_userinfo.html.markdown b/website/docs/d/datasource_google_client_openid_userinfo.html.markdown index ad33f675d40..5a4dcd3068e 100644 --- a/website/docs/d/datasource_google_client_openid_userinfo.html.markdown +++ b/website/docs/d/datasource_google_client_openid_userinfo.html.markdown @@ -12,9 +12,8 @@ description: |- Get OpenID userinfo about the credentials used with the Google provider, specifically the email. -When the `https://www.googleapis.com/auth/userinfo.email` scope is enabled in -your provider block, this datasource enables you to export the email of the -account you've authenticated the provider with; this can be used alongside +This datasource enables you to export the email of the account you've +authenticated the provider with; this can be used alongside `data.google_client_config`'s `access_token` to perform OpenID Connect authentication with GKE and configure an RBAC role for the email used. @@ -25,51 +24,36 @@ receive an error otherwise. ## Example Usage - exporting an email ```hcl -provider "google" { - scopes = [ - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/ndev.clouddns.readwrite", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/userinfo.email", - ] +data "google_client_openid_userinfo" "me" { } -data "google_client_openid_userinfo" "me" {} - output "my-email" { - value = "${data.google_client_openid_userinfo.me.email}" + value = data.google_client_openid_userinfo.me.email } ``` ## Example Usage - OpenID Connect w/ Kubernetes provider + RBAC IAM role ```hcl -provider "google" { - scopes = [ - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/ndev.clouddns.readwrite", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/userinfo.email", - ] +data "google_client_openid_userinfo" "provider_identity" { } -data "google_client_openid_userinfo" "provider_identity" {} - -data "google_client_config" "provider" {} +data "google_client_config" "provider" { +} data "google_container_cluster" "my_cluster" { - name = "my-cluster" - zone = "us-east1-a" + name = "my-cluster" + zone = "us-east1-a" } provider "kubernetes" { load_config_file = false - host = "https://${data.google_container_cluster.my_cluster.endpoint}" - token = "${data.google_client_config.provider.access_token}" - cluster_ca_certificate = "${base64decode(data.google_container_cluster.my_cluster.master_auth.0.cluster_ca_certificate)}" + host = "https://${data.google_container_cluster.my_cluster.endpoint}" + token = data.google_client_config.provider.access_token + cluster_ca_certificate = base64decode( + data.google_container_cluster.my_cluster.master_auth[0].cluster_ca_certificate, + ) } resource 
"kubernetes_cluster_role_binding" "user" { @@ -85,7 +69,7 @@ resource "kubernetes_cluster_role_binding" "user" { subject { kind = "User" - name = "${data.google_client_openid_userinfo.provider_identity.email}" + name = data.google_client_openid_userinfo.provider_identity.email } } ``` diff --git a/website/docs/d/datasource_google_composer_image_versions.html.markdown b/website/docs/d/datasource_google_composer_image_versions.html.markdown index a1bfec475b0..ac07c9d8125 100644 --- a/website/docs/d/datasource_google_composer_image_versions.html.markdown +++ b/website/docs/d/datasource_google_composer_image_versions.html.markdown @@ -18,11 +18,11 @@ data "google_composer_image_versions" "all" { } resource "google_composer_environment" "test" { - name = "test-env" - region = "us-central1" + name = "test-env" + region = "us-central1" config { software_config { - image_version = "${data.google_composer_image_versions.all.image_versions.0.image_version_id}" + image_version = data.google_composer_image_versions.all.image_versions[0].image_version_id } } } diff --git a/website/docs/d/datasource_google_compute_backend_service.html.markdown b/website/docs/d/datasource_google_compute_backend_service.html.markdown index 1bb0f3d8d58..21f7d69df7f 100644 --- a/website/docs/d/datasource_google_compute_backend_service.html.markdown +++ b/website/docs/d/datasource_google_compute_backend_service.html.markdown @@ -22,7 +22,7 @@ data "google_compute_backend_service" "baz" { resource "google_compute_backend_service" "default" { name = "backend-service" - health_checks = ["${tolist(data.google_compute_backend_service.baz.health_checks)[0]}"] + health_checks = [tolist(data.google_compute_backend_service.baz.health_checks)[0]] } ``` diff --git a/website/docs/d/datasource_google_compute_network_endpoint_group.html.markdown b/website/docs/d/datasource_google_compute_network_endpoint_group.html.markdown index f9e89e12691..8aedcf36b0d 100644 --- a/website/docs/d/datasource_google_compute_network_endpoint_group.html.markdown +++ b/website/docs/d/datasource_google_compute_network_endpoint_group.html.markdown @@ -17,12 +17,12 @@ The NEG may be found by providing either a `self_link`, or a `name` and a `zone` ```hcl data "google_compute_network_endpoint_group" "neg1" { - name = "k8s1-abcdef01-myns-mysvc-8080-4b6bac43" - zone = "us-central1-a" + name = "k8s1-abcdef01-myns-mysvc-8080-4b6bac43" + zone = "us-central1-a" } data "google_compute_network_endpoint_group" "neg2" { - self_link = "https://www.googleapis.com/compute/v1/projects/myproject/zones/us-central1-a/networkEndpointGroups/k8s1-abcdef01-myns-mysvc-8080-4b6bac43" + self_link = "https://www.googleapis.com/compute/v1/projects/myproject/zones/us-central1-a/networkEndpointGroups/k8s1-abcdef01-myns-mysvc-8080-4b6bac43" } ``` diff --git a/website/docs/d/datasource_google_folder_organization_policy.html.markdown b/website/docs/d/datasource_google_folder_organization_policy.html.markdown index e1e18b801b4..c3e88de7132 100644 --- a/website/docs/d/datasource_google_folder_organization_policy.html.markdown +++ b/website/docs/d/datasource_google_folder_organization_policy.html.markdown @@ -22,7 +22,7 @@ data "google_folder_organization_policy" "policy" { } output "version" { - value = "${data.google_folder_organization_policy.policy.version}" + value = data.google_folder_organization_policy.policy.version } ``` diff --git a/website/docs/d/datasource_google_iam_role.html.markdown b/website/docs/d/datasource_google_iam_role.html.markdown index a438b8eef36..c4e178290de 100644 
--- a/website/docs/d/datasource_google_iam_role.html.markdown +++ b/website/docs/d/datasource_google_iam_role.html.markdown @@ -17,9 +17,8 @@ data "google_iam_role" "roleinfo" { } output "the_role_permissions" { - value = "${data.google_iam_role.roleinfo.included_permissions}" + value = data.google_iam_role.roleinfo.included_permissions } - ``` ## Argument Reference diff --git a/website/docs/d/datasource_google_netblock_ip_ranges.html.markdown b/website/docs/d/datasource_google_netblock_ip_ranges.html.markdown index 95d0b187407..219e2b38a8d 100644 --- a/website/docs/d/datasource_google_netblock_ip_ranges.html.markdown +++ b/website/docs/d/datasource_google_netblock_ip_ranges.html.markdown @@ -14,18 +14,19 @@ Use this data source to get the IP addresses from different special IP ranges on ## Example Usage - Cloud Ranges ```tf -data "google_netblock_ip_ranges" "netblock" {} +data "google_netblock_ip_ranges" "netblock" { +} output "cidr_blocks" { - value = "${data.google_netblock_ip_ranges.netblock.cidr_blocks}" + value = data.google_netblock_ip_ranges.netblock.cidr_blocks } output "cidr_blocks_ipv4" { - value = "${data.google_netblock_ip_ranges.netblock.cidr_blocks_ipv4}" + value = data.google_netblock_ip_ranges.netblock.cidr_blocks_ipv4 } output "cidr_blocks_ipv6" { - value = "${data.google_netblock_ip_ranges.netblock.cidr_blocks_ipv6}" + value = data.google_netblock_ip_ranges.netblock.cidr_blocks_ipv6 } ``` @@ -38,14 +39,14 @@ data "google_netblock_ip_ranges" "legacy-hcs" { resource "google_compute_firewall" "allow-hcs" { name = "allow-hcs" - network = "${google_compute_network.default.name}" + network = google_compute_network.default.name allow { protocol = "tcp" ports = ["80"] } - source_ranges = ["${data.google_netblock_ip_ranges.legacy-hcs.cidr_blocks_ipv4}"] + source_ranges = data.google_netblock_ip_ranges.legacy-hcs.cidr_blocks_ipv4 } resource "google_compute_network" "default" { diff --git a/website/docs/d/datasource_google_project_organization_policy.html.markdown b/website/docs/d/datasource_google_project_organization_policy.html.markdown index 0e4a697c0fe..ee6e7b01830 100644 --- a/website/docs/d/datasource_google_project_organization_policy.html.markdown +++ b/website/docs/d/datasource_google_project_organization_policy.html.markdown @@ -22,7 +22,7 @@ data "google_project_organization_policy" "policy" { } output "version" { - value = "${data.google_project_organization_policy.policy.version}" + value = data.google_project_organization_policy.policy.version } ``` diff --git a/website/docs/d/datasource_google_service_account.html.markdown b/website/docs/d/datasource_google_service_account.html.markdown index cc38459c2a3..1f319ea3f7b 100644 --- a/website/docs/d/datasource_google_service_account.html.markdown +++ b/website/docs/d/datasource_google_service_account.html.markdown @@ -27,15 +27,15 @@ data "google_service_account" "myaccount" { } resource "google_service_account_key" "mykey" { - service_account_id = "${data.google_service_account.myaccount.name}" + service_account_id = data.google_service_account.myaccount.name } resource "kubernetes_secret" "google-application-credentials" { - metadata = { + metadata { name = "google-application-credentials" } - data { - credentials.json = "${base64decode(google_service_account_key.mykey.private_key)}" + data = { + credentials.json = base64decode(google_service_account_key.mykey.private_key) } } ``` diff --git a/website/docs/d/datasource_google_service_account_access_token.html.markdown 
b/website/docs/d/datasource_google_service_account_access_token.html.markdown index 476c14ffc26..c18d6b154b6 100644 --- a/website/docs/d/datasource_google_service_account_access_token.html.markdown +++ b/website/docs/d/datasource_google_service_account_access_token.html.markdown @@ -35,30 +35,31 @@ Once the IAM permissions are set, you can apply the new token to a provider boot In the example below, `google_project` will run as `service_B`. ```hcl -provider "google" {} +provider "google" { +} data "google_client_config" "default" { - provider = "google" + provider = google } data "google_service_account_access_token" "default" { - provider = "google" - target_service_account = "service_B@projectB.iam.gserviceaccount.com" - scopes = ["userinfo-email", "cloud-platform"] - lifetime = "300s" + provider = google + target_service_account = "service_B@projectB.iam.gserviceaccount.com" + scopes = ["userinfo-email", "cloud-platform"] + lifetime = "300s" } provider "google" { - alias = "impersonated" - access_token = "${data.google_service_account_access_token.default.access_token}" + alias = "impersonated" + access_token = data.google_service_account_access_token.default.access_token } data "google_client_openid_userinfo" "me" { - provider = "google.impersonated" + provider = google.impersonated } output "target-email" { - value = "${data.google_client_openid_userinfo.me.email}" + value = data.google_client_openid_userinfo.me.email } ``` diff --git a/website/docs/d/datasource_google_service_account_key.html.markdown b/website/docs/d/datasource_google_service_account_key.html.markdown index 69c194152d6..69a2201b9a0 100644 --- a/website/docs/d/datasource_google_service_account_key.html.markdown +++ b/website/docs/d/datasource_google_service_account_key.html.markdown @@ -20,11 +20,11 @@ resource "google_service_account" "myaccount" { } resource "google_service_account_key" "mykey" { - service_account_id = "${google_service_account.myaccount.name}" + service_account_id = google_service_account.myaccount.name } data "google_service_account_key" "mykey" { - name = "${google_service_account_key.mykey.name}" + name = google_service_account_key.mykey.name public_key_type = "TYPE_X509_PEM_FILE" } ``` diff --git a/website/docs/d/datasource_tpu_tensorflow_versions.html.markdown b/website/docs/d/datasource_tpu_tensorflow_versions.html.markdown index 7d809c68ada..3f24fc6e358 100644 --- a/website/docs/d/datasource_tpu_tensorflow_versions.html.markdown +++ b/website/docs/d/datasource_tpu_tensorflow_versions.html.markdown @@ -14,21 +14,23 @@ Get TensorFlow versions available for a project. 
For more information see the [o ## Example Usage ```hcl -data "google_tpu_tensorflow_versions" "available" { } +data "google_tpu_tensorflow_versions" "available" { +} ``` ## Example Usage: Configure Basic TPU Node with available version ```hcl -data "google_tpu_tensorflow_versions" "available" { } +data "google_tpu_tensorflow_versions" "available" { +} resource "google_tpu_node" "tpu" { - name = "test-tpu" - zone = "us-central1-b" + name = "test-tpu" + zone = "us-central1-b" - accelerator_type = "v3-8" - tensorflow_version = "${data.google_tpu_tensorflow_versions.available.versions[0]}" - cidr_block = "10.2.0.0/29" + accelerator_type = "v3-8" + tensorflow_version = data.google_tpu_tensorflow_versions.available.versions[0] + cidr_block = "10.2.0.0/29" } ``` diff --git a/website/docs/d/dns_managed_zone.html.markdown b/website/docs/d/dns_managed_zone.html.markdown index 8cef82a1f6b..3d2403963b8 100644 --- a/website/docs/d/dns_managed_zone.html.markdown +++ b/website/docs/d/dns_managed_zone.html.markdown @@ -17,7 +17,7 @@ and ```hcl data "google_dns_managed_zone" "env_dns_zone" { - name = "qa-zone" + name = "qa-zone" } resource "google_dns_record_set" "dns" { @@ -25,7 +25,7 @@ resource "google_dns_record_set" "dns" { type = "TXT" ttl = 300 - managed_zone = "${data.google_dns_managed_zone.env_dns_zone.name}" + managed_zone = data.google_dns_managed_zone.env_dns_zone.name rrdatas = ["test"] } diff --git a/website/docs/d/google_active_folder.html.markdown b/website/docs/d/google_active_folder.html.markdown index 1caca3e4257..9053e831fb9 100644 --- a/website/docs/d/google_active_folder.html.markdown +++ b/website/docs/d/google_active_folder.html.markdown @@ -16,7 +16,7 @@ Get an active folder within GCP by `display_name` and `parent`. ```tf data "google_active_folder" "department1" { display_name = "Department 1" - parent = "organizations/1234567" + parent = "organizations/1234567" } ``` diff --git a/website/docs/d/google_billing_account.html.markdown b/website/docs/d/google_billing_account.html.markdown index a03e0df8ed9..84d8c8b1e19 100644 --- a/website/docs/d/google_billing_account.html.markdown +++ b/website/docs/d/google_billing_account.html.markdown @@ -22,7 +22,7 @@ resource "google_project" "my_project" { project_id = "your-project-id" org_id = "1234567" - billing_account = "${data.google_billing_account.acct.id}" + billing_account = data.google_billing_account.acct.id } ``` diff --git a/website/docs/d/google_compute_default_service_account.html.markdown b/website/docs/d/google_compute_default_service_account.html.markdown index 7e50711381d..8bc330a41b8 100644 --- a/website/docs/d/google_compute_default_service_account.html.markdown +++ b/website/docs/d/google_compute_default_service_account.html.markdown @@ -14,11 +14,12 @@ Use this data source to retrieve default service account for this project ## Example Usage ```hcl -data "google_compute_default_service_account" "default" { } +data "google_compute_default_service_account" "default" { +} output "default_account" { - value = "${data.google_compute_default_service_account.default.email}" -} + value = data.google_compute_default_service_account.default.email +} ``` ## Argument Reference diff --git a/website/docs/d/google_compute_instance_group.html.markdown b/website/docs/d/google_compute_instance_group.html.markdown index 303df971666..c0ccaa30895 100644 --- a/website/docs/d/google_compute_instance_group.html.markdown +++ b/website/docs/d/google_compute_instance_group.html.markdown @@ -13,7 +13,7 @@ Get a Compute Instance Group within 
GCE. For more information, see [the official documentation](https://cloud.google.com/compute/docs/instance-groups/#unmanaged_instance_groups) and [API](https://cloud.google.com/compute/docs/reference/latest/instanceGroups) -``` +```hcl data "google_compute_instance_group" "all" { name = "instance-group-name" zone = "us-central1-a" diff --git a/website/docs/d/google_compute_node_types.html.markdown b/website/docs/d/google_compute_node_types.html.markdown index 0ddde645734..5a7d8299ce4 100644 --- a/website/docs/d/google_compute_node_types.html.markdown +++ b/website/docs/d/google_compute_node_types.html.markdown @@ -23,7 +23,7 @@ data "google_compute_node_types" "central1b" { resource "google_compute_node_template" "tmpl" { name = "terraform-test-tmpl" region = "us-central1" - node_type = "${data.google_compute_node_types.types.names[0]}" + node_type = data.google_compute_node_types.central1b.names[0] } ``` diff --git a/website/docs/d/google_compute_regions.html.markdown b/website/docs/d/google_compute_regions.html.markdown index 1df2c2bd8c6..468cfd8206b 100644 --- a/website/docs/d/google_compute_regions.html.markdown +++ b/website/docs/d/google_compute_regions.html.markdown @@ -12,15 +12,16 @@ description: |- Provides access to available Google Compute regions for a given project. See more about [regions and zones](https://cloud.google.com/compute/docs/regions-zones/) in the upstream docs. -``` -data "google_compute_regions" "available" {} +```hcl +data "google_compute_regions" "available" { +} resource "google_compute_subnetwork" "cluster" { - count = "${length(data.google_compute_regions.available.names)}" + count = length(data.google_compute_regions.available.names) name = "my-network" ip_cidr_range = "10.36.${count.index}.0/24" network = "my-network" - region = "${data.google_compute_regions.available.names[count.index]}" + region = data.google_compute_regions.available.names[count.index] } ``` diff --git a/website/docs/d/google_compute_resource_policy.html.markdown b/website/docs/d/google_compute_resource_policy.html.markdown index efaf9716041..d91f671bfa7 100644 --- a/website/docs/d/google_compute_resource_policy.html.markdown +++ b/website/docs/d/google_compute_resource_policy.html.markdown @@ -21,7 +21,7 @@ provider "google-beta" { } data "google_compute_resource_policy" "daily" { - provider = "google-beta" + provider = google-beta name = "daily" region = "us-central1" } diff --git a/website/docs/d/google_compute_zones.html.markdown b/website/docs/d/google_compute_zones.html.markdown index 604377b6aca..cf6d3bc47c6 100644 --- a/website/docs/d/google_compute_zones.html.markdown +++ b/website/docs/d/google_compute_zones.html.markdown @@ -12,16 +12,17 @@ description: |- Provides access to available Google Compute zones in a region for a given project. See more about [regions and zones](https://cloud.google.com/compute/docs/regions-zones/regions-zones) in the upstream docs. 
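As a quick illustration of the `validateIpAddress` helper added in the validation.go hunk earlier in this patch: `net.ParseIP` accepts both IPv4 and IPv6 literals and returns nil for anything else. The helper body below is copied from the patch; the loop driver around it is hypothetical:

```go
package main

import (
	"fmt"
	"net"
)

// Copied from the validation.go hunk above.
func validateIpAddress(i interface{}, val string) ([]string, []error) {
	ip := net.ParseIP(i.(string))
	if ip == nil {
		return nil, []error{fmt.Errorf("could not parse %q to IP address", val)}
	}
	return nil, nil
}

func main() {
	for _, in := range []string{"10.2.0.1", "2001:db8::1", "not-an-ip"} {
		_, errs := validateIpAddress(in, "ip_address")
		fmt.Printf("%-12s -> %d error(s)\n", in, len(errs))
	}
}
```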
-``` -data "google_compute_zones" "available" {} +```hcl +data "google_compute_zones" "available" { +} resource "google_compute_instance_group_manager" "foo" { - count = "${length(data.google_compute_zones.available.names)}" + count = length(data.google_compute_zones.available.names) name = "terraform-test-${count.index}" - instance_template = "${google_compute_instance_template.foobar.self_link}" + instance_template = google_compute_instance_template.foobar.self_link base_instance_name = "foobar-${count.index}" - zone = "${data.google_compute_zones.available.names[count.index]}" + zone = data.google_compute_zones.available.names[count.index] target_size = 1 } ``` diff --git a/website/docs/d/google_container_cluster.html.markdown b/website/docs/d/google_container_cluster.html.markdown index 67aba024ce4..c03e9341260 100644 --- a/website/docs/d/google_container_cluster.html.markdown +++ b/website/docs/d/google_container_cluster.html.markdown @@ -15,32 +15,32 @@ Get info about a GKE cluster from its name and location. ```tf data "google_container_cluster" "my_cluster" { - name = "my-cluster" - location = "us-east1-a" + name = "my-cluster" + location = "us-east1-a" } output "cluster_username" { - value = "${data.google_container_cluster.my_cluster.master_auth.0.username}" + value = data.google_container_cluster.my_cluster.master_auth[0].username } output "cluster_password" { - value = "${data.google_container_cluster.my_cluster.master_auth.0.password}" + value = data.google_container_cluster.my_cluster.master_auth[0].password } output "endpoint" { - value = "${data.google_container_cluster.my_cluster.endpoint}" + value = data.google_container_cluster.my_cluster.endpoint } output "instance_group_urls" { - value = "${data.google_container_cluster.my_cluster.instance_group_urls}" + value = data.google_container_cluster.my_cluster.instance_group_urls } output "node_config" { - value = "${data.google_container_cluster.my_cluster.node_config}" + value = data.google_container_cluster.my_cluster.node_config } output "node_pools" { - value = "${data.google_container_cluster.my_cluster.node_pool}" + value = data.google_container_cluster.my_cluster.node_pool } ``` diff --git a/website/docs/d/google_container_engine_versions.html.markdown b/website/docs/d/google_container_engine_versions.html.markdown index 49ff7cf47c4..d6b6ce23a78 100644 --- a/website/docs/d/google_container_engine_versions.html.markdown +++ b/website/docs/d/google_container_engine_versions.html.markdown @@ -21,14 +21,14 @@ support the same version. ```hcl data "google_container_engine_versions" "central1b" { - location = "us-central1-b" + location = "us-central1-b" version_prefix = "1.12." } resource "google_container_cluster" "foo" { name = "terraform-test-cluster" - location = "us-central1-b" - node_version = "${data.google_container_engine_versions.central1b.latest_node_version}" + location = "us-central1-b" + node_version = data.google_container_engine_versions.central1b.latest_node_version initial_node_count = 1 master_auth { @@ -47,14 +47,6 @@ Must exactly match the location the cluster will be deployed in, or listed versions may not be available. If `location`, `region`, and `zone` are not specified, the provider-level zone must be set and is used instead. -* `zone` (Optional, Deprecated) - Zone to list available cluster versions for. -Should match the zone the cluster will be deployed in. `zone` has been -deprecated in favour of `location`. - -* `region` (Optional, Deprecated) - Region to list available cluster versions -for. 
Should match the region the cluster will be deployed in. `region` has been -deprecated in favour of `location`. - * `project` (Optional) - ID of the project to list available cluster versions for. Should match the project the cluster will be deployed to. Defaults to the project that the provider is authenticated with. diff --git a/website/docs/d/google_container_registry_image.html.markdown b/website/docs/d/google_container_registry_image.html.markdown index 8d7ae054a82..799646d76cd 100644 --- a/website/docs/d/google_container_registry_image.html.markdown +++ b/website/docs/d/google_container_registry_image.html.markdown @@ -17,11 +17,11 @@ The URLs are computed entirely offline - as long as the project exists, they wil ```hcl data "google_container_registry_image" "debian" { - name = "debian" + name = "debian" } output "gcr_location" { - value = "${data.google_container_registry_image.debian.image_url}" + value = data.google_container_registry_image.debian.image_url } ``` diff --git a/website/docs/d/google_container_registry_repository.html.markdown b/website/docs/d/google_container_registry_repository.html.markdown index 9880cc4b1b9..3fabcaa3738 100644 --- a/website/docs/d/google_container_registry_repository.html.markdown +++ b/website/docs/d/google_container_registry_repository.html.markdown @@ -16,10 +16,11 @@ The URLs are computed entirely offline - as long as the project exists, they wil ## Example Usage ```hcl -data "google_container_registry_repository" "foo" {} +data "google_container_registry_repository" "foo" { +} output "gcr_location" { - value = "${data.google_container_registry_repository.foo.repository_url}" + value = data.google_container_registry_repository.foo.repository_url } ``` diff --git a/website/docs/d/google_folder.html.markdown b/website/docs/d/google_folder.html.markdown index c35afb99a24..51ed1c50f1c 100644 --- a/website/docs/d/google_folder.html.markdown +++ b/website/docs/d/google_folder.html.markdown @@ -14,7 +14,7 @@ Use this data source to get information about a Google Cloud Folder. ```hcl # Get folder by id data "google_folder" "my_folder_1" { - folder = "folders/12345" + folder = "folders/12345" lookup_organization = true } @@ -24,13 +24,12 @@ data "google_folder" "my_folder_2" { } output "my_folder_1_organization" { - value = "${data.google_folder.my_folder_1.organization}" + value = data.google_folder.my_folder_1.organization } output "my_folder_2_parent" { - value = "${data.google_folder.my_folder_2.parent}" + value = data.google_folder.my_folder_2.parent } - ``` ## Argument Reference diff --git a/website/docs/d/google_iam_policy.html.markdown b/website/docs/d/google_iam_policy.html.markdown index 9c42472a613..c5e83cf8191 100644 --- a/website/docs/d/google_iam_policy.html.markdown +++ b/website/docs/d/google_iam_policy.html.markdown @@ -13,7 +13,7 @@ description: |- Generates an IAM policy document that may be referenced by and applied to other Google Cloud Platform resources, such as the `google_project` resource. 
-``` +```hcl data "google_iam_policy" "admin" { binding { role = "roles/compute.instanceAdmin" diff --git a/website/docs/d/google_kms_crypto_key.html.markdown b/website/docs/d/google_kms_crypto_key.html.markdown index 82a861692ef..cc91bd9fc5f 100644 --- a/website/docs/d/google_kms_crypto_key.html.markdown +++ b/website/docs/d/google_kms_crypto_key.html.markdown @@ -26,8 +26,8 @@ data "google_kms_key_ring" "my_key_ring" { } data "google_kms_crypto_key" "my_crypto_key" { - name = "my-crypto-key" - key_ring = "${data.google_kms_key_ring.my_key_ring.self_link}" + name = "my-crypto-key" + key_ring = data.google_kms_key_ring.my_key_ring.self_link } ``` diff --git a/website/docs/d/google_kms_crypto_key_version.html.markdown b/website/docs/d/google_kms_crypto_key_version.html.markdown index 93cc08b1ed4..e0b00e157c0 100644 --- a/website/docs/d/google_kms_crypto_key_version.html.markdown +++ b/website/docs/d/google_kms_crypto_key_version.html.markdown @@ -26,11 +26,11 @@ data "google_kms_key_ring" "my_key_ring" { data "google_kms_crypto_key" "my_crypto_key" { name = "my-crypto-key" - key_ring = "${data.google_kms_key_ring.my_key_ring.self_link}" + key_ring = data.google_kms_key_ring.my_key_ring.self_link } data "google_kms_crypto_key_version" "my_crypto_key_version" { - crypto_key = "${data.google_kms_key.my_key.self_link}" + crypto_key = data.google_kms_crypto_key.my_crypto_key.self_link } ``` diff --git a/website/docs/d/google_kms_secret.html.markdown b/website/docs/d/google_kms_secret.html.markdown index 47014bda058..55bdbc936c3 100644 --- a/website/docs/d/google_kms_secret.html.markdown +++ b/website/docs/d/google_kms_secret.html.markdown @@ -33,7 +33,7 @@ resource "google_kms_key_ring" "my_key_ring" { resource "google_kms_crypto_key" "my_crypto_key" { name = "my-crypto-key" - key_ring = "${google_kms_key_ring.my_key_ring.self_link}" + key_ring = google_kms_key_ring.my_key_ring.self_link } ``` @@ -56,7 +56,7 @@ Finally, reference the encrypted ciphertext in your resource definitions: ```hcl data "google_kms_secret" "sql_user_password" { - crypto_key = "${google_kms_crypto_key.my_crypto_key.self_link}" + crypto_key = google_kms_crypto_key.my_crypto_key.self_link ciphertext = "CiQAqD+xX4SXOSziF4a8JYvq4spfAuWhhYSNul33H85HnVtNQW4SOgDu2UZ46dQCRFl5MF6ekabviN8xq+F+2035ZJ85B+xTYXqNf4mZs0RJitnWWuXlYQh6axnnJYu3kDU=" } @@ -74,9 +74,9 @@ resource "google_sql_database_instance" "master" { resource "google_sql_user" "users" { name = "me" - instance = "${google_sql_database_instance.master.name}" + instance = google_sql_database_instance.master.name host = "me.com" - password = "${data.google_kms_secret.sql_user_password.plaintext}" + password = data.google_kms_secret.sql_user_password.plaintext } ``` diff --git a/website/docs/d/google_kms_secret_ciphertext.html.markdown b/website/docs/d/google_kms_secret_ciphertext.html.markdown index a83e27f602c..40e85ab43bc 100644 --- a/website/docs/d/google_kms_secret_ciphertext.html.markdown +++ b/website/docs/d/google_kms_secret_ciphertext.html.markdown @@ -33,7 +33,7 @@ resource "google_kms_key_ring" "my_key_ring" { resource "google_kms_crypto_key" "my_crypto_key" { name = "my-crypto-key" - key_ring = "${google_kms_key_ring.my_key_ring.self_link}" + key_ring = google_kms_key_ring.my_key_ring.self_link } ``` @@ -41,8 +41,8 @@ Next, encrypt some sensitive information and use the encrypted data in your reso ```hcl data "google_kms_secret_ciphertext" "my_password" { - crypto_key = "${google_kms_crypto_key.my_crypto_key.self_link}" - plaintext = "my-secret-password" + crypto_key = 
google_kms_crypto_key.my_crypto_key.self_link + plaintext = "my-secret-password" } resource "google_compute_instance" "instance" { @@ -64,7 +64,7 @@ resource "google_compute_instance" "instance" { } metadata = { - password = "${data.google_kms_secret_ciphertext.my_password.ciphertext}" + password = data.google_kms_secret_ciphertext.my_password.ciphertext } } ``` diff --git a/website/docs/d/google_organization.html.markdown b/website/docs/d/google_organization.html.markdown index 4168109714e..d9479242388 100644 --- a/website/docs/d/google_organization.html.markdown +++ b/website/docs/d/google_organization.html.markdown @@ -18,7 +18,7 @@ data "google_organization" "org" { resource "google_folder" "sales" { display_name = "Sales" - parent = "${data.google_organization.org.name}" + parent = data.google_organization.org.name } ``` diff --git a/website/docs/d/google_project.html.markdown b/website/docs/d/google_project.html.markdown index 069939a89aa..4887fd87a45 100644 --- a/website/docs/d/google_project.html.markdown +++ b/website/docs/d/google_project.html.markdown @@ -16,10 +16,11 @@ For more information see ## Example Usage ```hcl -data "google_project" "project" {} +data "google_project" "project" { +} output "project_number" { - value = "${data.google_project.project.number}" + value = data.google_project.project.number } ``` diff --git a/website/docs/d/google_project_services.html.markdown b/website/docs/d/google_project_services.html.markdown deleted file mode 100644 index 6b0e1765c23..00000000000 --- a/website/docs/d/google_project_services.html.markdown +++ /dev/null @@ -1,40 +0,0 @@ ---- -subcategory: "Cloud Platform" -layout: "google" -page_title: "Google: google_project_services" -sidebar_current: "docs-google-datasource-project-services" -description: |- - Retrieve enabled of API services for a Google Cloud Platform project ---- - -# google\_project\_services - -Use this data source to get details on the enabled project services. - -For a list of services available, visit the -[API library page](https://console.cloud.google.com/apis/library) or run `gcloud services list`. - -## Example Usage - -```hcl -data "google_project_services" "project" { - project = "your-project-id" -} - -output "project_services" { - value = "${join(",", data.google_project_services.project.services)}" -} -``` - -## Argument Reference - -The following arguments are supported: - -* `project` - (Required) The project ID. - - -## Attributes Reference - -The following attributes are exported: - -See [google_project_services](https://www.terraform.io/docs/providers/google/r/google_project_services.html) resource for details of the available attributes. 
diff --git a/website/docs/d/google_projects.html.markdown b/website/docs/d/google_projects.html.markdown index f71a4cf4c69..4fc3762d1b3 100644 --- a/website/docs/d/google_projects.html.markdown +++ b/website/docs/d/google_projects.html.markdown @@ -21,7 +21,7 @@ data "google_projects" "my-org-projects" { } data "google_project" "deletion-candidate" { - project_id = "${data.google_projects.my-org-projects.projects.0.project_id}" + project_id = data.google_projects.my-org-projects.projects[0].project_id } ``` diff --git a/website/docs/d/google_storage_project_service_account.html.markdown b/website/docs/d/google_storage_project_service_account.html.markdown index 8ab1e5a98af..017e712944b 100644 --- a/website/docs/d/google_storage_project_service_account.html.markdown +++ b/website/docs/d/google_storage_project_service_account.html.markdown @@ -20,13 +20,14 @@ For more information see ## Example Usage ```hcl -data "google_storage_project_service_account" "gcs_account" {} +data "google_storage_project_service_account" "gcs_account" { +} resource "google_pubsub_topic_iam_binding" "binding" { - topic = "${google_pubsub_topic.topic.name}" - role = "roles/pubsub.publisher" - - members = ["serviceAccount:${data.google_storage_project_service_account.gcs_account.email_address}"] + topic = google_pubsub_topic.topic.name + role = "roles/pubsub.publisher" + + members = ["serviceAccount:${data.google_storage_project_service_account.gcs_account.email_address}"] } ``` diff --git a/website/docs/d/google_storage_transfer_project_service_account.html.markdown b/website/docs/d/google_storage_transfer_project_service_account.html.markdown index 06daf3ae011..b3ad3bdaeb9 100644 --- a/website/docs/d/google_storage_transfer_project_service_account.html.markdown +++ b/website/docs/d/google_storage_transfer_project_service_account.html.markdown @@ -14,10 +14,11 @@ Use this data source to retrieve Storage Transfer service account for this proje ## Example Usage ```hcl -data "google_storage_transfer_project_service_account" "default" { } +data "google_storage_transfer_project_service_account" "default" { +} output "default_account" { - value = "${data.google_storage_transfer_project_service_account.default.email}" + value = data.google_storage_transfer_project_service_account.default.email } ``` diff --git a/website/docs/d/signed_url.html.markdown b/website/docs/d/signed_url.html.markdown index fee03ac2b55..6946cd25b1a 100644 --- a/website/docs/d/signed_url.html.markdown +++ b/website/docs/d/signed_url.html.markdown @@ -22,15 +22,15 @@ data "google_storage_object_signed_url" "artifact" { } resource "google_compute_instance" "vm" { - name = "vm" - - provisioner "remote-exec" { - inline = [ - "wget '${data.google_storage_object_signed_url.artifact.signed_url}' -O install_file.bin", - "chmod +x install_file.bin", - "./install_file.bin" - ] - } + name = "vm" + + provisioner "remote-exec" { + inline = [ + "wget '${data.google_storage_object_signed_url.artifact.signed_url}' -O install_file.bin", + "chmod +x install_file.bin", + "./install_file.bin", + ] + } } ``` @@ -43,7 +43,7 @@ data "google_storage_object_signed_url" "get_url" { content_md5 = "pRviqwS4c4OTJRTe03FD1w==" content_type = "text/plain" duration = "2d" - credentials = "${file("path/to/credentials.json")}" + credentials = file("path/to/credentials.json") extension_headers = { x-goog-if-generation-match = 1 diff --git a/website/docs/guides/provider_reference.html.markdown b/website/docs/guides/provider_reference.html.markdown index 8344f7c1fde..741b1333ecb 100644 
--- a/website/docs/guides/provider_reference.html.markdown +++ b/website/docs/guides/provider_reference.html.markdown @@ -213,6 +213,7 @@ an access token using the service account key specified in `credentials`. * https://www.googleapis.com/auth/cloud-platform * https://www.googleapis.com/auth/ndev.clouddns.readwrite * https://www.googleapis.com/auth/devstorage.full_control + * https://www.googleapis.com/auth/userinfo.email --- @@ -318,8 +319,7 @@ as their versioned counterpart but that won't necessarily always be the case. **So far, batching is implemented for**: -* enabling project services using `google_project_service` or - `google_project_services` +* enabling project services using `google_project_service`. The `batching` block supports the following fields. diff --git a/website/docs/guides/version_3_upgrade.html.markdown b/website/docs/guides/version_3_upgrade.html.markdown index 9c5829e4c80..6811039e07a 100644 --- a/website/docs/guides/version_3_upgrade.html.markdown +++ b/website/docs/guides/version_3_upgrade.html.markdown @@ -50,50 +50,817 @@ so Terraform knows to manage them. ## Upgrade Topics + +- [Provider Version Configuration](#provider-version-configuration) +- [Provider](#provider) +- [ID Format Changes](#id-format-changes) +- [Data Source: `google_container_engine_versions`](#data-source-google_container_engine_versions) +- [Resource: `google_access_context_manager_access_level`](#resource-google_access_context_manager_access_level) +- [Resource: `google_access_context_manager_service_perimeter`](#resource-google_access_context_manager_service_perimeter) +- [Resource: `google_app_engine_application`](#resource-google_app_engine_application) +- [Resource: `google_app_engine_domain_mapping`](#resource-google_app_engine_domain_mapping) +- [Resource: `google_app_engine_standard_app_version`](#resource-google_app_engine_standard_app_version) +- [Resource: `google_bigquery_dataset`](#resource-google_bigquery_dataset) +- [Resource: `google_bigquery_table`](#resource-google_bigquery_table) +- [Resource: `google_bigtable_app_profile`](#resource-google_bigtable_app_profile) +- [Resource: `google_binary_authorization_policy`](#resource-google_binary_authorization_policy) +- [Resource: `google_cloudbuild_trigger`](#resource-google_cloudbuild_trigger) +- [Resource: `google_cloudfunctions_function`](#resource-google_cloudfunctions_function) +- [Resource: `google_cloudiot_registry`](#resource-google_cloudiot_registry) +- [Resource: `google_cloudscheduler_job`](#resource-google_cloudscheduler_job) +- [Resource: `google_composer_environment`](#resource-google_composer_environment) +- [Resource: `google_compute_backend_bucket`](#resource-google_compute_backend_bucket) +- [Resource: `google_compute_backend_service`](#resource-google_compute_backend_service) +- [Resource: `google_compute_firewall`](#resource-google_compute_firewall) +- [Resource: `google_compute_forwarding_rule`](#resource-google_compute_forwarding_rule) +- [Resource: `google_compute_global_forwarding_rule`](#resource-google_compute_global_forwarding_rule) +- [Resource: `google_compute_health_check`](#resource-google_compute_health_check) +- [Resource: `google_compute_image`](#resource-google_compute_image) +- [Resource: `google_compute_instance`](#resource-google_compute_instance) +- [Resource: `google_compute_instance_group_manager`](#resource-google_compute_instance_group_manager) +- [Resource: `google_compute_instance_template`](#resource-google_compute_instance_template) +- [Resource: 
`google_compute_network`](#resource-google_compute_network) +- [Resource: `google_compute_network_peering`](#resource-google_compute_network_peering) +- [Resource: `google_compute_node_template`](#resource-google_compute_node_template) +- [Resource: `google_compute_region_backend_service`](#resource-google_compute_region_backend_service) +- [Resource: `google_compute_region_health_check`](#resource-google_compute_region_health_check) +- [Resource: `google_compute_region_instance_group_manager`](#resource-google_compute_instance_group_manager) +- [Resource: `google_compute_resource_policy`](#resource-google_compute_resource_policy) +- [Resource: `google_compute_route`](#resource-google_compute_route) +- [Resource: `google_compute_router`](#resource-google_compute_router) +- [Resource: `google_compute_router_peer`](#resource-google_compute_router_peer) +- [Resource: `google_compute_snapshot`](#resource-google_compute_snapshot) +- [Resource: `google_compute_subnetwork`](#resource-google_compute_subnetwork) - [Resource: `google_container_cluster`](#resource-google_container_cluster) +- [Resource: `google_container_node_pool`](#resource-google_container_node_pool) +- [Resource: `google_dataproc_autoscaling_policy`](#resource-google_dataproc_autoscaling_policy) +- [Resource: `google_dataproc_cluster`](#resource-google_dataproc_cluster) +- [Resource: `google_dataproc_job`](#resource-google_dataproc_job) +- [Resource: `google_dns_managed_zone`](#resource-google_dns_managed_zone) +- [Resource: `google_dns_policy`](#resource-google_dns_policy) +- [Resource: `google_healthcare_hl7_v2_store`](#resource-google_healthcare_hl7_v2_store) +- [Resource: `google_logging_metric`](#resource-google_logging_metric) +- [Resource: `google_mlengine_model`](#resource-google_mlengine_model) +- [Resource: `google_monitoring_alert_policy`](#resource-google_monitoring_alert_policy) +- [Resource: `google_monitoring_uptime_check_config`](#resource-google_monitoring_uptime_check_config) +- [Resource: `google_organization_policy`](#resource-google_organization_policy) +- [Resource: `google_project_iam_audit_config`](#resource-google_project_iam_audit_config) - [Resource: `google_project_service`](#resource-google_project_service) - [Resource: `google_project_services`](#resource-google_project_services) - [Resource: `google_pubsub_subscription`](#resource-google_pubsub_subscription) -- [Resource: `google_cloudiot_registry`](#resource-google_cloudiot_registry) +- [Resource: `google_security_scanner_scan_config`](#resource-google_security_scanner_scan_config) +- [Resource: `google_service_account_key`](#resource-google_service_account_key) +- [Resource: `google_sql_database_instance`](#resource-google_sql_database_instance) +- [Resource: `google_storage_bucket`](#resource-google_storage_bucket) +- [Resource: `google_storage_transfer_job`](#resource-google_storage_transfer_job) +- [Resource: `google_tpu_node`](#resource-google_tpu_node) + + + +## Provider Version Configuration + +-> Before upgrading to version 3.0.0, it is recommended to upgrade to the most +recent `2.X` series release of the provider, make the changes noted in this guide, +and ensure that your environment successfully runs +[`terraform plan`](https://www.terraform.io/docs/commands/plan.html) +without unexpected changes or deprecation notices. + +It is recommended to use [version constraints](https://www.terraform.io/docs/configuration/providers.html#provider-versions) +when configuring Terraform providers. 
If you are following that recommendation, +update the version constraints in your Terraform configuration and run +[`terraform init`](https://www.terraform.io/docs/commands/init.html) to download +the new version. + +If you aren't using version constraints, you can use `terraform init -upgrade` +in order to upgrade your provider to the latest released version. + +For example, given this previous configuration: + +```hcl +provider "google" { + # ... other configuration ... + + version = "~> 2.17.0" +} +``` + +An updated configuration: + +```hcl +provider "google" { + # ... other configuration ... + + version = "~> 3.0.0" +} +``` + +## Provider + +### Terraform 0.11 no longer supported + +Support for Terraform 0.11 has been deprecated, and Terraform 0.12 or higher is +required to `terraform init` the provider. See [the blog post](https://www.hashicorp.com/blog/deprecating-terraform-0-11-support-in-terraform-providers/) +for more information. It is recommended that you upgrade to Terraform 0.12 before +upgrading to version 3.0.0 of the provider. + +### `userinfo.email` added to default scopes + +`userinfo.email` has been added to the default set of OAuth scopes in the +provider. This provides the Terraform user specified by `credentials`' (generally +a service account) email address to GCP APIs in addition to an obfuscated user +id; particularly, it makes the email of the Terraform user available for some +Kubernetes and IAP use cases. + +If this was previously defined explicitly, the definition can now be removed. + +#### Old Config + +```hcl +provider "google" { + scopes = [ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/ndev.clouddns.readwrite", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/userinfo.email", + ] +} +``` + +#### New Config + +```hcl +provider "google" {} +``` + +## ID Format Changes + +ID formats on many resources have changed. ID formats have standardized on being similar to the `self_link` of +a resource. Users who depended on particular ID formats in previous versions may be impacted. + +## Data Source: `google_container_engine_versions` + +### `region` and `zone` are now removed + +Use `location` instead. + +## Resource: `google_access_context_manager_access_level` + +### `os_type` is now required on block `google_access_context_manager_access_level.basic.conditions.device_policy.os_constraints` + +In an attempt to avoid allowing empty blocks in config files, `os_type` is now +required on the `basic.conditions.device_policy.os_constraints` block. + +## Resource: `google_access_context_manager_service_perimeter` + +### At least one of `resources`, `access_levels`, or `restricted_services` is now required on `google_accesscontextmanager_service_perimeter.status` + +In an attempt to avoid allowing empty blocks in config files, at least one of `resources`, `access_levels`, +or `restricted_services` is now required on the `status` block. + +## Resource: `google_app_engine_application` + +### `split_health_checks` is now required on block `google_app_engine_application.feature_settings` + +In an attempt to avoid allowing empty blocks in config files, `split_health_checks` is now +required on the `feature_settings` block. 
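+For example, a minimal configuration that satisfies the new requirement could look
+like the following sketch (the project ID and location are illustrative):
+
+```hcl
+resource "google_app_engine_application" "app" {
+  project     = "my-project-id" # illustrative project ID
+  location_id = "us-central"
+
+  feature_settings {
+    split_health_checks = true # now required inside this block
+  }
+}
+```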
+ +## Resource: `google_app_engine_domain_mapping` + +### `ssl_management_type` is now required on `google_app_engine_domain_mapping.ssl_settings` + +In an attempt to avoid allowing empty blocks in config files, `ssl_management_type` is now +required on the `ssl_settings` block. + +## Resource: `google_app_engine_standard_app_version` + +### At least one of `zip` or `files` is now required on `google_app_engine_standard_app_version.deployment` + +In an attempt to avoid allowing empty blocks in config files, at least one of `zip` or `files` +is now required on the `deployment` block. + +### `shell` is now required on `google_app_engine_standard_app_version.entrypoint` + +In an attempt to avoid allowing empty blocks in config files, `shell` is now +required on the `entrypoint` block. + +### `script_path` is now required on `google_app_engine_standard_app_version.handlers.script` + +In an attempt to avoid allowing empty blocks in config files, `script_path` is now +required on the `handlers.script` block. + +### `source_url` is now required on `google_app_engine_standard_app_version.deployment.files` and `google_app_engine_standard_app_version.deployment.zip` + +In an attempt to avoid allowing empty blocks in config files, `source_url` is now +required on the `deployment.files` and `deployment.zip` blocks. + +## Resource: `google_bigquery_dataset` + +### `role` is now required on `google_bigquery_dataset.access` + +In an attempt to avoid allowing empty blocks in config files, `role` is now +required on the `access` block. + +## Resource: `google_bigquery_table` + +### At least one of `range` or `skip_leading_rows` is now required on `external_data_configuration.google_sheets_options` + +In an attempt to avoid allowing empty blocks in config files, at least one +of `range` or `skip_leading_rows` is now required on the +`external_data_configuration.google_sheets_options` block. + +## Resource: `google_bigtable_app_profile` + +### Exactly one of `single_cluster_routing` or `multi_cluster_routing_use_any` is now required on `google_bigtable_app_profile` + +In an attempt to be more consistent with the API, exactly one of `single_cluster_routing` or +`multi_cluster_routing_use_any` is now required on `google_bigtable_app_profile`. + +### `cluster_id` is now required on `google_bigtable_app_profile.single_cluster_routing` + +In an attempt to avoid allowing empty blocks in config files, `cluster_id` is now +required on the `single_cluster_routing` block. + +## Resource: `google_binary_authorization_policy` + +### `name_pattern` is now required on `google_binary_authorization_policy.admission_whitelist_patterns` + +In an attempt to avoid allowing empty blocks in config files, `name_pattern` is now +required on the `admission_whitelist_patterns` block. + +### `evaluation_mode` and `enforcement_mode` are now required on `google_binary_authorization_policy.cluster_admission_rules` + +In an attempt to avoid allowing empty blocks in config files, `evaluation_mode` and `enforcement_mode` are now +required on the `cluster_admission_rules` block. + +## Resource: `google_cloudbuild_trigger` + +### Exactly one of `filename` or `build` is now required on `google_cloudbuild_trigger` + +In an attempt to be more consistent with the API, exactly one of `filename` or `build` is now +required on `google_cloudbuild_trigger`.
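+For example, a minimal trigger that satisfies this rule might set only `filename`
+(the repository and branch names are illustrative):
+
+```hcl
+resource "google_cloudbuild_trigger" "trigger" {
+  # Exactly one of filename or build may be set; this sketch uses filename.
+  filename = "cloudbuild.yaml"
+
+  trigger_template {
+    branch_name = "master"
+    repo_name   = "my-repo" # illustrative repository name
+  }
+}
+```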
+ +### Exactly one of `branch_name`, `tag_name` or `commit_sha` is now required on `google_cloudbuild_trigger.trigger_template` + +In an attempt to avoid allowing empty blocks in config files, exactly one +of `branch_name`, `tag_name` or `commit_sha` is now required on the +`trigger_template` block. + +### Exactly one of `pull_request` or `push` is now required on `google_cloudbuild_trigger.github` + +In an attempt to avoid allowing empty blocks in config files, exactly one +of `pull_request` or `push` is now required on the `github` block. + +### Exactly one of `branch` or `tag_name` is now required on `google_cloudbuild_trigger.github.push` + +In an attempt to avoid allowing empty blocks in config files, exactly one +of `branch` or `tag_name` is now required on the `github.push` block. + +### `steps` is now required on `google_cloudbuild_trigger.build`. + +In an attempt to avoid allowing empty blocks in config files, `steps` is now +required on the `build` block. + +### `name` is now required on `google_cloudbuild_trigger.build.steps` + +In an attempt to avoid allowing empty blocks in config files, `name` is now +required on the `build.steps` block. + +### `name` and `path` are now required on `google_cloudbuild_trigger.build.steps.volumes` + +In an attempt to avoid allowing empty blocks in config files, `name` and `path` are now +required on the `build.steps.volumes` block. + +## Resource: `google_cloudfunctions_function` + +### The `runtime` option `nodejs6` has been deprecated + +`nodejs6` has been deprecated and is no longer the default value for `runtime`. +`runtime` is now required. + +## Resource: `google_cloudiot_registry` + +### Replace singular event notification config field with plural `event_notification_configs` + +Use the plural field `event_notification_configs` instead of +`event_notification_config`, which has now been removed. +Since the Cloud IoT API now accepts multiple event notification configs for a +registry, the singular field no longer exists on the API resource and has been +removed from Terraform to prevent conflicts. + + +#### Old Config + +```hcl +resource "google_cloudiot_registry" "myregistry" { + name = "my-registry" + + event_notification_config { + pubsub_topic_name = "${google_pubsub_topic.event-topic.id}" + } +} + +``` + +#### New Config + +```hcl +resource "google_cloudiot_registry" "myregistry" { + name = "my-registry" + + event_notification_configs { + pubsub_topic_name = "${google_pubsub_topic.event-topic.id}" + } +} +``` + +### `public_key_certificate` is now required on block `google_cloudiot_registry.credentials` + +In an attempt to avoid allowing empty blocks in config files, `public_key_certificate` is now +required on the `credentials` block. + +## Resource: `google_cloudscheduler_job` + +### Exactly one of `pubsub_target`, `http_target` or `app_engine_http_target` is required on `google_cloudscheduler_job` + +In an attempt to be more consistent with the API, exactly one of `pubsub_target`, `http_target` +or `app_engine_http_target` is now required on `google_cloudscheduler_job`. + +### `service_account_email` is now required on `google_cloudscheduler_job.http_target.oauth_token` and `google_cloudscheduler_job.http_target.oidc_token`. + +In an attempt to avoid allowing empty blocks in config files, `service_account_email` is now +required on the `http_target.oauth_token` and `http_target.oidc_token` blocks.
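+For example, an `http_target` job satisfying the new requirement might look like
+the following sketch (the job name, URI, and service account reference are illustrative):
+
+```hcl
+resource "google_cloud_scheduler_job" "job" {
+  name     = "my-job" # illustrative
+  schedule = "*/10 * * * *"
+
+  http_target {
+    uri = "https://example.com/ping"
+
+    oidc_token {
+      # service_account_email is now required in this block
+      service_account_email = google_service_account.scheduler.email # assumed to exist elsewhere
+    }
+  }
+}
+```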
+ +### At least one of `retry_count`, `max_retry_duration`, `min_backoff_duration`, `max_backoff_duration`, or `max_doublings` is now required on `google_cloud_scheduler_job.retry_config` + +In an attempt to avoid allowing empty blocks in config files, at least one of `retry_count`, +`max_retry_duration`, `min_backoff_duration`, `max_backoff_duration`, or `max_doublings` is +now required on the `retry_config` block. + +### At least one of `service`, `version`, or `instance` is now required on `google_cloud_scheduler_job.app_engine_http_target.app_engine_routing` + +In an attempt to avoid allowing empty blocks in config files, at least one of `service`, +`version`, or `instance` is now required on the `app_engine_http_target.app_engine_routing` block. + +## Resource: `google_composer_environment` + +### At least one of `airflow_config_overrides`, `pypi_packages`, `env_variables`, `image_version`, or `python_version` is now required on `google_composer_environment.config.software_config` + +In an attempt to avoid allowing empty blocks in config files, at least one of `airflow_config_overrides`, +`pypi_packages`, `env_variables`, `image_version`, or `python_version` is now required on the +`config.software_config` block. + +### `use_ip_aliases` is now required on block `google_composer_environment.ip_allocation_policy` + +Previously the default value of `use_ip_aliases` was `true`. In an attempt to avoid allowing empty blocks +in config files, `use_ip_aliases` is now required on the `ip_allocation_policy` block. + +### `enable_private_endpoint` is now required on block `google_composer_environment.private_environment_config` + +Previously the default value of `enable_private_endpoint` was `true`. In an attempt to avoid allowing empty blocks +in config files, `enable_private_endpoint` is now required on the `private_environment_config` block. + +## Resource: `google_compute_backend_bucket` + +### `signed_url_cache_max_age_sec` is now required on `google_compute_backend_bucket.cdn_policy` + +Previously the default value of `signed_url_cache_max_age_sec` was `3600`. In an attempt to avoid allowing empty +blocks in config files, `signed_url_cache_max_age_sec` is now required on the +`cdn_policy` block. + +## Resource: `google_compute_backend_service` + +### At least one of `connect_timeout`, `max_requests_per_connection`, `max_connections`, `max_pending_requests`, `max_requests`, or `max_retries` is now required on `google_compute_backend_service.circuit_breakers` + +In an attempt to avoid allowing empty blocks in config files, at least one of `connect_timeout`, +`max_requests_per_connection`, `max_connections`, `max_pending_requests`, `max_requests`, +or `max_retries` is now required on the `circuit_breakers` block. + +### At least one of `ttl`, `name`, or `path` is now required on `google_compute_backend_service.consistent_hash.http_cookie` + +In an attempt to avoid allowing empty blocks in config files, at least one of `ttl`, `name`, or `path` +is now required on the `consistent_hash.http_cookie` block. + +### At least one of `http_cookie`, `http_header_name`, or `minimum_ring_size` is now required on `google_compute_backend_service.consistent_hash` + +In an attempt to avoid allowing empty blocks in config files, at least one of `http_cookie`, +`http_header_name`, or `minimum_ring_size` is now required on the `consistent_hash` block.
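+As a sketch, a backend service satisfying the `consistent_hash` requirement might set
+only `minimum_ring_size` (the name and referenced health check are illustrative, and
+the config assumes an `INTERNAL_SELF_MANAGED` scheme with a ring-hash policy, which is
+where `consistent_hash` applies):
+
+```hcl
+resource "google_compute_backend_service" "default" {
+  name                  = "my-backend" # illustrative
+  load_balancing_scheme = "INTERNAL_SELF_MANAGED"
+  locality_lb_policy    = "RING_HASH"
+
+  consistent_hash {
+    minimum_ring_size = 1024 # at least one field must now be set
+  }
+
+  health_checks = [google_compute_health_check.default.self_link] # assumed to exist elsewhere
+}
+```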
+ +### At least one of `cache_key_policy` or `signed_url_cache_max_age_sec` is now required on `google_compute_backend_service.cdn_policy` + +In an attempt to avoid allowing empty blocks in config files, at least one of `cache_key_policy` or +`signed_url_cache_max_age_sec` is now required on the `cdn_policy` block. + +### At least one of `include_host`, `include_protocol`, `include_query_string`, `query_string_blacklist`, or `query_string_whitelist` is now required on `google_compute_backend_service.cdn_policy.cache_key_policy` + +In an attempt to avoid allowing empty blocks in config files, at least one of `include_host`, +`include_protocol`, `include_query_string`, `query_string_blacklist`, or `query_string_whitelist` +is now required on the `cdn_policy.cache_key_policy` block. + +### At least one of `base_ejection_time`, `consecutive_errors`, `consecutive_gateway_failure`, `enforcing_consecutive_errors`, `enforcing_consecutive_gateway_failure`, `enforcing_success_rate`, `interval`, `max_ejection_percent`, `success_rate_minimum_hosts`, `success_rate_request_volume`, or `success_rate_stdev_factor` is now required on `google_compute_backend_service.outlier_detection` + +In an attempt to avoid allowing empty blocks in config files, at least one of `base_ejection_time`, +`consecutive_errors`, `consecutive_gateway_failure`, `enforcing_consecutive_errors`, +`enforcing_consecutive_gateway_failure`, `enforcing_success_rate`, `interval`, `max_ejection_percent`, +`success_rate_minimum_hosts`, `success_rate_request_volume`, or `success_rate_stdev_factor` +is now required on the `outlier_detection` block. + +### At least one of `enable` or `sample_rate` is now required on `google_compute_backend_service.log_config` + +In an attempt to avoid allowing empty blocks in config files, at least one of `enable` or `sample_rate` +is now required on the `log_config` block. + +## Resource: `google_compute_firewall` + +### Exactly one of `allow` or `deny` is required on `google_compute_firewall` + +In an attempt to be more consistent with the API, exactly one of `allow` or `deny` +is now required on `google_compute_firewall`. + +## Resource: `google_compute_forwarding_rule` + +### `ip_version` is now removed + +`ip_version` is not used for regional forwarding rules. + +### `ip_address` is now strictly validated to enforce literal IP address format + +Previously documentation suggested Terraform could use the same range of valid +IP Address formats for `ip_address` as accepted by the API (e.g. named addresses +or URLs to GCP Address resources). However, the server returns only literal IP +addresses and thus caused diffs on re-apply (i.e. a permadiff). We amended +documentation to say Terraform only accepts literal IP addresses. + +This is now strictly validated. While this shouldn't have a large breaking +impact as users would have already run into permadiff issues on re-apply, +there might be validation errors for existing configs. The solution is to +replace other address formats with the IP address, either manually or by +interpolating values from a `google_compute_address` resource.
+ +#### Old Config (that would have permadiff) + +```hcl +resource "google_compute_address" "my-addr" { + name = "my-addr" +} + +resource "google_compute_forwarding_rule" "frule" { + name = "my-forwarding-rule" + + address = google_compute_address.my-addr.self_link +} +``` + +#### New Config + +```hcl +resource "google_compute_address" "my-addr" { + name = "my-addr" +} + +resource "google_compute_forwarding_rule" "frule" { + name = "my-forwarding-rule" + + address = google_compute_address.my-addr.address +} +``` + +## Resource: `google_compute_global_forwarding_rule` + +### `ip_address` is now validated to enforce literal IP address format + +See [`google_compute_forwarding_rule`](#resource-google_compute_forwarding_rule). + +## Resource: `google_compute_health_check` + +### Exactly one of `http_health_check`, `https_health_check`, `http2_health_check`, `tcp_health_check` or `ssl_health_check` is required on `google_compute_health_check` + +In an attempt to be more consistent with the API, exactly one of `http_health_check`, `https_health_check`, +`http2_health_check`, `tcp_health_check` or `ssl_health_check` is now required on +`google_compute_health_check`. + +### At least one of `host`, `request_path`, `response`, `port`, `port_name`, `proxy_header`, or `port_specification` is now required on `google_compute_health_check.http_health_check`, `google_compute_health_check.https_health_check` and `google_compute_health_check.http2_health_check` + +In an attempt to avoid allowing empty blocks in config files, at least one of `host`, `request_path`, `response`, +`port`, `port_name`, `proxy_header`, or `port_specification` is now required on the +`http_health_check`, `https_health_check` and `http2_health_check` blocks. + +### At least one of `request`, `response`, `port`, `port_name`, `proxy_header`, or `port_specification` is now required on `google_compute_health_check.ssl_health_check` and `google_compute_health_check.tcp_health_check` + +In an attempt to avoid allowing empty blocks in config files, at least one of `request`, `response`, `port`, `port_name`, +`proxy_header`, or `port_specification` is now required on the `ssl_health_check` and `tcp_health_check` blocks. + +## Resource: `google_compute_image` + +### `type` is now required on `google_compute_image.guest_os_features` + +In an attempt to avoid allowing empty blocks in config files, `type` is now required on the +`guest_os_features` block. + +## Resource: `google_compute_instance` + +### `interface` is now required on block `google_compute_instance.scratch_disk` + +Previously the default value of `interface` was `SCSI`. In an attempt to avoid allowing empty blocks +in config files, `interface` is now required on the `scratch_disk` block. + +### At least one of `auto_delete`, `device_name`, `disk_encryption_key_raw`, `kms_key_self_link`, `initialize_params`, `mode` or `source` is now required on `google_compute_instance.boot_disk` + +In an attempt to avoid allowing empty blocks in config files, at least one of `auto_delete`, `device_name`, +`disk_encryption_key_raw`, `kms_key_self_link`, `initialize_params`, `mode` or `source` is now required on the +`boot_disk` block. + +### At least one of `size`, `type`, `image`, or `labels` is now required on `google_compute_instance.boot_disk.initialize_params` + +In an attempt to avoid allowing empty blocks in config files, at least one of `size`, `type`, `image`, or `labels` +is now required on the `initialize_params` block.
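+For example, a `boot_disk` block that satisfies both of the rules above might look
+like the following sketch (instance values are illustrative):
+
+```hcl
+resource "google_compute_instance" "vm" {
+  name         = "my-vm" # illustrative
+  machine_type = "n1-standard-1"
+  zone         = "us-central1-a"
+
+  boot_disk {
+    initialize_params {
+      image = "debian-cloud/debian-9" # at least one field must now be set
+    }
+  }
+
+  network_interface {
+    network = "default"
+  }
+}
+```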
+ +### At least one of `enable_secure_boot`, `enable_vtpm`, or `enable_integrity_monitoring` is now required on `google_compute_instance.shielded_instance_config` + +In an attempt to avoid allowing empty blocks in config files, at least one of `enable_secure_boot`, `enable_vtpm`, +or `enable_integrity_monitoring` is now required on the `shielded_instance_config` block. + +### At least one of `on_host_maintenance`, `automatic_restart`, `preemptible`, or `node_affinities` is now required on `google_compute_instance.scheduling` + +In an attempt to avoid allowing empty blocks in config files, at least one of `on_host_maintenance`, `automatic_restart`, +`preemptible`, or `node_affinities` is now required on the `scheduling` block. + +## Resource: `google_compute_instance_group_manager` + +The following changes apply to both `google_compute_instance_group_manager` and `google_compute_region_instance_group_manager`. + +### `instance_template` has been replaced by `version.instance_template` + +Instance group managers should be using `version` blocks to reference which +instance template to use for provisioning. To upgrade use a single `version` +block with `instance_template` in your config and by default all traffic will be +directed to that version. + +### Old Config + +```hcl +resource "google_compute_instance_group_manager" "my_igm" { + name = "my-igm" + zone = "us-central1-c" + base_instance_name = "igm" + + instance_template = "${google_compute_instance_template.my_tmpl.self_link}" +} +``` + +### New Config + +```hcl +resource "google_compute_instance_group_manager" "my_igm" { + name = "my-igm" + zone = "us-central1-c" + base_instance_name = "igm" + + version { + name = "prod" + instance_template = "${google_compute_instance_template.my_tmpl.self_link}" + } +} +``` + +### `update_strategy` has been replaced by `update_policy` + +To allow much greater control over the updates happening to instance groups +`update_strategy` has been replaced by `update_policy`. The previous +functionality to determine if instance should be replaced or restarted can be +achieved using `update_policy.minimal_action`. For more details see the +[official guide](https://cloud.google.com/compute/docs/instance-groups/rolling-out-updates-to-managed-instance-groups). + +## Resource: `google_compute_instance_template` + +### At least one of `enable_secure_boot`, `enable_vtpm`, or `enable_integrity_monitoring` is now required on `google_compute_instance_template.shielded_instance_config` + +In an attempt to avoid allowing empty blocks in config files, at least one of `enable_secure_boot`, `enable_vtpm`, or +`enable_integrity_monitoring` is now required on the `shielded_instance_config` block. + +### At least one of `on_host_maintenance`, `automatic_restart`, `preemptible`, or `node_affinities` is now required on `google_compute_instance_template.scheduling` + +In an attempt to avoid allowing empty blocks in config files, at least one of `on_host_maintenance`, `automatic_restart`, +`preemptible`, or `node_affinities` is now required on the `scheduling` block. + +### Disks with invalid scratch disk configurations are now rejected + +The instance template API allows specifying invalid configurations in some cases, +and an error is only returned when attempting to provision them. Terraform will +now report that some configs that previously appeared valid at plan time are +now invalid. + +A disk with `type` `"SCRATCH"` must have `disk_type` `"local-ssd"` and a size of 375GB. 
For example, +the following is valid: + +```hcl +disk { + auto_delete  = true + type         = "SCRATCH" + disk_type    = "local-ssd" + disk_size_gb = 375 +} +``` + +These configs would have been accepted by Terraform previously, but will now +fail: + +```hcl +disk { + source_image = "https://www.googleapis.com/compute/v1/projects/gce-uefi-images/global/images/centos-7-v20190729" + auto_delete  = true + type         = "SCRATCH" +} +``` + +```hcl +disk { + source_image = "https://www.googleapis.com/compute/v1/projects/gce-uefi-images/global/images/centos-7-v20190729" + auto_delete  = true + disk_type    = "local-ssd" +} +``` + +```hcl +disk { + auto_delete  = true + type         = "SCRATCH" + disk_type    = "local-ssd" + disk_size_gb = 300 +} +``` + +### `kms_key_self_link` is now required on block `google_compute_instance_template.disk_encryption_key` + +In an attempt to avoid allowing empty blocks in config files, `kms_key_self_link` is now +required on the `disk_encryption_key` block. + +## Resource: `google_compute_network` + +### `ipv4_range` is now removed + +Legacy networks are deprecated, and from Feb 1, 2020 onwards you will no longer be able +to create them using this field. + +## Resource: `google_compute_network_peering` + +### `auto_create_routes` is now removed + +`auto_create_routes` has been removed because it's redundant and not +user-configurable. + +## Resource: `google_compute_node_template` + +### At least one of `cpus` or `memory` is now required on `google_compute_node_template.node_type_flexibility` + +In an attempt to avoid allowing empty blocks in config files, at least one of `cpus` or `memory` +is now required on the `node_type_flexibility` block. + +## Resource: `google_compute_region_backend_service` + +### At least one of `connect_timeout`, `max_requests_per_connection`, `max_connections`, `max_pending_requests`, `max_requests`, or `max_retries` is now required on `google_compute_region_backend_service.circuit_breakers` + +In an attempt to avoid allowing empty blocks in config files, at least one of `connect_timeout`, +`max_requests_per_connection`, `max_connections`, `max_pending_requests`, `max_requests`, +or `max_retries` is now required on the `circuit_breakers` block. + +### At least one of `ttl`, `name`, or `path` is now required on `google_compute_region_backend_service.consistent_hash.http_cookie` + +In an attempt to avoid allowing empty blocks in config files, at least one of `ttl`, `name`, or `path` +is now required on the `consistent_hash.http_cookie` block. + +### At least one of `http_cookie`, `http_header_name`, or `minimum_ring_size` is now required on `google_compute_region_backend_service.consistent_hash` + +In an attempt to avoid allowing empty blocks in config files, at least one of `http_cookie`, +`http_header_name`, or `minimum_ring_size` is now required on the `consistent_hash` block. + +### At least one of `disable_connection_drain_on_failover`, `drop_traffic_if_unhealthy`, or `failover_ratio` is now required on `google_compute_region_backend_service.failover_policy` + +In an attempt to avoid allowing empty blocks in config files, at least one of `disable_connection_drain_on_failover`, +`drop_traffic_if_unhealthy`, or `failover_ratio` is now required on the `failover_policy` block.
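+For example, a `failover_policy` block that satisfies this rule might look like the
+following sketch (the service name and referenced health check are illustrative):
+
+```hcl
+resource "google_compute_region_backend_service" "default" {
+  name   = "my-regional-backend" # illustrative
+  region = "us-central1"
+
+  failover_policy {
+    drop_traffic_if_unhealthy = true # at least one field must now be set
+  }
+
+  health_checks = [google_compute_region_health_check.default.self_link] # assumed to exist elsewhere
+}
+```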
+ +### At least one of `base_ejection_time`, `consecutive_errors`, `consecutive_gateway_failure`, `enforcing_consecutive_errors`, `enforcing_consecutive_gateway_failure`, `enforcing_success_rate`, `interval`, `max_ejection_percent`, `success_rate_minimum_hosts`, `success_rate_request_volume`, or `success_rate_stdev_factor` is now required on `google_compute_region_backend_service.outlier_detection` + +In an attempt to avoid allowing empty blocks in config files, at least one of `base_ejection_time`, +`consecutive_errors`, `consecutive_gateway_failure`, `enforcing_consecutive_errors`, +`enforcing_consecutive_gateway_failure`, `enforcing_success_rate`, `interval`, `max_ejection_percent`, +`success_rate_minimum_hosts`, `success_rate_request_volume`, or `success_rate_stdev_factor` +is now required on the `outlier_detection` block. + +### At least one of `enable` or `sample_rate` is now required on `google_compute_region_backend_service.log_config` + +In an attempt to avoid allowing empty blocks in config files, at least one of `enable` or `sample_rate` +is now required on the `log_config` block. + +## Resource: `google_compute_region_health_check` + +### Exactly one of `http_health_check`, `https_health_check`, `http2_health_check`, `tcp_health_check` or `ssl_health_check` is required on `google_compute_region_health_check` + +In an attempt to be more consistent with the API, exactly one of `http_health_check`, `https_health_check`, +`http2_health_check`, `tcp_health_check` or `ssl_health_check` is now required on +`google_compute_region_health_check`. + +### At least one of `host`, `request_path`, `response`, `port`, `port_name`, `proxy_header`, or `port_specification` is now required on `google_compute_region_health_check.http_health_check`, `google_compute_region_health_check.https_health_check` and `google_compute_region_health_check.http2_health_check` + +In an attempt to avoid allowing empty blocks in config files, at least one of `host`, `request_path`, `response`, +`port`, `port_name`, `proxy_header`, or `port_specification` is now required on the +`http_health_check`, `https_health_check` and `http2_health_check` blocks. - +### At least one of `request`, `response`, `port`, `port_name`, `proxy_header`, or `port_specification` is now required on `google_compute_region_health_check.ssl_health_check` and `google_compute_region_health_check.tcp_health_check` -## Provider Version Configuration +In an attempt to avoid allowing empty blocks in config files, at least one of `request`, `response`, `port`, `port_name`, +`proxy_header`, or `port_specification` is now required on the `ssl_health_check` and `tcp_health_check` blocks. --> Before upgrading to version 3.0.0, it is recommended to upgrade to the most -recent `2.X` series release of the provider and ensure that your environment -successfully runs [`terraform plan`](https://www.terraform.io/docs/commands/plan.html) -without unexpected changes or deprecation notices. +## Resource: `google_compute_resource_policy` -It is recommended to use [version constraints](https://www.terraform.io/docs/configuration/providers.html#provider-versions) -when configuring Terraform providers. If you are following that recommendation, -update the version constraints in your Terraform configuration and run -[`terraform init`](https://www.terraform.io/docs/commands/init.html) to download -the new version.
+### Exactly one of `hourly_schedule`, `daily_schedule` or `weekly_schedule` is now required on `google_compute_resource_policy.snapshot_schedule_policy.schedule` -If you aren't using version constraints, you can use `terraform init -upgrade` -in order to upgrade your provider to the latest released version. +In an attempt to avoid allowing empty blocks in config files, exactly one +of `hourly_schedule`, `daily_schedule` or `weekly_schedule` is now required +on the `snapshot_schedule_policy.schedule` block. -For example, given this previous configuration: +### At least one of `labels`, `storage_locations`, or `guest_flush` is now required on `google_compute_resource_policy.snapshot_schedule_policy.snapshot_properties` + +In an attempt to avoid allowing empty blocks in config files, at least one of +`labels`, `storage_locations`, or `guest_flush` is now required on the +`snapshot_schedule_policy.snapshot_properties` block. + +## Resource: `google_compute_route` + +### Exactly one of `next_hop_gateway`, `next_hop_instance`, `next_hop_ip`, `next_hop_vpn_tunnel` or `next_hop_ilb` is required on `google_compute_route` + +In attempt to be more consistent with the API, exactly one of `next_hop_gateway`, `next_hop_instance`, +`next_hop_ip`, `next_hop_vpn_tunnel` or `next_hop_ilb` is now required on the +`google_compute_route`. + +## Resource: `google_compute_router` + +### `range` is now required on `google_compute_router.bgp.advertised_ip_ranges` + +In an attempt to avoid allowing empty blocks in config files, `range` is now +required on the `bgp.advertised_ip_ranges` block. + +## Resource: `google_compute_router_peer` + +### `range` is now required on block `google_compute_router_peer.advertised_ip_ranges` + +In an attempt to avoid allowing empty blocks in config files, `range` is now +required on the `advertised_ip_ranges` block. + +## Resource: `google_compute_snapshot` + +### `raw_key` is now required on block `google_compute_snapshot.source_disk_encryption_key` + +In an attempt to avoid allowing empty blocks in config files, `raw_key` is now +required on the `source_disk_encryption_key` block. + +## Resource: `google_compute_subnetwork` + +### `enable_flow_logs` is now removed + +`enable_flow_logs` has been removed and should be replaced by the `log_config` block with configurations +for flow logging. Enablement of flow logs is now controlled by whether `log_config` is defined or not instead +of by the `enable_flow_logs` variable. Users with `enable_flow_logs = false` only need to remove the field. + +### At least one of `aggregation_interval`, `flow_sampling`, or `metadata` is now required on `google_compute_subnetwork.log_config` + +In an attempt to avoid allowing empty blocks in config files, at least one of +`aggregation_interval`, `flow_sampling`, or `metadata` is now required on the +`log_config` block. + + +### Old Config ```hcl -provider "google" { - # ... other configuration ... +resource "google_compute_subnetwork" "subnet-with-logging" { + name = "log-test-subnetwork" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = "${google_compute_network.custom-test.self_link}" - version = "~> 2.17.0" + enable_flow_logs = true } ``` -An updated configuration: + +### New Config ```hcl -provider "google" { - # ... other configuration ... 
+resource "google_compute_subnetwork" "subnet-with-logging" { + name = "log-test-subnetwork" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = "${google_compute_network.custom-test.self_link}" - version = "~> 3.0.0" + log_config { + aggregation_interval = "INTERVAL_10_MIN" + flow_sampling = 0.5 + metadata = "INCLUDE_ALL_METADATA" + } } ``` + ## Resource: `google_container_cluster` ### `ip_allocation_policy` will catch out-of-band changes, `use_ip_aliases` removed @@ -103,7 +870,7 @@ removed" are related; see the other entry for more details. In `2.X`, `ip_allocation_policy` wouldn't cause a diff if it was undefined in config but was set on the cluster itself. Additionally, it could be defined with -`use_ip_aliases` set to `false`. However, this made it difficult to reason about +`use_ip_aliases` set to `false`. However, this made it difficult to reason about whether a cluster was routes-based or VPC-native. With `3.0.0`, Terraform will detect drift on the block. The configuration has also @@ -244,6 +1011,71 @@ resource "google_container_cluster" "primary" { } ``` +### `taint` field is now authoritative when set + +The `taint` field inside of `node_config` blocks on `google_container_cluster` +and `google_container_node_pool` will no longer ignore GPU-related values when +set. + +Previously, the field ignored upstream taints when unset and ignored unset GPU +taints when other taints were set. Now it will ignore upstream taints when set +and act authoritatively when set, requiring all taints (including Kubernetes and +GKE-managed ones) to be defined in config. + +Additionally, an empty taint can now be specified with `taint = []`. As a result +of this change, the JSON/state representation of the field has changed, +introducing an incompatibility for users who specify config in JSON instead of +HCL or who use `dynamic` blocks. See more details in the [Attributes as Blocks](https://www.terraform.io/docs/configuration/attr-as-blocks.html) +documentation. + +### `addons_config.kubernetes_dashboard` is now removed + +The `kubernetes_dashboard` addon is deprecated for clusters on GKE and +will soon be removed. It is recommended to use alternative GCP Console +dashboards. + +### `channel` is now required on `google_container_cluster.release_channel` + +In an attempt to avoid allowing empty blocks in config files, `channel` is now +required on the `release_channel` block. + +### `cidr_blocks` is now required on block `google_container_cluster.master_authorized_networks_config` + +In an attempt to avoid allowing empty blocks in config files, `cidr_blocks` is now +required on the `master_authorized_networks_config` block. + +### The `disabled` field is now required on the `addons_config` blocks for `http_load_balancing`, `horizontal_pod_autoscaling`, `istio_config`, `cloudrun_config` and `network_policy_config`. + +In an attempt to avoid allowing empty blocks in config files, `disabled` is now +required on the different `google_container_cluster.addons_config` blocks. + +### At least one of `http_load_balancing`, `horizontal_pod_autoscaling` , `network_policy_config`, `cloudrun_config`, or `istio_config` is now required on `google_container_cluster.addons_config` + +In an attempt to avoid allowing empty blocks in config files, at least one of `http_load_balancing`, +`horizontal_pod_autoscaling` , `network_policy_config`, `cloudrun_config`, or `istio_config` is now required on the +`addons_config` block. 
+
+### At least one of `username`, `password` or `client_certificate_config` is now required on `google_container_cluster.master_auth`
+
+In an attempt to avoid allowing empty blocks in config files, at least one of `username`, `password`
+or `client_certificate_config` is now required on the `master_auth` block.
+
+### `enabled` is now required on block `google_container_cluster.vertical_pod_autoscaling`
+
+In an attempt to avoid allowing empty blocks in config files, `enabled` is now
+required on the `vertical_pod_autoscaling` block.
+
+### `enabled` is now required on block `google_container_cluster.network_policy`
+
+Previously, the default value of `enabled` was `false`. In an attempt to avoid allowing empty blocks
+in config files, `enabled` is now required on the `network_policy` block.
+
+### `enable_private_endpoint` is now required on block `google_container_cluster.private_cluster_config`
+
+In an attempt to avoid allowing empty blocks in config files, `enable_private_endpoint` is now
+required on the `private_cluster_config` block.
+
 ### `logging_service` and `monitoring_service` defaults changed
 
 GKE Stackdriver Monitoring (the GKE-specific Stackdriver experience) is now
@@ -274,22 +1106,265 @@ logging_service = "logging.googleapis.com/kubernetes"
 monitoring_service = "monitoring.googleapis.com/kubernetes"
 ```
 
-### `taint` field is now authoritative when set
+### `use_ip_aliases` is now required on block `google_container_cluster.ip_allocation_policy`
 
-The `taint` field inside of `node_config` blocks on `google_container_cluster`
-and `google_container_node_pool` will no longer ignore GPU-related values when
-set.
+Previously, the default value of `use_ip_aliases` was `true`. In an attempt to avoid allowing empty blocks
+in config files, `use_ip_aliases` is now required on the `ip_allocation_policy` block.
 
-Previously, the field ignored upstream taints when unset and ignored unset GPU
-taints when other taints were set. Now it will ignore upstream taints when set
-and act authoritatively when set, requiring all taints (including Kubernetes and
-GKE-managed ones) to be defined in config.
+### `zone`, `region` and `additional_zones` are now removed
 
-Additionally, an empty taint can now be specified with `taint = []`. As a result
-of this change, the JSON/state representation of the field has changed,
-introducing an incompatibility for users who specify config in JSON instead of
-HCL or who use `dynamic` blocks. See more details in the [Attributes as Blocks](https://www.terraform.io/docs/configuration/attr-as-blocks.html)
-documentation.
+`zone` and `region` have been removed in favor of `location`, and
+`additional_zones` has been removed in favor of `node_locations`.
+
+## Resource: `google_container_node_pool`
+
+### `zone` and `region` are now removed
+
+`zone` and `region` have been removed in favor of `location`.
+
+## Resource: `google_dataproc_autoscaling_policy`
+
+### At least one of `min_instances`, `max_instances`, or `weight` is now required on `google_dataproc_autoscaling_policy.secondary_worker_config`
+
+In an attempt to avoid allowing empty blocks in config files, at least one of `min_instances`,
+`max_instances`, or `weight` is now required on the `secondary_worker_config`
+block.
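+
+As a sketch of what this looks like in practice (the IDs and values are
+placeholders, and the `basic_algorithm` settings shown are only one way to
+fill out the rest of the resource):
+
+```hcl
+resource "google_dataproc_autoscaling_policy" "example" {
+  policy_id = "example-policy"
+  location  = "us-central1"
+
+  # secondary_worker_config {} alone is no longer accepted; set at least one
+  # of min_instances, max_instances, or weight.
+  secondary_worker_config {
+    max_instances = 2
+  }
+
+  basic_algorithm {
+    yarn_config {
+      graceful_decommission_timeout = "30s"
+      scale_up_factor               = 0.5
+      scale_down_factor             = 0.5
+    }
+  }
+}
+```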
+
+## Resource: `google_dataproc_cluster`
+
+### At least one of `staging_bucket`, `gce_cluster_config`, `master_config`, `worker_config`, `preemptible_worker_config`, `software_config`, `initialization_action` or `encryption_config` is now required on `google_dataproc_cluster.cluster_config`
+
+In an attempt to avoid allowing empty blocks in config files, at least one of `staging_bucket`,
+`gce_cluster_config`, `master_config`, `worker_config`, `preemptible_worker_config`, `software_config`,
+`initialization_action` or `encryption_config` is now required on the
+`cluster_config` block.
+
+### At least one of `image_version`, `override_properties` or `optional_components` is now required on `google_dataproc_cluster.cluster_config.software_config`
+
+In an attempt to avoid allowing empty blocks in config files, at least one of `image_version`,
+`override_properties` or `optional_components` is now required on the
+`cluster_config.software_config` block.
+
+### At least one of `num_instances` or `disk_config` is now required on `google_dataproc_cluster.cluster_config.preemptible_worker_config`
+
+In an attempt to avoid allowing empty blocks in config files, at least one of `num_instances`
+or `disk_config` is now required on the `cluster_config.preemptible_worker_config` block.
+
+### At least one of `zone`, `network`, `subnetwork`, `tags`, `service_account`, `service_account_scopes`, `internal_ip_only` or `metadata` is now required on `google_dataproc_cluster.cluster_config.gce_cluster_config`
+
+In an attempt to avoid allowing empty blocks in config files, at least one of `zone`, `network`, `subnetwork`,
+`tags`, `service_account`, `service_account_scopes`, `internal_ip_only` or `metadata` is now required on the
+`gce_cluster_config` block.
+
+### At least one of `num_instances`, `image_uri`, `machine_type`, `min_cpu_platform`, `disk_config`, or `accelerators` is now required on `google_dataproc_cluster.cluster_config.master_config` and `google_dataproc_cluster.cluster_config.worker_config`
+
+In an attempt to avoid allowing empty blocks in config files, at least one of `num_instances`, `image_uri`,
+`machine_type`, `min_cpu_platform`, `disk_config`, or `accelerators` is now required on the
+`cluster_config.master_config` and `cluster_config.worker_config` blocks.
+
+### At least one of `num_local_ssds`, `boot_disk_size_gb` or `boot_disk_type` is now required on `google_dataproc_cluster.cluster_config.preemptible_worker_config.disk_config`, `google_dataproc_cluster.cluster_config.master_config.disk_config` and `google_dataproc_cluster.cluster_config.worker_config.disk_config`
+
+In an attempt to avoid allowing empty blocks in config files, at least one of `num_local_ssds`, `boot_disk_size_gb`
+or `boot_disk_type` is now required on the `cluster_config.preemptible_worker_config.disk_config`,
+`cluster_config.master_config.disk_config` and `cluster_config.worker_config.disk_config` blocks.
+
+### `policy_uri` is now required on block `google_dataproc_cluster.autoscaling_config`
+
+In an attempt to avoid allowing empty blocks in config files, `policy_uri` is now
+required on the `autoscaling_config` block.
+
+## Resource: `google_dataproc_job`
+
+### At least one of `query_file_uri` or `query_list` is now required on `hive_config`, `pig_config`, and `sparksql_config`
+
+In an attempt to avoid allowing empty blocks in config files, at least one of
+`query_file_uri` or `query_list` is now required on the `hive_config`, `pig_config`, and
+`sparksql_config` blocks.
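+
+For example, a job's `sparksql_config` block now needs one of the two query
+fields; this sketch assumes an existing `google_dataproc_cluster.example`
+and uses a placeholder query:
+
+```hcl
+resource "google_dataproc_job" "example" {
+  region = "us-central1"
+
+  placement {
+    cluster_name = google_dataproc_cluster.example.name
+  }
+
+  # sparksql_config {} alone is no longer accepted; provide either
+  # query_file_uri or query_list.
+  sparksql_config {
+    query_list = ["SHOW DATABASES;"]
+  }
+}
+```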
+
+### At least one of `main_class` or `main_jar_file_uri` is now required on `google_dataproc_job.spark_config` and `google_dataproc_job.hadoop_config`
+
+In an attempt to avoid allowing empty blocks in config files, at least one of
+`main_class` or `main_jar_file_uri` is now required on the `spark_config`
+and `hadoop_config` blocks.
+
+### `driver_log_levels` is now required on the `logging_config` blocks for `pyspark_config`, `hadoop_config`, `spark_config`, `pig_config`, and `sparksql_config`
+
+In an attempt to avoid allowing empty blocks in config files, `driver_log_levels` is now
+required on the `logging_config` blocks of `pyspark_config`, `hadoop_config`, `spark_config`,
+`pig_config`, and `sparksql_config`.
+
+### `max_failures_per_hour` is now required on block `google_dataproc_job.scheduling`
+
+In an attempt to avoid allowing empty blocks in config files, `max_failures_per_hour` is now
+required on the `scheduling` block.
+
+## Resource: `google_dns_managed_zone`
+
+### At least one of `kind`, `non_existence`, `state`, or `default_key_specs` is now required on `google_dns_managed_zone.dnssec_config`
+
+In an attempt to avoid allowing empty blocks in config files, at least one of
+`kind`, `non_existence`, `state`, or `default_key_specs` is now required on the
+`dnssec_config` block.
+
+### `target_network` is now required on block `google_dns_managed_zone.peering_config`
+
+In an attempt to avoid allowing empty blocks in config files, `target_network` is now
+required on the `peering_config` block.
+
+### `network_url` is now required on block `google_dns_managed_zone.peering_config.target_network`
+
+In an attempt to avoid allowing empty blocks in config files, `network_url` is now
+required on the `peering_config.target_network` block.
+
+### `target_name_servers` is now required on block `google_dns_managed_zone.forwarding_config`
+
+In an attempt to avoid allowing empty blocks in config files, `target_name_servers` is now
+required on the `forwarding_config` block.
+
+### `ipv4_address` is now required on block `google_dns_managed_zone.forwarding_config.target_name_servers`
+
+In an attempt to avoid allowing empty blocks in config files, `ipv4_address` is now
+required on the `forwarding_config.target_name_servers` block.
+
+### `networks` is now required on block `google_dns_managed_zone.private_visibility_config`
+
+In an attempt to avoid allowing empty blocks in config files, `networks` is now
+required on the `private_visibility_config` block.
+
+### `network_url` is now required on block `google_dns_managed_zone.private_visibility_config.networks`
+
+In an attempt to avoid allowing empty blocks in config files, `network_url` is now
+required on the `private_visibility_config.networks` block.
+
+## Resource: `google_dns_policy`
+
+### `network_url` is now required on block `google_dns_policy.networks`
+
+In an attempt to avoid allowing empty blocks in config files, `network_url` is now
+required on the `networks` block.
+
+### `target_name_servers` is now required on block `google_dns_policy.alternative_name_server_config`
+
+In an attempt to avoid allowing empty blocks in config files, `target_name_servers` is now
+required on the `alternative_name_server_config` block.
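+
+As a sketch, the nested shape these `google_dns_policy` changes expect looks
+like the following (the names, address, and network reference are
+placeholders); note that it also sets `ipv4_address`, which the next section
+covers:
+
+```hcl
+resource "google_dns_policy" "example" {
+  name = "example-policy"
+
+  # alternative_name_server_config must now contain target_name_servers,
+  # and each target_name_servers block must set ipv4_address.
+  alternative_name_server_config {
+    target_name_servers {
+      ipv4_address = "172.16.1.10"
+    }
+  }
+
+  # Each networks block must set network_url.
+  networks {
+    network_url = google_compute_network.example.self_link
+  }
+}
+```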
+
+### `ipv4_address` is now required on block `google_dns_policy.alternative_name_server_config.target_name_servers`
+
+In an attempt to avoid allowing empty blocks in config files, `ipv4_address` is now
+required on the `alternative_name_server_config.target_name_servers` block.
+
+## Resource: `google_healthcare_hl7_v2_store`
+
+### At least one of `allow_null_header` or `segment_terminator` is now required on `google_healthcare_hl7_v2_store.parser_config`
+
+In an attempt to avoid allowing empty blocks in config files, at least one of `allow_null_header`
+or `segment_terminator` is now required on the `parser_config` block.
+
+## Resource: `google_logging_metric`
+
+### At least one of `linear_buckets`, `exponential_buckets` or `explicit_buckets` is now required on `google_logging_metric.bucket_options`
+
+In an attempt to avoid allowing empty blocks in config files, at least one of `linear_buckets`,
+`exponential_buckets` or `explicit_buckets` is now required on the `bucket_options` block.
+
+### At least one of `num_finite_buckets`, `width` or `offset` is now required on `google_logging_metric.bucket_options.linear_buckets`
+
+In an attempt to avoid allowing empty blocks in config files, at least one of `num_finite_buckets`,
+`width` or `offset` is now required on the `bucket_options.linear_buckets` block.
+
+### At least one of `num_finite_buckets`, `growth_factor` or `scale` is now required on `google_logging_metric.bucket_options.exponential_buckets`
+
+In an attempt to avoid allowing empty blocks in config files, at least one of `num_finite_buckets`,
+`growth_factor` or `scale` is now required on the `bucket_options.exponential_buckets` block.
+
+### `bounds` is now required on `google_logging_metric.bucket_options.explicit_buckets`
+
+In an attempt to avoid allowing empty blocks in config files, `bounds` is now required on the
+`bucket_options.explicit_buckets` block.
+
+## Resource: `google_mlengine_model`
+
+### `name` is now required on `google_mlengine_model.default_version`
+
+In an attempt to avoid allowing empty blocks in config files, `name` is now required on the
+`default_version` block.
+
+## Resource: `google_monitoring_alert_policy`
+
+### `labels` is now removed
+
+`labels` is removed as it was never used. See `user_labels` for the correct field.
+
+### At least one of `content` or `mime_type` is now required on `google_monitoring_alert_policy.documentation`
+
+In an attempt to avoid allowing empty blocks in config files, at least one of `content` or `mime_type`
+is now required on the `documentation` block.
+
+## Resource: `google_monitoring_uptime_check_config`
+
+### Exactly one of `resource_group` or `monitored_resource` is now required on `google_monitoring_uptime_check_config`
+
+In an attempt to be more consistent with the API, exactly one of `resource_group` or `monitored_resource` is now required
+on `google_monitoring_uptime_check_config`.
+
+### Exactly one of `http_check` or `tcp_check` is now required on `google_monitoring_uptime_check_config`
+
+In an attempt to be more consistent with the API, exactly one of `http_check` or `tcp_check` is now required
+on `google_monitoring_uptime_check_config`.
+
+### At least one of `auth_info`, `port`, `headers`, `path`, `use_ssl`, or `mask_headers` is now required on `google_monitoring_uptime_check_config.http_check`
+
+In an attempt to avoid allowing empty blocks in config files, at least one of `auth_info`,
+`port`, `headers`, `path`, `use_ssl`, or `mask_headers` is now required on the `http_check` block.
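+
+Taken together, a minimal sketch that satisfies the three
+`google_monitoring_uptime_check_config` constraints above (the display name,
+host, and project ID are placeholders):
+
+```hcl
+resource "google_monitoring_uptime_check_config" "example" {
+  display_name = "example-uptime-check"
+  timeout      = "60s"
+
+  # Exactly one of http_check or tcp_check, and the block may not be empty.
+  http_check {
+    path    = "/healthz"
+    port    = 443
+    use_ssl = true
+  }
+
+  # Exactly one of monitored_resource or resource_group.
+  monitored_resource {
+    type = "uptime_url"
+    labels = {
+      project_id = "my-project-id"
+      host       = "example.com"
+    }
+  }
+}
+```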
+
+### At least one of `resource_type` or `group_id` is now required on `google_monitoring_uptime_check_config.resource_group`
+
+In an attempt to avoid allowing empty blocks in config files, at least one of `resource_type` or `group_id`
+is now required on the `resource_group` block.
+
+### `content` is now required on block `google_monitoring_uptime_check_config.content_matchers`
+
+In an attempt to avoid allowing empty blocks in config files, `content` is now
+required on the `content_matchers` block.
+
+### `username` and `password` are now required on block `google_monitoring_uptime_check_config.http_check.auth_info`
+
+In an attempt to avoid allowing empty blocks in config files, `username` and `password` are now
+required on the `http_check.auth_info` block.
+
+### `is_internal` and `internal_checker` are now removed
+
+`is_internal` and `internal_checker` never worked, and are now removed.
+
+## Resource: `google_organization_policy`
+
+### Exactly one of `list_policy`, `boolean_policy`, or `restore_policy` is now required on `google_organization_policy`
+
+In an attempt to be more consistent with the API, exactly one of `list_policy`, `boolean_policy`,
+or `restore_policy` is now required on `google_organization_policy`.
+
+### Exactly one of `all` or `values` is now required on `google_organization_policy.list_policy.allow` and `google_organization_policy.list_policy.deny`
+
+In an attempt to avoid allowing empty blocks in config files, exactly one of `all` or `values` is now
+required on the `list_policy.allow` and `list_policy.deny` blocks.
+
+### `inherit_from_parent` is now required on block `google_organization_policy.list_policy`
+
+In an attempt to avoid allowing empty blocks in config files, `inherit_from_parent` is now
+required on the `list_policy` block.
+
+## Resource: `google_project_iam_audit_config`
+
+### Audit configs are now authoritative on create
+
+Audit configs are now authoritative on create rather than being merged with any configs that already exist.
+Writing an audit config resource will now overwrite any existing audit configs on the given project.
 
 ## Resource: `google_project_service`
 
@@ -325,8 +1400,9 @@ Users should migrate to using `google_project_service` resources, or using the
 module for a similar interface to `google_project_services`.
 
 -> Prior to `2.13.0`, each `google_project_service` sent separate API enablement
-requests. From `2.13.0` onwards, those requests are batched. It's recommended
-that you upgrade to `2.13.0+` before migrating if you encounter quota issues
+requests. From `2.13.0` onwards, those requests are batched on write, and from `2.20.0` onwards,
+batched on read. It's recommended that you upgrade to `2.13.0+` before migrating if you
+encounter write quota issues or `2.20.0+` before migrating if you encounter read quota issues
 when you migrate off `google_project_services`.
#### Old Config @@ -360,15 +1436,15 @@ module "project_services" { #### New Config (google_project_service) ```hcl -resource "google_project_service" "project_iam" { - project = "your-project-id" - service = "iam.googleapis.com" - disable_on_destroy = false -} +resource "google_project_service" "service" { + for_each = toset([ + "iam.googleapis.com", + "cloudresourcemanager.googleapis.com", + ]) + + service = each.key -resource "google_project_service" "project_cloudresourcemanager" { project = "your-project-id" - service = "cloudresourcemanager.googleapis.com" disable_on_destroy = false } ``` @@ -380,39 +1456,108 @@ resource "google_project_service" "project_cloudresourcemanager" { `name` previously could have been specified by a long name (e.g. `projects/my-project/subscriptions/my-subscription`) or a shortname (e.g. `my-subscription`). `name` now must be the shortname. +### `ttl` is now required on `google_pubsub_subscription.expiration_policy` -## Resource: `google_cloudiot_registry` +Previously, an empty `expiration_policy` block would allow the resource to never expire. In an attempt to avoid +allowing empty blocks in config files, `ttl` is now required on the `expiration_policy` block. `ttl` should be set +to `""` for the resource to never expire. -### Replace singular event notification config field with plural `event_notification_configs` +## Resource: `google_security_scanner_scan_config` -Use the plural field `event_notification_configs` instead of -`event_notification_config`, which has now been removed. -Since the Cloud IoT API now accept multiple event notification configs for a -registry, the singular field no longer exists on the API resource and has been -removed from Terraform to prevent conflicts. +### At least one of `google_account` or `custom_account` is now required on `google_security_scanner_scan_config.authentication` +In an attempt to avoid allowing empty blocks in config files, at least one of `google_account` or +`custom_account` is now required on the `authentication` block. -#### Old Config +## Resource: `google_service_account_key` -```hcl -resource "google_cloudiot_registry" "myregistry" { - name = "%s" +### `pgp_key`, `private_key_fingerprint`, and `private_key_encrypted` are now removed - event_notification_config { - pubsub_topic_name = "${google_pubsub_topic.event-topic.id}" - } -} +`google_service_account_key` previously supported encrypting the private key with +a supplied PGP key. This is [no longer supported](https://www.terraform.io/docs/extend/best-practices/sensitive-state.html#don-39-t-encrypt-state) +and has been removed as functionality. State should instead be treated as sensitive, +and ideally encrypted using a remote state backend. -``` +This will require re-provisioning your service account key, unfortunately. There +is no known alternative at this time. 
-#### New Config
-
-```hcl
-resource "google_cloudiot_registry" "myregistry" {
-  name = "%s"
-
-  event_notification_configs {
-    pubsub_topic_name = "${google_pubsub_topic.event-topic.id}"
-  }
-}
-```
\ No newline at end of file
+
+## Resource: `google_sql_database_instance`
+
+### At least one of `ca_certificate`, `client_certificate`, `client_key`, `connect_retry_interval`, `dump_file_path`, `failover_target`, `master_heartbeat_period`, `password`, `ssl_cipher`, `username`, or `verify_server_certificate` is now required on `google_sql_database_instance.settings.replica_configuration`
+
+In an attempt to avoid allowing empty blocks in config files, at least one of `ca_certificate`, `client_certificate`, `client_key`, `connect_retry_interval`,
+`dump_file_path`, `failover_target`, `master_heartbeat_period`, `password`, `ssl_cipher`, `username`, or `verify_server_certificate` is now required on the
+`settings.replica_configuration` block.
+
+### At least one of `cert`, `common_name`, `create_time`, `expiration_time`, or `sha1_fingerprint` is now required on `google_sql_database_instance.settings.server_ca_cert`
+
+In an attempt to avoid allowing empty blocks in config files, at least one of `cert`, `common_name`, `create_time`, `expiration_time`, or `sha1_fingerprint` is now required on the `settings.server_ca_cert` block.
+
+### At least one of `day`, `hour`, or `update_track` is now required on `google_sql_database_instance.settings.maintenance_window`
+
+In an attempt to avoid allowing empty blocks in config files, at least one of `day`, `hour`,
+or `update_track` is now required on the `settings.maintenance_window` block.
+
+### At least one of `binary_log_enabled`, `enabled`, `start_time`, or `location` is now required on `google_sql_database_instance.settings.backup_configuration`
+
+In an attempt to avoid allowing empty blocks in config files, at least one of `binary_log_enabled`, `enabled`, `start_time`, or `location` is now required on the
+`settings.backup_configuration` block.
+
+### At least one of `authorized_networks`, `ipv4_enabled`, `require_ssl`, or `private_network` is now required on `google_sql_database_instance.settings.ip_configuration`
+
+In an attempt to avoid allowing empty blocks in config files, at least one of `authorized_networks`, `ipv4_enabled`,
+`require_ssl`, or `private_network` is now required on the `settings.ip_configuration` block.
+
+### `name` and `value` are now required on block `google_sql_database_instance.settings.database_flags`
+
+In an attempt to avoid allowing empty blocks in config files, `name` and `value` are now required on the `settings.database_flags` block.
+
+### `value` is now required on block `google_sql_database_instance.settings.ip_configuration.authorized_networks`
+
+In an attempt to avoid allowing empty blocks in config files, `value` is now required on the `settings.ip_configuration.authorized_networks` block.
+
+### `zone` is now required on block `google_sql_database_instance.settings.location_preference`
+
+In an attempt to avoid allowing empty blocks in config files, `zone` is now
+required on the `settings.location_preference` block.
+
+## Resource: `google_storage_bucket`
+
+### `enabled` is now required on block `google_storage_bucket.versioning`
+
+Previously, the default value of `enabled` was `false`. In an attempt to avoid allowing empty blocks
+in config files, `enabled` is now required on the `versioning` block.
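+
+For example (the bucket name is a placeholder):
+
+```hcl
+resource "google_storage_bucket" "example" {
+  name = "example-bucket"
+
+  # versioning {} alone is no longer accepted; set enabled explicitly.
+  versioning {
+    enabled = true
+  }
+}
+```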
+
+### At least one of `main_page_suffix` or `not_found_page` is now required on `google_storage_bucket.website`
+
+In an attempt to avoid allowing empty blocks in config files, at least one of `main_page_suffix` or
+`not_found_page` is now required on the `website` block.
+
+### `is_live` is now removed
+
+`is_live` has been removed; use `with_state` instead.
+
+## Resource: `google_storage_transfer_job`
+
+### At least one of `min_time_elapsed_since_last_modification`, `max_time_elapsed_since_last_modification`, `include_prefixes`, or `exclude_prefixes` is now required on `google_storage_transfer_job.transfer_spec.object_conditions`
+
+In an attempt to avoid allowing empty blocks in config files, at least one of `min_time_elapsed_since_last_modification`,
+`max_time_elapsed_since_last_modification`, `include_prefixes`, or `exclude_prefixes` is now required on the `transfer_spec.object_conditions` block.
+
+### At least one of `overwrite_objects_already_existing_in_sink`, `delete_objects_unique_in_sink`, or `delete_objects_from_source_after_transfer` is now required on `google_storage_transfer_job.transfer_spec.transfer_options`
+
+In an attempt to avoid allowing empty blocks in config files, at least one of `overwrite_objects_already_existing_in_sink`,
+`delete_objects_unique_in_sink`, or `delete_objects_from_source_after_transfer` is now required on the
+`transfer_spec.transfer_options` block.
+
+### At least one of `gcs_data_source`, `aws_s3_data_source`, or `http_data_source` is now required on `google_storage_transfer_job.transfer_spec`
+
+In an attempt to avoid allowing empty blocks in config files, at least one of `gcs_data_source`, `aws_s3_data_source`,
+or `http_data_source` is now required on the `transfer_spec` block.
+
+## Resource: `google_tpu_node`
+
+### `preemptible` is now required on block `google_tpu_node.scheduling_config`
+
+In an attempt to avoid allowing empty blocks in config files, `preemptible` is now
+required on the `scheduling_config` block.
diff --git a/website/docs/r/access_context_manager_access_level.html.markdown b/website/docs/r/access_context_manager_access_level.html.markdown
index 8709975e92b..8c514ab446f 100644
--- a/website/docs/r/access_context_manager_access_level.html.markdown
+++ b/website/docs/r/access_context_manager_access_level.html.markdown
@@ -38,9 +38,9 @@ To get more information about AccessLevel, see:
 
 ```hcl
 resource "google_access_context_manager_access_level" "access-level" {
-  parent = "accessPolicies/${google_access_context_manager_access_policy.test-access.name}"
-  name = "accessPolicies/${google_access_context_manager_access_policy.test-access.name}/accessLevels/chromeos_no_lock"
-  title = "chromeos_no_lock"
+  parent = "accessPolicies/${google_access_context_manager_access_policy.test-access.name}"
+  name   = "accessPolicies/${google_access_context_manager_access_policy.test-access.name}/accessLevels/chromeos_no_lock"
+  title  = "chromeos_no_lock"
   basic {
     conditions {
       device_policy {
@@ -184,7 +184,7 @@ The `os_constraints` block supports:
     Format: "major.minor.patch" such as "10.5.301", "9.2.1".
 
 * `os_type` -
-  (Optional)
+  (Required)
   The operating system type of the device.
diff --git a/website/docs/r/access_context_manager_service_perimeter.html.markdown b/website/docs/r/access_context_manager_service_perimeter.html.markdown index 388c84dfa01..9388c0a2204 100644 --- a/website/docs/r/access_context_manager_service_perimeter.html.markdown +++ b/website/docs/r/access_context_manager_service_perimeter.html.markdown @@ -46,18 +46,18 @@ To get more information about ServicePerimeter, see: ```hcl resource "google_access_context_manager_service_perimeter" "service-perimeter" { - parent = "accessPolicies/${google_access_context_manager_access_policy.test-access.name}" - name = "accessPolicies/${google_access_context_manager_access_policy.test-access.name}/servicePerimeters/restrict_all" - title = "restrict_all" + parent = "accessPolicies/${google_access_context_manager_access_policy.test-access.name}" + name = "accessPolicies/${google_access_context_manager_access_policy.test-access.name}/servicePerimeters/restrict_all" + title = "restrict_all" status { restricted_services = ["storage.googleapis.com"] } } resource "google_access_context_manager_access_level" "access-level" { - parent = "accessPolicies/${google_access_context_manager_access_policy.test-access.name}" - name = "accessPolicies/${google_access_context_manager_access_policy.test-access.name}/accessLevels/chromeos_no_lock" - title = "chromeos_no_lock" + parent = "accessPolicies/${google_access_context_manager_access_policy.test-access.name}" + name = "accessPolicies/${google_access_context_manager_access_policy.test-access.name}/accessLevels/chromeos_no_lock" + title = "chromeos_no_lock" basic { conditions { device_policy { diff --git a/website/docs/r/app_engine_application.html.markdown b/website/docs/r/app_engine_application.html.markdown index 99a4773d6da..d998e9df542 100755 --- a/website/docs/r/app_engine_application.html.markdown +++ b/website/docs/r/app_engine_application.html.markdown @@ -26,7 +26,7 @@ resource "google_project" "my_project" { } resource "google_app_engine_application" "app" { - project = "${google_project.my_project.project_id}" + project = google_project.my_project.project_id location_id = "us-central" } ``` diff --git a/website/docs/r/app_engine_application_url_dispatch_rules.html.markdown b/website/docs/r/app_engine_application_url_dispatch_rules.html.markdown index e694440c4e1..0ad0a0cfab1 100644 --- a/website/docs/r/app_engine_application_url_dispatch_rules.html.markdown +++ b/website/docs/r/app_engine_application_url_dispatch_rules.html.markdown @@ -40,22 +40,22 @@ To get more information about ApplicationUrlDispatchRules, see: ```hcl resource "google_app_engine_application_url_dispatch_rules" "web_service" { dispatch_rules { - domain = "*" - path = "/*" + domain = "*" + path = "/*" service = "default" } dispatch_rules { - domain = "*" - path = "/admin/*" - service = "${google_app_engine_standard_app_version.admin_v3.service}" + domain = "*" + path = "/admin/*" + service = google_app_engine_standard_app_version.admin_v3.service } } resource "google_app_engine_standard_app_version" "admin_v3" { version_id = "v3" - service = "admin" - runtime = "nodejs10" + service = "admin" + runtime = "nodejs10" entrypoint { shell = "node ./app.js" @@ -75,13 +75,13 @@ resource "google_app_engine_standard_app_version" "admin_v3" { } resource "google_storage_bucket" "bucket" { - name = "appengine-test-bucket" + name = "appengine-test-bucket" } resource "google_storage_bucket_object" "object" { - name = "hello-world.zip" - bucket = "${google_storage_bucket.bucket.name}" - source = 
"./test-fixtures/appengine/hello-world.zip" + name = "hello-world.zip" + bucket = google_storage_bucket.bucket.name + source = "./test-fixtures/appengine/hello-world.zip" } ``` diff --git a/website/docs/r/app_engine_domain_mapping.html.markdown b/website/docs/r/app_engine_domain_mapping.html.markdown index d02278e0e3f..b5cb34c16ac 100644 --- a/website/docs/r/app_engine_domain_mapping.html.markdown +++ b/website/docs/r/app_engine_domain_mapping.html.markdown @@ -42,7 +42,7 @@ To get more information about DomainMapping, see: ```hcl resource "google_app_engine_domain_mapping" "domain_mapping" { domain_name = "dm-test-.gcp.tfacc.hashicorptest.com" - + ssl_settings { ssl_management_type = "AUTOMATIC" } @@ -87,7 +87,7 @@ The `ssl_settings` block supports: Example: 12345. * `ssl_management_type` - - (Optional) + (Required) SSL management type for this domain. If `AUTOMATIC`, a managed certificate is automatically provisioned. If `MANUAL`, `certificateId` must be manually specified in order to configure SSL for this domain. @@ -139,6 +139,8 @@ This resource provides the following DomainMapping can be imported using any of these accepted formats: ``` +$ terraform import google_app_engine_domain_mapping.default apps/{{project}}/domainMappings/{{domain_name}} +$ terraform import google_app_engine_domain_mapping.default {{project}}/{{domain_name}} $ terraform import google_app_engine_domain_mapping.default {{domain_name}} ``` diff --git a/website/docs/r/app_engine_firewall_rule.html.markdown b/website/docs/r/app_engine_firewall_rule.html.markdown index a086ce3a942..36ed7bb347b 100644 --- a/website/docs/r/app_engine_firewall_rule.html.markdown +++ b/website/docs/r/app_engine_firewall_rule.html.markdown @@ -49,14 +49,14 @@ resource "google_project" "my_project" { } resource "google_app_engine_application" "app" { - project = "${google_project.my_project.project_id}" + project = google_project.my_project.project_id location_id = "us-central" } resource "google_app_engine_firewall_rule" "rule" { - project = "${google_app_engine_application.app.project}" - priority = 1000 - action = "ALLOW" + project = google_app_engine_application.app.project + priority = 1000 + action = "ALLOW" source_range = "*" } ``` @@ -109,6 +109,7 @@ This resource provides the following FirewallRule can be imported using any of these accepted formats: ``` +$ terraform import google_app_engine_firewall_rule.default apps/{{project}}/firewall/ingressRules/{{priority}} $ terraform import google_app_engine_firewall_rule.default {{project}}/{{priority}} $ terraform import google_app_engine_firewall_rule.default {{priority}} ``` diff --git a/website/docs/r/app_engine_standard_app_version.html.markdown b/website/docs/r/app_engine_standard_app_version.html.markdown index 65044afc124..305f4ec11cc 100644 --- a/website/docs/r/app_engine_standard_app_version.html.markdown +++ b/website/docs/r/app_engine_standard_app_version.html.markdown @@ -87,13 +87,13 @@ resource "google_app_engine_standard_app_version" "myapp_v2" { } resource "google_storage_bucket" "bucket" { - name = "appengine-static-content" + name = "appengine-static-content" } resource "google_storage_bucket_object" "object" { - name = "hello-world.zip" - bucket = "${google_storage_bucket.bucket.name}" - source = "./test-fixtures/appengine/hello-world.zip" + name = "hello-world.zip" + bucket = google_storage_bucket.bucket.name + source = "./test-fixtures/appengine/hello-world.zip" } ``` @@ -196,7 +196,7 @@ The `handlers` block supports: The `script` block supports: * `script_path` - - 
(Optional) + (Required) Path to the script from the application root directory. The `static_files` block supports: @@ -257,7 +257,7 @@ The `deployment` block supports: The `zip` block supports: * `source_url` - - (Optional) + (Required) Source URL * `files_count` - @@ -273,13 +273,13 @@ The `files` block supports: SHA1 checksum of the file * `source_url` - - (Optional) + (Required) Source URL The `entrypoint` block supports: * `shell` - - (Optional) + (Required) The format should be a shell command that can be fed to bash -c. ## Attributes Reference diff --git a/website/docs/r/bigquery_data_transfer_config.html.markdown b/website/docs/r/bigquery_data_transfer_config.html.markdown index 7cd5ec79455..e754095ffbd 100644 --- a/website/docs/r/bigquery_data_transfer_config.html.markdown +++ b/website/docs/r/bigquery_data_transfer_config.html.markdown @@ -36,37 +36,36 @@ To get more information about Config, see: ```hcl -data "google_project" "project" {} +data "google_project" "project" { +} resource "google_project_iam_member" "permissions" { - role = "roles/iam.serviceAccountShortTermTokenMinter" + role = "roles/iam.serviceAccountShortTermTokenMinter" member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-bigquerydatatransfer.iam.gserviceaccount.com" } resource "google_bigquery_data_transfer_config" "query_config" { - depends_on = [google_project_iam_member.permissions] - display_name = "my-query" - location = "asia-northeast1" - data_source_id = "scheduled_query" - schedule = "first sunday of quarter 00:00" - destination_dataset_id = "${google_bigquery_dataset.my_dataset.dataset_id}" + display_name = "my-query" + location = "asia-northeast1" + data_source_id = "scheduled_query" + schedule = "first sunday of quarter 00:00" + destination_dataset_id = google_bigquery_dataset.my_dataset.dataset_id params = { destination_table_name_template = "my-table" - write_disposition = "WRITE_APPEND" - query = "SELECT name FROM tabl WHERE x = 'y'" + write_disposition = "WRITE_APPEND" + query = "SELECT name FROM tabl WHERE x = 'y'" } } resource "google_bigquery_dataset" "my_dataset" { - depends_on = [google_project_iam_member.permissions] - dataset_id = "my_dataset" + dataset_id = "my_dataset" friendly_name = "foo" - description = "bar" - location = "asia-northeast1" + description = "bar" + location = "asia-northeast1" } ``` diff --git a/website/docs/r/bigquery_dataset.html.markdown b/website/docs/r/bigquery_dataset.html.markdown index 5af1c66452b..df4af580bdd 100644 --- a/website/docs/r/bigquery_dataset.html.markdown +++ b/website/docs/r/bigquery_dataset.html.markdown @@ -68,13 +68,13 @@ resource "google_bigquery_dataset" "dataset" { default_table_expiration_ms = 3600000 default_encryption_configuration { - kms_key_name = "${google_kms_crypto_key.crypto_key.self_link}" + kms_key_name = google_kms_crypto_key.crypto_key.self_link } } resource "google_kms_crypto_key" "crypto_key" { name = "example-key" - key_ring = "${google_kms_key_ring.key_ring.self_link}" + key_ring = google_kms_key_ring.key_ring.self_link } resource "google_kms_key_ring" "key_ring" { @@ -191,7 +191,7 @@ The `access` block supports: An email address of a Google Group to grant access to. * `role` - - (Optional) + (Required) Describes the rights granted to the user specified by the other member of the access object. Primitive, Predefined and custom roles are supported. 
Predefined roles that have equivalent @@ -283,8 +283,8 @@ This resource provides the following Dataset can be imported using any of these accepted formats: ``` +$ terraform import google_bigquery_dataset.default projects/{{project}}/datasets/{{dataset_id}} $ terraform import google_bigquery_dataset.default {{project}}/{{dataset_id}} -$ terraform import google_bigquery_dataset.default {{project}}:{{dataset_id}} $ terraform import google_bigquery_dataset.default {{dataset_id}} ``` diff --git a/website/docs/r/bigquery_table.html.markdown b/website/docs/r/bigquery_table.html.markdown index abfe5f9c6a9..be06f7fa42f 100644 --- a/website/docs/r/bigquery_table.html.markdown +++ b/website/docs/r/bigquery_table.html.markdown @@ -30,7 +30,7 @@ resource "google_bigquery_dataset" "default" { } resource "google_bigquery_table" "default" { - dataset_id = "${google_bigquery_dataset.default.dataset_id}" + dataset_id = google_bigquery_dataset.default.dataset_id table_id = "bar" time_partitioning { @@ -57,10 +57,11 @@ resource "google_bigquery_table" "default" { } ] EOF + } resource "google_bigquery_table" "sheet" { - dataset_id = "${google_bigquery_dataset.default.dataset_id}" + dataset_id = google_bigquery_dataset.default.dataset_id table_id = "sheet" external_data_configuration { @@ -243,5 +244,5 @@ exported: BigQuery tables can be imported using the `project`, `dataset_id`, and `table_id`, e.g. ``` -$ terraform import google_bigquery_table.default gcp-project:foo.bar +$ terraform import google_bigquery_table.default gcp-project/foo/bar ``` diff --git a/website/docs/r/bigtable_app_profile.html.markdown b/website/docs/r/bigtable_app_profile.html.markdown index 7005b4d01c0..8610c546a0a 100644 --- a/website/docs/r/bigtable_app_profile.html.markdown +++ b/website/docs/r/bigtable_app_profile.html.markdown @@ -36,21 +36,21 @@ App profile is a configuration object describing how Cloud Bigtable should treat ```hcl resource "google_bigtable_instance" "instance" { - name = "tf-test-instance-" - cluster { - cluster_id = "tf-test-instance-" - zone = "us-central1-b" - num_nodes = 3 - storage_type = "HDD" - } + name = "tf-test-instance-" + cluster { + cluster_id = "tf-test-instance-" + zone = "us-central1-b" + num_nodes = 3 + storage_type = "HDD" + } } resource "google_bigtable_app_profile" "ap" { - instance = google_bigtable_instance.instance.name - app_profile_id = "tf-test-profile-" + instance = google_bigtable_instance.instance.name + app_profile_id = "tf-test-profile-" - multi_cluster_routing_use_any = true - ignore_warnings = true + multi_cluster_routing_use_any = true + ignore_warnings = true } ```
@@ -63,25 +63,25 @@ resource "google_bigtable_app_profile" "ap" { ```hcl resource "google_bigtable_instance" "instance" { - name = "tf-test-instance-" - cluster { - cluster_id = "tf-test-instance-" - zone = "us-central1-b" - num_nodes = 3 - storage_type = "HDD" - } + name = "tf-test-instance-" + cluster { + cluster_id = "tf-test-instance-" + zone = "us-central1-b" + num_nodes = 3 + storage_type = "HDD" + } } resource "google_bigtable_app_profile" "ap" { - instance = google_bigtable_instance.instance.name - app_profile_id = "tf-test-profile-" + instance = google_bigtable_instance.instance.name + app_profile_id = "tf-test-profile-" - single_cluster_routing { - cluster_id = "tf-test-instance-" - allow_transactional_writes = true - } + single_cluster_routing { + cluster_id = "tf-test-instance-" + allow_transactional_writes = true + } - ignore_warnings = true + ignore_warnings = true } ``` @@ -127,7 +127,7 @@ The following arguments are supported: The `single_cluster_routing` block supports: * `cluster_id` - - (Optional) + (Required) The cluster to which read/write requests should be routed. * `allow_transactional_writes` - diff --git a/website/docs/r/bigtable_gc_policy.html.markdown b/website/docs/r/bigtable_gc_policy.html.markdown index 0e4f4edf92d..e19beed025f 100644 --- a/website/docs/r/bigtable_gc_policy.html.markdown +++ b/website/docs/r/bigtable_gc_policy.html.markdown @@ -18,27 +18,29 @@ Creates a Google Cloud Bigtable GC Policy inside a family. For more information ```hcl resource "google_bigtable_instance" "instance" { - name = "tf-instance" - cluster_id = "tf-instance-cluster" - zone = "us-central1-b" - num_nodes = 3 - storage_type = "HDD" + name = "tf-instance" + cluster { + cluster_id = "tf-instance-cluster" + zone = "us-central1-b" + num_nodes = 3 + storage_type = "HDD" + } } resource "google_bigtable_table" "table" { name = "tf-table" - instance_name = "${google_bigtable_instance.instance.name}" - + instance_name = google_bigtable_instance.instance.name + column_family { family = "name" } } resource "google_bigtable_gc_policy" "policy" { - instance_name = "${google_bigtable_instance.instance.name}" - table = "${google_bigtable_table.table.name}" + instance_name = google_bigtable_instance.instance.name + table = google_bigtable_table.table.name column_family = "name" - + max_age { days = 7 } @@ -46,18 +48,18 @@ resource "google_bigtable_gc_policy" "policy" { ``` Multiple conditions is also supported. `UNION` when any of its sub-policies apply (OR). `INTERSECTION` when all its sub-policies apply (AND) -``` +```hcl resource "google_bigtable_gc_policy" "policy" { - instance_name = "${google_bigtable_instance.instance.name}" - table = "${google_bigtable_table.table.name}" + instance_name = google_bigtable_instance.instance.name + table = google_bigtable_table.table.name column_family = "name" - + mode = "UNION" - + max_age { days = 7 } - + max_version { number = 10 } diff --git a/website/docs/r/bigtable_instance.html.markdown b/website/docs/r/bigtable_instance.html.markdown index e54ab9d79e5..083db724b39 100644 --- a/website/docs/r/bigtable_instance.html.markdown +++ b/website/docs/r/bigtable_instance.html.markdown @@ -18,7 +18,7 @@ Creates a Google Bigtable instance. 
For more information see ```hcl resource "google_bigtable_instance" "production-instance" { - name = "tf-instance" + name = "tf-instance" cluster { cluster_id = "tf-instance-cluster" diff --git a/website/docs/r/bigtable_instance_iam.html.markdown b/website/docs/r/bigtable_instance_iam.html.markdown index c05293c5283..1fc149aff35 100644 --- a/website/docs/r/bigtable_instance_iam.html.markdown +++ b/website/docs/r/bigtable_instance_iam.html.markdown @@ -24,7 +24,7 @@ Three different resources help you manage IAM policies on bigtable instances. Ea ```hcl data "google_iam_policy" "admin" { binding { - role = "roles/editor" + role = "roles/editor" members = [ "user:jane@example.com", ] @@ -32,9 +32,9 @@ data "google_iam_policy" "admin" { } resource "google_bigtable_instance_iam_policy" "editor" { - project = "your-project" - instance = "your-bigtable-instance" - policy_data = "${data.google_iam_policy.admin.policy_data}" + project = "your-project" + instance = "your-bigtable-instance" + policy_data = data.google_iam_policy.admin.policy_data } ``` @@ -42,9 +42,9 @@ resource "google_bigtable_instance_iam_policy" "editor" { ```hcl resource "google_bigtable_instance_iam_binding" "editor" { - instance = "your-bigtable-instance" - role = "roles/editor" - members = [ + instance = "your-bigtable-instance" + role = "roles/editor" + members = [ "user:jane@example.com", ] } @@ -54,9 +54,9 @@ resource "google_bigtable_instance_iam_binding" "editor" { ```hcl resource "google_bigtable_instance_iam_member" "editor" { - instance = "your-bigtable-instance" - role = "roles/editor" - member = "user:jane@example.com" + instance = "your-bigtable-instance" + role = "roles/editor" + member = "user:jane@example.com" } ``` diff --git a/website/docs/r/bigtable_table.html.markdown b/website/docs/r/bigtable_table.html.markdown index b69fcd5bc55..8c035c6d35f 100644 --- a/website/docs/r/bigtable_table.html.markdown +++ b/website/docs/r/bigtable_table.html.markdown @@ -18,16 +18,19 @@ Creates a Google Cloud Bigtable table inside an instance. 
For more information s ```hcl resource "google_bigtable_instance" "instance" { - name = "tf-instance" - cluster_id = "tf-instance-cluster" - zone = "us-central1-b" - num_nodes = 3 - storage_type = "HDD" + name = "tf-instance" + + cluster { + cluster_id = "tf-instance-cluster" + zone = "us-central1-b" + num_nodes = 3 + storage_type = "HDD" + } } resource "google_bigtable_table" "table" { name = "tf-table" - instance_name = "${google_bigtable_instance.instance.name}" + instance_name = google_bigtable_instance.instance.name split_keys = ["a", "b", "c"] } ``` diff --git a/website/docs/r/binary_authorization_attestor.html.markdown b/website/docs/r/binary_authorization_attestor.html.markdown index a0ce708bf2f..8ed99217b03 100644 --- a/website/docs/r/binary_authorization_attestor.html.markdown +++ b/website/docs/r/binary_authorization_attestor.html.markdown @@ -43,7 +43,7 @@ To get more information about Attestor, see: resource "google_binary_authorization_attestor" "attestor" { name = "test-attestor" attestation_authority_note { - note_reference = "${google_container_analysis_note.note.name}" + note_reference = google_container_analysis_note.note.name public_keys { ascii_armored_pgp_public_key = < If you're importing a resource with beta features, make sure to include `-provider=google-beta` diff --git a/website/docs/r/binary_authorization_policy.html.markdown b/website/docs/r/binary_authorization_policy.html.markdown index 71014d65c59..26a6203eede 100644 --- a/website/docs/r/binary_authorization_policy.html.markdown +++ b/website/docs/r/binary_authorization_policy.html.markdown @@ -37,19 +37,19 @@ To get more information about Policy, see: ```hcl resource "google_binary_authorization_policy" "policy" { admission_whitelist_patterns { - name_pattern= "gcr.io/google_containers/*" + name_pattern = "gcr.io/google_containers/*" } default_admission_rule { - evaluation_mode = "ALWAYS_ALLOW" + evaluation_mode = "ALWAYS_ALLOW" enforcement_mode = "ENFORCED_BLOCK_AND_AUDIT_LOG" } cluster_admission_rules { - cluster = "us-central1-a.prod-cluster" - evaluation_mode = "REQUIRE_ATTESTATION" - enforcement_mode = "ENFORCED_BLOCK_AND_AUDIT_LOG" - require_attestations_by = ["${google_binary_authorization_attestor.attestor.name}"] + cluster = "us-central1-a.prod-cluster" + evaluation_mode = "REQUIRE_ATTESTATION" + enforcement_mode = "ENFORCED_BLOCK_AND_AUDIT_LOG" + require_attestations_by = [google_binary_authorization_attestor.attestor.name] } } @@ -65,7 +65,7 @@ resource "google_container_analysis_note" "note" { resource "google_binary_authorization_attestor" "attestor" { name = "test-attestor" attestation_authority_note { - note_reference = "${google_container_analysis_note.note.name}" + note_reference = google_container_analysis_note.note.name } } ``` @@ -74,15 +74,13 @@ resource "google_binary_authorization_attestor" "attestor" { ```hcl resource "google_binary_authorization_policy" "policy" { - default_admission_rule { - evaluation_mode = "REQUIRE_ATTESTATION" - enforcement_mode = "ENFORCED_BLOCK_AND_AUDIT_LOG" - require_attestations_by = ["${google_binary_authorization_attestor.attestor.name}"] + evaluation_mode = "REQUIRE_ATTESTATION" + enforcement_mode = "ENFORCED_BLOCK_AND_AUDIT_LOG" + require_attestations_by = [google_binary_authorization_attestor.attestor.name] } global_policy_evaluation_mode = "ENABLE" - } resource "google_container_analysis_note" "note" { @@ -97,7 +95,7 @@ resource "google_container_analysis_note" "note" { resource "google_binary_authorization_attestor" "attestor" { name = 
"test-attestor" attestation_authority_note { - note_reference = "${google_container_analysis_note.note.name}" + note_reference = google_container_analysis_note.note.name } } ``` @@ -172,7 +170,7 @@ The `default_admission_rule` block supports: The `admission_whitelist_patterns` block supports: * `name_pattern` - - (Optional) + (Required) An image name pattern to whitelist, in the form `registry/path/to/image`. This supports a trailing * as a wildcard, but this is allowed only in text after the registry/ @@ -183,7 +181,7 @@ The `cluster_admission_rules` block supports: * `cluster` - (Required) The identifier for this object. Format specified above. * `evaluation_mode` - - (Optional) + (Required) How this admission rule will be evaluated. * `require_attestations_by` - @@ -198,7 +196,7 @@ The `cluster_admission_rules` block supports: specifies REQUIRE_ATTESTATION, otherwise it must be empty. * `enforcement_mode` - - (Optional) + (Required) The action when a pod creation is denied by the admission rule. diff --git a/website/docs/r/cloud_scheduler_job.html.markdown b/website/docs/r/cloud_scheduler_job.html.markdown index d3f61e4664d..df68111fbc5 100644 --- a/website/docs/r/cloud_scheduler_job.html.markdown +++ b/website/docs/r/cloud_scheduler_job.html.markdown @@ -51,13 +51,13 @@ resource "google_pubsub_topic" "topic" { } resource "google_cloud_scheduler_job" "job" { - name = "test-job" + name = "test-job" description = "test job" - schedule = "*/2 * * * *" + schedule = "*/2 * * * *" pubsub_target { - topic_name = "${google_pubsub_topic.topic.id}" - data = "${base64encode("test")}" + topic_name = google_pubsub_topic.topic.id + data = base64encode("test") } } ``` @@ -71,14 +71,14 @@ resource "google_cloud_scheduler_job" "job" { ```hcl resource "google_cloud_scheduler_job" "job" { - name = "test-job" + name = "test-job" description = "test http job" - schedule = "*/8 * * * *" - time_zone = "America/New_York" + schedule = "*/8 * * * *" + time_zone = "America/New_York" http_target { http_method = "POST" - uri = "https://example.com/ping" + uri = "https://example.com/ping" } } ``` @@ -92,17 +92,17 @@ resource "google_cloud_scheduler_job" "job" { ```hcl resource "google_cloud_scheduler_job" "job" { - name = "test-job" - schedule = "*/4 * * * *" + name = "test-job" + schedule = "*/4 * * * *" description = "test app engine job" - time_zone = "Europe/London" + time_zone = "Europe/London" app_engine_http_target { http_method = "POST" app_engine_routing { - service = "web" - version = "prod" + service = "web" + version = "prod" instance = "my-instance-001" } @@ -119,20 +119,21 @@ resource "google_cloud_scheduler_job" "job" { ```hcl -data "google_compute_default_service_account" "default" { } +data "google_compute_default_service_account" "default" { +} resource "google_cloud_scheduler_job" "job" { - name = "test-job" + name = "test-job" description = "test http job" - schedule = "*/8 * * * *" - time_zone = "America/New_York" + schedule = "*/8 * * * *" + time_zone = "America/New_York" http_target { http_method = "GET" - uri = "https://cloudscheduler.googleapis.com/v1/projects/my-project-name/locations/us-west1/jobs" + uri = "https://cloudscheduler.googleapis.com/v1/projects/my-project-name/locations/us-west1/jobs" oauth_token { - service_account_email = "${data.google_compute_default_service_account.default.email}" + service_account_email = data.google_compute_default_service_account.default.email } } } @@ -146,20 +147,21 @@ resource "google_cloud_scheduler_job" "job" { ```hcl -data 
"google_compute_default_service_account" "default" { } +data "google_compute_default_service_account" "default" { +} resource "google_cloud_scheduler_job" "job" { - name = "test-job" + name = "test-job" description = "test http job" - schedule = "*/8 * * * *" - time_zone = "America/New_York" + schedule = "*/8 * * * *" + time_zone = "America/New_York" http_target { http_method = "GET" - uri = "https://example.com/ping" + uri = "https://example.com/ping" oidc_token { - service_account_email = "${data.google_compute_default_service_account.default.email}" + service_account_email = data.google_compute_default_service_account.default.email } } } @@ -356,7 +358,7 @@ The `http_target` block supports: The `oauth_token` block supports: * `service_account_email` - - (Optional) + (Required) Service account email to be used for generating OAuth token. The service account must be within the same project as the job. @@ -368,7 +370,7 @@ The `oauth_token` block supports: The `oidc_token` block supports: * `service_account_email` - - (Optional) + (Required) Service account email to be used for generating OAuth token. The service account must be within the same project as the job. diff --git a/website/docs/r/cloudbuild_trigger.html.markdown b/website/docs/r/cloudbuild_trigger.html.markdown index 925c312820f..2ee0e5bb034 100644 --- a/website/docs/r/cloudbuild_trigger.html.markdown +++ b/website/docs/r/cloudbuild_trigger.html.markdown @@ -60,6 +60,45 @@ resource "google_cloudbuild_trigger" "filename-trigger" { The following arguments are supported: +* `trigger_template` - + (Required) + Template describing the types of source changes to trigger a build. + Branch and tag names in trigger templates are interpreted as regular + expressions. Any branch or tag change that matches that regular + expression will trigger a build. Structure is documented below. + + +The `trigger_template` block supports: + +* `project_id` - + (Optional) + ID of the project that owns the Cloud Source Repository. If + omitted, the project ID requesting the build is assumed. + +* `repo_name` - + (Optional) + Name of the Cloud Source Repository. If omitted, the name "default" is assumed. + +* `dir` - + (Optional) + Directory, relative to the source root, in which to run the build. + This must be a relative path. If a step's dir is specified and + is an absolute path, this value is ignored for that step's + execution. + +* `branch_name` - + (Optional) + Name of the branch to build. Exactly one a of branch name, tag, or commit SHA must be provided. + This field is a regular expression. + +* `tag_name` - + (Optional) + Name of the tag to build. Exactly one of a branch name, tag, or commit SHA must be provided. + This field is a regular expression. + +* `commit_sha` - + (Optional) + Explicit commit SHA to build. Exactly one of a branch name, tag, or commit SHA must be provided. - - - @@ -106,14 +145,6 @@ The following arguments are supported: those files matches a includedFiles glob. If not, then we do not trigger a build. -* `trigger_template` - - (Optional) - Template describing the types of source changes to trigger a build. - Branch and tag names in trigger templates are interpreted as regular - expressions. Any branch or tag change that matches that regular - expression will trigger a build. - This field is required, and will be validated as such in 3.0.0. Structure is documented below. - * `build` - (Optional) Contents of the build template. Either a filename or build template must be provided. Structure is documented below. 
@@ -122,38 +153,6 @@ The following arguments are supported: If it is not provided, the provider project is used. -The `trigger_template` block supports: - -* `project_id` - - (Optional) - ID of the project that owns the Cloud Source Repository. If - omitted, the project ID requesting the build is assumed. - -* `repo_name` - - (Optional) - Name of the Cloud Source Repository. If omitted, the name "default" is assumed. - -* `dir` - - (Optional) - Directory, relative to the source root, in which to run the build. - This must be a relative path. If a step's dir is specified and - is an absolute path, this value is ignored for that step's - execution. - -* `branch_name` - - (Optional) - Name of the branch to build. Exactly one a of branch name, tag, or commit SHA must be provided. - This field is a regular expression. - -* `tag_name` - - (Optional) - Name of the tag to build. Exactly one of a branch name, tag, or commit SHA must be provided. - This field is a regular expression. - -* `commit_sha` - - (Optional) - Explicit commit SHA to build. Exactly one of a branch name, tag, or commit SHA must be provided. - The `build` block supports: * `tags` - @@ -168,14 +167,14 @@ The `build` block supports: If any of the images fail to be pushed, the build status is marked FAILURE. * `step` - - (Optional) + (Required) The operations to be performed on the workspace. Structure is documented below. The `step` block supports: * `name` - - (Optional) + (Required) The name of the container image that will run this particular build step. If the image is available in the host's Docker daemon's cache, it will be run directly. If not, the host will attempt to pull the image first, using @@ -267,13 +266,13 @@ The `step` block supports: The `volumes` block supports: * `name` - - (Optional) + (Required) Name of the volume to mount. Volume names must be unique per build step and must be valid names for Docker volumes. Each named volume must be used by at least two build steps. * `path` - - (Optional) + (Required) Path at which to mount the volume. Paths must be absolute and cannot conflict with other volume paths on the same build step or with certain reserved volume paths. diff --git a/website/docs/r/cloudfunctions_cloud_function_iam.html.markdown b/website/docs/r/cloudfunctions_cloud_function_iam.html.markdown index e54181d538c..84086f508cc 100644 --- a/website/docs/r/cloudfunctions_cloud_function_iam.html.markdown +++ b/website/docs/r/cloudfunctions_cloud_function_iam.html.markdown @@ -129,17 +129,17 @@ CloudFunctions cloudfunction IAM resources can be imported using the resource id IAM member imports use space-delimited identifiers: the resource in question, the role, and the member identity, e.g. ``` -$ terraform import google_cloudfunctions_function_iam_member.editor "{{project}}/{{region}}/{{cloud_function}} roles/viewer jane@example.com" +$ terraform import google_cloudfunctions_function_iam_member.editor "projects/{{project}}/locations/{{region}}/functions/{{cloud_function}} roles/viewer jane@example.com" ``` IAM binding imports use space-delimited identifiers: the resource in question and the role, e.g. ``` -$ terraform import google_cloudfunctions_function_iam_binding.editor "{{project}}/{{region}}/{{cloud_function}} roles/viewer" +$ terraform import google_cloudfunctions_function_iam_binding.editor "projects/{{project}}/locations/{{region}}/functions/{{cloud_function}} roles/viewer" ``` IAM policy imports use the identifier of the resource in question, e.g. 
``` -$ terraform import google_cloudfunctions_function_iam_policy.editor {{project}}/{{region}}/{{cloud_function}} +$ terraform import google_cloudfunctions_function_iam_policy.editor projects/{{project}}/locations/{{region}}/functions/{{cloud_function}} ``` -> If you're importing a resource with beta features, make sure to include `-provider=google-beta` diff --git a/website/docs/r/cloudfunctions_function.html.markdown b/website/docs/r/cloudfunctions_function.html.markdown index ffec17d642b..98611aa045f 100644 --- a/website/docs/r/cloudfunctions_function.html.markdown +++ b/website/docs/r/cloudfunctions_function.html.markdown @@ -29,29 +29,29 @@ resource "google_storage_bucket" "bucket" { resource "google_storage_bucket_object" "archive" { name = "index.zip" - bucket = "${google_storage_bucket.bucket.name}" + bucket = google_storage_bucket.bucket.name source = "./path/to/zip/file/which/contains/code" } resource "google_cloudfunctions_function" "function" { - name = "function-test" - description = "My function" - runtime = "nodejs10" + name = "function-test" + description = "My function" + runtime = "nodejs10" available_memory_mb = 128 - source_archive_bucket = "${google_storage_bucket.bucket.name}" - source_archive_object = "${google_storage_bucket_object.archive.name}" + source_archive_bucket = google_storage_bucket.bucket.name + source_archive_object = google_storage_bucket_object.archive.name trigger_http = true entry_point = "helloGET" } # IAM entry for all users to invoke the function resource "google_cloudfunctions_function_iam_member" "invoker" { - project = "${google_cloudfunctions_function.function.project}" - region = "${google_cloudfunctions_function.function.region}" - cloud_function = "${google_cloudfunctions_function.function.name}" + project = google_cloudfunctions_function.function.project + region = google_cloudfunctions_function.function.region + cloud_function = google_cloudfunctions_function.function.name - role = "roles/cloudfunctions.invoker" + role = "roles/cloudfunctions.invoker" member = "allUsers" } ``` @@ -65,18 +65,18 @@ resource "google_storage_bucket" "bucket" { resource "google_storage_bucket_object" "archive" { name = "index.zip" - bucket = "${google_storage_bucket.bucket.name}" + bucket = google_storage_bucket.bucket.name source = "./path/to/zip/file/which/contains/code" } resource "google_cloudfunctions_function" "function" { - name = "function-test" - description = "My function" - runtime = "nodejs10" + name = "function-test" + description = "My function" + runtime = "nodejs10" available_memory_mb = 128 - source_archive_bucket = "${google_storage_bucket.bucket.name}" - source_archive_object = "${google_storage_bucket_object.archive.name}" + source_archive_bucket = google_storage_bucket.bucket.name + source_archive_object = google_storage_bucket_object.archive.name trigger_http = true timeout = 60 entry_point = "helloGET" @@ -91,11 +91,11 @@ resource "google_cloudfunctions_function" "function" { # IAM entry for a single user to invoke the function resource "google_cloudfunctions_function_iam_member" "invoker" { - project = "${google_cloudfunctions_function.function.project}" - region = "${google_cloudfunctions_function.function.region}" - cloud_function = "${google_cloudfunctions_function.function.name}" + project = google_cloudfunctions_function.function.project + region = google_cloudfunctions_function.function.region + cloud_function = google_cloudfunctions_function.function.name - role = "roles/cloudfunctions.invoker" + role = 
"roles/cloudfunctions.invoker" member = "user:myFunctionInvoker@example.com" } ``` @@ -106,10 +106,8 @@ The following arguments are supported: * `name` - (Required) A user-defined name of the function. Function names must be unique globally. -* `runtime` - (Optional) The runtime in which the function is going to run. One -of `"nodejs6"`, `"nodejs8"`, `"nodejs10"`, `"python37"`, `"go111"`. If empty, -defaults to `"nodejs6"`. It's recommended that you override the default, as -`"nodejs6"` is deprecated. +* `runtime` - (Required) The runtime in which the function is going to run. +Eg. `"nodejs8"`, `"nodejs10"`, `"python37"`, `"go111"`. - - - diff --git a/website/docs/r/cloudiot_registry.html.markdown b/website/docs/r/cloudiot_registry.html.markdown index ec2188850d4..5aca4f3eaf6 100644 --- a/website/docs/r/cloudiot_registry.html.markdown +++ b/website/docs/r/cloudiot_registry.html.markdown @@ -29,11 +29,11 @@ resource "google_cloudiot_registry" "default-registry" { name = "default-registry" event_notification_configs { - pubsub_topic_name = "${google_pubsub_topic.default-telemetry.id}" + pubsub_topic_name = google_pubsub_topic.default-telemetry.id } state_notification_config = { - pubsub_topic_name = "${google_pubsub_topic.default-devicestatus.id}" + pubsub_topic_name = google_pubsub_topic.default-devicestatus.id } http_config = { @@ -47,7 +47,7 @@ resource "google_cloudiot_registry" "default-registry" { credentials { public_key_certificate = { format = "X509_CERTIFICATE_PEM" - certificate = "${file("rsa_cert.pem")}" + certificate = file("rsa_cert.pem") } } } @@ -66,8 +66,6 @@ The following arguments are supported: * `region` - (Optional) The Region in which the created address should reside. If it is not provided, the provider region is used. -* `event_notification_config` - (Deprecated) Use `event_notification_configs` instead. - * `event_notification_configs` - (Optional) List of configurations for event notification, such as PubSub topics to publish device events to. Structure is documented below. diff --git a/website/docs/r/composer_environment.html.markdown b/website/docs/r/composer_environment.html.markdown index 77a9feacefe..2b830feab88 100644 --- a/website/docs/r/composer_environment.html.markdown +++ b/website/docs/r/composer_environment.html.markdown @@ -50,27 +50,27 @@ on the IAM policy binding (see `google_project_iam_member` below). 
```hcl resource "google_composer_environment" "test" { - name = "%s" + name = "%s" region = "us-central1" config { node_count = 4 node_config { - zone = "us-central1-a" + zone = "us-central1-a" machine_type = "n1-standard-1" - network = "${google_compute_network.test.self_link}" - subnetwork = "${google_compute_subnetwork.test.self_link}" + network = google_compute_network.test.self_link + subnetwork = google_compute_subnetwork.test.self_link - service_account = "${google_service_account.test.name}" + service_account = google_service_account.test.name } } - depends_on = ["google_project_iam_member.composer-worker"] + depends_on = [google_project_iam_member.composer-worker] } resource "google_compute_network" "test" { - name = "composer-test-network" + name = "composer-test-network" auto_create_subnetworks = false } @@ -78,7 +78,7 @@ resource "google_compute_subnetwork" "test" { name = "composer-test-subnetwork" ip_cidr_range = "10.2.0.0/16" region = "us-central1" - network = "${google_compute_network.test.self_link}" + network = google_compute_network.test.self_link } resource "google_service_account" "test" { @@ -87,15 +87,15 @@ resource "google_service_account" "test" { } resource "google_project_iam_member" "composer-worker" { - role = "roles/composer.worker" - member = "serviceAccount:${google_service_account.test.email}" + role = "roles/composer.worker" + member = "serviceAccount:${google_service_account.test.email}" } ``` ### With Software (Airflow) Config ```hcl resource "google_composer_environment" "test" { - name = "%s" + name = "%s" region = "us-central1" config { @@ -110,7 +110,7 @@ resource "google_composer_environment" "test" { } env_variables = { - FOO = "bar" + FOO = "bar" } } } diff --git a/website/docs/r/compute_address.html.markdown b/website/docs/r/compute_address.html.markdown index a7a9a09c57e..b709b4946e4 100644 --- a/website/docs/r/compute_address.html.markdown +++ b/website/docs/r/compute_address.html.markdown @@ -74,12 +74,12 @@ resource "google_compute_subnetwork" "default" { name = "my-subnet" ip_cidr_range = "10.0.0.0/16" region = "us-central1" - network = "${google_compute_network.default.self_link}" + network = google_compute_network.default.self_link } resource "google_compute_address" "internal_with_subnet_and_address" { name = "my-internal-address" - subnetwork = "${google_compute_subnetwork.default.self_link}" + subnetwork = google_compute_subnetwork.default.self_link address_type = "INTERNAL" address = "10.0.42.42" region = "us-central1" @@ -114,27 +114,27 @@ resource "google_compute_address" "static" { } data "google_compute_image" "debian_image" { - family = "debian-9" - project = "debian-cloud" + family = "debian-9" + project = "debian-cloud" } resource "google_compute_instance" "instance_with_ip" { - name = "vm-instance" - machine_type = "f1-micro" - zone = "us-central1-a" - - boot_disk { - initialize_params{ - image = "${data.google_compute_image.debian_image.self_link}" - } - } - - network_interface { - network = "default" - access_config { - nat_ip = "${google_compute_address.static.address}" - } - } + name = "vm-instance" + machine_type = "f1-micro" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.debian_image.self_link + } + } + + network_interface { + network = "default" + access_config { + nat_ip = google_compute_address.static.address + } + } } ``` diff --git a/website/docs/r/compute_attached_disk.html.markdown b/website/docs/r/compute_attached_disk.html.markdown index cf274793930..df9c0e11139 
100644 --- a/website/docs/r/compute_attached_disk.html.markdown +++ b/website/docs/r/compute_attached_disk.html.markdown @@ -28,8 +28,8 @@ To get more information about attaching disks, see: ## Example Usage ```hcl resource "google_compute_attached_disk" "default" { - disk = "${google_compute_disk.default.self_link}" - instance = "${google_compute_instance.default.self_link}" + disk = google_compute_disk.default.self_link + instance = google_compute_instance.default.self_link } resource "google_compute_instance" "default" { @@ -37,7 +37,6 @@ resource "google_compute_instance" "default" { machine_type = "n1-standard-1" zone = "us-west1-a" - boot_disk { initialize_params { image = "debian-cloud/debian-9" @@ -49,7 +48,7 @@ resource "google_compute_instance" "default" { } lifecycle { - ignore_changes = [attached_disk] } } ``` @@ -118,6 +117,6 @@ This resource provides the following Attached Disk can be imported the following ways: ``` -$ terraform import google_compute_disk.default projects/{{project}}/zones/{{zone}}/disks/{{instance.name}}:{{disk.name}} -$ terraform import google_compute_disk.default {{project}}/{{zone}}/{{instance.name}}:{{disk.name}} +$ terraform import google_compute_attached_disk.default projects/{{project}}/zones/{{zone}}/instances/{{instance.name}}/{{disk.name}} +$ terraform import google_compute_attached_disk.default {{project}}/{{zone}}/{{instance.name}}/{{disk.name}} ``` diff --git a/website/docs/r/compute_autoscaler.html.markdown b/website/docs/r/compute_autoscaler.html.markdown index 26c8e66d5b9..1b7327f280e 100644 --- a/website/docs/r/compute_autoscaler.html.markdown +++ b/website/docs/r/compute_autoscaler.html.markdown @@ -45,11 +45,11 @@ To get more information about Autoscaler, see: ```hcl resource "google_compute_autoscaler" "default" { - provider = "google-beta" + provider = google-beta name = "my-autoscaler" zone = "us-central1-f" - target = "${google_compute_instance_group_manager.default.self_link}" + target = google_compute_instance_group_manager.default.self_link autoscaling_policy { max_replicas = 5 @@ -65,7 +65,7 @@ resource "google_compute_autoscaler" "default" { } resource "google_compute_instance_template" "default" { - provider = "google-beta" + provider = google-beta name = "my-instance-template" machine_type = "n1-standard-1" @@ -74,7 +74,7 @@ resource "google_compute_instance_template" "default" { tags = ["foo", "bar"] disk { - source_image = "${data.google_compute_image.debian_9.self_link}" + source_image = data.google_compute_image.debian_9.self_link } network_interface { @@ -91,34 +91,34 @@ resource "google_compute_instance_template" "default" { } resource "google_compute_target_pool" "default" { - provider = "google-beta" + provider = google-beta name = "my-target-pool" } resource "google_compute_instance_group_manager" "default" { - provider = "google-beta" + provider = google-beta name = "my-igm" zone = "us-central1-f" version { - instance_template = "${google_compute_instance_template.default.self_link}" - name = "primary" + instance_template = google_compute_instance_template.default.self_link + name = "primary" } - target_pools = ["${google_compute_target_pool.default.self_link}"] + target_pools = [google_compute_target_pool.default.self_link] base_instance_name = "autoscaler-sample" } data "google_compute_image" "debian_9" { - provider = "google-beta" + provider = google-beta family = "debian-9" project = "debian-cloud" } -provider "google-beta"{ +provider "google-beta" { region = "us-central1" zone = "us-central1-a" 
} @@ -135,7 +135,7 @@ provider "google-beta"{ resource "google_compute_autoscaler" "foobar" { name = "my-autoscaler" zone = "us-central1-f" - target = "${google_compute_instance_group_manager.foobar.self_link}" + target = google_compute_instance_group_manager.foobar.self_link autoscaling_policy { max_replicas = 5 @@ -156,7 +156,7 @@ resource "google_compute_instance_template" "foobar" { tags = ["foo", "bar"] disk { - source_image = "${data.google_compute_image.debian_9.self_link}" + source_image = data.google_compute_image.debian_9.self_link } network_interface { @@ -181,17 +181,17 @@ resource "google_compute_instance_group_manager" "foobar" { zone = "us-central1-f" version { - instance_template = "${google_compute_instance_template.foobar.self_link}" + instance_template = google_compute_instance_template.foobar.self_link name = "primary" } - target_pools = ["${google_compute_target_pool.foobar.self_link}"] + target_pools = [google_compute_target_pool.foobar.self_link] base_instance_name = "foobar" } data "google_compute_image" "debian_9" { - family = "debian-9" - project = "debian-cloud" + family = "debian-9" + project = "debian-cloud" } ``` diff --git a/website/docs/r/compute_backend_bucket.html.markdown b/website/docs/r/compute_backend_bucket.html.markdown index fd7e3a0c345..63032f12974 100644 --- a/website/docs/r/compute_backend_bucket.html.markdown +++ b/website/docs/r/compute_backend_bucket.html.markdown @@ -50,7 +50,7 @@ To get more information about BackendBucket, see: resource "google_compute_backend_bucket" "image_backend" { name = "image-backend-bucket" description = "Contains beautiful images" - bucket_name = "${google_storage_bucket.image_bucket.name}" + bucket_name = google_storage_bucket.image_bucket.name enable_cdn = true } @@ -103,9 +103,9 @@ The following arguments are supported: The `cdn_policy` block supports: * `signed_url_cache_max_age_sec` - - (Optional) + (Required) Maximum number of seconds the response to a signed URL request will - be considered fresh. Defaults to 1hr (3600s). After this time period, + be considered fresh. After this time period, the response will be revalidated before being served. When serving responses to signed URL requests, Cloud CDN will internally behave as though diff --git a/website/docs/r/compute_backend_bucket_signed_url_key.html.markdown b/website/docs/r/compute_backend_bucket_signed_url_key.html.markdown index 17c9d106fb8..54a5ae20dd7 100644 --- a/website/docs/r/compute_backend_bucket_signed_url_key.html.markdown +++ b/website/docs/r/compute_backend_bucket_signed_url_key.html.markdown @@ -43,13 +43,13 @@ we cannot confirm or reverse changes to a key outside of Terraform. 
resource "google_compute_backend_bucket_signed_url_key" "backend_key" { name = "test-key" key_value = "pPsVemX8GM46QVeezid6Rw==" - backend_bucket = "${google_compute_backend_bucket.test_backend.name}" + backend_bucket = google_compute_backend_bucket.test_backend.name } resource "google_compute_backend_bucket" "test_backend" { name = "test-signed-backend-bucket" description = "Contains beautiful images" - bucket_name = "${google_storage_bucket.bucket.name}" + bucket_name = google_storage_bucket.bucket.name enable_cdn = true } diff --git a/website/docs/r/compute_backend_service.html.markdown b/website/docs/r/compute_backend_service.html.markdown index bf7b577c551..1e685051e54 100644 --- a/website/docs/r/compute_backend_service.html.markdown +++ b/website/docs/r/compute_backend_service.html.markdown @@ -48,7 +48,7 @@ To get more information about BackendService, see: ```hcl resource "google_compute_backend_service" "default" { name = "backend-service" - health_checks = ["${google_compute_http_health_check.default.self_link}"] + health_checks = [google_compute_http_health_check.default.self_link] } resource "google_compute_http_health_check" "default" { @@ -68,20 +68,20 @@ resource "google_compute_http_health_check" "default" { ```hcl resource "google_compute_backend_service" "default" { - provider = "google-beta" + provider = google-beta - name = "backend-service" - health_checks = ["${google_compute_health_check.health_check.self_link}"] + name = "backend-service" + health_checks = [google_compute_health_check.health_check.self_link] load_balancing_scheme = "INTERNAL_SELF_MANAGED" - locality_lb_policy = "ROUND_ROBIN" + locality_lb_policy = "ROUND_ROBIN" } resource "google_compute_health_check" "health_check" { - provider = "google-beta" + provider = google-beta - name = "health-check" + name = "health-check" http_health_check { - + port = 80 } } ``` @@ -95,13 +95,13 @@ resource "google_compute_health_check" "health_check" { ```hcl resource "google_compute_backend_service" "default" { - provider = "google-beta" + provider = google-beta - name = "backend-service" - health_checks = ["${google_compute_health_check.health_check.self_link}"] + name = "backend-service" + health_checks = [google_compute_health_check.health_check.self_link] load_balancing_scheme = "INTERNAL_SELF_MANAGED" - locality_lb_policy = "RING_HASH" - session_affinity = "HTTP_COOKIE" + locality_lb_policy = "RING_HASH" + session_affinity = "HTTP_COOKIE" circuit_breakers { max_connections = 10 } @@ -109,7 +109,7 @@ resource "google_compute_backend_service" "default" { http_cookie { ttl { seconds = 11 - nanos = 1111 + nanos = 1111 } name = "mycookie" } @@ -120,11 +120,11 @@ resource "google_compute_backend_service" "default" { } resource "google_compute_health_check" "health_check" { - provider = "google-beta" + provider = google-beta - name = "health-check" + name = "health-check" http_health_check { - + port = 80 } } ``` @@ -250,7 +250,7 @@ The `backend` block supports: Provide this property when you create the resource. * `group` - - (Optional) + (Required) The fully-qualified URL of an Instance Group or Network Endpoint Group resource. In case of instance group this defines the list of instances that serve traffic. 
Member virtual machine diff --git a/website/docs/r/compute_backend_service_signed_url_key.html.markdown b/website/docs/r/compute_backend_service_signed_url_key.html.markdown index 16689eaf07c..d4946a1c70b 100644 --- a/website/docs/r/compute_backend_service_signed_url_key.html.markdown +++ b/website/docs/r/compute_backend_service_signed_url_key.html.markdown @@ -41,9 +41,9 @@ we cannot confirm or reverse changes to a key outside of Terraform. ```hcl resource "google_compute_backend_service_signed_url_key" "backend_key" { - name = "test-key" - key_value = "pPsVemX8GM46QVeezid6Rw==" - backend_service = "${google_compute_backend_service.example_backend.name}" + name = "test-key" + key_value = "pPsVemX8GM46QVeezid6Rw==" + backend_service = google_compute_backend_service.example_backend.name } resource "google_compute_backend_service" "example_backend" { @@ -55,17 +55,17 @@ resource "google_compute_backend_service" "example_backend" { enable_cdn = true backend { - group = "${google_compute_instance_group_manager.webservers.instance_group}" + group = google_compute_instance_group_manager.webservers.instance_group } - health_checks = ["${google_compute_http_health_check.default.self_link}"] + health_checks = [google_compute_http_health_check.default.self_link] } resource "google_compute_instance_group_manager" "webservers" { name = "my-webservers" version { - instance_template = "${google_compute_instance_template.webserver.self_link}" + instance_template = google_compute_instance_template.webserver.self_link name = "primary" } diff --git a/website/docs/r/compute_firewall.html.markdown b/website/docs/r/compute_firewall.html.markdown index 020a9d0dda1..2ab70fc9201 100644 --- a/website/docs/r/compute_firewall.html.markdown +++ b/website/docs/r/compute_firewall.html.markdown @@ -54,7 +54,7 @@ To get more information about Firewall, see: ```hcl resource "google_compute_firewall" "default" { name = "test-firewall" - network = "${google_compute_network.default.name}" + network = google_compute_network.default.name allow { protocol = "icmp" diff --git a/website/docs/r/compute_forwarding_rule.html.markdown b/website/docs/r/compute_forwarding_rule.html.markdown index a1d7d8c0d15..c101499f824 100644 --- a/website/docs/r/compute_forwarding_rule.html.markdown +++ b/website/docs/r/compute_forwarding_rule.html.markdown @@ -44,7 +44,7 @@ To get more information about ForwardingRule, see: ```hcl resource "google_compute_forwarding_rule" "default" { name = "website-forwarding-rule" - target = "${google_compute_target_pool.default.self_link}" + target = google_compute_target_pool.default.self_link port_range = "80" } @@ -63,20 +63,20 @@ resource "google_compute_target_pool" "default" { ```hcl // Forwarding rule for Internal Load Balancing resource "google_compute_forwarding_rule" "default" { - name = "website-forwarding-rule" - region = "us-central1" + name = "website-forwarding-rule" + region = "us-central1" load_balancing_scheme = "INTERNAL" - backend_service = "${google_compute_region_backend_service.backend.self_link}" + backend_service = google_compute_region_backend_service.backend.self_link all_ports = true - network = "${google_compute_network.default.name}" - subnetwork = "${google_compute_subnetwork.default.name}" + network = google_compute_network.default.name + subnetwork = google_compute_subnetwork.default.name } resource "google_compute_region_backend_service" "backend" { - name = "website-backend" - region = "us-central1" - health_checks = ["${google_compute_health_check.hc.self_link}"] + name = 
"website-backend" + region = "us-central1" + health_checks = [google_compute_health_check.hc.self_link] } resource "google_compute_health_check" "hc" { @@ -90,7 +90,7 @@ resource "google_compute_health_check" "hc" { } resource "google_compute_network" "default" { - name = "website-net" + name = "website-net" auto_create_subnetworks = false } @@ -98,7 +98,7 @@ resource "google_compute_subnetwork" "default" { name = "website-net" ip_cidr_range = "10.0.0.0/16" region = "us-central1" - network = "${google_compute_network.default.self_link}" + network = google_compute_network.default.self_link } ``` @@ -143,14 +143,11 @@ The following arguments are supported: forwarding rule. By default, if this field is empty, an ephemeral internal IP address will be automatically allocated from the IP range of the subnet or network configured for this forwarding rule. - ~> **NOTE** The address should be specified as a literal IP address, - e.g. `100.1.2.3` to avoid a permanent diff, as the server returns the - IP address regardless of the input value. - The server accepts a literal IP address or a URL reference to an existing - Address resource. The following examples are all valid but only the first - will prevent a permadiff. If you are using `google_compute_address` or - similar, interpolate using `.address` instead of `.self_link` or similar - to prevent a diff on re-apply. + An address must be specified by a literal IP address. ~> **NOTE**: While + the API allows you to specify various resource paths for an address resource + instead, Terraform requires this to specifically be an IP address to + avoid needing to fetching the IP address from resource paths on refresh + or unnecessary diffs. * `ip_protocol` - (Optional) @@ -164,10 +161,6 @@ The following arguments are supported: A BackendService to receive the matched traffic. This is used only for INTERNAL load balancing. -* `ip_version` - - (Optional, Deprecated) - ipVersion is not a valid field for regional forwarding rules. 
- * `load_balancing_scheme` - (Optional) This signifies what the ForwardingRule will be used for and can be diff --git a/website/docs/r/compute_global_forwarding_rule.html.markdown b/website/docs/r/compute_global_forwarding_rule.html.markdown index d22604ab52d..bce3a665634 100644 --- a/website/docs/r/compute_global_forwarding_rule.html.markdown +++ b/website/docs/r/compute_global_forwarding_rule.html.markdown @@ -43,20 +43,20 @@ https://cloud.google.com/compute/docs/load-balancing/http/ ```hcl resource "google_compute_global_forwarding_rule" "default" { name = "global-rule" - target = "${google_compute_target_http_proxy.default.self_link}" + target = google_compute_target_http_proxy.default.self_link port_range = "80" } resource "google_compute_target_http_proxy" "default" { name = "target-proxy" description = "a description" - url_map = "${google_compute_url_map.default.self_link}" + url_map = google_compute_url_map.default.self_link } resource "google_compute_url_map" "default" { name = "url-map-target-proxy" description = "a description" - default_service = "${google_compute_backend_service.default.self_link}" + default_service = google_compute_backend_service.default.self_link host_rule { hosts = ["mysite.com"] @@ -65,11 +65,11 @@ resource "google_compute_url_map" "default" { path_matcher { name = "allpaths" - default_service = "${google_compute_backend_service.default.self_link}" + default_service = google_compute_backend_service.default.self_link path_rule { paths = ["/*"] - service = "${google_compute_backend_service.default.self_link}" + service = google_compute_backend_service.default.self_link } } } @@ -80,7 +80,7 @@ resource "google_compute_backend_service" "default" { protocol = "HTTP" timeout_sec = 10 - health_checks = ["${google_compute_http_health_check.default.self_link}"] + health_checks = [google_compute_http_health_check.default.self_link] } resource "google_compute_http_health_check" "default" { @@ -100,33 +100,33 @@ resource "google_compute_http_health_check" "default" { ```hcl resource "google_compute_global_forwarding_rule" "default" { - provider = "google-beta" + provider = google-beta name = "global-rule" - target = "${google_compute_target_http_proxy.default.self_link}" + target = google_compute_target_http_proxy.default.self_link port_range = "80" load_balancing_scheme = "INTERNAL_SELF_MANAGED" ip_address = "0.0.0.0" metadata_filters { filter_match_criteria = "MATCH_ANY" filter_labels { - name = "PLANET" + name = "PLANET" value = "MARS" } } } resource "google_compute_target_http_proxy" "default" { - provider = "google-beta" + provider = google-beta name = "target-proxy" description = "a description" - url_map = "${google_compute_url_map.default.self_link}" + url_map = google_compute_url_map.default.self_link } resource "google_compute_url_map" "default" { - provider = "google-beta" + provider = google-beta name = "url-map-target-proxy" description = "a description" - default_service = "${google_compute_backend_service.default.self_link}" + default_service = google_compute_backend_service.default.self_link host_rule { hosts = ["mysite.com"] @@ -135,17 +135,17 @@ resource "google_compute_url_map" "default" { path_matcher { name = "allpaths" - default_service = "${google_compute_backend_service.default.self_link}" + default_service = google_compute_backend_service.default.self_link path_rule { paths = ["/*"] - service = "${google_compute_backend_service.default.self_link}" + service = google_compute_backend_service.default.self_link } } } resource 
"google_compute_backend_service" "default" { - provider = "google-beta" + provider = google-beta name = "backend" port_name = "http" protocol = "HTTP" @@ -153,27 +153,27 @@ resource "google_compute_backend_service" "default" { load_balancing_scheme = "INTERNAL_SELF_MANAGED" backend { - group = "${google_compute_instance_group_manager.igm.instance_group}" - balancing_mode = "RATE" - capacity_scaler = 0.4 + group = google_compute_instance_group_manager.igm.instance_group + balancing_mode = "RATE" + capacity_scaler = 0.4 max_rate_per_instance = 50 } - health_checks = ["${google_compute_health_check.default.self_link}"] + health_checks = [google_compute_health_check.default.self_link] } data "google_compute_image" "debian_image" { - provider = "google-beta" + provider = google-beta family = "debian-9" project = "debian-cloud" } resource "google_compute_instance_group_manager" "igm" { - provider = "google-beta" - name = "igm-internal" + provider = google-beta + name = "igm-internal" version { - instance_template = "${google_compute_instance_template.instance_template.self_link}" - name = "primary" + instance_template = google_compute_instance_template.instance_template.self_link + name = "primary" } base_instance_name = "internal-glb" zone = "us-central1-f" @@ -181,7 +181,7 @@ resource "google_compute_instance_group_manager" "igm" { } resource "google_compute_instance_template" "instance_template" { - provider = "google-beta" + provider = google-beta name = "template-backend" machine_type = "n1-standard-1" @@ -190,14 +190,14 @@ resource "google_compute_instance_template" "instance_template" { } disk { - source_image = "${data.google_compute_image.debian_image.self_link}" + source_image = data.google_compute_image.debian_image.self_link auto_delete = true boot = true } } resource "google_compute_health_check" "default" { - provider = "google-beta" + provider = google-beta name = "check-backend" check_interval_sec = 1 timeout_sec = 1 @@ -256,14 +256,11 @@ The following arguments are supported: forwarding rule. By default, if this field is empty, an ephemeral internal IP address will be automatically allocated from the IP range of the subnet or network configured for this forwarding rule. - ~> **NOTE** The address should be specified as a literal IP address, - e.g. `100.1.2.3` to avoid a permanent diff, as the server returns the - IP address regardless of the input value. - The server accepts a literal IP address or a URL reference to an existing - Address resource. The following examples are all valid but only the first - will prevent a permadiff. If you are using `google_compute_address` or - similar, interpolate using `.address` instead of `.self_link` or similar - to prevent a diff on re-apply. + An address must be specified by a literal IP address. ~> **NOTE**: While + the API allows you to specify various resource paths for an address resource + instead, Terraform requires this to specifically be an IP address to + avoid needing to fetching the IP address from resource paths on refresh + or unnecessary diffs. 
* `ip_protocol` - (Optional) diff --git a/website/docs/r/compute_health_check.html.markdown b/website/docs/r/compute_health_check.html.markdown index 27d6a629c9f..5b4a95eb41c 100644 --- a/website/docs/r/compute_health_check.html.markdown +++ b/website/docs/r/compute_health_check.html.markdown @@ -51,14 +51,14 @@ To get more information about HealthCheck, see: ```hcl resource "google_compute_health_check" "tcp-health-check" { - name = "tcp-health-check" + name = "tcp-health-check" - timeout_sec = 1 - check_interval_sec = 1 + timeout_sec = 1 + check_interval_sec = 1 - tcp_health_check { - port = "80" - } + tcp_health_check { + port = "80" + } } ```
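As a brief aside, a health check like the one above is consumed by reference from other resources; a minimal sketch, assuming a hypothetical backend service:

```hcl
resource "google_compute_backend_service" "sketch" {
  name          = "sketch-backend" # hypothetical
  health_checks = [google_compute_health_check.tcp-health-check.self_link]
}
```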
@@ -71,7 +71,7 @@ resource "google_compute_health_check" "tcp-health-check" { ```hcl resource "google_compute_health_check" "tcp-health-check" { - name = "tcp-health-check" + name = "tcp-health-check" description = "Health check via tcp" timeout_sec = 1 @@ -80,11 +80,11 @@ resource "google_compute_health_check" "tcp-health-check" { unhealthy_threshold = 5 tcp_health_check { - port_name = "health-check-port" + port_name = "health-check-port" port_specification = "USE_NAMED_PORT" - request = "ARE YOU HEALTHY?" - proxy_header = "NONE" - response = "I AM HEALTHY" + request = "ARE YOU HEALTHY?" + proxy_header = "NONE" + response = "I AM HEALTHY" } } ``` @@ -98,14 +98,14 @@ resource "google_compute_health_check" "tcp-health-check" { ```hcl resource "google_compute_health_check" "ssl-health-check" { - name = "ssl-health-check" + name = "ssl-health-check" - timeout_sec = 1 - check_interval_sec = 1 + timeout_sec = 1 + check_interval_sec = 1 - ssl_health_check { - port = "443" - } + ssl_health_check { + port = "443" + } } ```
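The "full" examples on this page all use `port_specification = "USE_NAMED_PORT"`; for contrast, a hedged sketch of the `USE_SERVING_PORT` option, which requires omitting both `port` and `port_name` (resource name is illustrative):

```hcl
resource "google_compute_health_check" "serving-port" {
  name = "serving-port-check" # illustrative

  http_health_check {
    # No port or port_name here: each backend's serving port is used.
    port_specification = "USE_SERVING_PORT"
  }
}
```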
@@ -118,7 +118,7 @@ resource "google_compute_health_check" "ssl-health-check" { ```hcl resource "google_compute_health_check" "ssl-health-check" { - name = "ssl-health-check" + name = "ssl-health-check" description = "Health check via ssl" timeout_sec = 1 @@ -127,11 +127,11 @@ resource "google_compute_health_check" "ssl-health-check" { unhealthy_threshold = 5 ssl_health_check { - port_name = "health-check-port" + port_name = "health-check-port" port_specification = "USE_NAMED_PORT" - request = "ARE YOU HEALTHY?" - proxy_header = "NONE" - response = "I AM HEALTHY" + request = "ARE YOU HEALTHY?" + proxy_header = "NONE" + response = "I AM HEALTHY" } } ``` @@ -145,14 +145,14 @@ resource "google_compute_health_check" "ssl-health-check" { ```hcl resource "google_compute_health_check" "http-health-check" { - name = "http-health-check" + name = "http-health-check" - timeout_sec = 1 - check_interval_sec = 1 + timeout_sec = 1 + check_interval_sec = 1 - http_health_check { - port = 80 - } + http_health_check { + port = 80 + } } ```
@@ -165,7 +165,7 @@ resource "google_compute_health_check" "http-health-check" { ```hcl resource "google_compute_health_check" "http-health-check" { - name = "http-health-check" + name = "http-health-check" description = "Health check via http" timeout_sec = 1 @@ -174,12 +174,12 @@ resource "google_compute_health_check" "http-health-check" { unhealthy_threshold = 5 http_health_check { - port_name = "health-check-port" + port_name = "health-check-port" port_specification = "USE_NAMED_PORT" - host = "1.2.3.4" - request_path = "/mypath" - proxy_header = "NONE" - response = "I AM HEALTHY" + host = "1.2.3.4" + request_path = "/mypath" + proxy_header = "NONE" + response = "I AM HEALTHY" } } ``` @@ -193,14 +193,14 @@ resource "google_compute_health_check" "http-health-check" { ```hcl resource "google_compute_health_check" "https-health-check" { - name = "https-health-check" + name = "https-health-check" - timeout_sec = 1 - check_interval_sec = 1 + timeout_sec = 1 + check_interval_sec = 1 - https_health_check { - port = "443" - } + https_health_check { + port = "443" + } } ```
@@ -213,7 +213,7 @@ resource "google_compute_health_check" "https-health-check" { ```hcl resource "google_compute_health_check" "https-health-check" { - name = "https-health-check" + name = "https-health-check" description = "Health check via https" timeout_sec = 1 @@ -222,12 +222,12 @@ resource "google_compute_health_check" "https-health-check" { unhealthy_threshold = 5 https_health_check { - port_name = "health-check-port" + port_name = "health-check-port" port_specification = "USE_NAMED_PORT" - host = "1.2.3.4" - request_path = "/mypath" - proxy_header = "NONE" - response = "I AM HEALTHY" + host = "1.2.3.4" + request_path = "/mypath" + proxy_header = "NONE" + response = "I AM HEALTHY" } } ``` @@ -241,14 +241,14 @@ resource "google_compute_health_check" "https-health-check" { ```hcl resource "google_compute_health_check" "http2-health-check" { - name = "http2-health-check" + name = "http2-health-check" - timeout_sec = 1 - check_interval_sec = 1 + timeout_sec = 1 + check_interval_sec = 1 - http2_health_check { - port = "443" - } + http2_health_check { + port = "443" + } } ```
@@ -261,7 +261,7 @@ resource "google_compute_health_check" "http2-health-check" { ```hcl resource "google_compute_health_check" "http2-health-check" { - name = "http2-health-check" + name = "http2-health-check" description = "Health check via http2" timeout_sec = 1 @@ -270,12 +270,12 @@ resource "google_compute_health_check" "http2-health-check" { unhealthy_threshold = 5 http2_health_check { - port_name = "health-check-port" + port_name = "health-check-port" port_specification = "USE_NAMED_PORT" - host = "1.2.3.4" - request_path = "/mypath" - proxy_header = "NONE" - response = "I AM HEALTHY" + host = "1.2.3.4" + request_path = "/mypath" + proxy_header = "NONE" + response = "I AM HEALTHY" } } ``` diff --git a/website/docs/r/compute_image.html.markdown b/website/docs/r/compute_image.html.markdown index 41c36aa00c0..80dda725989 100644 --- a/website/docs/r/compute_image.html.markdown +++ b/website/docs/r/compute_image.html.markdown @@ -155,7 +155,7 @@ The following arguments are supported: The `guest_os_features` block supports: * `type` - - (Optional) + (Required) The type of supported feature. Read [Enabling guest operating system features](https://cloud.google.com/compute/docs/images/create-delete-deprecate-private-images#guest-os-features) to see a list of available options. The `raw_disk` block supports: diff --git a/website/docs/r/compute_instance_from_template.html.markdown b/website/docs/r/compute_instance_from_template.html.markdown index 99491ad5458..1458e654c47 100644 --- a/website/docs/r/compute_instance_from_template.html.markdown +++ b/website/docs/r/compute_instance_from_template.html.markdown @@ -23,14 +23,14 @@ This resource is specifically to create a compute instance from a given ```hcl resource "google_compute_instance_template" "tpl" { - name = "template" + name = "template" machine_type = "n1-standard-1" disk { source_image = "debian-cloud/debian-9" - auto_delete = true + auto_delete = true disk_size_gb = 100 - boot = true + boot = true } network_interface { @@ -45,15 +45,15 @@ resource "google_compute_instance_template" "tpl" { } resource "google_compute_instance_from_template" "tpl" { - name = "instance-from-template" - zone = "us-central1-a" + name = "instance-from-template" + zone = "us-central1-a" - source_instance_template = "${google_compute_instance_template.tpl.self_link}" + source_instance_template = google_compute_instance_template.tpl.self_link // Override fields from instance template can_ip_forward = false labels = { - my_key = "my_value" + my_key = "my_value" } } ``` diff --git a/website/docs/r/compute_instance_group.html.markdown b/website/docs/r/compute_instance_group.html.markdown index 5009ac062ad..4407f62caea 100644 --- a/website/docs/r/compute_instance_group.html.markdown +++ b/website/docs/r/compute_instance_group.html.markdown @@ -24,7 +24,7 @@ resource "google_compute_instance_group" "test" { name = "terraform-test" description = "Terraform test instance group" zone = "us-central1-a" - network = "${google_compute_network.default.self_link}" + network = google_compute_network.default.self_link } ``` @@ -36,8 +36,8 @@ resource "google_compute_instance_group" "webservers" { description = "Terraform test instance group" instances = [ - "${google_compute_instance.test.self_link}", - "${google_compute_instance.test2.self_link}", + google_compute_instance.test.self_link, + google_compute_instance.test2.self_link, ] named_port { @@ -61,9 +61,9 @@ as shown in this example to avoid this type of error. 
```hcl resource "google_compute_instance_group" "staging_group" { - name = "staging-instance-group" - zone = "us-central1-c" - instances = [ "${google_compute_instance.staging_vm.self_link}" ] + name = "staging-instance-group" + zone = "us-central1-c" + instances = [google_compute_instance.staging_vm.self_link] named_port { name = "http" port = "8080" @@ -85,12 +85,12 @@ data "google_compute_image" "debian_image" { } resource "google_compute_instance" "staging_vm" { - name = "staging-vm" + name = "staging-vm" machine_type = "n1-standard-1" - zone = "us-central1-c" + zone = "us-central1-c" boot_disk { initialize_params { - image = "${data.google_compute_image.debian_image.self_link}" + image = data.google_compute_image.debian_image.self_link } } @@ -105,11 +105,11 @@ resource "google_compute_backend_service" "staging_service" { protocol = "HTTPS" backend { - group = "${google_compute_instance_group.staging_group.self_link}" + group = google_compute_instance_group.staging_group.self_link } health_checks = [ - "${google_compute_https_health_check.staging_health.self_link}", + google_compute_https_health_check.staging_health.self_link, ] } @@ -181,4 +181,5 @@ Instance group can be imported using the `zone` and `name` with an optional `pro ``` $ terraform import google_compute_instance_group.webservers us-central1-a/terraform-webservers $ terraform import google_compute_instance_group.webservers big-project/us-central1-a/terraform-webservers +$ terraform import google_compute_instance_group.webservers projects/big-project/zones/us-central1-a/instanceGroups/terraform-webservers ``` diff --git a/website/docs/r/compute_instance_group_manager.html.markdown b/website/docs/r/compute_instance_group_manager.html.markdown index 5ae5e2b5183..4107bf09119 100644 --- a/website/docs/r/compute_instance_group_manager.html.markdown +++ b/website/docs/r/compute_instance_group_manager.html.markdown @@ -24,7 +24,7 @@ resource "google_compute_health_check" "autohealing" { check_interval_sec = 5 timeout_sec = 5 healthy_threshold = 2 - unhealthy_threshold = 10 # 50 seconds + unhealthy_threshold = 10 # 50 seconds http_health_check { request_path = "/healthz" @@ -39,10 +39,10 @@ resource "google_compute_instance_group_manager" "appserver" { zone = "us-central1-a" version { - instance_template = "${google_compute_instance_template.appserver.self_link}" + instance_template = google_compute_instance_template.appserver.self_link } - target_pools = ["${google_compute_target_pool.appserver.self_link}"] + target_pools = [google_compute_target_pool.appserver.self_link] target_size = 2 named_port { @@ -51,7 +51,7 @@ resource "google_compute_instance_group_manager" "appserver" { } auto_healing_policies { - health_check = "${google_compute_health_check.autohealing.self_link}" + health_check = google_compute_health_check.autohealing.self_link initial_delay_sec = 300 } } @@ -60,22 +60,22 @@ resource "google_compute_instance_group_manager" "appserver" { ## Example Usage with multiple versions (`google-beta` provider) ```hcl resource "google_compute_instance_group_manager" "appserver" { - provider = "google-beta" - name = "appserver-igm" + provider = google-beta + name = "appserver-igm" base_instance_name = "app" zone = "us-central1-a" - target_size = 5 + target_size = 5 version { - name = "appserver" - instance_template = "${google_compute_instance_template.appserver.self_link}" + name = "appserver" + instance_template = google_compute_instance_template.appserver.self_link } version { - name = "appserver-canary" - instance_template 
= "${google_compute_instance_template.appserver-canary.self_link}" + name = "appserver-canary" + instance_template = google_compute_instance_template.appserver-canary.self_link target_size { fixed = 1 } @@ -94,16 +94,9 @@ The following arguments are supported: appending a hyphen and a random four-character string to the base instance name. -* `instance_template` - (Deprecated) The - full URL to an instance template from which all new instances - will be created. This field is replaced by `version.instance_template`. You must - specify at least one `version` block with an `instance_template`. - -* `version` - (Optional) Application versions managed by this instance group. Each +* `version` - (Required) Application versions managed by this instance group. Each version deals with a specific instance template, allowing canary release scenarios. Structure is documented below. - Until `instance_template` is removed this field will be Optional to allow for a - graceful upgrade. In the Beta provider and as of 3.0.0 it will be Required. * `name` - (Required) The name of the instance group manager. Must be 1-63 characters long and comply with @@ -124,9 +117,6 @@ The following arguments are supported: * `project` - (Optional) The ID of the project in which the resource belongs. If it is not provided, the provider project is used. -* `update_strategy` - (Deprecated) This field has been deprecated, use `update_policy` - instead. - * `target_size` - (Optional) The target number of running instances for this managed instance group. This value should always be explicitly set unless this resource is attached to an autoscaler, in which case it should never be set. Defaults to `0`. @@ -150,12 +140,12 @@ group. You can specify only one value. Structure is documented below. 
For more information, see the official documentation. The `update_policy` block supports: ```hcl -update_policy{ - type = "PROACTIVE" - minimal_action = "REPLACE" - max_surge_percent = 20 +update_policy { + type = "PROACTIVE" + minimal_action = "REPLACE" + max_surge_percent = 20 max_unavailable_fixed = 2 - min_ready_sec = 50 + min_ready_sec = 50 } ``` @@ -192,21 +182,23 @@ The `version` block supports: ```hcl version { - name = "appserver-canary" - instance_template = "${google_compute_instance_template.appserver-canary.self_link}" - target_size { - fixed = 1 - } + name = "appserver-canary" + instance_template = google_compute_instance_template.appserver-canary.self_link + + target_size { + fixed = 1 + } } ``` ```hcl version { - name = "appserver-canary" - instance_template = "${google_compute_instance_template.appserver-canary.self_link}" - target_size { - percent = 20 - } + name = "appserver-canary" + instance_template = google_compute_instance_template.appserver-canary.self_link + + target_size { + percent = 20 + } } ``` @@ -255,6 +247,7 @@ This resource provides the following Instance group managers can be imported using any of these accepted formats: ``` +$ terraform import google_compute_instance_group_manager.appserver projects/{{project}}/zones/{{zone}}/instanceGroupManagers/{{name}} $ terraform import google_compute_instance_group_manager.appserver {{project}}/{{zone}}/{{name}} $ terraform import google_compute_instance_group_manager.appserver {{project}}/{{name}} $ terraform import google_compute_instance_group_manager.appserver {{name}} diff --git a/website/docs/r/compute_instance_iam.html.markdown b/website/docs/r/compute_instance_iam.html.markdown index a6c692ba0dc..ef455dd3df8 100644 --- a/website/docs/r/compute_instance_iam.html.markdown +++ b/website/docs/r/compute_instance_iam.html.markdown @@ -129,17 +129,17 @@ Compute instance IAM resources can be imported using the resource identifiers, r IAM member imports use space-delimited identifiers: the resource in question, the role, and the member identity, e.g. ``` -$ terraform import google_compute_instance_iam_member.editor "{{instance}} roles/compute.osLogin jane@example.com" +$ terraform import google_compute_instance_iam_member.editor "projects/{{project}}/zones/{{zone}}/instances/{{instance}} roles/compute.osLogin jane@example.com" ``` IAM binding imports use space-delimited identifiers: the resource in question and the role, e.g. ``` -$ terraform import google_compute_instance_iam_binding.editor "{{instance}} roles/compute.osLogin" +$ terraform import google_compute_instance_iam_binding.editor "projects/{{project}}/zones/{{zone}}/instances/{{instance}} roles/compute.osLogin" ``` IAM policy imports use the identifier of the resource in question, e.g. 
``` -$ terraform import google_compute_instance_iam_policy.editor {{instance}} +$ terraform import google_compute_instance_iam_policy.editor projects/{{project}}/zones/{{zone}}/instances/{{instance}} ``` -> If you're importing a resource with beta features, make sure to include `-provider=google-beta` diff --git a/website/docs/r/compute_instance_template.html.markdown b/website/docs/r/compute_instance_template.html.markdown index c4376272688..c8d00105abc 100644 --- a/website/docs/r/compute_instance_template.html.markdown +++ b/website/docs/r/compute_instance_template.html.markdown @@ -47,7 +47,7 @@ resource "google_compute_instance_template" "default" { // Use an existing disk resource disk { // Instance Templates reference disks by name, not self link - source = "${google_compute_disk.foobar.name}" + source = google_compute_disk.foobar.name auto_delete = false boot = false } @@ -72,7 +72,7 @@ data "google_compute_image" "my_image" { resource "google_compute_disk" "foobar" { name = "existing-disk" - image = "${data.google_compute_image.my_image.self_link}" + image = data.google_compute_image.my_image.self_link size = 10 type = "pd-ssd" zone = "us-central1-a" @@ -112,7 +112,7 @@ resource "google_compute_instance_template" "instance_template" { resource "google_compute_instance_group_manager" "instance_group_manager" { name = "instance-group-manager" - instance_template = "${google_compute_instance_template.instance_template.self_link}" + instance_template = google_compute_instance_template.instance_template.self_link base_instance_name = "instance-group-manager" zone = "us-central1-f" target_size = "1" @@ -153,7 +153,7 @@ resource "google_compute_instance_template" "instance_template" { // boot disk disk { - source_image = "${google_compute_image.my_image.self_link}" + source_image = google_compute_image.my_image.self_link } } ``` @@ -422,10 +422,12 @@ exported: ## Import -Instance templates can be imported using the `name`, e.g. +Instance templates can be imported using any of these accepted formats: ``` -$ terraform import google_compute_instance_template.default appserver-template +$ terraform import google_compute_instance_template.default projects/{{project}}/global/instanceTemplates/{{name}} +$ terraform import google_compute_instance_template.default {{project}}/{{name}} +$ terraform import google_compute_instance_template.default {{name}} ``` [custom-vm-types]: https://cloud.google.com/dataproc/docs/concepts/compute/custom-machine-types diff --git a/website/docs/r/compute_interconnect_attachment.html.markdown b/website/docs/r/compute_interconnect_attachment.html.markdown index 00b8982da42..fd38acbeaa9 100644 --- a/website/docs/r/compute_interconnect_attachment.html.markdown +++ b/website/docs/r/compute_interconnect_attachment.html.markdown @@ -34,12 +34,12 @@ information, see Creating VLAN Attachments. 
resource "google_compute_interconnect_attachment" "on_prem" { name = "on-prem-attachment" interconnect = "my-interconnect-id" - router = "${google_compute_router.foobar.self_link}" + router = google_compute_router.foobar.self_link } resource "google_compute_router" "foobar" { name = "router" - network = "${google_compute_network.foobar.name}" + network = google_compute_network.foobar.name } ``` diff --git a/website/docs/r/compute_network.html.markdown b/website/docs/r/compute_network.html.markdown index 93deae77520..624d278b4f7 100644 --- a/website/docs/r/compute_network.html.markdown +++ b/website/docs/r/compute_network.html.markdown @@ -69,16 +69,6 @@ The following arguments are supported: An optional description of this resource. The resource must be recreated to modify this field. -* `ipv4_range` - - (Optional, Deprecated) - If this field is specified, a deprecated legacy network is created. - You will no longer be able to create a legacy network on Feb 1, 2020. - See the [legacy network docs](https://cloud.google.com/vpc/docs/legacy) - for more details. - The range of internal addresses that are legal on this legacy network. - This range is a CIDR specification, for example: `192.168.0.0/16`. - The resource must be recreated to modify this field. - * `auto_create_subnetworks` - (Optional) When set to `true`, the network is created in "auto subnet mode" and diff --git a/website/docs/r/compute_network_endpoint.html.markdown b/website/docs/r/compute_network_endpoint.html.markdown index 2850d8c237f..33540c394dd 100644 --- a/website/docs/r/compute_network_endpoint.html.markdown +++ b/website/docs/r/compute_network_endpoint.html.markdown @@ -41,11 +41,11 @@ To get more information about NetworkEndpoint, see: ```hcl resource "google_compute_network_endpoint" "default-endpoint" { - network_endpoint_group = "${google_compute_network_endpoint_group.neg.name}" + network_endpoint_group = google_compute_network_endpoint_group.neg.name - instance = "${google_compute_instance.endpoint-instance.name}" - port = "${google_compute_network_endpoint_group.neg.default_port}" - ip_address = "${google_compute_instance.endpoint-instance.network_interface.0.network_ip}" + instance = google_compute_instance.endpoint-instance.name + port = google_compute_network_endpoint_group.neg.default_port + ip_address = google_compute_instance.endpoint-instance.network_interface[0].network_ip } data "google_compute_image" "my_image" { @@ -54,31 +54,32 @@ data "google_compute_image" "my_image" { } resource "google_compute_instance" "endpoint-instance" { - name = "endpoint-instance" + name = "endpoint-instance" machine_type = "n1-standard-1" boot_disk { - initialize_params{ - image = "${data.google_compute_image.my_image.self_link}" + initialize_params { + image = data.google_compute_image.my_image.self_link } } network_interface { - subnetwork = "${google_compute_subnetwork.default.self_link}" - access_config { } + subnetwork = google_compute_subnetwork.default.self_link + access_config { + } } } resource "google_compute_network_endpoint_group" "group" { name = "my-lb-neg" - network = "${google_compute_network.default.self_link}" - subnetwork = "${google_compute_subnetwork.default.self_link}" + network = google_compute_network.default.self_link + subnetwork = google_compute_subnetwork.default.self_link default_port = "90" zone = "us-central1-a" } resource "google_compute_network" "default" { - name = "neg-network" + name = "neg-network" auto_create_subnetworks = false } @@ -86,7 +87,7 @@ resource "google_compute_subnetwork" 
"default" { name = "neg-subnetwork" ip_cidr_range = "10.0.0.1/16" region = "us-central1" - network = "${google_compute_network.default.self_link}" + network = google_compute_network.default.self_link } ``` diff --git a/website/docs/r/compute_network_endpoint_group.html.markdown b/website/docs/r/compute_network_endpoint_group.html.markdown index 24cf98ba00e..cede3eb7881 100644 --- a/website/docs/r/compute_network_endpoint_group.html.markdown +++ b/website/docs/r/compute_network_endpoint_group.html.markdown @@ -53,14 +53,14 @@ To get more information about NetworkEndpointGroup, see: ```hcl resource "google_compute_network_endpoint_group" "neg" { name = "my-lb-neg" - network = "${google_compute_network.default.self_link}" - subnetwork = "${google_compute_subnetwork.default.self_link}" + network = google_compute_network.default.self_link + subnetwork = google_compute_subnetwork.default.self_link default_port = "90" zone = "us-central1-a" } resource "google_compute_network" "default" { - name = "neg-network" + name = "neg-network" auto_create_subnetworks = false } @@ -68,7 +68,7 @@ resource "google_compute_subnetwork" "default" { name = "neg-subnetwork" ip_cidr_range = "10.0.0.0/16" region = "us-central1" - network = "${google_compute_network.default.self_link}" + network = google_compute_network.default.self_link } ``` diff --git a/website/docs/r/compute_network_peering.html.markdown b/website/docs/r/compute_network_peering.html.markdown index c8d0fdb20a3..3357a38aedc 100644 --- a/website/docs/r/compute_network_peering.html.markdown +++ b/website/docs/r/compute_network_peering.html.markdown @@ -22,15 +22,15 @@ and ```hcl resource "google_compute_network_peering" "peering1" { - name = "peering1" - network = "${google_compute_network.default.self_link}" - peer_network = "${google_compute_network.other.self_link}" + name = "peering1" + network = google_compute_network.default.self_link + peer_network = google_compute_network.other.self_link } resource "google_compute_network_peering" "peering2" { - name = "peering2" - network = "${google_compute_network.other.self_link}" - peer_network = "${google_compute_network.default.self_link}" + name = "peering2" + network = google_compute_network.other.self_link + peer_network = google_compute_network.default.self_link } resource "google_compute_network" "default" { @@ -54,9 +54,6 @@ The following arguments are supported: * `peer_network` - (Required) Resource link of the peer network. -* `auto_create_routes` - (Optional) If set to `true`, the routes between the two networks will - be created and managed automatically. Defaults to `true`. 
- ## Attributes Reference In addition to the arguments listed above, the following computed attributes are diff --git a/website/docs/r/compute_node_group.html.markdown b/website/docs/r/compute_node_group.html.markdown index 594419e8fc2..f5d44c47b4d 100644 --- a/website/docs/r/compute_node_group.html.markdown +++ b/website/docs/r/compute_node_group.html.markdown @@ -50,18 +50,18 @@ data "google_compute_node_types" "central1a" { } resource "google_compute_node_template" "soletenant-tmpl" { - name = "soletenant-tmpl" - region = "us-central1" - node_type = "${data.google_compute_node_types.central1a.names[0]}" + name = "soletenant-tmpl" + region = "us-central1" + node_type = data.google_compute_node_types.central1a.names[0] } resource "google_compute_node_group" "nodes" { - name = "soletenant-group" - zone = "us-central1-a" + name = "soletenant-group" + zone = "us-central1-a" description = "example google_compute_node_group for Terraform Google Provider" - size = 1 - node_template = "${google_compute_node_template.soletenant-tmpl.self_link}" + size = 1 + node_template = google_compute_node_template.soletenant-tmpl.self_link } ``` diff --git a/website/docs/r/compute_node_template.html.markdown b/website/docs/r/compute_node_template.html.markdown index 4af59d372b9..da01e1f0cfe 100644 --- a/website/docs/r/compute_node_template.html.markdown +++ b/website/docs/r/compute_node_template.html.markdown @@ -47,9 +47,9 @@ data "google_compute_node_types" "central1a" { } resource "google_compute_node_template" "template" { - name = "soletenant-tmpl" - region = "us-central1" - node_type = "${data.google_compute_node_types.central1a.names[0]}" + name = "soletenant-tmpl" + region = "us-central1" + node_type = data.google_compute_node_types.central1a.names[0] } ```
@@ -67,16 +67,16 @@ provider "google-beta" { } data "google_compute_node_types" "central1a" { - provider = "google-beta" - zone = "us-central1-a" + provider = google-beta + zone = "us-central1-a" } resource "google_compute_node_template" "template" { - provider = "google-beta" + provider = google-beta - name = "soletenant-with-licenses" - region = "us-central1" - node_type = "${data.google_compute_node_types.central1a.names[0]}" + name = "soletenant-with-licenses" + region = "us-central1" + node_type = data.google_compute_node_types.central1a.names[0] node_affinity_labels = { foo = "baz" diff --git a/website/docs/r/compute_project_metadata_item.html.markdown b/website/docs/r/compute_project_metadata_item.html.markdown index d3f92a6a997..b82d909e552 100644 --- a/website/docs/r/compute_project_metadata_item.html.markdown +++ b/website/docs/r/compute_project_metadata_item.html.markdown @@ -18,7 +18,7 @@ project metadata map. ```hcl resource "google_compute_project_metadata_item" "default" { - key = "my_metadata" + key = "my_metadata" value = "my_value" } ``` diff --git a/website/docs/r/compute_region_autoscaler.html.markdown b/website/docs/r/compute_region_autoscaler.html.markdown index 4b03215fb8e..d4b38606172 100644 --- a/website/docs/r/compute_region_autoscaler.html.markdown +++ b/website/docs/r/compute_region_autoscaler.html.markdown @@ -45,11 +45,11 @@ To get more information about RegionAutoscaler, see: ```hcl resource "google_compute_region_autoscaler" "foobar" { - provider = "google-beta" + provider = google-beta name = "my-region-autoscaler" region = "us-central1" - target = "${google_compute_region_instance_group_manager.foobar.self_link}" + target = google_compute_region_instance_group_manager.foobar.self_link autoscaling_policy { max_replicas = 5 @@ -63,7 +63,7 @@ resource "google_compute_region_autoscaler" "foobar" { } resource "google_compute_instance_template" "foobar" { - provider = "google-beta" + provider = google-beta name = "my-instance-template" machine_type = "n1-standard-1" @@ -72,7 +72,7 @@ resource "google_compute_instance_template" "foobar" { tags = ["foo", "bar"] disk { - source_image = "${data.google_compute_image.debian_9.self_link}" + source_image = data.google_compute_image.debian_9.self_link } network_interface { @@ -89,34 +89,34 @@ resource "google_compute_instance_template" "foobar" { } resource "google_compute_target_pool" "foobar" { - provider = "google-beta" + provider = google-beta name = "my-target-pool" } resource "google_compute_region_instance_group_manager" "foobar" { - provider = "google-beta" + provider = google-beta name = "my-region-igm" region = "us-central1" version { - instance_template = "${google_compute_instance_template.foobar.self_link}" - name = "primary" + instance_template = google_compute_instance_template.foobar.self_link + name = "primary" } - target_pools = ["${google_compute_target_pool.foobar.self_link}"] + target_pools = [google_compute_target_pool.foobar.self_link] base_instance_name = "foobar" } data "google_compute_image" "debian_9" { - provider = "google-beta" + provider = google-beta - family = "debian-9" - project = "debian-cloud" + family = "debian-9" + project = "debian-cloud" } -provider "google-beta"{ +provider "google-beta" { region = "us-central1" zone = "us-central1-a" } @@ -133,7 +133,7 @@ provider "google-beta"{ resource "google_compute_region_autoscaler" "foobar" { name = "my-region-autoscaler" region = "us-central1" - target = "${google_compute_region_instance_group_manager.foobar.self_link}" + target = 
google_compute_region_instance_group_manager.foobar.self_link autoscaling_policy { max_replicas = 5 @@ -154,7 +154,7 @@ resource "google_compute_instance_template" "foobar" { tags = ["foo", "bar"] disk { - source_image = "${data.google_compute_image.debian_9.self_link}" + source_image = data.google_compute_image.debian_9.self_link } network_interface { @@ -179,17 +179,17 @@ resource "google_compute_region_instance_group_manager" "foobar" { region = "us-central1" version { - instance_template = "${google_compute_instance_template.foobar.self_link}" + instance_template = google_compute_instance_template.foobar.self_link name = "primary" } - target_pools = ["${google_compute_target_pool.foobar.self_link}"] + target_pools = [google_compute_target_pool.foobar.self_link] base_instance_name = "foobar" } data "google_compute_image" "debian_9" { - family = "debian-9" - project = "debian-cloud" + family = "debian-9" + project = "debian-cloud" } ``` diff --git a/website/docs/r/compute_region_backend_service.html.markdown b/website/docs/r/compute_region_backend_service.html.markdown index 85dd587312c..744a722d6bb 100644 --- a/website/docs/r/compute_region_backend_service.html.markdown +++ b/website/docs/r/compute_region_backend_service.html.markdown @@ -45,7 +45,7 @@ To get more information about RegionBackendService, see: resource "google_compute_region_backend_service" "default" { name = "region-backend-service" region = "us-central1" - health_checks = ["${google_compute_health_check.default.self_link}"] + health_checks = [google_compute_health_check.default.self_link] connection_draining_timeout_sec = 10 session_affinity = "CLIENT_IP" } @@ -85,7 +85,7 @@ resource "google_compute_health_check" "health_check" { name = "health-check" http_health_check { - + port = 80 } } ``` @@ -130,7 +130,7 @@ resource "google_compute_health_check" "health_check" { name = "health-check" http_health_check { - + port = 80 } } ``` diff --git a/website/docs/r/compute_region_disk.html.markdown b/website/docs/r/compute_region_disk.html.markdown index 2cddbdac726..d11b0d177ad 100644 --- a/website/docs/r/compute_region_disk.html.markdown +++ b/website/docs/r/compute_region_disk.html.markdown @@ -60,27 +60,27 @@ state as plain-text. 
```hcl resource "google_compute_region_disk" "regiondisk" { - name = "my-region-disk" - snapshot = "${google_compute_snapshot.snapdisk.self_link}" - type = "pd-ssd" - region = "us-central1" + name = "my-region-disk" + snapshot = google_compute_snapshot.snapdisk.self_link + type = "pd-ssd" + region = "us-central1" physical_block_size_bytes = 4096 replica_zones = ["us-central1-a", "us-central1-f"] } resource "google_compute_disk" "disk" { - name = "my-disk" + name = "my-disk" image = "debian-cloud/debian-9" - size = 50 - type = "pd-ssd" - zone = "us-central1-a" + size = 50 + type = "pd-ssd" + zone = "us-central1-a" } resource "google_compute_snapshot" "snapdisk" { - name = "my-snapshot" - source_disk = "${google_compute_disk.disk.name}" - zone = "us-central1-a" + name = "my-snapshot" + source_disk = google_compute_disk.disk.name + zone = "us-central1-a" } ``` diff --git a/website/docs/r/compute_region_instance_group_manager.html.markdown b/website/docs/r/compute_region_instance_group_manager.html.markdown index 16e1d4ecdab..6ab67dcc857 100644 --- a/website/docs/r/compute_region_instance_group_manager.html.markdown +++ b/website/docs/r/compute_region_instance_group_manager.html.markdown @@ -24,7 +24,7 @@ resource "google_compute_health_check" "autohealing" { check_interval_sec = 5 timeout_sec = 5 healthy_threshold = 2 - unhealthy_threshold = 10 # 50 seconds + unhealthy_threshold = 10 # 50 seconds http_health_check { request_path = "/healthz" @@ -36,14 +36,14 @@ resource "google_compute_region_instance_group_manager" "appserver" { name = "appserver-igm" base_instance_name = "app" + region = "us-central1" + distribution_policy_zones = ["us-central1-a", "us-central1-f"] version { - instance_template = "${google_compute_instance_template.appserver.self_link}" + instance_template = google_compute_instance_template.appserver.self_link } - region = "us-central1" - distribution_policy_zones = ["us-central1-a", "us-central1-f"] - target_pools = ["${google_compute_target_pool.appserver.self_link}"] + target_pools = [google_compute_target_pool.appserver.self_link] target_size = 2 named_port { @@ -52,11 +52,10 @@ resource "google_compute_region_instance_group_manager" "appserver" { } auto_healing_policies { - health_check = "${google_compute_health_check.autohealing.self_link}" + health_check = google_compute_health_check.autohealing.self_link initial_delay_sec = 300 } } - ``` ## Example Usage with multiple versions @@ -67,14 +66,14 @@ resource "google_compute_region_instance_group_manager" "appserver" { base_instance_name = "app" region = "us-central1" - target_size = 5 + target_size = 5 version { - instance_template = "${google_compute_instance_template.appserver.self_link}" + instance_template = google_compute_instance_template.appserver.self_link } version { - instance_template = "${google_compute_instance_template.appserver-canary.self_link}" + instance_template = google_compute_instance_template.appserver-canary.self_link target_size { fixed = 1 } @@ -93,16 +92,9 @@ The following arguments are supported: appending a hyphen and a random four-character string to the base instance name. -* `instance_template` - (Deprecated) The - full URL to an instance template from which all new instances - will be created. This field is replaced by `version.instance_template`. You must - specify at least one `version` block with an `instance_template`. - -* `version` - (Optional) Application versions managed by this instance group. Each +* `version` - (Required) Application versions managed by this instance group. 
Each version deals with a specific instance template, allowing canary release scenarios. Structure is documented below. - Until `instance_template` is removed this field will be Optional to allow for a - graceful upgrade. In the Beta provider and as of 3.0.0 it will be Required. * `name` - (Required) The name of the instance group manager. Must be 1-63 characters long and comply with @@ -150,13 +142,13 @@ group. You can specify one or more values. For more information, see the [offici The `update_policy` block supports: ```hcl -update_policy{ - type = "PROACTIVE" +update_policy { + type = "PROACTIVE" instance_redistribution_type = "PROACTIVE" - minimal_action = "REPLACE" - max_surge_percent = 20 - max_unavailable_fixed = 2 - min_ready_sec = 50 + minimal_action = "REPLACE" + max_surge_percent = 20 + max_unavailable_fixed = 2 + min_ready_sec = 50 } ``` @@ -195,21 +187,23 @@ The `version` block supports: ```hcl version { - name = "appserver-canary" - instance_template = "${google_compute_instance_template.appserver-canary.self_link}" - target_size { - fixed = 1 - } + name = "appserver-canary" + instance_template = google_compute_instance_template.appserver-canary.self_link + + target_size { + fixed = 1 + } } ``` ```hcl version { - name = "appserver-canary" - instance_template = "${google_compute_instance_template.appserver-canary.self_link}" - target_size { - percent = 20 - } + name = "appserver-canary" + instance_template = google_compute_instance_template.appserver-canary.self_link + + target_size { + percent = 20 + } } ``` diff --git a/website/docs/r/compute_reservation.html.markdown b/website/docs/r/compute_reservation.html.markdown index 98da578395b..caf0eb9d5b4 100644 --- a/website/docs/r/compute_reservation.html.markdown +++ b/website/docs/r/compute_reservation.html.markdown @@ -55,7 +55,7 @@ resource "google_compute_reservation" "gce_reservation" { count = 1 instance_properties { min_cpu_platform = "Intel Cascade Lake" - machine_type = "n2-standard-2" + machine_type = "n2-standard-2" } } } diff --git a/website/docs/r/compute_resource_policy.html.markdown b/website/docs/r/compute_resource_policy.html.markdown index d41aae970be..b704b5c8cd4 100644 --- a/website/docs/r/compute_resource_policy.html.markdown +++ b/website/docs/r/compute_resource_policy.html.markdown @@ -36,13 +36,13 @@ A policy that can be attached to a resource to specify or schedule actions on th ```hcl resource "google_compute_resource_policy" "foo" { - name = "policy" + name = "policy" region = "us-central1" snapshot_schedule_policy { schedule { daily_schedule { days_in_cycle = 1 - start_time = "04:00" + start_time = "04:00" } } } @@ -58,17 +58,17 @@ resource "google_compute_resource_policy" "foo" { ```hcl resource "google_compute_resource_policy" "bar" { - name = "policy" + name = "policy" region = "us-central1" snapshot_schedule_policy { schedule { hourly_schedule { hours_in_cycle = 20 - start_time = "23:00" + start_time = "23:00" } } retention_policy { - max_retention_days = 10 + max_retention_days = 10 on_source_disk_delete = "KEEP_AUTO_SNAPSHOTS" } snapshot_properties { @@ -76,7 +76,7 @@ resource "google_compute_resource_policy" "bar" { my_label = "value" } storage_locations = ["us"] - guest_flush = true + guest_flush = true } } } diff --git a/website/docs/r/compute_route.html.markdown b/website/docs/r/compute_route.html.markdown index ad70668c5be..3e657e2500e 100644 --- a/website/docs/r/compute_route.html.markdown +++ b/website/docs/r/compute_route.html.markdown @@ -42,7 +42,8 @@ Engine-operated gateway. 
Packets that do not match any route in the sending virtual machine's routing table will be dropped. A Route resource must have exactly one specification of either -nextHopGateway, nextHopInstance, nextHopIp, or nextHopVpnTunnel. +nextHopGateway, nextHopInstance, nextHopIp, nextHopVpnTunnel, or +nextHopIlb. To get more information about Route, see: @@ -63,7 +64,7 @@ To get more information about Route, see: resource "google_compute_route" "default" { name = "network-route" dest_range = "15.0.0.0/24" - network = "${google_compute_network.default.name}" + network = google_compute_network.default.name next_hop_ip = "10.132.1.5" priority = 100 } @@ -82,21 +83,21 @@ resource "google_compute_network" "default" { ```hcl resource "google_compute_network" "default" { - provider = "google-beta" + provider = google-beta name = "compute-network" auto_create_subnetworks = false } resource "google_compute_subnetwork" "default" { - provider = "google-beta" + provider = google-beta name = "compute-subnet" ip_cidr_range = "10.0.1.0/24" region = "us-central1" - network = "${google_compute_network.default.self_link}" + network = google_compute_network.default.self_link } resource "google_compute_health_check" "hc" { - provider = "google-beta" + provider = google-beta name = "proxy-health-check" check_interval_sec = 1 timeout_sec = 1 @@ -107,30 +108,30 @@ resource "google_compute_health_check" "hc" { } resource "google_compute_region_backend_service" "backend" { - provider = "google-beta" - name = "compute-backend" - region = "us-central1" - health_checks = ["${google_compute_health_check.hc.self_link}"] + provider = google-beta + name = "compute-backend" + region = "us-central1" + health_checks = [google_compute_health_check.hc.self_link] } resource "google_compute_forwarding_rule" "default" { - provider = "google-beta" - name = "compute-forwarding-rule" - region = "us-central1" + provider = google-beta + name = "compute-forwarding-rule" + region = "us-central1" load_balancing_scheme = "INTERNAL" - backend_service = "${google_compute_region_backend_service.backend.self_link}" + backend_service = google_compute_region_backend_service.backend.self_link all_ports = true - network = "${google_compute_network.default.name}" - subnetwork = "${google_compute_subnetwork.default.name}" + network = google_compute_network.default.name + subnetwork = google_compute_subnetwork.default.name } resource "google_compute_route" "route-ilb-beta" { - provider = "google-beta" + provider = google-beta name = "route-ilb-beta" dest_range = "0.0.0.0/0" - network = "${google_compute_network.default.name}" - next_hop_ilb = "${google_compute_forwarding_rule.default.self_link}" + network = google_compute_network.default.name + next_hop_ilb = google_compute_forwarding_rule.default.self_link priority = 2000 } ``` diff --git a/website/docs/r/compute_router.html.markdown b/website/docs/r/compute_router.html.markdown index 6c3f17e7219..ba592343f62 100644 --- a/website/docs/r/compute_router.html.markdown +++ b/website/docs/r/compute_router.html.markdown @@ -42,7 +42,7 @@ To get more information about Router, see: ```hcl resource "google_compute_router" "foobar" { name = "my-router" - network = "${google_compute_network.foobar.name}" + network = google_compute_network.foobar.name bgp { asn = 64514 advertise_mode = "CUSTOM" @@ -57,7 +57,7 @@ resource "google_compute_router" "foobar" { } resource "google_compute_network" "foobar" { - name = "my-network" + name = "my-network" auto_create_subnetworks = false } ``` @@ -135,7 +135,7 @@ The `bgp` 
block supports: The `advertised_ip_ranges` block supports: * `range` - - (Optional) + (Required) The IP range to advertise. The value must be a CIDR-formatted string. diff --git a/website/docs/r/compute_router_nat.html.markdown b/website/docs/r/compute_router_nat.html.markdown index 99eff32775b..397495cc727 100644 --- a/website/docs/r/compute_router_nat.html.markdown +++ b/website/docs/r/compute_router_nat.html.markdown @@ -36,37 +36,37 @@ To get more information about RouterNat, see: ```hcl resource "google_compute_network" "net" { - name = "my-network" + name = "my-network" } resource "google_compute_subnetwork" "subnet" { - name = "my-subnetwork" - network = google_compute_network.net.self_link - ip_cidr_range = "10.0.0.0/16" - region = "us-central1" + name = "my-subnetwork" + network = google_compute_network.net.self_link + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" } -resource "google_compute_router" "router"{ - name = "my-router" - region = google_compute_subnetwork.subnet.region - network = google_compute_network.net.self_link +resource "google_compute_router" "router" { + name = "my-router" + region = google_compute_subnetwork.subnet.region + network = google_compute_network.net.self_link - bgp { - asn = 64514 - } + bgp { + asn = 64514 + } } resource "google_compute_router_nat" "nat" { - name = "my-router-nat" - router = google_compute_router.router.name - region = google_compute_router.router.region - nat_ip_allocate_option = "AUTO_ONLY" - source_subnetwork_ip_ranges_to_nat = "ALL_SUBNETWORKS_ALL_IP_RANGES" - - log_config { - enable = true - filter = "ERRORS_ONLY" - } + name = "my-router-nat" + router = google_compute_router.router.name + region = google_compute_router.router.region + nat_ip_allocate_option = "AUTO_ONLY" + source_subnetwork_ip_ranges_to_nat = "ALL_SUBNETWORKS_ALL_IP_RANGES" + + log_config { + enable = true + filter = "ERRORS_ONLY" + } } ``` ## Example Usage - Router Nat Manual Ips @@ -74,41 +74,41 @@ resource "google_compute_router_nat" "nat" { ```hcl resource "google_compute_network" "net" { - name = "my-network" + name = "my-network" } resource "google_compute_subnetwork" "subnet" { - name = "my-subnetwork" - network = google_compute_network.net.self_link - ip_cidr_range = "10.0.0.0/16" - region = "us-central1" + name = "my-subnetwork" + network = google_compute_network.net.self_link + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" } -resource "google_compute_router" "router"{ - name = "my-router" - region = google_compute_subnetwork.subnet.region - network = google_compute_network.net.self_link +resource "google_compute_router" "router" { + name = "my-router" + region = google_compute_subnetwork.subnet.region + network = google_compute_network.net.self_link } resource "google_compute_address" "address" { - count = 2 - name = "nat-manual-ip-${count.index}" - region = google_compute_subnetwork.subnet.region + count = 2 + name = "nat-manual-ip-${count.index}" + region = google_compute_subnetwork.subnet.region } resource "google_compute_router_nat" "nat_manual" { - name = "my-router-nat" - router = google_compute_router.router.name - region = google_compute_router.router.region - - nat_ip_allocate_option = "MANUAL_ONLY" - nat_ips = google_compute_address.address[*].self_link - - source_subnetwork_ip_ranges_to_nat = "LIST_OF_SUBNETWORKS" - subnetwork { - name = google_compute_subnetwork.default.self_link - source_ip_ranges_to_nat = ["ALL_IP_RANGES"] - } + name = "my-router-nat" + router = google_compute_router.router.name + region = 
google_compute_router.router.region + + nat_ip_allocate_option = "MANUAL_ONLY" + nat_ips = google_compute_address.address[*].self_link + + source_subnetwork_ip_ranges_to_nat = "LIST_OF_SUBNETWORKS" + subnetwork { + name = google_compute_subnetwork.subnet.self_link + source_ip_ranges_to_nat = ["ALL_IP_RANGES"] + } } ``` diff --git a/website/docs/r/compute_security_policy.html.markdown b/website/docs/r/compute_security_policy.html.markdown index 778bd199eda..88ea7b09f20 100644 --- a/website/docs/r/compute_security_policy.html.markdown +++ b/website/docs/r/compute_security_policy.html.markdown @@ -106,8 +106,10 @@ exported: ## Import -Security policies can be imported using the `name`, e.g. +Security policies can be imported using any of the following formats: ``` -$ terraform import google_compute_security_policy.policy my-policy +$ terraform import google_compute_security_policy.policy projects/{{project}}/global/securityPolicies/{{name}} +$ terraform import google_compute_security_policy.policy {{project}}/{{name}} +$ terraform import google_compute_security_policy.policy {{name}} ``` diff --git a/website/docs/r/compute_shared_vpc_host_project.html.markdown b/website/docs/r/compute_shared_vpc_host_project.html.markdown index cbefbe3e9b8..6a5a56f5cfa 100644 --- a/website/docs/r/compute_shared_vpc_host_project.html.markdown +++ b/website/docs/r/compute_shared_vpc_host_project.html.markdown @@ -28,11 +28,12 @@ resource "google_compute_shared_vpc_host_project" "host" { # A service project gains access to network resources provided by its # associated host project. resource "google_compute_shared_vpc_service_project" "service1" { - host_project = "${google_compute_shared_vpc_host_project.host.project}" + host_project = google_compute_shared_vpc_host_project.host.project service_project = "service-project-id-1" } + resource "google_compute_shared_vpc_service_project" "service2" { - host_project = "${google_compute_shared_vpc_host_project.host.project}" + host_project = google_compute_shared_vpc_host_project.host.project service_project = "service-project-id-2" } ``` diff --git a/website/docs/r/compute_snapshot.html.markdown b/website/docs/r/compute_snapshot.html.markdown index 43b825f92bd..2793b3b2b4a 100644 --- a/website/docs/r/compute_snapshot.html.markdown +++ b/website/docs/r/compute_snapshot.html.markdown @@ -52,25 +52,25 @@ To get more information about Snapshot, see: ```hcl resource "google_compute_snapshot" "snapshot" { - name = "my-snapshot" - source_disk = "${google_compute_disk.persistent.name}" - zone = "us-central1-a" - labels = { - my_label = "value" - } + name = "my-snapshot" + source_disk = google_compute_disk.persistent.name + zone = "us-central1-a" + labels = { + my_label = "value" + } } data "google_compute_image" "debian" { - family = "debian-9" - project = "debian-cloud" + family = "debian-9" + project = "debian-cloud" } resource "google_compute_disk" "persistent" { - name = "debian-disk" - image = "${data.google_compute_image.debian.self_link}" - size = 10 - type = "pd-ssd" - zone = "us-central1-a" + name = "debian-disk" + image = data.google_compute_image.debian.self_link + size = 10 + type = "pd-ssd" + zone = "us-central1-a" } ``` @@ -127,7 +127,7 @@ The following arguments are supported: The `snapshot_encryption_key` block supports: * `raw_key` - - (Optional) + (Required) Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to either encrypt or decrypt this resource.
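Since `raw_key` is now required whenever a `snapshot_encryption_key` block is present, a minimal sketch of an encrypted snapshot may be useful; it reuses the `persistent` disk from the example above, the resource name `encrypted` is illustrative, and the base64 value is the 32-byte placeholder key from Google's CSEK documentation, not a real key:

```hcl
resource "google_compute_snapshot" "encrypted" {
  name        = "my-encrypted-snapshot"
  source_disk = google_compute_disk.persistent.name
  zone        = "us-central1-a"

  snapshot_encryption_key {
    # A 256-bit key, base64-encoded per RFC 4648 (placeholder value only).
    raw_key = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0="
  }
}
```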
diff --git a/website/docs/r/compute_ssl_certificate.html.markdown b/website/docs/r/compute_ssl_certificate.html.markdown index 11e06628be9..0be15bb85bc 100644 --- a/website/docs/r/compute_ssl_certificate.html.markdown +++ b/website/docs/r/compute_ssl_certificate.html.markdown @@ -45,8 +45,8 @@ To get more information about SslCertificate, see: resource "google_compute_ssl_certificate" "default" { name_prefix = "my-certificate-" description = "a description" - private_key = "${file("path/to/private.key")}" - certificate = "${file("path/to/certificate.crt")}" + private_key = file("path/to/private.key") + certificate = file("path/to/certificate.crt") lifecycle { create_before_destroy = true @@ -66,9 +66,9 @@ resource "google_compute_ssl_certificate" "default" { resource "google_compute_ssl_certificate" "default" { # The name will contain 8 random hex digits, # e.g. "my-certificate-48ab27cd2a" - name = "${random_id.certificate.hex}" - private_key = "${file("path/to/private.key")}" - certificate = "${file("path/to/certificate.crt")}" + name = random_id.certificate.hex + private_key = file("path/to/private.key") + certificate = file("path/to/certificate.crt") lifecycle { create_before_destroy = true @@ -81,8 +81,8 @@ resource "random_id" "certificate" { # For security, do not expose raw certificate values in the output keepers = { - private_key = "${base64sha256(file("path/to/private.key"))}" - certificate = "${base64sha256(file("path/to/certificate.crt"))}" + private_key = filebase64sha256("path/to/private.key") + certificate = filebase64sha256("path/to/certificate.crt") } } ``` @@ -107,8 +107,8 @@ resource "random_id" "certificate" { resource "google_compute_ssl_certificate" "default" { name_prefix = "my-certificate-" - private_key = "${file("path/to/private.key")}" - certificate = "${file("path/to/certificate.crt")}" + private_key = file("path/to/private.key") + certificate = file("path/to/certificate.crt") lifecycle { create_before_destroy = true @@ -117,15 +117,15 @@ resource "google_compute_ssl_certificate" "default" { resource "google_compute_target_https_proxy" "default" { name = "test-proxy" - url_map = "${google_compute_url_map.default.self_link}" - ssl_certificates = ["${google_compute_ssl_certificate.default.self_link}"] + url_map = google_compute_url_map.default.self_link + ssl_certificates = [google_compute_ssl_certificate.default.self_link] } resource "google_compute_url_map" "default" { name = "url-map" description = "a description" - default_service = "${google_compute_backend_service.default.self_link}" + default_service = google_compute_backend_service.default.self_link host_rule { hosts = ["mysite.com"] @@ -134,11 +134,11 @@ resource "google_compute_url_map" "default" { path_matcher { name = "allpaths" - default_service = "${google_compute_backend_service.default.self_link}" + default_service = google_compute_backend_service.default.self_link path_rule { paths = ["/*"] - service = "${google_compute_backend_service.default.self_link}" + service = google_compute_backend_service.default.self_link } } } @@ -149,7 +149,7 @@ resource "google_compute_backend_service" "default" { protocol = "HTTP" timeout_sec = 10 - health_checks = ["${google_compute_http_health_check.default.self_link}"] + health_checks = [google_compute_http_health_check.default.self_link] } resource "google_compute_http_health_check" "default" { diff --git a/website/docs/r/compute_subnetwork.html.markdown b/website/docs/r/compute_subnetwork.html.markdown index 57ae263774b..84c10cceedb 100644 --- 
a/website/docs/r/compute_subnetwork.html.markdown +++ b/website/docs/r/compute_subnetwork.html.markdown @@ -67,7 +67,7 @@ resource "google_compute_subnetwork" "network-with-private-secondary-ip-ranges" name = "test-subnetwork" ip_cidr_range = "10.2.0.0/16" region = "us-central1" - network = "${google_compute_network.custom-test.self_link}" + network = google_compute_network.custom-test.self_link secondary_ip_range { range_name = "tf-test-secondary-range-update1" ip_cidr_range = "192.168.10.0/24" @@ -92,7 +92,7 @@ resource "google_compute_subnetwork" "subnet-with-logging" { name = "log-test-subnetwork" ip_cidr_range = "10.2.0.0/16" region = "us-central1" - network = "${google_compute_network.custom-test.self_link}" + network = google_compute_network.custom-test.self_link log_config { aggregation_interval = "INTERVAL_10_MIN" @@ -116,18 +116,18 @@ resource "google_compute_network" "custom-test" { ```hcl resource "google_compute_subnetwork" "network-for-l7lb" { - provider = "google-beta" + provider = google-beta name = "l7lb-test-subnetwork" ip_cidr_range = "10.0.0.0/22" region = "us-central1" purpose = "INTERNAL_HTTPS_LOAD_BALANCER" role = "ACTIVE" - network = "${google_compute_network.custom-test.self_link}" + network = google_compute_network.custom-test.self_link } resource "google_compute_network" "custom-test" { - provider = "google-beta" + provider = google-beta name = "l7lb-test-network" auto_create_subnetworks = false @@ -171,10 +171,6 @@ The following arguments are supported: you create the resource. This field can be set only at resource creation time. -* `enable_flow_logs` - - (Optional, Deprecated) - Whether to enable flow logging for this subnetwork. - * `secondary_ip_range` - (Optional) An array of configurations for secondary IP ranges for VM instances @@ -197,7 +193,8 @@ The following arguments are supported: * `log_config` - (Optional) Denotes the logging options for the subnetwork flow logs. If logging is enabled - logs will be exported to Stackdriver. Structure is documented below. + logs will be exported to Stackdriver. This field cannot be set if the `purpose` of this + subnetwork is `INTERNAL_HTTPS_LOAD_BALANCER`. Structure is documented below. * `project` - (Optional) The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
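With `enable_flow_logs` removed, flow logging is controlled entirely by the presence of a `log_config` block. A minimal migration sketch, assuming the 2.x configuration simply set `enable_flow_logs = true` (the resource name `logged` and the field values are illustrative; omitted fields fall back to provider defaults):

```hcl
resource "google_compute_subnetwork" "logged" {
  name          = "log-test-subnetwork"
  ip_cidr_range = "10.2.0.0/16"
  region        = "us-central1"
  network       = google_compute_network.custom-test.self_link

  # Replaces `enable_flow_logs = true`; the block itself enables logging.
  log_config {
    aggregation_interval = "INTERVAL_10_MIN"
    flow_sampling        = 0.5
  }
}
```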
diff --git a/website/docs/r/compute_target_http_proxy.html.markdown b/website/docs/r/compute_target_http_proxy.html.markdown index 2998e439313..49f3175c3a0 100644 --- a/website/docs/r/compute_target_http_proxy.html.markdown +++ b/website/docs/r/compute_target_http_proxy.html.markdown @@ -43,13 +43,13 @@ To get more information about TargetHttpProxy, see: ```hcl resource "google_compute_target_http_proxy" "default" { - name = "test-proxy" - url_map = "${google_compute_url_map.default.self_link}" + name = "test-proxy" + url_map = google_compute_url_map.default.self_link } resource "google_compute_url_map" "default" { - name = "url-map" - default_service = "${google_compute_backend_service.default.self_link}" + name = "url-map" + default_service = google_compute_backend_service.default.self_link host_rule { hosts = ["mysite.com"] @@ -58,11 +58,11 @@ resource "google_compute_url_map" "default" { path_matcher { name = "allpaths" - default_service = "${google_compute_backend_service.default.self_link}" + default_service = google_compute_backend_service.default.self_link path_rule { paths = ["/*"] - service = "${google_compute_backend_service.default.self_link}" + service = google_compute_backend_service.default.self_link } } } @@ -73,7 +73,7 @@ resource "google_compute_backend_service" "default" { protocol = "HTTP" timeout_sec = 10 - health_checks = ["${google_compute_http_health_check.default.self_link}"] + health_checks = [google_compute_http_health_check.default.self_link] } resource "google_compute_http_health_check" "default" { diff --git a/website/docs/r/compute_target_https_proxy.html.markdown b/website/docs/r/compute_target_https_proxy.html.markdown index 9c38b4976f6..1a7d41410c3 100644 --- a/website/docs/r/compute_target_https_proxy.html.markdown +++ b/website/docs/r/compute_target_https_proxy.html.markdown @@ -44,21 +44,21 @@ To get more information about TargetHttpsProxy, see: ```hcl resource "google_compute_target_https_proxy" "default" { name = "test-proxy" - url_map = "${google_compute_url_map.default.self_link}" - ssl_certificates = ["${google_compute_ssl_certificate.default.self_link}"] + url_map = google_compute_url_map.default.self_link + ssl_certificates = [google_compute_ssl_certificate.default.self_link] } resource "google_compute_ssl_certificate" "default" { name = "my-certificate" - private_key = "${file("path/to/private.key")}" - certificate = "${file("path/to/certificate.crt")}" + private_key = file("path/to/private.key") + certificate = file("path/to/certificate.crt") } resource "google_compute_url_map" "default" { name = "url-map" description = "a description" - default_service = "${google_compute_backend_service.default.self_link}" + default_service = google_compute_backend_service.default.self_link host_rule { hosts = ["mysite.com"] @@ -67,11 +67,11 @@ resource "google_compute_url_map" "default" { path_matcher { name = "allpaths" - default_service = "${google_compute_backend_service.default.self_link}" + default_service = google_compute_backend_service.default.self_link path_rule { paths = ["/*"] - service = "${google_compute_backend_service.default.self_link}" + service = google_compute_backend_service.default.self_link } } } @@ -82,7 +82,7 @@ resource "google_compute_backend_service" "default" { protocol = "HTTP" timeout_sec = 10 - health_checks = ["${google_compute_http_health_check.default.self_link}"] + health_checks = [google_compute_http_health_check.default.self_link] } resource "google_compute_http_health_check" "default" { diff --git 
a/website/docs/r/compute_target_instance.html.markdown b/website/docs/r/compute_target_instance.html.markdown index b832ff3c6bb..a1e0ca72842 100644 --- a/website/docs/r/compute_target_instance.html.markdown +++ b/website/docs/r/compute_target_instance.html.markdown @@ -47,8 +47,8 @@ To get more information about TargetInstance, see: ```hcl resource "google_compute_target_instance" "default" { - name = "target" - instance = "${google_compute_instance.target-vm.self_link}" + name = "target" + instance = google_compute_instance.target-vm.self_link } data "google_compute_image" "vmimage" { @@ -62,8 +62,8 @@ resource "google_compute_instance" "target-vm" { zone = "us-central1-a" boot_disk { - initialize_params{ - image = "${data.google_compute_image.vmimage.self_link}" + initialize_params { + image = data.google_compute_image.vmimage.self_link } } diff --git a/website/docs/r/compute_target_pool.html.markdown b/website/docs/r/compute_target_pool.html.markdown index 4d7a7453385..47119d23d41 100644 --- a/website/docs/r/compute_target_pool.html.markdown +++ b/website/docs/r/compute_target_pool.html.markdown @@ -28,7 +28,7 @@ resource "google_compute_target_pool" "default" { ] health_checks = [ - "${google_compute_http_health_check.default.name}", + google_compute_http_health_check.default.name, ] } @@ -85,8 +85,11 @@ exported: ## Import -Target pools can be imported using the `name`, e.g. +Target pools can be imported using any of the following formats: ``` -$ terraform import google_compute_target_pool.default instance-pool +$ terraform import google_compute_target_pool.default projects/{{project}}/regions/{{region}}/targetPools/{{name}} +$ terraform import google_compute_target_pool.default {{project}}/{{region}}/{{name}} +$ terraform import google_compute_target_pool.default {{region}}/{{name}} +$ terraform import google_compute_target_pool.default {{name}} ``` diff --git a/website/docs/r/compute_target_ssl_proxy.html.markdown b/website/docs/r/compute_target_ssl_proxy.html.markdown index 2bafca006e8..ae95f1b4c05 100644 --- a/website/docs/r/compute_target_ssl_proxy.html.markdown +++ b/website/docs/r/compute_target_ssl_proxy.html.markdown @@ -46,20 +46,20 @@ To get more information about TargetSslProxy, see: ```hcl resource "google_compute_target_ssl_proxy" "default" { name = "test-proxy" - backend_service = "${google_compute_backend_service.default.self_link}" - ssl_certificates = ["${google_compute_ssl_certificate.default.self_link}"] + backend_service = google_compute_backend_service.default.self_link + ssl_certificates = [google_compute_ssl_certificate.default.self_link] } resource "google_compute_ssl_certificate" "default" { name = "default-cert" - private_key = "${file("path/to/private.key")}" - certificate = "${file("path/to/certificate.crt")}" + private_key = file("path/to/private.key") + certificate = file("path/to/certificate.crt") } resource "google_compute_backend_service" "default" { name = "backend-service" protocol = "SSL" - health_checks = ["${google_compute_health_check.default.self_link}"] + health_checks = [google_compute_health_check.default.self_link] } resource "google_compute_health_check" "default" { diff --git a/website/docs/r/compute_target_tcp_proxy.html.markdown b/website/docs/r/compute_target_tcp_proxy.html.markdown index 34d55abe099..d36338ab80d 100644 --- a/website/docs/r/compute_target_tcp_proxy.html.markdown +++ b/website/docs/r/compute_target_tcp_proxy.html.markdown @@ -46,15 +46,15 @@ To get more information about TargetTcpProxy, see: ```hcl resource 
"google_compute_target_tcp_proxy" "default" { name = "test-proxy" - backend_service = "${google_compute_backend_service.default.self_link}" + backend_service = google_compute_backend_service.default.self_link } resource "google_compute_backend_service" "default" { - name = "backend-service" - protocol = "TCP" - timeout_sec = 10 + name = "backend-service" + protocol = "TCP" + timeout_sec = 10 - health_checks = ["${google_compute_health_check.default.self_link}"] + health_checks = [google_compute_health_check.default.self_link] } resource "google_compute_health_check" "default" { diff --git a/website/docs/r/compute_url_map.html.markdown b/website/docs/r/compute_url_map.html.markdown index 6429659fc70..0c19607dbd6 100644 --- a/website/docs/r/compute_url_map.html.markdown +++ b/website/docs/r/compute_url_map.html.markdown @@ -41,7 +41,7 @@ resource "google_compute_url_map" "urlmap" { name = "urlmap" description = "a description" - default_service = "${google_compute_backend_service.home.self_link}" + default_service = google_compute_backend_service.home.self_link host_rule { hosts = ["mysite.com"] @@ -50,26 +50,26 @@ resource "google_compute_url_map" "urlmap" { path_matcher { name = "allpaths" - default_service = "${google_compute_backend_service.home.self_link}" + default_service = google_compute_backend_service.home.self_link path_rule { paths = ["/home"] - service = "${google_compute_backend_service.home.self_link}" + service = google_compute_backend_service.home.self_link } path_rule { paths = ["/login"] - service = "${google_compute_backend_service.login.self_link}" + service = google_compute_backend_service.login.self_link } path_rule { paths = ["/static"] - service = "${google_compute_backend_bucket.static.self_link}" + service = google_compute_backend_bucket.static.self_link } } test { - service = "${google_compute_backend_service.home.self_link}" + service = google_compute_backend_service.home.self_link host = "hi.com" path = "/home" } @@ -81,7 +81,7 @@ resource "google_compute_backend_service" "login" { protocol = "HTTP" timeout_sec = 10 - health_checks = ["${google_compute_http_health_check.default.self_link}"] + health_checks = [google_compute_http_health_check.default.self_link] } resource "google_compute_backend_service" "home" { @@ -90,7 +90,7 @@ resource "google_compute_backend_service" "home" { protocol = "HTTP" timeout_sec = 10 - health_checks = ["${google_compute_http_health_check.default.self_link}"] + health_checks = [google_compute_http_health_check.default.self_link] } resource "google_compute_http_health_check" "default" { @@ -102,7 +102,7 @@ resource "google_compute_http_health_check" "default" { resource "google_compute_backend_bucket" "static" { name = "static-asset-backend-bucket" - bucket_name = "${google_storage_bucket.static.name}" + bucket_name = google_storage_bucket.static.name enable_cdn = true } diff --git a/website/docs/r/compute_vpn_gateway.html.markdown b/website/docs/r/compute_vpn_gateway.html.markdown index a8475220c0d..1ff467eb379 100644 --- a/website/docs/r/compute_vpn_gateway.html.markdown +++ b/website/docs/r/compute_vpn_gateway.html.markdown @@ -41,38 +41,38 @@ To get more information about VpnGateway, see: ```hcl resource "google_compute_vpn_gateway" "target_gateway" { name = "vpn1" - network = "${google_compute_network.network1.self_link}" + network = google_compute_network.network1.self_link } resource "google_compute_network" "network1" { - name = "network1" + name = "network1" } resource "google_compute_address" "vpn_static_ip" { - name = 
"vpn-static-ip" + name = "vpn-static-ip" } resource "google_compute_forwarding_rule" "fr_esp" { name = "fr-esp" ip_protocol = "ESP" - ip_address = "${google_compute_address.vpn_static_ip.address}" - target = "${google_compute_vpn_gateway.target_gateway.self_link}" + ip_address = google_compute_address.vpn_static_ip.address + target = google_compute_vpn_gateway.target_gateway.self_link } resource "google_compute_forwarding_rule" "fr_udp500" { name = "fr-udp500" ip_protocol = "UDP" port_range = "500" - ip_address = "${google_compute_address.vpn_static_ip.address}" - target = "${google_compute_vpn_gateway.target_gateway.self_link}" + ip_address = google_compute_address.vpn_static_ip.address + target = google_compute_vpn_gateway.target_gateway.self_link } resource "google_compute_forwarding_rule" "fr_udp4500" { name = "fr-udp4500" ip_protocol = "UDP" port_range = "4500" - ip_address = "${google_compute_address.vpn_static_ip.address}" - target = "${google_compute_vpn_gateway.target_gateway.self_link}" + ip_address = google_compute_address.vpn_static_ip.address + target = google_compute_vpn_gateway.target_gateway.self_link } resource "google_compute_vpn_tunnel" "tunnel1" { @@ -80,22 +80,22 @@ resource "google_compute_vpn_tunnel" "tunnel1" { peer_ip = "15.0.0.120" shared_secret = "a secret message" - target_vpn_gateway = "${google_compute_vpn_gateway.target_gateway.self_link}" + target_vpn_gateway = google_compute_vpn_gateway.target_gateway.self_link depends_on = [ - "google_compute_forwarding_rule.fr_esp", - "google_compute_forwarding_rule.fr_udp500", - "google_compute_forwarding_rule.fr_udp4500", + google_compute_forwarding_rule.fr_esp, + google_compute_forwarding_rule.fr_udp500, + google_compute_forwarding_rule.fr_udp4500, ] } resource "google_compute_route" "route1" { name = "route1" - network = "${google_compute_network.network1.name}" + network = google_compute_network.network1.name dest_range = "15.0.0.0/24" priority = 1000 - next_hop_vpn_tunnel = "${google_compute_vpn_tunnel.tunnel1.self_link}" + next_hop_vpn_tunnel = google_compute_vpn_tunnel.tunnel1.self_link } ``` diff --git a/website/docs/r/compute_vpn_tunnel.html.markdown b/website/docs/r/compute_vpn_tunnel.html.markdown index 202ab664f72..490bb7a8f67 100644 --- a/website/docs/r/compute_vpn_tunnel.html.markdown +++ b/website/docs/r/compute_vpn_tunnel.html.markdown @@ -50,58 +50,58 @@ resource "google_compute_vpn_tunnel" "tunnel1" { peer_ip = "15.0.0.120" shared_secret = "a secret message" - target_vpn_gateway = "${google_compute_vpn_gateway.target_gateway.self_link}" + target_vpn_gateway = google_compute_vpn_gateway.target_gateway.self_link depends_on = [ - "google_compute_forwarding_rule.fr_esp", - "google_compute_forwarding_rule.fr_udp500", - "google_compute_forwarding_rule.fr_udp4500", + google_compute_forwarding_rule.fr_esp, + google_compute_forwarding_rule.fr_udp500, + google_compute_forwarding_rule.fr_udp4500, ] } resource "google_compute_vpn_gateway" "target_gateway" { name = "vpn1" - network = "${google_compute_network.network1.self_link}" + network = google_compute_network.network1.self_link } resource "google_compute_network" "network1" { - name = "network1" + name = "network1" } resource "google_compute_address" "vpn_static_ip" { - name = "vpn-static-ip" + name = "vpn-static-ip" } resource "google_compute_forwarding_rule" "fr_esp" { name = "fr-esp" ip_protocol = "ESP" - ip_address = "${google_compute_address.vpn_static_ip.address}" - target = "${google_compute_vpn_gateway.target_gateway.self_link}" + ip_address = 
google_compute_address.vpn_static_ip.address + target = google_compute_vpn_gateway.target_gateway.self_link } resource "google_compute_forwarding_rule" "fr_udp500" { name = "fr-udp500" ip_protocol = "UDP" port_range = "500" - ip_address = "${google_compute_address.vpn_static_ip.address}" - target = "${google_compute_vpn_gateway.target_gateway.self_link}" + ip_address = google_compute_address.vpn_static_ip.address + target = google_compute_vpn_gateway.target_gateway.self_link } resource "google_compute_forwarding_rule" "fr_udp4500" { name = "fr-udp4500" ip_protocol = "UDP" port_range = "4500" - ip_address = "${google_compute_address.vpn_static_ip.address}" - target = "${google_compute_vpn_gateway.target_gateway.self_link}" + ip_address = google_compute_address.vpn_static_ip.address + target = google_compute_vpn_gateway.target_gateway.self_link } resource "google_compute_route" "route1" { name = "route1" - network = "${google_compute_network.network1.name}" + network = google_compute_network.network1.name dest_range = "15.0.0.0/24" priority = 1000 - next_hop_vpn_tunnel = "${google_compute_vpn_tunnel.tunnel1.self_link}" + next_hop_vpn_tunnel = google_compute_vpn_tunnel.tunnel1.self_link } ```
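The rewrites above follow the general Terraform 0.12 pattern applied throughout these examples: interpolation-only expressions drop the `"${...}"` wrapping, and `depends_on` takes bare resource references rather than quoted strings. A condensed sketch (the resource name `example` is illustrative; the references reuse resources from the example above):

```hcl
resource "google_compute_route" "example" {
  name       = "route-example"
  dest_range = "15.0.0.0/24"
  priority   = 1000

  # 0.11: network = "${google_compute_network.network1.name}"
  network = google_compute_network.network1.name

  # 0.11: next_hop_vpn_tunnel = "${google_compute_vpn_tunnel.tunnel1.self_link}"
  next_hop_vpn_tunnel = google_compute_vpn_tunnel.tunnel1.self_link

  # 0.11: depends_on = ["google_compute_vpn_gateway.target_gateway"]
  depends_on = [google_compute_vpn_gateway.target_gateway]
}
```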
@@ -114,17 +114,17 @@ resource "google_compute_route" "route1" { ```hcl resource "google_compute_vpn_tunnel" "tunnel1" { - provider = "google-beta" + provider = google-beta name = "tunnel1" peer_ip = "15.0.0.120" shared_secret = "a secret message" - target_vpn_gateway = "${google_compute_vpn_gateway.target_gateway.self_link}" + target_vpn_gateway = google_compute_vpn_gateway.target_gateway.self_link depends_on = [ - "google_compute_forwarding_rule.fr_esp", - "google_compute_forwarding_rule.fr_udp500", - "google_compute_forwarding_rule.fr_udp4500", + google_compute_forwarding_rule.fr_esp, + google_compute_forwarding_rule.fr_udp500, + google_compute_forwarding_rule.fr_udp4500, ] labels = { @@ -133,58 +133,58 @@ resource "google_compute_vpn_tunnel" "tunnel1" { } resource "google_compute_vpn_gateway" "target_gateway" { - provider = "google-beta" - name = "vpn1" - network = "${google_compute_network.network1.self_link}" + provider = google-beta + name = "vpn1" + network = google_compute_network.network1.self_link } resource "google_compute_network" "network1" { - provider = "google-beta" - name = "network1" + provider = google-beta + name = "network1" } resource "google_compute_address" "vpn_static_ip" { - provider = "google-beta" - name = "vpn-static-ip" + provider = google-beta + name = "vpn-static-ip" } resource "google_compute_forwarding_rule" "fr_esp" { - provider = "google-beta" + provider = google-beta name = "fr-esp" ip_protocol = "ESP" - ip_address = "${google_compute_address.vpn_static_ip.address}" - target = "${google_compute_vpn_gateway.target_gateway.self_link}" + ip_address = google_compute_address.vpn_static_ip.address + target = google_compute_vpn_gateway.target_gateway.self_link } resource "google_compute_forwarding_rule" "fr_udp500" { - provider = "google-beta" + provider = google-beta name = "fr-udp500" ip_protocol = "UDP" port_range = "500" - ip_address = "${google_compute_address.vpn_static_ip.address}" - target = "${google_compute_vpn_gateway.target_gateway.self_link}" + ip_address = google_compute_address.vpn_static_ip.address + target = google_compute_vpn_gateway.target_gateway.self_link } resource "google_compute_forwarding_rule" "fr_udp4500" { - provider = "google-beta" + provider = google-beta name = "fr-udp4500" ip_protocol = "UDP" port_range = "4500" - ip_address = "${google_compute_address.vpn_static_ip.address}" - target = "${google_compute_vpn_gateway.target_gateway.self_link}" + ip_address = google_compute_address.vpn_static_ip.address + target = google_compute_vpn_gateway.target_gateway.self_link } resource "google_compute_route" "route1" { - provider = "google-beta" + provider = google-beta name = "route1" - network = "${google_compute_network.network1.name}" + network = google_compute_network.network1.name dest_range = "15.0.0.0/24" priority = 1000 - next_hop_vpn_tunnel = "${google_compute_vpn_tunnel.tunnel1.self_link}" + next_hop_vpn_tunnel = google_compute_vpn_tunnel.tunnel1.self_link } -provider "google-beta"{ +provider "google-beta" { region = "us-central1" zone = "us-central1-a" } diff --git a/website/docs/r/container_cluster.html.markdown b/website/docs/r/container_cluster.html.markdown index f06cfbaefb6..6947913c954 100644 --- a/website/docs/r/container_cluster.html.markdown +++ b/website/docs/r/container_cluster.html.markdown @@ -28,7 +28,7 @@ resource "google_container_cluster" "primary" { # separately managed node pools. So we create the smallest possible default # node pool and immediately delete it. 
remove_default_node_pool = true - initial_node_count = 1 + initial_node_count = 1 master_auth { username = "" @@ -43,7 +43,7 @@ resource "google_container_cluster" "primary" { resource "google_container_node_pool" "primary_preemptible_nodes" { name = "my-node-pool" location = "us-central1" - cluster = "${google_container_cluster.primary.name}" + cluster = google_container_cluster.primary.name node_count = 1 node_config { @@ -115,19 +115,7 @@ master will be created, as well as the default node location. If you specify a zone (such as `us-central1-a`), the cluster will be a zonal cluster with a single cluster master. If you specify a region (such as `us-west1`), the cluster will be a regional cluster with multiple masters spread across zones in -the region, and with default node locations in those zones as well. - -* `zone` - (Optional, Deprecated) The zone that the cluster master and nodes -should be created in. If specified, this cluster will be a zonal cluster. `zone` -has been deprecated in favour of `location`. - -* `region` (Optional, Deprecated) The region that the cluster master and nodes -should be created in. If specified, this cluster will be a [regional clusters](https://cloud.google.com/kubernetes-engine/docs/concepts/multi-zone-and-regional-clusters#regional) -where the cluster master and nodes (by default) will be created in several zones -throughout the region. `region` has been deprecated in favour of `location`. - -~> Only one of `location`, `zone`, and `region` may be set. If none are set, -the provider zone is used to create a zonal cluster. +the region, and with default node locations in those zones as well. * `node_locations` - (Optional) The list of zones in which the cluster's nodes are located. Nodes must be in the region of their regional cluster or in the @@ -141,23 +129,13 @@ locations. In contrast, in a regional cluster, cluster master nodes are present in multiple zones in the region. For that reason, regional clusters should be preferred. -* `additional_zones` - (Optional) The list of zones in which the cluster's nodes -should be located. These must be in the same region as the cluster zone for -zonal clusters, or in the region of a regional cluster. In a multi-zonal cluster, -the number of nodes specified in `initial_node_count` is created in -all specified zones as well as the primary zone. If specified for a regional -cluster, nodes will only be created in these zones. `additional_zones` has been -deprecated in favour of `node_locations`. - * `addons_config` - (Optional) The configuration for addons supported by GKE. Structure is documented below. * `cluster_ipv4_cidr` - (Optional) The IP address range of the Kubernetes pods -in this cluster in CIDR notation (e.g. 10.96.0.0/14). Leave blank to have one -automatically chosen or specify a /14 block in 10.0.0.0/8. This field will only -work if your cluster is not VPC-native- when an `ip_allocation_policy` block is -not defined, or `ip_allocation_policy.use_ip_aliases` is set to false. If your -cluster is VPC-native, use `ip_allocation_policy.cluster_ipv4_cidr_block`. +in this cluster in CIDR notation (e.g. `10.96.0.0/14`). Leave blank to have one +automatically chosen or specify a `/14` block in `10.0.0.0/8`. This field will +only work for routes-based clusters, where `ip_allocation_policy` is not defined.
* `cluster_autoscaling` - (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)) Per-cluster configuration of Node Auto-Provisioning with Cluster Autoscaler to @@ -200,14 +178,14 @@ number of nodes per zone. Must be set if `node_pool` is not set. If you're using set this to a value of at least `1`, alongside setting `remove_default_node_pool` to `true`. -* `ip_allocation_policy` - (Optional) Configuration for cluster IP allocation. As of now, only pre-allocated subnetworks (custom type with secondary ranges) are supported. - This will activate IP aliases. See the [official documentation](https://cloud.google.com/kubernetes-engine/docs/how-to/ip-aliases) - Structure is documented below. This field is marked to use [Attribute as Block](/docs/configuration/attr-as-blocks.html) - in order to support explicit removal with `ip_allocation_policy = []`. +* `ip_allocation_policy` - (Optional) Configuration of cluster IP allocation for +VPC-native clusters. Adding this block enables [IP aliasing](https://cloud.google.com/kubernetes-engine/docs/how-to/ip-aliases), +making the cluster VPC-native instead of routes-based. Structure is documented +below. * `logging_service` - (Optional) The logging service that the cluster should write logs to. Available options include `logging.googleapis.com`, - `logging.googleapis.com/kubernetes`, and `none`. Defaults to `logging.googleapis.com` + `logging.googleapis.com/kubernetes`, and `none`. Defaults to `logging.googleapis.com/kubernetes` * `maintenance_policy` - (Optional) The maintenance policy to use for the cluster. Structure is documented below. @@ -234,9 +212,9 @@ Structure is documented below. [the docs](https://cloud.google.com/kubernetes-engine/versioning-and-upgrades#specifying_cluster_version) describe the various acceptable formats for this field. --> If you are using the `google_container_engine_versions` datasource with a regional cluster, ensure that you have provided a `region` -to the datasource. A `region` can have a different set of supported versions than its corresponding `zone`s, and not all `zone`s in a -`region` are guaranteed to support the same version. +-> If you are using the `google_container_engine_versions` datasource with a regional cluster, ensure that you have provided a `location` +to the datasource. A region can have a different set of supported versions than its corresponding zones, and not all zones in a +region are guaranteed to support the same version. * `monitoring_service` - (Optional) The monitoring service that the cluster should write metrics to. @@ -244,7 +222,7 @@ to the datasource. A `region` can have a different set of supported versions tha VM metrics will be collected by Google Compute Engine regardless of this setting Available options include `monitoring.googleapis.com`, `monitoring.googleapis.com/kubernetes`, and `none`. - Defaults to `monitoring.googleapis.com` + Defaults to `monitoring.googleapis.com/kubernetes` * `network` - (Optional) The name or self_link of the Google Compute Engine network to which the cluster is connected. For Shared VPC, set this to the self link of the @@ -305,8 +283,8 @@ clusters with private nodes. Structure is documented below. [ResourceUsageExportConfig](https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-usage-metering) feature. Structure is documented below. -* `subnetwork` - (Optional) The name or self_link of the Google Compute Engine subnetwork in - which the cluster's instances are launched. 
+* `subnetwork` - (Optional) The name or self_link of the Google Compute Engine
+subnetwork in which the cluster's instances are launched.
 
 * `vertical_pod_autoscaling` - (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html))
 Vertical Pod Autoscaling automatically adjusts the resources of pods controlled by it.
@@ -333,10 +311,6 @@ The `addons_config` block supports:
 controller addon, which makes it easy to set up HTTP load balancers for services in a cluster.
 It is enabled by default; set `disabled = true` to disable.
 
-* `kubernetes_dashboard` - (Optional, Deprecated) The status of the Kubernetes Dashboard
-  add-on, which controls whether the Kubernetes Dashboard is enabled for this cluster.
-  It is disabled by default; set `disabled = false` to enable.
-
 * `network_policy_config` - (Optional) Whether we should enable the network policy addon
 for the master. This must be enabled in order to enable network policy for the nodes.
 To enable this, you must also define a [`network_policy`](#network_policy) block,
@@ -353,11 +327,12 @@ The `addons_config` block supports:
 
 This example `addons_config` disables two addons:
 
-```
+```hcl
 addons_config {
   http_load_balancing {
     disabled = true
   }
+
   horizontal_pod_autoscaling {
     disabled = true
   }
@@ -407,7 +382,7 @@ The `maintenance_policy` block supports:
 Specify `start_time` in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) format "HH:MM", where HH : \[00-23\] and MM : \[00-59\] GMT. For example:
 
-```
+```hcl
 maintenance_policy {
   daily_maintenance_window {
     start_time = "03:00"
@@ -437,46 +412,26 @@ In beta, one or the other of `recurring_window` and `daily_maintenance_window` i
 The `ip_allocation_policy` block supports:
 
-* `use_ip_aliases` - (Optional) Whether alias IPs will be used for pod IPs in
-the cluster. Defaults to `true` if the `ip_allocation_policy` block is defined,
-and to the API default otherwise. Prior to June 17th 2019, the default on the
-API is `false`; afterwards, it's `true`.
+* `cluster_secondary_range_name` - (Optional) The name of the existing secondary
+range in the cluster's subnetwork to use for pod IP addresses. Alternatively,
+`cluster_ipv4_cidr_block` can be used to automatically create a GKE-managed one.
 
-* `cluster_secondary_range_name` - (Optional) The name of the secondary range to be
-  used as for the cluster CIDR block. The secondary range will be used for pod IP
-  addresses. This must be an existing secondary range associated with the cluster
-  subnetwork.
-
-* `services_secondary_range_name` - (Optional) The name of the secondary range to be
-  used as for the services CIDR block. The secondary range will be used for service
-  ClusterIPs. This must be an existing secondary range associated with the cluster
-  subnetwork.
+* `services_secondary_range_name` - (Optional) The name of the existing
+secondary range in the cluster's subnetwork to use for service `ClusterIP`s.
+Alternatively, `services_ipv4_cidr_block` can be used to automatically create a
+GKE-managed one.
 
 * `cluster_ipv4_cidr_block` - (Optional) The IP address range for the cluster pod IPs.
 Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14)
 to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14)
 from the RFC-1918 private networks (e.g. 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16) to
-pick a specific range to use. This field will only work if your cluster is
-VPC-native- when `ip_allocation_policy.use_ip_aliases` is undefined or set to
-true. If your cluster is not VPC-native, use `cluster_ipv4_cidr`.
+pick a specific range to use.
 
-* `node_ipv4_cidr_block` - (Optional) The IP address range of the node IPs in this cluster.
-  This should be set only if `create_subnetwork` is true.
-  Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14)
-  to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14)
-  from the RFC-1918 private networks (e.g. 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16) to
-  pick a specific range to use.
 
 * `services_ipv4_cidr_block` - (Optional) The IP address range of the services IPs in this cluster.
-  Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14)
-  to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14)
-  from the RFC-1918 private networks (e.g. 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16) to
-  pick a specific range to use.
-
-* `create_subnetwork`- (Optional) Whether a new subnetwork will be created automatically for the cluster.
-
-* `subnetwork_name` - (Optional) A custom subnetwork name to be used if create_subnetwork is true.
-  If this field is empty, then an automatic name will be chosen for the new subnetwork.
+Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14)
+to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14)
+from the RFC-1918 private networks (e.g. 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16) to
+pick a specific range to use.
 
 The `master_auth` block supports:
@@ -488,7 +443,7 @@ The `master_auth` block supports:
 
 * `client_certificate_config` - (Optional) Whether client certificate authorization is enabled for this cluster. For example:
 
-```
+```hcl
 master_auth {
   client_certificate_config {
     issue_client_certificate = false
@@ -564,9 +519,9 @@ The `node_config` block supports:
     Note this will grant read access to ALL GCS content unless you also specify a custom role. See https://cloud.google.com/kubernetes-engine/docs/how-to/access-scopes
   * `logging-write` (`https://www.googleapis.com/auth/logging.write`),
-    if `logging_service` points to Google
+    if `logging_service` is not `none`.
   * `monitoring` (`https://www.googleapis.com/auth/monitoring`),
-    if `monitoring_service` points to Google
+    if `monitoring_service` is not `none`.
 
 * `preemptible` - (Optional) A boolean that represents whether or not the underlying node VMs are preemptible. See the [official documentation](https://cloud.google.com/container-engine/docs/preemptible-vm)
@@ -609,7 +564,7 @@ The `guest_accelerator` block supports:
 
 The `workload_identity_config` block supports:
 
 * `identity_namespace` (Required) - Currently, the only supported identity namespace is the project's default.
-```
+```hcl
 workload_identity_config {
   identity_namespace = "${data.google_project.project.project_id}.svc.id.goog"
 }
@@ -675,9 +630,10 @@ The `resource_usage_export_config` block supports:
 
 * `bigquery_destination.dataset_id` (Required) - The ID of a BigQuery Dataset. For Example:
 
-```
+```hcl
 resource_usage_export_config {
   enable_network_egress_metering = false
+
   bigquery_destination {
     dataset_id = "cluster_resource_usage"
   }
@@ -762,10 +718,12 @@ This resource provides the following
 
 ## Import
 
-GKE clusters can be imported using the `project` , `zone` or `region`, and `name`. If the project is omitted, the default
+GKE clusters can be imported using the `project`, `location`, and `name`. If the project is omitted, the default
 provider value will be used.
 
 Examples:
 
 ```
+$ terraform import google_container_cluster.mycluster projects/my-gcp-project/locations/us-east1-a/clusters/my-cluster
+
 $ terraform import google_container_cluster.mycluster my-gcp-project/us-east1-a/my-cluster
 
 $ terraform import google_container_cluster.mycluster us-east1-a/my-cluster
diff --git a/website/docs/r/container_node_pool.html.markdown b/website/docs/r/container_node_pool.html.markdown
index cce77db73f7..dbb9d8d3914 100644
--- a/website/docs/r/container_node_pool.html.markdown
+++ b/website/docs/r/container_node_pool.html.markdown
@@ -19,18 +19,18 @@ and [the API reference](https://cloud.google.com/container-engine/reference/rest
 resource "google_container_cluster" "primary" {
   name     = "my-gke-cluster"
   location = "us-central1"
-  
+
   # We can't create a cluster with no node pool defined, but we want to only use
   # separately managed node pools. So we create the smallest possible default
   # node pool and immediately delete it.
   remove_default_node_pool = true
-  initial_node_count = 1
+  initial_node_count       = 1
 }
 
 resource "google_container_node_pool" "primary_preemptible_nodes" {
   name       = "my-node-pool"
   location   = "us-central1"
-  cluster    = "${google_container_cluster.primary.name}"
+  cluster    = google_container_cluster.primary.name
   node_count = 1
 
   node_config {
@@ -51,7 +51,7 @@ resource "google_container_node_pool" "primary_preemptible_nodes" {
 resource "google_container_node_pool" "np" {
   name       = "my-node-pool"
   location   = "us-central1-a"
-  cluster    = "${google_container_cluster.primary.name}"
+  cluster    = google_container_cluster.primary.name
   node_count = 3
 
   timeouts {
@@ -94,26 +94,16 @@ resource "google_container_cluster" "primary" {
     }
   }
 }
-
 ```
 
 ## Argument Reference
 
-* `cluster` - (Required) The cluster to create the node pool for. Cluster must be present in `zone` provided for zonal clusters.
+* `cluster` - (Required) The cluster to create the node pool for. Cluster must be present in `location` provided for zonal clusters.
 
 - - -
 
 * `location` - (Optional) The location (region or zone) of the cluster.
 
-* `zone` - (Optional, Deprecated) The zone in which the cluster resides. `zone`
-has been deprecated in favor of `location`.
-
-* `region` - (Optional, Deprecated) The region in which the cluster resides (for
-regional clusters). `region` has been deprecated in favor of `location`.
-
--> Note: You must specify a `location` for either cluster type or the
-type-specific `region` for regional clusters / `zone` for zonal clusters.
- - - - * `autoscaling` - (Optional) Configuration required by cluster autoscaler to adjust diff --git a/website/docs/r/dataflow_job.html.markdown b/website/docs/r/dataflow_job.html.markdown index a93ab643ef8..7b4aa2127cb 100644 --- a/website/docs/r/dataflow_job.html.markdown +++ b/website/docs/r/dataflow_job.html.markdown @@ -18,13 +18,13 @@ the official documentation for ```hcl resource "google_dataflow_job" "big_data_job" { - name = "dataflow-job" - template_gcs_path = "gs://my-bucket/templates/template_file" - temp_gcs_location = "gs://my-bucket/tmp_dir" - parameters = { - foo = "bar" - baz = "qux" - } + name = "dataflow-job" + template_gcs_path = "gs://my-bucket/templates/template_file" + temp_gcs_location = "gs://my-bucket/tmp_dir" + parameters = { + foo = "bar" + baz = "qux" + } } ``` diff --git a/website/docs/r/dataproc_cluster.html.markdown b/website/docs/r/dataproc_cluster.html.markdown index 384554d54a3..c94aabe99b8 100644 --- a/website/docs/r/dataproc_cluster.html.markdown +++ b/website/docs/r/dataproc_cluster.html.markdown @@ -21,8 +21,8 @@ whole cluster! ```hcl resource "google_dataproc_cluster" "simplecluster" { - name = "simplecluster" - region = "us-central1" + name = "simplecluster" + region = "us-central1" } ``` @@ -30,69 +30,62 @@ resource "google_dataproc_cluster" "simplecluster" { ```hcl resource "google_dataproc_cluster" "mycluster" { - name = "mycluster" - region = "us-central1" - labels = { - foo = "bar" + name = "mycluster" + region = "us-central1" + labels = { + foo = "bar" + } + + cluster_config { + staging_bucket = "dataproc-staging-bucket" + + master_config { + num_instances = 1 + machine_type = "n1-standard-1" + disk_config { + boot_disk_type = "pd-ssd" + boot_disk_size_gb = 15 + } } - cluster_config { - staging_bucket = "dataproc-staging-bucket" - - master_config { - num_instances = 1 - machine_type = "n1-standard-1" - disk_config { - boot_disk_type = "pd-ssd" - boot_disk_size_gb = 15 - } - } - - worker_config { - num_instances = 2 - machine_type = "n1-standard-1" - min_cpu_platform = "Intel Skylake" - disk_config { - boot_disk_size_gb = 15 - num_local_ssds = 1 - } - } - - preemptible_worker_config { - num_instances = 0 - } - - # Override or set some custom properties - software_config { - image_version = "1.3.7-deb9" - override_properties = { - "dataproc:dataproc.allow.zero.workers" = "true" - } - } - - gce_cluster_config { - #network = "${google_compute_network.dataproc_network.name}" - tags = ["foo", "bar"] - service_account_scopes = [ - # User supplied scopes - "https://www.googleapis.com/auth/monitoring", - - # The following scopes necessary for the cluster to function properly are - # always added, even if not explicitly specified: - # useraccounts-ro: https://www.googleapis.com/auth/cloud.useraccounts.readonly - # storage-rw: https://www.googleapis.com/auth/devstorage.read_write - # logging-write: https://www.googleapis.com/auth/logging.write - "useraccounts-ro","storage-rw","logging-write" - ] - } + worker_config { + num_instances = 2 + machine_type = "n1-standard-1" + min_cpu_platform = "Intel Skylake" + disk_config { + boot_disk_size_gb = 15 + num_local_ssds = 1 + } + } - # You can define multiple initialization_action blocks - initialization_action { - script = "gs://dataproc-initialization-actions/stackdriver/stackdriver.sh" - timeout_sec = 500 - } + preemptible_worker_config { + num_instances = 0 + } + + # Override or set some custom properties + software_config { + image_version = "1.3.7-deb9" + override_properties = { + 
"dataproc:dataproc.allow.zero.workers" = "true" + } + } + + gce_cluster_config { + tags = ["foo", "bar"] + service_account_scopes = [ + "https://www.googleapis.com/auth/monitoring", + "useraccounts-ro", + "storage-rw", + "logging-write", + ] + } + # You can define multiple initialization_action blocks + initialization_action { + script = "gs://dataproc-initialization-actions/stackdriver/stackdriver.sh" + timeout_sec = 500 } + } } ``` @@ -100,21 +93,21 @@ resource "google_dataproc_cluster" "mycluster" { ```hcl resource "google_dataproc_cluster" "accelerated_cluster" { - name = "my-cluster-with-gpu" - region = "us-central1" + name = "my-cluster-with-gpu" + region = "us-central1" - cluster_config { - gce_cluster_config { - zone = "us-central1-a" - } - - master_config { - accelerators { - accelerator_type = "nvidia-tesla-k80" - accelerator_count = "1" - } - } + cluster_config { + gce_cluster_config { + zone = "us-central1-a" + } + + master_config { + accelerators { + accelerator_type = "nvidia-tesla-k80" + accelerator_count = "1" + } } + } } ``` @@ -189,18 +182,17 @@ The `cluster_config` block supports: The `cluster_config.gce_cluster_config` block supports: ```hcl - cluster_config { - gce_cluster_config { - - zone = "us-central1-a" + cluster_config { + gce_cluster_config { + zone = "us-central1-a" - # One of the below to hook into a custom network / subnetwork - network = "${google_compute_network.dataproc_network.name}" - subnetwork = "${google_compute_network.dataproc_subnetwork.name}" + # One of the below to hook into a custom network / subnetwork + network = google_compute_network.dataproc_network.name + subnetwork = google_compute_network.dataproc_subnetwork.name - tags = ["foo", "bar"] - } + tags = ["foo", "bar"] } + } ``` * `zone` - (Optional, Computed) The GCP zone where your data is stored and used (i.e. where @@ -248,18 +240,19 @@ The `cluster_config.gce_cluster_config` block supports: The `cluster_config.master_config` block supports: ```hcl - cluster_config { - master_config { - num_instances = 1 - machine_type = "n1-standard-1" - min_cpu_platform = "Intel Skylake" - disk_config { - boot_disk_type = "pd-ssd" - boot_disk_size_gb = 15 - num_local_ssds = 1 - } - } +cluster_config { + master_config { + num_instances = 1 + machine_type = "n1-standard-1" + min_cpu_platform = "Intel Skylake" + + disk_config { + boot_disk_type = "pd-ssd" + boot_disk_size_gb = 15 + num_local_ssds = 1 } + } +} ``` * `num_instances`- (Optional, Computed) Specifies the number of master nodes to create. @@ -306,18 +299,19 @@ if you are trying to use accelerators in a given zone. The `cluster_config.worker_config` block supports: ```hcl - cluster_config { - worker_config { - num_instances = 3 - machine_type = "n1-standard-1" - min_cpu_platform = "Intel Skylake" - disk_config { - boot_disk_type = "pd-standard" - boot_disk_size_gb = 15 - num_local_ssds = 1 - } - } +cluster_config { + worker_config { + num_instances = 3 + machine_type = "n1-standard-1" + min_cpu_platform = "Intel Skylake" + + disk_config { + boot_disk_type = "pd-standard" + boot_disk_size_gb = 15 + num_local_ssds = 1 } + } +} ``` * `num_instances`- (Optional, Computed) Specifies the number of worker nodes to create. @@ -368,16 +362,17 @@ if you are trying to use accelerators in a given zone. 
The `cluster_config.preemptible_worker_config` block supports: ```hcl - cluster_config { - preemptible_worker_config { - num_instances = 1 - disk_config { - boot_disk_type = "pd-standard" - boot_disk_size_gb = 15 - num_local_ssds = 1 - } - } +cluster_config { + preemptible_worker_config { + num_instances = 1 + + disk_config { + boot_disk_type = "pd-standard" + boot_disk_size_gb = 15 + num_local_ssds = 1 } + } +} ``` Note: Unlike `worker_config`, you cannot set the `machine_type` value directly. This @@ -404,15 +399,16 @@ will be set for you based on whatever was set for the `worker_config.machine_typ The `cluster_config.software_config` block supports: ```hcl - cluster_config { - # Override or set some custom properties - software_config { - image_version = "1.3.7-deb9" - override_properties = { - "dataproc:dataproc.allow.zero.workers" = "true" - } - } +cluster_config { + # Override or set some custom properties + software_config { + image_version = "1.3.7-deb9" + + override_properties = { + "dataproc:dataproc.allow.zero.workers" = "true" } + } +} ``` * `image_version` - (Optional, Computed) The Cloud Dataproc image version to use @@ -431,13 +427,13 @@ The `cluster_config.software_config` block supports: The `initialization_action` block (Optional) can be specified multiple times and supports: ```hcl - cluster_config { - # You can define multiple initialization_action blocks - initialization_action { - script = "gs://dataproc-initialization-actions/stackdriver/stackdriver.sh" - timeout_sec = 500 - } - } +cluster_config { + # You can define multiple initialization_action blocks + initialization_action { + script = "gs://dataproc-initialization-actions/stackdriver/stackdriver.sh" + timeout_sec = 500 + } +} ``` * `script`- (Required) The script to be executed during initialization of the cluster. @@ -452,11 +448,10 @@ The `initialization_action` block (Optional) can be specified multiple times and The `encryption_config` block supports: ```hcl - cluster_config { - encryption_config { - kms_key_name = "projects/projectId/locations/region/keyRings/keyRingName/cryptoKeys/keyName" - } - } +cluster_config { + encryption_config { + kms_key_name = "projects/projectId/locations/region/keyRings/keyRingName/cryptoKeys/keyName" + } } ``` diff --git a/website/docs/r/dataproc_cluster_iam.html.markdown b/website/docs/r/dataproc_cluster_iam.html.markdown index 0668b4b1a6b..8e9dbbe84cc 100644 --- a/website/docs/r/dataproc_cluster_iam.html.markdown +++ b/website/docs/r/dataproc_cluster_iam.html.markdown @@ -24,7 +24,7 @@ Three different resources help you manage IAM policies on dataproc clusters. 
Eac ```hcl data "google_iam_policy" "admin" { binding { - role = "roles/editor" + role = "roles/editor" members = [ "user:jane@example.com", ] @@ -32,10 +32,10 @@ data "google_iam_policy" "admin" { } resource "google_dataproc_cluster_iam_policy" "editor" { - project = "your-project" - region = "your-region" - cluster = "your-dataproc-cluster" - policy_data = "${data.google_iam_policy.admin.policy_data}" + project = "your-project" + region = "your-region" + cluster = "your-dataproc-cluster" + policy_data = data.google_iam_policy.admin.policy_data } ``` @@ -43,9 +43,9 @@ resource "google_dataproc_cluster_iam_policy" "editor" { ```hcl resource "google_dataproc_cluster_iam_binding" "editor" { - cluster = "your-dataproc-cluster" - role = "roles/editor" - members = [ + cluster = "your-dataproc-cluster" + role = "roles/editor" + members = [ "user:jane@example.com", ] } @@ -55,9 +55,9 @@ resource "google_dataproc_cluster_iam_binding" "editor" { ```hcl resource "google_dataproc_cluster_iam_member" "editor" { - cluster = "your-dataproc-cluster" - role = "roles/editor" - member = "user:jane@example.com" + cluster = "your-dataproc-cluster" + role = "roles/editor" + member = "user:jane@example.com" } ``` diff --git a/website/docs/r/dataproc_job.html.markdown b/website/docs/r/dataproc_job.html.markdown index 0fa795e1478..abfcc73da2c 100644 --- a/website/docs/r/dataproc_job.html.markdown +++ b/website/docs/r/dataproc_job.html.markdown @@ -18,58 +18,58 @@ Manages a job resource within a Dataproc cluster within GCE. For more informatio ```hcl resource "google_dataproc_cluster" "mycluster" { - name = "dproc-cluster-unique-name" - region = "us-central1" + name = "dproc-cluster-unique-name" + region = "us-central1" } # Submit an example spark job to a dataproc cluster resource "google_dataproc_job" "spark" { - region = "${google_dataproc_cluster.mycluster.region}" - force_delete = true - placement { - cluster_name = "${google_dataproc_cluster.mycluster.name}" + region = google_dataproc_cluster.mycluster.region + force_delete = true + placement { + cluster_name = google_dataproc_cluster.mycluster.name + } + + spark_config { + main_class = "org.apache.spark.examples.SparkPi" + jar_file_uris = ["file:///usr/lib/spark/examples/jars/spark-examples.jar"] + args = ["1000"] + + properties = { + "spark.logConf" = "true" } - spark_config { - main_class = "org.apache.spark.examples.SparkPi" - jar_file_uris = ["file:///usr/lib/spark/examples/jars/spark-examples.jar"] - args = ["1000"] - - properties = { - "spark.logConf" = "true" - } - - logging_config { - driver_log_levels = { - "root" = "INFO" - } - } + logging_config { + driver_log_levels = { + "root" = "INFO" + } } + } } # Submit an example pyspark job to a dataproc cluster resource "google_dataproc_job" "pyspark" { - region = "${google_dataproc_cluster.mycluster.region}" - force_delete = true - placement { - cluster_name = "${google_dataproc_cluster.mycluster.name}" - } - - pyspark_config { - main_python_file_uri = "gs://dataproc-examples-2f10d78d114f6aaec76462e3c310f31f/src/pyspark/hello-world/hello-world.py" - properties = { - "spark.logConf" = "true" - } + region = google_dataproc_cluster.mycluster.region + force_delete = true + placement { + cluster_name = google_dataproc_cluster.mycluster.name + } + + pyspark_config { + main_python_file_uri = "gs://dataproc-examples-2f10d78d114f6aaec76462e3c310f31f/src/pyspark/hello-world/hello-world.py" + properties = { + "spark.logConf" = "true" } + } } # Check out current state of the jobs output "spark_status" { - value = 
"${google_dataproc_job.spark.status.0.state}" + value = google_dataproc_job.spark.status[0].state } output "pyspark_status" { - value = "${google_dataproc_job.pyspark.status.0.state}" + value = google_dataproc_job.pyspark.status[0].state } ``` @@ -112,17 +112,15 @@ The `pyspark_config` block supports: Submitting a pyspark job to the cluster. Below is an example configuration: ```hcl - # Submit a pyspark job to the cluster resource "google_dataproc_job" "pyspark" { - ... - - pyspark_config { - main_python_file_uri = "gs://dataproc-examples-2f10d78d114f6aaec76462e3c310f31f/src/pyspark/hello-world/hello-world.py" - properties = { - "spark.logConf" = "true" - } + ... + pyspark_config { + main_python_file_uri = "gs://dataproc-examples-2f10d78d114f6aaec76462e3c310f31f/src/pyspark/hello-world/hello-world.py" + properties = { + "spark.logConf" = "true" } + } } ``` @@ -152,26 +150,24 @@ are generally applicable: The `spark_config` block supports: ```hcl - # Submit a spark job to the cluster resource "google_dataproc_job" "spark" { - ... - - spark_config { - main_class = "org.apache.spark.examples.SparkPi" - jar_file_uris = ["file:///usr/lib/spark/examples/jars/spark-examples.jar"] - args = ["1000"] - - properties = { - "spark.logConf" = "true" - } - - logging_config { - driver_log_levels = { - "root" = "INFO" - } - } + ... + spark_config { + main_class = "org.apache.spark.examples.SparkPi" + jar_file_uris = ["file:///usr/lib/spark/examples/jars/spark-examples.jar"] + args = ["1000"] + + properties = { + "spark.logConf" = "true" + } + + logging_config { + driver_log_levels = { + "root" = "INFO" + } } + } } ``` @@ -197,19 +193,17 @@ resource "google_dataproc_job" "spark" { The `hadoop_config` block supports: ```hcl - # Submit a hadoop job to the cluster resource "google_dataproc_job" "hadoop" { - ... - - hadoop_config { - main_jar_file_uri = "file:///usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar" - args = [ - "wordcount", - "file:///usr/lib/spark/NOTICE", - "gs://${google_dataproc_cluster.basic.cluster_config.0.bucket}/hadoopjob_output" - ] - } + ... + hadoop_config { + main_jar_file_uri = "file:///usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar" + args = [ + "wordcount", + "file:///usr/lib/spark/NOTICE", + "gs://${google_dataproc_cluster.basic.cluster_config[0].bucket}/hadoopjob_output", + ] + } } ``` @@ -232,18 +226,16 @@ resource "google_dataproc_job" "hadoop" { The `hive_config` block supports: ```hcl - # Submit a hive job to the cluster resource "google_dataproc_job" "hive" { - ... - - hive_config { - query_list = [ - "DROP TABLE IF EXISTS dprocjob_test", - "CREATE EXTERNAL TABLE dprocjob_test(bar int) LOCATION 'gs://${google_dataproc_cluster.basic.cluster_config.0.bucket}/hive_dprocjob_test/'", - "SELECT * FROM dprocjob_test WHERE bar > 2", - ] - } + ... + hive_config { + query_list = [ + "DROP TABLE IF EXISTS dprocjob_test", + "CREATE EXTERNAL TABLE dprocjob_test(bar int) LOCATION 'gs://${google_dataproc_cluster.basic.cluster_config[0].bucket}/hive_dprocjob_test/'", + "SELECT * FROM dprocjob_test WHERE bar > 2", + ] + } } ``` @@ -264,20 +256,18 @@ resource "google_dataproc_job" "hive" { The `pig_config` block supports: ```hcl - # Submit a pig job to the cluster resource "google_dataproc_job" "pig" { - ... 
- - pig_config { - query_list = [ - "LNS = LOAD 'file:///usr/lib/pig/LICENSE.txt ' AS (line)", - "WORDS = FOREACH LNS GENERATE FLATTEN(TOKENIZE(line)) AS word", - "GROUPS = GROUP WORDS BY word", - "WORD_COUNTS = FOREACH GROUPS GENERATE group, COUNT(WORDS)", - "DUMP WORD_COUNTS" - ] - } + ... + pig_config { + query_list = [ + "LNS = LOAD 'file:///usr/lib/pig/LICENSE.txt ' AS (line)", + "WORDS = FOREACH LNS GENERATE FLATTEN(TOKENIZE(line)) AS word", + "GROUPS = GROUP WORDS BY word", + "WORD_COUNTS = FOREACH GROUPS GENERATE group, COUNT(WORDS)", + "DUMP WORD_COUNTS", + ] + } } ``` @@ -301,18 +291,16 @@ resource "google_dataproc_job" "pig" { The `sparksql_config` block supports: ```hcl - # Submit a spark SQL job to the cluster resource "google_dataproc_job" "sparksql" { - ... - - sparksql_config { - query_list = [ - "DROP TABLE IF EXISTS dprocjob_test", - "CREATE TABLE dprocjob_test(bar int)", - "SELECT * FROM dprocjob_test WHERE bar > 2", - ] - } + ... + sparksql_config { + query_list = [ + "DROP TABLE IF EXISTS dprocjob_test", + "CREATE TABLE dprocjob_test(bar int)", + "SELECT * FROM dprocjob_test WHERE bar > 2", + ] + } } ``` diff --git a/website/docs/r/dataproc_job_iam.html.markdown b/website/docs/r/dataproc_job_iam.html.markdown index 694d3126d7f..0cdea119c9d 100644 --- a/website/docs/r/dataproc_job_iam.html.markdown +++ b/website/docs/r/dataproc_job_iam.html.markdown @@ -24,7 +24,7 @@ Three different resources help you manage IAM policies on dataproc jobs. Each of ```hcl data "google_iam_policy" "admin" { binding { - role = "roles/editor" + role = "roles/editor" members = [ "user:jane@example.com", ] @@ -32,10 +32,10 @@ data "google_iam_policy" "admin" { } resource "google_dataproc_job_iam_policy" "editor" { - project = "your-project" - region = "your-region" - job_id = "your-dataproc-job" - policy_data = "${data.google_iam_policy.admin.policy_data}" + project = "your-project" + region = "your-region" + job_id = "your-dataproc-job" + policy_data = data.google_iam_policy.admin.policy_data } ``` @@ -43,9 +43,9 @@ resource "google_dataproc_job_iam_policy" "editor" { ```hcl resource "google_dataproc_job_iam_binding" "editor" { - job_id = "your-dataproc-job" - role = "roles/editor" - members = [ + job_id = "your-dataproc-job" + role = "roles/editor" + members = [ "user:jane@example.com", ] } @@ -55,9 +55,9 @@ resource "google_dataproc_job_iam_binding" "editor" { ```hcl resource "google_dataproc_job_iam_member" "editor" { - job_id = "your-dataproc-job" - role = "roles/editor" - member = "user:jane@example.com" + job_id = "your-dataproc-job" + role = "roles/editor" + member = "user:jane@example.com" } ``` diff --git a/website/docs/r/dns_managed_zone.html.markdown b/website/docs/r/dns_managed_zone.html.markdown index 371ee44b5c8..81bc3658b25 100644 --- a/website/docs/r/dns_managed_zone.html.markdown +++ b/website/docs/r/dns_managed_zone.html.markdown @@ -44,8 +44,8 @@ To get more information about ManagedZone, see: ```hcl resource "google_dns_managed_zone" "example-zone" { - name = "example-zone" - dns_name = "example-${random_id.rnd.hex}.com." + name = "example-zone" + dns_name = "example-${random_id.rnd.hex}.com." description = "Example DNS zone" labels = { foo = "bar" @@ -66,8 +66,8 @@ resource "random_id" "rnd" { ```hcl resource "google_dns_managed_zone" "private-zone" { - name = "private-zone" - dns_name = "private.example.com." + name = "private-zone" + dns_name = "private.example.com." 
description = "Example private DNS zone" labels = { foo = "bar" @@ -77,21 +77,21 @@ resource "google_dns_managed_zone" "private-zone" { private_visibility_config { networks { - network_url = "${google_compute_network.network-1.self_link}" + network_url = google_compute_network.network-1.self_link } networks { - network_url = "${google_compute_network.network-2.self_link}" + network_url = google_compute_network.network-2.self_link } } } resource "google_compute_network" "network-1" { - name = "network-1" + name = "network-1" auto_create_subnetworks = false } resource "google_compute_network" "network-2" { - name = "network-2" + name = "network-2" auto_create_subnetworks = false } ``` @@ -100,9 +100,9 @@ resource "google_compute_network" "network-2" { ```hcl resource "google_dns_managed_zone" "private-zone" { - provider = "google-beta" - name = "private-zone" - dns_name = "private.example.com." + provider = google-beta + name = "private-zone" + dns_name = "private.example.com." description = "Example private DNS zone" labels = { foo = "bar" @@ -112,10 +112,10 @@ resource "google_dns_managed_zone" "private-zone" { private_visibility_config { networks { - network_url = "${google_compute_network.network-1.self_link}" + network_url = google_compute_network.network-1.self_link } networks { - network_url = "${google_compute_network.network-2.self_link}" + network_url = google_compute_network.network-2.self_link } } @@ -127,16 +127,15 @@ resource "google_dns_managed_zone" "private-zone" { ipv4_address = "172.16.1.20" } } - } resource "google_compute_network" "network-1" { - name = "network-1" + name = "network-1" auto_create_subnetworks = false } resource "google_compute_network" "network-2" { - name = "network-2" + name = "network-2" auto_create_subnetworks = false } ``` @@ -150,38 +149,38 @@ resource "google_compute_network" "network-2" { ```hcl resource "google_dns_managed_zone" "peering-zone" { - provider = "google-beta" + provider = google-beta - name = "peering-zone" - dns_name = "peering.example.com." + name = "peering-zone" + dns_name = "peering.example.com." description = "Example private DNS peering zone" visibility = "private" private_visibility_config { networks { - network_url = "${google_compute_network.network-source.self_link}" + network_url = google_compute_network.network-source.self_link } } peering_config { target_network { - network_url = "${google_compute_network.network-target.self_link}" + network_url = google_compute_network.network-target.self_link } } } resource "google_compute_network" "network-source" { - provider = "google-beta" + provider = google-beta - name = "network-source" + name = "network-source" auto_create_subnetworks = false } resource "google_compute_network" "network-target" { - provider = "google-beta" + provider = google-beta - name = "network-target" + name = "network-target" auto_create_subnetworks = false } @@ -283,7 +282,7 @@ The `default_key_specs` block supports: The `private_visibility_config` block supports: * `networks` - - (Optional) + (Required) The list of VPC networks that can see this zone. Until the provider updates to use the Terraform 0.12 SDK in a future release, you may experience issues with this resource while updating. 
If you've defined a `networks` block and add another `networks` block while keeping the old block, Terraform will see an incorrect diff @@ -294,7 +293,7 @@ The `private_visibility_config` block supports: The `networks` block supports: * `network_url` - - (Optional) + (Required) The fully qualified URL of the VPC network to bind to. This should be formatted like `https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network}` diff --git a/website/docs/r/dns_record_set.markdown b/website/docs/r/dns_record_set.html.markdown similarity index 80% rename from website/docs/r/dns_record_set.markdown rename to website/docs/r/dns_record_set.html.markdown index 1c48f640a0a..9bf54bf116e 100644 --- a/website/docs/r/dns_record_set.markdown +++ b/website/docs/r/dns_record_set.html.markdown @@ -24,9 +24,9 @@ resource "google_dns_record_set" "frontend" { type = "A" ttl = 300 - managed_zone = "${google_dns_managed_zone.prod.name}" + managed_zone = google_dns_managed_zone.prod.name - rrdatas = ["${google_compute_instance.frontend.network_interface.0.access_config.0.nat_ip}"] + rrdatas = [google_compute_instance.frontend.network_interface[0].access_config[0].nat_ip] } resource "google_compute_instance" "frontend" { @@ -41,8 +41,9 @@ resource "google_compute_instance" "frontend" { } network_interface { - network = "default" - access_config = {} + network = "default" + access_config { + } } } @@ -56,10 +57,10 @@ resource "google_dns_managed_zone" "prod" { ```hcl resource "google_dns_record_set" "a" { - name = "backend.${google_dns_managed_zone.prod.dns_name}" - managed_zone = "${google_dns_managed_zone.prod.name}" - type = "A" - ttl = 300 + name = "backend.${google_dns_managed_zone.prod.dns_name}" + managed_zone = google_dns_managed_zone.prod.name + type = "A" + ttl = 300 rrdatas = ["8.8.8.8"] } @@ -74,17 +75,17 @@ resource "google_dns_managed_zone" "prod" { ```hcl resource "google_dns_record_set" "mx" { - name = "${google_dns_managed_zone.prod.dns_name}" - managed_zone = "${google_dns_managed_zone.prod.name}" - type = "MX" - ttl = 3600 + name = google_dns_managed_zone.prod.dns_name + managed_zone = google_dns_managed_zone.prod.name + type = "MX" + ttl = 3600 rrdatas = [ "1 aspmx.l.google.com.", "5 alt1.aspmx.l.google.com.", "5 alt2.aspmx.l.google.com.", "10 alt3.aspmx.l.google.com.", - "10 alt4.aspmx.l.google.com." + "10 alt4.aspmx.l.google.com.", ] } @@ -100,10 +101,10 @@ Quotes (`""`) must be added around your `rrdatas` for a SPF record. Otherwise `r ```hcl resource "google_dns_record_set" "spf" { - name = "frontend.${google_dns_managed_zone.prod.dns_name}" - managed_zone = "${google_dns_managed_zone.prod.name}" - type = "TXT" - ttl = 300 + name = "frontend.${google_dns_managed_zone.prod.dns_name}" + managed_zone = google_dns_managed_zone.prod.name + type = "TXT" + ttl = 300 rrdatas = ["\"v=spf1 ip4:111.111.111.111 include:backoff.email-example.com -all\""] } @@ -120,16 +121,16 @@ resource "google_dns_managed_zone" "prod" { ```hcl resource "google_dns_record_set" "cname" { - name = "frontend.${google_dns_managed_zone.prod.dns_name}" - managed_zone = "${google_dns_managed_zone.prod.name}" - type = "CNAME" - ttl = 300 - rrdatas = ["frontend.mydomain.com."] + name = "frontend.${google_dns_managed_zone.prod.dns_name}" + managed_zone = google_dns_managed_zone.prod.name + type = "CNAME" + ttl = 300 + rrdatas = ["frontend.mydomain.com."] } resource "google_dns_managed_zone" "prod" { - name = "prod-zone" - dns_name = "prod.mydomain.com." + name = "prod-zone" + dns_name = "prod.mydomain.com." 
} ``` diff --git a/website/docs/r/endpoints_service.html.markdown b/website/docs/r/endpoints_service.html.markdown index c4ee63714ce..232068c8e5c 100644 --- a/website/docs/r/endpoints_service.html.markdown +++ b/website/docs/r/endpoints_service.html.markdown @@ -17,14 +17,14 @@ This resource creates and rolls out a Cloud Endpoints service using OpenAPI or g resource "google_endpoints_service" "openapi_service" { service_name = "api-name.endpoints.project-id.cloud.goog" project = "project-id" - openapi_config = "${file("openapi_spec.yml")}" + openapi_config = file("openapi_spec.yml") } resource "google_endpoints_service" "grpc_service" { service_name = "api-name.endpoints.project-id.cloud.goog" project = "project-id" - grpc_config = "${file("service_spec.yml")}" - protoc_output_base64 = "${base64encode(file("compiled_descriptor_file.pb"))}" + grpc_config = file("service_spec.yml") + protoc_output_base64 = base64encode(file("compiled_descriptor_file.pb")) } ``` diff --git a/website/docs/r/firestore_index.html.markdown b/website/docs/r/firestore_index.html.markdown index afd862ff136..9bdc8290c54 100644 --- a/website/docs/r/firestore_index.html.markdown +++ b/website/docs/r/firestore_index.html.markdown @@ -43,7 +43,7 @@ To get more information about Index, see: ```hcl resource "google_firestore_index" "my-index" { - project = "my-project-name" + project = "my-project-name" collection = "chatrooms" diff --git a/website/docs/r/google_billing_account_iam_binding.md b/website/docs/r/google_billing_account_iam_binding.html.markdown similarity index 100% rename from website/docs/r/google_billing_account_iam_binding.md rename to website/docs/r/google_billing_account_iam_binding.html.markdown diff --git a/website/docs/r/google_billing_account_iam_member.md b/website/docs/r/google_billing_account_iam_member.html.markdown similarity index 100% rename from website/docs/r/google_billing_account_iam_member.md rename to website/docs/r/google_billing_account_iam_member.html.markdown diff --git a/website/docs/r/google_billing_account_iam_policy.md b/website/docs/r/google_billing_account_iam_policy.html.markdown similarity index 93% rename from website/docs/r/google_billing_account_iam_policy.md rename to website/docs/r/google_billing_account_iam_policy.html.markdown index 5f571bab400..e704e910bf7 100644 --- a/website/docs/r/google_billing_account_iam_policy.md +++ b/website/docs/r/google_billing_account_iam_policy.html.markdown @@ -25,8 +25,8 @@ by use of this resource. The safest alternative is to use multiple `google_billi ```hcl resource "google_billing_account_iam_policy" "policy" { - billing_account_id = "00AA00-000AAA-00AA0A" - policy_data = "${data.google_iam_policy.admin.policy_data}" + billing_account_id = "00AA00-000AAA-00AA0A" + policy_data = data.google_iam_policy.admin.policy_data } data "google_iam_policy" "admin" { diff --git a/website/docs/r/google_folder.html.markdown b/website/docs/r/google_folder.html.markdown index fb476f3b3a4..6f90117df58 100644 --- a/website/docs/r/google_folder.html.markdown +++ b/website/docs/r/google_folder.html.markdown @@ -29,13 +29,13 @@ doc for more information. # Top-level folder under an organization. resource "google_folder" "department1" { display_name = "Department 1" - parent = "organizations/1234567" + parent = "organizations/1234567" } # Folder nested under another folder. 
resource "google_folder" "team-abc" { display_name = "Team ABC" - parent = "${google_folder.department1.name}" + parent = google_folder.department1.name } ``` diff --git a/website/docs/r/google_folder_iam_binding.html.markdown b/website/docs/r/google_folder_iam_binding.html.markdown index a5201dde457..8b8a453fffa 100644 --- a/website/docs/r/google_folder_iam_binding.html.markdown +++ b/website/docs/r/google_folder_iam_binding.html.markdown @@ -29,8 +29,8 @@ resource "google_folder" "department1" { } resource "google_folder_iam_binding" "admin" { - folder = "${google_folder.department1.name}" - role = "roles/editor" + folder = google_folder.department1.name + role = "roles/editor" members = [ "user:alice@gmail.com", diff --git a/website/docs/r/google_folder_iam_member.html.markdown b/website/docs/r/google_folder_iam_member.html.markdown index e8e4b963495..bf01ecaf8a9 100644 --- a/website/docs/r/google_folder_iam_member.html.markdown +++ b/website/docs/r/google_folder_iam_member.html.markdown @@ -26,9 +26,9 @@ resource "google_folder" "department1" { } resource "google_folder_iam_member" "admin" { - folder = "${google_folder.department1.name}" - role = "roles/editor" - member = "user:alice@gmail.com" + folder = google_folder.department1.name + role = "roles/editor" + member = "user:alice@gmail.com" } ``` diff --git a/website/docs/r/google_folder_iam_policy.html.markdown b/website/docs/r/google_folder_iam_policy.html.markdown index f709a2aa80d..59bfd031300 100644 --- a/website/docs/r/google_folder_iam_policy.html.markdown +++ b/website/docs/r/google_folder_iam_policy.html.markdown @@ -16,13 +16,13 @@ Platform folder. ```hcl resource "google_folder_iam_policy" "folder_admin_policy" { - folder = "${google_folder.department1.name}" - policy_data = "${data.google_iam_policy.admin.policy_data}" + folder = google_folder.department1.name + policy_data = data.google_iam_policy.admin.policy_data } resource "google_folder" "department1" { display_name = "Department 1" - parent = "organizations/1234567" + parent = "organizations/1234567" } data "google_iam_policy" "admin" { diff --git a/website/docs/r/google_folder_organization_policy.html.markdown b/website/docs/r/google_folder_organization_policy.html.markdown index d6687932a4a..7a4690f9d7e 100644 --- a/website/docs/r/google_folder_organization_policy.html.markdown +++ b/website/docs/r/google_folder_organization_policy.html.markdown @@ -54,7 +54,7 @@ resource "google_folder_organization_policy" "services_policy" { constraint = "serviceuser.services" list_policy { - suggested_values = "compute.googleapis.com" + suggested_value = "compute.googleapis.com" deny { values = ["cloudresourcemanager.googleapis.com"] @@ -108,7 +108,7 @@ The `list_policy` block supports: * `allow` or `deny` - (Optional) One or the other must be set. -* `suggested_values` - (Optional) The Google Cloud Console will try to default to a configuration that matches the value specified in this field. +* `suggested_value` - (Optional) The Google Cloud Console will try to default to a configuration that matches the value specified in this field. * `inherit_from_parent` - (Optional) If set to true, the values from the effective Policy of the parent resource are inherited, meaning the values set in this Policy are added to the values inherited up the hierarchy. 
@@ -137,6 +137,6 @@ exported: Folder organization policies can be imported using any of the follow formats: ``` -$ terraform import google_folder_organization_policy.policy folders/folder-1234:constraints/serviceuser.services -$ terraform import google_folder_organization_policy.policy folder-1234:serviceuser.services +$ terraform import google_folder_organization_policy.policy folders/folder-1234/constraints/serviceuser.services +$ terraform import google_folder_organization_policy.policy folder-1234/serviceuser.services ``` diff --git a/website/docs/r/google_iap_tunnel_instance_iam.markdown b/website/docs/r/google_iap_tunnel_instance_iam.html.markdown similarity index 100% rename from website/docs/r/google_iap_tunnel_instance_iam.markdown rename to website/docs/r/google_iap_tunnel_instance_iam.html.markdown diff --git a/website/docs/r/google_kms_key_ring_iam.html.markdown b/website/docs/r/google_kms_key_ring_iam.html.markdown index 4e16455d9be..b06a0bb48c9 100644 --- a/website/docs/r/google_kms_key_ring_iam.html.markdown +++ b/website/docs/r/google_kms_key_ring_iam.html.markdown @@ -33,8 +33,8 @@ data "google_iam_policy" "admin" { } resource "google_kms_key_ring_iam_policy" "key_ring" { - key_ring_id = "your-key-ring-id" - policy_data = "${data.google_iam_policy.admin.policy_data}" + key_ring_id = "your-key-ring-id" + policy_data = data.google_iam_policy.admin.policy_data } ``` diff --git a/website/docs/r/google_organization_iam_member.md b/website/docs/r/google_organization_iam_member.html.markdown similarity index 96% rename from website/docs/r/google_organization_iam_member.md rename to website/docs/r/google_organization_iam_member.html.markdown index ca03fe63628..df547c2937c 100644 --- a/website/docs/r/google_organization_iam_member.md +++ b/website/docs/r/google_organization_iam_member.html.markdown @@ -21,8 +21,8 @@ the IAM policy for an existing Google Cloud Platform Organization. 
```hcl resource "google_organization_iam_member" "binding" { org_id = "0123456789" - role = "roles/editor" - member = "user:alice@gmail.com" + role = "roles/editor" + member = "user:alice@gmail.com" } ``` diff --git a/website/docs/r/google_organization_iam_policy.md b/website/docs/r/google_organization_iam_policy.html.markdown similarity index 95% rename from website/docs/r/google_organization_iam_policy.md rename to website/docs/r/google_organization_iam_policy.html.markdown index 84962b74c2a..a65b033054a 100644 --- a/website/docs/r/google_organization_iam_policy.md +++ b/website/docs/r/google_organization_iam_policy.html.markdown @@ -28,8 +28,8 @@ Allows management of the entire IAM policy for an existing Google Cloud Platform ```hcl resource "google_organization_iam_policy" "policy" { - org_id = "123456789" - policy_data = "${data.google_iam_policy.admin.policy_data}" + org_id = "123456789" + policy_data = data.google_iam_policy.admin.policy_data } data "google_iam_policy" "admin" { diff --git a/website/docs/r/google_organization_policy.html.markdown b/website/docs/r/google_organization_policy.html.markdown index cafeb8dd6d7..784e3fc3cbf 100644 --- a/website/docs/r/google_organization_policy.html.markdown +++ b/website/docs/r/google_organization_policy.html.markdown @@ -53,7 +53,7 @@ resource "google_organization_policy" "services_policy" { constraint = "serviceuser.services" list_policy { - suggested_values = "compute.googleapis.com" + suggested_value = "compute.googleapis.com" deny { values = ["cloudresourcemanager.googleapis.com"] @@ -66,7 +66,7 @@ To restore the default organization policy, use the following instead: ```hcl resource "google_organization_policy" "services_policy" { - org_id = "123456789" + org_id = "123456789" constraint = "serviceuser.services" restore_policy { @@ -106,7 +106,7 @@ The `list_policy` block supports: * `allow` or `deny` - (Optional) One or the other must be set. -* `suggested_values` - (Optional) The Google Cloud Console will try to default to a configuration that matches the value specified in this field. +* `suggested_value` - (Optional) The Google Cloud Console will try to default to a configuration that matches the value specified in this field. * `inherit_from_parent` - (Optional) If set to true, the values from the effective Policy of the parent resource are inherited, meaning the values set in this Policy are added to the values inherited up the hierarchy. @@ -135,4 +135,7 @@ exported: Organization Policies can be imported using the `org_id` and the `constraint`, e.g. ``` -$ terraform import google_organization_policy.services_policy 123456789:constraints/serviceuser.services +$ terraform import google_organization_policy.services_policy 123456789/constraints/serviceuser.services +``` + +It is all right if the constraint contains a slash, as in the example above. diff --git a/website/docs/r/google_project.html.markdown b/website/docs/r/google_project.html.markdown index 33b93e0827b..2fc93707cd5 100755 --- a/website/docs/r/google_project.html.markdown +++ b/website/docs/r/google_project.html.markdown @@ -41,7 +41,7 @@ Terraform. Only newly added projects are affected. 
```hcl resource "google_project" "my_project" { - name = "My Project" + name = "My Project" project_id = "your-project-id" org_id = "1234567" } @@ -51,14 +51,14 @@ To create a project under a specific folder ```hcl resource "google_project" "my_project-in-a-folder" { - name = "My Project" + name = "My Project" project_id = "your-project-id" - folder_id = "${google_folder.department1.name}" + folder_id = google_folder.department1.name } resource "google_folder" "department1" { display_name = "Department 1" - parent = "organizations/1234567" + parent = "organizations/1234567" } ``` diff --git a/website/docs/r/google_project_iam.html.markdown b/website/docs/r/google_project_iam.html.markdown index 94c95c523f7..6821d1b28a0 100644 --- a/website/docs/r/google_project_iam.html.markdown +++ b/website/docs/r/google_project_iam.html.markdown @@ -33,7 +33,7 @@ Four different resources help you manage your IAM policy for a project. Each of ```hcl resource "google_project_iam_policy" "project" { project = "your-project-id" - policy_data = "${data.google_iam_policy.admin.policy_data}" + policy_data = data.google_iam_policy.admin.policy_data } data "google_iam_policy" "admin" { diff --git a/website/docs/r/google_project_organization_policy.html.markdown b/website/docs/r/google_project_organization_policy.html.markdown index 503fdb382d4..d211f132dc7 100644 --- a/website/docs/r/google_project_organization_policy.html.markdown +++ b/website/docs/r/google_project_organization_policy.html.markdown @@ -54,7 +54,7 @@ resource "google_project_organization_policy" "services_policy" { constraint = "serviceuser.services" list_policy { - suggested_values = "compute.googleapis.com" + suggested_value = "compute.googleapis.com" deny { values = ["cloudresourcemanager.googleapis.com"] @@ -107,7 +107,7 @@ The `list_policy` block supports: * `allow` or `deny` - (Optional) One or the other must be set. -* `suggested_values` - (Optional) The Google Cloud Console will try to default to a configuration that matches the value specified in this field. +* `suggested_value` - (Optional) The Google Cloud Console will try to default to a configuration that matches the value specified in this field. * `inherit_from_parent` - (Optional) If set to true, the values from the effective Policy of the parent resource are inherited, meaning the values set in this Policy are added to the values inherited up the hierarchy. diff --git a/website/docs/r/google_project_service.html.markdown b/website/docs/r/google_project_service.html.markdown index c4f22cc4fa3..6361c32fba9 100644 --- a/website/docs/r/google_project_service.html.markdown +++ b/website/docs/r/google_project_service.html.markdown @@ -14,9 +14,6 @@ Allows management of a single API service for an existing Google Cloud Platform For a list of services available, visit the [API library page](https://console.cloud.google.com/apis/library) or run `gcloud services list`. -~> **Note:** This resource _must not_ be used in conjunction with - `google_project_services` or they will fight over which services should be enabled. 
- ## Example Usage ```hcl diff --git a/website/docs/r/google_project_services.html.markdown b/website/docs/r/google_project_services.html.markdown deleted file mode 100644 index f685b10dfcc..00000000000 --- a/website/docs/r/google_project_services.html.markdown +++ /dev/null @@ -1,55 +0,0 @@ ---- -subcategory: "Cloud Platform" -layout: "google" -page_title: "Google: google_project_services" -sidebar_current: "docs-google-project-services" -description: |- - Allows management of API services for a Google Cloud Platform project. ---- - -# google\_project\_services - -Allows management of enabled API services for an existing Google Cloud -Platform project. Services in an existing project that are not defined -in the config will be removed. - -For a list of services available, visit the -[API library page](https://console.cloud.google.com/apis/library) or run `gcloud services list`. - -~> **Note:** This resource attempts to be the authoritative source on *all* enabled APIs, which often - leads to conflicts when certain actions enable other APIs. If you do not need to ensure that - *exclusively* a particular set of APIs are enabled, you should most likely use the - [google_project_service](google_project_service.html) resource, one resource per API. - -## Example Usage - -```hcl -resource "google_project_services" "project" { - project = "your-project-id" - services = ["iam.googleapis.com", "cloudresourcemanager.googleapis.com"] -} -``` - -## Argument Reference - -The following arguments are supported: - -* `project` - (Required) The project ID. - Changing this forces Terraform to attempt to disable all previously managed - API services in the previous project. - -* `services` - (Required) The list of services that are enabled. Supports - update. - -* `disable_on_destroy` - (Optional) Whether or not to disable APIs on project - when destroyed. Defaults to true. **Note**: When `disable_on_destroy` is - true and the project is changed, Terraform will force disable API services - managed by Terraform for the previous project. - -## Import - -Project services can be imported using the `project_id`, e.g. 
- -``` -$ terraform import google_project_services.my_project your-project-id -``` diff --git a/website/docs/r/google_service_account_iam.html.markdown b/website/docs/r/google_service_account_iam.html.markdown index 30e44b49591..176c88d4434 100644 --- a/website/docs/r/google_service_account_iam.html.markdown +++ b/website/docs/r/google_service_account_iam.html.markdown @@ -40,22 +40,21 @@ resource "google_service_account" "sa" { } resource "google_service_account_iam_policy" "admin-account-iam" { - service_account_id = "${google_service_account.sa.name}" - policy_data = "${data.google_iam_policy.admin.policy_data}" + service_account_id = google_service_account.sa.name + policy_data = data.google_iam_policy.admin.policy_data } ``` ## google\_service\_account\_iam\_binding ```hcl - resource "google_service_account" "sa" { account_id = "my-service-account" display_name = "A service account that only Jane can use" } resource "google_service_account_iam_binding" "admin-account-iam" { - service_account_id = "${google_service_account.sa.name}" + service_account_id = google_service_account.sa.name role = "roles/iam.serviceAccountUser" members = [ @@ -91,7 +90,8 @@ resource "google_service_account_iam_binding" "admin-account-iam" { ## google\_service\_account\_iam\_member ```hcl -data "google_compute_default_service_account" "default" { } +data "google_compute_default_service_account" "default" { +} resource "google_service_account" "sa" { account_id = "my-service-account" @@ -99,14 +99,14 @@ resource "google_service_account" "sa" { } resource "google_service_account_iam_member" "admin-account-iam" { - service_account_id = "${google_service_account.sa.name}" + service_account_id = google_service_account.sa.name role = "roles/iam.serviceAccountUser" member = "user:jane@example.com" } # Allow SA service account use the default GCE account resource "google_service_account_iam_member" "gce-default-account-iam" { - service_account_id = "${data.google_compute_default_service_account.default.name}" + service_account_id = data.google_compute_default_service_account.default.name role = "roles/iam.serviceAccountUser" member = "serviceAccount:${google_service_account.sa.email}" } @@ -186,7 +186,7 @@ $ terraform import google_service_account_iam_policy.admin-account-iam projects/ $ terraform import google_service_account_iam_binding.admin-account-iam "projects/{your-project-id}/serviceAccounts/{your-service-account-email} iam.serviceAccountUser" -$ terraform import google_service_account_iam_member.admin-account-iam "projects/{your-project-id}/serviceAccounts/{your-service-account-email} iam.serviceAccountUser user:foo@example.com" +$ terraform import google_service_account_iam_member.admin-account-iam "projects/{your-project-id}/serviceAccounts/{your-service-account-email} roles/editor user:foo@example.com" ``` With conditions: diff --git a/website/docs/r/google_service_account_key.html.markdown b/website/docs/r/google_service_account_key.html.markdown index 87f80b2e9cc..b8fbadd8f75 100644 --- a/website/docs/r/google_service_account_key.html.markdown +++ b/website/docs/r/google_service_account_key.html.markdown @@ -16,13 +16,13 @@ Creates and manages service account key-pairs, which allow the user to establish ```hcl resource "google_service_account" "myaccount" { - account_id = "myaccount" + account_id = "myaccount" display_name = "My Service Account" } resource "google_service_account_key" "mykey" { - service_account_id = "${google_service_account.myaccount.name}" - public_key_type = "TYPE_X509_PEM_FILE" + 
service_account_id = google_service_account.myaccount.name + public_key_type = "TYPE_X509_PEM_FILE" } ``` @@ -35,7 +35,7 @@ resource "google_service_account" "myaccount" { } resource "google_service_account_key" "mykey" { - service_account_id = "${google_service_account.myaccount.name}" + service_account_id = google_service_account.myaccount.name } resource "kubernetes_secret" "google-application-credentials" { @@ -43,7 +43,7 @@ resource "kubernetes_secret" "google-application-credentials" { name = "google-application-credentials" } data = { - credentials.json = "${base64decode(google_service_account_key.mykey.private_key)}" + credentials.json = base64decode(google_service_account_key.mykey.private_key) } } ``` @@ -52,13 +52,13 @@ resource "kubernetes_secret" "google-application-credentials" { ```hcl resource "google_service_account" "myaccount" { - account_id = "myaccount" + account_id = "myaccount" display_name = "My Service Account" } resource "google_service_account_key" "mykey" { - service_account_id = "${google_service_account.myaccount.name}" - public_key_type = "TYPE_X509_PEM_FILE" + service_account_id = google_service_account.myaccount.name + public_key_type = "TYPE_X509_PEM_FILE" } ``` diff --git a/website/docs/r/healthcare_dataset_iam.html.markdown b/website/docs/r/healthcare_dataset_iam.html.markdown index bc510041490..37b418c4f16 100644 --- a/website/docs/r/healthcare_dataset_iam.html.markdown +++ b/website/docs/r/healthcare_dataset_iam.html.markdown @@ -36,8 +36,8 @@ data "google_iam_policy" "admin" { } resource "google_healthcare_dataset_iam_policy" "dataset" { - dataset_id = "your-dataset-id" - policy_data = "${data.google_iam_policy.admin.policy_data}" + dataset_id = "your-dataset-id" + policy_data = data.google_iam_policy.admin.policy_data } ``` @@ -46,7 +46,7 @@ resource "google_healthcare_dataset_iam_policy" "dataset" { ```hcl resource "google_healthcare_dataset_iam_binding" "dataset" { dataset_id = "your-dataset-id" - role = "roles/editor" + role = "roles/editor" members = [ "user:jane@example.com", @@ -59,8 +59,8 @@ resource "google_healthcare_dataset_iam_binding" "dataset" { ```hcl resource "google_healthcare_dataset_iam_member" "dataset" { dataset_id = "your-dataset-id" - role = "roles/editor" - member = "user:jane@example.com" + role = "roles/editor" + member = "user:jane@example.com" } ``` diff --git a/website/docs/r/healthcare_dicom_store_iam.html.markdown b/website/docs/r/healthcare_dicom_store_iam.html.markdown index 56c5f7834ff..e871816c6cb 100644 --- a/website/docs/r/healthcare_dicom_store_iam.html.markdown +++ b/website/docs/r/healthcare_dicom_store_iam.html.markdown @@ -36,8 +36,8 @@ data "google_iam_policy" "admin" { } resource "google_healthcare_dicom_store_iam_policy" "dicom_store" { - dicom_store_id = "your-dicom-store-id" - policy_data = "${data.google_iam_policy.admin.policy_data}" + dicom_store_id = "your-dicom-store-id" + policy_data = data.google_iam_policy.admin.policy_data } ``` @@ -46,7 +46,7 @@ resource "google_healthcare_dicom_store_iam_policy" "dicom_store" { ```hcl resource "google_healthcare_dicom_store_iam_binding" "dicom_store" { dicom_store_id = "your-dicom-store-id" - role = "roles/editor" + role = "roles/editor" members = [ "user:jane@example.com", @@ -59,8 +59,8 @@ resource "google_healthcare_dicom_store_iam_binding" "dicom_store" { ```hcl resource "google_healthcare_dicom_store_iam_member" "dicom_store" { dicom_store_id = "your-dicom-store-id" - role = "roles/editor" - member = "user:jane@example.com" + role = "roles/editor" + member 
= "user:jane@example.com" } ``` diff --git a/website/docs/r/healthcare_fhir_store_iam.html.markdown b/website/docs/r/healthcare_fhir_store_iam.html.markdown index 0074adf9143..58084b82ae0 100644 --- a/website/docs/r/healthcare_fhir_store_iam.html.markdown +++ b/website/docs/r/healthcare_fhir_store_iam.html.markdown @@ -36,8 +36,8 @@ data "google_iam_policy" "admin" { } resource "google_healthcare_fhir_store_iam_policy" "fhir_store" { - fhir_store_id = "your-fhir-store-id" - policy_data = "${data.google_iam_policy.admin.policy_data}" + fhir_store_id = "your-fhir-store-id" + policy_data = data.google_iam_policy.admin.policy_data } ``` @@ -46,7 +46,7 @@ resource "google_healthcare_fhir_store_iam_policy" "fhir_store" { ```hcl resource "google_healthcare_fhir_store_iam_binding" "fhir_store" { fhir_store_id = "your-fhir-store-id" - role = "roles/editor" + role = "roles/editor" members = [ "user:jane@example.com", @@ -59,8 +59,8 @@ resource "google_healthcare_fhir_store_iam_binding" "fhir_store" { ```hcl resource "google_healthcare_fhir_store_iam_member" "fhir_store" { fhir_store_id = "your-fhir-store-id" - role = "roles/editor" - member = "user:jane@example.com" + role = "roles/editor" + member = "user:jane@example.com" } ``` diff --git a/website/docs/r/healthcare_hl7_v2_store_iam.html.markdown b/website/docs/r/healthcare_hl7_v2_store_iam.html.markdown index 1a608c06aff..db839201491 100644 --- a/website/docs/r/healthcare_hl7_v2_store_iam.html.markdown +++ b/website/docs/r/healthcare_hl7_v2_store_iam.html.markdown @@ -36,8 +36,8 @@ data "google_iam_policy" "admin" { } resource "google_healthcare_hl7_v2_store_iam_policy" "hl7_v2_store" { - hl7_v2_store_id = "your-hl7-v2-store-id" - policy_data = "${data.google_iam_policy.admin.policy_data}" + hl7_v2_store_id = "your-hl7-v2-store-id" + policy_data = data.google_iam_policy.admin.policy_data } ``` @@ -46,12 +46,13 @@ resource "google_healthcare_hl7_v2_store_iam_policy" "hl7_v2_store" { ```hcl resource "google_healthcare_hl7_v2_store_iam_binding" "hl7_v2_store" { hl7_v2_store_id = "your-hl7-v2-store-id" - role = "roles/editor" + role = "roles/editor" members = [ "user:jane@example.com", ] } + ``` ## google\_healthcare\_hl7\_v2\_store\_iam\_member @@ -59,8 +60,8 @@ resource "google_healthcare_hl7_v2_store_iam_binding" "hl7_v2_store" { ```hcl resource "google_healthcare_hl7_v2_store_iam_member" "hl7_v2_store" { hl7_v2_store_id = "your-hl7-v2-store-id" - role = "roles/editor" - member = "user:jane@example.com" + role = "roles/editor" + member = "user:jane@example.com" } ``` diff --git a/website/docs/r/kms_crypto_key.html.markdown b/website/docs/r/kms_crypto_key.html.markdown index e8d4ef60c7a..ce0b1b80d31 100644 --- a/website/docs/r/kms_crypto_key.html.markdown +++ b/website/docs/r/kms_crypto_key.html.markdown @@ -45,13 +45,13 @@ To get more information about CryptoKey, see: ```hcl resource "google_kms_key_ring" "keyring" { - name = "keyring-example" + name = "keyring-example" location = "global" } resource "google_kms_crypto_key" "example-key" { name = "crypto-key-example" - key_ring = "${google_kms_key_ring.keyring.self_link}" + key_ring = google_kms_key_ring.keyring.self_link rotation_period = "100000s" lifecycle { @@ -70,7 +70,7 @@ resource "google_kms_key_ring" "keyring" { resource "google_kms_crypto_key" "example-asymmetric-sign-key" { name = "crypto-key-example" - key_ring = "${google_kms_key_ring.keyring.self_link}" + key_ring = google_kms_key_ring.keyring.self_link purpose = "ASYMMETRIC_SIGN" version_template { diff --git 
a/website/docs/r/kms_key_ring.html.markdown b/website/docs/r/kms_key_ring.html.markdown index a83526f064d..c89da10e156 100644 --- a/website/docs/r/kms_key_ring.html.markdown +++ b/website/docs/r/kms_key_ring.html.markdown @@ -41,7 +41,7 @@ To get more information about KeyRing, see: ```hcl resource "google_kms_key_ring" "example-keyring" { - name = "keyring-example" + name = "keyring-example" location = "global" } ``` diff --git a/website/docs/r/logging_billing_account_exclusion.html.markdown b/website/docs/r/logging_billing_account_exclusion.html.markdown index 0bf10714819..b54161a8d10 100644 --- a/website/docs/r/logging_billing_account_exclusion.html.markdown +++ b/website/docs/r/logging_billing_account_exclusion.html.markdown @@ -20,13 +20,13 @@ granted to the credentials used with Terraform. ```hcl resource "google_logging_billing_account_exclusion" "my-exclusion" { - name = "my-instance-debug-exclusion" - billing_account = "ABCDEF-012345-GHIJKL" + name = "my-instance-debug-exclusion" + billing_account = "ABCDEF-012345-GHIJKL" - description = "Exclude GCE instance debug logs" + description = "Exclude GCE instance debug logs" - # Exclude all DEBUG or lower severity messages relating to instances - filter = "resource.type = gce_instance AND severity <= DEBUG" + # Exclude all DEBUG or lower severity messages relating to instances + filter = "resource.type = gce_instance AND severity <= DEBUG" } ``` diff --git a/website/docs/r/logging_billing_account_sink.html.markdown b/website/docs/r/logging_billing_account_sink.html.markdown index b4b005c56d9..da8b54c2a45 100644 --- a/website/docs/r/logging_billing_account_sink.html.markdown +++ b/website/docs/r/logging_billing_account_sink.html.markdown @@ -22,23 +22,23 @@ typical IAM roles granted on a project. ```hcl resource "google_logging_billing_account_sink" "my-sink" { - name = "my-sink" - billing_account = "ABCDEF-012345-GHIJKL" + name = "my-sink" + billing_account = "ABCDEF-012345-GHIJKL" - # Can export to pubsub, cloud storage, or bigquery - destination = "storage.googleapis.com/${google_storage_bucket.log-bucket.name}" + # Can export to pubsub, cloud storage, or bigquery + destination = "storage.googleapis.com/${google_storage_bucket.log-bucket.name}" } resource "google_storage_bucket" "log-bucket" { - name = "billing-logging-bucket" + name = "billing-logging-bucket" } resource "google_project_iam_binding" "log-writer" { - role = "roles/storage.objectCreator" + role = "roles/storage.objectCreator" - members = [ - "${google_logging_billing_account_sink.my-sink.writer_identity}", - ] + members = [ + google_logging_billing_account_sink.my-sink.writer_identity, + ] } ``` diff --git a/website/docs/r/logging_folder_exclusion.html.markdown b/website/docs/r/logging_folder_exclusion.html.markdown index 107dc42c6c8..615e53d85e7 100644 --- a/website/docs/r/logging_folder_exclusion.html.markdown +++ b/website/docs/r/logging_folder_exclusion.html.markdown @@ -20,18 +20,18 @@ granted to the credentials used with Terraform. 
```hcl resource "google_logging_folder_exclusion" "my-exclusion" { - name = "my-instance-debug-exclusion" - folder = "${google_folder.my-folder.name}" + name = "my-instance-debug-exclusion" + folder = google_folder.my-folder.name - description = "Exclude GCE instance debug logs" + description = "Exclude GCE instance debug logs" - # Exclude all DEBUG or lower severity messages relating to instances - filter = "resource.type = gce_instance AND severity <= DEBUG" + # Exclude all DEBUG or lower severity messages relating to instances + filter = "resource.type = gce_instance AND severity <= DEBUG" } resource "google_folder" "my-folder" { - display_name = "My folder" - parent = "organizations/123456" + display_name = "My folder" + parent = "organizations/123456" } ``` diff --git a/website/docs/r/logging_folder_sink.html.markdown b/website/docs/r/logging_folder_sink.html.markdown index ab076376838..ec70627ab6f 100644 --- a/website/docs/r/logging_folder_sink.html.markdown +++ b/website/docs/r/logging_folder_sink.html.markdown @@ -20,31 +20,31 @@ granted to the credentials used with terraform. ```hcl resource "google_logging_folder_sink" "my-sink" { - name = "my-sink" - folder = "${google_folder.my-folder.name}" + name = "my-sink" + folder = google_folder.my-folder.name - # Can export to pubsub, cloud storage, or bigquery - destination = "storage.googleapis.com/${google_storage_bucket.log-bucket.name}" + # Can export to pubsub, cloud storage, or bigquery + destination = "storage.googleapis.com/${google_storage_bucket.log-bucket.name}" - # Log all WARN or higher severity messages relating to instances - filter = "resource.type = gce_instance AND severity >= WARN" + # Log all WARN or higher severity messages relating to instances + filter = "resource.type = gce_instance AND severity >= WARN" } resource "google_storage_bucket" "log-bucket" { - name = "folder-logging-bucket" + name = "folder-logging-bucket" } resource "google_project_iam_binding" "log-writer" { - role = "roles/storage.objectCreator" + role = "roles/storage.objectCreator" - members = [ - "${google_logging_folder_sink.my-sink.writer_identity}", - ] + members = [ + google_logging_folder_sink.my-sink.writer_identity, + ] } resource "google_folder" "my-folder" { - display_name = "My folder" - parent = "organizations/123456" + display_name = "My folder" + parent = "organizations/123456" } ``` diff --git a/website/docs/r/logging_metric.html.markdown b/website/docs/r/logging_metric.html.markdown index 3c34f27227e..da5e52add83 100644 --- a/website/docs/r/logging_metric.html.markdown +++ b/website/docs/r/logging_metric.html.markdown @@ -44,26 +44,28 @@ To get more information about Metric, see: ```hcl resource "google_logging_metric" "logging_metric" { - name = "my-(custom)/metric" + name = "my-(custom)/metric" filter = "resource.type=gae_app AND severity>=ERROR" metric_descriptor { metric_kind = "DELTA" - value_type = "DISTRIBUTION" - unit = "1" + value_type = "DISTRIBUTION" + unit = "1" labels { - key = "mass" - value_type = "STRING" - description = "amount of matter" + key = "mass" + value_type = "STRING" + description = "amount of matter" } display_name = "My metric" } value_extractor = "EXTRACT(jsonPayload.request)" - label_extractors = { "mass": "EXTRACT(jsonPayload.request)" } + label_extractors = { + "mass" = "EXTRACT(jsonPayload.request)" + } bucket_options { linear_buckets { num_finite_buckets = 3 - width = 1 - offset = 1 + width = 1 + offset = 1 } } } @@ -78,11 +80,11 @@ resource "google_logging_metric" "logging_metric" { ```hcl 
resource "google_logging_metric" "logging_metric" { - name = "my-(custom)/metric" + name = "my-(custom)/metric" filter = "resource.type=gae_app AND severity>=ERROR" metric_descriptor { metric_kind = "DELTA" - value_type = "INT64" + value_type = "INT64" } } ``` @@ -96,18 +98,20 @@ resource "google_logging_metric" "logging_metric" { ```hcl resource "google_logging_metric" "logging_metric" { - name = "my-(custom)/metric" + name = "my-(custom)/metric" filter = "resource.type=gae_app AND severity>=ERROR" metric_descriptor { metric_kind = "DELTA" - value_type = "INT64" + value_type = "INT64" labels { - key = "mass" - value_type = "STRING" - description = "amount of matter" + key = "mass" + value_type = "STRING" + description = "amount of matter" } } - label_extractors = { "mass": "EXTRACT(jsonPayload.request)" } + label_extractors = { + "mass" = "EXTRACT(jsonPayload.request)" + } } ``` @@ -264,7 +268,7 @@ The `exponential_buckets` block supports: The `explicit_buckets` block supports: * `bounds` - - (Optional) + (Required) The values must be monotonically increasing. diff --git a/website/docs/r/logging_organization_exclusion.html.markdown b/website/docs/r/logging_organization_exclusion.html.markdown index e0bca87533e..c580ba520dc 100644 --- a/website/docs/r/logging_organization_exclusion.html.markdown +++ b/website/docs/r/logging_organization_exclusion.html.markdown @@ -20,13 +20,13 @@ granted to the credentials used with Terraform. ```hcl resource "google_logging_organization_exclusion" "my-exclusion" { - name = "my-instance-debug-exclusion" - org_id = "123456789" + name = "my-instance-debug-exclusion" + org_id = "123456789" - description = "Exclude GCE instance debug logs" + description = "Exclude GCE instance debug logs" - # Exclude all DEBUG or lower severity messages relating to instances - filter = "resource.type = gce_instance AND severity <= DEBUG" + # Exclude all DEBUG or lower severity messages relating to instances + filter = "resource.type = gce_instance AND severity <= DEBUG" } ``` diff --git a/website/docs/r/logging_organization_sink.html.markdown b/website/docs/r/logging_organization_sink.html.markdown index 74b9326ea0f..c663da6e350 100644 --- a/website/docs/r/logging_organization_sink.html.markdown +++ b/website/docs/r/logging_organization_sink.html.markdown @@ -20,24 +20,24 @@ granted to the credentials used with terraform. 
```hcl resource "google_logging_organization_sink" "my-sink" { - name = "my-sink" - org_id = "123456789" + name = "my-sink" + org_id = "123456789" - # Can export to pubsub, cloud storage, or bigquery - destination = "storage.googleapis.com/${google_storage_bucket.log-bucket.name}" + # Can export to pubsub, cloud storage, or bigquery + destination = "storage.googleapis.com/${google_storage_bucket.log-bucket.name}" - # Log all WARN or higher severity messages relating to instances - filter = "resource.type = gce_instance AND severity >= WARN" + # Log all WARN or higher severity messages relating to instances + filter = "resource.type = gce_instance AND severity >= WARN" } resource "google_storage_bucket" "log-bucket" { - name = "organization-logging-bucket" + name = "organization-logging-bucket" } resource "google_project_iam_member" "log-writer" { - role = "roles/storage.objectCreator" + role = "roles/storage.objectCreator" - member = "${google_logging_organization_sink.my-sink.writer_identity}" + member = google_logging_organization_sink.my-sink.writer_identity } ``` diff --git a/website/docs/r/logging_project_exclusion.html.markdown b/website/docs/r/logging_project_exclusion.html.markdown index 74d4ea2ab94..854d968db42 100644 --- a/website/docs/r/logging_project_exclusion.html.markdown +++ b/website/docs/r/logging_project_exclusion.html.markdown @@ -20,12 +20,12 @@ granted to the credentials used with Terraform. ```hcl resource "google_logging_project_exclusion" "my-exclusion" { - name = "my-instance-debug-exclusion" + name = "my-instance-debug-exclusion" - description = "Exclude GCE instance debug logs" + description = "Exclude GCE instance debug logs" - # Exclude all DEBUG or lower severity messages relating to instances - filter = "resource.type = gce_instance AND severity <= DEBUG" + # Exclude all DEBUG or lower severity messages relating to instances + filter = "resource.type = gce_instance AND severity <= DEBUG" } ``` diff --git a/website/docs/r/logging_project_sink.html.markdown b/website/docs/r/logging_project_sink.html.markdown index e0eacea8d50..892c1dd24b6 100644 --- a/website/docs/r/logging_project_sink.html.markdown +++ b/website/docs/r/logging_project_sink.html.markdown @@ -23,16 +23,16 @@ and ```hcl resource "google_logging_project_sink" "my-sink" { - name = "my-pubsub-instance-sink" + name = "my-pubsub-instance-sink" - # Can export to pubsub, cloud storage, or bigquery - destination = "pubsub.googleapis.com/projects/my-project/topics/instance-activity" + # Can export to pubsub, cloud storage, or bigquery + destination = "pubsub.googleapis.com/projects/my-project/topics/instance-activity" - # Log all WARN or higher severity messages relating to instances - filter = "resource.type = gce_instance AND severity >= WARN" + # Log all WARN or higher severity messages relating to instances + filter = "resource.type = gce_instance AND severity >= WARN" - # Use a unique writer (creates a unique service account used for writing) - unique_writer_identity = true + # Use a unique writer (creates a unique service account used for writing) + unique_writer_identity = true } ``` @@ -57,33 +57,33 @@ resource "google_compute_instance" "my-logged-instance" { network_interface { network = "default" - access_config {} + access_config { + } } } # A bucket to store logs in resource "google_storage_bucket" "log-bucket" { - name = "my-unique-logging-bucket" + name = "my-unique-logging-bucket" } # Our sink; this logs all activity related to our "my-logged-instance" instance resource 
"google_logging_project_sink" "instance-sink" { - name = "my-instance-sink" - destination = "storage.googleapis.com/${google_storage_bucket.log-bucket.name}" - filter = "resource.type = gce_instance AND resource.labels.instance_id = \"${google_compute_instance.my-logged-instance.instance_id}\"" + name = "my-instance-sink" + destination = "storage.googleapis.com/${google_storage_bucket.log-bucket.name}" + filter = "resource.type = gce_instance AND resource.labels.instance_id = \"${google_compute_instance.my-logged-instance.instance_id}\"" - unique_writer_identity = true + unique_writer_identity = true } # Because our sink uses a unique_writer, we must grant that writer access to the bucket. resource "google_project_iam_binding" "log-writer" { - role = "roles/storage.objectCreator" + role = "roles/storage.objectCreator" - members = [ - "${google_logging_project_sink.instance-sink.writer_identity}", - ] + members = [ + google_logging_project_sink.instance-sink.writer_identity, + ] } - ``` ## Argument Reference diff --git a/website/docs/r/ml_engine_model.html.markdown b/website/docs/r/ml_engine_model.html.markdown index bea6e4f56f1..d91572711cf 100644 --- a/website/docs/r/ml_engine_model.html.markdown +++ b/website/docs/r/ml_engine_model.html.markdown @@ -39,9 +39,9 @@ ready to receive prediction requests. The model itself is just a container. ```hcl resource "google_ml_engine_model" "default" { - name = "default" + name = "default" description = "My model" - regions = ["us-central1"] + regions = ["us-central1"] } ```
@@ -54,13 +54,13 @@ resource "google_ml_engine_model" "default" { ```hcl resource "google_ml_engine_model" "default" { - name = "default" + name = "default" description = "My model" - regions = ["us-central1"] - labels = { + regions = ["us-central1"] + labels = { my_model = "foo" } - online_prediction_logging = true + online_prediction_logging = true online_prediction_console_logging = true } ``` @@ -111,7 +111,7 @@ The following arguments are supported: The `default_version` block supports: * `name` - - (Optional) + (Required) The name specified for the version when it was created. diff --git a/website/docs/r/monitoring_alert_policy.html.markdown b/website/docs/r/monitoring_alert_policy.html.markdown index ef8067e9f9c..e2593e10a4d 100644 --- a/website/docs/r/monitoring_alert_policy.html.markdown +++ b/website/docs/r/monitoring_alert_policy.html.markdown @@ -41,15 +41,15 @@ To get more information about AlertPolicy, see: ```hcl resource "google_monitoring_alert_policy" "alert_policy" { display_name = "My Alert Policy" - combiner = "OR" + combiner = "OR" conditions { display_name = "test condition" condition_threshold { - filter = "metric.type=\"compute.googleapis.com/instance/disk/write_bytes_count\" AND resource.type=\"gce_instance\"" - duration = "60s" + filter = "metric.type=\"compute.googleapis.com/instance/disk/write_bytes_count\" AND resource.type=\"gce_instance\"" + duration = "60s" comparison = "COMPARISON_GT" aggregations { - alignment_period = "60s" + alignment_period = "60s" per_series_aligner = "ALIGN_RATE" } } diff --git a/website/docs/r/monitoring_group.html.markdown b/website/docs/r/monitoring_group.html.markdown index bf89093fdfb..85ec1269587 100644 --- a/website/docs/r/monitoring_group.html.markdown +++ b/website/docs/r/monitoring_group.html.markdown @@ -60,13 +60,13 @@ resource "google_monitoring_group" "basic" { ```hcl resource "google_monitoring_group" "parent" { display_name = "tf-test MonitoringSubGroup" - filter = "resource.metadata.region=\"europe-west2\"" + filter = "resource.metadata.region=\"europe-west2\"" } resource "google_monitoring_group" "subgroup" { display_name = "tf-test MonitoringSubGroup" - filter = "resource.metadata.region=\"europe-west2\"" - parent_name = "${google_monitoring_group.parent.name}" + filter = "resource.metadata.region=\"europe-west2\"" + parent_name = google_monitoring_group.parent.name } ``` diff --git a/website/docs/r/monitoring_notification_channel.html.markdown b/website/docs/r/monitoring_notification_channel.html.markdown index 072bcfee708..73dfeddf872 100644 --- a/website/docs/r/monitoring_notification_channel.html.markdown +++ b/website/docs/r/monitoring_notification_channel.html.markdown @@ -59,7 +59,7 @@ To get more information about NotificationChannel, see: ```hcl resource "google_monitoring_notification_channel" "basic" { display_name = "Test Notification Channel" - type = "email" + type = "email" labels = { email_address = "fake_email@blahblah.com" } diff --git a/website/docs/r/monitoring_uptime_check_config.html.markdown b/website/docs/r/monitoring_uptime_check_config.html.markdown index 6486a4cf11a..45ef477daea 100644 --- a/website/docs/r/monitoring_uptime_check_config.html.markdown +++ b/website/docs/r/monitoring_uptime_check_config.html.markdown @@ -42,7 +42,7 @@ To get more information about UptimeCheckConfig, see: ```hcl resource "google_monitoring_uptime_check_config" "http" { display_name = "http-uptime-check" - timeout = "60s" + timeout = "60s" http_check { path = "/some-path" @@ -53,7 +53,7 @@ resource 
"google_monitoring_uptime_check_config" "http" { type = "uptime_url" labels = { project_id = "my-project-name" - host = "192.168.1.1" + host = "192.168.1.1" } } @@ -106,7 +106,7 @@ resource "google_monitoring_uptime_check_config" "https" { ```hcl resource "google_monitoring_uptime_check_config" "tcp_group" { display_name = "tcp-uptime-check" - timeout = "60s" + timeout = "60s" tcp_check { port = 888 @@ -114,14 +114,13 @@ resource "google_monitoring_uptime_check_config" "tcp_group" { resource_group { resource_type = "INSTANCE" - group_id = "${google_monitoring_group.check.name}" + group_id = google_monitoring_group.check.name } } - resource "google_monitoring_group" "check" { display_name = "uptime-check-group" - filter = "resource.metadata.name=has_substring(\"foo\")" + filter = "resource.metadata.name=has_substring(\"foo\")" } ``` @@ -177,7 +176,7 @@ The following arguments are supported: The `content_matchers` block supports: * `content` - - (Optional) + (Required) String or regex content to match (max 1024 bytes) The `http_check` block supports: @@ -214,11 +213,11 @@ The `http_check` block supports: The `auth_info` block supports: * `password` - - (Optional) + (Required) The password to authenticate. * `username` - - (Optional) + (Required) The username to authenticate. The `tcp_check` block supports: diff --git a/website/docs/r/pubsub_subscription.html.markdown b/website/docs/r/pubsub_subscription.html.markdown index 4b4887d35ef..78e6b04d5ba 100644 --- a/website/docs/r/pubsub_subscription.html.markdown +++ b/website/docs/r/pubsub_subscription.html.markdown @@ -43,7 +43,7 @@ resource "google_pubsub_topic" "example" { resource "google_pubsub_subscription" "example" { name = "example-subscription" - topic = "${google_pubsub_topic.example.name}" + topic = google_pubsub_topic.example.name ack_deadline_seconds = 20 @@ -54,7 +54,7 @@ resource "google_pubsub_subscription" "example" { push_config { push_endpoint = "https://example.com/push" - attributes { + attributes = { x-goog-version = "v1" } } @@ -75,7 +75,7 @@ resource "google_pubsub_topic" "example" { resource "google_pubsub_subscription" "example" { name = "example-subscription" - topic = "${google_pubsub_topic.example.name}" + topic = google_pubsub_topic.example.name labels = { foo = "bar" @@ -83,7 +83,7 @@ resource "google_pubsub_subscription" "example" { # 20 minutes message_retention_duration = "1200s" - retain_acked_messages = true + retain_acked_messages = true ack_deadline_seconds = 20 @@ -104,7 +104,7 @@ resource "google_pubsub_topic" "example" { resource "google_pubsub_subscription" "example" { project = "subscription-project" name = "example-subscription" - topic = "${google_pubsub_topic.example.name}" + topic = google_pubsub_topic.example.name } ``` @@ -177,7 +177,7 @@ The following arguments are supported: A subscription is considered active as long as any connected subscriber is successfully consuming messages from the subscription or is issuing operations on the subscription. If expirationPolicy is not set, a default - policy with ttl of 31 days will be used. If it is set but left empty, the + policy with ttl of 31 days will be used. If it is set but ttl is "", the resource never expires. The minimum allowed value for expirationPolicy.ttl is 1 day. Structure is documented below. @@ -241,7 +241,7 @@ The `oidc_token` block supports: The `expiration_policy` block supports: * `ttl` - - (Optional) + (Required) Specifies the "time-to-live" duration for an associated resource. 
The resource expires if it is not active for a period of ttl. If ttl is not set, the associated resource never expires. diff --git a/website/docs/r/pubsub_subscription_iam.html.markdown b/website/docs/r/pubsub_subscription_iam.html.markdown index 12e5181706d..5b137125231 100644 --- a/website/docs/r/pubsub_subscription_iam.html.markdown +++ b/website/docs/r/pubsub_subscription_iam.html.markdown @@ -24,7 +24,7 @@ Three different resources help you manage your IAM policy for pubsub subscriptio ```hcl data "google_iam_policy" "admin" { binding { - role = "roles/editor" + role = "roles/editor" members = [ "user:jane@example.com", ] @@ -33,7 +33,7 @@ data "google_iam_policy" "admin" { resource "google_pubsub_subscription_iam_policy" "editor" { subscription = "your-subscription-name" - policy_data = "${data.google_iam_policy.admin.policy_data}" + policy_data = data.google_iam_policy.admin.policy_data } ``` @@ -43,7 +43,7 @@ resource "google_pubsub_subscription_iam_policy" "editor" { resource "google_pubsub_subscription_iam_binding" "editor" { subscription = "your-subscription-name" role = "roles/editor" - members = [ + members = [ "user:jane@example.com", ] } diff --git a/website/docs/r/pubsub_topic.html.markdown b/website/docs/r/pubsub_topic.html.markdown index a16292c15cb..f2f14d4f7e0 100644 --- a/website/docs/r/pubsub_topic.html.markdown +++ b/website/docs/r/pubsub_topic.html.markdown @@ -54,12 +54,12 @@ resource "google_pubsub_topic" "example" { ```hcl resource "google_pubsub_topic" "example" { name = "example-topic" - kms_key_name = "${google_kms_crypto_key.crypto_key.self_link}" + kms_key_name = google_kms_crypto_key.crypto_key.self_link } resource "google_kms_crypto_key" "crypto_key" { name = "example-key" - key_ring = "${google_kms_key_ring.key_ring.self_link}" + key_ring = google_kms_key_ring.key_ring.self_link } resource "google_kms_key_ring" "key_ring" { @@ -84,7 +84,6 @@ resource "google_pubsub_topic" "example" { "europe-west3", ] } - } ``` diff --git a/website/docs/r/redis_instance.html.markdown b/website/docs/r/redis_instance.html.markdown index c64db938554..d81412077e4 100644 --- a/website/docs/r/redis_instance.html.markdown +++ b/website/docs/r/redis_instance.html.markdown @@ -62,7 +62,7 @@ resource "google_redis_instance" "cache" { location_id = "us-central1-a" alternative_location_id = "us-central1-f" - authorized_network = "${google_compute_network.auto-network.self_link}" + authorized_network = google_compute_network.auto-network.self_link redis_version = "REDIS_3_2" display_name = "Terraform Test Instance" diff --git a/website/docs/r/resource_manager_lien.html.markdown b/website/docs/r/resource_manager_lien.html.markdown index b9ec3443fd8..86ec601d83f 100644 --- a/website/docs/r/resource_manager_lien.html.markdown +++ b/website/docs/r/resource_manager_lien.html.markdown @@ -31,15 +31,15 @@ A Lien represents an encumbrance on the actions that can be performed on a resou ```hcl resource "google_resource_manager_lien" "lien" { - parent = "projects/${google_project.project.number}" + parent = "projects/${google_project.project.number}" restrictions = ["resourcemanager.projects.delete"] - origin = "machine-readable-explanation" - reason = "This project is an important environment" + origin = "machine-readable-explanation" + reason = "This project is an important environment" } resource "google_project" "project" { project_id = "staging-project" - name = "A very important project!" + name = "A very important project!" 
} ``` diff --git a/website/docs/r/runtimeconfig_config.html.markdown b/website/docs/r/runtimeconfig_config.html.markdown index 713dee1a942..0af0429327a 100644 --- a/website/docs/r/runtimeconfig_config.html.markdown +++ b/website/docs/r/runtimeconfig_config.html.markdown @@ -20,8 +20,8 @@ Example creating a RuntimeConfig resource. ```hcl resource "google_runtimeconfig_config" "my-runtime-config" { - name = "my-service-runtime-config" - description = "Runtime configuration values for my service" + name = "my-service-runtime-config" + description = "Runtime configuration values for my service" } ``` diff --git a/website/docs/r/runtimeconfig_variable.html.markdown b/website/docs/r/runtimeconfig_variable.html.markdown index 3618b1ffbdc..d5b5e80ba3d 100644 --- a/website/docs/r/runtimeconfig_variable.html.markdown +++ b/website/docs/r/runtimeconfig_variable.html.markdown @@ -20,14 +20,14 @@ Example creating a RuntimeConfig variable. ```hcl resource "google_runtimeconfig_config" "my-runtime-config" { - name = "my-service-runtime-config" - description = "Runtime configuration values for my service" + name = "my-service-runtime-config" + description = "Runtime configuration values for my service" } resource "google_runtimeconfig_variable" "environment" { - parent = "${google_runtimeconfig_config.my-runtime-config.name}" - name = "prod-variables/hostname" - text = "example.com" + parent = google_runtimeconfig_config.my-runtime-config.name + name = "prod-variables/hostname" + text = "example.com" } ``` @@ -38,14 +38,14 @@ Example of using the `value` argument. ```hcl resource "google_runtimeconfig_config" "my-runtime-config" { - name = "my-service-runtime-config" - description = "Runtime configuration values for my service" + name = "my-service-runtime-config" + description = "Runtime configuration values for my service" } resource "google_runtimeconfig_variable" "my-secret" { - parent = "${google_runtimeconfig_config.my-runtime-config.name}" - name = "secret" - value = "${base64encode(file("my-encrypted-secret.dat"))}" + parent = google_runtimeconfig_config.my-runtime-config.name + name = "secret" + value = base64encode(file("my-encrypted-secret.dat")) } ``` diff --git a/website/docs/r/scc_source.html.markdown b/website/docs/r/scc_source.html.markdown index 2d823f31d67..b7660aad798 100644 --- a/website/docs/r/scc_source.html.markdown +++ b/website/docs/r/scc_source.html.markdown @@ -41,7 +41,7 @@ To get more information about Source, see: resource "google_scc_source" "custom_source" { display_name = "My Source" organization = "123456789" - description = "My custom Cloud Security Command Center Finding Source" + description = "My custom Cloud Security Command Center Finding Source" } ``` diff --git a/website/docs/r/service_networking_connection.html.markdown b/website/docs/r/service_networking_connection.html.markdown index 1e377834bd7..e201834c931 100644 --- a/website/docs/r/service_networking_connection.html.markdown +++ b/website/docs/r/service_networking_connection.html.markdown @@ -26,13 +26,13 @@ resource "google_compute_global_address" "private_ip_alloc" { purpose = "VPC_PEERING" address_type = "INTERNAL" prefix_length = 16 - network = "${google_compute_network.peering_network.self_link}" + network = google_compute_network.peering_network.self_link } resource "google_service_networking_connection" "foobar" { - network = "${google_compute_network.peering_network.self_link}" + network = google_compute_network.peering_network.self_link service = "servicenetworking.googleapis.com" - 
reserved_peering_ranges = ["${google_compute_global_address.private_ip_alloc.name}"] + reserved_peering_ranges = [google_compute_global_address.private_ip_alloc.name] } ``` diff --git a/website/docs/r/sourcerepo_repository_iam.html.markdown b/website/docs/r/sourcerepo_repository_iam.html.markdown index 21d465e98dc..b0cf6b75648 100644 --- a/website/docs/r/sourcerepo_repository_iam.html.markdown +++ b/website/docs/r/sourcerepo_repository_iam.html.markdown @@ -122,17 +122,17 @@ SourceRepo repository IAM resources can be imported using the resource identifie IAM member imports use space-delimited identifiers: the resource in question, the role, and the member identity, e.g. ``` -$ terraform import google_sourcerepo_repository_iam_member.editor "{{project}}/{{repository}} roles/viewer jane@example.com" +$ terraform import google_sourcerepo_repository_iam_member.editor "projects/{{project}}/repos/{{repository}} roles/viewer jane@example.com" ``` IAM binding imports use space-delimited identifiers: the resource in question and the role, e.g. ``` -$ terraform import google_sourcerepo_repository_iam_binding.editor "{{project}}/{{repository}} roles/viewer" +$ terraform import google_sourcerepo_repository_iam_binding.editor "projects/{{project}}/repos/{{repository}} roles/viewer" ``` IAM policy imports use the identifier of the resource in question, e.g. ``` -$ terraform import google_sourcerepo_repository_iam_policy.editor {{project}}/{{repository}} +$ terraform import google_sourcerepo_repository_iam_policy.editor projects/{{project}}/repos/{{repository}} ``` -> If you're importing a resource with beta features, make sure to include `-provider=google-beta` diff --git a/website/docs/r/spanner_database.html.markdown b/website/docs/r/spanner_database.html.markdown index 5fca1639c31..f9f0e57a1d6 100644 --- a/website/docs/r/spanner_database.html.markdown +++ b/website/docs/r/spanner_database.html.markdown @@ -46,11 +46,11 @@ resource "google_spanner_instance" "main" { } resource "google_spanner_database" "database" { - instance = "${google_spanner_instance.main.name}" - name = "my-database" - ddl = [ + instance = google_spanner_instance.main.name + name = "my-database" + ddl = [ "CREATE TABLE t1 (t1 INT64 NOT NULL,) PRIMARY KEY(t1)", - "CREATE TABLE t2 (t2 INT64 NOT NULL,) PRIMARY KEY(t2)" + "CREATE TABLE t2 (t2 INT64 NOT NULL,) PRIMARY KEY(t2)", ] } ``` diff --git a/website/docs/r/spanner_database_iam.html.markdown b/website/docs/r/spanner_database_iam.html.markdown index 006e3ed7552..d386ef2d101 100644 --- a/website/docs/r/spanner_database_iam.html.markdown +++ b/website/docs/r/spanner_database_iam.html.markdown @@ -38,7 +38,7 @@ data "google_iam_policy" "admin" { resource "google_spanner_database_iam_policy" "database" { instance = "your-instance-name" database = "your-database-name" - policy_data = "${data.google_iam_policy.admin.policy_data}" + policy_data = data.google_iam_policy.admin.policy_data } ``` @@ -46,9 +46,9 @@ resource "google_spanner_database_iam_policy" "database" { ```hcl resource "google_spanner_database_iam_binding" "database" { - instance = "your-instance-name" - database = "your-database-name" - role = "roles/compute.networkUser" + instance = "your-instance-name" + database = "your-database-name" + role = "roles/compute.networkUser" members = [ "user:jane@example.com", @@ -60,10 +60,10 @@ resource "google_spanner_database_iam_binding" "database" { ```hcl resource "google_spanner_database_iam_member" "database" { - instance = "your-instance-name" - database = "your-database-name" 
- role = "roles/compute.networkUser" - member = "user:jane@example.com" + instance = "your-instance-name" + database = "your-database-name" + role = "roles/compute.networkUser" + member = "user:jane@example.com" } ``` diff --git a/website/docs/r/spanner_instance.html.markdown b/website/docs/r/spanner_instance.html.markdown index 685fff0f69f..8e2031d42b6 100644 --- a/website/docs/r/spanner_instance.html.markdown +++ b/website/docs/r/spanner_instance.html.markdown @@ -43,9 +43,9 @@ To get more information about Instance, see: ```hcl resource "google_spanner_instance" "example" { - config = "regional-us-central1" - display_name = "Test Spanner Instance" - num_nodes = 2 + config = "regional-us-central1" + display_name = "Test Spanner Instance" + num_nodes = 2 labels = { "foo" = "bar" } diff --git a/website/docs/r/spanner_instance_iam.html.markdown b/website/docs/r/spanner_instance_iam.html.markdown index 969a13a326b..1b301502fc0 100644 --- a/website/docs/r/spanner_instance_iam.html.markdown +++ b/website/docs/r/spanner_instance_iam.html.markdown @@ -37,7 +37,7 @@ data "google_iam_policy" "admin" { resource "google_spanner_instance_iam_policy" "instance" { instance = "your-instance-name" - policy_data = "${data.google_iam_policy.admin.policy_data}" + policy_data = data.google_iam_policy.admin.policy_data } ``` @@ -45,8 +45,8 @@ resource "google_spanner_instance_iam_policy" "instance" { ```hcl resource "google_spanner_instance_iam_binding" "instance" { - instance = "your-instance-name" - role = "roles/compute.networkUser" + instance = "your-instance-name" + role = "roles/compute.networkUser" members = [ "user:jane@example.com", @@ -58,9 +58,9 @@ resource "google_spanner_instance_iam_binding" "instance" { ```hcl resource "google_spanner_instance_iam_member" "instance" { - instance = "your-instance-name" - role = "roles/compute.networkUser" - member = "user:jane@example.com" + instance = "your-instance-name" + role = "roles/compute.networkUser" + member = "user:jane@example.com" } ``` diff --git a/website/docs/r/sql_database.html.markdown b/website/docs/r/sql_database.html.markdown index 953c5e94617..7c9437928c0 100644 --- a/website/docs/r/sql_database.html.markdown +++ b/website/docs/r/sql_database.html.markdown @@ -38,16 +38,16 @@ Google's cloud. 
```hcl resource "google_sql_database" "database" { - name = "my-database" - instance = "${google_sql_database_instance.instance.name}" + name = "my-database" + instance = google_sql_database_instance.instance.name } resource "google_sql_database_instance" "instance" { - name = "my-database-instance" - region = "us-central" - settings { - tier = "D0" - } + name = "my-database-instance" + region = "us-central" + settings { + tier = "D0" + } } ``` @@ -109,7 +109,6 @@ $ terraform import google_sql_database.default projects/{{project}}/instances/{{ $ terraform import google_sql_database.default instances/{{instance}}/databases/{{name}} $ terraform import google_sql_database.default {{project}}/{{instance}}/{{name}} $ terraform import google_sql_database.default {{instance}}/{{name}} -$ terraform import google_sql_database.default {{instance}}:{{name}} $ terraform import google_sql_database.default {{name}} ``` diff --git a/website/docs/r/sql_database_instance.html.markdown b/website/docs/r/sql_database_instance.html.markdown index f7c787ac990..f16a5db743b 100644 --- a/website/docs/r/sql_database_instance.html.markdown +++ b/website/docs/r/sql_database_instance.html.markdown @@ -27,8 +27,9 @@ resource "random_id" "db_name_suffix" { } resource "google_sql_database_instance" "master" { - name = "master-instance-${random_id.db_name_suffix.hex}" + name = "master-instance-${random_id.db_name_suffix.hex}" database_version = "MYSQL_5_6" + # First-generation instance regions are not the conventional # Google Compute Engine regions. See argument reference below. region = "us-central" @@ -43,9 +44,9 @@ resource "google_sql_database_instance" "master" { ```hcl resource "google_sql_database_instance" "master" { - name = "master-instance" + name = "master-instance" database_version = "POSTGRES_9_6" - region = "us-central1" + region = "us-central1" settings { # Second-generation instance tiers are based on the machine @@ -78,40 +79,42 @@ resource "google_compute_instance" "apps" { } } -data "null_data_source" "auth_netw_postgres_allowed_1" { - count = "${length(google_compute_instance.apps.*.self_link)}" - - inputs = { - name = "apps-${count.index + 1}" - value = "${element(google_compute_instance.apps.*.network_interface.0.access_config.0.nat_ip, count.index)}" - } -} - -data "null_data_source" "auth_netw_postgres_allowed_2" { - count = 2 - - inputs = { - name = "onprem-${count.index + 1}" - value = "${element(list("192.168.1.2", "192.168.2.3"), count.index)}" - } -} - resource "random_id" "db_name_suffix" { byte_length = 4 } +locals { + onprem = ["192.168.1.2", "192.168.2.3"] +} + resource "google_sql_database_instance" "postgres" { - name = "postgres-instance-${random_id.db_name_suffix.hex}" + name = "postgres-instance-${random_id.db_name_suffix.hex}" database_version = "POSTGRES_9_6" settings { tier = "db-f1-micro" ip_configuration { - authorized_networks = [ - "${data.null_data_source.auth_netw_postgres_allowed_1.*.outputs}", - "${data.null_data_source.auth_netw_postgres_allowed_2.*.outputs}", - ] + + dynamic "authorized_networks" { + for_each = google_compute_instance.apps + iterator = apps + + content { + name = apps.value.name + value = apps.value.network_interface.0.access_config.0.nat_ip + } + } + + dynamic "authorized_networks" { + for_each = local.onprem + iterator = onprem + + content { + name = "onprem-${onprem.key}" + value = onprem.value + } + } } } } @@ -122,27 +125,27 @@ resource "google_sql_database_instance" "postgres" { ```hcl resource "google_compute_network" "private_network" { - 
provider = "google-beta" + provider = google-beta - name = "private-network" + name = "private-network" } resource "google_compute_global_address" "private_ip_address" { - provider = "google-beta" + provider = google-beta name = "private-ip-address" purpose = "VPC_PEERING" - address_type = "INTERNAL" + address_type = "INTERNAL" prefix_length = 16 - network = "${google_compute_network.private_network.self_link}" + network = google_compute_network.private_network.self_link } resource "google_service_networking_connection" "private_vpc_connection" { - provider = "google-beta" + provider = google-beta - network = "${google_compute_network.private_network.self_link}" - service = "servicenetworking.googleapis.com" - reserved_peering_ranges = ["${google_compute_global_address.private_ip_address.name}"] + network = google_compute_network.private_network.self_link + service = "servicenetworking.googleapis.com" + reserved_peering_ranges = [google_compute_global_address.private_ip_address.name] } resource "random_id" "db_name_suffix" { @@ -150,25 +153,23 @@ resource "random_id" "db_name_suffix" { } resource "google_sql_database_instance" "instance" { - provider = "google-beta" + provider = google-beta - name = "private-instance-${random_id.db_name_suffix.hex}" + name = "private-instance-${random_id.db_name_suffix.hex}" region = "us-central1" - depends_on = [ - "google_service_networking_connection.private_vpc_connection" - ] + depends_on = [google_service_networking_connection.private_vpc_connection] settings { tier = "db-f1-micro" ip_configuration { - ipv4_enabled = false - private_network = "${google_compute_network.private_network.self_link}" + ipv4_enabled = false + private_network = google_compute_network.private_network.self_link } } } -provider "google-beta"{ +provider "google-beta" { region = "us-central1" zone = "us-central1-a" } diff --git a/website/docs/r/sql_ssl_cert.html.markdown b/website/docs/r/sql_ssl_cert.html.markdown index 76b4efc1cee..9710ba8711d 100644 --- a/website/docs/r/sql_ssl_cert.html.markdown +++ b/website/docs/r/sql_ssl_cert.html.markdown @@ -33,7 +33,7 @@ resource "google_sql_database_instance" "master" { resource "google_sql_ssl_cert" "client_cert" { common_name = "client-name" - instance = "${google_sql_database_instance.master.name}" + instance = google_sql_database_instance.master.name } ``` diff --git a/website/docs/r/sql_user.html.markdown b/website/docs/r/sql_user.html.markdown index a282e6100f3..8f78fa75192 100644 --- a/website/docs/r/sql_user.html.markdown +++ b/website/docs/r/sql_user.html.markdown @@ -34,7 +34,7 @@ resource "google_sql_database_instance" "master" { resource "google_sql_user" "users" { name = "me" - instance = "${google_sql_database_instance.master.name}" + instance = google_sql_database_instance.master.name host = "me.com" password = "changeme" } diff --git a/website/docs/r/storage_bucket.html.markdown b/website/docs/r/storage_bucket.html.markdown index b96fd891bc8..752bd7a4a5c 100644 --- a/website/docs/r/storage_bucket.html.markdown +++ b/website/docs/r/storage_bucket.html.markdown @@ -96,9 +96,7 @@ The `condition` block supports the following elements, and requires at least one * `created_before` - (Optional) Creation date of an object in RFC 3339 (e.g. `2017-06-13`) to satisfy this condition. -* `with_state` - (Optional) Match to live and/or archived objects. Unversioned buckets have only live objects. Supported values include: `"LIVE"`, `"ARCHIVED"`, `"ANY"`. 
Unset or empty strings will be treated as `ARCHIVED` to maintain backwards compatibility with `is_live`. - -* `is_live` - (Optional, Deprecated) Defaults to `false` to match archived objects. If `true`, this condition matches live objects. Unversioned buckets have only live objects. +* `with_state` - (Optional) Match to live and/or archived objects. Unversioned buckets have only live objects. Supported values include: `"LIVE"`, `"ARCHIVED"`, `"ANY"`. * `matches_storage_class` - (Optional) [Storage Class](https://cloud.google.com/storage/docs/storage-classes) of objects to satisfy this condition. Supported values include: `MULTI_REGIONAL`, `REGIONAL`, `NEARLINE`, `COLDLINE`, `STANDARD`, `DURABLE_REDUCED_AVAILABILITY`. diff --git a/website/docs/r/storage_bucket_access_control.html.markdown b/website/docs/r/storage_bucket_access_control.html.markdown index 6b3d27905ad..c3cdb38028a 100644 --- a/website/docs/r/storage_bucket_access_control.html.markdown +++ b/website/docs/r/storage_bucket_access_control.html.markdown @@ -55,7 +55,7 @@ resource "google_storage_bucket_access_control" "public_rule" { } resource "google_storage_bucket" "bucket" { - name = "static-content-bucket" + name = "static-content-bucket" } ``` diff --git a/website/docs/r/storage_bucket_acl.html.markdown b/website/docs/r/storage_bucket_acl.html.markdown index 13422ab5226..db9e2467d55 100644 --- a/website/docs/r/storage_bucket_acl.html.markdown +++ b/website/docs/r/storage_bucket_acl.html.markdown @@ -25,7 +25,7 @@ resource "google_storage_bucket" "image-store" { } resource "google_storage_bucket_acl" "image-store-acl" { - bucket = "${google_storage_bucket.image-store.name}" + bucket = google_storage_bucket.image-store.name role_entity = [ "OWNER:user-my.email@gmail.com", diff --git a/website/docs/r/storage_bucket_iam.html.markdown b/website/docs/r/storage_bucket_iam.html.markdown index feb2c22b9e6..bc2510a7730 100644 --- a/website/docs/r/storage_bucket_iam.html.markdown +++ b/website/docs/r/storage_bucket_iam.html.markdown @@ -23,7 +23,7 @@ Three different resources help you manage your IAM policy for storage bucket. 
Ea ```hcl resource "google_storage_bucket_iam_binding" "binding" { bucket = "your-bucket-name" - role = "roles/storage.objectViewer" + role = "roles/storage.objectViewer" members = [ "user:jane@example.com", @@ -36,8 +36,8 @@ resource "google_storage_bucket_iam_binding" "binding" { ```hcl resource "google_storage_bucket_iam_member" "member" { bucket = "your-bucket-name" - role = "roles/storage.objectViewer" - member = "user:jane@example.com" + role = "roles/storage.objectViewer" + member = "user:jane@example.com" } ``` @@ -54,13 +54,13 @@ data "google_iam_policy" "foo-policy" { binding { role = "roles/your-role" - members = [ "group:yourgroup@example.com" ] + members = ["group:yourgroup@example.com"] } } resource "google_storage_bucket_iam_policy" "member" { - bucket = "your-bucket-name" - policy_data = "${data.google_iam_policy.foo-policy.policy_data}" + bucket = "your-bucket-name" + policy_data = data.google_iam_policy.foo-policy.policy_data } ``` diff --git a/website/docs/r/storage_default_object_access_control.html.markdown b/website/docs/r/storage_default_object_access_control.html.markdown index 752ed9a49d5..8ff1b98e2b9 100644 --- a/website/docs/r/storage_default_object_access_control.html.markdown +++ b/website/docs/r/storage_default_object_access_control.html.markdown @@ -55,13 +55,13 @@ To get more information about DefaultObjectAccessControl, see: ```hcl resource "google_storage_default_object_access_control" "public_rule" { - bucket = "${google_storage_bucket.bucket.name}" + bucket = google_storage_bucket.bucket.name role = "READER" entity = "allUsers" } resource "google_storage_bucket" "bucket" { - name = "static-content-bucket" + name = "static-content-bucket" } ``` diff --git a/website/docs/r/storage_default_object_acl.html.markdown b/website/docs/r/storage_default_object_acl.html.markdown index 596832b3cfd..72df1a5914b 100644 --- a/website/docs/r/storage_default_object_acl.html.markdown +++ b/website/docs/r/storage_default_object_acl.html.markdown @@ -34,7 +34,7 @@ resource "google_storage_bucket" "image-store" { } resource "google_storage_default_object_acl" "image-store-default-acl" { - bucket = "${google_storage_bucket.image-store.name}" + bucket = google_storage_bucket.image-store.name role_entity = [ "OWNER:user-my.email@gmail.com", "READER:group-mygroup", diff --git a/website/docs/r/storage_notification.html.markdown b/website/docs/r/storage_notification.html.markdown index 645956087d9..2ed21de6948 100644 --- a/website/docs/r/storage_notification.html.markdown +++ b/website/docs/r/storage_notification.html.markdown @@ -25,36 +25,35 @@ for an example of enabling notifications by granting the correct IAM permission. ```hcl resource "google_storage_notification" "notification" { - notification_id = "1" - bucket = "${google_storage_bucket.bucket.name}" - payload_format = "JSON_API_V1" - topic = "${google_pubsub_topic.topic.name}" - event_types = ["OBJECT_FINALIZE", "OBJECT_METADATA_UPDATE"] - custom_attributes = { - new-attribute = "new-attribute-value" - } - depends_on = ["google_pubsub_topic_iam_binding.binding"] + bucket = google_storage_bucket.bucket.name + payload_format = "JSON_API_V1" + topic = google_pubsub_topic.topic.name + event_types = ["OBJECT_FINALIZE", "OBJECT_METADATA_UPDATE"] + custom_attributes = { + new-attribute = "new-attribute-value" + } + depends_on = [google_pubsub_topic_iam_binding.binding] } // Enable notifications by giving the correct IAM permission to the unique service account. 
-data "google_storage_project_service_account" "gcs_account" {} +data "google_storage_project_service_account" "gcs_account" { +} resource "google_pubsub_topic_iam_binding" "binding" { - topic = "${google_pubsub_topic.topic.name}" - role = "roles/pubsub.publisher" - members = ["serviceAccount:${data.google_storage_project_service_account.gcs_account.email_address}"] + topic = google_pubsub_topic.topic.name + role = "roles/pubsub.publisher" + members = ["serviceAccount:${data.google_storage_project_service_account.gcs_account.email_address}"] } // End enabling notifications - resource "google_storage_bucket" "bucket" { - name = "default_bucket" + name = "default_bucket" } resource "google_pubsub_topic" "topic" { - name = "default_topic" + name = "default_topic" } ``` diff --git a/website/docs/r/storage_object_access_control.html.markdown b/website/docs/r/storage_object_access_control.html.markdown index 2bfc3e96a7a..735b37ebbad 100644 --- a/website/docs/r/storage_object_access_control.html.markdown +++ b/website/docs/r/storage_object_access_control.html.markdown @@ -53,20 +53,20 @@ To get more information about ObjectAccessControl, see: ```hcl resource "google_storage_object_access_control" "public_rule" { - object = "${google_storage_bucket_object.object.output_name}" - bucket = "${google_storage_bucket.bucket.name}" + object = google_storage_bucket_object.object.output_name + bucket = google_storage_bucket.bucket.name role = "READER" entity = "allUsers" } resource "google_storage_bucket" "bucket" { - name = "static-content-bucket" + name = "static-content-bucket" } - resource "google_storage_bucket_object" "object" { - name = "public-object" - bucket = "${google_storage_bucket.bucket.name}" - source = "../static/img/header-logo.png" +resource "google_storage_bucket_object" "object" { + name = "public-object" + bucket = google_storage_bucket.bucket.name + source = "../static/img/header-logo.png" } ``` diff --git a/website/docs/r/storage_object_acl.html.markdown b/website/docs/r/storage_object_acl.html.markdown index e8eb1d67473..3dedc497a62 100644 --- a/website/docs/r/storage_object_acl.html.markdown +++ b/website/docs/r/storage_object_acl.html.markdown @@ -33,13 +33,13 @@ resource "google_storage_bucket" "image-store" { resource "google_storage_bucket_object" "image" { name = "image1" - bucket = "${google_storage_bucket.image-store.name}" + bucket = google_storage_bucket.image-store.name source = "image1.jpg" } resource "google_storage_object_acl" "image-store-acl" { - bucket = "${google_storage_bucket.image-store.name}" - object = "${google_storage_bucket_object.image.output_name}" + bucket = google_storage_bucket.image-store.name + object = google_storage_bucket_object.image.output_name role_entity = [ "OWNER:user-my.email@gmail.com", diff --git a/website/docs/r/storage_transfer_job.html.markdown b/website/docs/r/storage_transfer_job.html.markdown index be2bc0172f9..8e8c69bcafc 100644 --- a/website/docs/r/storage_transfer_job.html.markdown +++ b/website/docs/r/storage_transfer_job.html.markdown @@ -23,74 +23,69 @@ To get more information about Google Cloud Storage Transfer, see: Example creating a nightly Transfer Job from an AWS S3 Bucket to a GCS bucket. 
```hcl - data "google_storage_transfer_project_service_account" "default" { - project = "${var.project}" + project = var.project } resource "google_storage_bucket" "s3-backup-bucket" { name = "${var.aws_s3_bucket}-backup" storage_class = "NEARLINE" - project = "${var.project}" + project = var.project } resource "google_storage_bucket_iam_member" "s3-backup-bucket" { - bucket = "${google_storage_bucket.s3-backup-bucket.name}" - role = "roles/storage.admin" - member = "serviceAccount:${data.google_storage_transfer_project_service_account.default.email}" - depends_on = [ - "google_storage_bucket.s3-backup-bucket" - ] + bucket = google_storage_bucket.s3-backup-bucket.name + role = "roles/storage.admin" + member = "serviceAccount:${data.google_storage_transfer_project_service_account.default.email}" + depends_on = [google_storage_bucket.s3-backup-bucket] } resource "google_storage_transfer_job" "s3-bucket-nightly-backup" { - description = "Nightly backup of S3 bucket" - project = "${var.project}" - - transfer_spec { - object_conditions { - max_time_elapsed_since_last_modification = "600s" - exclude_prefixes = [ - "requests.gz" - ] - } - transfer_options { - delete_objects_unique_in_sink = false - } - aws_s3_data_source { - bucket_name = "${var.aws_s3_bucket}" - aws_access_key { - access_key_id = "${var.aws_access_key}" - secret_access_key = "${var.aws_secret_key}" - } - } - gcs_data_sink { - bucket_name = "${google_storage_bucket.s3-backup-bucket.name}" - } - } - - schedule { - schedule_start_date { - year = 2018 - month = 10 - day = 1 - } - schedule_end_date { - year = 2019 - month = 1 - day = 15 - } - start_time_of_day { - hours = 23 - minutes = 30 - seconds = 0 - nanos = 0 - } - } - - depends_on = [ - "google_storage_bucket_iam_member.s3-backup-bucket" - ] + description = "Nightly backup of S3 bucket" + project = var.project + + transfer_spec { + object_conditions { + max_time_elapsed_since_last_modification = "600s" + exclude_prefixes = [ + "requests.gz", + ] + } + transfer_options { + delete_objects_unique_in_sink = false + } + aws_s3_data_source { + bucket_name = var.aws_s3_bucket + aws_access_key { + access_key_id = var.aws_access_key + secret_access_key = var.aws_secret_key + } + } + gcs_data_sink { + bucket_name = google_storage_bucket.s3-backup-bucket.name + } + } + + schedule { + schedule_start_date { + year = 2018 + month = 10 + day = 1 + } + schedule_end_date { + year = 2019 + month = 1 + day = 15 + } + start_time_of_day { + hours = 23 + minutes = 30 + seconds = 0 + nanos = 0 + } + } + + depends_on = [google_storage_bucket_iam_member.s3-backup-bucket] } ``` diff --git a/website/docs/r/tpu_node.html.markdown b/website/docs/r/tpu_node.html.markdown index 4648fb62062..a04987cc50c 100644 --- a/website/docs/r/tpu_node.html.markdown +++ b/website/docs/r/tpu_node.html.markdown @@ -40,15 +40,17 @@ To get more information about Node, see: ```hcl -data "google_tpu_tensorflow_versions" "available" { } + +data "google_tpu_tensorflow_versions" "available" { +} resource "google_tpu_node" "tpu" { - name = "test-tpu" - zone = "us-central1-b" + name = "test-tpu" + zone = "us-central1-b" - accelerator_type = "v3-8" - tensorflow_version = "${data.google_tpu_tensorflow_versions.available.versions[0]}" - cidr_block = "10.2.0.0/29" + accelerator_type = "v3-8" + tensorflow_version = data.google_tpu_tensorflow_versions.available.versions[0] + cidr_block = "10.2.0.0/29" } ```
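A related 0.12 change appears in the `storage_transfer_job` hunk above: `depends_on` now takes bare resource addresses rather than quoted strings. A short sketch, again with hypothetical names:

```hcl
resource "google_storage_bucket" "backup" {
  name = "example-unique-backup-bucket"
}

resource "google_storage_bucket_iam_member" "writer" {
  bucket = google_storage_bucket.backup.name
  role   = "roles/storage.objectCreator"
  member = "serviceAccount:transfer@example-project.iam.gserviceaccount.com"

  # 0.11 style: depends_on = ["google_storage_bucket.backup"]
  # 0.12 style: an unquoted reference. It is redundant here, since the
  # bucket argument already implies the dependency; it is shown only to
  # illustrate the syntax.
  depends_on = [google_storage_bucket.backup]
}
```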
@@ -60,27 +62,29 @@ resource "google_tpu_node" "tpu" { ```hcl -data "google_tpu_tensorflow_versions" "available" { } +data "google_tpu_tensorflow_versions" "available" { +} + resource "google_tpu_node" "tpu" { - name = "test-tpu" - zone = "us-central1-b" + name = "test-tpu" + zone = "us-central1-b" - accelerator_type = "v3-8" + accelerator_type = "v3-8" - cidr_block = "10.3.0.0/29" - tensorflow_version = "${data.google_tpu_tensorflow_versions.available.versions[0]}" + cidr_block = "10.3.0.0/29" + tensorflow_version = data.google_tpu_tensorflow_versions.available.versions[0] - description = "Terraform Google Provider test TPU" - network = "default" + description = "Terraform Google Provider test TPU" + network = "default" - labels = { - foo = "bar" - } + labels = { + foo = "bar" + } - scheduling_config { - preemptible = true - } + scheduling_config { + preemptible = true + } } ``` @@ -146,7 +150,7 @@ The following arguments are supported: The `scheduling_config` block supports: * `preemptible` - - (Optional) + (Required) Defines whether the TPU instance is preemptible. ## Attributes Reference diff --git a/website/docs/r/usage_export_bucket.html.markdown b/website/docs/r/usage_export_bucket.html.markdown index d7bb95c66d4..6a7935c8fc1 100644 --- a/website/docs/r/usage_export_bucket.html.markdown +++ b/website/docs/r/usage_export_bucket.html.markdown @@ -25,8 +25,8 @@ safe to have multiple resources with the same backing bucket. ```hcl resource "google_project_usage_export_bucket" "usage_export" { - project = "development-project" - bucket_name = "usage-tracking-bucket" + project = "development-project" + bucket_name = "usage-tracking-bucket" } ``` diff --git a/website/google.erb b/website/google.erb index f204c11903d..1c37167f48a 100644 --- a/website/google.erb +++ b/website/google.erb @@ -168,9 +168,6 @@ > google_project_organization_policy - > - google_project_services - > google_service_account @@ -407,9 +404,6 @@ > google_project_service - > - google_project_services - > google_project_usage_export_bucket