diff --git a/.changelog/3892.txt b/.changelog/3892.txt
new file mode 100644
index 00000000000..b2ad5c99e63
--- /dev/null
+++ b/.changelog/3892.txt
@@ -0,0 +1,6 @@
+```release-note:enhancement
+
+container: added project override support to `google_container_cluster` and `google_container_node_pool`
+servicemanagement: added project override support to `google_project_service`
+
+```
diff --git a/google/resource_container_cluster.go b/google/resource_container_cluster.go
index a3383cdd109..7a416411a0b 100644
--- a/google/resource_container_cluster.go
+++ b/google/resource_container_cluster.go
@@ -1183,7 +1183,11 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er
   parent := fmt.Sprintf("projects/%s/locations/%s", project, location)
   var op *containerBeta.Operation
   err = retry(func() error {
-    op, err = config.clientContainerBeta.Projects.Locations.Clusters.Create(parent, req).Do()
+    clusterCreateCall := config.clientContainerBeta.Projects.Locations.Clusters.Create(parent, req)
+    if config.UserProjectOverride {
+      clusterCreateCall.Header().Add("X-Goog-User-Project", project)
+    }
+    op, err = clusterCreateCall.Do()
     return err
   })
   if err != nil {
@@ -1208,7 +1212,11 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er
       // leaving default case to ensure this is non blocking
     }
     // Try a GET on the cluster so we can see the state in debug logs. This will help classify error states.
-    _, getErr := config.clientContainerBeta.Projects.Locations.Clusters.Get(containerClusterFullName(project, location, clusterName)).Do()
+    clusterGetCall := config.clientContainerBeta.Projects.Locations.Clusters.Get(containerClusterFullName(project, location, clusterName))
+    if config.UserProjectOverride {
+      clusterGetCall.Header().Add("X-Goog-User-Project", project)
+    }
+    _, getErr := clusterGetCall.Do()
     if getErr != nil {
       log.Printf("[WARN] Cluster %s was created in an error state and not found", clusterName)
       d.SetId("")
@@ -1230,7 +1238,11 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er
   if d.Get("remove_default_node_pool").(bool) {
     parent := fmt.Sprintf("%s/nodePools/%s", containerClusterFullName(project, location, clusterName), "default-pool")
     err = retry(func() error {
-      op, err = config.clientContainerBeta.Projects.Locations.Clusters.NodePools.Delete(parent).Do()
+      clusterNodePoolDeleteCall := config.clientContainerBeta.Projects.Locations.Clusters.NodePools.Delete(parent)
+      if config.UserProjectOverride {
+        clusterNodePoolDeleteCall.Header().Add("X-Goog-User-Project", project)
+      }
+      op, err = clusterNodePoolDeleteCall.Do()
       return err
     })
     if err != nil {
@@ -1286,7 +1298,12 @@ func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) erro
   clusterName := d.Get("name").(string)
   name := containerClusterFullName(project, location, clusterName)
-  cluster, err := config.clientContainerBeta.Projects.Locations.Clusters.Get(name).Do()
+  clusterGetCall := config.clientContainerBeta.Projects.Locations.Clusters.Get(name)
+  if config.UserProjectOverride {
+    clusterGetCall.Header().Add("X-Goog-User-Project", project)
+  }
+
+  cluster, err := clusterGetCall.Do()
   if err != nil {
     return handleNotFoundError(err, d, fmt.Sprintf("Container Cluster %q", d.Get("name").(string)))
   }
@@ -1420,7 +1437,11 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er
   updateFunc := func(req *containerBeta.UpdateClusterRequest, updateDescription string) func() error {
     return func() error {
       name := containerClusterFullName(project, location, clusterName)
-      op, err := config.clientContainerBeta.Projects.Locations.Clusters.Update(name, req).Do()
+      clusterUpdateCall := config.clientContainerBeta.Projects.Locations.Clusters.Update(name, req)
+      if config.UserProjectOverride {
+        clusterUpdateCall.Header().Add("X-Goog-User-Project", project)
+      }
+      op, err := clusterUpdateCall.Do()
       if err != nil {
         return err
       }
@@ -1539,7 +1560,11 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er
     updateF := func() error {
       log.Println("[DEBUG] updating release_channel")
       name := containerClusterFullName(project, location, clusterName)
-      op, err := config.clientContainerBeta.Projects.Locations.Clusters.Update(name, req).Do()
+      clusterUpdateCall := config.clientContainerBeta.Projects.Locations.Clusters.Update(name, req)
+      if config.UserProjectOverride {
+        clusterUpdateCall.Header().Add("X-Goog-User-Project", project)
+      }
+      op, err := clusterUpdateCall.Do()
       if err != nil {
         return err
       }
@@ -1567,7 +1592,11 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er
     updateF := func() error {
       name := containerClusterFullName(project, location, clusterName)
-      op, err := config.clientContainerBeta.Projects.Locations.Clusters.SetMaintenancePolicy(name, req).Do()
+      clusterSetMaintenancePolicyCall := config.clientContainerBeta.Projects.Locations.Clusters.SetMaintenancePolicy(name, req)
+      if config.UserProjectOverride {
+        clusterSetMaintenancePolicyCall.Header().Add("X-Goog-User-Project", project)
+      }
+      op, err := clusterSetMaintenancePolicyCall.Do()
       if err != nil {
         return err
@@ -1646,7 +1675,11 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er
     updateF := func() error {
       log.Println("[DEBUG] updating enable_legacy_abac")
       name := containerClusterFullName(project, location, clusterName)
-      op, err := config.clientContainerBeta.Projects.Locations.Clusters.SetLegacyAbac(name, req).Do()
+      clusterSetLegacyAbacCall := config.clientContainerBeta.Projects.Locations.Clusters.SetLegacyAbac(name, req)
+      if config.UserProjectOverride {
+        clusterSetLegacyAbacCall.Header().Add("X-Goog-User-Project", project)
+      }
+      op, err := clusterSetLegacyAbacCall.Do()
       if err != nil {
         return err
       }
@@ -1679,7 +1712,11 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er
           DesiredLoggingService: logging,
         },
       }
-      op, err := config.clientContainerBeta.Projects.Locations.Clusters.Update(name, req).Do()
+      clusterUpdateCall := config.clientContainerBeta.Projects.Locations.Clusters.Update(name, req)
+      if config.UserProjectOverride {
+        clusterUpdateCall.Header().Add("X-Goog-User-Project", project)
+      }
+      op, err := clusterUpdateCall.Do()
       if err != nil {
         return err
       }
@@ -1707,7 +1744,11 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er
     updateF := func() error {
       log.Println("[DEBUG] updating network_policy")
      name := containerClusterFullName(project, location, clusterName)
-      op, err := config.clientContainerBeta.Projects.Locations.Clusters.SetNetworkPolicy(name, req).Do()
+      clusterSetNetworkPolicyCall := config.clientContainerBeta.Projects.Locations.Clusters.SetNetworkPolicy(name, req)
+      if config.UserProjectOverride {
+        clusterSetNetworkPolicyCall.Header().Add("X-Goog-User-Project", project)
+      }
+      op, err := clusterSetNetworkPolicyCall.Do()
       if err != nil {
         return err
       }
@@ -1820,7 +1861,11 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er
     updateF := func() error {
       name := containerClusterFullName(project, location, clusterName)
-      op, err := config.clientContainerBeta.Projects.Locations.Clusters.Update(name, req).Do()
+      clusterUpdateCall := config.clientContainerBeta.Projects.Locations.Clusters.Update(name, req)
+      if config.UserProjectOverride {
+        clusterUpdateCall.Header().Add("X-Goog-User-Project", project)
+      }
+      op, err := clusterUpdateCall.Do()
       if err != nil {
         return err
       }
@@ -1857,7 +1902,11 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er
     updateF := func() error {
       name := containerClusterFullName(project, location, clusterName)
-      op, err := config.clientContainerBeta.Projects.Locations.Clusters.SetMasterAuth(name, req).Do()
+      clusterSetMasterAuthCall := config.clientContainerBeta.Projects.Locations.Clusters.SetMasterAuth(name, req)
+      if config.UserProjectOverride {
+        clusterSetMasterAuthCall.Header().Add("X-Goog-User-Project", project)
+      }
+      op, err := clusterSetMasterAuthCall.Do()
       if err != nil {
         return err
       }
@@ -1905,7 +1954,11 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er
     updateF := func() error {
       name := containerClusterFullName(project, location, clusterName)
-      op, err := config.clientContainerBeta.Projects.Locations.Clusters.Update(name, req).Do()
+      clusterUpdateCall := config.clientContainerBeta.Projects.Locations.Clusters.Update(name, req)
+      if config.UserProjectOverride {
+        clusterUpdateCall.Header().Add("X-Goog-User-Project", project)
+      }
+      op, err := clusterUpdateCall.Do()
       if err != nil {
         return err
       }
@@ -1958,7 +2011,11 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er
     }
     updateF := func() error {
       name := containerClusterFullName(project, location, clusterName)
-      op, err := config.clientContainerBeta.Projects.Locations.Clusters.SetResourceLabels(name, req).Do()
+      clusterSetResourceLabelsCall := config.clientContainerBeta.Projects.Locations.Clusters.SetResourceLabels(name, req)
+      if config.UserProjectOverride {
+        clusterSetResourceLabelsCall.Header().Add("X-Goog-User-Project", project)
+      }
+      op, err := clusterSetResourceLabelsCall.Do()
       if err != nil {
         return err
       }
@@ -1977,7 +2034,11 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er
   if d.HasChange("remove_default_node_pool") && d.Get("remove_default_node_pool").(bool) {
     name := fmt.Sprintf("%s/nodePools/%s", containerClusterFullName(project, location, clusterName), "default-pool")
-    op, err := config.clientContainerBeta.Projects.Locations.Clusters.NodePools.Delete(name).Do()
+    clusterNodePoolDeleteCall := config.clientContainerBeta.Projects.Locations.Clusters.NodePools.Delete(name)
+    if config.UserProjectOverride {
+      clusterNodePoolDeleteCall.Header().Add("X-Goog-User-Project", project)
+    }
+    op, err := clusterNodePoolDeleteCall.Do()
     if err != nil {
       if !isGoogleApiErrorWithCode(err, 404) {
         return errwrap.Wrapf("Error deleting default node pool: {{err}}", err)
@@ -2001,7 +2062,11 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er
     updateF := func() error {
       name := containerClusterFullName(project, location, clusterName)
-      op, err := config.clientContainerBeta.Projects.Locations.Clusters.Update(name, req).Do()
+      clusterUpdateCall := config.clientContainerBeta.Projects.Locations.Clusters.Update(name, req)
+      if config.UserProjectOverride {
+        clusterUpdateCall.Header().Add("X-Goog-User-Project", project)
+      }
+      op, err := clusterUpdateCall.Do()
       if err != nil {
         return err
       }
@@ -2054,7 +2119,11 @@ func resourceContainerClusterDelete(d *schema.ResourceData, meta interface{}) er
     count++
     name := containerClusterFullName(project, location, clusterName)
-    op, err = config.clientContainerBeta.Projects.Locations.Clusters.Delete(name).Do()
+    clusterDeleteCall := config.clientContainerBeta.Projects.Locations.Clusters.Delete(name)
+    if config.UserProjectOverride {
+      clusterDeleteCall.Header().Add("X-Goog-User-Project", project)
+    }
+    op, err = clusterDeleteCall.Do()
     if err != nil {
       log.Printf("[WARNING] Cluster is still not ready to delete, retrying %s", clusterName)
@@ -2105,7 +2174,11 @@ func cleanFailedContainerCluster(d *schema.ResourceData, meta interface{}) error
   fullName := containerClusterFullName(project, location, clusterName)
   log.Printf("[DEBUG] Cleaning up failed GKE cluster %s", d.Get("name").(string))
-  op, err := config.clientContainerBeta.Projects.Locations.Clusters.Delete(fullName).Do()
+  clusterDeleteCall := config.clientContainerBeta.Projects.Locations.Clusters.Delete(fullName)
+  if config.UserProjectOverride {
+    clusterDeleteCall.Header().Add("X-Goog-User-Project", project)
+  }
+  op, err := clusterDeleteCall.Do()
   if err != nil {
     return handleNotFoundError(err, d, fmt.Sprintf("Container Cluster %q", d.Get("name").(string)))
   }
@@ -2131,7 +2204,11 @@ var containerClusterRestingStates = RestingStates{
 func containerClusterAwaitRestingState(config *Config, project, location, clusterName string, timeout time.Duration) (state string, err error) {
   err = resource.Retry(timeout, func() *resource.RetryError {
     name := containerClusterFullName(project, location, clusterName)
-    cluster, gErr := config.clientContainerBeta.Projects.Locations.Clusters.Get(name).Do()
+    clusterGetCall := config.clientContainerBeta.Projects.Locations.Clusters.Get(name)
+    if config.UserProjectOverride {
+      clusterGetCall.Header().Add("X-Goog-User-Project", project)
+    }
+    cluster, gErr := clusterGetCall.Do()
     if gErr != nil {
       return resource.NonRetryableError(gErr)
     }
@@ -2251,7 +2328,11 @@ func expandMaintenancePolicy(d *schema.ResourceData, meta interface{}) *containe
   location, _ := getLocation(d, config)
   clusterName := d.Get("name").(string)
   name := containerClusterFullName(project, location, clusterName)
-  cluster, _ := config.clientContainerBeta.Projects.Locations.Clusters.Get(name).Do()
+  clusterGetCall := config.clientContainerBeta.Projects.Locations.Clusters.Get(name)
+  if config.UserProjectOverride {
+    clusterGetCall.Header().Add("X-Goog-User-Project", project)
+  }
+  cluster, _ := clusterGetCall.Do()
   resourceVersion := ""
   // If the cluster doesn't exist or if there is a read error of any kind, we will pass in an empty
   // resourceVersion. If there happens to be a change to maintenance policy, we will fail at that
diff --git a/google/resource_container_cluster_test.go b/google/resource_container_cluster_test.go
index 65e50d72b5c..5718479714c 100644
--- a/google/resource_container_cluster_test.go
+++ b/google/resource_container_cluster_test.go
@@ -2784,6 +2784,9 @@ resource "google_container_cluster" "with_ip_allocation_policy" {
 func testAccContainerCluster_withResourceUsageExportConfig(clusterName, datasetId, enableMetering string) string {
   return fmt.Sprintf(`
+provider "google" {
+  user_project_override = true
+}
 resource "google_bigquery_dataset" "default" {
   dataset_id  = "%s"
   description = "gke resource usage dataset tests"
diff --git a/google/resource_container_node_pool.go b/google/resource_container_node_pool.go
index ecc726a7781..e4b7f3e919c 100644
--- a/google/resource_container_node_pool.go
+++ b/google/resource_container_node_pool.go
@@ -296,8 +296,11 @@ func resourceContainerNodePoolCreate(d *schema.ResourceData, meta interface{}) e
   var operation *containerBeta.Operation
   err = resource.Retry(timeout, func() *resource.RetryError {
-    operation, err = config.clientContainerBeta.
-      Projects.Locations.Clusters.NodePools.Create(nodePoolInfo.parent(), req).Do()
+    clusterNodePoolsCreateCall := config.clientContainerBeta.Projects.Locations.Clusters.NodePools.Create(nodePoolInfo.parent(), req)
+    if config.UserProjectOverride {
+      clusterNodePoolsCreateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project)
+    }
+    operation, err = clusterNodePoolsCreateCall.Do()
     if err != nil {
       if isFailedPreconditionError(err) {
@@ -330,7 +333,7 @@ func resourceContainerNodePoolCreate(d *schema.ResourceData, meta interface{}) e
     return err
   }
-  state, err := containerNodePoolAwaitRestingState(config, d.Id(), d.Timeout(schema.TimeoutCreate))
+  state, err := containerNodePoolAwaitRestingState(config, d.Id(), nodePoolInfo.project, d.Timeout(schema.TimeoutCreate))
   if err != nil {
     return err
   }
@@ -351,7 +354,11 @@ func resourceContainerNodePoolRead(d *schema.ResourceData, meta interface{}) err
   name := getNodePoolName(d.Id())
-  nodePool, err := config.clientContainerBeta.Projects.Locations.Clusters.NodePools.Get(nodePoolInfo.fullyQualifiedName(name)).Do()
+  clusterNodePoolsGetCall := config.clientContainerBeta.Projects.Locations.Clusters.NodePools.Get(nodePoolInfo.fullyQualifiedName(name))
+  if config.UserProjectOverride {
+    clusterNodePoolsGetCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project)
+  }
+  nodePool, err := clusterNodePoolsGetCall.Do()
   if err != nil {
     return handleNotFoundError(err, d, fmt.Sprintf("NodePool %q from cluster %q", name, nodePoolInfo.cluster))
   }
@@ -373,14 +380,13 @@ func resourceContainerNodePoolRead(d *schema.ResourceData, meta interface{}) err
 func resourceContainerNodePoolUpdate(d *schema.ResourceData, meta interface{}) error {
   config := meta.(*Config)
-
   nodePoolInfo, err := extractNodePoolInformation(d, config)
   if err != nil {
     return err
   }
   name := getNodePoolName(d.Id())
-  _, err = containerNodePoolAwaitRestingState(config, nodePoolInfo.fullyQualifiedName(name), d.Timeout(schema.TimeoutUpdate))
+  _, err = containerNodePoolAwaitRestingState(config, nodePoolInfo.fullyQualifiedName(name), nodePoolInfo.project, d.Timeout(schema.TimeoutUpdate))
   if err != nil {
     return err
   }
@@ -391,7 +397,7 @@ func resourceContainerNodePoolUpdate(d *schema.ResourceData, meta interface{}) e
   }
   d.Partial(false)
-  _, err = containerNodePoolAwaitRestingState(config, nodePoolInfo.fullyQualifiedName(name), d.Timeout(schema.TimeoutUpdate))
+  _, err = containerNodePoolAwaitRestingState(config, nodePoolInfo.fullyQualifiedName(name), nodePoolInfo.project, d.Timeout(schema.TimeoutUpdate))
   if err != nil {
     return err
   }
@@ -401,7 +407,6 @@ func resourceContainerNodePoolUpdate(d *schema.ResourceData, meta interface{}) e
 func resourceContainerNodePoolDelete(d *schema.ResourceData, meta interface{}) error {
   config := meta.(*Config)
-
   nodePoolInfo, err := extractNodePoolInformation(d, config)
   if err != nil {
     return err
@@ -409,7 +414,7 @@ func resourceContainerNodePoolDelete(d *schema.ResourceData, meta interface{}) e
   name := getNodePoolName(d.Id())
-  _, err = containerNodePoolAwaitRestingState(config, nodePoolInfo.fullyQualifiedName(name), d.Timeout(schema.TimeoutDelete))
+  _, err = containerNodePoolAwaitRestingState(config, nodePoolInfo.fullyQualifiedName(name), nodePoolInfo.project, d.Timeout(schema.TimeoutDelete))
   if err != nil {
     return err
   }
@@ -422,8 +427,11 @@ func resourceContainerNodePoolDelete(d *schema.ResourceData, meta interface{}) e
   var operation *containerBeta.Operation
   err = resource.Retry(timeout, func() *resource.RetryError {
-    operation, err = config.clientContainerBeta.
-      Projects.Locations.Clusters.NodePools.Delete(nodePoolInfo.fullyQualifiedName(name)).Do()
+    clusterNodePoolsDeleteCall := config.clientContainerBeta.Projects.Locations.Clusters.NodePools.Delete(nodePoolInfo.fullyQualifiedName(name))
+    if config.UserProjectOverride {
+      clusterNodePoolsDeleteCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project)
+    }
+    operation, err = clusterNodePoolsDeleteCall.Do()
     if err != nil {
       if isFailedPreconditionError(err) {
@@ -458,15 +466,18 @@ func resourceContainerNodePoolDelete(d *schema.ResourceData, meta interface{}) e
 func resourceContainerNodePoolExists(d *schema.ResourceData, meta interface{}) (bool, error) {
   config := meta.(*Config)
-
   nodePoolInfo, err := extractNodePoolInformation(d, config)
   if err != nil {
     return false, err
   }
   name := getNodePoolName(d.Id())
+  clusterNodePoolsGetCall := config.clientContainerBeta.Projects.Locations.Clusters.NodePools.Get(nodePoolInfo.fullyQualifiedName(name))
+  if config.UserProjectOverride {
+    clusterNodePoolsGetCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project)
+  }
+  _, err = clusterNodePoolsGetCall.Do()
-  _, err = config.clientContainerBeta.Projects.Locations.Clusters.NodePools.Get(nodePoolInfo.fullyQualifiedName(name)).Do()
   if err != nil {
     if err = handleNotFoundError(err, d, fmt.Sprintf("Container NodePool %s", name)); err == nil {
       return false, nil
@@ -490,7 +501,12 @@ func resourceContainerNodePoolStateImporter(d *schema.ResourceData, meta interfa
   d.SetId(id)
-  if _, err := containerNodePoolAwaitRestingState(config, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil {
+  project, err := getProject(d, config)
+  if err != nil {
+    return nil, err
+  }
+
+  if _, err := containerNodePoolAwaitRestingState(config, d.Id(), project, d.Timeout(schema.TimeoutCreate)); err != nil {
     return nil, err
   }
@@ -657,7 +673,6 @@ func flattenNodePool(d *schema.ResourceData, config *Config, np *containerBeta.N
 func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *NodePoolInformation, prefix string, timeout time.Duration) error {
   config := meta.(*Config)
-
   name := d.Get(prefix + "name").(string)
   lockKey := nodePoolInfo.lockKey()
@@ -685,7 +700,11 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node
     }
     updateF := func() error {
-      op, err := config.clientContainerBeta.Projects.Locations.Clusters.Update(nodePoolInfo.parent(), req).Do()
+      clusterUpdateCall := config.clientContainerBeta.Projects.Locations.Clusters.Update(nodePoolInfo.parent(), req)
+      if config.UserProjectOverride {
+        clusterUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project)
+      }
+      op, err := clusterUpdateCall.Do()
       if err != nil {
         return err
       }
@@ -719,7 +738,11 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node
     }
     updateF := func() error {
-      op, err := config.clientContainerBeta.Projects.Locations.Clusters.Update(nodePoolInfo.parent(), req).Do()
+      clusterUpdateCall := config.clientContainerBeta.Projects.Locations.Clusters.Update(nodePoolInfo.parent(), req)
+      if config.UserProjectOverride {
+        clusterUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project)
+      }
+      op, err := clusterUpdateCall.Do()
       if err != nil {
         return err
       }
@@ -750,7 +773,11 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node
       NodeCount: newSize,
     }
     updateF := func() error {
-      op, err := config.clientContainerBeta.Projects.Locations.Clusters.NodePools.SetSize(nodePoolInfo.fullyQualifiedName(name), req).Do()
+      clusterNodePoolsSetSizeCall := config.clientContainerBeta.Projects.Locations.Clusters.NodePools.SetSize(nodePoolInfo.fullyQualifiedName(name), req)
+      if config.UserProjectOverride {
+        clusterNodePoolsSetSizeCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project)
+      }
+      op, err := clusterNodePoolsSetSizeCall.Do()
       if err != nil {
         return err
@@ -788,8 +815,11 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node
     }
     updateF := func() error {
-      op, err := config.clientContainerBeta.Projects.Locations.
-        Clusters.NodePools.SetManagement(nodePoolInfo.fullyQualifiedName(name), req).Do()
+      clusterNodePoolsSetManagementCall := config.clientContainerBeta.Projects.Locations.Clusters.NodePools.SetManagement(nodePoolInfo.fullyQualifiedName(name), req)
+      if config.UserProjectOverride {
+        clusterNodePoolsSetManagementCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project)
+      }
+      op, err := clusterNodePoolsSetManagementCall.Do()
       if err != nil {
         return err
@@ -819,8 +849,11 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node
       NodeVersion: d.Get(prefix + "version").(string),
     }
     updateF := func() error {
-      op, err := config.clientContainerBeta.Projects.
-        Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req).Do()
+      clusterNodePoolsUpdateCall := config.clientContainerBeta.Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req)
+      if config.UserProjectOverride {
+        clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project)
+      }
+      op, err := clusterNodePoolsUpdateCall.Do()
       if err != nil {
         return err
@@ -849,7 +882,11 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node
       Locations: convertStringSet(d.Get(prefix + "node_locations").(*schema.Set)),
     }
     updateF := func() error {
-      op, err := config.clientContainerBeta.Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req).Do()
+      clusterNodePoolsUpdateCall := config.clientContainerBeta.Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req)
+      if config.UserProjectOverride {
+        clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project)
+      }
+      op, err := clusterNodePoolsUpdateCall.Do()
       if err != nil {
         return err
@@ -882,7 +919,11 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node
       UpgradeSettings: upgradeSettings,
     }
     updateF := func() error {
-      op, err := config.clientContainerBeta.Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req).Do()
+      clusterNodePoolsUpdateCall := config.clientContainerBeta.Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req)
+      if config.UserProjectOverride {
+        clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project)
+      }
+      op, err := clusterNodePoolsUpdateCall.Do()
       if err != nil {
         return err
@@ -919,11 +960,16 @@ var containerNodePoolRestingStates = RestingStates{
   "ERROR":   ErrorState,
 }
-// takes in a config object, full node pool name, and the current CRUD action timeout
+// takes in a config object, full node pool name, project name and the current CRUD action timeout
 // returns a state with no error if the state is a resting state, and the last state with an error otherwise
-func containerNodePoolAwaitRestingState(config *Config, name string, timeout time.Duration) (state string, err error) {
+func containerNodePoolAwaitRestingState(config *Config, name string, project string, timeout time.Duration) (state string, err error) {
+
   err = resource.Retry(timeout, func() *resource.RetryError {
-    nodePool, gErr := config.clientContainerBeta.Projects.Locations.Clusters.NodePools.Get(name).Do()
+    clusterNodePoolsGetCall := config.clientContainerBeta.Projects.Locations.Clusters.NodePools.Get(name)
+    if config.UserProjectOverride {
+      clusterNodePoolsGetCall.Header().Add("X-Goog-User-Project", project)
+    }
+    nodePool, gErr := clusterNodePoolsGetCall.Do()
     if gErr != nil {
       return resource.NonRetryableError(gErr)
     }
diff --git a/google/resource_container_node_pool_test.go b/google/resource_container_node_pool_test.go
index d6b42629f0b..f3e2eb87209 100644
--- a/google/resource_container_node_pool_test.go
+++ b/google/resource_container_node_pool_test.go
@@ -664,6 +664,9 @@ func testAccCheckContainerNodePoolDestroyProducer(t *testing.T) func(s *terrafor
 func testAccContainerNodePool_basic(cluster, np string) string {
   return fmt.Sprintf(`
+provider "google" {
+  user_project_override = true
+}
 resource "google_container_cluster" "cluster" {
   name     = "%s"
   location = "us-central1-a"
diff --git a/google/resource_google_project_service.go b/google/resource_google_project_service.go
index e8d0cbbe293..4bc861078c8 100644
--- a/google/resource_google_project_service.go
+++ b/google/resource_google_project_service.go
@@ -157,7 +157,12 @@ func resourceGoogleProjectServiceRead(d *schema.ResourceData, meta interface{})
   }
   // Verify project for services still exists
-  p, err := config.clientResourceManager.Projects.Get(project).Do()
+  projectGetCall := config.clientResourceManager.Projects.Get(project)
+  if config.UserProjectOverride {
+    projectGetCall.Header().Add("X-Goog-User-Project", project)
+  }
+  p, err := projectGetCall.Do()
+
   if err == nil && p.LifecycleState == "DELETE_REQUESTED" {
     // Construct a 404 error for handleNotFoundError
     err = &googleapi.Error{
@@ -222,9 +227,13 @@ func resourceGoogleProjectServiceUpdate(d *schema.ResourceData, meta interface{}
 func disableServiceUsageProjectService(service, project string, d *schema.ResourceData, config *Config, disableDependentServices bool) error {
   err := retryTimeDuration(func() error {
     name := fmt.Sprintf("projects/%s/services/%s", project, service)
-    sop, err := config.clientServiceUsage.Services.Disable(name, &serviceusage.DisableServiceRequest{
+    servicesDisableCall := config.clientServiceUsage.Services.Disable(name, &serviceusage.DisableServiceRequest{
       DisableDependentServices: disableDependentServices,
-    }).Do()
+    })
+    if config.UserProjectOverride {
+      servicesDisableCall.Header().Add("X-Goog-User-Project", project)
+    }
+    sop, err := servicesDisableCall.Do()
     if err != nil {
       return err
     }
diff --git a/google/resource_google_project_service_test.go b/google/resource_google_project_service_test.go
index 958502dcc94..dd21670c8b7 100644
--- a/google/resource_google_project_service_test.go
+++ b/google/resource_google_project_service_test.go
@@ -187,6 +187,9 @@ func testAccCheckProjectService(t *testing.T, services []string, pid string, exp
 func testAccProjectService_basic(services []string, pid, name, org string) string {
   return fmt.Sprintf(`
+provider "google" {
+  user_project_override = true
+}
 resource "google_project" "acceptance" {
   project_id = "%s"
   name       = "%s"
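For context only, the acceptance-test fixtures in this diff enable the override at the provider level. A minimal configuration sketch of the same pattern follows; the resource and project names are hypothetical and not part of the patch. With `user_project_override = true`, the container and service calls issued for these resources carry an `X-Goog-User-Project` header set to the resource's project, so quota and billing attribution follow that project.

```hcl
provider "google" {
  user_project_override = true
}

resource "google_container_cluster" "example" {
  name               = "example-cluster"   # hypothetical name
  location           = "us-central1-a"
  project            = "example-project"   # project sent as X-Goog-User-Project
  initial_node_count = 1
}
```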