diff --git a/.changelog/11039.txt b/.changelog/11039.txt new file mode 100644 index 00000000000..3681886db84 --- /dev/null +++ b/.changelog/11039.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +`google_vertex_ai_index_endpoint_deployed_index` +``` \ No newline at end of file diff --git a/google/provider/provider_mmv1_resources.go b/google/provider/provider_mmv1_resources.go index 29dfe448b9b..72920f7c192 100644 --- a/google/provider/provider_mmv1_resources.go +++ b/google/provider/provider_mmv1_resources.go @@ -427,9 +427,9 @@ var handwrittenIAMDatasources = map[string]*schema.Resource{ } // Resources -// Generated resources: 448 +// Generated resources: 449 // Generated IAM resources: 255 -// Total generated resources: 703 +// Total generated resources: 704 var generatedResources = map[string]*schema.Resource{ "google_folder_access_approval_settings": accessapproval.ResourceAccessApprovalFolderSettings(), "google_organization_access_approval_settings": accessapproval.ResourceAccessApprovalOrganizationSettings(), @@ -1101,6 +1101,7 @@ var generatedResources = map[string]*schema.Resource{ "google_vertex_ai_featurestore_entitytype_feature": vertexai.ResourceVertexAIFeaturestoreEntitytypeFeature(), "google_vertex_ai_index": vertexai.ResourceVertexAIIndex(), "google_vertex_ai_index_endpoint": vertexai.ResourceVertexAIIndexEndpoint(), + "google_vertex_ai_index_endpoint_deployed_index": vertexai.ResourceVertexAIIndexEndpointDeployedIndex(), "google_vertex_ai_tensorboard": vertexai.ResourceVertexAITensorboard(), "google_vmwareengine_cluster": vmwareengine.ResourceVmwareengineCluster(), "google_vmwareengine_external_access_rule": vmwareengine.ResourceVmwareengineExternalAccessRule(), diff --git a/google/services/vertexai/resource_vertex_ai_index_endpoint_deployed_index.go b/google/services/vertexai/resource_vertex_ai_index_endpoint_deployed_index.go new file mode 100644 index 00000000000..18a93fe8862 --- /dev/null +++ 
b/google/services/vertexai/resource_vertex_ai_index_endpoint_deployed_index.go @@ -0,0 +1,1163 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package vertexai + +import ( + "fmt" + "log" + "net/http" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceVertexAIIndexEndpointDeployedIndex() *schema.Resource { + return &schema.Resource{ + Create: resourceVertexAIIndexEndpointDeployedIndexCreate, + Read: resourceVertexAIIndexEndpointDeployedIndexRead, + Update: resourceVertexAIIndexEndpointDeployedIndexUpdate, + Delete: resourceVertexAIIndexEndpointDeployedIndexDelete, + + Importer: &schema.ResourceImporter{ + State: resourceVertexAIIndexEndpointDeployedIndexImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(45 * time.Minute), + Update: schema.DefaultTimeout(45 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "deployed_index_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The user specified ID of the DeployedIndex. The ID can be up to 128 characters long and must start with a letter and only contain letters, numbers, and underscores. 
The ID must be unique within the project it is created in.`, + }, + "index": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.ProjectNumberDiffSuppress, + Description: `The name of the Index this is the deployment of.`, + }, + "index_endpoint": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `Identifies the index endpoint. Must be in the format +'projects/{{project}}/locations/{{region}}/indexEndpoints/{{indexEndpoint}}'`, + }, + "automatic_resources": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `A description of resources that the DeployedIndex uses, which to large degree are decided by Vertex AI, and optionally allows only a modest additional configuration.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_replica_count": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If maxReplicaCount is not set, the default value is minReplicaCount. The max allowed replica count is 1000. + +The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. 
If this value is not provided, a no upper bound for scaling under heavy traffic will be assume, though Vertex AI may be unable to scale beyond certain replica number.`, + }, + "min_replica_count": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `The minimum number of replicas this DeployedModel will be always deployed on. If minReplicaCount is not set, the default value is 2 (we don't provide SLA when minReplicaCount=1). + +If traffic against it increases, it may dynamically be deployed onto more replicas up to [maxReplicaCount](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/AutomaticResources#FIELDS.max_replica_count), and as traffic decreases, some of these extra replicas may be freed. If the requested value is too large, the deployment will error.`, + }, + }, + }, + }, + "dedicated_resources": { + Type: schema.TypeList, + Optional: true, + Description: `A description of resources that are dedicated to the DeployedIndex, and that need a higher degree of manual configuration. The field minReplicaCount must be set to a value strictly greater than 0, or else validation will fail. We don't provide SLA when minReplicaCount=1. If maxReplicaCount is not set, the default value is minReplicaCount. The max allowed replica count is 1000. + +Available machine types for SMALL shard: e2-standard-2 and all machine types available for MEDIUM and LARGE shard. + +Available machine types for MEDIUM shard: e2-standard-16 and all machine types available for LARGE shard. + +Available machine types for LARGE shard: e2-highmem-16, n2d-standard-32. 
+ +n1-standard-16 and n1-standard-32 are still available, but we recommend e2-standard-16 and e2-highmem-16 for cost efficiency.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "machine_spec": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: `The minimum number of replicas this DeployedModel will be always deployed on.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "machine_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The type of the machine. + +See the [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types) + +See the [list of machine types supported for custom training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types). + +For [DeployedModel](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.endpoints#DeployedModel) this field is optional, and the default value is n1-standard-2. For [BatchPredictionJob](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#BatchPredictionJob) or as part of [WorkerPoolSpec](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/CustomJobSpec#WorkerPoolSpec) this field is required.`, + }, + }, + }, + }, + "min_replica_count": { + Type: schema.TypeInt, + Required: true, + Description: `The minimum number of machine replicas this DeployedModel will be always deployed on. This value must be greater than or equal to 1.`, + }, + "max_replica_count": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. 
If maxReplicaCount is not set, the default value is minReplicaCount`, + }, + }, + }, + }, + "deployed_index_auth_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `If set, the authentication is enabled for the private endpoint.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "auth_provider": { + Type: schema.TypeList, + Optional: true, + Description: `Defines the authentication provider that the DeployedIndex uses.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allowed_issuers": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `A list of allowed JWT issuers. Each entry must be a valid Google service account, in the following format: service-account-name@project-id.iam.gserviceaccount.com`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "audiences": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The list of JWT audiences. that are allowed to access. A JWT containing any of these audiences will be accepted.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + }, + }, + }, + "deployment_group": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The deployment group can be no longer than 64 characters (eg: 'test', 'prod'). If not set, we will use the 'default' deployment group. +Creating deployment_groups with reserved_ip_ranges is a recommended practice when the peered network has multiple peering ranges. This creates your deployments from predictable IP spaces for easier traffic administration. Also, one deployment_group (except 'default') can only be used with the same reserved_ip_ranges which means if the deployment_group has been used with reserved_ip_ranges: [a, b, c], using it with [a, b] or [d, e] is disallowed. 
[See the official documentation here](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.indexEndpoints#DeployedIndex.FIELDS.deployment_group). +Note: we only support up to 5 deployment groups (not including 'default').`, + Default: "default", + }, + "display_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The display name of the Index. The name can be up to 128 characters long and can consist of any UTF-8 characters.`, + }, + "enable_access_logging": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `If true, private endpoint's access logs are sent to Cloud Logging.`, + Default: false, + }, + "reserved_ip_ranges": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `A list of reserved ip ranges under the VPC network that can be used for this DeployedIndex. +If set, we will deploy the index within the provided ip ranges. Otherwise, the index might be deployed to any ip ranges under the provided VPC network. + +The value should be the name of the address (https://cloud.google.com/compute/docs/reference/rest/v1/addresses) Example: ['vertex-ai-ip-range']. + +For more information about subnets and network IP ranges, please see https://cloud.google.com/vpc/docs/subnets#manually_created_subnet_ip_ranges.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `The timestamp of when the Index was created in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.`, + }, + "index_sync_time": { + Type: schema.TypeString, + Computed: true, + Description: `The DeployedIndex may depend on various data on its original Index. Additionally when certain changes to the original Index are being done (e.g. when what the Index contains is being changed) the DeployedIndex may be asynchronously updated in the background to reflect these changes. 
If this timestamp's value is at least the [Index.update_time](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.indexes#Index.FIELDS.update_time) of the original Index, it means that this DeployedIndex and the original Index are in sync. If this timestamp is older, then to see which updates this DeployedIndex already contains (and which it does not), one must [list](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/projects.locations.operations/list#google.longrunning.Operations.ListOperations) the operations that are running on the original Index. Only the successfully completed Operations with updateTime equal or before this sync time are contained in this DeployedIndex. + +A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The name of the DeployedIndex resource.`, + }, + "private_endpoints": { + Type: schema.TypeList, + Computed: true, + Description: `Provides paths for users to send requests directly to the deployed index services running on Cloud via private services access. 
This field is populated if [network](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.indexEndpoints#IndexEndpoint.FIELDS.network) is configured.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "match_grpc_address": { + Type: schema.TypeString, + Computed: true, + Description: `The ip address used to send match gRPC requests.`, + }, + "psc_automated_endpoints": { + Type: schema.TypeList, + Computed: true, + Description: `PscAutomatedEndpoints is populated if private service connect is enabled if PscAutomatedConfig is set.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "match_address": { + Type: schema.TypeString, + Computed: true, + Description: `ip Address created by the automated forwarding rule.`, + }, + "network": { + Type: schema.TypeString, + Computed: true, + Description: `Corresponding network in pscAutomationConfigs.`, + }, + "project_id": { + Type: schema.TypeString, + Computed: true, + Description: `Corresponding projectId in pscAutomationConfigs`, + }, + }, + }, + }, + "service_attachment": { + Type: schema.TypeString, + Computed: true, + Description: `The name of the service attachment resource. 
Populated if private service connect is enabled.`, + }, + }, + }, + }, + }, + UseJSONNumber: true, + } +} + +func resourceVertexAIIndexEndpointDeployedIndexCreate(d *schema.ResourceData, meta interface{}) error { + var project string + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + deployedIndexIdProp, err := expandVertexAIIndexEndpointDeployedIndexDeployedIndexId(d.Get("deployed_index_id"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("deployed_index_id"); !tpgresource.IsEmptyValue(reflect.ValueOf(deployedIndexIdProp)) && (ok || !reflect.DeepEqual(v, deployedIndexIdProp)) { + obj["deployedIndexId"] = deployedIndexIdProp + } + indexProp, err := expandVertexAIIndexEndpointDeployedIndexIndex(d.Get("index"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("index"); !tpgresource.IsEmptyValue(reflect.ValueOf(indexProp)) && (ok || !reflect.DeepEqual(v, indexProp)) { + obj["index"] = indexProp + } + displayNameProp, err := expandVertexAIIndexEndpointDeployedIndexDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + automaticResourcesProp, err := expandVertexAIIndexEndpointDeployedIndexAutomaticResources(d.Get("automatic_resources"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("automatic_resources"); !tpgresource.IsEmptyValue(reflect.ValueOf(automaticResourcesProp)) && (ok || !reflect.DeepEqual(v, automaticResourcesProp)) { + obj["automaticResources"] = automaticResourcesProp + } + dedicatedResourcesProp, err := expandVertexAIIndexEndpointDeployedIndexDedicatedResources(d.Get("dedicated_resources"), d, config) 
+ if err != nil { + return err + } else if v, ok := d.GetOkExists("dedicated_resources"); !tpgresource.IsEmptyValue(reflect.ValueOf(dedicatedResourcesProp)) && (ok || !reflect.DeepEqual(v, dedicatedResourcesProp)) { + obj["dedicatedResources"] = dedicatedResourcesProp + } + enableAccessLoggingProp, err := expandVertexAIIndexEndpointDeployedIndexEnableAccessLogging(d.Get("enable_access_logging"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enable_access_logging"); !tpgresource.IsEmptyValue(reflect.ValueOf(enableAccessLoggingProp)) && (ok || !reflect.DeepEqual(v, enableAccessLoggingProp)) { + obj["enableAccessLogging"] = enableAccessLoggingProp + } + deployedIndexAuthConfigProp, err := expandVertexAIIndexEndpointDeployedIndexDeployedIndexAuthConfig(d.Get("deployed_index_auth_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("deployed_index_auth_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(deployedIndexAuthConfigProp)) && (ok || !reflect.DeepEqual(v, deployedIndexAuthConfigProp)) { + obj["deployedIndexAuthConfig"] = deployedIndexAuthConfigProp + } + reservedIpRangesProp, err := expandVertexAIIndexEndpointDeployedIndexReservedIpRanges(d.Get("reserved_ip_ranges"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("reserved_ip_ranges"); !tpgresource.IsEmptyValue(reflect.ValueOf(reservedIpRangesProp)) && (ok || !reflect.DeepEqual(v, reservedIpRangesProp)) { + obj["reservedIpRanges"] = reservedIpRangesProp + } + deploymentGroupProp, err := expandVertexAIIndexEndpointDeployedIndexDeploymentGroup(d.Get("deployment_group"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("deployment_group"); !tpgresource.IsEmptyValue(reflect.ValueOf(deploymentGroupProp)) && (ok || !reflect.DeepEqual(v, deploymentGroupProp)) { + obj["deploymentGroup"] = deploymentGroupProp + } + + obj, err = resourceVertexAIIndexEndpointDeployedIndexEncoder(d, meta, obj) + if 
err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{VertexAIBasePath}}{{index_endpoint}}:deployIndex") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new IndexEndpointDeployedIndex: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error creating IndexEndpointDeployedIndex: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{index_endpoint}}/deployedIndex/{{deployed_index_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = VertexAIOperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating IndexEndpointDeployedIndex", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create IndexEndpointDeployedIndex: %s", err) + } + + opRes, err = resourceVertexAIIndexEndpointDeployedIndexDecoder(d, meta, opRes) + if err != nil { + return fmt.Errorf("Error decoding response from operation: %s", err) + } + if opRes == nil { + return fmt.Errorf("Error decoding response from operation, could not find object") + } + + if err := d.Set("name", flattenVertexAIIndexEndpointDeployedIndexName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. 
+ id, err = tpgresource.ReplaceVars(d, config, "{{index_endpoint}}/deployedIndex/{{deployed_index_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating IndexEndpointDeployedIndex %q: %#v", d.Id(), res) + + return resourceVertexAIIndexEndpointDeployedIndexRead(d, meta) +} + +func resourceVertexAIIndexEndpointDeployedIndexRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{VertexAIBasePath}}{{index_endpoint}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("VertexAIIndexEndpointDeployedIndex %q", d.Id())) + } + + res, err = resourceVertexAIIndexEndpointDeployedIndexDecoder(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Decoding the object has resulted in it being gone. 
It may be marked deleted + log.Printf("[DEBUG] Removing VertexAIIndexEndpointDeployedIndex because it no longer exists.") + d.SetId("") + return nil + } + + if err := d.Set("name", flattenVertexAIIndexEndpointDeployedIndexName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading IndexEndpointDeployedIndex: %s", err) + } + if err := d.Set("deployed_index_id", flattenVertexAIIndexEndpointDeployedIndexDeployedIndexId(res["deployedIndexId"], d, config)); err != nil { + return fmt.Errorf("Error reading IndexEndpointDeployedIndex: %s", err) + } + if err := d.Set("index", flattenVertexAIIndexEndpointDeployedIndexIndex(res["index"], d, config)); err != nil { + return fmt.Errorf("Error reading IndexEndpointDeployedIndex: %s", err) + } + if err := d.Set("display_name", flattenVertexAIIndexEndpointDeployedIndexDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading IndexEndpointDeployedIndex: %s", err) + } + if err := d.Set("create_time", flattenVertexAIIndexEndpointDeployedIndexCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading IndexEndpointDeployedIndex: %s", err) + } + if err := d.Set("private_endpoints", flattenVertexAIIndexEndpointDeployedIndexPrivateEndpoints(res["privateEndpoints"], d, config)); err != nil { + return fmt.Errorf("Error reading IndexEndpointDeployedIndex: %s", err) + } + if err := d.Set("index_sync_time", flattenVertexAIIndexEndpointDeployedIndexIndexSyncTime(res["indexSyncTime"], d, config)); err != nil { + return fmt.Errorf("Error reading IndexEndpointDeployedIndex: %s", err) + } + if err := d.Set("automatic_resources", flattenVertexAIIndexEndpointDeployedIndexAutomaticResources(res["automaticResources"], d, config)); err != nil { + return fmt.Errorf("Error reading IndexEndpointDeployedIndex: %s", err) + } + if err := d.Set("dedicated_resources", flattenVertexAIIndexEndpointDeployedIndexDedicatedResources(res["dedicatedResources"], d, config)); err != nil 
{ + return fmt.Errorf("Error reading IndexEndpointDeployedIndex: %s", err) + } + if err := d.Set("enable_access_logging", flattenVertexAIIndexEndpointDeployedIndexEnableAccessLogging(res["enableAccessLogging"], d, config)); err != nil { + return fmt.Errorf("Error reading IndexEndpointDeployedIndex: %s", err) + } + if err := d.Set("deployed_index_auth_config", flattenVertexAIIndexEndpointDeployedIndexDeployedIndexAuthConfig(res["deployedIndexAuthConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading IndexEndpointDeployedIndex: %s", err) + } + if err := d.Set("reserved_ip_ranges", flattenVertexAIIndexEndpointDeployedIndexReservedIpRanges(res["reservedIpRanges"], d, config)); err != nil { + return fmt.Errorf("Error reading IndexEndpointDeployedIndex: %s", err) + } + if err := d.Set("deployment_group", flattenVertexAIIndexEndpointDeployedIndexDeploymentGroup(res["deploymentGroup"], d, config)); err != nil { + return fmt.Errorf("Error reading IndexEndpointDeployedIndex: %s", err) + } + + return nil +} + +func resourceVertexAIIndexEndpointDeployedIndexUpdate(d *schema.ResourceData, meta interface{}) error { + var project string + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + deployedIndexIdProp, err := expandVertexAIIndexEndpointDeployedIndexDeployedIndexId(d.Get("deployed_index_id"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("deployed_index_id"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, deployedIndexIdProp)) { + obj["deployedIndexId"] = deployedIndexIdProp + } + indexProp, err := expandVertexAIIndexEndpointDeployedIndexIndex(d.Get("index"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("index"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, indexProp)) 
{ + obj["index"] = indexProp + } + displayNameProp, err := expandVertexAIIndexEndpointDeployedIndexDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + automaticResourcesProp, err := expandVertexAIIndexEndpointDeployedIndexAutomaticResources(d.Get("automatic_resources"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("automatic_resources"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, automaticResourcesProp)) { + obj["automaticResources"] = automaticResourcesProp + } + dedicatedResourcesProp, err := expandVertexAIIndexEndpointDeployedIndexDedicatedResources(d.Get("dedicated_resources"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("dedicated_resources"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, dedicatedResourcesProp)) { + obj["dedicatedResources"] = dedicatedResourcesProp + } + enableAccessLoggingProp, err := expandVertexAIIndexEndpointDeployedIndexEnableAccessLogging(d.Get("enable_access_logging"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enable_access_logging"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enableAccessLoggingProp)) { + obj["enableAccessLogging"] = enableAccessLoggingProp + } + deployedIndexAuthConfigProp, err := expandVertexAIIndexEndpointDeployedIndexDeployedIndexAuthConfig(d.Get("deployed_index_auth_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("deployed_index_auth_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, deployedIndexAuthConfigProp)) { + obj["deployedIndexAuthConfig"] = deployedIndexAuthConfigProp + } + reservedIpRangesProp, err := 
expandVertexAIIndexEndpointDeployedIndexReservedIpRanges(d.Get("reserved_ip_ranges"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("reserved_ip_ranges"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, reservedIpRangesProp)) { + obj["reservedIpRanges"] = reservedIpRangesProp + } + deploymentGroupProp, err := expandVertexAIIndexEndpointDeployedIndexDeploymentGroup(d.Get("deployment_group"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("deployment_group"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, deploymentGroupProp)) { + obj["deploymentGroup"] = deploymentGroupProp + } + + obj, err = resourceVertexAIIndexEndpointDeployedIndexUpdateEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{VertexAIBasePath}}{{index_endpoint}}:mutateDeployedIndex") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating IndexEndpointDeployedIndex %q: %#v", d.Id(), obj) + headers := make(http.Header) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + Headers: headers, + }) + + if err != nil { + return fmt.Errorf("Error updating IndexEndpointDeployedIndex %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating IndexEndpointDeployedIndex %q: %#v", d.Id(), res) + } + + err = VertexAIOperationWaitTime( + config, res, project, "Updating IndexEndpointDeployedIndex", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceVertexAIIndexEndpointDeployedIndexRead(d, meta) +} + +func 
resourceVertexAIIndexEndpointDeployedIndexDelete(d *schema.ResourceData, meta interface{}) error { + var project string + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{VertexAIBasePath}}{{index_endpoint}}:undeployIndex") + if err != nil { + return err + } + + var obj map[string]interface{} + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + obj = map[string]interface{}{ + "deployedIndexId": d.Get("deployed_index_id"), + } + + log.Printf("[DEBUG] Deleting IndexEndpointDeployedIndex %q", d.Id()) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "IndexEndpointDeployedIndex") + } + + err = VertexAIOperationWaitTime( + config, res, project, "Deleting IndexEndpointDeployedIndex", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting IndexEndpointDeployedIndex %q: %#v", d.Id(), res) + return nil +} + +func resourceVertexAIIndexEndpointDeployedIndexImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + // current import_formats can't import fields with forward slashes in their value + if err := tpgresource.ParseImportId([]string{ + "(?P.+)/deployedIndex/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, 
"{{index_endpoint}}/deployedIndex/{{deployed_index_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenVertexAIIndexEndpointDeployedIndexName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAIIndexEndpointDeployedIndexDeployedIndexId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAIIndexEndpointDeployedIndexIndex(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAIIndexEndpointDeployedIndexDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAIIndexEndpointDeployedIndexCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAIIndexEndpointDeployedIndexPrivateEndpoints(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["match_grpc_address"] = + flattenVertexAIIndexEndpointDeployedIndexPrivateEndpointsMatchGrpcAddress(original["matchGrpcAddress"], d, config) + transformed["service_attachment"] = + flattenVertexAIIndexEndpointDeployedIndexPrivateEndpointsServiceAttachment(original["serviceAttachment"], d, config) + transformed["psc_automated_endpoints"] = + flattenVertexAIIndexEndpointDeployedIndexPrivateEndpointsPscAutomatedEndpoints(original["pscAutomatedEndpoints"], d, config) + return []interface{}{transformed} +} +func flattenVertexAIIndexEndpointDeployedIndexPrivateEndpointsMatchGrpcAddress(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenVertexAIIndexEndpointDeployedIndexPrivateEndpointsServiceAttachment(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAIIndexEndpointDeployedIndexPrivateEndpointsPscAutomatedEndpoints(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "project_id": flattenVertexAIIndexEndpointDeployedIndexPrivateEndpointsPscAutomatedEndpointsProjectId(original["projectId"], d, config), + "network": flattenVertexAIIndexEndpointDeployedIndexPrivateEndpointsPscAutomatedEndpointsNetwork(original["network"], d, config), + "match_address": flattenVertexAIIndexEndpointDeployedIndexPrivateEndpointsPscAutomatedEndpointsMatchAddress(original["matchAddress"], d, config), + }) + } + return transformed +} +func flattenVertexAIIndexEndpointDeployedIndexPrivateEndpointsPscAutomatedEndpointsProjectId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAIIndexEndpointDeployedIndexPrivateEndpointsPscAutomatedEndpointsNetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAIIndexEndpointDeployedIndexPrivateEndpointsPscAutomatedEndpointsMatchAddress(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAIIndexEndpointDeployedIndexIndexSyncTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAIIndexEndpointDeployedIndexAutomaticResources(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["min_replica_count"] = + flattenVertexAIIndexEndpointDeployedIndexAutomaticResourcesMinReplicaCount(original["minReplicaCount"], d, config) + transformed["max_replica_count"] = + flattenVertexAIIndexEndpointDeployedIndexAutomaticResourcesMaxReplicaCount(original["maxReplicaCount"], d, config) + return []interface{}{transformed} +} +func flattenVertexAIIndexEndpointDeployedIndexAutomaticResourcesMinReplicaCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenVertexAIIndexEndpointDeployedIndexAutomaticResourcesMaxReplicaCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenVertexAIIndexEndpointDeployedIndexDedicatedResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["machine_spec"] = + 
flattenVertexAIIndexEndpointDeployedIndexDedicatedResourcesMachineSpec(original["machineSpec"], d, config) + transformed["min_replica_count"] = + flattenVertexAIIndexEndpointDeployedIndexDedicatedResourcesMinReplicaCount(original["minReplicaCount"], d, config) + transformed["max_replica_count"] = + flattenVertexAIIndexEndpointDeployedIndexDedicatedResourcesMaxReplicaCount(original["maxReplicaCount"], d, config) + return []interface{}{transformed} +} +func flattenVertexAIIndexEndpointDeployedIndexDedicatedResourcesMachineSpec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["machine_type"] = + flattenVertexAIIndexEndpointDeployedIndexDedicatedResourcesMachineSpecMachineType(original["machineType"], d, config) + return []interface{}{transformed} +} +func flattenVertexAIIndexEndpointDeployedIndexDedicatedResourcesMachineSpecMachineType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAIIndexEndpointDeployedIndexDedicatedResourcesMinReplicaCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenVertexAIIndexEndpointDeployedIndexDedicatedResourcesMaxReplicaCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return 
intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenVertexAIIndexEndpointDeployedIndexEnableAccessLogging(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAIIndexEndpointDeployedIndexDeployedIndexAuthConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["auth_provider"] = + flattenVertexAIIndexEndpointDeployedIndexDeployedIndexAuthConfigAuthProvider(original["authProvider"], d, config) + return []interface{}{transformed} +} +func flattenVertexAIIndexEndpointDeployedIndexDeployedIndexAuthConfigAuthProvider(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["audiences"] = + flattenVertexAIIndexEndpointDeployedIndexDeployedIndexAuthConfigAuthProviderAudiences(original["audiences"], d, config) + transformed["allowed_issuers"] = + flattenVertexAIIndexEndpointDeployedIndexDeployedIndexAuthConfigAuthProviderAllowedIssuers(original["allowedIssuers"], d, config) + return []interface{}{transformed} +} +func flattenVertexAIIndexEndpointDeployedIndexDeployedIndexAuthConfigAuthProviderAudiences(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAIIndexEndpointDeployedIndexDeployedIndexAuthConfigAuthProviderAllowedIssuers(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenVertexAIIndexEndpointDeployedIndexReservedIpRanges(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAIIndexEndpointDeployedIndexDeploymentGroup(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandVertexAIIndexEndpointDeployedIndexDeployedIndexId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVertexAIIndexEndpointDeployedIndexIndex(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVertexAIIndexEndpointDeployedIndexDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVertexAIIndexEndpointDeployedIndexAutomaticResources(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedMinReplicaCount, err := expandVertexAIIndexEndpointDeployedIndexAutomaticResourcesMinReplicaCount(original["min_replica_count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinReplicaCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minReplicaCount"] = transformedMinReplicaCount + } + + transformedMaxReplicaCount, err := expandVertexAIIndexEndpointDeployedIndexAutomaticResourcesMaxReplicaCount(original["max_replica_count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMaxReplicaCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["maxReplicaCount"] = transformedMaxReplicaCount + } + + return transformed, nil +} + 
+func expandVertexAIIndexEndpointDeployedIndexAutomaticResourcesMinReplicaCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVertexAIIndexEndpointDeployedIndexAutomaticResourcesMaxReplicaCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVertexAIIndexEndpointDeployedIndexDedicatedResources(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedMachineSpec, err := expandVertexAIIndexEndpointDeployedIndexDedicatedResourcesMachineSpec(original["machine_spec"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMachineSpec); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["machineSpec"] = transformedMachineSpec + } + + transformedMinReplicaCount, err := expandVertexAIIndexEndpointDeployedIndexDedicatedResourcesMinReplicaCount(original["min_replica_count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinReplicaCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minReplicaCount"] = transformedMinReplicaCount + } + + transformedMaxReplicaCount, err := expandVertexAIIndexEndpointDeployedIndexDedicatedResourcesMaxReplicaCount(original["max_replica_count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMaxReplicaCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["maxReplicaCount"] = transformedMaxReplicaCount + } + + return transformed, nil +} + +func expandVertexAIIndexEndpointDeployedIndexDedicatedResourcesMachineSpec(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedMachineType, err := expandVertexAIIndexEndpointDeployedIndexDedicatedResourcesMachineSpecMachineType(original["machine_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMachineType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["machineType"] = transformedMachineType + } + + return transformed, nil +} + +func expandVertexAIIndexEndpointDeployedIndexDedicatedResourcesMachineSpecMachineType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVertexAIIndexEndpointDeployedIndexDedicatedResourcesMinReplicaCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVertexAIIndexEndpointDeployedIndexDedicatedResourcesMaxReplicaCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVertexAIIndexEndpointDeployedIndexEnableAccessLogging(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVertexAIIndexEndpointDeployedIndexDeployedIndexAuthConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedAuthProvider, err := expandVertexAIIndexEndpointDeployedIndexDeployedIndexAuthConfigAuthProvider(original["auth_provider"], d, config) + if err != 
nil { + return nil, err + } else if val := reflect.ValueOf(transformedAuthProvider); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["authProvider"] = transformedAuthProvider + } + + return transformed, nil +} + +func expandVertexAIIndexEndpointDeployedIndexDeployedIndexAuthConfigAuthProvider(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedAudiences, err := expandVertexAIIndexEndpointDeployedIndexDeployedIndexAuthConfigAuthProviderAudiences(original["audiences"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAudiences); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["audiences"] = transformedAudiences + } + + transformedAllowedIssuers, err := expandVertexAIIndexEndpointDeployedIndexDeployedIndexAuthConfigAuthProviderAllowedIssuers(original["allowed_issuers"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAllowedIssuers); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["allowedIssuers"] = transformedAllowedIssuers + } + + return transformed, nil +} + +func expandVertexAIIndexEndpointDeployedIndexDeployedIndexAuthConfigAuthProviderAudiences(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVertexAIIndexEndpointDeployedIndexDeployedIndexAuthConfigAuthProviderAllowedIssuers(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVertexAIIndexEndpointDeployedIndexReservedIpRanges(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandVertexAIIndexEndpointDeployedIndexDeploymentGroup(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceVertexAIIndexEndpointDeployedIndexEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + req := make(map[string]interface{}) + obj["id"] = d.Get("deployed_index_id") + delete(obj, "deployedIndexId") + delete(obj, "name") + delete(obj, "indexEndpoint") + req["deployedIndex"] = obj + return req, nil +} + +func resourceVertexAIIndexEndpointDeployedIndexUpdateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + obj["id"] = obj["deployedIndexId"] + delete(obj, "deployedIndexId") + return obj, nil +} + +func resourceVertexAIIndexEndpointDeployedIndexDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + v, ok := res["deployedIndexes"] + if !ok || v == nil { // CREATE + res["name"] = res["deployedIndexId"] + delete(res, "deployedIndexId") + return res, nil + } + dpIndex := make(map[string]interface{}) + for _, v := range v.([]interface{}) { + dpI := v.(map[string]interface{}) + if dpI["id"] == d.Get("deployed_index_id").(string) { + dpI["indexEndpoint"] = d.Get("index_endpoint") + dpI["deployedIndexId"] = d.Get("deployed_index_id") + dpIndex = dpI + break + } + } + if dpIndex == nil { + return nil, fmt.Errorf("Error: Deployment Index not Found") + } + return dpIndex, nil +} diff --git a/google/services/vertexai/resource_vertex_ai_index_endpoint_deployed_index_generated_test.go b/google/services/vertexai/resource_vertex_ai_index_endpoint_deployed_index_generated_test.go new file mode 100644 index 00000000000..98eb31c1ae9 --- /dev/null +++ b/google/services/vertexai/resource_vertex_ai_index_endpoint_deployed_index_generated_test.go @@ -0,0 +1,487 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package vertexai_test + +import ( + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func TestAccVertexAIIndexEndpointDeployedIndex_vertexAiIndexEndpointDeployedIndexBasicExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "address_name": acctest.BootstrapSharedTestGlobalAddress(t, "vpc-network-1", acctest.AddressWithPrefixLength(8)), + "network_name": acctest.BootstrapSharedServiceNetworkingConnection(t, "vpc-network-1"), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckVertexAIIndexEndpointDeployedIndexDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccVertexAIIndexEndpointDeployedIndex_vertexAiIndexEndpointDeployedIndexBasicExample(context), + }, + { + ResourceName: "google_vertex_ai_index_endpoint_deployed_index.basic_deployed_index", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"index_endpoint"}, + }, + }, + 
}) +} + +func testAccVertexAIIndexEndpointDeployedIndex_vertexAiIndexEndpointDeployedIndexBasicExample(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_service_account" "sa" { + account_id = "tf-test-vertex-sa%{random_suffix}" +} + +resource "google_vertex_ai_index_endpoint_deployed_index" "basic_deployed_index" { + depends_on = [ google_vertex_ai_index_endpoint.vertex_index_endpoint_deployed, google_service_account.sa ] + index_endpoint = google_vertex_ai_index_endpoint.vertex_index_endpoint_deployed.id + index = google_vertex_ai_index.index.id // this is the index that will be deployed onto an endpoint + deployed_index_id = "tf_test_deployed_index_id%{random_suffix}" + reserved_ip_ranges = ["%{address_name}"] + enable_access_logging = false + display_name = "tf-test-vertex-deployed-index%{random_suffix}" + deployed_index_auth_config{ + auth_provider{ + audiences = ["123456-my-app"] + allowed_issuers = ["${google_service_account.sa.email}"] + } + } +} + +resource "google_storage_bucket" "bucket" { + name = "tf-test-bucket-name%{random_suffix}" + location = "us-central1" + uniform_bucket_level_access = true +} + +# The sample data comes from the following link: +# https://cloud.google.com/vertex-ai/docs/matching-engine/filtering#specify-namespaces-tokens +resource "google_storage_bucket_object" "data" { + name = "contents/data.json" + bucket = google_storage_bucket.bucket.name + content = < + + Open in Cloud Shell + + +## Example Usage - Vertex Ai Index Endpoint Deployed Index Basic + + +```hcl +resource "google_service_account" "sa" { + account_id = "vertex-sa" +} + +resource "google_vertex_ai_index_endpoint_deployed_index" "basic_deployed_index" { + depends_on = [ google_vertex_ai_index_endpoint.vertex_index_endpoint_deployed, google_service_account.sa ] + index_endpoint = google_vertex_ai_index_endpoint.vertex_index_endpoint_deployed.id + index = google_vertex_ai_index.index.id // this is the index that will be deployed onto 
an endpoint + deployed_index_id = "deployed_index_id" + reserved_ip_ranges = ["vertex-ai-range"] + enable_access_logging = false + display_name = "vertex-deployed-index" + deployed_index_auth_config{ + auth_provider{ + audiences = ["123456-my-app"] + allowed_issuers = ["${google_service_account.sa.email}"] + } + } +} + +resource "google_storage_bucket" "bucket" { + name = "bucket-name" + location = "us-central1" + uniform_bucket_level_access = true +} + +# The sample data comes from the following link: +# https://cloud.google.com/vertex-ai/docs/matching-engine/filtering#specify-namespaces-tokens +resource "google_storage_bucket_object" "data" { + name = "contents/data.json" + bucket = google_storage_bucket.bucket.name + content = < + + Open in Cloud Shell + + +## Example Usage - Vertex Ai Index Endpoint Deployed Index Basic Two + + +```hcl +resource "google_service_account" "sa" { + account_id = "vertex-sa" +} + +resource "google_vertex_ai_index_endpoint_deployed_index" "basic_deployed_index" { + depends_on = [ google_vertex_ai_index_endpoint.vertex_index_endpoint_deployed, google_service_account.sa ] + index_endpoint = google_vertex_ai_index_endpoint.vertex_index_endpoint_deployed.id + index = google_vertex_ai_index.index.id // this is the index that will be deployed onto an endpoint + deployed_index_id = "deployed_index_id" + reserved_ip_ranges = ["vertex-ai-range"] + enable_access_logging = false + display_name = "vertex-deployed-index" + deployed_index_auth_config{ + auth_provider{ + audiences = ["123456-my-app"] + allowed_issuers = ["${google_service_account.sa.email}"] + } + } + automatic_resources{ + max_replica_count = 4 + } +} + +resource "google_storage_bucket" "bucket" { + name = "bucket-name" + location = "us-central1" + uniform_bucket_level_access = true +} + +# The sample data comes from the following link: +# https://cloud.google.com/vertex-ai/docs/matching-engine/filtering#specify-namespaces-tokens +resource "google_storage_bucket_object" "data" { + 
name = "contents/data.json"
+  bucket  = google_storage_bucket.bucket.name
+  content = <The `automatic_resources` block supports:
+
+* `min_replica_count` -
+  (Optional)
+  The minimum number of replicas this DeployedModel will always be deployed on. If minReplicaCount is not set, the default value is 2 (we don't provide SLA when minReplicaCount=1).
+  If traffic against it increases, it may dynamically be deployed onto more replicas up to [maxReplicaCount](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/AutomaticResources#FIELDS.max_replica_count), and as traffic decreases, some of these extra replicas may be freed. If the requested value is too large, the deployment will error.
+
+* `max_replica_count` -
+  (Optional)
+  The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If maxReplicaCount is not set, the default value is minReplicaCount. The max allowed replica count is 1000.
+  The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, no upper bound for scaling under heavy traffic will be assumed, though Vertex AI may be unable to scale beyond a certain replica number.
+
+The `dedicated_resources` block supports:
+
+* `machine_spec` -
+  (Required)
+  The specification of a single machine used by the deployment.
+  Structure is [documented below](#nested_machine_spec).
+
+* `min_replica_count` -
+  (Required)
+  The minimum number of machine replicas this DeployedModel will always be deployed on. This value must be greater than or equal to 1.
+
+* `max_replica_count` -
+  (Optional)
+  The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If maxReplicaCount is not set, the default value is minReplicaCount.
+
+
+The `machine_spec` block supports:
+
+* `machine_type` -
+  (Optional)
+  The type of the machine.
+  See the [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types)
+  See the [list of machine types supported for custom training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types).
+  For [DeployedModel](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.endpoints#DeployedModel) this field is optional, and the default value is n1-standard-2. For [BatchPredictionJob](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#BatchPredictionJob) or as part of [WorkerPoolSpec](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/CustomJobSpec#WorkerPoolSpec) this field is required.
+
+The `deployed_index_auth_config` block supports:
+
+* `auth_provider` -
+  (Optional)
+  Defines the authentication provider that the DeployedIndex uses.
+  Structure is [documented below](#nested_auth_provider).
+
+
+The `auth_provider` block supports:
+
+* `audiences` -
+  (Optional)
+  The list of JWT audiences that are allowed to access. A JWT containing any of these audiences will be accepted.
+
+* `allowed_issuers` -
+  (Optional)
+  A list of allowed JWT issuers. Each entry must be a valid Google service account, in the following format: service-account-name@project-id.iam.gserviceaccount.com.
+
+## Attributes Reference
+
+In addition to the arguments listed above, the following computed attributes are exported:
+
+* `id` - an identifier for the resource with format `{{index_endpoint}}/deployedIndex/{{deployed_index_id}}`
+
+* `name` -
+  The name of the DeployedIndex resource.
+ +* `create_time` - + The timestamp of when the Index was created in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + +* `private_endpoints` - + Provides paths for users to send requests directly to the deployed index services running on Cloud via private services access. This field is populated if [network](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.indexEndpoints#IndexEndpoint.FIELDS.network) is configured. + Structure is [documented below](#nested_private_endpoints). + +* `index_sync_time` - + The DeployedIndex may depend on various data on its original Index. Additionally when certain changes to the original Index are being done (e.g. when what the Index contains is being changed) the DeployedIndex may be asynchronously updated in the background to reflect these changes. If this timestamp's value is at least the [Index.update_time](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.indexes#Index.FIELDS.update_time) of the original Index, it means that this DeployedIndex and the original Index are in sync. If this timestamp is older, then to see which updates this DeployedIndex already contains (and which it does not), one must [list](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/projects.locations.operations/list#google.longrunning.Operations.ListOperations) the operations that are running on the original Index. Only the successfully completed Operations with updateTime equal or before this sync time are contained in this DeployedIndex. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + + +The `private_endpoints` block contains: + +* `match_grpc_address` - + (Output) + The ip address used to send match gRPC requests. + +* `service_attachment` - + (Output) + The name of the service attachment resource. 
Populated if private service connect is enabled.
+
+* `psc_automated_endpoints` -
+  (Output)
+  PscAutomatedEndpoints is populated if private service connect is enabled and PscAutomatedConfig is set.
+  Structure is [documented below](#nested_psc_automated_endpoints).
+
+
+The `psc_automated_endpoints` block contains:
+
+* `project_id` -
+  (Output)
+  Corresponding projectId in pscAutomationConfigs.
+
+* `network` -
+  (Output)
+  Corresponding network in pscAutomationConfigs.
+
+* `match_address` -
+  (Output)
+  IP address created by the automated forwarding rule.
+
+## Timeouts
+
+This resource provides the following
+[Timeouts](https://developer.hashicorp.com/terraform/plugin/sdkv2/resources/retries-and-customizable-timeouts) configuration options:
+
+- `create` - Default is 45 minutes.
+- `update` - Default is 45 minutes.
+- `delete` - Default is 20 minutes.
+
+## Import
+
+
+IndexEndpointDeployedIndex can be imported using any of these accepted formats:
+
+* `projects/{{project}}/locations/{{region}}/indexEndpoints/{{index_endpoint}}/deployedIndex/{{deployed_index_id}}`
+* `{{project}}/{{region}}/{{index_endpoint}}/{{deployed_index_id}}`
+* `{{region}}/{{index_endpoint}}/{{deployed_index_id}}`
+* `{{index_endpoint}}/{{deployed_index_id}}`
+
+
+In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import IndexEndpointDeployedIndex using one of the formats above. For example:
+
+```tf
+import {
+  id = "projects/{{project}}/locations/{{region}}/indexEndpoints/{{index_endpoint}}/deployedIndex/{{deployed_index_id}}"
+  to = google_vertex_ai_index_endpoint_deployed_index.default
+}
+```
+
+When using the [`terraform import` command](https://developer.hashicorp.com/terraform/cli/commands/import), IndexEndpointDeployedIndex can be imported using one of the formats above.
For example: + +``` +$ terraform import google_vertex_ai_index_endpoint_deployed_index.default projects/{{project}}/locations/{{region}}/indexEndpoints/{{index_endpoint}}/deployedIndex/{{deployed_index_id}} +$ terraform import google_vertex_ai_index_endpoint_deployed_index.default {{project}}/{{region}}/{{index_endpoint}}/{{deployed_index_id}} +$ terraform import google_vertex_ai_index_endpoint_deployed_index.default {{region}}/{{index_endpoint}}/{{deployed_index_id}} +$ terraform import google_vertex_ai_index_endpoint_deployed_index.default {{index_endpoint}}/{{deployed_index_id}} +```