diff --git a/go.mod b/go.mod index a6484397aec..3df25cec353 100644 --- a/go.mod +++ b/go.mod @@ -63,6 +63,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/costandusagereportservice v1.23.4 github.com/aws/aws-sdk-go-v2/service/costoptimizationhub v1.4.4 github.com/aws/aws-sdk-go-v2/service/customerprofiles v1.36.4 + github.com/aws/aws-sdk-go-v2/service/datasync v1.36.4 github.com/aws/aws-sdk-go-v2/service/datazone v1.8.0 github.com/aws/aws-sdk-go-v2/service/dax v1.19.4 github.com/aws/aws-sdk-go-v2/service/devopsguru v1.30.4 diff --git a/go.sum b/go.sum index 86b68fa17b2..4e8005b21a7 100644 --- a/go.sum +++ b/go.sum @@ -146,6 +146,8 @@ github.com/aws/aws-sdk-go-v2/service/costoptimizationhub v1.4.4 h1:gSO6kMlH4cXxB github.com/aws/aws-sdk-go-v2/service/costoptimizationhub v1.4.4/go.mod h1:UkyRWEyu3iT7oPmPri8xwPnKXqJQzSUDK9MOKq7xyZE= github.com/aws/aws-sdk-go-v2/service/customerprofiles v1.36.4 h1:UBo3t3uliQIP3f8duZhmJ1Z62bz/j5o7LH8f/BTt1mU= github.com/aws/aws-sdk-go-v2/service/customerprofiles v1.36.4/go.mod h1:NGHeOPrlK475HqycL4V02Ubc67Wm+D09Xh4pO6g2c8g= +github.com/aws/aws-sdk-go-v2/service/datasync v1.36.4 h1:B5avI4R+VxroaKOgZGLQW9yBj0qOHssVi+jJqSCOwEw= +github.com/aws/aws-sdk-go-v2/service/datasync v1.36.4/go.mod h1:AT/X92EowfcC8JIqYweBLUN9js/BcHwzAYC5XwWtaYk= github.com/aws/aws-sdk-go-v2/service/datazone v1.8.0 h1:wJ4bVNXoSTJIMfxgebI32qbxEVLqGVdGCC2f/yyvNxs= github.com/aws/aws-sdk-go-v2/service/datazone v1.8.0/go.mod h1:6UUbvwSg1ADRoK5rYXGDWjwzSyy8mElzIhVSqCXwNYE= github.com/aws/aws-sdk-go-v2/service/dax v1.19.4 h1:S3mvtYjRVVsg1R4EuV1LWZUiD72t+pfnBbK8TL7zEmo= diff --git a/internal/conns/awsclient_gen.go b/internal/conns/awsclient_gen.go index 13f7e5badfc..ed5a7089fb7 100644 --- a/internal/conns/awsclient_gen.go +++ b/internal/conns/awsclient_gen.go @@ -55,6 +55,7 @@ import ( costandusagereportservice_sdkv2 "github.com/aws/aws-sdk-go-v2/service/costandusagereportservice" costoptimizationhub_sdkv2 "github.com/aws/aws-sdk-go-v2/service/costoptimizationhub" customerprofiles_sdkv2 "github.com/aws/aws-sdk-go-v2/service/customerprofiles" + datasync_sdkv2 "github.com/aws/aws-sdk-go-v2/service/datasync" datazone_sdkv2 "github.com/aws/aws-sdk-go-v2/service/datazone" dax_sdkv2 "github.com/aws/aws-sdk-go-v2/service/dax" devopsguru_sdkv2 "github.com/aws/aws-sdk-go-v2/service/devopsguru" @@ -173,7 +174,6 @@ import ( databasemigrationservice_sdkv1 "github.com/aws/aws-sdk-go/service/databasemigrationservice" dataexchange_sdkv1 "github.com/aws/aws-sdk-go/service/dataexchange" datapipeline_sdkv1 "github.com/aws/aws-sdk-go/service/datapipeline" - datasync_sdkv1 "github.com/aws/aws-sdk-go/service/datasync" detective_sdkv1 "github.com/aws/aws-sdk-go/service/detective" devicefarm_sdkv1 "github.com/aws/aws-sdk-go/service/devicefarm" directconnect_sdkv1 "github.com/aws/aws-sdk-go/service/directconnect" @@ -538,8 +538,8 @@ func (c *AWSClient) DataPipelineConn(ctx context.Context) *datapipeline_sdkv1.Da return errs.Must(conn[*datapipeline_sdkv1.DataPipeline](ctx, c, names.DataPipeline, make(map[string]any))) } -func (c *AWSClient) DataSyncConn(ctx context.Context) *datasync_sdkv1.DataSync { - return errs.Must(conn[*datasync_sdkv1.DataSync](ctx, c, names.DataSync, make(map[string]any))) +func (c *AWSClient) DataSyncClient(ctx context.Context) *datasync_sdkv2.Client { + return errs.Must(client[*datasync_sdkv2.Client](ctx, c, names.DataSync, make(map[string]any))) } func (c *AWSClient) DataZoneClient(ctx context.Context) *datazone_sdkv2.Client { diff --git a/internal/service/datasync/agent.go 
b/internal/service/datasync/agent.go index 51dbe885caf..92f758dafd4 100644 --- a/internal/service/datasync/agent.go +++ b/internal/service/datasync/agent.go @@ -11,9 +11,9 @@ import ( "net/http" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/datasync" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/datasync" + awstypes "github.com/aws/aws-sdk-go-v2/service/datasync/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -99,7 +99,7 @@ func ResourceAgent() *schema.Resource { func resourceAgentCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataSyncConn(ctx) + conn := meta.(*conns.AWSClient).DataSyncClient(ctx) activationKey := d.Get("activation_key").(string) agentIpAddress := d.Get("ip_address").(string) @@ -189,24 +189,24 @@ func resourceAgentCreate(ctx context.Context, d *schema.ResourceData, meta inter } if v, ok := d.GetOk("security_group_arns"); ok { - input.SecurityGroupArns = flex.ExpandStringSet(v.(*schema.Set)) + input.SecurityGroupArns = flex.ExpandStringValueSet(v.(*schema.Set)) } if v, ok := d.GetOk("subnet_arns"); ok { - input.SubnetArns = flex.ExpandStringSet(v.(*schema.Set)) + input.SubnetArns = flex.ExpandStringValueSet(v.(*schema.Set)) } if v, ok := d.GetOk("vpc_endpoint_id"); ok { input.VpcEndpointId = aws.String(v.(string)) } - output, err := conn.CreateAgentWithContext(ctx, input) + output, err := conn.CreateAgent(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating DataSync Agent: %s", err) } - d.SetId(aws.StringValue(output.AgentArn)) + d.SetId(aws.ToString(output.AgentArn)) _, err = tfresource.RetryWhenNotFound(ctx, d.Timeout(schema.TimeoutCreate), func() (interface{}, error) { return FindAgentByARN(ctx, conn, d.Id()) @@ -221,7 +221,7 @@ func resourceAgentCreate(ctx context.Context, d *schema.ResourceData, meta inter func resourceAgentRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataSyncConn(ctx) + conn := meta.(*conns.AWSClient).DataSyncClient(ctx) output, err := FindAgentByARN(ctx, conn, d.Id()) @@ -239,8 +239,8 @@ func resourceAgentRead(ctx context.Context, d *schema.ResourceData, meta interfa d.Set("name", output.Name) if plc := output.PrivateLinkConfig; plc != nil { d.Set("private_link_endpoint", plc.PrivateLinkEndpoint) - d.Set("security_group_arns", flex.FlattenStringList(plc.SecurityGroupArns)) - d.Set("subnet_arns", flex.FlattenStringList(plc.SubnetArns)) + d.Set("security_group_arns", flex.FlattenStringValueList(plc.SecurityGroupArns)) + d.Set("subnet_arns", flex.FlattenStringValueList(plc.SubnetArns)) d.Set("vpc_endpoint_id", plc.VpcEndpointId) } else { d.Set("private_link_endpoint", "") @@ -254,7 +254,7 @@ func resourceAgentRead(ctx context.Context, d *schema.ResourceData, meta interfa func resourceAgentUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataSyncConn(ctx) + conn := meta.(*conns.AWSClient).DataSyncClient(ctx) if d.HasChange("name") { input := &datasync.UpdateAgentInput{ @@ -262,7 +262,7 @@ func resourceAgentUpdate(ctx context.Context, d *schema.ResourceData, meta inter Name: 
aws.String(d.Get("name").(string)), } - _, err := conn.UpdateAgentWithContext(ctx, input) + _, err := conn.UpdateAgent(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating DataSync Agent (%s): %s", d.Id(), err) @@ -274,14 +274,14 @@ func resourceAgentUpdate(ctx context.Context, d *schema.ResourceData, meta inter func resourceAgentDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataSyncConn(ctx) + conn := meta.(*conns.AWSClient).DataSyncClient(ctx) log.Printf("[DEBUG] Deleting DataSync Agent: %s", d.Id()) - _, err := conn.DeleteAgentWithContext(ctx, &datasync.DeleteAgentInput{ + _, err := conn.DeleteAgent(ctx, &datasync.DeleteAgentInput{ AgentArn: aws.String(d.Id()), }) - if tfawserr.ErrMessageContains(err, datasync.ErrCodeInvalidRequestException, "does not exist") { + if errs.IsAErrorMessageContains[*awstypes.InvalidRequestException](err, "does not exist") { return diags } @@ -292,14 +292,14 @@ func resourceAgentDelete(ctx context.Context, d *schema.ResourceData, meta inter return diags } -func FindAgentByARN(ctx context.Context, conn *datasync.DataSync, arn string) (*datasync.DescribeAgentOutput, error) { +func FindAgentByARN(ctx context.Context, conn *datasync.Client, arn string) (*datasync.DescribeAgentOutput, error) { input := &datasync.DescribeAgentInput{ AgentArn: aws.String(arn), } - output, err := conn.DescribeAgentWithContext(ctx, input) + output, err := conn.DescribeAgent(ctx, input) - if tfawserr.ErrMessageContains(err, datasync.ErrCodeInvalidRequestException, "does not exist") { + if errs.IsAErrorMessageContains[*awstypes.InvalidRequestException](err, "does not exist") { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, diff --git a/internal/service/datasync/agent_test.go b/internal/service/datasync/agent_test.go index 7f7b91e088b..b7d9e8f5685 100644 --- a/internal/service/datasync/agent_test.go +++ b/internal/service/datasync/agent_test.go @@ -10,8 +10,8 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/datasync" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/datasync" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -205,7 +205,7 @@ func TestAccDataSyncAgent_vpcEndpointID(t *testing.T) { func testAccCheckAgentDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_datasync_agent" { @@ -236,7 +236,7 @@ func testAccCheckAgentExists(ctx context.Context, n string, v *datasync.Describe return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncClient(ctx) output, err := tfdatasync.FindAgentByARN(ctx, conn, rs.Primary.ID) @@ -252,7 +252,7 @@ func testAccCheckAgentExists(ctx context.Context, n string, v *datasync.Describe func testAccCheckAgentNotRecreated(i, j *datasync.DescribeAgentOutput) resource.TestCheckFunc { return func(s *terraform.State) error { - if !aws.TimeValue(i.CreationTime).Equal(aws.TimeValue(j.CreationTime)) { + if 
!aws.ToTime(i.CreationTime).Equal(aws.ToTime(j.CreationTime)) { return errors.New("DataSync Agent was recreated") } diff --git a/internal/service/datasync/common_fsx_protocol_functions.go b/internal/service/datasync/common_fsx_protocol_functions.go index 2bb97e5e83a..429d2e27d23 100644 --- a/internal/service/datasync/common_fsx_protocol_functions.go +++ b/internal/service/datasync/common_fsx_protocol_functions.go @@ -4,17 +4,17 @@ package datasync import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/datasync" + "github.com/aws/aws-sdk-go-v2/aws" + awstypes "github.com/aws/aws-sdk-go-v2/service/datasync/types" ) -func expandProtocol(l []interface{}) *datasync.FsxProtocol { +func expandProtocol(l []interface{}) *awstypes.FsxProtocol { if len(l) == 0 || l[0] == nil { return nil } m := l[0].(map[string]interface{}) - protocol := &datasync.FsxProtocol{} + protocol := &awstypes.FsxProtocol{} if v, ok := m["nfs"].([]interface{}); ok { protocol.NFS = expandNFS(v) @@ -26,7 +26,7 @@ func expandProtocol(l []interface{}) *datasync.FsxProtocol { return protocol } -func flattenProtocol(protocol *datasync.FsxProtocol) []interface{} { +func flattenProtocol(protocol *awstypes.FsxProtocol) []interface{} { if protocol == nil { return []interface{}{} } @@ -43,28 +43,28 @@ func flattenProtocol(protocol *datasync.FsxProtocol) []interface{} { return []interface{}{m} } -func expandNFS(l []interface{}) *datasync.FsxProtocolNfs { +func expandNFS(l []interface{}) *awstypes.FsxProtocolNfs { if len(l) == 0 || l[0] == nil { return nil } m := l[0].(map[string]interface{}) - protocol := &datasync.FsxProtocolNfs{ + protocol := &awstypes.FsxProtocolNfs{ MountOptions: expandNFSMountOptions(m["mount_options"].([]interface{})), } return protocol } -func expandSMB(l []interface{}) *datasync.FsxProtocolSmb { +func expandSMB(l []interface{}) *awstypes.FsxProtocolSmb { if len(l) == 0 || l[0] == nil { return nil } m := l[0].(map[string]interface{}) - protocol := &datasync.FsxProtocolSmb{ + protocol := &awstypes.FsxProtocolSmb{ MountOptions: expandSMBMountOptions(m["mount_options"].([]interface{})), } if v, ok := m["domain"].(string); ok && v != "" { @@ -81,7 +81,7 @@ func expandSMB(l []interface{}) *datasync.FsxProtocolSmb { } // todo: go another level down? -func flattenNFS(nfs *datasync.FsxProtocolNfs) []interface{} { +func flattenNFS(nfs *awstypes.FsxProtocolNfs) []interface{} { if nfs == nil { return []interface{}{} } @@ -93,7 +93,7 @@ func flattenNFS(nfs *datasync.FsxProtocolNfs) []interface{} { return []interface{}{m} } -func flattenSMB(smb *datasync.FsxProtocolSmb) []interface{} { +func flattenSMB(smb *awstypes.FsxProtocolSmb) []interface{} { if smb == nil { return []interface{}{} } @@ -102,13 +102,13 @@ func flattenSMB(smb *datasync.FsxProtocolSmb) []interface{} { "mount_options": flattenSMBMountOptions(smb.MountOptions), } if v := smb.Domain; v != nil { - m["domain"] = aws.StringValue(v) + m["domain"] = aws.ToString(v) } if v := smb.Password; v != nil { - m["password"] = aws.StringValue(v) + m["password"] = aws.ToString(v) } if v := smb.User; v != nil { - m["user"] = aws.StringValue(v) + m["user"] = aws.ToString(v) } return []interface{}{m} diff --git a/internal/service/datasync/generate.go b/internal/service/datasync/generate.go index da13cbae9e4..7345ac8696b 100644 --- a/internal/service/datasync/generate.go +++ b/internal/service/datasync/generate.go @@ -1,7 +1,7 @@ // Copyright (c) HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0 -//go:generate go run ../../generate/tags/main.go -ListTags -ServiceTagsSlice -TagType=TagListEntry -UntagInTagsElem=Keys -UpdateTags +//go:generate go run ../../generate/tags/main.go -AWSSDKVersion=2 -ListTags -ServiceTagsSlice -TagType=TagListEntry -UntagInTagsElem=Keys -UpdateTags //go:generate go run ../../generate/servicepackage/main.go // ONLY generate directives and package declaration! Do not add anything else to this file. diff --git a/internal/service/datasync/location_azure_blob.go b/internal/service/datasync/location_azure_blob.go index 385ba0998fc..ac99e26056b 100644 --- a/internal/service/datasync/location_azure_blob.go +++ b/internal/service/datasync/location_azure_blob.go @@ -9,14 +9,15 @@ import ( "log" "strings" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/datasync" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/datasync" + awstypes "github.com/aws/aws-sdk-go-v2/service/datasync/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" @@ -40,10 +41,10 @@ func resourceLocationAzureBlob() *schema.Resource { Schema: map[string]*schema.Schema{ "access_tier": { - Type: schema.TypeString, - Optional: true, - Default: datasync.AzureAccessTierHot, - ValidateFunc: validation.StringInSlice(datasync.AzureAccessTier_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: awstypes.AzureAccessTierHot, + ValidateDiagFunc: enum.Validate[awstypes.AzureAccessTier](), }, "agent_arns": { Type: schema.TypeSet, @@ -58,15 +59,15 @@ func resourceLocationAzureBlob() *schema.Resource { Computed: true, }, "authentication_type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(datasync.AzureBlobAuthenticationType_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.AzureBlobAuthenticationType](), }, "blob_type": { - Type: schema.TypeString, - Optional: true, - Default: datasync.AzureBlobTypeBlock, - ValidateFunc: validation.StringInSlice(datasync.AzureBlobType_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: awstypes.AzureBlobTypeBlock, + ValidateDiagFunc: enum.Validate[awstypes.AzureBlobType](), }, "container_url": { Type: schema.TypeString, @@ -115,21 +116,21 @@ func resourceLocationAzureBlob() *schema.Resource { func resourceLocationAzureBlobCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataSyncConn(ctx) + conn := meta.(*conns.AWSClient).DataSyncClient(ctx) input := &datasync.CreateLocationAzureBlobInput{ - AgentArns: flex.ExpandStringSet(d.Get("agent_arns").(*schema.Set)), - AuthenticationType: aws.String(d.Get("authentication_type").(string)), + AgentArns: flex.ExpandStringValueSet(d.Get("agent_arns").(*schema.Set)), + AuthenticationType: 
awstypes.AzureBlobAuthenticationType(d.Get("authentication_type").(string)), ContainerUrl: aws.String(d.Get("container_url").(string)), Tags: getTagsIn(ctx), } if v, ok := d.GetOk("access_tier"); ok { - input.AccessTier = aws.String(v.(string)) + input.AccessTier = awstypes.AzureAccessTier(v.(string)) } if v, ok := d.GetOk("blob_type"); ok { - input.BlobType = aws.String(v.(string)) + input.BlobType = awstypes.AzureBlobType(v.(string)) } if v, ok := d.GetOk("sas_configuration"); ok { @@ -140,20 +141,20 @@ func resourceLocationAzureBlobCreate(ctx context.Context, d *schema.ResourceData input.Subdirectory = aws.String(v.(string)) } - output, err := conn.CreateLocationAzureBlobWithContext(ctx, input) + output, err := conn.CreateLocationAzureBlob(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating DataSync Location Microsoft Azure Blob Storage: %s", err) } - d.SetId(aws.StringValue(output.LocationArn)) + d.SetId(aws.ToString(output.LocationArn)) return append(diags, resourceLocationAzureBlobRead(ctx, d, meta)...) } func resourceLocationAzureBlobRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataSyncConn(ctx) + conn := meta.(*conns.AWSClient).DataSyncClient(ctx) output, err := findLocationAzureBlobByARN(ctx, conn, d.Id()) @@ -167,8 +168,8 @@ func resourceLocationAzureBlobRead(ctx context.Context, d *schema.ResourceData, return sdkdiag.AppendErrorf(diags, "reading DataSync Location Microsoft Azure Blob Storage (%s): %s", d.Id(), err) } - uri := aws.StringValue(output.LocationUri) - accountHostName, err := globalIDFromLocationURI(aws.StringValue(output.LocationUri)) + uri := aws.ToString(output.LocationUri) + accountHostName, err := globalIDFromLocationURI(aws.ToString(output.LocationUri)) if err != nil { return sdkdiag.AppendFromErr(diags, err) } @@ -180,7 +181,7 @@ func resourceLocationAzureBlobRead(ctx context.Context, d *schema.ResourceData, containerURL := fmt.Sprintf("https://%s%s", accountHostName, containerName) d.Set("access_tier", output.AccessTier) - d.Set("agent_arns", aws.StringValueSlice(output.AgentArns)) + d.Set("agent_arns", output.AgentArns) d.Set("arn", output.LocationArn) d.Set("authentication_type", output.AuthenticationType) d.Set("blob_type", output.BlobType) @@ -194,7 +195,7 @@ func resourceLocationAzureBlobRead(ctx context.Context, d *schema.ResourceData, func resourceLocationAzureBlobUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataSyncConn(ctx) + conn := meta.(*conns.AWSClient).DataSyncClient(ctx) if d.HasChangesExcept("tags", "tags_all") { input := &datasync.UpdateLocationAzureBlobInput{ @@ -202,19 +203,19 @@ func resourceLocationAzureBlobUpdate(ctx context.Context, d *schema.ResourceData } if d.HasChange("access_tier") { - input.AccessTier = aws.String(d.Get("access_tier").(string)) + input.AccessTier = awstypes.AzureAccessTier(d.Get("access_tier").(string)) } if d.HasChange("agent_arns") { - input.AgentArns = flex.ExpandStringSet(d.Get("agent_arns").(*schema.Set)) + input.AgentArns = flex.ExpandStringValueSet(d.Get("agent_arns").(*schema.Set)) } if d.HasChange("authentication_type") { - input.AuthenticationType = aws.String(d.Get("authentication_type").(string)) + input.AuthenticationType = awstypes.AzureBlobAuthenticationType(d.Get("authentication_type").(string)) } if d.HasChange("blob_type") { - input.BlobType = 
aws.String(d.Get("blob_type").(string)) + input.BlobType = awstypes.AzureBlobType(d.Get("blob_type").(string)) } if d.HasChange("sas_configuration") { @@ -225,7 +226,7 @@ func resourceLocationAzureBlobUpdate(ctx context.Context, d *schema.ResourceData input.Subdirectory = aws.String(d.Get("subdirectory").(string)) } - _, err := conn.UpdateLocationAzureBlobWithContext(ctx, input) + _, err := conn.UpdateLocationAzureBlob(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating DataSync Location Microsoft Azure Blob Storage (%s): %s", d.Id(), err) @@ -237,14 +238,14 @@ func resourceLocationAzureBlobUpdate(ctx context.Context, d *schema.ResourceData func resourceLocationAzureBlobDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataSyncConn(ctx) + conn := meta.(*conns.AWSClient).DataSyncClient(ctx) log.Printf("[DEBUG] Deleting DataSync LocationMicrosoft Azure Blob Storage: %s", d.Id()) - _, err := conn.DeleteLocationWithContext(ctx, &datasync.DeleteLocationInput{ + _, err := conn.DeleteLocation(ctx, &datasync.DeleteLocationInput{ LocationArn: aws.String(d.Id()), }) - if tfawserr.ErrMessageContains(err, datasync.ErrCodeInvalidRequestException, "not found") { + if errs.IsAErrorMessageContains[*awstypes.InvalidRequestException](err, "not found") { return diags } @@ -255,14 +256,14 @@ func resourceLocationAzureBlobDelete(ctx context.Context, d *schema.ResourceData return diags } -func findLocationAzureBlobByARN(ctx context.Context, conn *datasync.DataSync, arn string) (*datasync.DescribeLocationAzureBlobOutput, error) { +func findLocationAzureBlobByARN(ctx context.Context, conn *datasync.Client, arn string) (*datasync.DescribeLocationAzureBlobOutput, error) { input := &datasync.DescribeLocationAzureBlobInput{ LocationArn: aws.String(arn), } - output, err := conn.DescribeLocationAzureBlobWithContext(ctx, input) + output, err := conn.DescribeLocationAzureBlob(ctx, input) - if tfawserr.ErrMessageContains(err, datasync.ErrCodeInvalidRequestException, "not found") { + if errs.IsAErrorMessageContains[*awstypes.InvalidRequestException](err, "not found") { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -280,14 +281,14 @@ func findLocationAzureBlobByARN(ctx context.Context, conn *datasync.DataSync, ar return output, nil } -func expandAzureBlobSasConfiguration(l []interface{}) *datasync.AzureBlobSasConfiguration { +func expandAzureBlobSasConfiguration(l []interface{}) *awstypes.AzureBlobSasConfiguration { if len(l) == 0 || l[0] == nil { return nil } m := l[0].(map[string]interface{}) - apiObject := &datasync.AzureBlobSasConfiguration{ + apiObject := &awstypes.AzureBlobSasConfiguration{ Token: aws.String(m["token"].(string)), } diff --git a/internal/service/datasync/location_azure_blob_test.go b/internal/service/datasync/location_azure_blob_test.go index 80032142df5..5c91ef5c31d 100644 --- a/internal/service/datasync/location_azure_blob_test.go +++ b/internal/service/datasync/location_azure_blob_test.go @@ -9,7 +9,7 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/service/datasync" + "github.com/aws/aws-sdk-go-v2/service/datasync" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -182,7 +182,7 @@ func TestAccDataSyncLocationAzureBlob_update(t *testing.T) { func 
testAccCheckLocationAzureBlobDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_datasync_location_azure_blob" { @@ -213,7 +213,7 @@ func testAccCheckLocationAzureBlobExists(ctx context.Context, n string, v *datas return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncClient(ctx) output, err := tfdatasync.FindLocationAzureBlobByARN(ctx, conn, rs.Primary.ID) diff --git a/internal/service/datasync/location_efs.go b/internal/service/datasync/location_efs.go index c2810e7bb74..db7b21144a5 100644 --- a/internal/service/datasync/location_efs.go +++ b/internal/service/datasync/location_efs.go @@ -9,15 +9,16 @@ import ( "log" "strings" - "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/datasync" + awstypes "github.com/aws/aws-sdk-go-v2/service/datasync/types" "github.com/aws/aws-sdk-go/aws/arn" - "github.com/aws/aws-sdk-go/service/datasync" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" @@ -88,10 +89,10 @@ func resourceLocationEFS() *schema.Resource { ValidateFunc: verify.ValidARN, }, "in_transit_encryption": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice(datasync.EfsInTransitEncryption_Values(), false), + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[awstypes.EfsInTransitEncryption](), }, "subdirectory": { Type: schema.TypeString, @@ -123,7 +124,7 @@ func resourceLocationEFS() *schema.Resource { func resourceLocationEFSCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataSyncConn(ctx) + conn := meta.(*conns.AWSClient).DataSyncClient(ctx) input := &datasync.CreateLocationEfsInput{ Ec2Config: expandEC2Config(d.Get("ec2_config").([]interface{})), @@ -141,23 +142,23 @@ func resourceLocationEFSCreate(ctx context.Context, d *schema.ResourceData, meta } if v, ok := d.GetOk("in_transit_encryption"); ok { - input.InTransitEncryption = aws.String(v.(string)) + input.InTransitEncryption = awstypes.EfsInTransitEncryption(v.(string)) } - output, err := conn.CreateLocationEfsWithContext(ctx, input) + output, err := conn.CreateLocationEfs(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating DataSync Location EFS: %s", err) } - d.SetId(aws.StringValue(output.LocationArn)) + d.SetId(aws.ToString(output.LocationArn)) return append(diags, resourceLocationEFSRead(ctx, d, meta)...) 
} func resourceLocationEFSRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataSyncConn(ctx) + conn := meta.(*conns.AWSClient).DataSyncClient(ctx) output, err := findLocationEFSByARN(ctx, conn, d.Id()) @@ -171,7 +172,7 @@ func resourceLocationEFSRead(ctx context.Context, d *schema.ResourceData, meta i return sdkdiag.AppendErrorf(diags, "reading DataSync Location EFS (%s): %s", d.Id(), err) } - uri := aws.StringValue(output.LocationUri) + uri := aws.ToString(output.LocationUri) globalID, err := globalIDFromLocationURI(uri) if err != nil { return sdkdiag.AppendFromErr(diags, err) @@ -211,14 +212,14 @@ func resourceLocationEFSUpdate(ctx context.Context, d *schema.ResourceData, meta func resourceLocationEFSDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataSyncConn(ctx) + conn := meta.(*conns.AWSClient).DataSyncClient(ctx) log.Printf("[DEBUG] Deleting DataSync Location EFS: %s", d.Id()) - _, err := conn.DeleteLocationWithContext(ctx, &datasync.DeleteLocationInput{ + _, err := conn.DeleteLocation(ctx, &datasync.DeleteLocationInput{ LocationArn: aws.String(d.Id()), }) - if tfawserr.ErrMessageContains(err, datasync.ErrCodeInvalidRequestException, "not found") { + if errs.IsAErrorMessageContains[*awstypes.InvalidRequestException](err, "not found") { return diags } @@ -229,14 +230,14 @@ func resourceLocationEFSDelete(ctx context.Context, d *schema.ResourceData, meta return diags } -func findLocationEFSByARN(ctx context.Context, conn *datasync.DataSync, arn string) (*datasync.DescribeLocationEfsOutput, error) { +func findLocationEFSByARN(ctx context.Context, conn *datasync.Client, arn string) (*datasync.DescribeLocationEfsOutput, error) { input := &datasync.DescribeLocationEfsInput{ LocationArn: aws.String(arn), } - output, err := conn.DescribeLocationEfsWithContext(ctx, input) + output, err := conn.DescribeLocationEfs(ctx, input) - if tfawserr.ErrMessageContains(err, datasync.ErrCodeInvalidRequestException, "not found") { + if errs.IsAErrorMessageContains[*awstypes.InvalidRequestException](err, "not found") { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -254,28 +255,28 @@ func findLocationEFSByARN(ctx context.Context, conn *datasync.DataSync, arn stri return output, nil } -func flattenEC2Config(ec2Config *datasync.Ec2Config) []interface{} { +func flattenEC2Config(ec2Config *awstypes.Ec2Config) []interface{} { if ec2Config == nil { return []interface{}{} } m := map[string]interface{}{ - "security_group_arns": flex.FlattenStringSet(ec2Config.SecurityGroupArns), - "subnet_arn": aws.StringValue(ec2Config.SubnetArn), + "security_group_arns": flex.FlattenStringValueSet(ec2Config.SecurityGroupArns), + "subnet_arn": aws.ToString(ec2Config.SubnetArn), } return []interface{}{m} } -func expandEC2Config(l []interface{}) *datasync.Ec2Config { +func expandEC2Config(l []interface{}) *awstypes.Ec2Config { if len(l) == 0 || l[0] == nil { return nil } m := l[0].(map[string]interface{}) - ec2Config := &datasync.Ec2Config{ - SecurityGroupArns: flex.ExpandStringSet(m["security_group_arns"].(*schema.Set)), + ec2Config := &awstypes.Ec2Config{ + SecurityGroupArns: flex.ExpandStringValueSet(m["security_group_arns"].(*schema.Set)), SubnetArn: aws.String(m["subnet_arn"].(string)), } diff --git a/internal/service/datasync/location_efs_test.go b/internal/service/datasync/location_efs_test.go index 
1933e0e5a17..63b958a5098 100644 --- a/internal/service/datasync/location_efs_test.go +++ b/internal/service/datasync/location_efs_test.go @@ -9,7 +9,7 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/service/datasync" + "github.com/aws/aws-sdk-go-v2/service/datasync" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -187,7 +187,7 @@ func TestAccDataSyncLocationEFS_tags(t *testing.T) { func testAccCheckLocationEFSDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_datasync_location_efs" { @@ -218,7 +218,7 @@ func testAccCheckLocationEFSExists(ctx context.Context, n string, v *datasync.De return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncClient(ctx) output, err := tfdatasync.FindLocationEFSByARN(ctx, conn, rs.Primary.ID) diff --git a/internal/service/datasync/location_fsx_lustre_file_system.go b/internal/service/datasync/location_fsx_lustre_file_system.go index f2e99941369..36ccc962d1e 100644 --- a/internal/service/datasync/location_fsx_lustre_file_system.go +++ b/internal/service/datasync/location_fsx_lustre_file_system.go @@ -10,14 +10,15 @@ import ( "strings" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/datasync" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/datasync" + awstypes "github.com/aws/aws-sdk-go-v2/service/datasync/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" @@ -99,12 +100,12 @@ func resourceLocationFSxLustreFileSystem() *schema.Resource { func resourceLocationFSxLustreFileSystemCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataSyncConn(ctx) + conn := meta.(*conns.AWSClient).DataSyncClient(ctx) fsxArn := d.Get("fsx_filesystem_arn").(string) input := &datasync.CreateLocationFsxLustreInput{ FsxFilesystemArn: aws.String(fsxArn), - SecurityGroupArns: flex.ExpandStringSet(d.Get("security_group_arns").(*schema.Set)), + SecurityGroupArns: flex.ExpandStringValueSet(d.Get("security_group_arns").(*schema.Set)), Tags: getTagsIn(ctx), } @@ -112,20 +113,20 @@ func resourceLocationFSxLustreFileSystemCreate(ctx context.Context, d *schema.Re input.Subdirectory = aws.String(v.(string)) } - output, err := conn.CreateLocationFsxLustreWithContext(ctx, input) + output, err := conn.CreateLocationFsxLustre(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating DataSync Location FSx for Lustre File 
System: %s", err) } - d.SetId(aws.StringValue(output.LocationArn)) + d.SetId(aws.ToString(output.LocationArn)) return append(diags, resourceLocationFSxLustreFileSystemRead(ctx, d, meta)...) } func resourceLocationFSxLustreFileSystemRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataSyncConn(ctx) + conn := meta.(*conns.AWSClient).DataSyncClient(ctx) output, err := findLocationFSxLustreByARN(ctx, conn, d.Id()) @@ -139,7 +140,7 @@ func resourceLocationFSxLustreFileSystemRead(ctx context.Context, d *schema.Reso return sdkdiag.AppendErrorf(diags, "reading DataSync Location FSx for Lustre File System (%s): %s", d.Id(), err) } - uri := aws.StringValue(output.LocationUri) + uri := aws.ToString(output.LocationUri) subdirectory, err := subdirectoryFromLocationURI(uri) if err != nil { return sdkdiag.AppendFromErr(diags, err) @@ -148,7 +149,7 @@ func resourceLocationFSxLustreFileSystemRead(ctx context.Context, d *schema.Reso d.Set("arn", output.LocationArn) d.Set("creation_time", output.CreationTime.Format(time.RFC3339)) d.Set("fsx_filesystem_arn", d.Get("fsx_filesystem_arn")) - d.Set("security_group_arns", aws.StringValueSlice(output.SecurityGroupArns)) + d.Set("security_group_arns", output.SecurityGroupArns) d.Set("subdirectory", subdirectory) d.Set("uri", output.LocationUri) @@ -165,14 +166,14 @@ func resourceLocationFSxLustreFileSystemUpdate(ctx context.Context, d *schema.Re func resourceLocationFSxLustreFileSystemDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataSyncConn(ctx) + conn := meta.(*conns.AWSClient).DataSyncClient(ctx) log.Printf("[DEBUG] Deleting DataSync Location FSx for Lustre File System: %s", d.Id()) - _, err := conn.DeleteLocationWithContext(ctx, &datasync.DeleteLocationInput{ + _, err := conn.DeleteLocation(ctx, &datasync.DeleteLocationInput{ LocationArn: aws.String(d.Id()), }) - if tfawserr.ErrMessageContains(err, datasync.ErrCodeInvalidRequestException, "not found") { + if errs.IsAErrorMessageContains[*awstypes.InvalidRequestException](err, "not found") { return diags } @@ -183,14 +184,14 @@ func resourceLocationFSxLustreFileSystemDelete(ctx context.Context, d *schema.Re return diags } -func findLocationFSxLustreByARN(ctx context.Context, conn *datasync.DataSync, arn string) (*datasync.DescribeLocationFsxLustreOutput, error) { +func findLocationFSxLustreByARN(ctx context.Context, conn *datasync.Client, arn string) (*datasync.DescribeLocationFsxLustreOutput, error) { input := &datasync.DescribeLocationFsxLustreInput{ LocationArn: aws.String(arn), } - output, err := conn.DescribeLocationFsxLustreWithContext(ctx, input) + output, err := conn.DescribeLocationFsxLustre(ctx, input) - if tfawserr.ErrMessageContains(err, datasync.ErrCodeInvalidRequestException, "not found") { + if errs.IsAErrorMessageContains[*awstypes.InvalidRequestException](err, "not found") { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, diff --git a/internal/service/datasync/location_fsx_lustre_file_system_test.go b/internal/service/datasync/location_fsx_lustre_file_system_test.go index c26cf2300a8..0f0da44375a 100644 --- a/internal/service/datasync/location_fsx_lustre_file_system_test.go +++ b/internal/service/datasync/location_fsx_lustre_file_system_test.go @@ -9,7 +9,7 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/service/datasync" + 
"github.com/aws/aws-sdk-go-v2/service/datasync" "github.com/aws/aws-sdk-go/service/fsx" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" @@ -174,7 +174,7 @@ func TestAccDataSyncLocationFSxLustreFileSystem_tags(t *testing.T) { func testAccCheckLocationFSxLustreDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_datasync_location_fsx_lustre_file_system" { @@ -205,7 +205,7 @@ func testAccCheckLocationFSxLustreExists(ctx context.Context, n string, v *datas return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncClient(ctx) output, err := tfdatasync.FindLocationFSxLustreByARN(ctx, conn, rs.Primary.ID) diff --git a/internal/service/datasync/location_fsx_ontap_file_system.go b/internal/service/datasync/location_fsx_ontap_file_system.go index a07f9f01f46..784b36fcb5d 100644 --- a/internal/service/datasync/location_fsx_ontap_file_system.go +++ b/internal/service/datasync/location_fsx_ontap_file_system.go @@ -10,14 +10,16 @@ import ( "strings" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/datasync" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/datasync" + awstypes "github.com/aws/aws-sdk-go-v2/service/datasync/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" @@ -88,13 +90,11 @@ func resourceLocationFSxONTAPFileSystem() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "version": { - Type: schema.TypeString, - Default: datasync.NfsVersionNfs3, - Optional: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice([]string{ - datasync.NfsVersionNfs3, - }, false), + Type: schema.TypeString, + Default: awstypes.NfsVersionNfs3, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice(enum.Slice(awstypes.NfsVersionNfs3), false), }, }, }, @@ -125,15 +125,15 @@ func resourceLocationFSxONTAPFileSystem() *schema.Resource { Schema: map[string]*schema.Schema{ "version": { Type: schema.TypeString, - Default: datasync.SmbVersionAutomatic, + Default: awstypes.SmbVersionAutomatic, Optional: true, ForceNew: true, - ValidateFunc: validation.StringInSlice([]string{ - datasync.SmbVersionAutomatic, - datasync.SmbVersionSmb2, - datasync.SmbVersionSmb3, - datasync.SmbVersionSmb20, - }, false), + ValidateFunc: validation.StringInSlice(enum.Slice( + awstypes.SmbVersionAutomatic, + awstypes.SmbVersionSmb2, + awstypes.SmbVersionSmb3, + awstypes.SmbVersionSmb20, + ), false), }, }, }, @@ -195,11 +195,11 @@ func 
resourceLocationFSxONTAPFileSystem() *schema.Resource { func resourceLocationFSxONTAPFileSystemCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataSyncConn(ctx) + conn := meta.(*conns.AWSClient).DataSyncClient(ctx) input := &datasync.CreateLocationFsxOntapInput{ Protocol: expandProtocol(d.Get("protocol").([]interface{})), - SecurityGroupArns: flex.ExpandStringSet(d.Get("security_group_arns").(*schema.Set)), + SecurityGroupArns: flex.ExpandStringValueSet(d.Get("security_group_arns").(*schema.Set)), StorageVirtualMachineArn: aws.String(d.Get("storage_virtual_machine_arn").(string)), Tags: getTagsIn(ctx), } @@ -208,20 +208,20 @@ func resourceLocationFSxONTAPFileSystemCreate(ctx context.Context, d *schema.Res input.Subdirectory = aws.String(v.(string)) } - output, err := conn.CreateLocationFsxOntapWithContext(ctx, input) + output, err := conn.CreateLocationFsxOntap(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating DataSync Location FSx for NetApp ONTAP File System: %s", err) } - d.SetId(aws.StringValue(output.LocationArn)) + d.SetId(aws.ToString(output.LocationArn)) return append(diags, resourceLocationFSxONTAPFileSystemRead(ctx, d, meta)...) } func resourceLocationFSxONTAPFileSystemRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataSyncConn(ctx) + conn := meta.(*conns.AWSClient).DataSyncClient(ctx) output, err := findLocationFSxONTAPByARN(ctx, conn, d.Id()) @@ -235,7 +235,7 @@ func resourceLocationFSxONTAPFileSystemRead(ctx context.Context, d *schema.Resou return sdkdiag.AppendErrorf(diags, "reading DataSync Location FSx for NetApp ONTAP File System (%s): %s", d.Id(), err) } - uri := aws.StringValue(output.LocationUri) + uri := aws.ToString(output.LocationUri) subdirectory, err := subdirectoryFromLocationURI(uri) if err != nil { return sdkdiag.AppendFromErr(diags, err) @@ -245,7 +245,7 @@ func resourceLocationFSxONTAPFileSystemRead(ctx context.Context, d *schema.Resou d.Set("creation_time", output.CreationTime.Format(time.RFC3339)) d.Set("fsx_filesystem_arn", output.FsxFilesystemArn) // SMB Password is not returned from the API. 
- if output.Protocol != nil && output.Protocol.SMB != nil && aws.StringValue(output.Protocol.SMB.Password) == "" { + if output.Protocol != nil && output.Protocol.SMB != nil && aws.ToString(output.Protocol.SMB.Password) == "" { if smbPassword := d.Get("protocol.0.smb.0.password").(string); smbPassword != "" { output.Protocol.SMB.Password = aws.String(smbPassword) } @@ -253,7 +253,7 @@ func resourceLocationFSxONTAPFileSystemRead(ctx context.Context, d *schema.Resou if err := d.Set("protocol", flattenProtocol(output.Protocol)); err != nil { return sdkdiag.AppendErrorf(diags, "setting protocol: %s", err) } - d.Set("security_group_arns", aws.StringValueSlice(output.SecurityGroupArns)) + d.Set("security_group_arns", output.SecurityGroupArns) d.Set("storage_virtual_machine_arn", output.StorageVirtualMachineArn) d.Set("subdirectory", subdirectory) d.Set("uri", uri) @@ -271,16 +271,16 @@ func resourceLocationFSxONTAPFileSystemUpdate(ctx context.Context, d *schema.Res func resourceLocationFSxONTAPFileSystemDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataSyncConn(ctx) + conn := meta.(*conns.AWSClient).DataSyncClient(ctx) input := &datasync.DeleteLocationInput{ LocationArn: aws.String(d.Id()), } log.Printf("[DEBUG] Deleting DataSync Location FSx for NetApp ONTAP File System: %s", d.Id()) - _, err := conn.DeleteLocationWithContext(ctx, input) + _, err := conn.DeleteLocation(ctx, input) - if tfawserr.ErrMessageContains(err, datasync.ErrCodeInvalidRequestException, "not found") { + if errs.IsAErrorMessageContains[*awstypes.InvalidRequestException](err, "not found") { return diags } @@ -291,14 +291,14 @@ func resourceLocationFSxONTAPFileSystemDelete(ctx context.Context, d *schema.Res return diags } -func findLocationFSxONTAPByARN(ctx context.Context, conn *datasync.DataSync, arn string) (*datasync.DescribeLocationFsxOntapOutput, error) { +func findLocationFSxONTAPByARN(ctx context.Context, conn *datasync.Client, arn string) (*datasync.DescribeLocationFsxOntapOutput, error) { input := &datasync.DescribeLocationFsxOntapInput{ LocationArn: aws.String(arn), } - output, err := conn.DescribeLocationFsxOntapWithContext(ctx, input) + output, err := conn.DescribeLocationFsxOntap(ctx, input) - if tfawserr.ErrMessageContains(err, datasync.ErrCodeInvalidRequestException, "not found") { + if errs.IsAErrorMessageContains[*awstypes.InvalidRequestException](err, "not found") { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, diff --git a/internal/service/datasync/location_fsx_ontap_file_system_test.go b/internal/service/datasync/location_fsx_ontap_file_system_test.go index 0b4b1ded658..d6c67436fbd 100644 --- a/internal/service/datasync/location_fsx_ontap_file_system_test.go +++ b/internal/service/datasync/location_fsx_ontap_file_system_test.go @@ -9,7 +9,7 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/service/datasync" + "github.com/aws/aws-sdk-go-v2/service/datasync" "github.com/aws/aws-sdk-go/service/fsx" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" @@ -225,7 +225,7 @@ func TestAccDataSyncLocationFSxONTAPFileSystem_tags(t *testing.T) { func testAccCheckLocationFSxONTAPDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncConn(ctx) + conn := 
acctest.Provider.Meta().(*conns.AWSClient).DataSyncClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_datasync_location_fsx_ontap_file_system" { @@ -256,7 +256,7 @@ func testAccCheckLocationFSxONTAPExists(ctx context.Context, n string, v *datasy return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncClient(ctx) output, err := tfdatasync.FindLocationFSxONTAPByARN(ctx, conn, rs.Primary.ID) diff --git a/internal/service/datasync/location_fsx_openzfs_file_system.go b/internal/service/datasync/location_fsx_openzfs_file_system.go index dc0bdc21d83..ceee438bf72 100644 --- a/internal/service/datasync/location_fsx_openzfs_file_system.go +++ b/internal/service/datasync/location_fsx_openzfs_file_system.go @@ -10,14 +10,16 @@ import ( "strings" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/datasync" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/datasync" + awstypes "github.com/aws/aws-sdk-go-v2/service/datasync/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" @@ -89,11 +91,11 @@ func resourceLocationFSxOpenZFSFileSystem() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "version": { - Type: schema.TypeString, - Default: datasync.NfsVersionAutomatic, - Optional: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice(datasync.NfsVersion_Values(), false), + Type: schema.TypeString, + Default: awstypes.NfsVersionAutomatic, + Optional: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[awstypes.NfsVersion](), }, }, }, @@ -136,12 +138,12 @@ func resourceLocationFSxOpenZFSFileSystem() *schema.Resource { func resourceLocationFSxOpenZFSFileSystemCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataSyncConn(ctx) + conn := meta.(*conns.AWSClient).DataSyncClient(ctx) input := &datasync.CreateLocationFsxOpenZfsInput{ FsxFilesystemArn: aws.String(d.Get("fsx_filesystem_arn").(string)), Protocol: expandProtocol(d.Get("protocol").([]interface{})), - SecurityGroupArns: flex.ExpandStringSet(d.Get("security_group_arns").(*schema.Set)), + SecurityGroupArns: flex.ExpandStringValueSet(d.Get("security_group_arns").(*schema.Set)), Tags: getTagsIn(ctx), } @@ -149,20 +151,20 @@ func resourceLocationFSxOpenZFSFileSystemCreate(ctx context.Context, d *schema.R input.Subdirectory = aws.String(v.(string)) } - output, err := conn.CreateLocationFsxOpenZfsWithContext(ctx, input) + output, err := conn.CreateLocationFsxOpenZfs(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating DataSync Location FSx for OpenZFS File System: %s", err) } - d.SetId(aws.StringValue(output.LocationArn)) + d.SetId(aws.ToString(output.LocationArn)) return 
append(diags, resourceLocationFSxOpenZFSFileSystemRead(ctx, d, meta)...) } func resourceLocationFSxOpenZFSFileSystemRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataSyncConn(ctx) + conn := meta.(*conns.AWSClient).DataSyncClient(ctx) output, err := findLocationFSxOpenZFSByARN(ctx, conn, d.Id()) @@ -176,7 +178,7 @@ func resourceLocationFSxOpenZFSFileSystemRead(ctx context.Context, d *schema.Res return sdkdiag.AppendErrorf(diags, "reading DataSync Location FSx for OpenZFS File System (%s): %s", d.Id(), err) } - uri := aws.StringValue(output.LocationUri) + uri := aws.ToString(output.LocationUri) subdirectory, err := subdirectoryFromLocationURI(uri) if err != nil { return sdkdiag.AppendFromErr(diags, err) @@ -188,7 +190,7 @@ func resourceLocationFSxOpenZFSFileSystemRead(ctx context.Context, d *schema.Res if err := d.Set("protocol", flattenProtocol(output.Protocol)); err != nil { return sdkdiag.AppendErrorf(diags, "setting protocol: %s", err) } - d.Set("security_group_arns", aws.StringValueSlice(output.SecurityGroupArns)) + d.Set("security_group_arns", output.SecurityGroupArns) d.Set("subdirectory", subdirectory) d.Set("uri", uri) @@ -205,14 +207,14 @@ func resourceLocationFSxOpenZFSFileSystemUpdate(ctx context.Context, d *schema.R func resourceLocationFSxOpenZFSFileSystemDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataSyncConn(ctx) + conn := meta.(*conns.AWSClient).DataSyncClient(ctx) log.Printf("[DEBUG] Deleting DataSync Location FSx for OpenZFS File System: %s", d.Id()) - _, err := conn.DeleteLocationWithContext(ctx, &datasync.DeleteLocationInput{ + _, err := conn.DeleteLocation(ctx, &datasync.DeleteLocationInput{ LocationArn: aws.String(d.Id()), }) - if tfawserr.ErrMessageContains(err, datasync.ErrCodeInvalidRequestException, "not found") { + if errs.IsAErrorMessageContains[*awstypes.InvalidRequestException](err, "not found") { return diags } @@ -223,14 +225,14 @@ func resourceLocationFSxOpenZFSFileSystemDelete(ctx context.Context, d *schema.R return diags } -func findLocationFSxOpenZFSByARN(ctx context.Context, conn *datasync.DataSync, arn string) (*datasync.DescribeLocationFsxOpenZfsOutput, error) { +func findLocationFSxOpenZFSByARN(ctx context.Context, conn *datasync.Client, arn string) (*datasync.DescribeLocationFsxOpenZfsOutput, error) { input := &datasync.DescribeLocationFsxOpenZfsInput{ LocationArn: aws.String(arn), } - output, err := conn.DescribeLocationFsxOpenZfsWithContext(ctx, input) + output, err := conn.DescribeLocationFsxOpenZfs(ctx, input) - if tfawserr.ErrMessageContains(err, datasync.ErrCodeInvalidRequestException, "not found") { + if errs.IsAErrorMessageContains[*awstypes.InvalidRequestException](err, "not found") { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, diff --git a/internal/service/datasync/location_fsx_openzfs_file_system_test.go b/internal/service/datasync/location_fsx_openzfs_file_system_test.go index 5828e906085..b60ecc6ec18 100644 --- a/internal/service/datasync/location_fsx_openzfs_file_system_test.go +++ b/internal/service/datasync/location_fsx_openzfs_file_system_test.go @@ -9,7 +9,7 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/service/datasync" + "github.com/aws/aws-sdk-go-v2/service/datasync" "github.com/aws/aws-sdk-go/service/fsx" sdkacctest 
"github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" @@ -174,7 +174,7 @@ func TestAccDataSyncLocationFSxOpenZFSFileSystem_tags(t *testing.T) { func testAccCheckLocationFSxOpenZFSDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_datasync_location_fsx_openzfs_file_system" { @@ -205,7 +205,7 @@ func testAccCheckLocationFSxOpenZFSExists(ctx context.Context, n string, v *data return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncClient(ctx) output, err := tfdatasync.FindLocationFSxOpenZFSByARN(ctx, conn, rs.Primary.ID) diff --git a/internal/service/datasync/location_fsx_windows_file_system.go b/internal/service/datasync/location_fsx_windows_file_system.go index bb1599b6e00..8aa0e35ce38 100644 --- a/internal/service/datasync/location_fsx_windows_file_system.go +++ b/internal/service/datasync/location_fsx_windows_file_system.go @@ -10,14 +10,15 @@ import ( "strings" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/datasync" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/datasync" + awstypes "github.com/aws/aws-sdk-go-v2/service/datasync/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" @@ -118,12 +119,12 @@ func resourceLocationFSxWindowsFileSystem() *schema.Resource { func resourceLocationFSxWindowsFileSystemCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataSyncConn(ctx) + conn := meta.(*conns.AWSClient).DataSyncClient(ctx) input := &datasync.CreateLocationFsxWindowsInput{ FsxFilesystemArn: aws.String(d.Get("fsx_filesystem_arn").(string)), Password: aws.String(d.Get("password").(string)), - SecurityGroupArns: flex.ExpandStringSet(d.Get("security_group_arns").(*schema.Set)), + SecurityGroupArns: flex.ExpandStringValueSet(d.Get("security_group_arns").(*schema.Set)), Tags: getTagsIn(ctx), User: aws.String(d.Get("user").(string)), } @@ -136,20 +137,20 @@ func resourceLocationFSxWindowsFileSystemCreate(ctx context.Context, d *schema.R input.Subdirectory = aws.String(v.(string)) } - output, err := conn.CreateLocationFsxWindowsWithContext(ctx, input) + output, err := conn.CreateLocationFsxWindows(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating DataSync Location FSx for Windows File Server File System: %s", err) } - d.SetId(aws.StringValue(output.LocationArn)) + d.SetId(aws.ToString(output.LocationArn)) return append(diags, resourceLocationFSxWindowsFileSystemRead(ctx, d, meta)...) 
} func resourceLocationFSxWindowsFileSystemRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataSyncConn(ctx) + conn := meta.(*conns.AWSClient).DataSyncClient(ctx) output, err := findLocationFSxWindowsByARN(ctx, conn, d.Id()) @@ -163,7 +164,7 @@ func resourceLocationFSxWindowsFileSystemRead(ctx context.Context, d *schema.Res return sdkdiag.AppendErrorf(diags, "reading DataSync Location FSx for Windows File Server File System (%s): %s", d.Id(), err) } - uri := aws.StringValue(output.LocationUri) + uri := aws.ToString(output.LocationUri) subdirectory, err := subdirectoryFromLocationURI(uri) if err != nil { return sdkdiag.AppendFromErr(diags, err) @@ -173,7 +174,7 @@ func resourceLocationFSxWindowsFileSystemRead(ctx context.Context, d *schema.Res d.Set("creation_time", output.CreationTime.Format(time.RFC3339)) d.Set("domain", output.Domain) d.Set("fsx_filesystem_arn", d.Get("fsx_filesystem_arn")) - d.Set("security_group_arns", aws.StringValueSlice(output.SecurityGroupArns)) + d.Set("security_group_arns", output.SecurityGroupArns) d.Set("subdirectory", subdirectory) d.Set("uri", uri) d.Set("user", output.User) @@ -191,14 +192,14 @@ func resourceLocationFSxWindowsFileSystemUpdate(ctx context.Context, d *schema.R func resourceLocationFSxWindowsFileSystemDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataSyncConn(ctx) + conn := meta.(*conns.AWSClient).DataSyncClient(ctx) log.Printf("[DEBUG] Deleting DataSync Location FSx for Windows File Server File System: %s", d.Id()) - _, err := conn.DeleteLocationWithContext(ctx, &datasync.DeleteLocationInput{ + _, err := conn.DeleteLocation(ctx, &datasync.DeleteLocationInput{ LocationArn: aws.String(d.Id()), }) - if tfawserr.ErrMessageContains(err, datasync.ErrCodeInvalidRequestException, "not found") { + if errs.IsAErrorMessageContains[*awstypes.InvalidRequestException](err, "not found") { return diags } @@ -209,14 +210,14 @@ func resourceLocationFSxWindowsFileSystemDelete(ctx context.Context, d *schema.R return diags } -func findLocationFSxWindowsByARN(ctx context.Context, conn *datasync.DataSync, arn string) (*datasync.DescribeLocationFsxWindowsOutput, error) { +func findLocationFSxWindowsByARN(ctx context.Context, conn *datasync.Client, arn string) (*datasync.DescribeLocationFsxWindowsOutput, error) { input := &datasync.DescribeLocationFsxWindowsInput{ LocationArn: aws.String(arn), } - output, err := conn.DescribeLocationFsxWindowsWithContext(ctx, input) + output, err := conn.DescribeLocationFsxWindows(ctx, input) - if tfawserr.ErrMessageContains(err, datasync.ErrCodeInvalidRequestException, "not found") { + if errs.IsAErrorMessageContains[*awstypes.InvalidRequestException](err, "not found") { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, diff --git a/internal/service/datasync/location_fsx_windows_file_system_test.go b/internal/service/datasync/location_fsx_windows_file_system_test.go index 11bcd6948ac..9b92f6db6f8 100644 --- a/internal/service/datasync/location_fsx_windows_file_system_test.go +++ b/internal/service/datasync/location_fsx_windows_file_system_test.go @@ -9,7 +9,7 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/service/datasync" + "github.com/aws/aws-sdk-go-v2/service/datasync" "github.com/aws/aws-sdk-go/service/fsx" sdkacctest 
"github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" @@ -181,7 +181,7 @@ func TestAccDataSyncLocationFSxWindowsFileSystem_tags(t *testing.T) { func testAccCheckLocationFSxWindowsDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_datasync_location_fsx_windows_file_system" { @@ -212,7 +212,7 @@ func testAccCheckLocationFSxWindowsExists(ctx context.Context, n string, v *data return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncClient(ctx) output, err := tfdatasync.FindLocationFSxWindowsByARN(ctx, conn, rs.Primary.ID) diff --git a/internal/service/datasync/location_hdfs.go b/internal/service/datasync/location_hdfs.go index 8fcd72d8b7e..83aaca4f406 100644 --- a/internal/service/datasync/location_hdfs.go +++ b/internal/service/datasync/location_hdfs.go @@ -8,14 +8,16 @@ import ( "log" "strings" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/datasync" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/datasync" + awstypes "github.com/aws/aws-sdk-go-v2/service/datasync/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" @@ -52,9 +54,9 @@ func resourceLocationHDFS() *schema.Resource { Computed: true, }, "authentication_type": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(datasync.HdfsAuthenticationType_Values(), false), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.HdfsAuthenticationType](), }, "block_size": { Type: schema.TypeInt, @@ -124,16 +126,16 @@ func resourceLocationHDFS() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "data_transfer_protection": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validation.StringInSlice(datasync.HdfsDataTransferProtection_Values(), false), + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateDiagFunc: enum.Validate[awstypes.HdfsDataTransferProtection](), }, "rpc_protection": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validation.StringInSlice(datasync.HdfsRpcProtection_Values(), false), + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateDiagFunc: enum.Validate[awstypes.HdfsRpcProtection](), }, }, }, @@ -178,18 +180,18 @@ func resourceLocationHDFS() *schema.Resource { func resourceLocationHDFSCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := 
meta.(*conns.AWSClient).DataSyncConn(ctx) + conn := meta.(*conns.AWSClient).DataSyncClient(ctx) input := &datasync.CreateLocationHdfsInput{ - AgentArns: flex.ExpandStringSet(d.Get("agent_arns").(*schema.Set)), - AuthenticationType: aws.String(d.Get("authentication_type").(string)), + AgentArns: flex.ExpandStringValueSet(d.Get("agent_arns").(*schema.Set)), + AuthenticationType: awstypes.HdfsAuthenticationType(d.Get("authentication_type").(string)), NameNodes: expandHDFSNameNodes(d.Get("name_node").(*schema.Set)), Subdirectory: aws.String(d.Get("subdirectory").(string)), Tags: getTagsIn(ctx), } if v, ok := d.GetOk("block_size"); ok { - input.BlockSize = aws.Int64(int64(v.(int))) + input.BlockSize = aws.Int32(int32(v.(int))) } if v, ok := d.GetOk("kerberos_keytab"); ok { @@ -227,27 +229,27 @@ func resourceLocationHDFSCreate(ctx context.Context, d *schema.ResourceData, met } if v, ok := d.GetOk("replication_factor"); ok { - input.ReplicationFactor = aws.Int64(int64(v.(int))) + input.ReplicationFactor = aws.Int32(int32(v.(int))) } if v, ok := d.GetOk("simple_user"); ok { input.SimpleUser = aws.String(v.(string)) } - output, err := conn.CreateLocationHdfsWithContext(ctx, input) + output, err := conn.CreateLocationHdfs(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating DataSync Location HDFS: %s", err) } - d.SetId(aws.StringValue(output.LocationArn)) + d.SetId(aws.ToString(output.LocationArn)) return append(diags, resourceLocationHDFSRead(ctx, d, meta)...) } func resourceLocationHDFSRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataSyncConn(ctx) + conn := meta.(*conns.AWSClient).DataSyncClient(ctx) output, err := findLocationHDFSByARN(ctx, conn, d.Id()) @@ -261,13 +263,13 @@ func resourceLocationHDFSRead(ctx context.Context, d *schema.ResourceData, meta return sdkdiag.AppendErrorf(diags, "reading DataSync Location HDFS (%s): %s", d.Id(), err) } - uri := aws.StringValue(output.LocationUri) + uri := aws.ToString(output.LocationUri) subdirectory, err := subdirectoryFromLocationURI(uri) if err != nil { return sdkdiag.AppendFromErr(diags, err) } - d.Set("agent_arns", aws.StringValueSlice(output.AgentArns)) + d.Set("agent_arns", output.AgentArns) d.Set("arn", output.LocationArn) d.Set("authentication_type", output.AuthenticationType) d.Set("block_size", output.BlockSize) @@ -289,7 +291,7 @@ func resourceLocationHDFSRead(ctx context.Context, d *schema.ResourceData, meta func resourceLocationHDFSUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataSyncConn(ctx) + conn := meta.(*conns.AWSClient).DataSyncClient(ctx) if d.HasChangesExcept("tags", "tags_all") { input := &datasync.UpdateLocationHdfsInput{ @@ -297,15 +299,15 @@ func resourceLocationHDFSUpdate(ctx context.Context, d *schema.ResourceData, met } if d.HasChange("agent_arns") { - input.AgentArns = flex.ExpandStringSet(d.Get("agent_arns").(*schema.Set)) + input.AgentArns = flex.ExpandStringValueSet(d.Get("agent_arns").(*schema.Set)) } if d.HasChange("authentication_type") { - input.AuthenticationType = aws.String(d.Get("authentication_type").(string)) + input.AuthenticationType = awstypes.HdfsAuthenticationType(d.Get("authentication_type").(string)) } if d.HasChange("block_size") { - input.BlockSize = aws.Int64(int64(d.Get("block_size").(int))) + input.BlockSize = aws.Int32(int32(d.Get("block_size").(int))) } if 
d.HasChanges("kerberos_keytab", "kerberos_keytab_base64") { @@ -351,7 +353,7 @@ func resourceLocationHDFSUpdate(ctx context.Context, d *schema.ResourceData, met } if d.HasChange("replication_factor") { - input.ReplicationFactor = aws.Int64(int64(d.Get("replication_factor").(int))) + input.ReplicationFactor = aws.Int32(int32(d.Get("replication_factor").(int))) } if d.HasChange("simple_user") { @@ -362,7 +364,7 @@ func resourceLocationHDFSUpdate(ctx context.Context, d *schema.ResourceData, met input.Subdirectory = aws.String(d.Get("subdirectory").(string)) } - _, err := conn.UpdateLocationHdfsWithContext(ctx, input) + _, err := conn.UpdateLocationHdfs(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating DataSync Location HDFS (%s): %s", d.Id(), err) @@ -374,14 +376,14 @@ func resourceLocationHDFSUpdate(ctx context.Context, d *schema.ResourceData, met func resourceLocationHDFSDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataSyncConn(ctx) + conn := meta.(*conns.AWSClient).DataSyncClient(ctx) log.Printf("[DEBUG] Deleting DataSync Location HDFS: %s", d.Id()) - _, err := conn.DeleteLocationWithContext(ctx, &datasync.DeleteLocationInput{ + _, err := conn.DeleteLocation(ctx, &datasync.DeleteLocationInput{ LocationArn: aws.String(d.Id()), }) - if tfawserr.ErrMessageContains(err, datasync.ErrCodeInvalidRequestException, "not found") { + if errs.IsAErrorMessageContains[*awstypes.InvalidRequestException](err, "not found") { return diags } @@ -392,14 +394,14 @@ func resourceLocationHDFSDelete(ctx context.Context, d *schema.ResourceData, met return diags } -func findLocationHDFSByARN(ctx context.Context, conn *datasync.DataSync, arn string) (*datasync.DescribeLocationHdfsOutput, error) { +func findLocationHDFSByARN(ctx context.Context, conn *datasync.Client, arn string) (*datasync.DescribeLocationHdfsOutput, error) { input := &datasync.DescribeLocationHdfsInput{ LocationArn: aws.String(arn), } - output, err := conn.DescribeLocationHdfsWithContext(ctx, input) + output, err := conn.DescribeLocationHdfs(ctx, input) - if tfawserr.ErrMessageContains(err, datasync.ErrCodeInvalidRequestException, "not found") { + if errs.IsAErrorMessageContains[*awstypes.InvalidRequestException](err, "not found") { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -417,13 +419,13 @@ func findLocationHDFSByARN(ctx context.Context, conn *datasync.DataSync, arn str return output, nil } -func expandHDFSNameNodes(l *schema.Set) []*datasync.HdfsNameNode { - nameNodes := make([]*datasync.HdfsNameNode, 0) +func expandHDFSNameNodes(l *schema.Set) []awstypes.HdfsNameNode { + nameNodes := make([]awstypes.HdfsNameNode, 0) for _, m := range l.List() { raw := m.(map[string]interface{}) - nameNode := &datasync.HdfsNameNode{ + nameNode := awstypes.HdfsNameNode{ Hostname: aws.String(raw["hostname"].(string)), - Port: aws.Int64(int64(raw["port"].(int))), + Port: aws.Int32(int32(raw["port"].(int))), } nameNodes = append(nameNodes, nameNode) } @@ -431,13 +433,13 @@ func expandHDFSNameNodes(l *schema.Set) []*datasync.HdfsNameNode { return nameNodes } -func flattenHDFSNameNodes(nodes []*datasync.HdfsNameNode) []map[string]interface{} { +func flattenHDFSNameNodes(nodes []awstypes.HdfsNameNode) []map[string]interface{} { dataResources := make([]map[string]interface{}, 0, len(nodes)) for _, raw := range nodes { item := make(map[string]interface{}) - item["hostname"] = aws.StringValue(raw.Hostname) - 
item["port"] = aws.Int64Value(raw.Port) + item["hostname"] = aws.ToString(raw.Hostname) + item["port"] = aws.ToInt32(raw.Port) dataResources = append(dataResources, item) } @@ -445,29 +447,29 @@ func flattenHDFSNameNodes(nodes []*datasync.HdfsNameNode) []map[string]interface return dataResources } -func expandHDFSQOPConfiguration(l []interface{}) *datasync.QopConfiguration { +func expandHDFSQOPConfiguration(l []interface{}) *awstypes.QopConfiguration { if len(l) == 0 || l[0] == nil { return nil } m := l[0].(map[string]interface{}) - qopConfig := &datasync.QopConfiguration{ - DataTransferProtection: aws.String(m["data_transfer_protection"].(string)), - RpcProtection: aws.String(m["rpc_protection"].(string)), + qopConfig := &awstypes.QopConfiguration{ + DataTransferProtection: awstypes.HdfsDataTransferProtection(m["data_transfer_protection"].(string)), + RpcProtection: awstypes.HdfsRpcProtection(m["rpc_protection"].(string)), } return qopConfig } -func flattenHDFSQOPConfiguration(qopConfig *datasync.QopConfiguration) []interface{} { +func flattenHDFSQOPConfiguration(qopConfig *awstypes.QopConfiguration) []interface{} { if qopConfig == nil { return []interface{}{} } m := map[string]interface{}{ - "data_transfer_protection": aws.StringValue(qopConfig.DataTransferProtection), - "rpc_protection": aws.StringValue(qopConfig.RpcProtection), + "data_transfer_protection": string(qopConfig.DataTransferProtection), + "rpc_protection": string(qopConfig.RpcProtection), } return []interface{}{m} diff --git a/internal/service/datasync/location_hdfs_test.go b/internal/service/datasync/location_hdfs_test.go index aa78232ce6e..64b2bce4cdf 100644 --- a/internal/service/datasync/location_hdfs_test.go +++ b/internal/service/datasync/location_hdfs_test.go @@ -9,7 +9,7 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/service/datasync" + "github.com/aws/aws-sdk-go-v2/service/datasync" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -190,7 +190,7 @@ func TestAccDataSyncLocationHDFS_kerberos(t *testing.T) { func testAccCheckLocationHDFSDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_datasync_location_hdfs" { @@ -221,7 +221,7 @@ func testAccCheckLocationHDFSExists(ctx context.Context, n string, v *datasync.D return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncClient(ctx) output, err := tfdatasync.FindLocationHDFSByARN(ctx, conn, rs.Primary.ID) diff --git a/internal/service/datasync/location_nfs.go b/internal/service/datasync/location_nfs.go index 4f55987eb62..ff4fc156740 100644 --- a/internal/service/datasync/location_nfs.go +++ b/internal/service/datasync/location_nfs.go @@ -8,14 +8,16 @@ import ( "log" "strings" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/datasync" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/datasync" + awstypes "github.com/aws/aws-sdk-go-v2/service/datasync/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" @@ -50,11 +52,11 @@ func resourceLocationNFS() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "version": { - Type: schema.TypeString, - Default: datasync.NfsVersionAutomatic, - Optional: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice(datasync.NfsVersion_Values(), false), + Type: schema.TypeString, + Default: awstypes.NfsVersionAutomatic, + Optional: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[awstypes.NfsVersion](), }, }, }, @@ -112,7 +114,7 @@ func resourceLocationNFS() *schema.Resource { func resourceLocationNFSCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataSyncConn(ctx) + conn := meta.(*conns.AWSClient).DataSyncClient(ctx) input := &datasync.CreateLocationNfsInput{ OnPremConfig: expandOnPremConfig(d.Get("on_prem_config").([]interface{})), @@ -125,20 +127,20 @@ func resourceLocationNFSCreate(ctx context.Context, d *schema.ResourceData, meta input.MountOptions = expandNFSMountOptions(v.([]interface{})) } - output, err := conn.CreateLocationNfsWithContext(ctx, input) + output, err := conn.CreateLocationNfs(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating DataSync Location NFS: %s", err) } - d.SetId(aws.StringValue(output.LocationArn)) + d.SetId(aws.ToString(output.LocationArn)) return append(diags, resourceLocationNFSRead(ctx, d, meta)...) 
} func resourceLocationNFSRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataSyncConn(ctx) + conn := meta.(*conns.AWSClient).DataSyncClient(ctx) output, err := findLocationNFSByARN(ctx, conn, d.Id()) @@ -152,7 +154,7 @@ func resourceLocationNFSRead(ctx context.Context, d *schema.ResourceData, meta i return sdkdiag.AppendErrorf(diags, "reading DataSync Location NFS (%s): %s", d.Id(), err) } - uri := aws.StringValue(output.LocationUri) + uri := aws.ToString(output.LocationUri) serverHostName, err := globalIDFromLocationURI(uri) if err != nil { return sdkdiag.AppendFromErr(diags, err) @@ -178,7 +180,7 @@ func resourceLocationNFSRead(ctx context.Context, d *schema.ResourceData, meta i func resourceLocationNFSUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataSyncConn(ctx) + conn := meta.(*conns.AWSClient).DataSyncClient(ctx) if d.HasChangesExcept("tags", "tags_all") { input := &datasync.UpdateLocationNfsInput{ @@ -191,7 +193,7 @@ func resourceLocationNFSUpdate(ctx context.Context, d *schema.ResourceData, meta input.MountOptions = expandNFSMountOptions(v.([]interface{})) } - _, err := conn.UpdateLocationNfsWithContext(ctx, input) + _, err := conn.UpdateLocationNfs(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating DataSync Location NFS (%s): %s", d.Id(), err) @@ -203,14 +205,14 @@ func resourceLocationNFSUpdate(ctx context.Context, d *schema.ResourceData, meta func resourceLocationNFSDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataSyncConn(ctx) + conn := meta.(*conns.AWSClient).DataSyncClient(ctx) log.Printf("[DEBUG] Deleting DataSync Location NFS: %s", d.Id()) - _, err := conn.DeleteLocationWithContext(ctx, &datasync.DeleteLocationInput{ + _, err := conn.DeleteLocation(ctx, &datasync.DeleteLocationInput{ LocationArn: aws.String(d.Id()), }) - if tfawserr.ErrMessageContains(err, datasync.ErrCodeInvalidRequestException, "not found") { + if errs.IsAErrorMessageContains[*awstypes.InvalidRequestException](err, "not found") { return diags } @@ -221,14 +223,14 @@ func resourceLocationNFSDelete(ctx context.Context, d *schema.ResourceData, meta return diags } -func findLocationNFSByARN(ctx context.Context, conn *datasync.DataSync, arn string) (*datasync.DescribeLocationNfsOutput, error) { +func findLocationNFSByARN(ctx context.Context, conn *datasync.Client, arn string) (*datasync.DescribeLocationNfsOutput, error) { input := &datasync.DescribeLocationNfsInput{ LocationArn: aws.String(arn), } - output, err := conn.DescribeLocationNfsWithContext(ctx, input) + output, err := conn.DescribeLocationNfs(ctx, input) - if tfawserr.ErrMessageContains(err, datasync.ErrCodeInvalidRequestException, "not found") { + if errs.IsAErrorMessageContains[*awstypes.InvalidRequestException](err, "not found") { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -246,53 +248,53 @@ func findLocationNFSByARN(ctx context.Context, conn *datasync.DataSync, arn stri return output, nil } -func expandNFSMountOptions(l []interface{}) *datasync.NfsMountOptions { +func expandNFSMountOptions(l []interface{}) *awstypes.NfsMountOptions { if len(l) == 0 || l[0] == nil { return nil } m := l[0].(map[string]interface{}) - nfsMountOptions := &datasync.NfsMountOptions{ - Version: 
aws.String(m["version"].(string)), + nfsMountOptions := &awstypes.NfsMountOptions{ + Version: awstypes.NfsVersion(m["version"].(string)), } return nfsMountOptions } -func flattenNFSMountOptions(mountOptions *datasync.NfsMountOptions) []interface{} { +func flattenNFSMountOptions(mountOptions *awstypes.NfsMountOptions) []interface{} { if mountOptions == nil { return []interface{}{} } m := map[string]interface{}{ - "version": aws.StringValue(mountOptions.Version), + "version": string(mountOptions.Version), } return []interface{}{m} } -func flattenOnPremConfig(onPremConfig *datasync.OnPremConfig) []interface{} { +func flattenOnPremConfig(onPremConfig *awstypes.OnPremConfig) []interface{} { if onPremConfig == nil { return []interface{}{} } m := map[string]interface{}{ - "agent_arns": flex.FlattenStringSet(onPremConfig.AgentArns), + "agent_arns": onPremConfig.AgentArns, } return []interface{}{m} } -func expandOnPremConfig(l []interface{}) *datasync.OnPremConfig { +func expandOnPremConfig(l []interface{}) *awstypes.OnPremConfig { if len(l) == 0 || l[0] == nil { return nil } m := l[0].(map[string]interface{}) - onPremConfig := &datasync.OnPremConfig{ - AgentArns: flex.ExpandStringSet(m["agent_arns"].(*schema.Set)), + onPremConfig := &awstypes.OnPremConfig{ + AgentArns: flex.ExpandStringValueSet(m["agent_arns"].(*schema.Set)), } return onPremConfig diff --git a/internal/service/datasync/location_nfs_test.go b/internal/service/datasync/location_nfs_test.go index 15eb279b4b2..b8f0ce673f0 100644 --- a/internal/service/datasync/location_nfs_test.go +++ b/internal/service/datasync/location_nfs_test.go @@ -9,7 +9,7 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/service/datasync" + "github.com/aws/aws-sdk-go-v2/service/datasync" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -227,7 +227,7 @@ func TestAccDataSyncLocationNFS_tags(t *testing.T) { func testAccCheckLocationNFSDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_datasync_location_nfs" { @@ -258,7 +258,7 @@ func testAccCheckLocationNFSExists(ctx context.Context, n string, v *datasync.De return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncClient(ctx) output, err := tfdatasync.FindLocationNFSByARN(ctx, conn, rs.Primary.ID) diff --git a/internal/service/datasync/location_object_storage.go b/internal/service/datasync/location_object_storage.go index 83dc71d2b4b..fee0677da37 100644 --- a/internal/service/datasync/location_object_storage.go +++ b/internal/service/datasync/location_object_storage.go @@ -9,14 +9,16 @@ import ( "log" "strings" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/datasync" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/datasync" + awstypes "github.com/aws/aws-sdk-go-v2/service/datasync/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" @@ -85,10 +87,10 @@ func resourceLocationObjectStorage() *schema.Resource { ValidateFunc: validation.IsPortNumber, }, "server_protocol": { - Type: schema.TypeString, - Optional: true, - Default: datasync.ObjectStorageServerProtocolHttps, - ValidateFunc: validation.StringInSlice(datasync.ObjectStorageServerProtocol_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: awstypes.ObjectStorageServerProtocolHttps, + ValidateDiagFunc: enum.Validate[awstypes.ObjectStorageServerProtocol](), }, "subdirectory": { Type: schema.TypeString, @@ -110,10 +112,10 @@ func resourceLocationObjectStorage() *schema.Resource { func resourceLocationObjectStorageCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataSyncConn(ctx) + conn := meta.(*conns.AWSClient).DataSyncClient(ctx) input := &datasync.CreateLocationObjectStorageInput{ - AgentArns: flex.ExpandStringSet(d.Get("agent_arns").(*schema.Set)), + AgentArns: flex.ExpandStringValueSet(d.Get("agent_arns").(*schema.Set)), BucketName: aws.String(d.Get("bucket_name").(string)), ServerHostname: aws.String(d.Get("server_hostname").(string)), Subdirectory: aws.String(d.Get("subdirectory").(string)), @@ -133,27 +135,27 @@ func resourceLocationObjectStorageCreate(ctx context.Context, d *schema.Resource } if v, ok := d.GetOk("server_port"); ok { - input.ServerPort = aws.Int64(int64(v.(int))) + input.ServerPort = aws.Int32(int32(v.(int))) } if v, ok := d.GetOk("server_protocol"); ok { - input.ServerProtocol = aws.String(v.(string)) + input.ServerProtocol = awstypes.ObjectStorageServerProtocol(v.(string)) } - output, err := conn.CreateLocationObjectStorageWithContext(ctx, input) + output, err := conn.CreateLocationObjectStorage(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating DataSync Location Object Storage: %s", err) } - d.SetId(aws.StringValue(output.LocationArn)) + d.SetId(aws.ToString(output.LocationArn)) return append(diags, resourceLocationObjectStorageRead(ctx, d, meta)...) 
} func resourceLocationObjectStorageRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataSyncConn(ctx) + conn := meta.(*conns.AWSClient).DataSyncClient(ctx) output, err := findLocationObjectStorageByARN(ctx, conn, d.Id()) @@ -167,14 +169,14 @@ func resourceLocationObjectStorageRead(ctx context.Context, d *schema.ResourceDa return sdkdiag.AppendErrorf(diags, "reading DataSync Location Object Storage (%s): %s", d.Id(), err) } - uri := aws.StringValue(output.LocationUri) + uri := aws.ToString(output.LocationUri) hostname, bucketName, subdirectory, err := decodeObjectStorageURI(uri) if err != nil { return sdkdiag.AppendFromErr(diags, err) } d.Set("access_key", output.AccessKey) - d.Set("agent_arns", aws.StringValueSlice(output.AgentArns)) + d.Set("agent_arns", output.AgentArns) d.Set("arn", output.LocationArn) d.Set("bucket_name", bucketName) d.Set("server_certificate", string(output.ServerCertificate)) @@ -189,7 +191,7 @@ func resourceLocationObjectStorageRead(ctx context.Context, d *schema.ResourceDa func resourceLocationObjectStorageUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataSyncConn(ctx) + conn := meta.(*conns.AWSClient).DataSyncClient(ctx) if d.HasChangesExcept("tags", "tags_all") { input := &datasync.UpdateLocationObjectStorageInput{ @@ -200,6 +202,22 @@ func resourceLocationObjectStorageUpdate(ctx context.Context, d *schema.Resource input.AccessKey = aws.String(d.Get("access_key").(string)) } + if d.HasChange("agent_arns") { + input.AgentArns = flex.ExpandStringValueSet(d.Get("agent_arns").(*schema.Set)) + + // Access key must be specified when updating agent ARNs + input.AccessKey = aws.String("") + if v, ok := d.GetOk("access_key"); ok { + input.AccessKey = aws.String(v.(string)) + } + + // Secret key must be specified when updating agent ARNs + input.SecretKey = aws.String("") + if v, ok := d.GetOk("secret_key"); ok { + input.SecretKey = aws.String(v.(string)) + } + } + if d.HasChange("secret_key") { input.SecretKey = aws.String(d.Get("secret_key").(string)) } @@ -209,18 +227,18 @@ func resourceLocationObjectStorageUpdate(ctx context.Context, d *schema.Resource } if d.HasChange("server_port") { - input.ServerPort = aws.Int64(int64(d.Get("server_port").(int))) + input.ServerPort = aws.Int32(int32(d.Get("server_port").(int))) } if d.HasChange("server_protocol") { - input.ServerProtocol = aws.String(d.Get("server_protocol").(string)) + input.ServerProtocol = awstypes.ObjectStorageServerProtocol(d.Get("server_protocol").(string)) } if d.HasChange("subdirectory") { input.Subdirectory = aws.String(d.Get("subdirectory").(string)) } - _, err := conn.UpdateLocationObjectStorageWithContext(ctx, input) + _, err := conn.UpdateLocationObjectStorage(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating DataSync Location Object Storage (%s): %s", d.Id(), err) @@ -232,14 +250,14 @@ func resourceLocationObjectStorageUpdate(ctx context.Context, d *schema.Resource func resourceLocationObjectStorageDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataSyncConn(ctx) + conn := meta.(*conns.AWSClient).DataSyncClient(ctx) log.Printf("[DEBUG] Deleting DataSync Location Object Storage: %s", d.Id()) - _, err := conn.DeleteLocationWithContext(ctx, &datasync.DeleteLocationInput{ + _, err 
:= conn.DeleteLocation(ctx, &datasync.DeleteLocationInput{ LocationArn: aws.String(d.Id()), }) - if tfawserr.ErrMessageContains(err, datasync.ErrCodeInvalidRequestException, "not found") { + if errs.IsAErrorMessageContains[*awstypes.InvalidRequestException](err, "not found") { return diags } @@ -250,14 +268,14 @@ func resourceLocationObjectStorageDelete(ctx context.Context, d *schema.Resource return diags } -func findLocationObjectStorageByARN(ctx context.Context, conn *datasync.DataSync, arn string) (*datasync.DescribeLocationObjectStorageOutput, error) { +func findLocationObjectStorageByARN(ctx context.Context, conn *datasync.Client, arn string) (*datasync.DescribeLocationObjectStorageOutput, error) { input := &datasync.DescribeLocationObjectStorageInput{ LocationArn: aws.String(arn), } - output, err := conn.DescribeLocationObjectStorageWithContext(ctx, input) + output, err := conn.DescribeLocationObjectStorage(ctx, input) - if tfawserr.ErrMessageContains(err, datasync.ErrCodeInvalidRequestException, "not found") { + if errs.IsAErrorMessageContains[*awstypes.InvalidRequestException](err, "not found") { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, diff --git a/internal/service/datasync/location_object_storage_test.go b/internal/service/datasync/location_object_storage_test.go index 6f09dfa9d8f..28131e4486c 100644 --- a/internal/service/datasync/location_object_storage_test.go +++ b/internal/service/datasync/location_object_storage_test.go @@ -9,7 +9,7 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/service/datasync" + "github.com/aws/aws-sdk-go-v2/service/datasync" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -60,6 +60,77 @@ func TestAccDataSyncLocationObjectStorage_basic(t *testing.T) { }) } +func TestAccDataSyncLocationObjectStorage_update(t *testing.T) { + ctx := acctest.Context(t) + var v datasync.DescribeLocationObjectStorageOutput + resourceName := "aws_datasync_location_object_storage.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + domain := acctest.RandomDomainName() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.DataSyncServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckLocationObjectStorageDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccLocationObjectStorageConfig_basic(rName, domain), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLocationObjectStorageExists(ctx, resourceName, &v), + resource.TestCheckResourceAttr(resourceName, "access_key", ""), + resource.TestCheckResourceAttr(resourceName, "agent_arns.#", "1"), + acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "datasync", regexache.MustCompile(`location/loc-.+`)), + resource.TestCheckResourceAttr(resourceName, "bucket_name", rName), + resource.TestCheckNoResourceAttr(resourceName, "secret_key"), + resource.TestCheckResourceAttr(resourceName, "server_certificate", ""), + resource.TestCheckResourceAttr(resourceName, "server_hostname", domain), + resource.TestCheckResourceAttr(resourceName, "server_port", "8080"), + resource.TestCheckResourceAttr(resourceName, "server_protocol", "HTTP"), + resource.TestCheckResourceAttr(resourceName, "subdirectory", "/"), + 
resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttr(resourceName, "uri", fmt.Sprintf("object-storage://%s/%s/", domain, rName)), + ), + }, + { + Config: testAccLocationObjectStorageConfig_updateAddAgent(rName, domain), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLocationObjectStorageExists(ctx, resourceName, &v), + resource.TestCheckResourceAttr(resourceName, "access_key", ""), + resource.TestCheckResourceAttr(resourceName, "agent_arns.#", "2"), + acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "datasync", regexache.MustCompile(`location/loc-.+`)), + resource.TestCheckResourceAttr(resourceName, "bucket_name", rName), + resource.TestCheckNoResourceAttr(resourceName, "secret_key"), + resource.TestCheckResourceAttr(resourceName, "server_certificate", ""), + resource.TestCheckResourceAttr(resourceName, "server_hostname", domain), + resource.TestCheckResourceAttr(resourceName, "server_port", "8080"), + resource.TestCheckResourceAttr(resourceName, "server_protocol", "HTTP"), + resource.TestCheckResourceAttr(resourceName, "subdirectory", "/"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttr(resourceName, "uri", fmt.Sprintf("object-storage://%s/%s/", domain, rName)), + ), + }, + { + Config: testAccLocationObjectStorageConfig_updateRemoveAgent(rName, domain), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLocationObjectStorageExists(ctx, resourceName, &v), + resource.TestCheckResourceAttr(resourceName, "access_key", ""), + resource.TestCheckResourceAttr(resourceName, "agent_arns.#", "1"), + acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "datasync", regexache.MustCompile(`location/loc-.+`)), + resource.TestCheckResourceAttr(resourceName, "bucket_name", rName), + resource.TestCheckNoResourceAttr(resourceName, "secret_key"), + resource.TestCheckResourceAttr(resourceName, "server_certificate", ""), + resource.TestCheckResourceAttr(resourceName, "server_hostname", domain), + resource.TestCheckResourceAttr(resourceName, "server_port", "8080"), + resource.TestCheckResourceAttr(resourceName, "server_protocol", "HTTP"), + resource.TestCheckResourceAttr(resourceName, "subdirectory", "/"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttr(resourceName, "uri", fmt.Sprintf("object-storage://%s/%s/", domain, rName)), + ), + }, + }, + }) +} + func TestAccDataSyncLocationObjectStorage_disappears(t *testing.T) { ctx := acctest.Context(t) var v datasync.DescribeLocationObjectStorageOutput @@ -171,7 +242,7 @@ func TestAccDataSyncLocationObjectStorage_serverCertificate(t *testing.T) { func testAccCheckLocationObjectStorageDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_datasync_location_object_storage" { @@ -202,7 +273,7 @@ func testAccCheckLocationObjectStorageExists(ctx context.Context, n string, v *d return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncClient(ctx) output, err := tfdatasync.FindLocationObjectStorageByARN(ctx, conn, rs.Primary.ID) @@ -237,6 +308,55 @@ resource "aws_datasync_location_object_storage" "test" { `, rName, domain)) } +func 
testAccLocationObjectStorageConfig_baseUpdate(rName string) string { + return acctest.ConfigCompose(testAccLocationObjectStorageConfig_base(rName), fmt.Sprintf(` +resource "aws_instance" "test2" { + depends_on = [aws_internet_gateway.test] + + ami = data.aws_ssm_parameter.aws_service_datasync_ami.value + associate_public_ip_address = true + instance_type = data.aws_ec2_instance_type_offering.available.instance_type + vpc_security_group_ids = [aws_security_group.test.id] + subnet_id = aws_subnet.test[0].id + + tags = { + Name = "%[1]s-2" + } +} + +resource "aws_datasync_agent" "test2" { + ip_address = aws_instance.test2.public_ip + name = "%[1]s-2" +} +`, rName)) +} + +func testAccLocationObjectStorageConfig_updateAddAgent(rName, domain string) string { + return acctest.ConfigCompose(testAccLocationObjectStorageConfig_baseUpdate(rName), + fmt.Sprintf(` +resource "aws_datasync_location_object_storage" "test" { + agent_arns = [aws_datasync_agent.test.arn, aws_datasync_agent.test2.arn] + server_hostname = %[2]q + bucket_name = %[1]q + server_protocol = "HTTP" + server_port = 8080 +} +`, rName, domain)) +} + +func testAccLocationObjectStorageConfig_updateRemoveAgent(rName, domain string) string { + return acctest.ConfigCompose(testAccLocationObjectStorageConfig_baseUpdate(rName), + fmt.Sprintf(` +resource "aws_datasync_location_object_storage" "test" { + agent_arns = [aws_datasync_agent.test.arn] + server_hostname = %[2]q + bucket_name = %[1]q + server_protocol = "HTTP" + server_port = 8080 +} +`, rName, domain)) +} + func testAccLocationObjectStorageConfig_tags1(rName, domain, key1, value1 string) string { return acctest.ConfigCompose(testAccLocationObjectStorageConfig_base(rName), fmt.Sprintf(` resource "aws_datasync_location_object_storage" "test" { diff --git a/internal/service/datasync/location_s3.go b/internal/service/datasync/location_s3.go index 936f7d52b61..f9ebdf17be0 100644 --- a/internal/service/datasync/location_s3.go +++ b/internal/service/datasync/location_s3.go @@ -9,15 +9,16 @@ import ( "log" "strings" - "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/datasync" + awstypes "github.com/aws/aws-sdk-go-v2/service/datasync/types" "github.com/aws/aws-sdk-go/aws/arn" - "github.com/aws/aws-sdk-go/service/datasync" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" @@ -76,11 +77,11 @@ func resourceLocationS3() *schema.Resource { }, }, "s3_storage_class": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice(datasync.S3StorageClass_Values(), false), + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[awstypes.S3StorageClass](), }, "subdirectory": { Type: schema.TypeString, @@ -111,7 +112,7 @@ func resourceLocationS3() *schema.Resource { func resourceLocationS3Create(ctx 
context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataSyncConn(ctx) + conn := meta.(*conns.AWSClient).DataSyncClient(ctx) input := &datasync.CreateLocationS3Input{ S3BucketArn: aws.String(d.Get("s3_bucket_arn").(string)), @@ -121,27 +122,27 @@ func resourceLocationS3Create(ctx context.Context, d *schema.ResourceData, meta } if v, ok := d.GetOk("agent_arns"); ok { - input.AgentArns = flex.ExpandStringSet(v.(*schema.Set)) + input.AgentArns = flex.ExpandStringValueSet(v.(*schema.Set)) } if v, ok := d.GetOk("s3_storage_class"); ok { - input.S3StorageClass = aws.String(v.(string)) + input.S3StorageClass = awstypes.S3StorageClass(v.(string)) } outputRaw, err := tfresource.RetryWhen(ctx, propagationTimeout, func() (interface{}, error) { - return conn.CreateLocationS3WithContext(ctx, input) + return conn.CreateLocationS3(ctx, input) }, func(err error) (bool, error) { // Retry for IAM eventual consistency on error: // InvalidRequestException: Unable to assume role. Reason: Access denied when calling sts:AssumeRole - if tfawserr.ErrMessageContains(err, datasync.ErrCodeInvalidRequestException, "Unable to assume role") { + if errs.IsAErrorMessageContains[*awstypes.InvalidRequestException](err, "Unable to assume role") { return true, err } // Retry for IAM eventual consistency on error: // InvalidRequestException: DataSync location access test failed: could not perform s3:ListObjectsV2 on bucket - if tfawserr.ErrMessageContains(err, datasync.ErrCodeInvalidRequestException, "access test failed") { + if errs.IsAErrorMessageContains[*awstypes.InvalidRequestException](err, "access test failed") { return true, err } @@ -152,14 +153,14 @@ func resourceLocationS3Create(ctx context.Context, d *schema.ResourceData, meta return sdkdiag.AppendErrorf(diags, "creating DataSync Location S3: %s", err) } - d.SetId(aws.StringValue(outputRaw.(*datasync.CreateLocationS3Output).LocationArn)) + d.SetId(aws.ToString(outputRaw.(*datasync.CreateLocationS3Output).LocationArn)) return append(diags, resourceLocationS3Read(ctx, d, meta)...) 
} func resourceLocationS3Read(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataSyncConn(ctx) + conn := meta.(*conns.AWSClient).DataSyncClient(ctx) output, err := findLocationS3ByARN(ctx, conn, d.Id()) @@ -173,12 +174,12 @@ func resourceLocationS3Read(ctx context.Context, d *schema.ResourceData, meta in return sdkdiag.AppendErrorf(diags, "reading DataSync Location S3 (%s): %s", d.Id(), err) } - uri := aws.StringValue(output.LocationUri) - s3BucketName, err := globalIDFromLocationURI(aws.StringValue(output.LocationUri)) + uri := aws.ToString(output.LocationUri) + s3BucketName, err := globalIDFromLocationURI(aws.ToString(output.LocationUri)) if err != nil { return sdkdiag.AppendFromErr(diags, err) } - subdirectory, err := subdirectoryFromLocationURI(aws.StringValue(output.LocationUri)) + subdirectory, err := subdirectoryFromLocationURI(aws.ToString(output.LocationUri)) if err != nil { return sdkdiag.AppendFromErr(diags, err) } @@ -187,7 +188,7 @@ func resourceLocationS3Read(ctx context.Context, d *schema.ResourceData, meta in return sdkdiag.AppendFromErr(diags, err) } - d.Set("agent_arns", aws.StringValueSlice(output.AgentArns)) + d.Set("agent_arns", output.AgentArns) d.Set("arn", output.LocationArn) s3BucketArn := fmt.Sprintf("arn:%s:s3:::%s", locationARN.Partition, s3BucketName) d.Set("s3_bucket_arn", s3BucketArn) @@ -211,14 +212,14 @@ func resourceLocationS3Update(ctx context.Context, d *schema.ResourceData, meta func resourceLocationS3Delete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataSyncConn(ctx) + conn := meta.(*conns.AWSClient).DataSyncClient(ctx) log.Printf("[DEBUG] Deleting DataSync Location S3: %s", d.Id()) - _, err := conn.DeleteLocationWithContext(ctx, &datasync.DeleteLocationInput{ + _, err := conn.DeleteLocation(ctx, &datasync.DeleteLocationInput{ LocationArn: aws.String(d.Id()), }) - if tfawserr.ErrMessageContains(err, datasync.ErrCodeInvalidRequestException, "not found") { + if errs.IsAErrorMessageContains[*awstypes.InvalidRequestException](err, "not found") { return diags } @@ -229,14 +230,14 @@ func resourceLocationS3Delete(ctx context.Context, d *schema.ResourceData, meta return diags } -func findLocationS3ByARN(ctx context.Context, conn *datasync.DataSync, arn string) (*datasync.DescribeLocationS3Output, error) { +func findLocationS3ByARN(ctx context.Context, conn *datasync.Client, arn string) (*datasync.DescribeLocationS3Output, error) { input := &datasync.DescribeLocationS3Input{ LocationArn: aws.String(arn), } - output, err := conn.DescribeLocationS3WithContext(ctx, input) + output, err := conn.DescribeLocationS3(ctx, input) - if tfawserr.ErrMessageContains(err, datasync.ErrCodeInvalidRequestException, "not found") { + if errs.IsAErrorMessageContains[*awstypes.InvalidRequestException](err, "not found") { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -254,26 +255,26 @@ func findLocationS3ByARN(ctx context.Context, conn *datasync.DataSync, arn strin return output, nil } -func flattenS3Config(s3Config *datasync.S3Config) []interface{} { +func flattenS3Config(s3Config *awstypes.S3Config) []interface{} { if s3Config == nil { return []interface{}{} } m := map[string]interface{}{ - "bucket_access_role_arn": aws.StringValue(s3Config.BucketAccessRoleArn), + "bucket_access_role_arn": aws.ToString(s3Config.BucketAccessRoleArn), } return []interface{}{m} } 
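Aside on the SDK v2 pattern used throughout these files: the migrated code drops the `...WithContext` request methods in favor of context-first calls, replaces `aws.StringValue`/`aws.StringValueSlice` with `aws.ToString` and plain `[]string` fields, and matches service errors by typed exception (for example `*awstypes.InvalidRequestException`) instead of error-code strings. The following is a minimal, self-contained sketch of that pattern against the public AWS SDK for Go v2 only; the helper name, variable names, and the ARN below are placeholders for illustration and are not taken from the provider code.

    package main

    import (
        "context"
        "errors"
        "fmt"
        "log"
        "strings"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/config"
        "github.com/aws/aws-sdk-go-v2/service/datasync"
        "github.com/aws/aws-sdk-go-v2/service/datasync/types"
    )

    // describeLocationURI looks up a DataSync S3 location by ARN with the SDK v2
    // client and treats the "not found" flavor of InvalidRequestException as a
    // soft miss, mirroring the error handling style used in the resource code.
    func describeLocationURI(ctx context.Context, client *datasync.Client, arn string) (string, bool, error) {
        out, err := client.DescribeLocationS3(ctx, &datasync.DescribeLocationS3Input{
            LocationArn: aws.String(arn), // input pointers still use aws.String in v2
        })

        var ire *types.InvalidRequestException
        if errors.As(err, &ire) && strings.Contains(ire.ErrorMessage(), "not found") {
            return "", false, nil
        }
        if err != nil {
            return "", false, err
        }

        // aws.ToString replaces the v1 aws.StringValue dereference helper.
        return aws.ToString(out.LocationUri), true, nil
    }

    func main() {
        ctx := context.Background()

        cfg, err := config.LoadDefaultConfig(ctx)
        if err != nil {
            log.Fatal(err)
        }
        client := datasync.NewFromConfig(cfg)

        // Placeholder ARN for illustration only.
        uri, found, err := describeLocationURI(ctx, client, "arn:aws:datasync:us-east-1:123456789012:location/loc-0123456789abcdef0")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(uri, found)
    }

The same release-level shift shows up again further down: callback pagination such as `ListAgentsPagesWithContext` is replaced by the v2 paginators (`datasync.NewListAgentsPaginator`) in sweep.go, and string-based enum validation moves to `enum.Validate[awstypes.…]()` with typed enum fields on the inputs.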
-func expandS3Config(l []interface{}) *datasync.S3Config { +func expandS3Config(l []interface{}) *awstypes.S3Config { if len(l) == 0 || l[0] == nil { return nil } m := l[0].(map[string]interface{}) - s3Config := &datasync.S3Config{ + s3Config := &awstypes.S3Config{ BucketAccessRoleArn: aws.String(m["bucket_access_role_arn"].(string)), } diff --git a/internal/service/datasync/location_s3_test.go b/internal/service/datasync/location_s3_test.go index 9fc08238315..54b6408e2fc 100644 --- a/internal/service/datasync/location_s3_test.go +++ b/internal/service/datasync/location_s3_test.go @@ -9,7 +9,7 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/service/datasync" + "github.com/aws/aws-sdk-go-v2/service/datasync" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -166,7 +166,7 @@ func TestAccDataSyncLocationS3_tags(t *testing.T) { func testAccCheckLocationS3Destroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_datasync_location_s3" { @@ -197,7 +197,7 @@ func testAccCheckLocationS3Exists(ctx context.Context, n string, v *datasync.Des return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncClient(ctx) output, err := tfdatasync.FindLocationS3ByARN(ctx, conn, rs.Primary.ID) diff --git a/internal/service/datasync/location_smb.go b/internal/service/datasync/location_smb.go index c68d10f8094..690a7fd9754 100644 --- a/internal/service/datasync/location_smb.go +++ b/internal/service/datasync/location_smb.go @@ -7,14 +7,16 @@ import ( "context" "log" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/datasync" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/datasync" + awstypes "github.com/aws/aws-sdk-go-v2/service/datasync/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" @@ -63,10 +65,10 @@ func resourceLocationSMB() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "version": { - Type: schema.TypeString, - Default: datasync.SmbVersionAutomatic, - Optional: true, - ValidateFunc: validation.StringInSlice(datasync.SmbVersion_Values(), false), + Type: schema.TypeString, + Default: awstypes.SmbVersionAutomatic, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.SmbVersion](), }, }, }, @@ -118,10 +120,10 @@ func resourceLocationSMB() *schema.Resource { func resourceLocationSMBCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) 
diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataSyncConn(ctx) + conn := meta.(*conns.AWSClient).DataSyncClient(ctx) input := &datasync.CreateLocationSmbInput{ - AgentArns: flex.ExpandStringSet(d.Get("agent_arns").(*schema.Set)), + AgentArns: flex.ExpandStringValueSet(d.Get("agent_arns").(*schema.Set)), MountOptions: expandSMBMountOptions(d.Get("mount_options").([]interface{})), Password: aws.String(d.Get("password").(string)), ServerHostname: aws.String(d.Get("server_hostname").(string)), @@ -134,20 +136,20 @@ func resourceLocationSMBCreate(ctx context.Context, d *schema.ResourceData, meta input.Domain = aws.String(v.(string)) } - output, err := conn.CreateLocationSmbWithContext(ctx, input) + output, err := conn.CreateLocationSmb(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating DataSync Location SMB: %s", err) } - d.SetId(aws.StringValue(output.LocationArn)) + d.SetId(aws.ToString(output.LocationArn)) return append(diags, resourceLocationSMBRead(ctx, d, meta)...) } func resourceLocationSMBRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataSyncConn(ctx) + conn := meta.(*conns.AWSClient).DataSyncClient(ctx) output, err := findLocationSMBByARN(ctx, conn, d.Id()) @@ -161,17 +163,17 @@ func resourceLocationSMBRead(ctx context.Context, d *schema.ResourceData, meta i return sdkdiag.AppendErrorf(diags, "reading DataSync Location SMB (%s): %s", d.Id(), err) } - uri := aws.StringValue(output.LocationUri) + uri := aws.ToString(output.LocationUri) serverHostName, err := globalIDFromLocationURI(uri) if err != nil { return sdkdiag.AppendFromErr(diags, err) } - subdirectory, err := subdirectoryFromLocationURI(aws.StringValue(output.LocationUri)) + subdirectory, err := subdirectoryFromLocationURI(aws.ToString(output.LocationUri)) if err != nil { return sdkdiag.AppendFromErr(diags, err) } - d.Set("agent_arns", aws.StringValueSlice(output.AgentArns)) + d.Set("agent_arns", output.AgentArns) d.Set("arn", output.LocationArn) d.Set("domain", output.Domain) if err := d.Set("mount_options", flattenSMBMountOptions(output.MountOptions)); err != nil { @@ -187,12 +189,12 @@ func resourceLocationSMBRead(ctx context.Context, d *schema.ResourceData, meta i func resourceLocationSMBUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataSyncConn(ctx) + conn := meta.(*conns.AWSClient).DataSyncClient(ctx) if d.HasChangesExcept("tags", "tags_all") { input := &datasync.UpdateLocationSmbInput{ LocationArn: aws.String(d.Id()), - AgentArns: flex.ExpandStringSet(d.Get("agent_arns").(*schema.Set)), + AgentArns: flex.ExpandStringValueSet(d.Get("agent_arns").(*schema.Set)), MountOptions: expandSMBMountOptions(d.Get("mount_options").([]interface{})), Password: aws.String(d.Get("password").(string)), Subdirectory: aws.String(d.Get("subdirectory").(string)), @@ -203,7 +205,7 @@ func resourceLocationSMBUpdate(ctx context.Context, d *schema.ResourceData, meta input.Domain = aws.String(v.(string)) } - _, err := conn.UpdateLocationSmbWithContext(ctx, input) + _, err := conn.UpdateLocationSmb(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating DataSync Location SMB (%s): %s", d.Id(), err) @@ -215,14 +217,14 @@ func resourceLocationSMBUpdate(ctx context.Context, d *schema.ResourceData, meta func resourceLocationSMBDelete(ctx context.Context, d *schema.ResourceData, 
meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataSyncConn(ctx) + conn := meta.(*conns.AWSClient).DataSyncClient(ctx) log.Printf("[DEBUG] Deleting DataSync Location SMB: %s", d.Id()) - _, err := conn.DeleteLocationWithContext(ctx, &datasync.DeleteLocationInput{ + _, err := conn.DeleteLocation(ctx, &datasync.DeleteLocationInput{ LocationArn: aws.String(d.Id()), }) - if tfawserr.ErrMessageContains(err, datasync.ErrCodeInvalidRequestException, "not found") { + if errs.IsAErrorMessageContains[*awstypes.InvalidRequestException](err, "not found") { return diags } @@ -233,14 +235,14 @@ func resourceLocationSMBDelete(ctx context.Context, d *schema.ResourceData, meta return diags } -func findLocationSMBByARN(ctx context.Context, conn *datasync.DataSync, arn string) (*datasync.DescribeLocationSmbOutput, error) { +func findLocationSMBByARN(ctx context.Context, conn *datasync.Client, arn string) (*datasync.DescribeLocationSmbOutput, error) { input := &datasync.DescribeLocationSmbInput{ LocationArn: aws.String(arn), } - output, err := conn.DescribeLocationSmbWithContext(ctx, input) + output, err := conn.DescribeLocationSmb(ctx, input) - if tfawserr.ErrMessageContains(err, datasync.ErrCodeInvalidRequestException, "not found") { + if errs.IsAErrorMessageContains[*awstypes.InvalidRequestException](err, "not found") { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -258,27 +260,27 @@ func findLocationSMBByARN(ctx context.Context, conn *datasync.DataSync, arn stri return output, nil } -func flattenSMBMountOptions(mountOptions *datasync.SmbMountOptions) []interface{} { +func flattenSMBMountOptions(mountOptions *awstypes.SmbMountOptions) []interface{} { if mountOptions == nil { return []interface{}{} } m := map[string]interface{}{ - "version": aws.StringValue(mountOptions.Version), + "version": string(mountOptions.Version), } return []interface{}{m} } -func expandSMBMountOptions(l []interface{}) *datasync.SmbMountOptions { +func expandSMBMountOptions(l []interface{}) *awstypes.SmbMountOptions { if len(l) == 0 || l[0] == nil { return nil } m := l[0].(map[string]interface{}) - smbMountOptions := &datasync.SmbMountOptions{ - Version: aws.String(m["version"].(string)), + smbMountOptions := &awstypes.SmbMountOptions{ + Version: awstypes.SmbVersion(m["version"].(string)), } return smbMountOptions diff --git a/internal/service/datasync/location_smb_test.go b/internal/service/datasync/location_smb_test.go index df4f7834561..defad42df7f 100644 --- a/internal/service/datasync/location_smb_test.go +++ b/internal/service/datasync/location_smb_test.go @@ -9,7 +9,7 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/service/datasync" + "github.com/aws/aws-sdk-go-v2/service/datasync" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -141,7 +141,7 @@ func TestAccDataSyncLocationSMB_tags(t *testing.T) { func testAccCheckLocationSMBDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_datasync_location_smb" { @@ -172,7 +172,7 @@ func testAccCheckLocationSMBExists(ctx context.Context, n string, v *datasync.De return 
fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncClient(ctx) output, err := tfdatasync.FindLocationSMBByARN(ctx, conn, rs.Primary.ID) diff --git a/internal/service/datasync/service_endpoints_gen_test.go b/internal/service/datasync/service_endpoints_gen_test.go index ac777c4c16a..5ec6ed2fa01 100644 --- a/internal/service/datasync/service_endpoints_gen_test.go +++ b/internal/service/datasync/service_endpoints_gen_test.go @@ -4,17 +4,17 @@ package datasync_test import ( "context" + "errors" "fmt" "maps" - "net/url" "os" "path/filepath" "reflect" "strings" "testing" - "github.com/aws/aws-sdk-go/aws/endpoints" - datasync_sdkv1 "github.com/aws/aws-sdk-go/service/datasync" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + datasync_sdkv2 "github.com/aws/aws-sdk-go-v2/service/datasync" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" "github.com/google/go-cmp/cmp" @@ -212,32 +212,42 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } func defaultEndpoint(region string) string { - r := endpoints.DefaultResolver() + r := datasync_sdkv2.NewDefaultEndpointResolverV2() - ep, err := r.EndpointFor(datasync_sdkv1.EndpointsID, region) + ep, err := r.ResolveEndpoint(context.Background(), datasync_sdkv2.EndpointParameters{ + Region: aws_sdkv2.String(region), + }) if err != nil { return err.Error() } - url, _ := url.Parse(ep.URL) - - if url.Path == "" { - url.Path = "/" + if ep.URI.Path == "" { + ep.URI.Path = "/" } - return url.String() + return ep.URI.String() } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) string { t.Helper() - client := meta.DataSyncConn(ctx) - - req, _ := client.ListAgentsRequest(&datasync_sdkv1.ListAgentsInput{}) + var endpoint string - req.HTTPRequest.URL.Path = "/" + client := meta.DataSyncClient(ctx) - endpoint := req.HTTPRequest.URL.String() + _, err := client.ListAgents(ctx, &datasync_sdkv2.ListAgentsInput{}, + func(opts *datasync_sdkv2.Options) { + opts.APIOptions = append(opts.APIOptions, + addRetrieveEndpointURLMiddleware(t, &endpoint), + addCancelRequestMiddleware(), + ) + }, + ) + if err == nil { + t.Fatal("Expected an error, got none") + } else if !errors.Is(err, errCancelOperation) { + t.Fatalf("Unexpected error: %s", err) + } return endpoint } diff --git a/internal/service/datasync/service_package_gen.go b/internal/service/datasync/service_package_gen.go index e83a4b11901..3c77f5bb8d8 100644 --- a/internal/service/datasync/service_package_gen.go +++ b/internal/service/datasync/service_package_gen.go @@ -5,9 +5,8 @@ package datasync import ( "context" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" - datasync_sdkv1 "github.com/aws/aws-sdk-go/service/datasync" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + datasync_sdkv2 "github.com/aws/aws-sdk-go-v2/service/datasync" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -140,11 +139,15 @@ func (p *servicePackage) ServicePackageName() string { return names.DataSync } -// NewConn returns a new AWS SDK for Go v1 client for this service package's AWS API. 
-func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*datasync_sdkv1.DataSync, error) { - sess := config["session"].(*session_sdkv1.Session) +// NewClient returns a new AWS SDK for Go v2 client for this service package's AWS API. +func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*datasync_sdkv2.Client, error) { + cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return datasync_sdkv1.New(sess.Copy(&aws_sdkv1.Config{Endpoint: aws_sdkv1.String(config["endpoint"].(string))})), nil + return datasync_sdkv2.NewFromConfig(cfg, func(o *datasync_sdkv2.Options) { + if endpoint := config["endpoint"].(string); endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + }), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/datasync/sweep.go b/internal/service/datasync/sweep.go index 38698a52362..a70d0ee4b91 100644 --- a/internal/service/datasync/sweep.go +++ b/internal/service/datasync/sweep.go @@ -9,12 +9,13 @@ import ( "log" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/datasync" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/datasync" + awstypes "github.com/aws/aws-sdk-go-v2/service/datasync/types" "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/sweep" - "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv1" + "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv2" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) @@ -48,33 +49,30 @@ func sweepAgents(region string) error { if err != nil { return fmt.Errorf("error getting client: %s", err) } - conn := client.DataSyncConn(ctx) + conn := client.DataSyncClient(ctx) input := &datasync.ListAgentsInput{} sweepResources := make([]sweep.Sweepable, 0) - err = conn.ListAgentsPagesWithContext(ctx, input, func(page *datasync.ListAgentsOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := datasync.NewListAgentsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping DataSync Agent sweep for %s: %s", region, err) + return nil + } + + if err != nil { + return fmt.Errorf("error listing DataSync Agents (%s): %w", region, err) } for _, v := range page.Agents { r := ResourceAgent() d := r.Data(nil) - d.SetId(aws.StringValue(v.AgentArn)) + d.SetId(aws.ToString(v.AgentArn)) sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } - - return !lastPage - }) - - if awsv1.SkipSweepError(err) { - log.Printf("[WARN] Skipping DataSync Agent sweep for %s: %s", region, err) - return nil - } - - if err != nil { - return fmt.Errorf("error listing DataSync Agents (%s): %w", region, err) } err = sweep.SweepOrchestrator(ctx, sweepResources) @@ -92,34 +90,31 @@ func sweepLocations(region string) error { if err != nil { return fmt.Errorf("error getting client: %w", err) } - conn := client.DataSyncConn(ctx) + conn := client.DataSyncClient(ctx) input := &datasync.ListLocationsInput{} sweepResources := make([]sweep.Sweepable, 0) - err = conn.ListLocationsPagesWithContext(ctx, input, func(page *datasync.ListLocationsOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := 
datasync.NewListLocationsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping DataSync Location sweep for %s: %s", region, err) + return nil + } + + if err != nil { + return fmt.Errorf("error listing DataSync Locations (%s): %w", region, err) } for _, v := range page.Locations { sweepable := &sweepableLocation{ - arn: aws.StringValue(v.LocationArn), + arn: aws.ToString(v.LocationArn), conn: conn, } sweepResources = append(sweepResources, sweepable) } - - return !lastPage - }) - - if awsv1.SkipSweepError(err) { - log.Printf("[WARN] Skipping DataSync Location sweep for %s: %s", region, err) - return nil - } - - if err != nil { - return fmt.Errorf("error listing DataSync Locations (%s): %w", region, err) } err = sweep.SweepOrchestrator(ctx, sweepResources) @@ -133,16 +128,16 @@ func sweepLocations(region string) error { type sweepableLocation struct { arn string - conn *datasync.DataSync + conn *datasync.Client } func (sweepable *sweepableLocation) Delete(ctx context.Context, timeout time.Duration, optFns ...tfresource.OptionsFunc) error { log.Printf("[DEBUG] Deleting DataSync Location: %s", sweepable.arn) - _, err := sweepable.conn.DeleteLocationWithContext(ctx, &datasync.DeleteLocationInput{ + _, err := sweepable.conn.DeleteLocation(ctx, &datasync.DeleteLocationInput{ LocationArn: aws.String(sweepable.arn), }) - if tfawserr.ErrMessageContains(err, datasync.ErrCodeInvalidRequestException, "not found") { + if errs.IsAErrorMessageContains[*awstypes.InvalidRequestException](err, "not found") { return nil } @@ -159,33 +154,30 @@ func sweepTasks(region string) error { if err != nil { return fmt.Errorf("error getting client: %w", err) } - conn := client.DataSyncConn(ctx) + conn := client.DataSyncClient(ctx) input := &datasync.ListTasksInput{} sweepResources := make([]sweep.Sweepable, 0) - err = conn.ListTasksPagesWithContext(ctx, input, func(page *datasync.ListTasksOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := datasync.NewListTasksPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping DataSync Task sweep for %s: %s", region, err) + return nil + } + + if err != nil { + return fmt.Errorf("error listing DataSync Tasks (%s): %w", region, err) } for _, v := range page.Tasks { r := resourceTask() d := r.Data(nil) - d.SetId(aws.StringValue(v.TaskArn)) + d.SetId(aws.ToString(v.TaskArn)) sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } - - return !lastPage - }) - - if awsv1.SkipSweepError(err) { - log.Printf("[WARN] Skipping DataSync Task sweep for %s: %s", region, err) - return nil - } - - if err != nil { - return fmt.Errorf("error listing DataSync Tasks (%s): %w", region, err) } err = sweep.SweepOrchestrator(ctx, sweepResources) diff --git a/internal/service/datasync/tags_gen.go b/internal/service/datasync/tags_gen.go index b761fcc9dc6..7d2d59a5b83 100644 --- a/internal/service/datasync/tags_gen.go +++ b/internal/service/datasync/tags_gen.go @@ -5,9 +5,9 @@ import ( "context" "fmt" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/datasync" - "github.com/aws/aws-sdk-go/service/datasync/datasynciface" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/datasync" + awstypes "github.com/aws/aws-sdk-go-v2/service/datasync/types" "github.com/hashicorp/terraform-plugin-log/tflog" 
"github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" @@ -19,12 +19,12 @@ import ( // listTags lists datasync service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. -func listTags(ctx context.Context, conn datasynciface.DataSyncAPI, identifier string) (tftags.KeyValueTags, error) { +func listTags(ctx context.Context, conn *datasync.Client, identifier string, optFns ...func(*datasync.Options)) (tftags.KeyValueTags, error) { input := &datasync.ListTagsForResourceInput{ ResourceArn: aws.String(identifier), } - output, err := conn.ListTagsForResourceWithContext(ctx, input) + output, err := conn.ListTagsForResource(ctx, input, optFns...) if err != nil { return tftags.New(ctx, nil), err @@ -36,7 +36,7 @@ func listTags(ctx context.Context, conn datasynciface.DataSyncAPI, identifier st // ListTags lists datasync service tags and set them in Context. // It is called from outside this package. func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier string) error { - tags, err := listTags(ctx, meta.(*conns.AWSClient).DataSyncConn(ctx), identifier) + tags, err := listTags(ctx, meta.(*conns.AWSClient).DataSyncClient(ctx), identifier) if err != nil { return err @@ -52,11 +52,11 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri // []*SERVICE.Tag handling // Tags returns datasync service tags. -func Tags(tags tftags.KeyValueTags) []*datasync.TagListEntry { - result := make([]*datasync.TagListEntry, 0, len(tags)) +func Tags(tags tftags.KeyValueTags) []awstypes.TagListEntry { + result := make([]awstypes.TagListEntry, 0, len(tags)) for k, v := range tags.Map() { - tag := &datasync.TagListEntry{ + tag := awstypes.TagListEntry{ Key: aws.String(k), Value: aws.String(v), } @@ -68,11 +68,11 @@ func Tags(tags tftags.KeyValueTags) []*datasync.TagListEntry { } // KeyValueTags creates tftags.KeyValueTags from datasync service tags. -func KeyValueTags(ctx context.Context, tags []*datasync.TagListEntry) tftags.KeyValueTags { +func KeyValueTags(ctx context.Context, tags []awstypes.TagListEntry) tftags.KeyValueTags { m := make(map[string]*string, len(tags)) for _, tag := range tags { - m[aws.StringValue(tag.Key)] = tag.Value + m[aws.ToString(tag.Key)] = tag.Value } return tftags.New(ctx, m) @@ -80,7 +80,7 @@ func KeyValueTags(ctx context.Context, tags []*datasync.TagListEntry) tftags.Key // getTagsIn returns datasync service tags from Context. // nil is returned if there are no input tags. -func getTagsIn(ctx context.Context) []*datasync.TagListEntry { +func getTagsIn(ctx context.Context) []awstypes.TagListEntry { if inContext, ok := tftags.FromContext(ctx); ok { if tags := Tags(inContext.TagsIn.UnwrapOrDefault()); len(tags) > 0 { return tags @@ -91,7 +91,7 @@ func getTagsIn(ctx context.Context) []*datasync.TagListEntry { } // setTagsOut sets datasync service tags in Context. -func setTagsOut(ctx context.Context, tags []*datasync.TagListEntry) { +func setTagsOut(ctx context.Context, tags []awstypes.TagListEntry) { if inContext, ok := tftags.FromContext(ctx); ok { inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } @@ -100,7 +100,7 @@ func setTagsOut(ctx context.Context, tags []*datasync.TagListEntry) { // updateTags updates datasync service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. 
-func updateTags(ctx context.Context, conn datasynciface.DataSyncAPI, identifier string, oldTagsMap, newTagsMap any) error { +func updateTags(ctx context.Context, conn *datasync.Client, identifier string, oldTagsMap, newTagsMap any, optFns ...func(*datasync.Options)) error { oldTags := tftags.New(ctx, oldTagsMap) newTags := tftags.New(ctx, newTagsMap) @@ -111,10 +111,10 @@ func updateTags(ctx context.Context, conn datasynciface.DataSyncAPI, identifier if len(removedTags) > 0 { input := &datasync.UntagResourceInput{ ResourceArn: aws.String(identifier), - Keys: aws.StringSlice(removedTags.Keys()), + Keys: removedTags.Keys(), } - _, err := conn.UntagResourceWithContext(ctx, input) + _, err := conn.UntagResource(ctx, input, optFns...) if err != nil { return fmt.Errorf("untagging resource (%s): %w", identifier, err) @@ -129,7 +129,7 @@ func updateTags(ctx context.Context, conn datasynciface.DataSyncAPI, identifier Tags: Tags(updatedTags), } - _, err := conn.TagResourceWithContext(ctx, input) + _, err := conn.TagResource(ctx, input, optFns...) if err != nil { return fmt.Errorf("tagging resource (%s): %w", identifier, err) @@ -142,5 +142,5 @@ func updateTags(ctx context.Context, conn datasynciface.DataSyncAPI, identifier // UpdateTags updates datasync service tags. // It is called from outside this package. func (p *servicePackage) UpdateTags(ctx context.Context, meta any, identifier string, oldTags, newTags any) error { - return updateTags(ctx, meta.(*conns.AWSClient).DataSyncConn(ctx), identifier, oldTags, newTags) + return updateTags(ctx, meta.(*conns.AWSClient).DataSyncClient(ctx), identifier, oldTags, newTags) } diff --git a/internal/service/datasync/task.go b/internal/service/datasync/task.go index 9b21a6f515c..7cfdfd97d9a 100644 --- a/internal/service/datasync/task.go +++ b/internal/service/datasync/task.go @@ -10,14 +10,16 @@ import ( "time" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/datasync" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/datasync" + awstypes "github.com/aws/aws-sdk-go-v2/service/datasync/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -63,9 +65,9 @@ func resourceTask() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "filter_type": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(datasync.FilterType_Values(), false), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.FilterType](), }, "value": { Type: schema.TypeString, @@ -81,9 +83,9 @@ func resourceTask() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "filter_type": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(datasync.FilterType_Values(), false), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: 
enum.Validate[awstypes.FilterType](), }, "value": { Type: schema.TypeString, @@ -104,10 +106,10 @@ func resourceTask() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "atime": { - Type: schema.TypeString, - Optional: true, - Default: datasync.AtimeBestEffort, - ValidateFunc: validation.StringInSlice(datasync.Atime_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: awstypes.AtimeBestEffort, + ValidateDiagFunc: enum.Validate[awstypes.Atime](), }, "bytes_per_second": { Type: schema.TypeInt, @@ -116,82 +118,82 @@ func resourceTask() *schema.Resource { ValidateFunc: validation.IntAtLeast(-1), }, "gid": { - Type: schema.TypeString, - Optional: true, - Default: datasync.GidIntValue, - ValidateFunc: validation.StringInSlice(datasync.Gid_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: awstypes.GidIntValue, + ValidateDiagFunc: enum.Validate[awstypes.Gid](), }, "log_level": { - Type: schema.TypeString, - Optional: true, - Default: datasync.LogLevelOff, - ValidateFunc: validation.StringInSlice(datasync.LogLevel_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: awstypes.LogLevelOff, + ValidateDiagFunc: enum.Validate[awstypes.LogLevel](), }, "mtime": { - Type: schema.TypeString, - Optional: true, - Default: datasync.MtimePreserve, - ValidateFunc: validation.StringInSlice(datasync.Mtime_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: awstypes.MtimePreserve, + ValidateDiagFunc: enum.Validate[awstypes.Mtime](), }, "object_tags": { - Type: schema.TypeString, - Optional: true, - Default: datasync.ObjectTagsPreserve, - ValidateFunc: validation.StringInSlice(datasync.ObjectTags_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: awstypes.ObjectTagsPreserve, + ValidateDiagFunc: enum.Validate[awstypes.ObjectTags](), }, "overwrite_mode": { - Type: schema.TypeString, - Optional: true, - Default: datasync.OverwriteModeAlways, - ValidateFunc: validation.StringInSlice(datasync.OverwriteMode_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: awstypes.OverwriteModeAlways, + ValidateDiagFunc: enum.Validate[awstypes.OverwriteMode](), }, "posix_permissions": { - Type: schema.TypeString, - Optional: true, - Default: datasync.PosixPermissionsPreserve, - ValidateFunc: validation.StringInSlice(datasync.PosixPermissions_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: awstypes.PosixPermissionsPreserve, + ValidateDiagFunc: enum.Validate[awstypes.PosixPermissions](), }, "preserve_deleted_files": { - Type: schema.TypeString, - Optional: true, - Default: datasync.PreserveDeletedFilesPreserve, - ValidateFunc: validation.StringInSlice(datasync.PreserveDeletedFiles_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: awstypes.PreserveDeletedFilesPreserve, + ValidateDiagFunc: enum.Validate[awstypes.PreserveDeletedFiles](), }, "preserve_devices": { - Type: schema.TypeString, - Optional: true, - Default: datasync.PreserveDevicesNone, - ValidateFunc: validation.StringInSlice(datasync.PreserveDevices_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: awstypes.PreserveDevicesNone, + ValidateDiagFunc: enum.Validate[awstypes.PreserveDevices](), }, "security_descriptor_copy_flags": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validation.StringInSlice(datasync.SmbSecurityDescriptorCopyFlags_Values(), false), + Type: schema.TypeString, + Optional: true, + Computed: true, 
+ ValidateDiagFunc: enum.Validate[awstypes.SmbSecurityDescriptorCopyFlags](), }, "task_queueing": { - Type: schema.TypeString, - Optional: true, - Default: datasync.TaskQueueingEnabled, - ValidateFunc: validation.StringInSlice(datasync.TaskQueueing_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: awstypes.TaskQueueingEnabled, + ValidateDiagFunc: enum.Validate[awstypes.TaskQueueing](), }, "transfer_mode": { - Type: schema.TypeString, - Optional: true, - Default: datasync.TransferModeChanged, - ValidateFunc: validation.StringInSlice(datasync.TransferMode_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: awstypes.TransferModeChanged, + ValidateDiagFunc: enum.Validate[awstypes.TransferMode](), }, "uid": { - Type: schema.TypeString, - Optional: true, - Default: datasync.UidIntValue, - ValidateFunc: validation.StringInSlice(datasync.Uid_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: awstypes.UidIntValue, + ValidateDiagFunc: enum.Validate[awstypes.Uid](), }, "verify_mode": { - Type: schema.TypeString, - Optional: true, - Default: datasync.VerifyModePointInTimeConsistent, - ValidateFunc: validation.StringInSlice(datasync.VerifyMode_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: awstypes.VerifyModePointInTimeConsistent, + ValidateDiagFunc: enum.Validate[awstypes.VerifyMode](), }, }, }, @@ -252,14 +254,14 @@ func resourceTask() *schema.Resource { }, }, "s3_object_versioning": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(datasync.ObjectVersionIds_Values(), false), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.ObjectVersionIds](), }, "output_type": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(datasync.ReportOutputType_Values(), false), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.ReportOutputType](), }, "report_overrides": { Type: schema.TypeList, @@ -268,32 +270,32 @@ func resourceTask() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "deleted_override": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(datasync.ReportLevel_Values(), false), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.ReportLevel](), }, "skipped_override": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(datasync.ReportLevel_Values(), false), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.ReportLevel](), }, "transferred_override": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(datasync.ReportLevel_Values(), false), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.ReportLevel](), }, "verified_override": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(datasync.ReportLevel_Values(), false), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.ReportLevel](), }, }, }, }, "report_level": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(datasync.ReportLevel_Values(), false), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.ReportLevel](), }, }, }, @@ -306,7 +308,7 @@ func resourceTask() *schema.Resource { func resourceTaskCreate(ctx context.Context, d 
*schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataSyncConn(ctx) + conn := meta.(*conns.AWSClient).DataSyncClient(ctx) input := &datasync.CreateTaskInput{ DestinationLocationArn: aws.String(d.Get("destination_location_arn").(string)), @@ -339,13 +341,13 @@ func resourceTaskCreate(ctx context.Context, d *schema.ResourceData, meta interf input.Schedule = expandTaskSchedule(v.([]interface{})) } - output, err := conn.CreateTaskWithContext(ctx, input) + output, err := conn.CreateTask(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating DataSync Task: %s", err) } - d.SetId(aws.StringValue(output.TaskArn)) + d.SetId(aws.ToString(output.TaskArn)) if _, err := waitTaskAvailable(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for DataSync Task (%s) creation: %s", d.Id(), err) @@ -356,7 +358,7 @@ func resourceTaskCreate(ctx context.Context, d *schema.ResourceData, meta interf func resourceTaskRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataSyncConn(ctx) + conn := meta.(*conns.AWSClient).DataSyncClient(ctx) output, err := findTaskByARN(ctx, conn, d.Id()) @@ -396,7 +398,7 @@ func resourceTaskRead(ctx context.Context, d *schema.ResourceData, meta interfac func resourceTaskUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataSyncConn(ctx) + conn := meta.(*conns.AWSClient).DataSyncClient(ctx) if d.HasChangesExcept("tags", "tags_all") { input := &datasync.UpdateTaskInput{ @@ -431,7 +433,7 @@ func resourceTaskUpdate(ctx context.Context, d *schema.ResourceData, meta interf input.TaskReportConfig = expandTaskReportConfig(d.Get("task_report_config").([]interface{})) } - if _, err := conn.UpdateTaskWithContext(ctx, input); err != nil { + if _, err := conn.UpdateTask(ctx, input); err != nil { return sdkdiag.AppendErrorf(diags, "updating DataSync Task (%s): %s", d.Id(), err) } } @@ -441,14 +443,14 @@ func resourceTaskUpdate(ctx context.Context, d *schema.ResourceData, meta interf func resourceTaskDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataSyncConn(ctx) + conn := meta.(*conns.AWSClient).DataSyncClient(ctx) log.Printf("[DEBUG] Deleting DataSync Task: %s", d.Id()) - _, err := conn.DeleteTaskWithContext(ctx, &datasync.DeleteTaskInput{ + _, err := conn.DeleteTask(ctx, &datasync.DeleteTaskInput{ TaskArn: aws.String(d.Id()), }) - if tfawserr.ErrMessageContains(err, datasync.ErrCodeInvalidRequestException, "not found") { + if errs.IsAErrorMessageContains[*awstypes.InvalidRequestException](err, "not found") { return diags } @@ -459,14 +461,14 @@ func resourceTaskDelete(ctx context.Context, d *schema.ResourceData, meta interf return diags } -func findTaskByARN(ctx context.Context, conn *datasync.DataSync, arn string) (*datasync.DescribeTaskOutput, error) { +func findTaskByARN(ctx context.Context, conn *datasync.Client, arn string) (*datasync.DescribeTaskOutput, error) { input := &datasync.DescribeTaskInput{ TaskArn: aws.String(arn), } - output, err := conn.DescribeTaskWithContext(ctx, input) + output, err := conn.DescribeTask(ctx, input) - if tfawserr.ErrMessageContains(err, datasync.ErrCodeInvalidRequestException, "not found") { + if 
errs.IsAErrorMessageContains[*awstypes.InvalidRequestException](err, "not found") { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -484,7 +486,7 @@ func findTaskByARN(ctx context.Context, conn *datasync.DataSync, arn string) (*d return output, nil } -func statusTask(ctx context.Context, conn *datasync.DataSync, arn string) retry.StateRefreshFunc { +func statusTask(ctx context.Context, conn *datasync.Client, arn string) retry.StateRefreshFunc { return func() (interface{}, string, error) { output, err := findTaskByARN(ctx, conn, arn) @@ -496,14 +498,14 @@ func statusTask(ctx context.Context, conn *datasync.DataSync, arn string) retry. return nil, "", err } - return output, aws.StringValue(output.Status), nil + return output, string(output.Status), nil } } -func waitTaskAvailable(ctx context.Context, conn *datasync.DataSync, arn string, timeout time.Duration) (*datasync.DescribeTaskOutput, error) { +func waitTaskAvailable(ctx context.Context, conn *datasync.Client, arn string, timeout time.Duration) (*datasync.DescribeTaskOutput, error) { stateConf := &retry.StateChangeConf{ - Pending: []string{datasync.TaskStatusCreating, datasync.TaskStatusUnavailable}, - Target: []string{datasync.TaskStatusAvailable, datasync.TaskStatusRunning}, + Pending: enum.Slice(awstypes.TaskStatusCreating, awstypes.TaskStatusUnavailable), + Target: enum.Slice(awstypes.TaskStatusAvailable, awstypes.TaskStatusRunning), Refresh: statusTask(ctx, conn, arn), Timeout: timeout, } @@ -511,7 +513,7 @@ func waitTaskAvailable(ctx context.Context, conn *datasync.DataSync, arn string, outputRaw, err := stateConf.WaitForStateContext(ctx) if output, ok := outputRaw.(*datasync.DescribeTaskOutput); ok { - if errorCode, errorDetail := aws.StringValue(output.ErrorCode), aws.StringValue(output.ErrorDetail); errorCode != "" && errorDetail != "" { + if errorCode, errorDetail := aws.ToString(output.ErrorCode), aws.ToString(output.ErrorDetail); errorCode != "" && errorDetail != "" { tfresource.SetLastError(err, fmt.Errorf("%s: %s", errorCode, errorDetail)) } @@ -521,41 +523,41 @@ func waitTaskAvailable(ctx context.Context, conn *datasync.DataSync, arn string, return nil, err } -func flattenOptions(options *datasync.Options) []interface{} { +func flattenOptions(options *awstypes.Options) []interface{} { if options == nil { return []interface{}{} } m := map[string]interface{}{ - "atime": aws.StringValue(options.Atime), - "bytes_per_second": aws.Int64Value(options.BytesPerSecond), - "gid": aws.StringValue(options.Gid), - "log_level": aws.StringValue(options.LogLevel), - "mtime": aws.StringValue(options.Mtime), - "object_tags": aws.StringValue(options.ObjectTags), - "overwrite_mode": aws.StringValue(options.OverwriteMode), - "posix_permissions": aws.StringValue(options.PosixPermissions), - "preserve_deleted_files": aws.StringValue(options.PreserveDeletedFiles), - "preserve_devices": aws.StringValue(options.PreserveDevices), - "security_descriptor_copy_flags": aws.StringValue(options.SecurityDescriptorCopyFlags), - "task_queueing": aws.StringValue(options.TaskQueueing), - "transfer_mode": aws.StringValue(options.TransferMode), - "uid": aws.StringValue(options.Uid), - "verify_mode": aws.StringValue(options.VerifyMode), + "atime": string(options.Atime), + "bytes_per_second": aws.ToInt64(options.BytesPerSecond), + "gid": string(options.Gid), + "log_level": string(options.LogLevel), + "mtime": string(options.Mtime), + "object_tags": string(options.ObjectTags), + "overwrite_mode": string(options.OverwriteMode), + 
"posix_permissions": string(options.PosixPermissions), + "preserve_deleted_files": string(options.PreserveDeletedFiles), + "preserve_devices": string(options.PreserveDevices), + "security_descriptor_copy_flags": string(options.SecurityDescriptorCopyFlags), + "task_queueing": string(options.TaskQueueing), + "transfer_mode": string(options.TransferMode), + "uid": string(options.Uid), + "verify_mode": string(options.VerifyMode), } return []interface{}{m} } -func flattenTaskReportConfig(options *datasync.TaskReportConfig) []interface{} { +func flattenTaskReportConfig(options *awstypes.TaskReportConfig) []interface{} { if options == nil { return []interface{}{} } m := map[string]interface{}{ - "s3_object_versioning": aws.StringValue(options.ObjectVersionIds), - "output_type": aws.StringValue(options.OutputType), - "report_level": aws.StringValue(options.ReportLevel), + "s3_object_versioning": string(options.ObjectVersionIds), + "output_type": string(options.OutputType), + "report_level": string(options.ReportLevel), "s3_destination": flattenTaskReportConfigS3Destination(options.Destination.S3), "report_overrides": flattenTaskReportConfigReportOverrides(options.Overrides), } @@ -563,67 +565,67 @@ func flattenTaskReportConfig(options *datasync.TaskReportConfig) []interface{} { return []interface{}{m} } -func flattenTaskReportConfigReportOverrides(options *datasync.ReportOverrides) []interface{} { +func flattenTaskReportConfigReportOverrides(options *awstypes.ReportOverrides) []interface{} { m := make(map[string]interface{}) if options == nil { return []interface{}{m} } - if options.Deleted != nil && options.Deleted.ReportLevel != nil { - m["deleted_override"] = aws.StringValue(options.Deleted.ReportLevel) + if options.Deleted != nil && options.Deleted.ReportLevel != "" { + m["deleted_override"] = string(options.Deleted.ReportLevel) } - if options.Skipped != nil && options.Skipped.ReportLevel != nil { - m["skipped_override"] = aws.StringValue(options.Skipped.ReportLevel) + if options.Skipped != nil && options.Skipped.ReportLevel != "" { + m["skipped_override"] = string(options.Skipped.ReportLevel) } - if options.Transferred != nil && options.Transferred.ReportLevel != nil { - m["transferred_override"] = aws.StringValue(options.Transferred.ReportLevel) + if options.Transferred != nil && options.Transferred.ReportLevel != "" { + m["transferred_override"] = string(options.Transferred.ReportLevel) } - if options.Verified != nil && options.Verified.ReportLevel != nil { - m["verified_override"] = aws.StringValue(options.Verified.ReportLevel) + if options.Verified != nil && options.Verified.ReportLevel != "" { + m["verified_override"] = string(options.Verified.ReportLevel) } return []interface{}{m} } -func flattenTaskReportConfigS3Destination(options *datasync.ReportDestinationS3) []interface{} { +func flattenTaskReportConfigS3Destination(options *awstypes.ReportDestinationS3) []interface{} { if options == nil { return []interface{}{} } m := map[string]interface{}{ - "bucket_access_role_arn": aws.StringValue(options.BucketAccessRoleArn), - "s3_bucket_arn": aws.StringValue(options.S3BucketArn), - "subdirectory": aws.StringValue(options.Subdirectory), + "bucket_access_role_arn": aws.ToString(options.BucketAccessRoleArn), + "s3_bucket_arn": aws.ToString(options.S3BucketArn), + "subdirectory": aws.ToString(options.Subdirectory), } return []interface{}{m} } -func expandOptions(l []interface{}) *datasync.Options { +func expandOptions(l []interface{}) *awstypes.Options { if len(l) == 0 || l[0] == nil { return 
nil } m := l[0].(map[string]interface{}) - options := &datasync.Options{ - Atime: aws.String(m["atime"].(string)), - Gid: aws.String(m["gid"].(string)), - LogLevel: aws.String(m["log_level"].(string)), - Mtime: aws.String(m["mtime"].(string)), - ObjectTags: aws.String(m["object_tags"].(string)), - OverwriteMode: aws.String(m["overwrite_mode"].(string)), - PreserveDeletedFiles: aws.String(m["preserve_deleted_files"].(string)), - PreserveDevices: aws.String(m["preserve_devices"].(string)), - PosixPermissions: aws.String(m["posix_permissions"].(string)), - TaskQueueing: aws.String(m["task_queueing"].(string)), - TransferMode: aws.String(m["transfer_mode"].(string)), - Uid: aws.String(m["uid"].(string)), - VerifyMode: aws.String(m["verify_mode"].(string)), + options := &awstypes.Options{ + Atime: awstypes.Atime(m["atime"].(string)), + Gid: awstypes.Gid(m["gid"].(string)), + LogLevel: awstypes.LogLevel(m["log_level"].(string)), + Mtime: awstypes.Mtime(m["mtime"].(string)), + ObjectTags: awstypes.ObjectTags(m["object_tags"].(string)), + OverwriteMode: awstypes.OverwriteMode(m["overwrite_mode"].(string)), + PreserveDeletedFiles: awstypes.PreserveDeletedFiles(m["preserve_deleted_files"].(string)), + PreserveDevices: awstypes.PreserveDevices(m["preserve_devices"].(string)), + PosixPermissions: awstypes.PosixPermissions(m["posix_permissions"].(string)), + TaskQueueing: awstypes.TaskQueueing(m["task_queueing"].(string)), + TransferMode: awstypes.TransferMode(m["transfer_mode"].(string)), + Uid: awstypes.Uid(m["uid"].(string)), + VerifyMode: awstypes.VerifyMode(m["verify_mode"].(string)), } if v, ok := m["bytes_per_second"].(int); ok && v != 0 { @@ -631,64 +633,64 @@ func expandOptions(l []interface{}) *datasync.Options { } if v, ok := m["security_descriptor_copy_flags"].(string); ok && v != "" { - options.SecurityDescriptorCopyFlags = aws.String(v) + options.SecurityDescriptorCopyFlags = awstypes.SmbSecurityDescriptorCopyFlags(v) } return options } -func expandTaskSchedule(l []interface{}) *datasync.TaskSchedule { +func expandTaskSchedule(l []interface{}) *awstypes.TaskSchedule { if len(l) == 0 || l[0] == nil { - return &datasync.TaskSchedule{ScheduleExpression: aws.String("")} // explicitly set empty object if schedule is nil + return &awstypes.TaskSchedule{ScheduleExpression: aws.String("")} // explicitly set empty object if schedule is nil } m := l[0].(map[string]interface{}) - schedule := &datasync.TaskSchedule{ + schedule := &awstypes.TaskSchedule{ ScheduleExpression: aws.String(m["schedule_expression"].(string)), } return schedule } -func flattenTaskSchedule(schedule *datasync.TaskSchedule) []interface{} { +func flattenTaskSchedule(schedule *awstypes.TaskSchedule) []interface{} { if schedule == nil { return []interface{}{} } m := map[string]interface{}{ - "schedule_expression": aws.StringValue(schedule.ScheduleExpression), + "schedule_expression": aws.ToString(schedule.ScheduleExpression), } return []interface{}{m} } -func expandTaskReportConfig(l []interface{}) *datasync.TaskReportConfig { +func expandTaskReportConfig(l []interface{}) *awstypes.TaskReportConfig { if len(l) == 0 || l[0] == nil { return nil } - reportConfig := &datasync.TaskReportConfig{} + reportConfig := &awstypes.TaskReportConfig{} m := l[0].(map[string]interface{}) dest := m["s3_destination"].([]interface{}) - reportConfig = reportConfig.SetDestination(expandTaskReportDestination(dest)) - reportConfig = reportConfig.SetObjectVersionIds(m["s3_object_versioning"].(string)) - reportConfig = 
reportConfig.SetOutputType(m["output_type"].(string)) - reportConfig = reportConfig.SetReportLevel(m["report_level"].(string)) + reportConfig.Destination = expandTaskReportDestination(dest) + reportConfig.ObjectVersionIds = awstypes.ObjectVersionIds(m["s3_object_versioning"].(string)) + reportConfig.OutputType = awstypes.ReportOutputType(m["output_type"].(string)) + reportConfig.ReportLevel = awstypes.ReportLevel(m["report_level"].(string)) o := m["report_overrides"].([]interface{}) - reportConfig = reportConfig.SetOverrides(expandTaskReportOverrides(o)) + reportConfig.Overrides = expandTaskReportOverrides(o) return reportConfig } -func expandTaskReportDestination(l []interface{}) *datasync.ReportDestination { +func expandTaskReportDestination(l []interface{}) *awstypes.ReportDestination { if len(l) == 0 || l[0] == nil { return nil } m := l[0].(map[string]interface{}) - return &datasync.ReportDestination{ - S3: &datasync.ReportDestinationS3{ + return &awstypes.ReportDestination{ + S3: &awstypes.ReportDestinationS3{ BucketAccessRoleArn: aws.String(m["bucket_access_role_arn"].(string)), S3BucketArn: aws.String(m["s3_bucket_arn"].(string)), Subdirectory: aws.String(m["subdirectory"].(string)), @@ -696,8 +698,8 @@ func expandTaskReportDestination(l []interface{}) *datasync.ReportDestination { } } -func expandTaskReportOverrides(l []interface{}) *datasync.ReportOverrides { - var overrides = &datasync.ReportOverrides{} +func expandTaskReportOverrides(l []interface{}) *awstypes.ReportOverrides { + var overrides = &awstypes.ReportOverrides{} if len(l) == 0 || l[0] == nil { return overrides @@ -707,45 +709,45 @@ func expandTaskReportOverrides(l []interface{}) *datasync.ReportOverrides { deleteOverride := m["deleted_override"].(string) if deleteOverride != "" { - overrides.SetDeleted(&datasync.ReportOverride{ - ReportLevel: aws.String(deleteOverride), - }) + overrides.Deleted = &awstypes.ReportOverride{ + ReportLevel: awstypes.ReportLevel(deleteOverride), + } } skippedOverride := m["skipped_override"].(string) if skippedOverride != "" { - overrides.SetSkipped(&datasync.ReportOverride{ - ReportLevel: aws.String(skippedOverride), - }) + overrides.Skipped = &awstypes.ReportOverride{ + ReportLevel: awstypes.ReportLevel(skippedOverride), + } } transferredOverride := m["transferred_override"].(string) if transferredOverride != "" { - overrides.SetTransferred(&datasync.ReportOverride{ - ReportLevel: aws.String(transferredOverride), - }) + overrides.Transferred = &awstypes.ReportOverride{ + ReportLevel: awstypes.ReportLevel(transferredOverride), + } } verifiedOverride := m["verified_override"].(string) if verifiedOverride != "" { - overrides.SetVerified(&datasync.ReportOverride{ - ReportLevel: aws.String(verifiedOverride), - }) + overrides.Verified = &awstypes.ReportOverride{ + ReportLevel: awstypes.ReportLevel(verifiedOverride), + } } return overrides } -func expandFilterRules(l []interface{}) []*datasync.FilterRule { - filterRules := []*datasync.FilterRule{} +func expandFilterRules(l []interface{}) []awstypes.FilterRule { + filterRules := []awstypes.FilterRule{} for _, mRaw := range l { if mRaw == nil { continue } m := mRaw.(map[string]interface{}) - filterRule := &datasync.FilterRule{ - FilterType: aws.String(m["filter_type"].(string)), + filterRule := awstypes.FilterRule{ + FilterType: awstypes.FilterType(m["filter_type"].(string)), Value: aws.String(m["value"].(string)), } filterRules = append(filterRules, filterRule) @@ -754,13 +756,13 @@ func expandFilterRules(l []interface{}) []*datasync.FilterRule 
{ return filterRules } -func flattenFilterRules(filterRules []*datasync.FilterRule) []interface{} { +func flattenFilterRules(filterRules []awstypes.FilterRule) []interface{} { l := []interface{}{} for _, filterRule := range filterRules { m := map[string]interface{}{ - "filter_type": aws.StringValue(filterRule.FilterType), - "value": aws.StringValue(filterRule.Value), + "filter_type": string(filterRule.FilterType), + "value": aws.ToString(filterRule.Value), } l = append(l, m) } diff --git a/internal/service/datasync/task_test.go b/internal/service/datasync/task_test.go index 33340ea4c23..7d14d36ff2c 100644 --- a/internal/service/datasync/task_test.go +++ b/internal/service/datasync/task_test.go @@ -10,8 +10,9 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/datasync" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/datasync" + awstypes "github.com/aws/aws-sdk-go-v2/service/datasync/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -896,7 +897,7 @@ func TestAccDataSyncTask_tags(t *testing.T) { func testAccCheckTaskDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_datasync_task" { @@ -927,7 +928,7 @@ func testAccCheckTaskExists(ctx context.Context, resourceName string, task *data return fmt.Errorf("Not found: %s", resourceName) } - conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncClient(ctx) output, err := tfdatasync.FindTaskByARN(ctx, conn, rs.Primary.ID) @@ -935,9 +936,9 @@ func testAccCheckTaskExists(ctx context.Context, resourceName string, task *data return err } - if aws.StringValue(output.Status) != datasync.TaskStatusAvailable && aws.StringValue(output.Status) != datasync.TaskStatusRunning { + if output.Status != awstypes.TaskStatusAvailable && output.Status != awstypes.TaskStatusRunning { return fmt.Errorf("Task %q not available or running: last status (%s), error code (%s), error detail: %s", - rs.Primary.ID, aws.StringValue(output.Status), aws.StringValue(output.ErrorCode), aws.StringValue(output.ErrorDetail)) + rs.Primary.ID, string(output.Status), aws.ToString(output.ErrorCode), aws.ToString(output.ErrorDetail)) } *task = *output @@ -948,7 +949,7 @@ func testAccCheckTaskExists(ctx context.Context, resourceName string, task *data func testAccCheckTaskNotRecreated(i, j *datasync.DescribeTaskOutput) resource.TestCheckFunc { return func(s *terraform.State) error { - if aws.StringValue(i.TaskArn) != aws.StringValue(j.TaskArn) { + if aws.ToString(i.TaskArn) != aws.ToString(j.TaskArn) { return errors.New("DataSync Task was recreated") } @@ -957,13 +958,13 @@ func testAccCheckTaskNotRecreated(i, j *datasync.DescribeTaskOutput) resource.Te } func testAccPreCheck(ctx context.Context, t *testing.T) { - conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncClient(ctx) input := &datasync.ListTasksInput{ - MaxResults: aws.Int64(1), + MaxResults: aws.Int32(1), } - _, err := conn.ListTasksWithContext(ctx, input) + _, err := 
conn.ListTasks(ctx, input) if acctest.PreCheckSkipError(err) { t.Skipf("skipping acceptance testing: %s", err) diff --git a/internal/service/datasync/uri.go b/internal/service/datasync/uri.go index 1947bcd471d..65728402c34 100644 --- a/internal/service/datasync/uri.go +++ b/internal/service/datasync/uri.go @@ -7,7 +7,7 @@ import ( "fmt" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws/arn" + "github.com/aws/aws-sdk-go-v2/aws/arn" ) var ( diff --git a/names/data/names_data.csv b/names/data/names_data.csv index 775823a45db..0dabf8af299 100644 --- a/names/data/names_data.csv +++ b/names/data/names_data.csv @@ -109,7 +109,7 @@ cur,cur,costandusagereportservice,costandusagereportservice,,cur,,costandusagere ,,,,,,,,,,,,,,,,,Cryptographic Services Overview,AWS,x,,,,,,,,,No SDK support dataexchange,dataexchange,dataexchange,dataexchange,,dataexchange,,,DataExchange,DataExchange,,1,,,aws_dataexchange_,,dataexchange_,Data Exchange,AWS,,,,,,,DataExchange,ListDataSets,, datapipeline,datapipeline,datapipeline,datapipeline,,datapipeline,,,DataPipeline,DataPipeline,,1,,,aws_datapipeline_,,datapipeline_,Data Pipeline,AWS,,,,,,,Data Pipeline,ListPipelines,, -datasync,datasync,datasync,datasync,,datasync,,,DataSync,DataSync,,1,,,aws_datasync_,,datasync_,DataSync,AWS,,,,,,,DataSync,ListAgents,, +datasync,datasync,datasync,datasync,,datasync,,,DataSync,DataSync,,,2,,aws_datasync_,,datasync_,DataSync,AWS,,,,,,,DataSync,ListAgents,, datazone,datazone,datazone,datazone,,datazone,,,DataZone,DataZone,,,2,,aws_datazone_,,datazone_,DataZone,Amazon,,,,,,,DataZone,ListDomains,, ,,,,,,,,,,,,,,,,,Deep Learning AMIs,AWS,x,,,,,,,,,No SDK support ,,,,,,,,,,,,,,,,,Deep Learning Containers,AWS,x,,,,,,,,,No SDK support
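The hunks above apply a small set of AWS SDK for Go v1-to-v2 migration patterns over and over. The sketches that follow illustrate those patterns in isolation; any helper or package not shown in the diff is an assumption for illustration, not provider code. First, pagination: the sweepers drop the v1 ListAgentsPagesWithContext callback in favour of the generated v2 paginator.

```go
// Standalone sketch: listing DataSync agent ARNs with the SDK v2 paginator.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/datasync"
)

func listAgentARNs(ctx context.Context, conn *datasync.Client) ([]string, error) {
	var arns []string

	// The paginator tracks NextToken internally; NextPage returns one page per call.
	pages := datasync.NewListAgentsPaginator(conn, &datasync.ListAgentsInput{})
	for pages.HasMorePages() {
		page, err := pages.NextPage(ctx)
		if err != nil {
			return nil, fmt.Errorf("listing DataSync Agents: %w", err)
		}
		for _, v := range page.Agents {
			arns = append(arns, aws.ToString(v.AgentArn))
		}
	}

	return arns, nil
}

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	arns, err := listAgentARNs(ctx, datasync.NewFromConfig(cfg))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(arns)
}
```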
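Error handling moves from v1 error-code matching (tfawserr.ErrMessageContains with datasync.ErrCodeInvalidRequestException) to generics-based type matching via errs.IsAErrorMessageContains[*awstypes.InvalidRequestException]. A minimal sketch of what such a helper has to do, using only the standard library; the provider's real helper presumably inspects the smithy APIError message rather than the full Error() string.

```go
// Minimal sketch of a typed error matcher: reports whether err wraps an error
// of type E whose message contains msg. For the DataSync calls above, E would
// be *awstypes.InvalidRequestException.
package errsketch

import (
	"errors"
	"strings"
)

func isAErrorMessageContains[E error](err error, msg string) bool {
	var target E
	if !errors.As(err, &target) {
		return false
	}
	return strings.Contains(target.Error(), msg)
}
```

A call site then reads much like the diff: isAErrorMessageContains[*awstypes.InvalidRequestException](err, "not found").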
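The task schema replaces validation.StringInSlice over generated *_Values() slices with enum.Validate[T](), and the waiter replaces v1 string constants with enum.Slice(...). SDK v2 enums are string-kinded types with a generated Values() method, so both helpers can be written generically. The shapes below are assumptions about what such helpers might look like, not the provider's internal/enum package.

```go
// Sketch of generic helpers over AWS SDK for Go v2 string enums.
package enumsketch

import "fmt"

// valuer is the shape SDK v2 enum types share: a string-kinded type whose
// Values() method lists every known enum value (e.g. awstypes.FilterType).
type valuer[T ~string] interface {
	~string
	Values() []T
}

// validate checks a raw configuration string against the enum's known values,
// roughly the check a ValidateDiagFunc built by enum.Validate would perform.
func validate[T valuer[T]](raw string) error {
	var zero T
	for _, v := range zero.Values() {
		if string(v) == raw {
			return nil
		}
	}
	return fmt.Errorf("%q is not one of %v", raw, zero.Values())
}

// slice converts typed enum values into the []string that helpers such as
// retry.StateChangeConf Pending/Target expect, mirroring enum.Slice above.
func slice[T ~string](values ...T) []string {
	out := make([]string, len(values))
	for i, v := range values {
		out[i] = string(v)
	}
	return out
}
```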
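Input construction shifts from pointer-heavy v1 fields to v2 value slices and typed enums, which is why flex.ExpandStringSet becomes flex.ExpandStringValueSet and SmbMountOptions.Version becomes a type conversion instead of aws.String. Shown against the CreateLocationSmbInput fields the diff itself populates (argument values here are placeholders):

```go
// Building a v2 CreateLocationSmbInput: string slices and enums are plain
// values; only optional scalar strings still need aws.String.
package locsketch

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/datasync"
	awstypes "github.com/aws/aws-sdk-go-v2/service/datasync/types"
)

func newCreateLocationSmbInput(agentARNs []string, mountVersion, password, host, subdir, user string) *datasync.CreateLocationSmbInput {
	return &datasync.CreateLocationSmbInput{
		AgentArns: agentARNs, // []string, no aws.StringSlice conversion needed
		MountOptions: &awstypes.SmbMountOptions{
			Version: awstypes.SmbVersion(mountVersion), // typed enum instead of *string
		},
		Password:       aws.String(password),
		ServerHostname: aws.String(host),
		Subdirectory:   aws.String(subdir),
		User:           aws.String(user),
	}
}
```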
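Client construction follows the NewClient pattern from service_package_gen.go: NewFromConfig plus a functional option that sets BaseEndpoint only when a custom endpoint is configured, replacing the v1 session copy with aws.Config{Endpoint: ...}. Trimmed to its essentials:

```go
// Constructing a DataSync v2 client with an optional custom endpoint.
package clientsketch

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/datasync"
)

func newDataSyncClient(cfg aws.Config, endpoint string) *datasync.Client {
	return datasync.NewFromConfig(cfg, func(o *datasync.Options) {
		if endpoint != "" {
			// BaseEndpoint overrides the default endpoint resolution, playing
			// the role the v1 aws.Config Endpoint field used to play.
			o.BaseEndpoint = aws.String(endpoint)
		}
	})
}
```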
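Finally, the generated endpoint test now drives a real ListAgents call through client middleware instead of constructing a v1 request object. The addRetrieveEndpointURLMiddleware and addCancelRequestMiddleware helpers it references are defined in the provider's shared test code outside this diff; the sketch below shows one plausible way to implement the same idea with smithy-go, capturing the resolved URL at the Finalize step and aborting with a sentinel error before any HTTP traffic is sent.

```go
// Plausible sketch of an endpoint-capturing, request-cancelling middleware.
package mwsketch

import (
	"context"
	"errors"
	"fmt"

	"github.com/aws/smithy-go/middleware"
	smithyhttp "github.com/aws/smithy-go/transport/http"
)

// errCancelOperation is the sentinel the test treats as success.
var errCancelOperation = errors.New("test: cancel operation")

// captureEndpointAndCancel returns a stack mutator suitable for appending to
// Options.APIOptions. It records the fully resolved request URL, then aborts.
func captureEndpointAndCancel(endpoint *string) func(*middleware.Stack) error {
	return func(stack *middleware.Stack) error {
		return stack.Finalize.Add(
			middleware.FinalizeMiddlewareFunc("captureEndpointAndCancel",
				func(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) {
					req, ok := in.Request.(*smithyhttp.Request)
					if !ok {
						return middleware.FinalizeOutput{}, middleware.Metadata{}, fmt.Errorf("unexpected request type %T", in.Request)
					}
					*endpoint = req.URL.String()
					// Stop here so no real API request goes out.
					return middleware.FinalizeOutput{}, middleware.Metadata{}, errCancelOperation
				}),
			middleware.After,
		)
	}
}
```

A test using this would append captureEndpointAndCancel(&endpoint) to the operation's APIOptions and assert that the returned error is errCancelOperation, which matches the errors.Is check in the diff above.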