From c945dd0ce74145798d142487d4930c3952182c84 Mon Sep 17 00:00:00 2001 From: Matt Burgess <549318+mattburgess@users.noreply.github.com> Date: Sun, 7 Apr 2024 19:30:13 +0100 Subject: [PATCH 01/38] chime: Upgrade to AWS SDK v2 --- go.mod | 1 + go.sum | 2 + internal/conns/awsclient_gen.go | 6 +-- .../chime/service_endpoints_gen_test.go | 40 ++++++++++++------- internal/service/chime/service_package.go | 28 ------------- internal/service/chime/service_package_gen.go | 17 ++++---- names/data/names_data.csv | 2 +- names/names.go | 1 + 8 files changed, 43 insertions(+), 54 deletions(-) delete mode 100644 internal/service/chime/service_package.go diff --git a/go.mod b/go.mod index 8c3fc696a28..6b236339445 100644 --- a/go.mod +++ b/go.mod @@ -214,6 +214,7 @@ require ( github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5 // indirect + github.com/aws/aws-sdk-go-v2/service/chime v1.30.4 // indirect github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 // indirect github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7 // indirect github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.6 // indirect diff --git a/go.sum b/go.sum index 1bbe0c12522..77f654c867d 100644 --- a/go.sum +++ b/go.sum @@ -82,6 +82,8 @@ github.com/aws/aws-sdk-go-v2/service/bedrockagent v1.5.1 h1:LWEonf7hVyLqY4AK46rj github.com/aws/aws-sdk-go-v2/service/bedrockagent v1.5.1/go.mod h1:6CwV+GE3wrFqkrU2LB8cajHMWJn7jFFhRtxBQiOZ5kw= github.com/aws/aws-sdk-go-v2/service/budgets v1.22.4 h1:sVv+p2Wo+sUXa8dC1pCMJ/+9ncOriq8EiRWvAkOuaLY= github.com/aws/aws-sdk-go-v2/service/budgets v1.22.4/go.mod h1:JFS3MaNoisHXHQm5/xRQjj1tICixIgT8Vv32D0lV5NE= +github.com/aws/aws-sdk-go-v2/service/chime v1.30.4 h1:IOShdLvjngLscUJozVEaPl3P2B2JSGfP7qVOJPdWaj0= +github.com/aws/aws-sdk-go-v2/service/chime v1.30.4/go.mod h1:5Aw544A4C/xQGeP5kRixFnj0078cPoSBmbLVFGWapOc= github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines v1.15.5 h1:FgeK3aPbB/ARkhxUXfSn9d2ibb4Q9kUhHl/dWwqIy8Y= github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines v1.15.5/go.mod h1:yPGCqtEO6NNwd6kebco4VSvyHkKbjjwd7K6g49Ze/Uw= github.com/aws/aws-sdk-go-v2/service/chimesdkvoice v1.14.4 h1:rea/sazWAyaUXPcbSCBDGKM1Kb6YiU25xvNnN0p7AyM= diff --git a/internal/conns/awsclient_gen.go b/internal/conns/awsclient_gen.go index 35c6c862819..e7b6b1b4369 100644 --- a/internal/conns/awsclient_gen.go +++ b/internal/conns/awsclient_gen.go @@ -23,6 +23,7 @@ import ( bedrock_sdkv2 "github.com/aws/aws-sdk-go-v2/service/bedrock" bedrockagent_sdkv2 "github.com/aws/aws-sdk-go-v2/service/bedrockagent" budgets_sdkv2 "github.com/aws/aws-sdk-go-v2/service/budgets" + chime_sdkv2 "github.com/aws/aws-sdk-go-v2/service/chime" chimesdkmediapipelines_sdkv2 "github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines" chimesdkvoice_sdkv2 "github.com/aws/aws-sdk-go-v2/service/chimesdkvoice" cleanrooms_sdkv2 "github.com/aws/aws-sdk-go-v2/service/cleanrooms" @@ -162,7 +163,6 @@ import ( autoscaling_sdkv1 "github.com/aws/aws-sdk-go/service/autoscaling" backup_sdkv1 "github.com/aws/aws-sdk-go/service/backup" batch_sdkv1 "github.com/aws/aws-sdk-go/service/batch" - chime_sdkv1 "github.com/aws/aws-sdk-go/service/chime" cloudformation_sdkv1 "github.com/aws/aws-sdk-go/service/cloudformation" cloudfront_sdkv1 "github.com/aws/aws-sdk-go/service/cloudfront" cloudwatchrum_sdkv1 "github.com/aws/aws-sdk-go/service/cloudwatchrum" @@ -377,8 +377,8 @@ func (c *AWSClient) 
CURClient(ctx context.Context) *costandusagereportservice_sd return errs.Must(client[*costandusagereportservice_sdkv2.Client](ctx, c, names.CUR, make(map[string]any))) } -func (c *AWSClient) ChimeConn(ctx context.Context) *chime_sdkv1.Chime { - return errs.Must(conn[*chime_sdkv1.Chime](ctx, c, names.Chime, make(map[string]any))) +func (c *AWSClient) ChimeClient(ctx context.Context) *chime_sdkv2.Client { + return errs.Must(client[*chime_sdkv2.Client](ctx, c, names.Chime, make(map[string]any))) } func (c *AWSClient) ChimeSDKMediaPipelinesClient(ctx context.Context) *chimesdkmediapipelines_sdkv2.Client { diff --git a/internal/service/chime/service_endpoints_gen_test.go b/internal/service/chime/service_endpoints_gen_test.go index 366dde7099c..3896336c8bc 100644 --- a/internal/service/chime/service_endpoints_gen_test.go +++ b/internal/service/chime/service_endpoints_gen_test.go @@ -4,17 +4,17 @@ package chime_test import ( "context" + "errors" "fmt" "maps" - "net/url" "os" "path/filepath" "reflect" "strings" "testing" - "github.com/aws/aws-sdk-go/aws/endpoints" - chime_sdkv1 "github.com/aws/aws-sdk-go/service/chime" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + chime_sdkv2 "github.com/aws/aws-sdk-go-v2/service/chime" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" "github.com/google/go-cmp/cmp" @@ -212,32 +212,42 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } func defaultEndpoint(region string) string { - r := endpoints.DefaultResolver() + r := chime_sdkv2.NewDefaultEndpointResolverV2() - ep, err := r.EndpointFor(chime_sdkv1.EndpointsID, region) + ep, err := r.ResolveEndpoint(context.Background(), chime_sdkv2.EndpointParameters{ + Region: aws_sdkv2.String(region), + }) if err != nil { return err.Error() } - url, _ := url.Parse(ep.URL) - - if url.Path == "" { - url.Path = "/" + if ep.URI.Path == "" { + ep.URI.Path = "/" } - return url.String() + return ep.URI.String() } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) string { t.Helper() - client := meta.ChimeConn(ctx) - - req, _ := client.ListAccountsRequest(&chime_sdkv1.ListAccountsInput{}) + var endpoint string - req.HTTPRequest.URL.Path = "/" + client := meta.ChimeClient(ctx) - endpoint := req.HTTPRequest.URL.String() + _, err := client.ListAccounts(ctx, &chime_sdkv2.ListAccountsInput{}, + func(opts *chime_sdkv2.Options) { + opts.APIOptions = append(opts.APIOptions, + addRetrieveEndpointURLMiddleware(t, &endpoint), + addCancelRequestMiddleware(), + ) + }, + ) + if err == nil { + t.Fatal("Expected an error, got none") + } else if !errors.Is(err, errCancelOperation) { + t.Fatalf("Unexpected error: %s", err) + } return endpoint } diff --git a/internal/service/chime/service_package.go b/internal/service/chime/service_package.go deleted file mode 100644 index 2ddfe0505ea..00000000000 --- a/internal/service/chime/service_package.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package chime - -import ( - "context" - - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - request_sdkv1 "github.com/aws/aws-sdk-go/aws/request" - chime_sdkv1 "github.com/aws/aws-sdk-go/service/chime" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" -) - -// CustomizeConn customizes a new AWS SDK for Go v1 client for this service package's AWS API. 
-func (p *servicePackage) CustomizeConn(ctx context.Context, conn *chime_sdkv1.Chime) (*chime_sdkv1.Chime, error) { - conn.Handlers.Retry.PushBack(func(r *request_sdkv1.Request) { - // When calling CreateVoiceConnector across multiple resources, - // the API can randomly return a BadRequestException without explanation - if r.Operation.Name == "CreateVoiceConnector" { - if tfawserr.ErrMessageContains(r.Error, chime_sdkv1.ErrCodeBadRequestException, "Service received a bad request") { - r.Retryable = aws_sdkv1.Bool(true) - } - } - }) - - return conn, nil -} diff --git a/internal/service/chime/service_package_gen.go b/internal/service/chime/service_package_gen.go index e875959ddac..43a4201e971 100644 --- a/internal/service/chime/service_package_gen.go +++ b/internal/service/chime/service_package_gen.go @@ -5,9 +5,8 @@ package chime import ( "context" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" - chime_sdkv1 "github.com/aws/aws-sdk-go/service/chime" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + chime_sdkv2 "github.com/aws/aws-sdk-go-v2/service/chime" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -68,11 +67,15 @@ func (p *servicePackage) ServicePackageName() string { return names.Chime } -// NewConn returns a new AWS SDK for Go v1 client for this service package's AWS API. -func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*chime_sdkv1.Chime, error) { - sess := config["session"].(*session_sdkv1.Session) +// NewClient returns a new AWS SDK for Go v2 client for this service package's AWS API. +func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*chime_sdkv2.Client, error) { + cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return chime_sdkv1.New(sess.Copy(&aws_sdkv1.Config{Endpoint: aws_sdkv1.String(config["endpoint"].(string))})), nil + return chime_sdkv2.NewFromConfig(cfg, func(o *chime_sdkv2.Options) { + if endpoint := config["endpoint"].(string); endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + }), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/names/data/names_data.csv b/names/data/names_data.csv index c2dc14cb28a..5f06a776efa 100644 --- a/names/data/names_data.csv +++ b/names/data/names_data.csv @@ -42,7 +42,7 @@ billingconductor,billingconductor,billingconductor,,,billingconductor,,,BillingC braket,braket,braket,braket,,braket,,,Braket,Braket,,1,,,aws_braket_,,braket_,Braket,Amazon,,x,,,,,Braket,,, ce,ce,costexplorer,costexplorer,,ce,,costexplorer,CE,CostExplorer,,1,,,aws_ce_,,ce_,CE (Cost Explorer),AWS,,,,,,,Cost Explorer,ListCostCategoryDefinitions,, ,,,,,,,,,,,,,,,,,Chatbot,AWS,x,,,,,,,,,No SDK support -chime,chime,chime,chime,,chime,,,Chime,Chime,,1,,,aws_chime_,,chime_,Chime,Amazon,,,,,,,Chime,ListAccounts,, +chime,chime,chime,chime,,chime,,,Chime,Chime,,,2,,aws_chime_,,chime_,Chime,Amazon,,,,,,,Chime,ListAccounts,, chime-sdk-identity,chimesdkidentity,chimesdkidentity,chimesdkidentity,,chimesdkidentity,,,ChimeSDKIdentity,ChimeSDKIdentity,,1,,,aws_chimesdkidentity_,,chimesdkidentity_,Chime SDK Identity,Amazon,,x,,,,,Chime SDK Identity,,, chime-sdk-mediapipelines,chimesdkmediapipelines,chimesdkmediapipelines,chimesdkmediapipelines,,chimesdkmediapipelines,,,ChimeSDKMediaPipelines,ChimeSDKMediaPipelines,,,2,,aws_chimesdkmediapipelines_,,chimesdkmediapipelines_,Chime SDK Media 
Pipelines,Amazon,,,,,,,Chime SDK Media Pipelines,ListMediaPipelines,, chime-sdk-meetings,chimesdkmeetings,chimesdkmeetings,chimesdkmeetings,,chimesdkmeetings,,,ChimeSDKMeetings,ChimeSDKMeetings,,1,,,aws_chimesdkmeetings_,,chimesdkmeetings_,Chime SDK Meetings,Amazon,,x,,,,,Chime SDK Meetings,,, diff --git a/names/names.go b/names/names.go index 7b2c5cf4324..dc68471f559 100644 --- a/names/names.go +++ b/names/names.go @@ -37,6 +37,7 @@ const ( BatchEndpointID = "batch" BedrockEndpointID = "bedrock" BudgetsEndpointID = "budgets" + ChimeEndpointID = "chime" ChimeSDKMediaPipelinesEndpointID = "media-pipelines-chime" ChimeSDKVoiceEndpointID = "voice-chime" CloudSearchEndpointID = "cloudsearch" From d3a24fd686df584b87e3b990006618cb10103e87 Mon Sep 17 00:00:00 2001 From: Anthony Wat Date: Sun, 26 May 2024 17:09:11 -0400 Subject: [PATCH 02/38] feat: Add delete config args to aws_fsx_[lustre|openzfs|windows]_file_system resources --- .changelog/37717.txt | 9 + internal/service/fsx/lustre_file_system.go | 93 ++++- .../service/fsx/lustre_file_system_test.go | 390 +++++++++++++----- internal/service/fsx/openzfs_file_system.go | 58 ++- .../service/fsx/openzfs_file_system_test.go | 119 +++++- internal/service/fsx/windows_file_system.go | 38 +- .../service/fsx/windows_file_system_test.go | 99 ++++- .../r/fsx_lustre_file_system.html.markdown | 58 ++- .../r/fsx_openzfs_file_system.html.markdown | 70 ++-- .../r/fsx_windows_file_system.html.markdown | 38 +- 10 files changed, 799 insertions(+), 173 deletions(-) create mode 100644 .changelog/37717.txt diff --git a/.changelog/37717.txt b/.changelog/37717.txt new file mode 100644 index 00000000000..7a05d499056 --- /dev/null +++ b/.changelog/37717.txt @@ -0,0 +1,9 @@ +```release-note:enhancement +resource/aws_fsx_lustre_file_system: Add `final_backup_tags` and `skip_final_backup` arguments +``` +```release-note:enhancement +resource/aws_fsx_openzfs_file_system: Add `delete_options` and `final_backup_tags` arguments +``` +```release-note:enhancement +resource/aws_fsx_windows_file_system: Add `final_backup_tags` argument +``` \ No newline at end of file diff --git a/internal/service/fsx/lustre_file_system.go b/internal/service/fsx/lustre_file_system.go index a0621369eb1..57418b134b1 100644 --- a/internal/service/fsx/lustre_file_system.go +++ b/internal/service/fsx/lustre_file_system.go @@ -42,7 +42,11 @@ func resourceLustreFileSystem() *schema.Resource { DeleteWithoutTimeout: resourceLustreFileSystemDelete, Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, + StateContext: func(ctx context.Context, d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + d.Set("skip_final_backup", true) + + return []*schema.ResourceData{d}, nil + }, }, Timeouts: &schema.ResourceTimeout{ @@ -131,6 +135,32 @@ func resourceLustreFileSystem() *schema.Resource { validation.StringMatch(regexache.MustCompile(`^[0-9].[0-9]+$`), "must be in format x.y"), ), }, + "final_backup_tags": { + Type: schema.TypeSet, + Optional: true, + MinItems: 1, + MaxItems: 50, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrKey: { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 128), + validation.StringMatch(regexache.MustCompile(`^([\p{L}\p{Z}\p{N}_.:/=+\-@]*)$`), "must be a valid tag key"), + ), + }, + names.AttrValue: { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.All( + validation.StringLenBetween(0, 128), + 
validation.StringMatch(regexache.MustCompile(`^([\p{L}\p{Z}\p{N}_.:/=+\-@]*)$`), "must be a valid tag value"), + ), + }, + }, + }, + }, "import_path": { Type: schema.TypeString, Optional: true, @@ -238,6 +268,11 @@ func resourceLustreFileSystem() *schema.Resource { MaxItems: 50, Elem: &schema.Schema{Type: schema.TypeString}, }, + "skip_final_backup": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, "storage_capacity": { Type: schema.TypeInt, Optional: true, @@ -491,7 +526,12 @@ func resourceLustreFileSystemUpdate(ctx context.Context, d *schema.ResourceData, var diags diag.Diagnostics conn := meta.(*conns.AWSClient).FSxConn(ctx) - if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll) { + if d.HasChangesExcept( + "final_backup_tags", + "skip_final_backup", + names.AttrTags, + names.AttrTagsAll, + ) { input := &fsx.UpdateFileSystemInput{ ClientRequestToken: aws.String(id.UniqueId()), FileSystemId: aws.String(d.Id()), @@ -557,10 +597,27 @@ func resourceLustreFileSystemDelete(ctx context.Context, d *schema.ResourceData, var diags diag.Diagnostics conn := meta.(*conns.AWSClient).FSxConn(ctx) + input := &fsx.DeleteFileSystemInput{ + ClientRequestToken: aws.String(id.UniqueId()), + FileSystemId: aws.String(d.Id()), + } + + // Final backup during delete is not supported on file systems using the Scratch deployment type + // LustreConfiguration cannot be supplied at all, even when empty, in this scenario + if v, ok := d.GetOk("deployment_type"); ok && !strings.HasPrefix(v.(string), "SCRATCH_") { + lustreConfig := &fsx.DeleteFileSystemLustreConfiguration{ + SkipFinalBackup: aws.Bool(d.Get("skip_final_backup").(bool)), + } + + if v, ok := d.GetOk("final_backup_tags"); ok { + lustreConfig.FinalBackupTags = expandFinalBackupTags(v.(*schema.Set)) + } + + input.LustreConfiguration = lustreConfig + } + log.Printf("[DEBUG] Deleting FSx for Lustre File System: %s", d.Id()) - _, err := conn.DeleteFileSystemWithContext(ctx, &fsx.DeleteFileSystemInput{ - FileSystemId: aws.String(d.Id()), - }) + _, err := conn.DeleteFileSystemWithContext(ctx, input) if tfawserr.ErrCodeEquals(err, fsx.ErrCodeFileSystemNotFound) { return diags @@ -577,6 +634,32 @@ func resourceLustreFileSystemDelete(ctx context.Context, d *schema.ResourceData, return diags } +func expandFinalBackupTags(cfg *schema.Set) []*fsx.Tag { + tags := []*fsx.Tag{} + + for _, tag := range cfg.List() { + expandedTag := expandFinalBackupTag(tag.(map[string]interface{})) + if expandedTag != nil { + tags = append(tags, expandedTag) + } + } + + return tags +} + +func expandFinalBackupTag(cfg map[string]interface{}) *fsx.Tag { + out := fsx.Tag{} + + if v, ok := cfg[names.AttrKey].(string); ok { + out.Key = aws.String(v) + } + if v, ok := cfg[names.AttrValue].(string); ok { + out.Value = aws.String(v) + } + + return &out +} + func expandLustreRootSquashConfiguration(l []interface{}) *fsx.LustreRootSquashConfiguration { if len(l) == 0 || l[0] == nil { return nil diff --git a/internal/service/fsx/lustre_file_system_test.go b/internal/service/fsx/lustre_file_system_test.go index c7d0d52b9d6..a03b1dc7806 100644 --- a/internal/service/fsx/lustre_file_system_test.go +++ b/internal/service/fsx/lustre_file_system_test.go @@ -6,6 +6,7 @@ package fsx_test import ( "context" "fmt" + "os" "testing" "github.com/YakDriver/regexache" @@ -57,6 +58,7 @@ func TestAccFSxLustreFileSystem_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "network_interface_ids.#", acctest.Ct2), acctest.CheckResourceAttrAccountID(resourceName, names.AttrOwnerID), 
resource.TestCheckResourceAttr(resourceName, "security_group_ids.#", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, "skip_final_backup", acctest.CtTrue), resource.TestCheckResourceAttr(resourceName, "storage_capacity", "1200"), resource.TestCheckResourceAttr(resourceName, names.AttrStorageType, fsx.StorageTypeSsd), resource.TestCheckResourceAttr(resourceName, "subnet_ids.#", acctest.Ct1), @@ -66,10 +68,14 @@ func TestAccFSxLustreFileSystem_basic(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{names.AttrSecurityGroupIDs}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "final_backup_tags", + names.AttrSecurityGroupIDs, + "skip_final_backup", + }, }, }, }) @@ -119,10 +125,14 @@ func TestAccFSxLustreFileSystem_dataCompression(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{names.AttrSecurityGroupIDs}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "final_backup_tags", + names.AttrSecurityGroupIDs, + "skip_final_backup", + }, }, { Config: testAccLustreFileSystemConfig_basic(rName), @@ -142,6 +152,62 @@ func TestAccFSxLustreFileSystem_dataCompression(t *testing.T) { }) } +func TestAccFSxLustreFileSystem_deleteConfig(t *testing.T) { + ctx := acctest.Context(t) + + if os.Getenv("FSX_CREATE_FINAL_BACKUP") != acctest.CtTrue { + t.Skip("Environment variable FSX_CREATE_FINAL_BACKUP is not set to true") + } + + var filesystem1, filesystem2 fsx.FileSystem + resourceName := "aws_fsx_lustre_file_system.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccLustreFileSystemConfig_deleteConfig(rName, acctest.CtKey1, acctest.CtValue1, acctest.CtKey2, acctest.CtValue2), + Check: resource.ComposeTestCheckFunc( + testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem1), + resource.TestCheckResourceAttr(resourceName, "final_backup_tags.#", acctest.Ct2), + resource.TestCheckResourceAttr(resourceName, "final_backup_tags.0.key", acctest.CtKey1), + resource.TestCheckResourceAttr(resourceName, "final_backup_tags.0.value", acctest.CtValue1), + resource.TestCheckResourceAttr(resourceName, "final_backup_tags.1.key", acctest.CtKey2), + resource.TestCheckResourceAttr(resourceName, "final_backup_tags.1.value", acctest.CtValue2), + resource.TestCheckResourceAttr(resourceName, "skip_final_backup", acctest.CtFalse), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "final_backup_tags", + names.AttrSecurityGroupIDs, + "skip_final_backup", + }, + }, + { + Config: testAccLustreFileSystemConfig_deleteConfig(rName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, ""), + Check: resource.ComposeTestCheckFunc( + testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem2), + testAccCheckLustreFileSystemNotRecreated(&filesystem1, &filesystem2), + resource.TestCheckResourceAttr(resourceName, "final_backup_tags.#", 
acctest.Ct2), + resource.TestCheckResourceAttr(resourceName, "final_backup_tags.0.key", acctest.CtKey1), + resource.TestCheckResourceAttr(resourceName, "final_backup_tags.0.value", acctest.CtValue1Updated), + resource.TestCheckResourceAttr(resourceName, "final_backup_tags.1.key", acctest.CtKey2), + resource.TestCheckResourceAttr(resourceName, "final_backup_tags.1.value", ""), + resource.TestCheckResourceAttr(resourceName, "skip_final_backup", acctest.CtFalse), + ), + }, + }, + }) +} + func TestAccFSxLustreFileSystem_exportPath(t *testing.T) { ctx := acctest.Context(t) var filesystem1, filesystem2 fsx.FileSystem @@ -163,10 +229,14 @@ func TestAccFSxLustreFileSystem_exportPath(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{names.AttrSecurityGroupIDs}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "final_backup_tags", + names.AttrSecurityGroupIDs, + "skip_final_backup", + }, }, { Config: testAccLustreFileSystemConfig_exportPath(rName, "/prefix/"), @@ -202,10 +272,14 @@ func TestAccFSxLustreFileSystem_importPath(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{names.AttrSecurityGroupIDs}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "final_backup_tags", + names.AttrSecurityGroupIDs, + "skip_final_backup", + }, }, { Config: testAccLustreFileSystemConfig_importPath(rName, "/prefix/"), @@ -240,10 +314,14 @@ func TestAccFSxLustreFileSystem_importedFileChunkSize(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{names.AttrSecurityGroupIDs}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "final_backup_tags", + names.AttrSecurityGroupIDs, + "skip_final_backup", + }, }, { Config: testAccLustreFileSystemConfig_importedChunkSize(rName, 4096), @@ -277,10 +355,14 @@ func TestAccFSxLustreFileSystem_securityGroupIDs(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{names.AttrSecurityGroupIDs}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "final_backup_tags", + names.AttrSecurityGroupIDs, + "skip_final_backup", + }, }, { Config: testAccLustreFileSystemConfig_securityGroupIDs2(rName), @@ -314,10 +396,14 @@ func TestAccFSxLustreFileSystem_storageCapacity(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{names.AttrSecurityGroupIDs}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "final_backup_tags", + names.AttrSecurityGroupIDs, + "skip_final_backup", + }, }, { Config: testAccLustreFileSystemConfig_storageCapacity(rName, 1200), @@ -351,10 +437,14 @@ func TestAccFSxLustreFileSystem_storageCapacityUpdate(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{names.AttrSecurityGroupIDs}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "final_backup_tags", + 
names.AttrSecurityGroupIDs, + "skip_final_backup", + }, }, { Config: testAccLustreFileSystemConfig_storageCapacityScratch2(rName, 1200), @@ -396,10 +486,14 @@ func TestAccFSxLustreFileSystem_fileSystemTypeVersion(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{names.AttrSecurityGroupIDs}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "final_backup_tags", + names.AttrSecurityGroupIDs, + "skip_final_backup", + }, }, { Config: testAccLustreFileSystemConfig_typeVersion(rName, "2.12"), @@ -434,10 +528,14 @@ func TestAccFSxLustreFileSystem_tags(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{names.AttrSecurityGroupIDs}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "final_backup_tags", + names.AttrSecurityGroupIDs, + "skip_final_backup", + }, }, { Config: testAccLustreFileSystemConfig_tags2(rName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), @@ -482,10 +580,14 @@ func TestAccFSxLustreFileSystem_weeklyMaintenanceStartTime(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{names.AttrSecurityGroupIDs}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "final_backup_tags", + names.AttrSecurityGroupIDs, + "skip_final_backup", + }, }, { Config: testAccLustreFileSystemConfig_weeklyMaintenanceStartTime(rName, "2:02:02"), @@ -519,10 +621,14 @@ func TestAccFSxLustreFileSystem_automaticBackupRetentionDays(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{names.AttrSecurityGroupIDs}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "final_backup_tags", + names.AttrSecurityGroupIDs, + "skip_final_backup", + }, }, { Config: testAccLustreFileSystemConfig_automaticBackupRetentionDays(rName, 0), @@ -563,10 +669,14 @@ func TestAccFSxLustreFileSystem_dailyAutomaticBackupStartTime(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{names.AttrSecurityGroupIDs}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "final_backup_tags", + names.AttrSecurityGroupIDs, + "skip_final_backup", + }, }, { Config: testAccLustreFileSystemConfig_dailyAutomaticBackupStartTime(rName, "02:02"), @@ -606,10 +716,14 @@ func TestAccFSxLustreFileSystem_deploymentTypePersistent1(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{names.AttrSecurityGroupIDs}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "final_backup_tags", + names.AttrSecurityGroupIDs, + "skip_final_backup", + }, }, }, }) @@ -637,10 +751,14 @@ func TestAccFSxLustreFileSystem_deploymentTypePersistent1_perUnitStorageThroughp ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{names.AttrSecurityGroupIDs}, + ResourceName: resourceName, + ImportState: 
true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "final_backup_tags", + names.AttrSecurityGroupIDs, + "skip_final_backup", + }, }, { Config: testAccLustreFileSystemConfig_persistent1DeploymentType(rName, 100), @@ -680,10 +798,14 @@ func TestAccFSxLustreFileSystem_deploymentTypePersistent2(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{names.AttrSecurityGroupIDs}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "final_backup_tags", + names.AttrSecurityGroupIDs, + "skip_final_backup", + }, }, }, }) @@ -711,10 +833,14 @@ func TestAccFSxLustreFileSystem_deploymentTypePersistent2_perUnitStorageThroughp ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{names.AttrSecurityGroupIDs}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "final_backup_tags", + names.AttrSecurityGroupIDs, + "skip_final_backup", + }, }, { Config: testAccLustreFileSystemConfig_persistent2DeploymentType(rName, 250), @@ -750,10 +876,14 @@ func TestAccFSxLustreFileSystem_logConfig(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{names.AttrSecurityGroupIDs}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "final_backup_tags", + names.AttrSecurityGroupIDs, + "skip_final_backup", + }, }, { Config: testAccLustreFileSystemConfig_log(rName, "ERROR_ONLY"), @@ -789,10 +919,14 @@ func TestAccFSxLustreFileSystem_rootSquashConfig(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{names.AttrSecurityGroupIDs}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "final_backup_tags", + names.AttrSecurityGroupIDs, + "skip_final_backup", + }, }, { Config: testAccLustreFileSystemConfig_rootSquash(rName, "355534:64534"), @@ -828,10 +962,14 @@ func TestAccFSxLustreFileSystem_fromBackup(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{names.AttrSecurityGroupIDs, "backup_id"}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "backup_id", + "final_backup_tags", + names.AttrSecurityGroupIDs, + "skip_final_backup"}, }, }, }) @@ -860,10 +998,14 @@ func TestAccFSxLustreFileSystem_kmsKeyID(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{names.AttrSecurityGroupIDs}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "final_backup_tags", + names.AttrSecurityGroupIDs, + "skip_final_backup", + }, }, { Config: testAccLustreFileSystemConfig_kmsKeyID2(rName), @@ -900,10 +1042,14 @@ func TestAccFSxLustreFileSystem_deploymentTypeScratch2(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{names.AttrSecurityGroupIDs}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + 
"final_backup_tags", + names.AttrSecurityGroupIDs, + "skip_final_backup", + }, }, }, }) @@ -930,10 +1076,14 @@ func TestAccFSxLustreFileSystem_storageTypeHddDriveCacheRead(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{names.AttrSecurityGroupIDs}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "final_backup_tags", + names.AttrSecurityGroupIDs, + "skip_final_backup", + }, }, }, }) @@ -960,10 +1110,14 @@ func TestAccFSxLustreFileSystem_storageTypeHddDriveCacheNone(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{names.AttrSecurityGroupIDs}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "final_backup_tags", + names.AttrSecurityGroupIDs, + "skip_final_backup", + }, }, }, }) @@ -989,10 +1143,14 @@ func TestAccFSxLustreFileSystem_copyTagsToBackups(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{names.AttrSecurityGroupIDs}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "final_backup_tags", + names.AttrSecurityGroupIDs, + "skip_final_backup", + }, }, }, }) @@ -1018,10 +1176,14 @@ func TestAccFSxLustreFileSystem_autoImportPolicy(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{names.AttrSecurityGroupIDs}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "final_backup_tags", + names.AttrSecurityGroupIDs, + "skip_final_backup", + }, }, { Config: testAccLustreFileSystemConfig_autoImportPolicy(rName, "", "NEW_CHANGED"), @@ -1117,6 +1279,26 @@ resource "aws_fsx_lustre_file_system" "test" { `) } +func testAccLustreFileSystemConfig_deleteConfig(rName, finalTagKey1, finalTagValue1, finalTagKey2, finalTagValue2 string) string { + return acctest.ConfigCompose(testAccLustreFileSystemConfig_base(rName), fmt.Sprintf(` +resource "aws_fsx_lustre_file_system" "test" { + skip_final_backup = false + storage_capacity = 1200 + subnet_ids = aws_subnet.test[*].id + deployment_type = "PERSISTENT_1" + per_unit_storage_throughput = 50 + final_backup_tags { + key = %[1]q + value = %[2]q + } + final_backup_tags { + key = %[3]q + value = %[4]q + } +} +`, finalTagKey1, finalTagValue1, finalTagKey2, finalTagValue2)) +} + func testAccLustreFileSystemConfig_exportPath(rName, exportPrefix string) string { return acctest.ConfigCompose(testAccLustreFileSystemConfig_base(rName), fmt.Sprintf(` resource "aws_s3_bucket" "test" { diff --git a/internal/service/fsx/openzfs_file_system.go b/internal/service/fsx/openzfs_file_system.go index d8933eba078..bdc30f635a6 100644 --- a/internal/service/fsx/openzfs_file_system.go +++ b/internal/service/fsx/openzfs_file_system.go @@ -86,6 +86,14 @@ func resourceOpenZFSFileSystem() *schema.Resource { validation.StringMatch(regexache.MustCompile(`^([01]\d|2[0-3]):?([0-5]\d)$`), "must be in the format HH:MM"), ), }, + "delete_options": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice(fsx.DeleteFileSystemOpenZFSOption_Values(), false), + }, + }, "deployment_type": { Type: schema.TypeString, 
Required: true, @@ -127,6 +135,32 @@ func resourceOpenZFSFileSystem() *schema.Resource { Computed: true, ForceNew: true, }, + "final_backup_tags": { + Type: schema.TypeSet, + Optional: true, + MinItems: 1, + MaxItems: 50, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrKey: { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 128), + validation.StringMatch(regexache.MustCompile(`^([\p{L}\p{Z}\p{N}_.:/=+\-@]*)$`), "must be a valid tag key"), + ), + }, + names.AttrValue: { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.All( + validation.StringLenBetween(0, 128), + validation.StringMatch(regexache.MustCompile(`^([\p{L}\p{Z}\p{N}_.:/=+\-@]*)$`), "must be a valid tag value"), + ), + }, + }, + }, + }, names.AttrKMSKeyID: { Type: schema.TypeString, Optional: true, @@ -534,7 +568,13 @@ func resourceOpenZFSFileSystemUpdate(ctx context.Context, d *schema.ResourceData var diags diag.Diagnostics conn := meta.(*conns.AWSClient).FSxConn(ctx) - if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll) { + if d.HasChangesExcept( + "delete_options", + "final_backup_tags", + "skip_final_backup", + names.AttrTags, + names.AttrTagsAll, + ) { input := &fsx.UpdateFileSystemInput{ ClientRequestToken: aws.String(id.UniqueId()), FileSystemId: aws.String(d.Id()), @@ -633,13 +673,23 @@ func resourceOpenZFSFileSystemDelete(ctx context.Context, d *schema.ResourceData var diags diag.Diagnostics conn := meta.(*conns.AWSClient).FSxConn(ctx) - log.Printf("[DEBUG] Deleting FSx for OpenZFS File System: %s", d.Id()) - _, err := conn.DeleteFileSystemWithContext(ctx, &fsx.DeleteFileSystemInput{ + input := &fsx.DeleteFileSystemInput{ FileSystemId: aws.String(d.Id()), OpenZFSConfiguration: &fsx.DeleteFileSystemOpenZFSConfiguration{ SkipFinalBackup: aws.Bool(d.Get("skip_final_backup").(bool)), }, - }) + } + + if v, ok := d.GetOk("delete_options"); ok { + input.OpenZFSConfiguration.Options = flex.ExpandStringSet(v.(*schema.Set)) + } + + if v, ok := d.GetOk("final_backup_tags"); ok { + input.OpenZFSConfiguration.FinalBackupTags = expandFinalBackupTags(v.(*schema.Set)) + } + + log.Printf("[DEBUG] Deleting FSx for OpenZFS File System: %s", d.Id()) + _, err := conn.DeleteFileSystemWithContext(ctx, input) if tfawserr.ErrCodeEquals(err, fsx.ErrCodeFileSystemNotFound) { return diags diff --git a/internal/service/fsx/openzfs_file_system_test.go b/internal/service/fsx/openzfs_file_system_test.go index 9ce1b1ab082..6bbe8b6221a 100644 --- a/internal/service/fsx/openzfs_file_system_test.go +++ b/internal/service/fsx/openzfs_file_system_test.go @@ -6,6 +6,7 @@ package fsx_test import ( "context" "fmt" + "os" "testing" "github.com/YakDriver/regexache" @@ -78,7 +79,7 @@ func TestAccFSxOpenZFSFileSystem_basic(t *testing.T) { resource.TestCheckResourceAttrSet(resourceName, "root_volume_id"), resource.TestCheckResourceAttr(resourceName, "route_table_ids.#", acctest.Ct0), resource.TestCheckResourceAttr(resourceName, "security_group_ids.#", acctest.Ct0), - resource.TestCheckResourceAttr(resourceName, "skip_final_backup", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "skip_final_backup", acctest.CtTrue), resource.TestCheckResourceAttr(resourceName, "storage_capacity", "64"), resource.TestCheckResourceAttr(resourceName, names.AttrStorageType, fsx.StorageTypeSsd), resource.TestCheckResourceAttr(resourceName, "subnet_ids.#", acctest.Ct1), @@ -94,6 +95,8 @@ func TestAccFSxOpenZFSFileSystem_basic(t *testing.T) { ImportState: 
true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{ + "delete_options", + "final_backup_tags", "skip_final_backup", }, }, @@ -128,6 +131,8 @@ func TestAccFSxOpenZFSFileSystem_diskIops(t *testing.T) { ImportStateVerify: true, ImportStateVerifyIgnore: []string{ names.AttrSecurityGroupIDs, + "delete_options", + "final_backup_tags", "skip_final_backup", }, }, @@ -218,6 +223,8 @@ func TestAccFSxOpenZFSFileSystem_rootVolume(t *testing.T) { ImportStateVerify: true, ImportStateVerifyIgnore: []string{ names.AttrSecurityGroupIDs, + "delete_options", + "final_backup_tags", "skip_final_backup", }, }, @@ -379,6 +386,8 @@ func TestAccFSxOpenZFSFileSystem_securityGroupIDs(t *testing.T) { ImportStateVerify: true, ImportStateVerifyIgnore: []string{ names.AttrSecurityGroupIDs, + "delete_options", + "final_backup_tags", "skip_final_backup", }, }, @@ -420,6 +429,8 @@ func TestAccFSxOpenZFSFileSystem_tags(t *testing.T) { ImportStateVerify: true, ImportStateVerifyIgnore: []string{ names.AttrSecurityGroupIDs, + "delete_options", + "final_backup_tags", "skip_final_backup", }, }, @@ -474,6 +485,8 @@ func TestAccFSxOpenZFSFileSystem_copyTags(t *testing.T) { ImportStateVerify: true, ImportStateVerifyIgnore: []string{ names.AttrSecurityGroupIDs, + "delete_options", + "final_backup_tags", "skip_final_backup", }, }, @@ -516,6 +529,8 @@ func TestAccFSxOpenZFSFileSystem_throughput(t *testing.T) { ImportStateVerify: true, ImportStateVerifyIgnore: []string{ names.AttrSecurityGroupIDs, + "delete_options", + "final_backup_tags", "skip_final_backup", }, }, @@ -556,6 +571,8 @@ func TestAccFSxOpenZFSFileSystem_storageType(t *testing.T) { ImportStateVerify: true, ImportStateVerifyIgnore: []string{ names.AttrSecurityGroupIDs, + "delete_options", + "final_backup_tags", "skip_final_backup", }, }, @@ -588,6 +605,8 @@ func TestAccFSxOpenZFSFileSystem_weeklyMaintenanceStartTime(t *testing.T) { ImportStateVerify: true, ImportStateVerifyIgnore: []string{ names.AttrSecurityGroupIDs, + "delete_options", + "final_backup_tags", "skip_final_backup", }, }, @@ -628,6 +647,8 @@ func TestAccFSxOpenZFSFileSystem_automaticBackupRetentionDays(t *testing.T) { ImportStateVerify: true, ImportStateVerifyIgnore: []string{ names.AttrSecurityGroupIDs, + "delete_options", + "final_backup_tags", "skip_final_backup", }, }, @@ -675,6 +696,8 @@ func TestAccFSxOpenZFSFileSystem_kmsKeyID(t *testing.T) { ImportStateVerify: true, ImportStateVerifyIgnore: []string{ names.AttrSecurityGroupIDs, + "delete_options", + "final_backup_tags", "skip_final_backup", }, }, @@ -707,6 +730,8 @@ func TestAccFSxOpenZFSFileSystem_dailyAutomaticBackupStartTime(t *testing.T) { ImportStateVerify: true, ImportStateVerifyIgnore: []string{ names.AttrSecurityGroupIDs, + "delete_options", + "final_backup_tags", "skip_final_backup", }, }, @@ -747,6 +772,8 @@ func TestAccFSxOpenZFSFileSystem_throughputCapacity(t *testing.T) { ImportStateVerify: true, ImportStateVerifyIgnore: []string{ names.AttrSecurityGroupIDs, + "delete_options", + "final_backup_tags", "skip_final_backup", }, }, @@ -787,6 +814,8 @@ func TestAccFSxOpenZFSFileSystem_storageCapacity(t *testing.T) { ImportStateVerify: true, ImportStateVerifyIgnore: []string{ names.AttrSecurityGroupIDs, + "delete_options", + "final_backup_tags", "skip_final_backup", }, }, @@ -828,6 +857,8 @@ func TestAccFSxOpenZFSFileSystem_deploymentType(t *testing.T) { ImportStateVerify: true, ImportStateVerifyIgnore: []string{ names.AttrSecurityGroupIDs, + "delete_options", + "final_backup_tags", "skip_final_backup", }, }, @@ -909,6 +940,8 @@ 
func TestAccFSxOpenZFSFileSystem_multiAZ(t *testing.T) { ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{ + "delete_options", + "final_backup_tags", "skip_final_backup", }, }, @@ -942,6 +975,8 @@ func TestAccFSxOpenZFSFileSystem_routeTableIDs(t *testing.T) { ImportStateVerify: true, ImportStateVerifyIgnore: []string{ names.AttrSecurityGroupIDs, + "delete_options", + "final_backup_tags", "skip_final_backup", }, }, @@ -966,6 +1001,66 @@ func TestAccFSxOpenZFSFileSystem_routeTableIDs(t *testing.T) { }) } +func TestAccFSxOpenZFSFileSystem_deleteConfig(t *testing.T) { + ctx := acctest.Context(t) + + if os.Getenv("FSX_CREATE_FINAL_BACKUP") != acctest.CtTrue { + t.Skip("Environment variable FSX_CREATE_FINAL_BACKUP is not set to true") + } + + var filesystem fsx.FileSystem + resourceName := "aws_fsx_openzfs_file_system.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckOpenZFSFileSystemDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccOpenZFSFileSystemConfig_deleteConfig(rName, acctest.CtKey1, acctest.CtValue1, acctest.CtKey2, acctest.CtValue2), + Check: resource.ComposeTestCheckFunc( + testAccCheckOpenZFSFileSystemExists(ctx, resourceName, &filesystem), + resource.TestCheckResourceAttr(resourceName, "delete_options.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "delete_options.0", "DELETE_CHILD_VOLUMES_AND_SNAPSHOTS"), + resource.TestCheckResourceAttr(resourceName, "final_backup_tags.#", acctest.Ct2), + resource.TestCheckResourceAttr(resourceName, "final_backup_tags.0.key", acctest.CtKey1), + resource.TestCheckResourceAttr(resourceName, "final_backup_tags.0.value", acctest.CtValue1), + resource.TestCheckResourceAttr(resourceName, "final_backup_tags.1.key", acctest.CtKey2), + resource.TestCheckResourceAttr(resourceName, "final_backup_tags.1.value", acctest.CtValue2), + resource.TestCheckResourceAttr(resourceName, "skip_final_backup", acctest.CtFalse), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrSecurityGroupIDs, + "delete_options", + "final_backup_tags", + "skip_final_backup", + }, + }, + { + Config: testAccOpenZFSFileSystemConfig_deleteConfig(rName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, ""), + Check: resource.ComposeTestCheckFunc( + testAccCheckOpenZFSFileSystemExists(ctx, resourceName, &filesystem), + resource.TestCheckResourceAttr(resourceName, "delete_options.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "delete_options.0", "DELETE_CHILD_VOLUMES_AND_SNAPSHOTS"), + resource.TestCheckResourceAttr(resourceName, "final_backup_tags.#", acctest.Ct2), + resource.TestCheckResourceAttr(resourceName, "final_backup_tags.0.key", acctest.CtKey1), + resource.TestCheckResourceAttr(resourceName, "final_backup_tags.0.value", acctest.CtValue1Updated), + resource.TestCheckResourceAttr(resourceName, "final_backup_tags.1.key", acctest.CtKey2), + resource.TestCheckResourceAttr(resourceName, "final_backup_tags.1.value", ""), + resource.TestCheckResourceAttr(resourceName, "skip_final_backup", acctest.CtFalse), + ), + }, + }, + }) +} + func testAccCheckOpenZFSFileSystemExists(ctx context.Context, n string, v 
*fsx.FileSystem) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -1044,6 +1139,7 @@ func testAccOpenZFSFileSystemConfig_baseMultiAZ(rName string) string { func testAccOpenZFSFileSystemConfig_basic(rName string) string { return acctest.ConfigCompose(testAccOpenZFSFileSystemConfig_baseSingleAZ(rName), ` resource "aws_fsx_openzfs_file_system" "test" { + skip_final_backup = true storage_capacity = 64 subnet_ids = aws_subnet.test[*].id deployment_type = "SINGLE_AZ_1" @@ -1659,3 +1755,24 @@ resource "aws_fsx_openzfs_file_system" "test" { } `, rName, n)) } + +func testAccOpenZFSFileSystemConfig_deleteConfig(rName, finalTagKey1, finalTagValue1, finalTagKey2, finalTagValue2 string) string { + return acctest.ConfigCompose(testAccOpenZFSFileSystemConfig_baseSingleAZ(rName), fmt.Sprintf(` +resource "aws_fsx_openzfs_file_system" "test" { + skip_final_backup = false + storage_capacity = 64 + subnet_ids = aws_subnet.test[*].id + deployment_type = "SINGLE_AZ_1" + throughput_capacity = 64 + delete_options = ["DELETE_CHILD_VOLUMES_AND_SNAPSHOTS"] + final_backup_tags { + key = %[1]q + value = %[2]q + } + final_backup_tags { + key = %[3]q + value = %[4]q + } +} +`, finalTagKey1, finalTagValue1, finalTagKey2, finalTagValue2)) +} diff --git a/internal/service/fsx/windows_file_system.go b/internal/service/fsx/windows_file_system.go index 0376e35ecf5..5c8e9ddc82d 100644 --- a/internal/service/fsx/windows_file_system.go +++ b/internal/service/fsx/windows_file_system.go @@ -165,6 +165,32 @@ func resourceWindowsFileSystem() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "final_backup_tags": { + Type: schema.TypeSet, + Optional: true, + MinItems: 1, + MaxItems: 50, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrKey: { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 128), + validation.StringMatch(regexache.MustCompile(`^([\p{L}\p{Z}\p{N}_.:/=+\-@]*)$`), "must be a valid tag key"), + ), + }, + names.AttrValue: { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.All( + validation.StringLenBetween(0, 128), + validation.StringMatch(regexache.MustCompile(`^([\p{L}\p{Z}\p{N}_.:/=+\-@]*)$`), "must be a valid tag value"), + ), + }, + }, + }, + }, names.AttrKMSKeyID: { Type: schema.TypeString, Optional: true, @@ -541,7 +567,13 @@ func resourceWindowsFileSystemUpdate(ctx context.Context, d *schema.ResourceData } } - if d.HasChangesExcept("aliases", names.AttrTags, names.AttrTagsAll) { + if d.HasChangesExcept( + "aliases", + "final_backup_tags", + "skip_final_backup", + names.AttrTags, + names.AttrTagsAll, + ) { input := &fsx.UpdateFileSystemInput{ ClientRequestToken: aws.String(id.UniqueId()), FileSystemId: aws.String(d.Id()), @@ -611,6 +643,10 @@ func resourceWindowsFileSystemDelete(ctx context.Context, d *schema.ResourceData }, } + if v, ok := d.GetOk("final_backup_tags"); ok { + input.WindowsConfiguration.FinalBackupTags = expandFinalBackupTags(v.(*schema.Set)) + } + log.Printf("[DEBUG] Deleting FSx for Windows File Server File System: %s", d.Id()) _, err := conn.DeleteFileSystemWithContext(ctx, input) diff --git a/internal/service/fsx/windows_file_system_test.go b/internal/service/fsx/windows_file_system_test.go index a5240072917..42b51c32f10 100644 --- a/internal/service/fsx/windows_file_system_test.go +++ b/internal/service/fsx/windows_file_system_test.go @@ -6,6 +6,7 @@ package fsx_test import ( "context" "fmt" + "os" "testing" 
"github.com/YakDriver/regexache" @@ -71,6 +72,7 @@ func TestAccFSxWindowsFileSystem_basic(t *testing.T) { ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{ + "final_backup_tags", names.AttrSecurityGroupIDs, "skip_final_backup", }, @@ -147,6 +149,7 @@ func TestAccFSxWindowsFileSystem_singleAz2(t *testing.T) { ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{ + "final_backup_tags", names.AttrSecurityGroupIDs, "skip_final_backup", }, @@ -181,6 +184,7 @@ func TestAccFSxWindowsFileSystem_storageTypeHdd(t *testing.T) { ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{ + "final_backup_tags", names.AttrSecurityGroupIDs, "skip_final_backup", }, @@ -231,6 +235,7 @@ func TestAccFSxWindowsFileSystem_multiAz(t *testing.T) { ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{ + "final_backup_tags", names.AttrSecurityGroupIDs, "skip_final_backup", }, @@ -265,6 +270,7 @@ func TestAccFSxWindowsFileSystem_aliases(t *testing.T) { ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{ + "final_backup_tags", names.AttrSecurityGroupIDs, "skip_final_backup", }, @@ -317,6 +323,7 @@ func TestAccFSxWindowsFileSystem_automaticBackupRetentionDays(t *testing.T) { ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{ + "final_backup_tags", names.AttrSecurityGroupIDs, "skip_final_backup", }, @@ -366,6 +373,7 @@ func TestAccFSxWindowsFileSystem_copyTagsToBackups(t *testing.T) { ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{ + "final_backup_tags", names.AttrSecurityGroupIDs, "skip_final_backup", }, @@ -407,6 +415,7 @@ func TestAccFSxWindowsFileSystem_dailyAutomaticBackupStartTime(t *testing.T) { ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{ + "final_backup_tags", names.AttrSecurityGroupIDs, "skip_final_backup", }, @@ -423,6 +432,62 @@ func TestAccFSxWindowsFileSystem_dailyAutomaticBackupStartTime(t *testing.T) { }) } +func TestAccFSxWindowsFileSystem_deleteConfig(t *testing.T) { + ctx := acctest.Context(t) + + if os.Getenv("FSX_CREATE_FINAL_BACKUP") != acctest.CtTrue { + t.Skip("Environment variable FSX_CREATE_FINAL_BACKUP is not set to true") + } + + var filesystem fsx.FileSystem + resourceName := "aws_fsx_windows_file_system.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + domainName := acctest.RandomDomainName() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckWindowsFileSystemDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccWindowsFileSystemConfig_deleteConfig(rName, domainName, acctest.CtKey1, acctest.CtValue1, acctest.CtKey2, acctest.CtValue2), + Check: resource.ComposeTestCheckFunc( + testAccCheckWindowsFileSystemExists(ctx, resourceName, &filesystem), + resource.TestCheckResourceAttr(resourceName, "final_backup_tags.#", acctest.Ct2), + resource.TestCheckResourceAttr(resourceName, "final_backup_tags.0.key", acctest.CtKey1), + resource.TestCheckResourceAttr(resourceName, "final_backup_tags.0.value", acctest.CtValue1), + resource.TestCheckResourceAttr(resourceName, "final_backup_tags.1.key", acctest.CtKey2), + resource.TestCheckResourceAttr(resourceName, "final_backup_tags.1.value", acctest.CtValue2), + 
resource.TestCheckResourceAttr(resourceName, "skip_final_backup", acctest.CtFalse), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "final_backup_tags", + names.AttrSecurityGroupIDs, + "skip_final_backup", + }, + }, + { + Config: testAccWindowsFileSystemConfig_deleteConfig(rName, domainName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, ""), + Check: resource.ComposeTestCheckFunc( + testAccCheckWindowsFileSystemExists(ctx, resourceName, &filesystem), + resource.TestCheckResourceAttr(resourceName, "final_backup_tags.#", acctest.Ct2), + resource.TestCheckResourceAttr(resourceName, "final_backup_tags.0.key", acctest.CtKey1), + resource.TestCheckResourceAttr(resourceName, "final_backup_tags.0.value", acctest.CtValue1Updated), + resource.TestCheckResourceAttr(resourceName, "final_backup_tags.1.key", acctest.CtKey2), + resource.TestCheckResourceAttr(resourceName, "final_backup_tags.1.value", ""), + resource.TestCheckResourceAttr(resourceName, "skip_final_backup", acctest.CtFalse), + ), + }, + }, + }) +} + func TestAccFSxWindowsFileSystem_kmsKeyID(t *testing.T) { ctx := acctest.Context(t) var filesystem1, filesystem2 fsx.FileSystem @@ -450,6 +515,7 @@ func TestAccFSxWindowsFileSystem_kmsKeyID(t *testing.T) { ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{ + "final_backup_tags", names.AttrSecurityGroupIDs, "skip_final_backup", }, @@ -491,6 +557,7 @@ func TestAccFSxWindowsFileSystem_securityGroupIDs(t *testing.T) { ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{ + "final_backup_tags", names.AttrSecurityGroupIDs, "skip_final_backup", }, @@ -532,8 +599,9 @@ func TestAccFSxWindowsFileSystem_selfManagedActiveDirectory(t *testing.T) { ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{ - names.AttrSecurityGroupIDs, + "final_backup_tags", "self_managed_active_directory", + names.AttrSecurityGroupIDs, "skip_final_backup", }, }, @@ -567,6 +635,7 @@ func TestAccFSxWindowsFileSystem_storageCapacity(t *testing.T) { ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{ + "final_backup_tags", names.AttrSecurityGroupIDs, "skip_final_backup", }, @@ -609,9 +678,10 @@ func TestAccFSxWindowsFileSystem_fromBackup(t *testing.T) { ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{ + "backup_id", + "final_backup_tags", names.AttrSecurityGroupIDs, "skip_final_backup", - "backup_id", }, }, }, @@ -644,6 +714,7 @@ func TestAccFSxWindowsFileSystem_tags(t *testing.T) { ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{ + "final_backup_tags", names.AttrSecurityGroupIDs, "skip_final_backup", }, @@ -696,6 +767,7 @@ func TestAccFSxWindowsFileSystem_throughputCapacity(t *testing.T) { ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{ + "final_backup_tags", names.AttrSecurityGroupIDs, "skip_final_backup", }, @@ -737,6 +809,7 @@ func TestAccFSxWindowsFileSystem_weeklyMaintenanceStartTime(t *testing.T) { ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{ + "final_backup_tags", names.AttrSecurityGroupIDs, "skip_final_backup", }, @@ -781,6 +854,7 @@ func TestAccFSxWindowsFileSystem_audit(t *testing.T) { ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{ + "final_backup_tags", names.AttrSecurityGroupIDs, "skip_final_backup", }, @@ -835,6 +909,7 @@ func TestAccFSxWindowsFileSystem_diskIops(t 
*testing.T) { ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{ + "final_backup_tags", names.AttrSecurityGroupIDs, "skip_final_backup", }, @@ -1035,6 +1110,26 @@ resource "aws_fsx_windows_file_system" "test" { `, rName, dailyAutomaticBackupStartTime)) } +func testAccWindowsFileSystemConfig_deleteConfig(rName, domain, finalTagKey1, finalTagValue1, finalTagKey2, finalTagValue2 string) string { + return acctest.ConfigCompose(testAccWindowsFileSystemConfig_base(rName, domain), fmt.Sprintf(` +resource "aws_fsx_windows_file_system" "test" { + active_directory_id = aws_directory_service_directory.test.id + skip_final_backup = false + storage_capacity = 32 + subnet_ids = [aws_subnet.test[0].id] + throughput_capacity = 8 + final_backup_tags { + key = %[1]q + value = %[2]q + } + final_backup_tags { + key = %[3]q + value = %[4]q + } +} +`, finalTagKey1, finalTagValue1, finalTagKey2, finalTagValue2)) +} + func testAccWindowsFileSystemConfig_kmsKeyID1(rName, domain string) string { return acctest.ConfigCompose(testAccWindowsFileSystemConfig_base(rName, domain), fmt.Sprintf(` resource "aws_kms_key" "test1" { diff --git a/website/docs/r/fsx_lustre_file_system.html.markdown b/website/docs/r/fsx_lustre_file_system.html.markdown index 420781b0d83..f925a5273d9 100644 --- a/website/docs/r/fsx_lustre_file_system.html.markdown +++ b/website/docs/r/fsx_lustre_file_system.html.markdown @@ -24,37 +24,57 @@ resource "aws_fsx_lustre_file_system" "example" { ## Argument Reference -This resource supports the following arguments: +The following arguments are required: -* `storage_capacity` - (Optional) The storage capacity (GiB) of the file system. Minimum of `1200`. See more details at [Allowed values for Fsx storage capacity](https://docs.aws.amazon.com/fsx/latest/APIReference/API_CreateFileSystem.html#FSx-CreateFileSystem-request-StorageCapacity). Update is allowed only for `SCRATCH_2`, `PERSISTENT_1` and `PERSISTENT_2` deployment types, See more details at [Fsx Storage Capacity Update](https://docs.aws.amazon.com/fsx/latest/APIReference/API_UpdateFileSystem.html#FSx-UpdateFileSystem-request-StorageCapacity). Required when not creating filesystem for a backup. * `subnet_ids` - (Required) A list of IDs for the subnets that the file system will be accessible from. File systems currently support only one subnet. The file server is also launched in that subnet's Availability Zone. + +The following arguments are optional: + +* `auto_import_policy` - (Optional) How Amazon FSx keeps your file and directory listings up to date as you add or modify objects in your linked S3 bucket. see [Auto Import Data Repo](https://docs.aws.amazon.com/fsx/latest/LustreGuide/autoimport-data-repo.html) for more details. Only supported on `PERSISTENT_1` deployment types. +* `automatic_backup_retention_days` - (Optional) The number of days to retain automatic backups. Setting this to 0 disables automatic backups. You can retain automatic backups for a maximum of 90 days. only valid for `PERSISTENT_1` and `PERSISTENT_2` deployment_type. * `backup_id` - (Optional) The ID of the source backup to create the filesystem from. +* `copy_tags_to_backups` - (Optional) A boolean flag indicating whether tags for the file system should be copied to backups. Applicable for `PERSISTENT_1` and `PERSISTENT_2` deployment_type. The default value is false. +* `daily_automatic_backup_start_time` - (Optional) A recurring daily time, in the format HH:MM. 
HH is the zero-padded hour of the day (0-23), and MM is the zero-padded minute of the hour. For example, 05:00 specifies 5 AM daily. only valid for `PERSISTENT_1` and `PERSISTENT_2` deployment_type. Requires `automatic_backup_retention_days` to be set. +* `drive_cache_type` - (Optional) - The type of drive cache used by `PERSISTENT_1` filesystems that are provisioned with `HDD` storage_type. Required for `HDD` storage_type, set to either `READ` or `NONE`. +* `data_compression_type` - (Optional) Sets the data compression configuration for the file system. Valid values are `LZ4` and `NONE`. Default value is `NONE`. Unsetting this value reverts the compression type back to `NONE`. +* `deployment_type` - (Optional) - The filesystem deployment type. One of: `SCRATCH_1`, `SCRATCH_2`, `PERSISTENT_1`, `PERSISTENT_2`. * `export_path` - (Optional) S3 URI (with optional prefix) where the root of your Amazon FSx file system is exported. Can only be specified with `import_path` argument and the path must use the same Amazon S3 bucket as specified in `import_path`. Set equal to `import_path` to overwrite files on export. Defaults to `s3://{IMPORT BUCKET}/FSxLustre{CREATION TIMESTAMP}`. Only supported on `PERSISTENT_1` deployment types. -* `import_path` - (Optional) S3 URI (with optional prefix) that you're using as the data repository for your FSx for Lustre file system. For example, `s3://example-bucket/optional-prefix/`. Only supported on `PERSISTENT_1` deployment types. +* `file_system_type_version` - (Optional) Sets the Lustre version for the file system that you're creating. Valid values are 2.10 for `SCRATCH_1`, `SCRATCH_2` and `PERSISTENT_1` deployment types. Valid values for 2.12 include all deployment types. +* `final_backup_tags` - (Optional) List of tags to apply to the file system's final backup. Maximum of 50 items. See [`final_backup_tags` Block](#final_backup_tags-block) for details. + + **Note:** If the filesystem uses a Scratch deployment type, final backup during delete will always be skipped and this argument will not be used even when set. * `imported_file_chunk_size` - (Optional) For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. Can only be specified with `import_path` argument. Defaults to `1024`. Minimum of `1` and maximum of `512000`. Only supported on `PERSISTENT_1` deployment types. -* `security_group_ids` - (Optional) A list of IDs for the security groups that apply to the specified network interfaces created for file system access. These security groups will apply to all network interfaces. -* `tags` - (Optional) A map of tags to assign to the file system. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. -* `weekly_maintenance_start_time` - (Optional) The preferred start time (in `d:HH:MM` format) to perform weekly maintenance, in the UTC time zone. -* `deployment_type` - (Optional) - The filesystem deployment type. One of: `SCRATCH_1`, `SCRATCH_2`, `PERSISTENT_1`, `PERSISTENT_2`. +* `import_path` - (Optional) S3 URI (with optional prefix) that you're using as the data repository for your FSx for Lustre file system. For example, `s3://example-bucket/optional-prefix/`. Only supported on `PERSISTENT_1` deployment types. 
* `kms_key_id` - (Optional) ARN for the KMS Key to encrypt the file system at rest, applicable for `PERSISTENT_1` and `PERSISTENT_2` deployment_type. Defaults to an AWS managed KMS Key. +* `log_configuration` - (Optional) The Lustre logging configuration used when creating an Amazon FSx for Lustre file system. When logging is enabled, Lustre logs error and warning events for data repositories associated with your file system to Amazon CloudWatch Logs. See [`log_configuration` Block](#log_configuration-block) for details. * `per_unit_storage_throughput` - (Optional) - Describes the amount of read and write throughput for each 1 tebibyte of storage, in MB/s/TiB, required for the `PERSISTENT_1` and `PERSISTENT_2` deployment_type. Valid values for `PERSISTENT_1` deployment_type and `SSD` storage_type are 50, 100, 200. Valid values for `PERSISTENT_1` deployment_type and `HDD` storage_type are 12, 40. Valid values for `PERSISTENT_2` deployment_type and ` SSD` storage_type are 125, 250, 500, 1000. -* `automatic_backup_retention_days` - (Optional) The number of days to retain automatic backups. Setting this to 0 disables automatic backups. You can retain automatic backups for a maximum of 90 days. only valid for `PERSISTENT_1` and `PERSISTENT_2` deployment_type. +* `root_squash_configuration` - (Optional) The Lustre root squash configuration used when creating an Amazon FSx for Lustre file system. When enabled, root squash restricts root-level access from clients that try to access your file system as a root user. See [`root_squash_configuration` Block](#root_squash_configuration-block) for details. +* `security_group_ids` - (Optional) A list of IDs for the security groups that apply to the specified network interfaces created for file system access. These security groups will apply to all network interfaces. +* `skip_final_backup` - (Optional) When enabled, will skip the default final backup taken when the file system is deleted. This configuration must be applied separately before attempting to delete the resource to have the desired behavior. Defaults to `true`. + + **Note:** If the filesystem uses a Scratch deployment type, final backup during delete will always be skipped and this argument will not be used even when set. +* `storage_capacity` - (Optional) The storage capacity (GiB) of the file system. Minimum of `1200`. See more details at [Allowed values for Fsx storage capacity](https://docs.aws.amazon.com/fsx/latest/APIReference/API_CreateFileSystem.html#FSx-CreateFileSystem-request-StorageCapacity). Update is allowed only for `SCRATCH_2`, `PERSISTENT_1` and `PERSISTENT_2` deployment types, See more details at [Fsx Storage Capacity Update](https://docs.aws.amazon.com/fsx/latest/APIReference/API_UpdateFileSystem.html#FSx-UpdateFileSystem-request-StorageCapacity). Required when not creating filesystem for a backup. * `storage_type` - (Optional) - The filesystem storage type. Either `SSD` or `HDD`, defaults to `SSD`. `HDD` is only supported on `PERSISTENT_1` deployment types. -* `drive_cache_type` - (Optional) - The type of drive cache used by `PERSISTENT_1` filesystems that are provisioned with `HDD` storage_type. Required for `HDD` storage_type, set to either `READ` or `NONE`. -* `daily_automatic_backup_start_time` - (Optional) A recurring daily time, in the format HH:MM. HH is the zero-padded hour of the day (0-23), and MM is the zero-padded minute of the hour. For example, 05:00 specifies 5 AM daily. only valid for `PERSISTENT_1` and `PERSISTENT_2` deployment_type. 
Requires `automatic_backup_retention_days` to be set. -* `auto_import_policy` - (Optional) How Amazon FSx keeps your file and directory listings up to date as you add or modify objects in your linked S3 bucket. see [Auto Import Data Repo](https://docs.aws.amazon.com/fsx/latest/LustreGuide/autoimport-data-repo.html) for more details. Only supported on `PERSISTENT_1` deployment types. -* `copy_tags_to_backups` - (Optional) A boolean flag indicating whether tags for the file system should be copied to backups. Applicable for `PERSISTENT_1` and `PERSISTENT_2` deployment_type. The default value is false. -* `data_compression_type` - (Optional) Sets the data compression configuration for the file system. Valid values are `LZ4` and `NONE`. Default value is `NONE`. Unsetting this value reverts the compression type back to `NONE`. -* `file_system_type_version` - (Optional) Sets the Lustre version for the file system that you're creating. Valid values are 2.10 for `SCRATCH_1`, `SCRATCH_2` and `PERSISTENT_1` deployment types. Valid values for 2.12 include all deployment types. -* `log_configuration` - (Optional) The Lustre logging configuration used when creating an Amazon FSx for Lustre file system. When logging is enabled, Lustre logs error and warning events for data repositories associated with your file system to Amazon CloudWatch Logs. -* `root_squash_configuration` - (Optional) The Lustre root squash configuration used when creating an Amazon FSx for Lustre file system. When enabled, root squash restricts root-level access from clients that try to access your file system as a root user. +* `tags` - (Optional) A map of tags to assign to the file system. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +* `weekly_maintenance_start_time` - (Optional) The preferred start time (in `d:HH:MM` format) to perform weekly maintenance, in the UTC time zone. + +### `final_backup_tags` Block + +The `final_backup_tags` configuration block supports the following arguments: + +* `key` - (Required) The name of the tag. +* `value` - (Required) The value assigned to the corresponding tag key. To create a key-only tag, use an empty string as the value. -### log_configuration +### `log_configuration` Block + +The `log_configuration` configuration block supports the following arguments: * `destination` - (Optional) The Amazon Resource Name (ARN) that specifies the destination of the logs. The name of the Amazon CloudWatch Logs log group must begin with the `/aws/fsx` prefix. If you do not provide a destination, Amazon FSx will create and use a log stream in the CloudWatch Logs `/aws/fsx/lustre` log group. * `level` - (Optional) Sets which data repository events are logged by Amazon FSx. Valid values are `WARN_ONLY`, `FAILURE_ONLY`, `ERROR_ONLY`, `WARN_ERROR` and `DISABLED`. Default value is `DISABLED`. -### root_squash_configuration +### `root_squash_configuration` Block + +The `root_squash_configuration` configuration block supports the following arguments: * `no_squash_nids` - (Optional) When root squash is enabled, you can optionally specify an array of NIDs of clients for which root squash does not apply. A client NID is a Lustre Network Identifier used to uniquely identify a client. You can specify the NID as either a single address or a range of addresses: 1. 
A single address is described in standard Lustre NID format by specifying the client’s IP address followed by the Lustre network ID (for example, 10.0.1.6@tcp). 2. An address range is described using a dash to separate the range (for example, 10.0.[2-10].[1-255]@tcp). * `root_squash` - (Optional) You enable root squash by setting a user ID (UID) and group ID (GID) for the file system in the format UID:GID (for example, 365534:65534). The UID and GID values can range from 0 to 4294967294. @@ -66,8 +86,8 @@ This resource exports the following attributes in addition to the arguments abov * `arn` - Amazon Resource Name of the file system. * `dns_name` - DNS name for the file system, e.g., `fs-12345678.fsx.us-west-2.amazonaws.com` * `id` - Identifier of the file system, e.g., `fs-12345678` -* `network_interface_ids` - Set of Elastic Network Interface identifiers from which the file system is accessible. As explained in the [documentation](https://docs.aws.amazon.com/fsx/latest/LustreGuide/mounting-on-premises.html), the first network interface returned is the primary network interface. * `mount_name` - The value to be used when mounting the filesystem. +* `network_interface_ids` - Set of Elastic Network Interface identifiers from which the file system is accessible. As explained in the [documentation](https://docs.aws.amazon.com/fsx/latest/LustreGuide/mounting-on-premises.html), the first network interface returned is the primary network interface. * `owner_id` - AWS account identifier that created the file system. * `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). * `vpc_id` - Identifier of the Virtual Private Cloud for the file system. diff --git a/website/docs/r/fsx_openzfs_file_system.html.markdown b/website/docs/r/fsx_openzfs_file_system.html.markdown index ca14db229fe..b5030240c6e 100644 --- a/website/docs/r/fsx_openzfs_file_system.html.markdown +++ b/website/docs/r/fsx_openzfs_file_system.html.markdown @@ -24,22 +24,27 @@ resource "aws_fsx_openzfs_file_system" "test" { ## Argument Reference -This resource supports the following arguments: +The following arguments are required: -* `deployment_type` - (Required) - The filesystem deployment type. Valid values: `SINGLE_AZ_1`, `SINGLE_AZ_2` and `MULTI_AZ_1`. +* `deployment_type` - (Required) The filesystem deployment type. Valid values: `SINGLE_AZ_1`, `SINGLE_AZ_2` and `MULTI_AZ_1`. * `storage_capacity` - (Required) The storage capacity (GiB) of the file system. Valid values between `64` and `524288`. * `subnet_ids` - (Required) A list of IDs for the subnets that the file system will be accessible from. * `throughput_capacity` - (Required) Throughput (MB/s) of the file system. Valid values depend on `deployment_type`. Must be one of `64`, `128`, `256`, `512`, `1024`, `2048`, `3072`, `4096` for `SINGLE_AZ_1`. Must be one of `160`, `320`, `640`, `1280`, `2560`, `3840`, `5120`, `7680`, `10240` for `SINGLE_AZ_2`. + +The following arguments are optional: + * `automatic_backup_retention_days` - (Optional) The number of days to retain automatic backups. Setting this to 0 disables automatic backups. You can retain automatic backups for a maximum of 90 days. * `backup_id` - (Optional) The ID of the source backup to create the filesystem from. * `copy_tags_to_backups` - (Optional) A boolean flag indicating whether tags for the file system should be copied to backups. 
The default value is false. * `copy_tags_to_volumes` - (Optional) A boolean flag indicating whether tags for the file system should be copied to snapshots. The default value is false. * `daily_automatic_backup_start_time` - (Optional) A recurring daily time, in the format HH:MM. HH is the zero-padded hour of the day (0-23), and MM is the zero-padded minute of the hour. For example, 05:00 specifies 5 AM daily. Requires `automatic_backup_retention_days` to be set. -* `disk_iops_configuration` - (Optional) The SSD IOPS configuration for the Amazon FSx for OpenZFS file system. See [Disk Iops Configuration](#disk-iops-configuration) below. +* `delete_options` - (Optional) List of delete options, which at present supports only one value that specifies whether to delete all child volumes and snapshots when the file system is deleted. Valid values: `DELETE_CHILD_VOLUMES_AND_SNAPSHOTS`. +* `disk_iops_configuration` - (Optional) The SSD IOPS configuration for the Amazon FSx for OpenZFS file system. See [`disk_iops_configuration` Block](#disk_iops_configuration-block) for details. * `endpoint_ip_address_range` - (Optional) (Multi-AZ only) Specifies the IP address range in which the endpoints to access your file system will be created. +* `final_backup_tags` - (Optional) List of tags to apply to the file system's final backup. Maximum of 50 items. See [`final_backup_tags` Block](#final_backup_tags-block) for details. * `kms_key_id` - (Optional) ARN for the KMS Key to encrypt the file system at rest, Defaults to an AWS managed KMS Key. * `preferred_subnet_id` - (Optional) (Multi-AZ only) Required when `deployment_type` is set to `MULTI_AZ_1`. This specifies the subnet in which you want the preferred file server to be located. -* `root_volume_configuration` - (Optional) The configuration for the root volume of the file system. All other volumes are children or the root volume. See [Root Volume Configuration](#root-volume-configuration) below. +* `root_volume_configuration` - (Optional) The configuration for the root volume of the file system. All other volumes are children or the root volume. See [`root_volume_configuration` Block](#root_volume_configuration-block) for details. * `route_table_ids` - (Optional) (Multi-AZ only) Specifies the route tables in which Amazon FSx creates the rules for routing traffic to the correct file server. You should specify all virtual private cloud (VPC) route tables associated with the subnets in which your clients are located. By default, Amazon FSx selects your VPC's default route table. * `security_group_ids` - (Optional) A list of IDs for the security groups that apply to the specified network interfaces created for file system access. These security groups will apply to all network interfaces. * `skip_final_backup` - (Optional) When enabled, will skip the default final backup taken when the file system is deleted. This configuration must be applied separately before attempting to delete the resource to have the desired behavior. Defaults to `false`. @@ -47,34 +52,51 @@ This resource supports the following arguments: * `tags` - (Optional) A map of tags to assign to the file system. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
* `weekly_maintenance_start_time` - (Optional) The preferred start time (in `d:HH:MM` format) to perform weekly maintenance, in the UTC time zone. -### Disk Iops Configuration +### `disk_iops_configuration` Block + +The `disk_iops_configuration` configuration block supports the following arguments: + +* `iops` - (Optional) The total number of SSD IOPS provisioned for the file system. +* `mode` - (Optional) Specifies whether the number of IOPS for the file system is using the system. Valid values are `AUTOMATIC` and `USER_PROVISIONED`. Default value is `AUTOMATIC`. + +### `final_backup_tags` Block + +The `final_backup_tags` configuration block supports the following arguments: -* `iops` - (Optional) - The total number of SSD IOPS provisioned for the file system. -* `mode` - (Optional) - Specifies whether the number of IOPS for the file system is using the system. Valid values are `AUTOMATIC` and `USER_PROVISIONED`. Default value is `AUTOMATIC`. +* `key` - (Required) The name of the tag. +* `value` - (Required) The value assigned to the corresponding tag key. To create a key-only tag, use an empty string as the value. -### Root Volume Configuration +### `root_volume_configuration` Block -* `copy_tags_to_snapshots` - (Optional) - A boolean flag indicating whether tags for the file system should be copied to snapshots. The default value is false. -* `data_compression_type` - (Optional) - Method used to compress the data on the volume. Valid values are `LZ4`, `NONE` or `ZSTD`. Child volumes that don't specify compression option will inherit from parent volume. This option on file system applies to the root volume. -* `nfs_exports` - (Optional) - NFS export configuration for the root volume. Exactly 1 item. See [NFS Exports](#nfs-exports) Below. -* `read_only` - (Optional) - specifies whether the volume is read-only. Default is false. -* `record_size_kib` - (Optional) - Specifies the record size of an OpenZFS root volume, in kibibytes (KiB). Valid values are `4`, `8`, `16`, `32`, `64`, `128`, `256`, `512`, or `1024` KiB. The default is `128` KiB. -* `user_and_group_quotas` - (Optional) - Specify how much storage users or groups can use on the volume. Maximum of 100 items. See [User and Group Quotas](#user-and-group-quotas) Below. +The `root_volume_configuration` configuration block supports the following arguments: -### NFS Exports +* `copy_tags_to_snapshots` - (Optional) A boolean flag indicating whether tags for the file system should be copied to snapshots. The default value is false. +* `data_compression_type` - (Optional) Method used to compress the data on the volume. Valid values are `LZ4`, `NONE` or `ZSTD`. Child volumes that don't specify compression option will inherit from parent volume. This option on file system applies to the root volume. +* `nfs_exports` - (Optional) NFS export configuration for the root volume. Exactly 1 item. See [`nfs_exports` Block](#nfs_exports-block) for details. +* `read_only` - (Optional) specifies whether the volume is read-only. Default is false. +* `record_size_kib` - (Optional) Specifies the record size of an OpenZFS root volume, in kibibytes (KiB). Valid values are `4`, `8`, `16`, `32`, `64`, `128`, `256`, `512`, or `1024` KiB. The default is `128` KiB. +* `user_and_group_quotas` - (Optional) Specify how much storage users or groups can use on the volume. Maximum of 100 items. See [`user_and_group_quotas` Block](#user_and_group_quotas-block) for details. 
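As a rough sketch of how the OpenZFS delete-time arguments documented above combine, the following configuration is illustrative only (not taken from this changeset; the subnet reference and tag values are assumptions):

```terraform
# Hypothetical sketch based on the arguments documented above; the subnet
# reference and tag values are placeholders, not part of this patch.
resource "aws_fsx_openzfs_file_system" "example" {
  deployment_type     = "SINGLE_AZ_1"
  storage_capacity    = 64
  subnet_ids          = [aws_subnet.example.id]
  throughput_capacity = 64

  # Take (rather than skip) the final backup on delete, tag it, and also
  # remove child volumes and snapshots.
  skip_final_backup = false
  delete_options    = ["DELETE_CHILD_VOLUMES_AND_SNAPSHOTS"]

  final_backup_tags {
    key   = "Environment"
    value = "dev"
  }
}
```

Because `skip_final_backup`, `delete_options`, and `final_backup_tags` are only consulted at delete time, they need to be applied before the destroy is attempted, as the argument descriptions note.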
-* `client_configurations` - (Required) - A list of configuration objects that contain the client and options for mounting the OpenZFS file system. Maximum of 25 items. See [Client Configurations](#client configurations) Below. +### `nfs_exports` Block -### Client Configurations +The `nfs_exports` configuration block supports the following arguments: -* `clients` - (Required) - A value that specifies who can mount the file system. You can provide a wildcard character (*), an IP address (0.0.0.0), or a CIDR address (192.0.2.0/24. By default, Amazon FSx uses the wildcard character when specifying the client. -* `options` - (Required) - The options to use when mounting the file system. Maximum of 20 items. See the [Linix NFS exports man page](https://linux.die.net/man/5/exports) for more information. `crossmount` and `sync` are used by default. +* `client_configurations` - (Required) A list of configuration objects that contain the client and options for mounting the OpenZFS file system. Maximum of 25 items. See [`client_configurations` Block](#client_configurations-block) for details. -### User and Group Quotas +### `client_configurations` Block -* `id` - (Required) - The ID of the user or group. Valid values between `0` and `2147483647` -* `storage_capacity_quota_gib` - (Required) - The amount of storage that the user or group can use in gibibytes (GiB). Valid values between `0` and `2147483647` -* `type` - (Required) - A value that specifies whether the quota applies to a user or group. Valid values are `USER` or `GROUP`. +The `client_configurations` configuration block supports the following arguments: + +* `clients` - (Required) A value that specifies who can mount the file system. You can provide a wildcard character (*), an IP address (0.0.0.0), or a CIDR address (192.0.2.0/24). By default, Amazon FSx uses the wildcard character when specifying the client. +* `options` - (Required) The options to use when mounting the file system. Maximum of 20 items. See the [Linux NFS exports man page](https://linux.die.net/man/5/exports) for more information. `crossmount` and `sync` are used by default. + +### `user_and_group_quotas` Block + +The `user_and_group_quotas` configuration block supports the following arguments: + +* `id` - (Required) The ID of the user or group. Valid values between `0` and `2147483647` +* `storage_capacity_quota_gib` - (Required) The amount of storage that the user or group can use in gibibytes (GiB). Valid values between `0` and `2147483647` +* `type` - (Required) A value that specifies whether the quota applies to a user or group. Valid values are `USER` or `GROUP`. ## Attribute Reference @@ -85,8 +107,8 @@ This resource exports the following attributes in addition to the arguments abov * `endpoint_ip_address` - IP address of the endpoint that is used to access data or to manage the file system. * `id` - Identifier of the file system, e.g., `fs-12345678` * `network_interface_ids` - Set of Elastic Network Interface identifiers from which the file system is accessible The first network interface returned is the primary network interface. -* `root_volume_id` - Identifier of the root volume, e.g., `fsvol-12345678` * `owner_id` - AWS account identifier that created the file system.
+* `root_volume_id` - Identifier of the root volume, e.g., `fsvol-12345678` * `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). * `vpc_id` - Identifier of the Virtual Private Cloud for the file system. diff --git a/website/docs/r/fsx_windows_file_system.html.markdown b/website/docs/r/fsx_windows_file_system.html.markdown index d8ad576f02d..96cf67c2f5d 100644 --- a/website/docs/r/fsx_windows_file_system.html.markdown +++ b/website/docs/r/fsx_windows_file_system.html.markdown @@ -59,29 +59,47 @@ The following arguments are optional: * `active_directory_id` - (Optional) The ID for an existing Microsoft Active Directory instance that the file system should join when it's created. Cannot be specified with `self_managed_active_directory`. * `aliases` - (Optional) An array DNS alias names that you want to associate with the Amazon FSx file system. For more information, see [Working with DNS Aliases](https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-dns-aliases.html) -* `audit_log_configuration` - (Optional) The configuration that Amazon FSx for Windows File Server uses to audit and log user accesses of files, folders, and file shares on the Amazon FSx for Windows File Server file system. See [Audit Log Configuration](#audit-log-configuration) below. +* `audit_log_configuration` - (Optional) The configuration that Amazon FSx for Windows File Server uses to audit and log user accesses of files, folders, and file shares on the Amazon FSx for Windows File Server file system. See [`audit_log_configuration` Block](#audit_log_configuration-block) for details. * `automatic_backup_retention_days` - (Optional) The number of days to retain automatic backups. Minimum of `0` and maximum of `90`. Defaults to `7`. Set to `0` to disable. * `backup_id` - (Optional) The ID of the source backup to create the filesystem from. * `copy_tags_to_backups` - (Optional) A boolean flag indicating whether tags on the file system should be copied to backups. Defaults to `false`. * `daily_automatic_backup_start_time` - (Optional) The preferred time (in `HH:MM` format) to take daily automatic backups, in the UTC time zone. * `deployment_type` - (Optional) Specifies the file system deployment type, valid values are `MULTI_AZ_1`, `SINGLE_AZ_1` and `SINGLE_AZ_2`. Default value is `SINGLE_AZ_1`. -* `disk_iops_configuration` - (Optional) The SSD IOPS configuration for the Amazon FSx for Windows File Server file system. See [Disk Iops Configuration](#disk-iops-configuration) below. +* `disk_iops_configuration` - (Optional) The SSD IOPS configuration for the Amazon FSx for Windows File Server file system. See [`disk_iops_configuration` Block](#disk_iops_configuration-block) for details. +* `final_backup_tags` - (Optional) List of tags to apply to the file system's final backup. Maximum of 50 items. See [`final_backup_tags` Block](#final_backup_tags-block) for details. * `kms_key_id` - (Optional) ARN for the KMS Key to encrypt the file system at rest. Defaults to an AWS managed KMS Key. * `preferred_subnet_id` - (Optional) Specifies the subnet in which you want the preferred file server to be located. Required for when deployment type is `MULTI_AZ_1`. * `security_group_ids` - (Optional) A list of IDs for the security groups that apply to the specified network interfaces created for file system access. 
These security groups will apply to all network interfaces. -* `self_managed_active_directory` - (Optional) Configuration block that Amazon FSx uses to join the Windows File Server instance to your self-managed (including on-premises) Microsoft Active Directory (AD) directory. Cannot be specified with `active_directory_id`. See [Self-Managed Active Directory](#self-managed-active-directory) below. +* `self_managed_active_directory` - (Optional) Configuration block that Amazon FSx uses to join the Windows File Server instance to your self-managed (including on-premises) Microsoft Active Directory (AD) directory. Cannot be specified with `active_directory_id`. See [`self_managed_active_directory` Block](#self_managed_active_directory-block) for details. * `skip_final_backup` - (Optional) When enabled, will skip the default final backup taken when the file system is deleted. This configuration must be applied separately before attempting to delete the resource to have the desired behavior. Defaults to `false`. * `tags` - (Optional) A map of tags to assign to the file system. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `storage_capacity` - (Optional) Storage capacity (GiB) of the file system. Minimum of 32 and maximum of 65536. If the storage type is set to `HDD` the minimum value is 2000. Required when not creating filesystem for a backup. * `storage_type` - (Optional) Specifies the storage type, Valid values are `SSD` and `HDD`. `HDD` is supported on `SINGLE_AZ_2` and `MULTI_AZ_1` Windows file system deployment types. Default value is `SSD`. * `weekly_maintenance_start_time` - (Optional) The preferred start time (in `d:HH:MM` format) to perform weekly maintenance, in the UTC time zone. -### Disk Iops Configuration +### `audit_log_configuration` Block -* `iops` - (Optional) - The total number of SSD IOPS provisioned for the file system. -* `mode` - (Optional) - Specifies whether the number of IOPS for the file system is using the system. Valid values are `AUTOMATIC` and `USER_PROVISIONED`. Default value is `AUTOMATIC`. +The `audit_log_configuration` configuration block supports the following arguments: -### Self-Managed Active Directory +* `audit_log_destination` - (Optional) The Amazon Resource Name (ARN) for the destination of the audit logs. The destination can be any Amazon CloudWatch Logs log group ARN or Amazon Kinesis Data Firehose delivery stream ARN. Can be specified when `file_access_audit_log_level` and `file_share_access_audit_log_level` are not set to `DISABLED`. The name of the Amazon CloudWatch Logs log group must begin with the `/aws/fsx` prefix. The name of the Amazon Kinesis Data Firehose delivery stream must begin with the `aws-fsx` prefix. If you do not provide a destination in `audit_log_destination`, Amazon FSx will create and use a log stream in the CloudWatch Logs /aws/fsx/windows log group. +* `file_access_audit_log_level` - (Optional) Sets which attempt type is logged by Amazon FSx for file and folder accesses. Valid values are `SUCCESS_ONLY`, `FAILURE_ONLY`, `SUCCESS_AND_FAILURE`, and `DISABLED`. Default value is `DISABLED`. +* `file_share_access_audit_log_level` - (Optional) Sets which attempt type is logged by Amazon FSx for file share accesses. Valid values are `SUCCESS_ONLY`, `FAILURE_ONLY`, `SUCCESS_AND_FAILURE`, and `DISABLED`.
Default value is `DISABLED`. + +### `disk_iops_configuration` Block + +The `disk_iops_configuration` configuration block supports the following arguments: + +* `iops` - (Optional) The total number of SSD IOPS provisioned for the file system. +* `mode` - (Optional) Specifies whether the number of IOPS for the file system is using the system. Valid values are `AUTOMATIC` and `USER_PROVISIONED`. Default value is `AUTOMATIC`. + +### `final_backup_tags` Block + +The `final_backup_tags` configuration block supports the following arguments: + +* `key` - (Required) The name of the tag. +* `value` - (Required) The value assigned to the corresponding tag key. To create a key-only tag, use an empty string as the value. + +### `self_managed_active_directory` Block The `self_managed_active_directory` configuration block supports the following arguments: @@ -92,12 +110,6 @@ The `self_managed_active_directory` configuration block supports the following a * `file_system_administrators_group` - (Optional) The name of the domain group whose members are granted administrative privileges for the file system. Administrative privileges include taking ownership of files and folders, and setting audit controls (audit ACLs) on files and folders. The group that you specify must already exist in your domain. Defaults to `Domain Admins`. * `organizational_unit_distinguished_name` - (Optional) The fully qualified distinguished name of the organizational unit within your self-managed AD directory that the Windows File Server instance will join. For example, `OU=FSx,DC=yourdomain,DC=corp,DC=com`. Only accepts OU as the direct parent of the file system. If none is provided, the FSx file system is created in the default location of your self-managed AD directory. To learn more, see [RFC 2253](https://tools.ietf.org/html/rfc2253). -### Audit Log Configuration - -* `audit_log_destination` - (Optional) The Amazon Resource Name (ARN) for the destination of the audit logs. The destination can be any Amazon CloudWatch Logs log group ARN or Amazon Kinesis Data Firehose delivery stream ARN. Can be specified when `file_access_audit_log_level` and `file_share_access_audit_log_level` are not set to `DISABLED`. The name of the Amazon CloudWatch Logs log group must begin with the `/aws/fsx` prefix. The name of the Amazon Kinesis Data Firehouse delivery stream must begin with the `aws-fsx` prefix. If you do not provide a destination in `audit_log_destionation`, Amazon FSx will create and use a log stream in the CloudWatch Logs /aws/fsx/windows log group. -* `file_access_audit_log_level` - (Optional) Sets which attempt type is logged by Amazon FSx for file and folder accesses. Valid values are `SUCCESS_ONLY`, `FAILURE_ONLY`, `SUCCESS_AND_FAILURE`, and `DISABLED`. Default value is `DISABLED`. -* `file_share_access_audit_log_level` - (Optional) Sets which attempt type is logged by Amazon FSx for file share accesses. Valid values are `SUCCESS_ONLY`, `FAILURE_ONLY`, `SUCCESS_AND_FAILURE`, and `DISABLED`. Default value is `DISABLED`. 
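A minimal Windows file system configuration exercising the new delete-time arguments, modelled on the `testAccWindowsFileSystemConfig_deleteConfig` fixture added earlier in this patch (the directory and subnet references and the tag values here are placeholders, not part of the changeset):

```terraform
# Sketch mirroring the acceptance-test fixture in this patch; references and
# tag values are placeholders.
resource "aws_fsx_windows_file_system" "example" {
  active_directory_id = aws_directory_service_directory.example.id
  storage_capacity    = 32
  subnet_ids          = [aws_subnet.example.id]
  throughput_capacity = 8

  # Take the final backup on delete and tag it.
  skip_final_backup = false

  final_backup_tags {
    key   = "Environment"
    value = "dev"
  }

  final_backup_tags {
    key   = "CreatedBy"
    value = "terraform"
  }
}
```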
- ## Attribute Reference This resource exports the following attributes in addition to the arguments above: From 3acc3c714aac2ababf358959bbee230471a4e277 Mon Sep 17 00:00:00 2001 From: Anthony Wat Date: Sun, 26 May 2024 21:08:20 -0400 Subject: [PATCH 03/38] feat: Add final_backup_tags arg to aws_fsx_ontap_volume --- .changelog/37717.txt | 3 + internal/service/fsx/ontap_volume.go | 45 ++- internal/service/fsx/ontap_volume_test.go | 256 ++++++++++++++---- website/docs/r/fsx_ontap_volume.html.markdown | 71 +++-- 4 files changed, 301 insertions(+), 74 deletions(-) diff --git a/.changelog/37717.txt b/.changelog/37717.txt index 7a05d499056..86f71cf2404 100644 --- a/.changelog/37717.txt +++ b/.changelog/37717.txt @@ -6,4 +6,7 @@ resource/aws_fsx_openzfs_file_system: Add `delete_options` and `final_backup_tag ``` ```release-note:enhancement resource/aws_fsx_windows_file_system: Add `final_backup_tags` argument +``` +```release-note:enhancement +resource/aws_fsx_ontap_volume: Add `final_backup_tags` argument ``` \ No newline at end of file diff --git a/internal/service/fsx/ontap_volume.go b/internal/service/fsx/ontap_volume.go index 87c83aafe0b..0916bd40fbf 100644 --- a/internal/service/fsx/ontap_volume.go +++ b/internal/service/fsx/ontap_volume.go @@ -105,6 +105,32 @@ func resourceONTAPVolume() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "final_backup_tags": { + Type: schema.TypeSet, + Optional: true, + MinItems: 1, + MaxItems: 50, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrKey: { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 128), + validation.StringMatch(regexache.MustCompile(`^([\p{L}\p{Z}\p{N}_.:/=+\-@]*)$`), "must be a valid tag key"), + ), + }, + names.AttrValue: { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.All( + validation.StringLenBetween(0, 128), + validation.StringMatch(regexache.MustCompile(`^([\p{L}\p{Z}\p{N}_.:/=+\-@]*)$`), "must be a valid tag value"), + ), + }, + }, + }, + }, "flexcache_endpoint_type": { Type: schema.TypeString, Computed: true, @@ -486,7 +512,12 @@ func resourceONTAPVolumeUpdate(ctx context.Context, d *schema.ResourceData, meta var diags diag.Diagnostics conn := meta.(*conns.AWSClient).FSxConn(ctx) - if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll) { + if d.HasChangesExcept( + "final_backup_tags", + "skip_final_backup", + names.AttrTags, + names.AttrTagsAll, + ) { ontapConfig := &fsx.UpdateOntapVolumeConfiguration{} if d.HasChange("copy_tags_to_backups") { @@ -560,14 +591,20 @@ func resourceONTAPVolumeDelete(ctx context.Context, d *schema.ResourceData, meta var diags diag.Diagnostics conn := meta.(*conns.AWSClient).FSxConn(ctx) - log.Printf("[DEBUG] Deleting FSx for NetApp ONTAP Volume: %s", d.Id()) - _, err := conn.DeleteVolumeWithContext(ctx, &fsx.DeleteVolumeInput{ + input := &fsx.DeleteVolumeInput{ OntapConfiguration: &fsx.DeleteVolumeOntapConfiguration{ BypassSnaplockEnterpriseRetention: aws.Bool(d.Get("bypass_snaplock_enterprise_retention").(bool)), SkipFinalBackup: aws.Bool(d.Get("skip_final_backup").(bool)), }, VolumeId: aws.String(d.Id()), - }) + } + + if v, ok := d.GetOk("final_backup_tags"); ok { + input.OntapConfiguration.FinalBackupTags = expandFinalBackupTags(v.(*schema.Set)) + } + + log.Printf("[DEBUG] Deleting FSx for NetApp ONTAP Volume: %s", d.Id()) + _, err := conn.DeleteVolumeWithContext(ctx, input) if tfawserr.ErrCodeEquals(err, fsx.ErrCodeVolumeNotFound) { return diags diff --git 
a/internal/service/fsx/ontap_volume_test.go b/internal/service/fsx/ontap_volume_test.go index 03cea8e47f6..e91dc274b1b 100644 --- a/internal/service/fsx/ontap_volume_test.go +++ b/internal/service/fsx/ontap_volume_test.go @@ -6,6 +6,7 @@ package fsx_test import ( "context" "fmt" + "os" "testing" "github.com/YakDriver/regexache" @@ -46,7 +47,7 @@ func TestAccFSxONTAPVolume_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), resource.TestCheckResourceAttr(resourceName, "security_style", ""), resource.TestCheckResourceAttr(resourceName, "size_in_megabytes", "1024"), - resource.TestCheckResourceAttr(resourceName, "skip_final_backup", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "skip_final_backup", acctest.CtTrue), resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.#", acctest.Ct0), resource.TestCheckResourceAttr(resourceName, "snapshot_policy", "default"), resource.TestCheckResourceAttr(resourceName, "storage_efficiency_enabled", acctest.CtTrue), @@ -61,6 +62,10 @@ func TestAccFSxONTAPVolume_basic(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "final_backup_tags", + "skip_final_backup", + }, }, }, }) @@ -114,10 +119,14 @@ func TestAccFSxONTAPVolume_aggregateConfiguration(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"bypass_snaplock_enterprise_retention", "skip_final_backup"}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "bypass_snaplock_enterprise_retention", + "final_backup_tags", + "skip_final_backup", + }, }, { Config: testAccONTAPVolumeConfig_aggregateConstituents(rName, ConstituentsPerAggregate, ConstituentsPerAggregate*204800), @@ -156,10 +165,14 @@ func TestAccFSxONTAPVolume_copyTagsToBackups(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"bypass_snaplock_enterprise_retention", "skip_final_backup"}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "bypass_snaplock_enterprise_retention", + "final_backup_tags", + "skip_final_backup", + }, }, { Config: testAccONTAPVolumeConfig_copyTagsToBackups(rName, false), @@ -196,10 +209,14 @@ func TestAccFSxONTAPVolume_junctionPath(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"bypass_snaplock_enterprise_retention", "skip_final_backup"}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "bypass_snaplock_enterprise_retention", + "final_backup_tags", + "skip_final_backup", + }, }, { Config: testAccONTAPVolumeConfig_junctionPath(rName, jPath2), @@ -235,10 +252,14 @@ func TestAccFSxONTAPVolume_name(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"bypass_snaplock_enterprise_retention", "skip_final_backup"}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "bypass_snaplock_enterprise_retention", + "final_backup_tags", + "skip_final_backup", + }, }, { Config: testAccONTAPVolumeConfig_basic(rName2), @@ -272,10 +293,14 @@ func TestAccFSxONTAPVolume_ontapVolumeType(t 
*testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"bypass_snaplock_enterprise_retention", "skip_final_backup"}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "bypass_snaplock_enterprise_retention", + "final_backup_tags", + "skip_final_backup", + }, }, }, }) @@ -302,10 +327,14 @@ func TestAccFSxONTAPVolume_securityStyle(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"bypass_snaplock_enterprise_retention", "skip_final_backup"}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "bypass_snaplock_enterprise_retention", + "final_backup_tags", + "skip_final_backup", + }, }, { Config: testAccONTAPVolumeConfig_securityStyle(rName, "NTFS"), @@ -354,10 +383,14 @@ func TestAccFSxONTAPVolume_size(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"bypass_snaplock_enterprise_retention", "skip_final_backup"}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "bypass_snaplock_enterprise_retention", + "final_backup_tags", + "skip_final_backup", + }, }, { Config: testAccONTAPVolumeConfig_size(rName, size2), @@ -428,10 +461,14 @@ func TestAccFSxONTAPVolume_snaplock(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"bypass_snaplock_enterprise_retention", "skip_final_backup"}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "bypass_snaplock_enterprise_retention", + "final_backup_tags", + "skip_final_backup", + }, }, /* See https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/how-snaplock-works.html#snaplock-audit-log-volume. 
@@ -491,10 +528,14 @@ func TestAccFSxONTAPVolume_snapshotPolicy(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"bypass_snaplock_enterprise_retention", "skip_final_backup"}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "bypass_snaplock_enterprise_retention", + "final_backup_tags", + "skip_final_backup", + }, }, { Config: testAccONTAPVolumeConfig_snapshotPolicy(rName, policy2), @@ -530,10 +571,14 @@ func TestAccFSxONTAPVolume_storageEfficiency(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"bypass_snaplock_enterprise_retention", "skip_final_backup"}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "bypass_snaplock_enterprise_retention", + "final_backup_tags", + "skip_final_backup", + }, }, { Config: testAccONTAPVolumeConfig_storageEfficiency(rName, false), @@ -569,10 +614,14 @@ func TestAccFSxONTAPVolume_tags(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"bypass_snaplock_enterprise_retention", "skip_final_backup"}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "bypass_snaplock_enterprise_retention", + "final_backup_tags", + "skip_final_backup", + }, }, { Config: testAccONTAPVolumeConfig_tags2(rName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), @@ -618,10 +667,14 @@ func TestAccFSxONTAPVolume_tieringPolicy(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"bypass_snaplock_enterprise_retention", "skip_final_backup"}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "bypass_snaplock_enterprise_retention", + "final_backup_tags", + "skip_final_backup", + }, }, { Config: testAccONTAPVolumeConfig_tieringPolicy(rName, "SNAPSHOT_ONLY", 10), @@ -678,10 +731,14 @@ func TestAccFSxONTAPVolume_volumeStyle(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"bypass_snaplock_enterprise_retention", "skip_final_backup"}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "bypass_snaplock_enterprise_retention", + "final_backup_tags", + "skip_final_backup", + }, }, { Config: testAccONTAPVolumeConfig_ontapStyle(rName, style2), @@ -694,6 +751,62 @@ func TestAccFSxONTAPVolume_volumeStyle(t *testing.T) { }) } +func TestAccFSxONTAPVolume_deleteConfig(t *testing.T) { + ctx := acctest.Context(t) + + if os.Getenv("FSX_CREATE_FINAL_BACKUP") != acctest.CtTrue { + t.Skip("Environment variable FSX_CREATE_FINAL_BACKUP is not set to true") + } + + var volume1, volume2 fsx.Volume + resourceName := "aws_fsx_ontap_volume.test" + rName := fmt.Sprintf("tf_acc_test_%d", sdkacctest.RandInt()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckONTAPVolumeDestroy(ctx), + Steps: 
[]resource.TestStep{ + { + Config: testAccONTAPVolumeConfig_deleteConfig(rName, acctest.CtKey1, acctest.CtValue1, acctest.CtKey2, acctest.CtValue2), + Check: resource.ComposeTestCheckFunc( + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume1), + resource.TestCheckResourceAttr(resourceName, "final_backup_tags.#", acctest.Ct2), + resource.TestCheckResourceAttr(resourceName, "final_backup_tags.0.key", acctest.CtKey1), + resource.TestCheckResourceAttr(resourceName, "final_backup_tags.0.value", acctest.CtValue1), + resource.TestCheckResourceAttr(resourceName, "final_backup_tags.1.key", acctest.CtKey2), + resource.TestCheckResourceAttr(resourceName, "final_backup_tags.1.value", acctest.CtValue2), + resource.TestCheckResourceAttr(resourceName, "skip_final_backup", acctest.CtFalse), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "bypass_snaplock_enterprise_retention", + "final_backup_tags", + "skip_final_backup", + }, + }, + { + Config: testAccONTAPVolumeConfig_deleteConfig(rName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, ""), + Check: resource.ComposeTestCheckFunc( + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume2), + testAccCheckONTAPVolumeNotRecreated(&volume1, &volume2), + resource.TestCheckResourceAttr(resourceName, "final_backup_tags.#", acctest.Ct2), + resource.TestCheckResourceAttr(resourceName, "final_backup_tags.0.key", acctest.CtKey1), + resource.TestCheckResourceAttr(resourceName, "final_backup_tags.0.value", acctest.CtValue1Updated), + resource.TestCheckResourceAttr(resourceName, "final_backup_tags.1.key", acctest.CtKey2), + resource.TestCheckResourceAttr(resourceName, "final_backup_tags.1.value", ""), + resource.TestCheckResourceAttr(resourceName, "skip_final_backup", acctest.CtFalse), + ), + }, + }, + }) +} + func testAccCheckONTAPVolumeExists(ctx context.Context, n string, v *fsx.Volume) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -809,6 +922,7 @@ resource "aws_fsx_ontap_volume" "test" { name = %[1]q junction_path = "/%[1]s" size_in_megabytes = 1024 + skip_final_backup = true storage_efficiency_enabled = true storage_virtual_machine_id = aws_fsx_ontap_storage_virtual_machine.test.id } @@ -821,6 +935,7 @@ resource "aws_fsx_ontap_volume" "test" { name = %[1]q junction_path = "/%[1]s" size_in_megabytes = 16 * 102400 + skip_final_backup = true storage_efficiency_enabled = true storage_virtual_machine_id = aws_fsx_ontap_storage_virtual_machine.test.id volume_style = "FLEXGROUP" @@ -839,6 +954,7 @@ resource "aws_fsx_ontap_volume" "test" { name = %[1]q junction_path = "/%[1]s" size_in_megabytes = %[3]d + skip_final_backup = true storage_efficiency_enabled = true storage_virtual_machine_id = aws_fsx_ontap_storage_virtual_machine.test.id volume_style = "FLEXGROUP" @@ -858,6 +974,7 @@ resource "aws_fsx_ontap_volume" "test" { name = %[1]q junction_path = "/%[1]s" size_in_megabytes = 1024 + skip_final_backup = true storage_efficiency_enabled = true storage_virtual_machine_id = aws_fsx_ontap_storage_virtual_machine.test.id copy_tags_to_backups = %[2]t @@ -870,6 +987,7 @@ func testAccONTAPVolumeConfig_junctionPath(rName, junctionPath string) string { resource "aws_fsx_ontap_volume" "test" { name = %[1]q junction_path = %[2]q + skip_final_backup = true size_in_megabytes = 1024 storage_efficiency_enabled = true storage_virtual_machine_id = aws_fsx_ontap_storage_virtual_machine.test.id @@ -896,6 +1014,7 @@ resource 
"aws_fsx_ontap_volume" "test" { junction_path = "/%[1]s" size_in_megabytes = 1024 security_style = %[2]q + skip_final_backup = true storage_efficiency_enabled = true storage_virtual_machine_id = aws_fsx_ontap_storage_virtual_machine.test.id } @@ -908,6 +1027,7 @@ resource "aws_fsx_ontap_volume" "test" { name = %[1]q junction_path = "/%[1]s" size_in_megabytes = %[2]d + skip_final_backup = true storage_efficiency_enabled = true storage_virtual_machine_id = aws_fsx_ontap_storage_virtual_machine.test.id } @@ -920,6 +1040,7 @@ resource "aws_fsx_ontap_volume" "test" { name = %[1]q junction_path = "/%[1]s" size_in_bytes = %[2]d + skip_final_backup = true storage_efficiency_enabled = true storage_virtual_machine_id = aws_fsx_ontap_storage_virtual_machine.test.id volume_style = "FLEXGROUP" @@ -933,6 +1054,7 @@ resource "aws_fsx_ontap_volume" "test" { name = %[1]q junction_path = "/%[1]s" size_in_megabytes = 1024 + skip_final_backup = true storage_efficiency_enabled = true storage_virtual_machine_id = aws_fsx_ontap_storage_virtual_machine.test.id @@ -952,6 +1074,7 @@ resource "aws_fsx_ontap_volume" "test" { name = %[1]q junction_path = "/snaplock_audit_log" size_in_megabytes = 1024 + skip_final_backup = true storage_efficiency_enabled = true storage_virtual_machine_id = aws_fsx_ontap_storage_virtual_machine.test.id @@ -996,6 +1119,7 @@ resource "aws_fsx_ontap_volume" "test" { name = %[1]q junction_path = "/%[1]s" size_in_megabytes = 1024 + skip_final_backup = true snapshot_policy = %[2]q storage_efficiency_enabled = true storage_virtual_machine_id = aws_fsx_ontap_storage_virtual_machine.test.id @@ -1009,6 +1133,7 @@ resource "aws_fsx_ontap_volume" "test" { name = %[1]q junction_path = "/%[1]s" size_in_megabytes = 1024 + skip_final_backup = true storage_efficiency_enabled = %[2]t storage_virtual_machine_id = aws_fsx_ontap_storage_virtual_machine.test.id } @@ -1021,6 +1146,7 @@ resource "aws_fsx_ontap_volume" "test" { name = %[1]q junction_path = "/%[1]s" size_in_megabytes = 1024 + skip_final_backup = true storage_efficiency_enabled = true storage_virtual_machine_id = aws_fsx_ontap_storage_virtual_machine.test.id @@ -1038,6 +1164,7 @@ resource "aws_fsx_ontap_volume" "test" { name = %[1]q junction_path = "/%[1]s" size_in_megabytes = 1024 + skip_final_backup = true storage_efficiency_enabled = true storage_virtual_machine_id = aws_fsx_ontap_storage_virtual_machine.test.id @@ -1054,6 +1181,7 @@ resource "aws_fsx_ontap_volume" "test" { name = %[1]q junction_path = "/%[1]s" size_in_megabytes = 1024 + skip_final_backup = true storage_efficiency_enabled = true storage_virtual_machine_id = aws_fsx_ontap_storage_virtual_machine.test.id @@ -1070,6 +1198,7 @@ resource "aws_fsx_ontap_volume" "test" { name = %[1]q junction_path = "/%[1]s" size_in_megabytes = 1024 + skip_final_backup = true storage_efficiency_enabled = true storage_virtual_machine_id = aws_fsx_ontap_storage_virtual_machine.test.id @@ -1094,3 +1223,24 @@ resource "aws_fsx_ontap_volume" "test" { } `, rName, style)) } + +func testAccONTAPVolumeConfig_deleteConfig(rName, finalTagKey1, finalTagValue1, finalTagKey2, finalTagValue2 string) string { + return acctest.ConfigCompose(testAccONTAPVolumeConfig_base(rName), fmt.Sprintf(` +resource "aws_fsx_ontap_volume" "test" { + name = %[1]q + junction_path = "/%[1]s" + size_in_megabytes = 1024 + skip_final_backup = false + storage_efficiency_enabled = true + storage_virtual_machine_id = aws_fsx_ontap_storage_virtual_machine.test.id + final_backup_tags { + key = %[2]q + value = %[3]q + } + final_backup_tags { + 
key = %[4]q + value = %[5]q + } +} +`, rName, finalTagKey1, finalTagValue1, finalTagKey2, finalTagValue2)) +} diff --git a/website/docs/r/fsx_ontap_volume.html.markdown b/website/docs/r/fsx_ontap_volume.html.markdown index c5fd3f87ebf..0aacdf16be3 100644 --- a/website/docs/r/fsx_ontap_volume.html.markdown +++ b/website/docs/r/fsx_ontap_volume.html.markdown @@ -46,57 +46,94 @@ resource "aws_fsx_ontap_volume" "test" { ## Argument Reference -This resource supports the following arguments: +The following arguments are required: * `name` - (Required) The name of the Volume. You can use a maximum of 203 alphanumeric characters, plus the underscore (_) special character. -* `aggregate_configuration` - (Optional) The Aggregate configuration only applies to `FLEXGROUP` volumes. See [Aggreate Configuration](#aggregate-configuration) below. +* `storage_virtual_machine_id` - (Required) Specifies the storage virtual machine in which to create the volume. + +The following arguments are optional: + +* `aggregate_configuration` - (Optional) The Aggregate configuration only applies to `FLEXGROUP` volumes. See [`aggregate_configuration` Block] for details. * `bypass_snaplock_enterprise_retention` - (Optional) Setting this to `true` allows a SnapLock administrator to delete an FSx for ONTAP SnapLock Enterprise volume with unexpired write once, read many (WORM) files. This configuration must be applied separately before attempting to delete the resource to have the desired behavior. Defaults to `false`. * `copy_tags_to_backups` - (Optional) A boolean flag indicating whether tags for the volume should be copied to backups. This value defaults to `false`. +* `final_backup_tags` - (Optional) List of tags to apply to the file system's final backup. Maximum of 50 items. See [`final_backup_tags` Block](#final_backup_tags-block) for details. * `junction_path` - (Optional) Specifies the location in the storage virtual machine's namespace where the volume is mounted. The junction_path must have a leading forward slash, such as `/vol3` * `ontap_volume_type` - (Optional) Specifies the type of volume, valid values are `RW`, `DP`. Default value is `RW`. These can be set by the ONTAP CLI or API. This setting is used as part of migration and replication [Migrating to Amazon FSx for NetApp ONTAP](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/migrating-fsx-ontap.html) * `security_style` - (Optional) Specifies the volume security style, Valid values are `UNIX`, `NTFS`, and `MIXED`. * `size_in_bytes` - (Optional) Specifies the size of the volume, in megabytes (MB), that you are creating. Can be used for any size but required for volumes over 2 PB. Either size_in_bytes or size_in_megabytes must be specified. Minimum size for `FLEXGROUP` volumes are 100GiB per constituent. * `size_in_megabytes` - (Optional) Specifies the size of the volume, in megabytes (MB), that you are creating. Supported when creating volumes under 2 PB. Either size_in_bytes or size_in_megabytes must be specified. Minimum size for `FLEXGROUP` volumes are 100GiB per constituent. * `skip_final_backup` - (Optional) When enabled, will skip the default final backup taken when the volume is deleted. This configuration must be applied separately before attempting to delete the resource to have the desired behavior. Defaults to `false`. -* `snaplock_configuration` - (Optional) The SnapLock configuration for an FSx for ONTAP volume. See [SnapLock Configuration](#snaplock-configuration) below. 
+* `snaplock_configuration` - (Optional) The SnapLock configuration for an FSx for ONTAP volume. See [`snaplock_configuration` Block](#snaplock_configuration-block) for details. * `snapshot_policy` - (Optional) Specifies the snapshot policy for the volume. See [snapshot policies](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/snapshots-ontap.html#snapshot-policies) in the Amazon FSx ONTAP User Guide * `storage_efficiency_enabled` - (Optional) Set to true to enable deduplication, compression, and compaction storage efficiency features on the volume. -* `storage_virtual_machine_id` - (Required) Specifies the storage virtual machine in which to create the volume. * `tags` - (Optional) A map of tags to assign to the volume. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. -* `tiering_policy` - (Optional) The data tiering policy for an FSx for ONTAP volume. See [Tiering Policy](#tiering-policy) below. +* `tiering_policy` - (Optional) The data tiering policy for an FSx for ONTAP volume. See [`tiering_policy` Block](#tiering_policy-block) for details. * `volume_style` - (Optional) Specifies the styles of volume, valid values are `FLEXVOL`, `FLEXGROUP`. Default value is `FLEXVOL`. FLEXGROUPS have a larger minimum and maximum size. See Volume Styles for more details. [Volume Styles](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/volume-styles.html) -### Aggregate Configuration +### `aggregate_configuration` Block + +The `aggregate_configuration` configuration block supports the following arguments: * `aggregates` - (Optional) Used to specify the names of the aggregates on which the volume will be created. Each aggregate needs to be in the format aggrX where X is the number of the aggregate. * `constituents_per_aggregate` - (Optional) Used to explicitly set the number of constituents within the FlexGroup per storage aggregate. the default value is `8`. -### SnapLock Configuration +### `final_backup_tags` Block + +The `final_backup_tags` configuration block supports the following arguments: + +* `key` - (Required) The name of the tag. +* `value` - (Required) The value assigned to the corresponding tag key. To create a key-only tag, use an empty string as the value. +### `snaplock_configuration` Block + +The `snaplock_configuration` configuration block supports the following arguments: + +* `snaplock_type` - (Required) Specifies the retention mode of an FSx for ONTAP SnapLock volume. After it is set, it can't be changed. Valid values: `COMPLIANCE`, `ENTERPRISE`. * `audit_log_volume` - (Optional) Enables or disables the audit log volume for an FSx for ONTAP SnapLock volume. The default value is `false`. -* `autocommit_period` - (Optional) The configuration object for setting the autocommit period of files in an FSx for ONTAP SnapLock volume. See [Autocommit Period](#autocommit-period) below. +* `autocommit_period` - (Optional) The configuration object for setting the autocommit period of files in an FSx for ONTAP SnapLock volume. See [`autocommit_period` Block](#autocommit_period-block) for details. * `privileged_delete` - (Optional) Enables, disables, or permanently disables privileged delete on an FSx for ONTAP SnapLock Enterprise volume. Valid values: `DISABLED`, `ENABLED`, `PERMANENTLY_DISABLED`. The default value is `DISABLED`. 
-* `retention_period` - (Optional) The retention period of an FSx for ONTAP SnapLock volume. See [SnapLock Retention Period](#snaplock-retention-period) below. -* `snaplock_type` - (Required) Specifies the retention mode of an FSx for ONTAP SnapLock volume. After it is set, it can't be changed. Valid values: `COMPLIANCE`, `ENTERPRISE`. +* `retention_period` - (Optional) The retention period of an FSx for ONTAP SnapLock volume. See [`retention_period` Block](#retention_period-block) for details. * `volume_append_mode_enabled` - (Optional) Enables or disables volume-append mode on an FSx for ONTAP SnapLock volume. The default value is `false`. -### Autocommit Period +### `autocommit_period` Block + +The `autocommit_period` configuration block supports the following arguments: * `type` - (Required) The type of time for the autocommit period of a file in an FSx for ONTAP SnapLock volume. Setting this value to `NONE` disables autocommit. Valid values: `MINUTES`, `HOURS`, `DAYS`, `MONTHS`, `YEARS`, `NONE`. * `value` - (Optional) The amount of time for the autocommit period of a file in an FSx for ONTAP SnapLock volume. -### SnapLock Retention Period +### `retention_period` Block + +The `retention_period` configuration block supports the following arguments: -* `default_retention` - (Required) The retention period assigned to a write once, read many (WORM) file by default if an explicit retention period is not set for an FSx for ONTAP SnapLock volume. The default retention period must be greater than or equal to the minimum retention period and less than or equal to the maximum retention period. See [Retention Period](#retention-period) below. -* `maximum_retention` - (Required) The longest retention period that can be assigned to a WORM file on an FSx for ONTAP SnapLock volume. See [Retention Period](#retention-period) below. -* `minimum_retention` - (Required) The shortest retention period that can be assigned to a WORM file on an FSx for ONTAP SnapLock volume. See [Retention Period](#retention-period) below. +* `default_retention` - (Required) The retention period assigned to a write once, read many (WORM) file by default if an explicit retention period is not set for an FSx for ONTAP SnapLock volume. The default retention period must be greater than or equal to the minimum retention period and less than or equal to the maximum retention period. See [`default_retention` Block](#default_retention-block) for details. +* `maximum_retention` - (Required) The longest retention period that can be assigned to a WORM file on an FSx for ONTAP SnapLock volume. See [`maximum_retention` Block](#maximum_retention-block) for details. +* `minimum_retention` - (Required) The shortest retention period that can be assigned to a WORM file on an FSx for ONTAP SnapLock volume. See [`minimum_retention` Block](#minimum_retention-block) for details. -### Retention Period +### `default_retention` Block + +The `default_retention` configuration block supports the following arguments: + +* `type` - (Required) The type of time for the retention period of an FSx for ONTAP SnapLock volume. Set it to one of the valid types. If you set it to `INFINITE`, the files are retained forever. If you set it to `UNSPECIFIED`, the files are retained until you set an explicit retention period. Valid values: `SECONDS`, `MINUTES`, `HOURS`, `DAYS`, `MONTHS`, `YEARS`, `INFINITE`, `UNSPECIFIED`. +* `value` - (Optional) The amount of time for the autocommit period of a file in an FSx for ONTAP SnapLock volume. 
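Because the retention settings nest two levels deep (`snaplock_configuration`, then `retention_period`, then the per-bound retention blocks), a minimal sketch may help. It assumes the nested block syntax used in this resource's other examples; the resource names, the storage virtual machine reference, and all values are illustrative placeholders, and the `maximum_retention` and `minimum_retention` blocks it uses take the same `type`/`value` arguments described in the blocks that follow.

```terraform
resource "aws_fsx_ontap_volume" "snaplock_example" {
  name                       = "snaplock_example"
  junction_path              = "/snaplock_example"
  size_in_megabytes          = 1024
  storage_efficiency_enabled = true
  # Placeholder reference; point this at your own storage virtual machine.
  storage_virtual_machine_id = aws_fsx_ontap_storage_virtual_machine.example.id

  snaplock_configuration {
    snaplock_type = "ENTERPRISE"

    retention_period {
      # Applied when a WORM file has no explicit retention period.
      default_retention {
        type  = "DAYS"
        value = 30
      }

      maximum_retention {
        type  = "YEARS"
        value = 5
      }

      minimum_retention {
        type  = "HOURS"
        value = 24
      }
    }
  }
}
```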
+ +### `maximum_retention` Block + +The `maximum_retention` configuration block supports the following arguments: * `type` - (Required) The type of time for the retention period of an FSx for ONTAP SnapLock volume. Set it to one of the valid types. If you set it to `INFINITE`, the files are retained forever. If you set it to `UNSPECIFIED`, the files are retained until you set an explicit retention period. Valid values: `SECONDS`, `MINUTES`, `HOURS`, `DAYS`, `MONTHS`, `YEARS`, `INFINITE`, `UNSPECIFIED`. * `value` - (Optional) The amount of time for the autocommit period of a file in an FSx for ONTAP SnapLock volume. -### Tiering Policy +### `minimum_retention` Block + +The `minimum_retention` configuration block supports the following arguments: + +* `type` - (Required) The type of time for the retention period of an FSx for ONTAP SnapLock volume. Set it to one of the valid types. If you set it to `INFINITE`, the files are retained forever. If you set it to `UNSPECIFIED`, the files are retained until you set an explicit retention period. Valid values: `SECONDS`, `MINUTES`, `HOURS`, `DAYS`, `MONTHS`, `YEARS`, `INFINITE`, `UNSPECIFIED`. +* `value` - (Optional) The amount of time for the autocommit period of a file in an FSx for ONTAP SnapLock volume. + +### `tiering_policy` Block + +The `tiering_policy` configuration block supports the following arguments: * `name` - (Required) Specifies the tiering policy for the ONTAP volume for moving data to the capacity pool storage. Valid values are `SNAPSHOT_ONLY`, `AUTO`, `ALL`, `NONE`. Default value is `SNAPSHOT_ONLY`. * `cooling_period` - (Optional) Specifies the number of days that user data in a volume must remain inactive before it is considered "cold" and moved to the capacity pool. Used with `AUTO` and `SNAPSHOT_ONLY` tiering policies only. Valid values are whole numbers between 2 and 183. Default values are 31 days for `AUTO` and 2 days for `SNAPSHOT_ONLY`. 
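The delete-time behaviour documented above (`skip_final_backup` and `final_backup_tags`) mirrors the acceptance-test configuration added earlier in this patch. As a rough sketch only, a volume that takes a tagged final backup on deletion could look like the following; the resource names, tag values, and the storage virtual machine reference are placeholders, not values from the patch.

```terraform
resource "aws_fsx_ontap_volume" "example" {
  name                       = "example_volume"
  junction_path              = "/example_volume"
  size_in_megabytes          = 1024
  storage_efficiency_enabled = true
  # Placeholder reference; point this at your own storage virtual machine.
  storage_virtual_machine_id = aws_fsx_ontap_storage_virtual_machine.example.id

  # Leave skip_final_backup at its default (false) so a final backup is taken,
  # and tag that backup via final_backup_tags. Note that these settings must be
  # applied before destroying the resource for them to take effect.
  skip_final_backup = false

  final_backup_tags {
    key   = "Name"
    value = "example-final-backup"
  }

  final_backup_tags {
    key   = "Environment"
    value = "test"
  }
}
```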
From accf28b037f275ef438f09f779d4b413b1747b8b Mon Sep 17 00:00:00 2001 From: Matt Burgess <549318+mattburgess@users.noreply.github.com> Date: Mon, 17 Jun 2024 23:22:04 +0100 Subject: [PATCH 04/38] elastictranscoder: Migrate to AWS SDK v2 --- go.mod | 7 +- go.sum | 14 +- internal/conns/awsclient_gen.go | 6 +- .../service/elastictranscoder/pipeline.go | 73 ++++----- .../elastictranscoder/pipeline_test.go | 47 +++--- internal/service/elastictranscoder/preset.go | 151 +++++++++--------- .../service/elastictranscoder/preset_test.go | 37 ++--- .../service_endpoints_gen_test.go | 147 ++++++++++++++--- .../elastictranscoder/service_package_gen.go | 36 ++--- names/data/names_data.hcl | 6 +- names/names.go | 1 + 11 files changed, 310 insertions(+), 215 deletions(-) diff --git a/go.mod b/go.mod index 56b93522468..9dda7818567 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( github.com/YakDriver/go-version v0.1.0 github.com/YakDriver/regexache v0.23.0 github.com/aws/aws-sdk-go v1.54.2 - github.com/aws/aws-sdk-go-v2 v1.27.2 + github.com/aws/aws-sdk-go-v2 v1.28.0 github.com/aws/aws-sdk-go-v2/config v1.27.18 github.com/aws/aws-sdk-go-v2/credentials v1.17.18 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.5 @@ -90,6 +90,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/elasticache v1.38.8 github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk v1.23.10 github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.31.3 + github.com/aws/aws-sdk-go-v2/service/elastictranscoder v1.22.11 github.com/aws/aws-sdk-go-v2/service/emr v1.39.11 github.com/aws/aws-sdk-go-v2/service/emrserverless v1.21.2 github.com/aws/aws-sdk-go-v2/service/eventbridge v1.31.5 @@ -245,8 +246,8 @@ require ( github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect github.com/armon/go-radix v1.0.0 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.9 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.9 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.10 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.10 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.9 // indirect github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 // indirect diff --git a/go.sum b/go.sum index f73f5823c5c..59d2bc3fa2c 100644 --- a/go.sum +++ b/go.sum @@ -24,8 +24,8 @@ github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aws/aws-sdk-go v1.54.2 h1:Wo6AVWcleNHrYa48YzfYz60hzxGRqsJrK5s/qePe+3I= github.com/aws/aws-sdk-go v1.54.2/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= -github.com/aws/aws-sdk-go-v2 v1.27.2 h1:pLsTXqX93rimAOZG2FIYraDQstZaaGVVN4tNw65v0h8= -github.com/aws/aws-sdk-go-v2 v1.27.2/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM= +github.com/aws/aws-sdk-go-v2 v1.28.0 h1:ne6ftNhY0lUvlazMUQF15FF6NH80wKmPRFG7g2q6TCw= +github.com/aws/aws-sdk-go-v2 v1.28.0/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 h1:x6xsQXGSmW6frevwDA+vi/wqhp1ct18mVXYN08/93to= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2/go.mod h1:lPprDr1e6cJdyYeGXnRaJoP4Md+cDBvi2eOj00BlGmg= github.com/aws/aws-sdk-go-v2/config v1.27.18 h1:wFvAnwOKKe7QAyIxziwSKjmer9JBMH1vzIL6W+fYuKk= @@ -36,10 +36,10 @@ 
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.5 h1:dDgptDO9dxeFkXy+tEgVkzS github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.5/go.mod h1:gjvE2KBUgUQhcv89jqxrIxH9GaKs1JbZzWejj/DaHGA= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.24 h1:FzNwpVTZDCvm597Ty6mGYvxTolyC1oup0waaKntZI4E= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.24/go.mod h1:wM9NElT/Wn6n3CT1eyVcXtfCy8lSVjjQXfdawQbSShc= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.9 h1:cy8ahBJuhtM8GTTSyOkfy6WVPV1IE+SS5/wfXUYuulw= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.9/go.mod h1:CZBXGLaJnEZI6EVNcPd7a6B5IC5cA/GkRWtu9fp3S6Y= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.9 h1:A4SYk07ef04+vxZToz9LWvAXl9LW0NClpPpMsi31cz0= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.9/go.mod h1:5jJcHuwDagxN+ErjQ3PU3ocf6Ylc/p9x+BLO/+X4iXw= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.10 h1:LZIUb8sQG2cb89QaVFtMSnER10gyKkqU1k3hP3g9das= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.10/go.mod h1:BRIqay//vnIOCZjoXWSLffL2uzbtxEmnSlfbvVh7Z/4= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.10 h1:HY7CXLA0GiQUo3WYxOP7WYkLcwvRX4cLPf5joUcrQGk= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.10/go.mod h1:kfRBSxRa+I+VyON7el3wLZdrO91oxUxEwdAaWgFqN90= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY= github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.9 h1:vHyZxoLVOgrI8GqX7OMHLXp4YYoxeEsrjweXKpye+ds= @@ -200,6 +200,8 @@ github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk v1.23.10 h1:6MoPaz2J4C47Gi github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk v1.23.10/go.mod h1:uW7bugGF+vIsQdE22S+akMpsB+eZsSjJ6Kv/1lKQT50= github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.31.3 h1:Avh8YS+sgb2OKRht0wdNwY8tqtsCzVrmc8dG8Wfy9LI= github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.31.3/go.mod h1:HbtHaw/hnNPaiqcyYnheILVyn81wOZiX9n2gYF5tPmM= +github.com/aws/aws-sdk-go-v2/service/elastictranscoder v1.22.11 h1:kyGPCefx2yRozrfn6jsWHwzMpm/DvTjVJYwo7Lndtbo= +github.com/aws/aws-sdk-go-v2/service/elastictranscoder v1.22.11/go.mod h1:4JsmTxKCGkcGGvJZXESavEnkpe7DfhUQ0RcwWj1xe5w= github.com/aws/aws-sdk-go-v2/service/emr v1.39.11 h1:PLsio+PhcBMUVjRypTYnZUAZ3qPYVWKmIgp3B8ZZxRM= github.com/aws/aws-sdk-go-v2/service/emr v1.39.11/go.mod h1:c4P6499AxhWdFqbnZ25WX77JfVEWFHWqWj9wITeFqlI= github.com/aws/aws-sdk-go-v2/service/emrserverless v1.21.2 h1:kl5gXTCoi2dEUplPE+p+dpdD/BiOWsp1zKNfd3Onhn4= diff --git a/internal/conns/awsclient_gen.go b/internal/conns/awsclient_gen.go index fa56803e051..1fb5d21a9f7 100644 --- a/internal/conns/awsclient_gen.go +++ b/internal/conns/awsclient_gen.go @@ -82,6 +82,7 @@ import ( elasticache_sdkv2 "github.com/aws/aws-sdk-go-v2/service/elasticache" elasticbeanstalk_sdkv2 "github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk" elasticloadbalancingv2_sdkv2 "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2" + elastictranscoder_sdkv2 "github.com/aws/aws-sdk-go-v2/service/elastictranscoder" emr_sdkv2 "github.com/aws/aws-sdk-go-v2/service/emr" emrserverless_sdkv2 "github.com/aws/aws-sdk-go-v2/service/emrserverless" eventbridge_sdkv2 "github.com/aws/aws-sdk-go-v2/service/eventbridge" @@ -202,7 +203,6 @@ import ( efs_sdkv1 "github.com/aws/aws-sdk-go/service/efs" elasticache_sdkv1 "github.com/aws/aws-sdk-go/service/elasticache" elasticsearchservice_sdkv1 
"github.com/aws/aws-sdk-go/service/elasticsearchservice" - elastictranscoder_sdkv1 "github.com/aws/aws-sdk-go/service/elastictranscoder" elb_sdkv1 "github.com/aws/aws-sdk-go/service/elb" elbv2_sdkv1 "github.com/aws/aws-sdk-go/service/elbv2" emr_sdkv1 "github.com/aws/aws-sdk-go/service/emr" @@ -655,8 +655,8 @@ func (c *AWSClient) ElasticBeanstalkClient(ctx context.Context) *elasticbeanstal return errs.Must(client[*elasticbeanstalk_sdkv2.Client](ctx, c, names.ElasticBeanstalk, make(map[string]any))) } -func (c *AWSClient) ElasticTranscoderConn(ctx context.Context) *elastictranscoder_sdkv1.ElasticTranscoder { - return errs.Must(conn[*elastictranscoder_sdkv1.ElasticTranscoder](ctx, c, names.ElasticTranscoder, make(map[string]any))) +func (c *AWSClient) ElasticTranscoderClient(ctx context.Context) *elastictranscoder_sdkv2.Client { + return errs.Must(client[*elastictranscoder_sdkv2.Client](ctx, c, names.ElasticTranscoder, make(map[string]any))) } func (c *AWSClient) ElasticsearchConn(ctx context.Context) *elasticsearchservice_sdkv1.ElasticsearchService { diff --git a/internal/service/elastictranscoder/pipeline.go b/internal/service/elastictranscoder/pipeline.go index 9f97f2f84bf..89ed029398a 100644 --- a/internal/service/elastictranscoder/pipeline.go +++ b/internal/service/elastictranscoder/pipeline.go @@ -8,14 +8,15 @@ import ( "log" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elastictranscoder" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/elastictranscoder" + awstypes "github.com/aws/aws-sdk-go-v2/service/elastictranscoder/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" "github.com/hashicorp/terraform-provider-aws/internal/verify" @@ -236,7 +237,7 @@ func ResourcePipeline() *schema.Resource { func resourcePipelineCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ElasticTranscoderConn(ctx) + conn := meta.(*conns.AWSClient).ElasticTranscoderClient(ctx) req := &elastictranscoder.CreatePipelineInput{ AwsKmsKeyArn: aws.String(d.Get("aws_kms_key_arn").(string)), @@ -265,12 +266,12 @@ func resourcePipelineCreate(ctx context.Context, d *schema.ResourceData, meta in } log.Printf("[DEBUG] Elastic Transcoder Pipeline create opts: %s", req) - resp, err := conn.CreatePipelineWithContext(ctx, req) + resp, err := conn.CreatePipeline(ctx, req) if err != nil { return sdkdiag.AppendErrorf(diags, "creating Elastic Transcoder Pipeline: %s", err) } - d.SetId(aws.StringValue(resp.Pipeline.Id)) + d.SetId(aws.ToString(resp.Pipeline.Id)) for _, w := range resp.Warnings { log.Printf("[WARN] Elastic Transcoder Pipeline %v: %v", *w.Code, *w.Message) @@ -279,7 +280,7 @@ func resourcePipelineCreate(ctx context.Context, d *schema.ResourceData, meta in return append(diags, resourcePipelineRead(ctx, d, meta)...) 
} -func expandETNotifications(d *schema.ResourceData) *elastictranscoder.Notifications { +func expandETNotifications(d *schema.ResourceData) *awstypes.Notifications { list, ok := d.GetOk("notifications") if !ok { return nil @@ -297,7 +298,7 @@ func expandETNotifications(d *schema.ResourceData) *elastictranscoder.Notificati rN := l[0].(map[string]interface{}) - return &elastictranscoder.Notifications{ + return &awstypes.Notifications{ Completed: aws.String(rN["completed"].(string)), Error: aws.String(rN["error"].(string)), Progressing: aws.String(rN["progressing"].(string)), @@ -305,14 +306,14 @@ func expandETNotifications(d *schema.ResourceData) *elastictranscoder.Notificati } } -func flattenETNotifications(n *elastictranscoder.Notifications) []map[string]interface{} { +func flattenETNotifications(n *awstypes.Notifications) []map[string]interface{} { if n == nil { return nil } allEmpty := func(s ...*string) bool { for _, s := range s { - if aws.StringValue(s) != "" { + if aws.ToString(s) != "" { return false } } @@ -325,16 +326,16 @@ func flattenETNotifications(n *elastictranscoder.Notifications) []map[string]int } result := map[string]interface{}{ - "completed": aws.StringValue(n.Completed), - "error": aws.StringValue(n.Error), - "progressing": aws.StringValue(n.Progressing), - "warning": aws.StringValue(n.Warning), + "completed": aws.ToString(n.Completed), + "error": aws.ToString(n.Error), + "progressing": aws.ToString(n.Progressing), + "warning": aws.ToString(n.Warning), } return []map[string]interface{}{result} } -func expandETPiplineOutputConfig(d *schema.ResourceData, key string) *elastictranscoder.PipelineOutputConfig { +func expandETPiplineOutputConfig(d *schema.ResourceData, key string) *awstypes.PipelineOutputConfig { list, ok := d.GetOk(key) if !ok { return nil @@ -347,7 +348,7 @@ func expandETPiplineOutputConfig(d *schema.ResourceData, key string) *elastictra cc := l[0].(map[string]interface{}) - cfg := &elastictranscoder.PipelineOutputConfig{ + cfg := &awstypes.PipelineOutputConfig{ Bucket: aws.String(cc[names.AttrBucket].(string)), StorageClass: aws.String(cc[names.AttrStorageClass].(string)), } @@ -362,21 +363,21 @@ func expandETPiplineOutputConfig(d *schema.ResourceData, key string) *elastictra return cfg } -func flattenETPipelineOutputConfig(cfg *elastictranscoder.PipelineOutputConfig) []map[string]interface{} { +func flattenETPipelineOutputConfig(cfg *awstypes.PipelineOutputConfig) []map[string]interface{} { if cfg == nil { return nil } result := map[string]interface{}{ - names.AttrBucket: aws.StringValue(cfg.Bucket), - names.AttrStorageClass: aws.StringValue(cfg.StorageClass), + names.AttrBucket: aws.ToString(cfg.Bucket), + names.AttrStorageClass: aws.ToString(cfg.StorageClass), } return []map[string]interface{}{result} } -func expandETPermList(permissions *schema.Set) []*elastictranscoder.Permission { - var perms []*elastictranscoder.Permission +func expandETPermList(permissions *schema.Set) []awstypes.Permission { + var perms []awstypes.Permission for _, p := range permissions.List() { if p == nil { @@ -385,8 +386,8 @@ func expandETPermList(permissions *schema.Set) []*elastictranscoder.Permission { m := p.(map[string]interface{}) - perm := &elastictranscoder.Permission{ - Access: flex.ExpandStringList(m["access"].([]interface{})), + perm := awstypes.Permission{ + Access: flex.ExpandStringValueList(m["access"].([]interface{})), Grantee: aws.String(m["grantee"].(string)), GranteeType: aws.String(m["grantee_type"].(string)), } @@ -396,14 +397,14 @@ func 
expandETPermList(permissions *schema.Set) []*elastictranscoder.Permission { return perms } -func flattenETPermList(perms []*elastictranscoder.Permission) []map[string]interface{} { +func flattenETPermList(perms []awstypes.Permission) []map[string]interface{} { var set []map[string]interface{} for _, p := range perms { result := map[string]interface{}{ - "access": flex.FlattenStringList(p.Access), - "grantee": aws.StringValue(p.Grantee), - "grantee_type": aws.StringValue(p.GranteeType), + "access": flex.FlattenStringValueList(p.Access), + "grantee": aws.ToString(p.Grantee), + "grantee_type": aws.ToString(p.GranteeType), } set = append(set, result) @@ -413,7 +414,7 @@ func flattenETPermList(perms []*elastictranscoder.Permission) []map[string]inter func resourcePipelineUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ElasticTranscoderConn(ctx) + conn := meta.(*conns.AWSClient).ElasticTranscoderClient(ctx) req := &elastictranscoder.UpdatePipelineInput{ Id: aws.String(d.Id()), @@ -448,14 +449,14 @@ func resourcePipelineUpdate(ctx context.Context, d *schema.ResourceData, meta in } log.Printf("[DEBUG] Updating Elastic Transcoder Pipeline: %#v", req) - output, err := conn.UpdatePipelineWithContext(ctx, req) + output, err := conn.UpdatePipeline(ctx, req) if err != nil { return sdkdiag.AppendErrorf(diags, "updating Elastic Transcoder pipeline: %s", err) } for _, w := range output.Warnings { - log.Printf("[WARN] Elastic Transcoder Pipeline %v: %v", aws.StringValue(w.Code), - aws.StringValue(w.Message)) + log.Printf("[WARN] Elastic Transcoder Pipeline %v: %v", aws.ToString(w.Code), + aws.ToString(w.Message)) } return append(diags, resourcePipelineRead(ctx, d, meta)...) 
@@ -463,14 +464,14 @@ func resourcePipelineUpdate(ctx context.Context, d *schema.ResourceData, meta in func resourcePipelineRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ElasticTranscoderConn(ctx) + conn := meta.(*conns.AWSClient).ElasticTranscoderClient(ctx) - resp, err := conn.ReadPipelineWithContext(ctx, &elastictranscoder.ReadPipelineInput{ + resp, err := conn.ReadPipeline(ctx, &elastictranscoder.ReadPipelineInput{ Id: aws.String(d.Id()), }) if err != nil { - if tfawserr.ErrCodeEquals(err, elastictranscoder.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { log.Printf("[WARN] Elastic Transcoder Pipeline (%s) not found, removing from state", d.Id()) d.SetId("") return diags @@ -529,10 +530,10 @@ func resourcePipelineRead(ctx context.Context, d *schema.ResourceData, meta inte func resourcePipelineDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ElasticTranscoderConn(ctx) + conn := meta.(*conns.AWSClient).ElasticTranscoderClient(ctx) log.Printf("[DEBUG] Elastic Transcoder Delete Pipeline: %s", d.Id()) - _, err := conn.DeletePipelineWithContext(ctx, &elastictranscoder.DeletePipelineInput{ + _, err := conn.DeletePipeline(ctx, &elastictranscoder.DeletePipelineInput{ Id: aws.String(d.Id()), }) if err != nil { diff --git a/internal/service/elastictranscoder/pipeline_test.go b/internal/service/elastictranscoder/pipeline_test.go index e0e45aa008c..6c1822a8ec2 100644 --- a/internal/service/elastictranscoder/pipeline_test.go +++ b/internal/service/elastictranscoder/pipeline_test.go @@ -11,21 +11,22 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elastictranscoder" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/elastictranscoder" + awstypes "github.com/aws/aws-sdk-go-v2/service/elastictranscoder/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" tfelastictranscoder "github.com/hashicorp/terraform-provider-aws/internal/service/elastictranscoder" "github.com/hashicorp/terraform-provider-aws/names" ) func TestAccElasticTranscoderPipeline_basic(t *testing.T) { ctx := acctest.Context(t) - pipeline := &elastictranscoder.Pipeline{} + pipeline := &awstypes.Pipeline{} resourceName := "aws_elastictranscoder_pipeline.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -53,7 +54,7 @@ func TestAccElasticTranscoderPipeline_basic(t *testing.T) { func TestAccElasticTranscoderPipeline_kmsKey(t *testing.T) { ctx := acctest.Context(t) - pipeline := &elastictranscoder.Pipeline{} + pipeline := &awstypes.Pipeline{} resourceName := "aws_elastictranscoder_pipeline.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) keyResourceName := "aws_kms_key.test" @@ -82,7 +83,7 @@ func TestAccElasticTranscoderPipeline_kmsKey(t *testing.T) { func TestAccElasticTranscoderPipeline_notifications(t *testing.T) { ctx := acctest.Context(t) - pipeline := 
elastictranscoder.Pipeline{} + pipeline := awstypes.Pipeline{} resourceName := "aws_elastictranscoder_pipeline.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -119,19 +120,19 @@ func TestAccElasticTranscoderPipeline_notifications(t *testing.T) { // testAccCheckTags can be used to check the tags on a resource. func testAccCheckPipeline_notifications( - p *elastictranscoder.Pipeline, notifications []string) resource.TestCheckFunc { + p *awstypes.Pipeline, notifications []string) resource.TestCheckFunc { return func(s *terraform.State) error { var notes []string - if aws.StringValue(p.Notifications.Completed) != "" { + if aws.ToString(p.Notifications.Completed) != "" { notes = append(notes, "completed") } - if aws.StringValue(p.Notifications.Error) != "" { + if aws.ToString(p.Notifications.Error) != "" { notes = append(notes, "error") } - if aws.StringValue(p.Notifications.Progressing) != "" { + if aws.ToString(p.Notifications.Progressing) != "" { notes = append(notes, "progressing") } - if aws.StringValue(p.Notifications.Warning) != "" { + if aws.ToString(p.Notifications.Warning) != "" { notes = append(notes, "warning") } @@ -152,7 +153,7 @@ func testAccCheckPipeline_notifications( func TestAccElasticTranscoderPipeline_withContent(t *testing.T) { ctx := acctest.Context(t) - pipeline := &elastictranscoder.Pipeline{} + pipeline := &awstypes.Pipeline{} resourceName := "aws_elastictranscoder_pipeline.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -186,7 +187,7 @@ func TestAccElasticTranscoderPipeline_withContent(t *testing.T) { func TestAccElasticTranscoderPipeline_withPermissions(t *testing.T) { ctx := acctest.Context(t) - pipeline := &elastictranscoder.Pipeline{} + pipeline := &awstypes.Pipeline{} resourceName := "aws_elastictranscoder_pipeline.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -214,7 +215,7 @@ func TestAccElasticTranscoderPipeline_withPermissions(t *testing.T) { func TestAccElasticTranscoderPipeline_disappears(t *testing.T) { ctx := acctest.Context(t) - pipeline := &elastictranscoder.Pipeline{} + pipeline := &awstypes.Pipeline{} resourceName := "aws_elastictranscoder_pipeline.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -236,7 +237,7 @@ func TestAccElasticTranscoderPipeline_disappears(t *testing.T) { }) } -func testAccCheckPipelineExists(ctx context.Context, n string, res *elastictranscoder.Pipeline) resource.TestCheckFunc { +func testAccCheckPipelineExists(ctx context.Context, n string, res *awstypes.Pipeline) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { @@ -247,9 +248,9 @@ func testAccCheckPipelineExists(ctx context.Context, n string, res *elastictrans return fmt.Errorf("No Pipeline ID is set") } - conn := acctest.Provider.Meta().(*conns.AWSClient).ElasticTranscoderConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ElasticTranscoderClient(ctx) - out, err := conn.ReadPipelineWithContext(ctx, &elastictranscoder.ReadPipelineInput{ + out, err := conn.ReadPipeline(ctx, &elastictranscoder.ReadPipelineInput{ Id: aws.String(rs.Primary.ID), }) @@ -265,24 +266,24 @@ func testAccCheckPipelineExists(ctx context.Context, n string, res *elastictrans func testAccCheckPipelineDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).ElasticTranscoderConn(ctx) + conn := 
acctest.Provider.Meta().(*conns.AWSClient).ElasticTranscoderClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_elastictranscoder_pipline" { continue } - out, err := conn.ReadPipelineWithContext(ctx, &elastictranscoder.ReadPipelineInput{ + out, err := conn.ReadPipeline(ctx, &elastictranscoder.ReadPipelineInput{ Id: aws.String(rs.Primary.ID), }) - if tfawserr.ErrCodeEquals(err, elastictranscoder.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { continue } if err != nil { return fmt.Errorf("unexpected error: %w", err) } - if out.Pipeline != nil && aws.StringValue(out.Pipeline.Id) == rs.Primary.ID { + if out.Pipeline != nil && aws.ToString(out.Pipeline.Id) == rs.Primary.ID { return fmt.Errorf("Elastic Transcoder Pipeline still exists") } } @@ -291,11 +292,11 @@ func testAccCheckPipelineDestroy(ctx context.Context) resource.TestCheckFunc { } func testAccPreCheck(ctx context.Context, t *testing.T) { - conn := acctest.Provider.Meta().(*conns.AWSClient).ElasticTranscoderConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ElasticTranscoderClient(ctx) input := &elastictranscoder.ListPipelinesInput{} - _, err := conn.ListPipelinesWithContext(ctx, input) + _, err := conn.ListPipelines(ctx, input) if acctest.PreCheckSkipError(err) { t.Skipf("skipping acceptance testing: %s", err) diff --git a/internal/service/elastictranscoder/preset.go b/internal/service/elastictranscoder/preset.go index 6d08446043c..c87833fa3e7 100644 --- a/internal/service/elastictranscoder/preset.go +++ b/internal/service/elastictranscoder/preset.go @@ -7,14 +7,15 @@ import ( "context" "log" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elastictranscoder" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/elastictranscoder" + awstypes "github.com/aws/aws-sdk-go-v2/service/elastictranscoder/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" "github.com/hashicorp/terraform-provider-aws/names" @@ -42,7 +43,7 @@ func ResourcePreset() *schema.Resource { ForceNew: true, MaxItems: 1, Elem: &schema.Resource{ - // elastictranscoder.AudioParameters + // awstypes.AudioParameters Schema: map[string]*schema.Schema{ "audio_packing_mode": { Type: schema.TypeString, @@ -281,7 +282,7 @@ func ResourcePreset() *schema.Resource { ForceNew: true, MaxItems: 1, Elem: &schema.Resource{ - // elastictranscoder.VideoParameters + // awstypes.VideoParameters Schema: map[string]*schema.Schema{ "aspect_ratio": { Type: schema.TypeString, @@ -420,7 +421,7 @@ func ResourcePreset() *schema.Resource { Optional: true, ForceNew: true, Elem: &schema.Resource{ - // elastictranscoder.PresetWatermark + // awstypes.PresetWatermark Schema: map[string]*schema.Schema{ "horizontal_align": { Type: schema.TypeString, @@ -508,7 +509,7 @@ func ResourcePreset() *schema.Resource { func resourcePresetCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := 
meta.(*conns.AWSClient).ElasticTranscoderConn(ctx) + conn := meta.(*conns.AWSClient).ElasticTranscoderClient(ctx) req := &elastictranscoder.CreatePresetInput{ Audio: expandETAudioParams(d), @@ -527,21 +528,21 @@ func resourcePresetCreate(ctx context.Context, d *schema.ResourceData, meta inte } log.Printf("[DEBUG] Elastic Transcoder Preset create opts: %s", req) - resp, err := conn.CreatePresetWithContext(ctx, req) + resp, err := conn.CreatePreset(ctx, req) if err != nil { return sdkdiag.AppendErrorf(diags, "creating Elastic Transcoder Preset: %s", err) } - if aws.StringValue(resp.Warning) != "" { - log.Printf("[WARN] Elastic Transcoder Preset: %s", aws.StringValue(resp.Warning)) + if aws.ToString(resp.Warning) != "" { + log.Printf("[WARN] Elastic Transcoder Preset: %s", aws.ToString(resp.Warning)) } - d.SetId(aws.StringValue(resp.Preset.Id)) + d.SetId(aws.ToString(resp.Preset.Id)) return append(diags, resourcePresetRead(ctx, d, meta)...) } -func expandETThumbnails(d *schema.ResourceData) *elastictranscoder.Thumbnails { +func expandETThumbnails(d *schema.ResourceData) *awstypes.Thumbnails { list, ok := d.GetOk("thumbnails") if !ok { return nil @@ -553,7 +554,7 @@ func expandETThumbnails(d *schema.ResourceData) *elastictranscoder.Thumbnails { } t := l[0].(map[string]interface{}) - thumbnails := &elastictranscoder.Thumbnails{} + thumbnails := &awstypes.Thumbnails{} if v, ok := t["aspect_ratio"]; ok && v.(string) != "" { thumbnails.AspectRatio = aws.String(v.(string)) @@ -590,7 +591,7 @@ func expandETThumbnails(d *schema.ResourceData) *elastictranscoder.Thumbnails { return thumbnails } -func expandETAudioParams(d *schema.ResourceData) *elastictranscoder.AudioParameters { +func expandETAudioParams(d *schema.ResourceData) *awstypes.AudioParameters { list, ok := d.GetOk("audio") if !ok { return nil @@ -602,7 +603,7 @@ func expandETAudioParams(d *schema.ResourceData) *elastictranscoder.AudioParamet } audio := l[0].(map[string]interface{}) - ap := &elastictranscoder.AudioParameters{ + ap := &awstypes.AudioParameters{ AudioPackingMode: aws.String(audio["audio_packing_mode"].(string)), Channels: aws.String(audio["channels"].(string)), Codec: aws.String(audio["codec"].(string)), @@ -617,7 +618,7 @@ func expandETAudioParams(d *schema.ResourceData) *elastictranscoder.AudioParamet return ap } -func expandETAudioCodecOptions(d *schema.ResourceData) *elastictranscoder.AudioCodecOptions { +func expandETAudioCodecOptions(d *schema.ResourceData) *awstypes.AudioCodecOptions { l := d.Get("audio_codec_options").([]interface{}) if len(l) == 0 || l[0] == nil { return nil @@ -625,7 +626,7 @@ func expandETAudioCodecOptions(d *schema.ResourceData) *elastictranscoder.AudioC codec := l[0].(map[string]interface{}) - codecOpts := &elastictranscoder.AudioCodecOptions{} + codecOpts := &awstypes.AudioCodecOptions{} if v, ok := codec["signed"]; ok && v.(string) != "" { codecOpts.Signed = aws.String(v.(string)) @@ -646,21 +647,21 @@ func expandETAudioCodecOptions(d *schema.ResourceData) *elastictranscoder.AudioC return codecOpts } -func expandETVideoParams(d *schema.ResourceData) *elastictranscoder.VideoParameters { +func expandETVideoParams(d *schema.ResourceData) *awstypes.VideoParameters { l := d.Get("video").([]interface{}) if len(l) == 0 || l[0] == nil { return nil } p := l[0].(map[string]interface{}) - etVideoParams := &elastictranscoder.VideoParameters{ + etVideoParams := &awstypes.VideoParameters{ Watermarks: expandETVideoWatermarks(d), } if v, ok := d.GetOk("video_codec_options"); ok && 
len(v.(map[string]interface{})) > 0 { - etVideoParams.CodecOptions = flex.ExpandStringMap(v.(map[string]interface{})) + etVideoParams.CodecOptions = flex.ExpandStringValueMap(v.(map[string]interface{})) } else { - etVideoParams.CodecOptions = aws.StringMap(make(map[string]string)) + etVideoParams.CodecOptions = make(map[string]string) } if v, ok := p["aspect_ratio"]; ok && v.(string) != "" { @@ -722,12 +723,12 @@ func expandETVideoParams(d *schema.ResourceData) *elastictranscoder.VideoParamet return etVideoParams } -func expandETVideoWatermarks(d *schema.ResourceData) []*elastictranscoder.PresetWatermark { +func expandETVideoWatermarks(d *schema.ResourceData) []awstypes.PresetWatermark { s := d.Get("video_watermarks").(*schema.Set) if s == nil || s.Len() == 0 { return nil } - var watermarks []*elastictranscoder.PresetWatermark + var watermarks []awstypes.PresetWatermark for _, w := range s.List() { if w == nil { @@ -735,7 +736,7 @@ func expandETVideoWatermarks(d *schema.ResourceData) []*elastictranscoder.Preset } p := w.(map[string]interface{}) - watermark := &elastictranscoder.PresetWatermark{ + watermark := awstypes.PresetWatermark{ HorizontalAlign: aws.String(p["horizontal_align"].(string)), HorizontalOffset: aws.String(p["horizontal_offset"].(string)), Id: aws.String(p[names.AttrID].(string)), @@ -755,14 +756,14 @@ func expandETVideoWatermarks(d *schema.ResourceData) []*elastictranscoder.Preset func resourcePresetRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ElasticTranscoderConn(ctx) + conn := meta.(*conns.AWSClient).ElasticTranscoderClient(ctx) - resp, err := conn.ReadPresetWithContext(ctx, &elastictranscoder.ReadPresetInput{ + resp, err := conn.ReadPreset(ctx, &elastictranscoder.ReadPresetInput{ Id: aws.String(d.Id()), }) if err != nil { - if tfawserr.ErrCodeEquals(err, elastictranscoder.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { log.Printf("[WARN] Elastic Transcoder Preset (%s) not found, removing from state", d.Id()) d.SetId("") return diags @@ -805,7 +806,7 @@ func resourcePresetRead(ctx context.Context, d *schema.ResourceData, meta interf } if preset.Video.CodecOptions != nil { - if err := d.Set("video_codec_options", aws.StringValueMap(preset.Video.CodecOptions)); err != nil { + if err := d.Set("video_codec_options", preset.Video.CodecOptions); err != nil { return sdkdiag.AppendErrorf(diags, "reading Elastic Transcoder Preset (%s): setting video_codec_options: %s", d.Id(), err) } } @@ -820,98 +821,98 @@ func resourcePresetRead(ctx context.Context, d *schema.ResourceData, meta interf return diags } -func flattenETAudioParameters(audio *elastictranscoder.AudioParameters) []map[string]interface{} { +func flattenETAudioParameters(audio *awstypes.AudioParameters) []map[string]interface{} { if audio == nil { return nil } result := map[string]interface{}{ - "audio_packing_mode": aws.StringValue(audio.AudioPackingMode), - "channels": aws.StringValue(audio.Channels), - "codec": aws.StringValue(audio.Codec), - "sample_rate": aws.StringValue(audio.SampleRate), + "audio_packing_mode": aws.ToString(audio.AudioPackingMode), + "channels": aws.ToString(audio.Channels), + "codec": aws.ToString(audio.Codec), + "sample_rate": aws.ToString(audio.SampleRate), } if audio.BitRate != nil { - result["bit_rate"] = aws.StringValue(audio.BitRate) + result["bit_rate"] = aws.ToString(audio.BitRate) } return []map[string]interface{}{result} } -func 
flattenETAudioCodecOptions(opts *elastictranscoder.AudioCodecOptions) []map[string]interface{} { +func flattenETAudioCodecOptions(opts *awstypes.AudioCodecOptions) []map[string]interface{} { if opts == nil { return nil } result := map[string]interface{}{ - "bit_depth": aws.StringValue(opts.BitDepth), - "bit_order": aws.StringValue(opts.BitOrder), - names.AttrProfile: aws.StringValue(opts.Profile), - "signed": aws.StringValue(opts.Signed), + "bit_depth": aws.ToString(opts.BitDepth), + "bit_order": aws.ToString(opts.BitOrder), + names.AttrProfile: aws.ToString(opts.Profile), + "signed": aws.ToString(opts.Signed), } return []map[string]interface{}{result} } -func flattenETThumbnails(thumbs *elastictranscoder.Thumbnails) []map[string]interface{} { +func flattenETThumbnails(thumbs *awstypes.Thumbnails) []map[string]interface{} { if thumbs == nil { return nil } result := map[string]interface{}{ - "aspect_ratio": aws.StringValue(thumbs.AspectRatio), - names.AttrFormat: aws.StringValue(thumbs.Format), - names.AttrInterval: aws.StringValue(thumbs.Interval), - "max_height": aws.StringValue(thumbs.MaxHeight), - "max_width": aws.StringValue(thumbs.MaxWidth), - "padding_policy": aws.StringValue(thumbs.PaddingPolicy), - "resolution": aws.StringValue(thumbs.Resolution), - "sizing_policy": aws.StringValue(thumbs.SizingPolicy), + "aspect_ratio": aws.ToString(thumbs.AspectRatio), + names.AttrFormat: aws.ToString(thumbs.Format), + names.AttrInterval: aws.ToString(thumbs.Interval), + "max_height": aws.ToString(thumbs.MaxHeight), + "max_width": aws.ToString(thumbs.MaxWidth), + "padding_policy": aws.ToString(thumbs.PaddingPolicy), + "resolution": aws.ToString(thumbs.Resolution), + "sizing_policy": aws.ToString(thumbs.SizingPolicy), } return []map[string]interface{}{result} } -func flattenETVideoParams(video *elastictranscoder.VideoParameters) []map[string]interface{} { +func flattenETVideoParams(video *awstypes.VideoParameters) []map[string]interface{} { if video == nil { return nil } result := map[string]interface{}{ - "aspect_ratio": aws.StringValue(video.AspectRatio), - "bit_rate": aws.StringValue(video.BitRate), - "codec": aws.StringValue(video.Codec), - "display_aspect_ratio": aws.StringValue(video.DisplayAspectRatio), - "fixed_gop": aws.StringValue(video.FixedGOP), - "frame_rate": aws.StringValue(video.FrameRate), - "keyframes_max_dist": aws.StringValue(video.KeyframesMaxDist), - "max_frame_rate": aws.StringValue(video.MaxFrameRate), - "max_height": aws.StringValue(video.MaxHeight), - "max_width": aws.StringValue(video.MaxWidth), - "padding_policy": aws.StringValue(video.PaddingPolicy), - "resolution": aws.StringValue(video.Resolution), - "sizing_policy": aws.StringValue(video.SizingPolicy), + "aspect_ratio": aws.ToString(video.AspectRatio), + "bit_rate": aws.ToString(video.BitRate), + "codec": aws.ToString(video.Codec), + "display_aspect_ratio": aws.ToString(video.DisplayAspectRatio), + "fixed_gop": aws.ToString(video.FixedGOP), + "frame_rate": aws.ToString(video.FrameRate), + "keyframes_max_dist": aws.ToString(video.KeyframesMaxDist), + "max_frame_rate": aws.ToString(video.MaxFrameRate), + "max_height": aws.ToString(video.MaxHeight), + "max_width": aws.ToString(video.MaxWidth), + "padding_policy": aws.ToString(video.PaddingPolicy), + "resolution": aws.ToString(video.Resolution), + "sizing_policy": aws.ToString(video.SizingPolicy), } return []map[string]interface{}{result} } -func flattenETWatermarks(watermarks []*elastictranscoder.PresetWatermark) []map[string]interface{} { +func 
flattenETWatermarks(watermarks []awstypes.PresetWatermark) []map[string]interface{} { var watermarkSet []map[string]interface{} for _, w := range watermarks { watermark := map[string]interface{}{ - "horizontal_align": aws.StringValue(w.HorizontalAlign), - "horizontal_offset": aws.StringValue(w.HorizontalOffset), - names.AttrID: aws.StringValue(w.Id), - "max_height": aws.StringValue(w.MaxHeight), - "max_width": aws.StringValue(w.MaxWidth), - "opacity": aws.StringValue(w.Opacity), - "sizing_policy": aws.StringValue(w.SizingPolicy), - names.AttrTarget: aws.StringValue(w.Target), - "vertical_align": aws.StringValue(w.VerticalAlign), - "vertical_offset": aws.StringValue(w.VerticalOffset), + "horizontal_align": aws.ToString(w.HorizontalAlign), + "horizontal_offset": aws.ToString(w.HorizontalOffset), + names.AttrID: aws.ToString(w.Id), + "max_height": aws.ToString(w.MaxHeight), + "max_width": aws.ToString(w.MaxWidth), + "opacity": aws.ToString(w.Opacity), + "sizing_policy": aws.ToString(w.SizingPolicy), + names.AttrTarget: aws.ToString(w.Target), + "vertical_align": aws.ToString(w.VerticalAlign), + "vertical_offset": aws.ToString(w.VerticalOffset), } watermarkSet = append(watermarkSet, watermark) @@ -922,10 +923,10 @@ func flattenETWatermarks(watermarks []*elastictranscoder.PresetWatermark) []map[ func resourcePresetDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ElasticTranscoderConn(ctx) + conn := meta.(*conns.AWSClient).ElasticTranscoderClient(ctx) log.Printf("[DEBUG] Elastic Transcoder Delete Preset: %s", d.Id()) - _, err := conn.DeletePresetWithContext(ctx, &elastictranscoder.DeletePresetInput{ + _, err := conn.DeletePreset(ctx, &elastictranscoder.DeletePresetInput{ Id: aws.String(d.Id()), }) diff --git a/internal/service/elastictranscoder/preset_test.go b/internal/service/elastictranscoder/preset_test.go index 5516d461cea..b24bfb343c0 100644 --- a/internal/service/elastictranscoder/preset_test.go +++ b/internal/service/elastictranscoder/preset_test.go @@ -9,21 +9,22 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elastictranscoder" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/elastictranscoder" + awstypes "github.com/aws/aws-sdk-go-v2/service/elastictranscoder/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" tfet "github.com/hashicorp/terraform-provider-aws/internal/service/elastictranscoder" "github.com/hashicorp/terraform-provider-aws/names" ) func TestAccElasticTranscoderPreset_basic(t *testing.T) { ctx := acctest.Context(t) - var preset elastictranscoder.Preset + var preset awstypes.Preset resourceName := "aws_elastictranscoder_preset.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -51,7 +52,7 @@ func TestAccElasticTranscoderPreset_basic(t *testing.T) { func TestAccElasticTranscoderPreset_video_noCodec(t *testing.T) { ctx := acctest.Context(t) - var preset elastictranscoder.Preset + var preset awstypes.Preset resourceName := 
"aws_elastictranscoder_preset.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -79,7 +80,7 @@ func TestAccElasticTranscoderPreset_video_noCodec(t *testing.T) { // https://github.com/terraform-providers/terraform-provider-aws/issues/14090 func TestAccElasticTranscoderPreset_audio_noBitRate(t *testing.T) { ctx := acctest.Context(t) - var preset elastictranscoder.Preset + var preset awstypes.Preset resourceName := "aws_elastictranscoder_preset.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -106,7 +107,7 @@ func TestAccElasticTranscoderPreset_audio_noBitRate(t *testing.T) { func TestAccElasticTranscoderPreset_disappears(t *testing.T) { ctx := acctest.Context(t) - var preset elastictranscoder.Preset + var preset awstypes.Preset resourceName := "aws_elastictranscoder_preset.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -131,7 +132,7 @@ func TestAccElasticTranscoderPreset_disappears(t *testing.T) { // Reference: https://github.com/hashicorp/terraform-provider-aws/issues/14087 func TestAccElasticTranscoderPreset_AudioCodecOptions_empty(t *testing.T) { ctx := acctest.Context(t) - var preset elastictranscoder.Preset + var preset awstypes.Preset resourceName := "aws_elastictranscoder_preset.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -159,7 +160,7 @@ func TestAccElasticTranscoderPreset_AudioCodecOptions_empty(t *testing.T) { func TestAccElasticTranscoderPreset_description(t *testing.T) { ctx := acctest.Context(t) - var preset elastictranscoder.Preset + var preset awstypes.Preset resourceName := "aws_elastictranscoder_preset.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -188,7 +189,7 @@ func TestAccElasticTranscoderPreset_description(t *testing.T) { // Tests all configuration blocks func TestAccElasticTranscoderPreset_full(t *testing.T) { ctx := acctest.Context(t) - var preset elastictranscoder.Preset + var preset awstypes.Preset resourceName := "aws_elastictranscoder_preset.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -239,7 +240,7 @@ func TestAccElasticTranscoderPreset_full(t *testing.T) { // Reference: https://github.com/hashicorp/terraform-provider-aws/issues/695 func TestAccElasticTranscoderPreset_Video_frameRate(t *testing.T) { ctx := acctest.Context(t) - var preset elastictranscoder.Preset + var preset awstypes.Preset resourceName := "aws_elastictranscoder_preset.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -265,9 +266,9 @@ func TestAccElasticTranscoderPreset_Video_frameRate(t *testing.T) { }) } -func testAccCheckPresetExists(ctx context.Context, name string, preset *elastictranscoder.Preset) resource.TestCheckFunc { +func testAccCheckPresetExists(ctx context.Context, name string, preset *awstypes.Preset) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).ElasticTranscoderConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ElasticTranscoderClient(ctx) rs, ok := s.RootModule().Resources[name] if !ok { @@ -277,7 +278,7 @@ func testAccCheckPresetExists(ctx context.Context, name string, preset *elastict return fmt.Errorf("No Preset ID is set") } - out, err := conn.ReadPresetWithContext(ctx, &elastictranscoder.ReadPresetInput{ + out, err := conn.ReadPreset(ctx, &elastictranscoder.ReadPresetInput{ Id: aws.String(rs.Primary.ID), }) @@ -293,24 +294,24 @@ func testAccCheckPresetExists(ctx context.Context, name string, preset *elastict func 
testAccCheckPresetDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).ElasticTranscoderConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ElasticTranscoderClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_elastictranscoder_preset" { continue } - out, err := conn.ReadPresetWithContext(ctx, &elastictranscoder.ReadPresetInput{ + out, err := conn.ReadPreset(ctx, &elastictranscoder.ReadPresetInput{ Id: aws.String(rs.Primary.ID), }) if err == nil { - if out.Preset != nil && aws.StringValue(out.Preset.Id) == rs.Primary.ID { + if out.Preset != nil && aws.ToString(out.Preset.Id) == rs.Primary.ID { return fmt.Errorf("Elastic Transcoder Preset still exists") } } - if !tfawserr.ErrCodeEquals(err, elastictranscoder.ErrCodeResourceNotFoundException) { + if !errs.IsA[*awstypes.ResourceNotFoundException](err) { return fmt.Errorf("unexpected error: %s", err) } } diff --git a/internal/service/elastictranscoder/service_endpoints_gen_test.go b/internal/service/elastictranscoder/service_endpoints_gen_test.go index 6ad3a92103e..adce04cf928 100644 --- a/internal/service/elastictranscoder/service_endpoints_gen_test.go +++ b/internal/service/elastictranscoder/service_endpoints_gen_test.go @@ -4,17 +4,20 @@ package elastictranscoder_test import ( "context" + "errors" "fmt" "maps" - "net/url" "os" "path/filepath" + "reflect" "strings" "testing" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/endpoints" - elastictranscoder_sdkv1 "github.com/aws/aws-sdk-go/service/elastictranscoder" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + elastictranscoder_sdkv2 "github.com/aws/aws-sdk-go-v2/service/elastictranscoder" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" "github.com/google/go-cmp/cmp" "github.com/hashicorp/aws-sdk-go-base/v2/servicemocks" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -239,54 +242,63 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } func defaultEndpoint(region string) string { - r := endpoints.DefaultResolver() + r := elastictranscoder_sdkv2.NewDefaultEndpointResolverV2() - ep, err := r.EndpointFor(elastictranscoder_sdkv1.EndpointsID, region) + ep, err := r.ResolveEndpoint(context.Background(), elastictranscoder_sdkv2.EndpointParameters{ + Region: aws_sdkv2.String(region), + }) if err != nil { return err.Error() } - url, _ := url.Parse(ep.URL) - - if url.Path == "" { - url.Path = "/" + if ep.URI.Path == "" { + ep.URI.Path = "/" } - return url.String() + return ep.URI.String() } func defaultFIPSEndpoint(region string) string { - r := endpoints.DefaultResolver() + r := elastictranscoder_sdkv2.NewDefaultEndpointResolverV2() - ep, err := r.EndpointFor(elastictranscoder_sdkv1.EndpointsID, region, func(opt *endpoints.Options) { - opt.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled + ep, err := r.ResolveEndpoint(context.Background(), elastictranscoder_sdkv2.EndpointParameters{ + Region: aws_sdkv2.String(region), + UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { return err.Error() } - url, _ := url.Parse(ep.URL) - - if url.Path == "" { - url.Path = "/" + if ep.URI.Path == "" { + ep.URI.Path = "/" } - return url.String() + return ep.URI.String() } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { t.Helper() - client := meta.ElasticTranscoderConn(ctx) 
+ client := meta.ElasticTranscoderClient(ctx) - req, _ := client.ListPipelinesRequest(&elastictranscoder_sdkv1.ListPipelinesInput{}) + var result apiCallParams - req.HTTPRequest.URL.Path = "/" - - return apiCallParams{ - endpoint: req.HTTPRequest.URL.String(), - region: aws_sdkv1.StringValue(client.Config.Region), + _, err := client.ListPipelines(ctx, &elastictranscoder_sdkv2.ListPipelinesInput{}, + func(opts *elastictranscoder_sdkv2.Options) { + opts.APIOptions = append(opts.APIOptions, + addRetrieveEndpointURLMiddleware(t, &result.endpoint), + addRetrieveRegionMiddleware(&result.region), + addCancelRequestMiddleware(), + ) + }, + ) + if err == nil { + t.Fatal("Expected an error, got none") + } else if !errors.Is(err, errCancelOperation) { + t.Fatalf("Unexpected error: %s", err) } + + return result } func withNoConfig(_ *caseSetup) { @@ -443,6 +455,89 @@ func testEndpointCase(t *testing.T, region string, testcase endpointTestCase, ca } } +func addRetrieveEndpointURLMiddleware(t *testing.T, endpoint *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + retrieveEndpointURLMiddleware(t, endpoint), + middleware.After, + ) + } +} + +func retrieveEndpointURLMiddleware(t *testing.T, endpoint *string) middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Retrieve Endpoint", + func(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + t.Helper() + + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + t.Fatalf("Expected *github.com/aws/smithy-go/transport/http.Request, got %s", fullTypeName(in.Request)) + } + + url := request.URL + url.RawQuery = "" + url.Path = "/" + + *endpoint = url.String() + + return next.HandleFinalize(ctx, in) + }) +} + +func addRetrieveRegionMiddleware(region *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Serialize.Add( + retrieveRegionMiddleware(region), + middleware.After, + ) + } +} + +func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { + return middleware.SerializeMiddlewareFunc( + "Test: Retrieve Region", + func(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (middleware.SerializeOutput, middleware.Metadata, error) { + *region = awsmiddleware.GetRegion(ctx) + + return next.HandleSerialize(ctx, in) + }, + ) +} + +var errCancelOperation = fmt.Errorf("Test: Canceling request") + +func addCancelRequestMiddleware() func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + cancelRequestMiddleware(), + middleware.After, + ) + } +} + +// cancelRequestMiddleware creates a Smithy middleware that intercepts the request before sending and cancels it +func cancelRequestMiddleware() middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Cancel Requests", + func(_ context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + return middleware.FinalizeOutput{}, middleware.Metadata{}, errCancelOperation + }) +} + +func fullTypeName(i interface{}) string { + return fullValueTypeName(reflect.ValueOf(i)) +} + +func fullValueTypeName(v reflect.Value) string { + if v.Kind() == reflect.Ptr { + return "*" + fullValueTypeName(reflect.Indirect(v)) + } + + requestType := v.Type() + return fmt.Sprintf("%s.%s", requestType.PkgPath(), 
requestType.Name()) +} + func generateSharedConfigFile(config configFile) string { var buf strings.Builder diff --git a/internal/service/elastictranscoder/service_package_gen.go b/internal/service/elastictranscoder/service_package_gen.go index 632576761a2..d7ed700dd53 100644 --- a/internal/service/elastictranscoder/service_package_gen.go +++ b/internal/service/elastictranscoder/service_package_gen.go @@ -5,10 +5,8 @@ package elastictranscoder import ( "context" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" - session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" - elastictranscoder_sdkv1 "github.com/aws/aws-sdk-go/service/elastictranscoder" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + elastictranscoder_sdkv2 "github.com/aws/aws-sdk-go-v2/service/elastictranscoder" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" @@ -46,25 +44,23 @@ func (p *servicePackage) ServicePackageName() string { return names.ElasticTranscoder } -// NewConn returns a new AWS SDK for Go v1 client for this service package's AWS API. -func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*elastictranscoder_sdkv1.ElasticTranscoder, error) { - sess := config[names.AttrSession].(*session_sdkv1.Session) +// NewClient returns a new AWS SDK for Go v2 client for this service package's AWS API. +func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*elastictranscoder_sdkv2.Client, error) { + cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - cfg := aws_sdkv1.Config{} + return elastictranscoder_sdkv2.NewFromConfig(cfg, func(o *elastictranscoder_sdkv2.Options) { + if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + o.BaseEndpoint = aws_sdkv2.String(endpoint) - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled + if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled + } } - } - - return elastictranscoder_sdkv1.New(sess.Copy(&cfg)), nil + }), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/names/data/names_data.hcl b/names/data/names_data.hcl index 0168a7dc585..35d92c6e4fc 100644 --- a/names/data/names_data.hcl +++ b/names/data/names_data.hcl @@ -3339,7 +3339,7 @@ service "elastictranscoder" { sdk { id = "Elastic Transcoder" - client_version = [1] + client_version = [2] } names { @@ -3347,10 +3347,6 @@ service "elastictranscoder" { human_friendly = "Elastic Transcoder" } - client { - go_v1_client_typename = "ElasticTranscoder" - } - endpoint_info { endpoint_api_call = "ListPipelines" } diff --git a/names/names.go b/names/names.go index 39923e10b49..75dcd8c677a 100644 --- a/names/names.go +++ b/names/names.go @@ -64,6 +64,7 @@ const ( ECREndpointID = "api.ecr" EKSEndpointID = "eks" EMREndpointID = "elasticmapreduce" + 
ElasticTranscoderEndpointID = "elastictranscoder" EventsEndpointID = "events" EvidentlyEndpointID = "evidently" FMSEndpointID = "fms" From d9ad2b72c9f014010ac4ad9bfad60169c9f5d642 Mon Sep 17 00:00:00 2001 From: Matt Burgess <549318+mattburgess@users.noreply.github.com> Date: Tue, 18 Jun 2024 17:07:43 +0100 Subject: [PATCH 05/38] Fix lint --- internal/service/elastictranscoder/pipeline.go | 2 +- internal/service/elastictranscoder/preset.go | 8 +++----- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/internal/service/elastictranscoder/pipeline.go b/internal/service/elastictranscoder/pipeline.go index 89ed029398a..d380e6f2786 100644 --- a/internal/service/elastictranscoder/pipeline.go +++ b/internal/service/elastictranscoder/pipeline.go @@ -265,7 +265,7 @@ func resourcePipelineCreate(ctx context.Context, d *schema.ResourceData, meta in return sdkdiag.AppendErrorf(diags, "you must specify only one of output_bucket or content_config.bucket") } - log.Printf("[DEBUG] Elastic Transcoder Pipeline create opts: %s", req) + log.Printf("[DEBUG] Elastic Transcoder Pipeline create opts: %+v", req) resp, err := conn.CreatePipeline(ctx, req) if err != nil { return sdkdiag.AppendErrorf(diags, "creating Elastic Transcoder Pipeline: %s", err) diff --git a/internal/service/elastictranscoder/preset.go b/internal/service/elastictranscoder/preset.go index c87833fa3e7..de6cd7f6322 100644 --- a/internal/service/elastictranscoder/preset.go +++ b/internal/service/elastictranscoder/preset.go @@ -527,7 +527,7 @@ func resourcePresetCreate(ctx context.Context, d *schema.ResourceData, meta inte req.Name = aws.String(name) } - log.Printf("[DEBUG] Elastic Transcoder Preset create opts: %s", req) + log.Printf("[DEBUG] Elastic Transcoder Preset create opts: %+v", req) resp, err := conn.CreatePreset(ctx, req) if err != nil { return sdkdiag.AppendErrorf(diags, "creating Elastic Transcoder Preset: %s", err) @@ -805,10 +805,8 @@ func resourcePresetRead(ctx context.Context, d *schema.ResourceData, meta interf return sdkdiag.AppendErrorf(diags, "reading Elastic Transcoder Preset (%s): setting video: %s", d.Id(), err) } - if preset.Video.CodecOptions != nil { - if err := d.Set("video_codec_options", preset.Video.CodecOptions); err != nil { - return sdkdiag.AppendErrorf(diags, "reading Elastic Transcoder Preset (%s): setting video_codec_options: %s", d.Id(), err) - } + if err := d.Set("video_codec_options", preset.Video.CodecOptions); err != nil { + return sdkdiag.AppendErrorf(diags, "reading Elastic Transcoder Preset (%s): setting video_codec_options: %s", d.Id(), err) } if preset.Video.Watermarks != nil { From c63c8c30b37fecd179b725d8a02650e0d5ebc191 Mon Sep 17 00:00:00 2001 From: Matt Burgess <549318+mattburgess@users.noreply.github.com> Date: Tue, 2 Jul 2024 22:39:23 +0100 Subject: [PATCH 06/38] emrcontainers: Migrate to AWS SDK v2 --- go.mod | 1 + go.sum | 2 ++ internal/conns/awsclient_gen.go | 6 +++--- names/data/names_data.hcl | 2 +- 4 files changed, 7 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 99a0249afea..07a9e10efd6 100644 --- a/go.mod +++ b/go.mod @@ -98,6 +98,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing v1.26.3 github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.33.3 github.com/aws/aws-sdk-go-v2/service/emr v1.42.2 + github.com/aws/aws-sdk-go-v2/service/emrcontainers v1.30.3 github.com/aws/aws-sdk-go-v2/service/emrserverless v1.23.3 github.com/aws/aws-sdk-go-v2/service/eventbridge v1.33.3 github.com/aws/aws-sdk-go-v2/service/evidently v1.21.3 diff 
--git a/go.sum b/go.sum index c1d6d6acb1e..efcc82218cf 100644 --- a/go.sum +++ b/go.sum @@ -216,6 +216,8 @@ github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.33.3 h1:yiBmRRlVw github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.33.3/go.mod h1:L5bVuO4PeXuDuMYZfL3IW69E6mz6PDCYpp6IKDlcLMA= github.com/aws/aws-sdk-go-v2/service/emr v1.42.2 h1:j3aHjEsxFGCNGOCJjJM6AtPhdvn1pw2i2hGqxLU0qeI= github.com/aws/aws-sdk-go-v2/service/emr v1.42.2/go.mod h1:rN91rXF7gucnSnArDWbv9xDdZjBEetO4LFoJgGK/Wqw= +github.com/aws/aws-sdk-go-v2/service/emrcontainers v1.30.3 h1:ZLa8EkZfs004Xr1M8esLjAhq4hUdG1LTq28XrBnjSdY= +github.com/aws/aws-sdk-go-v2/service/emrcontainers v1.30.3/go.mod h1:JzEDBk3bq/xt5PM+OG+B6abbT/fBsoK3ia4EyLh3JMA= github.com/aws/aws-sdk-go-v2/service/emrserverless v1.23.3 h1:zxpxkpY1h+kPWquiUSG8u2CJ3AtEJPqqBqiMKxLwPjI= github.com/aws/aws-sdk-go-v2/service/emrserverless v1.23.3/go.mod h1:9+NjcAre2lLrpGvCrb9V+TUDii5D+Z8xER/vCPZdZFg= github.com/aws/aws-sdk-go-v2/service/eventbridge v1.33.3 h1:pjZzcXU25gsD2WmlmlayEsyXIWMVOK3//x4BXvK9c0U= diff --git a/internal/conns/awsclient_gen.go b/internal/conns/awsclient_gen.go index 4692319a34b..d20b1e643fb 100644 --- a/internal/conns/awsclient_gen.go +++ b/internal/conns/awsclient_gen.go @@ -90,6 +90,7 @@ import ( elasticloadbalancing_sdkv2 "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing" elasticloadbalancingv2_sdkv2 "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2" emr_sdkv2 "github.com/aws/aws-sdk-go-v2/service/emr" + emrcontainers_sdkv2 "github.com/aws/aws-sdk-go-v2/service/emrcontainers" emrserverless_sdkv2 "github.com/aws/aws-sdk-go-v2/service/emrserverless" eventbridge_sdkv2 "github.com/aws/aws-sdk-go-v2/service/eventbridge" evidently_sdkv2 "github.com/aws/aws-sdk-go-v2/service/evidently" @@ -216,7 +217,6 @@ import ( elastictranscoder_sdkv1 "github.com/aws/aws-sdk-go/service/elastictranscoder" elbv2_sdkv1 "github.com/aws/aws-sdk-go/service/elbv2" emr_sdkv1 "github.com/aws/aws-sdk-go/service/emr" - emrcontainers_sdkv1 "github.com/aws/aws-sdk-go/service/emrcontainers" fsx_sdkv1 "github.com/aws/aws-sdk-go/service/fsx" gamelift_sdkv1 "github.com/aws/aws-sdk-go/service/gamelift" glue_sdkv1 "github.com/aws/aws-sdk-go/service/glue" @@ -641,8 +641,8 @@ func (c *AWSClient) EMRClient(ctx context.Context) *emr_sdkv2.Client { return errs.Must(client[*emr_sdkv2.Client](ctx, c, names.EMR, make(map[string]any))) } -func (c *AWSClient) EMRContainersConn(ctx context.Context) *emrcontainers_sdkv1.EMRContainers { - return errs.Must(conn[*emrcontainers_sdkv1.EMRContainers](ctx, c, names.EMRContainers, make(map[string]any))) +func (c *AWSClient) EMRContainersClient(ctx context.Context) *emrcontainers_sdkv2.Client { + return errs.Must(client[*emrcontainers_sdkv2.Client](ctx, c, names.EMRContainers, make(map[string]any))) } func (c *AWSClient) EMRServerlessClient(ctx context.Context) *emrserverless_sdkv2.Client { diff --git a/names/data/names_data.hcl b/names/data/names_data.hcl index 52f677d1a5a..4857209e2b1 100644 --- a/names/data/names_data.hcl +++ b/names/data/names_data.hcl @@ -3808,7 +3808,7 @@ service "emrcontainers" { sdk { id = "EMR containers" - client_version = [1] + client_version = [2] } names { From 69da4b216450629690a84930db7af304a2b79200 Mon Sep 17 00:00:00 2001 From: Matt Burgess <549318+mattburgess@users.noreply.github.com> Date: Tue, 2 Jul 2024 22:42:05 +0100 Subject: [PATCH 07/38] gopatch --- .../service/emrcontainers/job_template.go | 123 +++++++++--------- .../emrcontainers/job_template_test.go | 15 ++- 
internal/service/emrcontainers/sweep.go | 15 ++- .../service/emrcontainers/virtual_cluster.go | 73 ++++++----- .../virtual_cluster_data_source.go | 6 +- .../emrcontainers/virtual_cluster_test.go | 15 ++- 6 files changed, 128 insertions(+), 119 deletions(-) diff --git a/internal/service/emrcontainers/job_template.go b/internal/service/emrcontainers/job_template.go index 8f34a3da7f3..647b69d8c69 100644 --- a/internal/service/emrcontainers/job_template.go +++ b/internal/service/emrcontainers/job_template.go @@ -9,15 +9,18 @@ import ( "time" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/emrcontainers" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/emrcontainers" + awstypes "github.com/aws/aws-sdk-go-v2/service/emrcontainers/types" + "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" @@ -133,7 +136,7 @@ func ResourceJobTemplate() *schema.Resource { Type: schema.TypeString, Optional: true, ForceNew: true, - ValidateFunc: validation.StringInSlice(emrcontainers.PersistentAppUI_Values(), false), + ValidateFunc: enum.Validate[awstypes.PersistentAppUI](), }, "s3_monitoring_configuration": { Type: schema.TypeList, @@ -260,7 +263,7 @@ func ResourceJobTemplate() *schema.Resource { func resourceJobTemplateCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EMRContainersConn(ctx) + conn := meta.(*conns.AWSClient).EMRContainersClient(ctx) name := d.Get(names.AttrName).(string) input := &emrcontainers.CreateJobTemplateInput{ @@ -277,13 +280,13 @@ func resourceJobTemplateCreate(ctx context.Context, d *schema.ResourceData, meta input.KmsKeyArn = aws.String(v.(string)) } - output, err := conn.CreateJobTemplateWithContext(ctx, input) + output, err := conn.CreateJobTemplate(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating EMR Containers Job Template (%s): %s", name, err) } - d.SetId(aws.StringValue(output.Id)) + d.SetId(aws.ToString(output.Id)) return append(diags, resourceJobTemplateRead(ctx, d, meta)...) 
} @@ -291,7 +294,7 @@ func resourceJobTemplateCreate(ctx context.Context, d *schema.ResourceData, meta func resourceJobTemplateRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EMRContainersConn(ctx) + conn := meta.(*conns.AWSClient).EMRContainersClient(ctx) vc, err := FindJobTemplateByID(ctx, conn, d.Id()) @@ -324,19 +327,19 @@ func resourceJobTemplateRead(ctx context.Context, d *schema.ResourceData, meta i func resourceJobTemplateDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EMRContainersConn(ctx) + conn := meta.(*conns.AWSClient).EMRContainersClient(ctx) log.Printf("[INFO] Deleting EMR Containers Job Template: %s", d.Id()) - _, err := conn.DeleteJobTemplateWithContext(ctx, &emrcontainers.DeleteJobTemplateInput{ + _, err := conn.DeleteJobTemplate(ctx, &emrcontainers.DeleteJobTemplateInput{ Id: aws.String(d.Id()), }) - if tfawserr.ErrCodeEquals(err, emrcontainers.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return diags } // Not actually a validation exception - if tfawserr.ErrMessageContains(err, emrcontainers.ErrCodeValidationException, "Template does not exist") { + if tfawserr.ErrMessageContains(err, awstypes.ErrCodeValidationException, "Template does not exist") { return diags } @@ -351,12 +354,12 @@ func resourceJobTemplateDelete(ctx context.Context, d *schema.ResourceData, meta return diags } -func expandJobTemplateData(tfMap map[string]interface{}) *emrcontainers.JobTemplateData { +func expandJobTemplateData(tfMap map[string]interface{}) *awstypes.JobTemplateData { if tfMap == nil { return nil } - apiObject := &emrcontainers.JobTemplateData{} + apiObject := &awstypes.JobTemplateData{} if v, ok := tfMap["configuration_overrides"].([]interface{}); ok && len(v) > 0 { apiObject.ConfigurationOverrides = expandConfigurationOverrides(v[0].(map[string]interface{})) @@ -381,12 +384,12 @@ func expandJobTemplateData(tfMap map[string]interface{}) *emrcontainers.JobTempl return apiObject } -func expandConfigurationOverrides(tfMap map[string]interface{}) *emrcontainers.ParametricConfigurationOverrides { +func expandConfigurationOverrides(tfMap map[string]interface{}) *awstypes.ParametricConfigurationOverrides { if tfMap == nil { return nil } - apiObject := &emrcontainers.ParametricConfigurationOverrides{} + apiObject := &awstypes.ParametricConfigurationOverrides{} if v, ok := tfMap["application_configuration"].([]interface{}); ok && len(v) > 0 { apiObject.ApplicationConfiguration = expandConfigurations(v) @@ -398,12 +401,12 @@ func expandConfigurationOverrides(tfMap map[string]interface{}) *emrcontainers.P return apiObject } -func expandConfigurations(tfList []interface{}) []*emrcontainers.Configuration { +func expandConfigurations(tfList []interface{}) []*awstypes.Configuration { if len(tfList) == 0 { return nil } - var apiObjects []*emrcontainers.Configuration + var apiObjects []*awstypes.Configuration for _, tfMapRaw := range tfList { tfMap, ok := tfMapRaw.(map[string]interface{}) @@ -424,12 +427,12 @@ func expandConfigurations(tfList []interface{}) []*emrcontainers.Configuration { return apiObjects } -func expandConfiguration(tfMap map[string]interface{}) *emrcontainers.Configuration { +func expandConfiguration(tfMap map[string]interface{}) *awstypes.Configuration { if tfMap == nil { return nil } - apiObject := &emrcontainers.Configuration{} + 
apiObject := &awstypes.Configuration{} if v, ok := tfMap["classification"].(string); ok && v != "" { apiObject.Classification = aws.String(v) @@ -446,19 +449,19 @@ func expandConfiguration(tfMap map[string]interface{}) *emrcontainers.Configurat return apiObject } -func expandMonitoringConfiguration(tfMap map[string]interface{}) *emrcontainers.ParametricMonitoringConfiguration { +func expandMonitoringConfiguration(tfMap map[string]interface{}) *awstypes.ParametricMonitoringConfiguration { if tfMap == nil { return nil } - apiObject := &emrcontainers.ParametricMonitoringConfiguration{} + apiObject := &awstypes.ParametricMonitoringConfiguration{} if v, ok := tfMap["cloud_watch_monitoring_configuration"].([]interface{}); ok && len(v) > 0 { apiObject.CloudWatchMonitoringConfiguration = expandCloudWatchMonitoringConfiguration(v[0].(map[string]interface{})) } if v, ok := tfMap["persistent_app_ui"].(string); ok && v != "" { - apiObject.PersistentAppUI = aws.String(v) + apiObject.PersistentAppUI = awstypes.PersistentAppUI(v) } if v, ok := tfMap["s3_monitoring_configuration"].([]interface{}); ok && len(v) > 0 { @@ -468,12 +471,12 @@ func expandMonitoringConfiguration(tfMap map[string]interface{}) *emrcontainers. return apiObject } -func expandCloudWatchMonitoringConfiguration(tfMap map[string]interface{}) *emrcontainers.ParametricCloudWatchMonitoringConfiguration { +func expandCloudWatchMonitoringConfiguration(tfMap map[string]interface{}) *awstypes.ParametricCloudWatchMonitoringConfiguration { if tfMap == nil { return nil } - apiObject := &emrcontainers.ParametricCloudWatchMonitoringConfiguration{} + apiObject := &awstypes.ParametricCloudWatchMonitoringConfiguration{} if v, ok := tfMap["log_group_mame"].(string); ok && v != "" { apiObject.LogGroupName = aws.String(v) @@ -486,12 +489,12 @@ func expandCloudWatchMonitoringConfiguration(tfMap map[string]interface{}) *emrc return apiObject } -func expandS3MonitoringConfiguration(tfMap map[string]interface{}) *emrcontainers.ParametricS3MonitoringConfiguration { +func expandS3MonitoringConfiguration(tfMap map[string]interface{}) *awstypes.ParametricS3MonitoringConfiguration { if tfMap == nil { return nil } - apiObject := &emrcontainers.ParametricS3MonitoringConfiguration{} + apiObject := &awstypes.ParametricS3MonitoringConfiguration{} if v, ok := tfMap["log_uri"].(string); ok && v != "" { apiObject.LogUri = aws.String(v) @@ -500,12 +503,12 @@ func expandS3MonitoringConfiguration(tfMap map[string]interface{}) *emrcontainer return apiObject } -func expandJobDriver(tfMap map[string]interface{}) *emrcontainers.JobDriver { +func expandJobDriver(tfMap map[string]interface{}) *awstypes.JobDriver { if tfMap == nil { return nil } - apiObject := &emrcontainers.JobDriver{} + apiObject := &awstypes.JobDriver{} if v, ok := tfMap["spark_sql_job_driver"].([]interface{}); ok && len(v) > 0 { apiObject.SparkSqlJobDriver = expandSparkSQLJobDriver(v[0].(map[string]interface{})) @@ -518,12 +521,12 @@ func expandJobDriver(tfMap map[string]interface{}) *emrcontainers.JobDriver { return apiObject } -func expandSparkSQLJobDriver(tfMap map[string]interface{}) *emrcontainers.SparkSqlJobDriver { +func expandSparkSQLJobDriver(tfMap map[string]interface{}) *awstypes.SparkSqlJobDriver { if tfMap == nil { return nil } - apiObject := &emrcontainers.SparkSqlJobDriver{} + apiObject := &awstypes.SparkSqlJobDriver{} if v, ok := tfMap["entry_point"].(string); ok && v != "" { apiObject.EntryPoint = aws.String(v) @@ -536,12 +539,12 @@ func expandSparkSQLJobDriver(tfMap map[string]interface{}) 
*emrcontainers.SparkS return apiObject } -func expandSparkSubmitJobDriver(tfMap map[string]interface{}) *emrcontainers.SparkSubmitJobDriver { +func expandSparkSubmitJobDriver(tfMap map[string]interface{}) *awstypes.SparkSubmitJobDriver { if tfMap == nil { return nil } - apiObject := &emrcontainers.SparkSubmitJobDriver{} + apiObject := &awstypes.SparkSubmitJobDriver{} if v, ok := tfMap["entry_point"].(string); ok && v != "" { apiObject.EntryPoint = aws.String(v) @@ -558,7 +561,7 @@ func expandSparkSubmitJobDriver(tfMap map[string]interface{}) *emrcontainers.Spa return apiObject } -func flattenJobTemplateData(apiObject *emrcontainers.JobTemplateData) map[string]interface{} { +func flattenJobTemplateData(apiObject *awstypes.JobTemplateData) map[string]interface{} { if apiObject == nil { return nil } @@ -570,7 +573,7 @@ func flattenJobTemplateData(apiObject *emrcontainers.JobTemplateData) map[string } if v := apiObject.ExecutionRoleArn; v != nil { - tfMap[names.AttrExecutionRoleARN] = aws.StringValue(v) + tfMap[names.AttrExecutionRoleARN] = aws.ToString(v) } if v := apiObject.JobDriver; v != nil { @@ -578,17 +581,17 @@ func flattenJobTemplateData(apiObject *emrcontainers.JobTemplateData) map[string } if v := apiObject.JobTags; v != nil { - tfMap["job_tags"] = aws.StringValueMap(v) + tfMap["job_tags"] = v } if v := apiObject.ReleaseLabel; v != nil { - tfMap["release_label"] = aws.StringValue(v) + tfMap["release_label"] = aws.ToString(v) } return tfMap } -func flattenConfigurationOverrides(apiObject *emrcontainers.ParametricConfigurationOverrides) map[string]interface{} { +func flattenConfigurationOverrides(apiObject *awstypes.ParametricConfigurationOverrides) map[string]interface{} { if apiObject == nil { return nil } @@ -606,7 +609,7 @@ func flattenConfigurationOverrides(apiObject *emrcontainers.ParametricConfigurat return tfMap } -func flattenConfigurations(apiObjects []*emrcontainers.Configuration) []interface{} { +func flattenConfigurations(apiObjects []*awstypes.Configuration) []interface{} { if len(apiObjects) == 0 { return nil } @@ -624,7 +627,7 @@ func flattenConfigurations(apiObjects []*emrcontainers.Configuration) []interfac return tfList } -func flattenConfiguration(apiObject *emrcontainers.Configuration) map[string]interface{} { +func flattenConfiguration(apiObject *awstypes.Configuration) map[string]interface{} { if apiObject == nil { return nil } @@ -632,17 +635,17 @@ func flattenConfiguration(apiObject *emrcontainers.Configuration) map[string]int tfMap := map[string]interface{}{} if v := apiObject.Classification; v != nil { - tfMap["classification"] = aws.StringValue(v) + tfMap["classification"] = aws.ToString(v) } if v := apiObject.Properties; v != nil { - tfMap[names.AttrProperties] = aws.StringValueMap(v) + tfMap[names.AttrProperties] = v } return tfMap } -func flattenMonitoringConfiguration(apiObject *emrcontainers.ParametricMonitoringConfiguration) map[string]interface{} { +func flattenMonitoringConfiguration(apiObject *awstypes.ParametricMonitoringConfiguration) map[string]interface{} { if apiObject == nil { return nil } @@ -654,7 +657,7 @@ func flattenMonitoringConfiguration(apiObject *emrcontainers.ParametricMonitorin } if v := apiObject.PersistentAppUI; v != nil { - tfMap["persistent_app_ui"] = aws.StringValue(v) + tfMap["persistent_app_ui"] = aws.ToString(v) } if v := apiObject.S3MonitoringConfiguration; v != nil { @@ -664,7 +667,7 @@ func flattenMonitoringConfiguration(apiObject *emrcontainers.ParametricMonitorin return tfMap } -func 
flattenCloudWatchMonitoringConfiguration(apiObject *emrcontainers.ParametricCloudWatchMonitoringConfiguration) map[string]interface{} { +func flattenCloudWatchMonitoringConfiguration(apiObject *awstypes.ParametricCloudWatchMonitoringConfiguration) map[string]interface{} { if apiObject == nil { return nil } @@ -672,17 +675,17 @@ func flattenCloudWatchMonitoringConfiguration(apiObject *emrcontainers.Parametri tfMap := map[string]interface{}{} if v := apiObject.LogGroupName; v != nil { - tfMap[names.AttrLogGroupName] = aws.StringValue(v) + tfMap[names.AttrLogGroupName] = aws.ToString(v) } if v := apiObject.LogStreamNamePrefix; v != nil { - tfMap["log_stream_name_prefix"] = aws.StringValue(v) + tfMap["log_stream_name_prefix"] = aws.ToString(v) } return tfMap } -func flattenS3MonitoringConfiguration(apiObject *emrcontainers.ParametricS3MonitoringConfiguration) map[string]interface{} { +func flattenS3MonitoringConfiguration(apiObject *awstypes.ParametricS3MonitoringConfiguration) map[string]interface{} { if apiObject == nil { return nil } @@ -690,13 +693,13 @@ func flattenS3MonitoringConfiguration(apiObject *emrcontainers.ParametricS3Monit tfMap := map[string]interface{}{} if v := apiObject.LogUri; v != nil { - tfMap["log_uri"] = aws.StringValue(v) + tfMap["log_uri"] = aws.ToString(v) } return tfMap } -func flattenJobDriver(apiObject *emrcontainers.JobDriver) map[string]interface{} { +func flattenJobDriver(apiObject *awstypes.JobDriver) map[string]interface{} { if apiObject == nil { return nil } @@ -714,7 +717,7 @@ func flattenJobDriver(apiObject *emrcontainers.JobDriver) map[string]interface{} return tfMap } -func flattenSparkSQLJobDriver(apiObject *emrcontainers.SparkSqlJobDriver) map[string]interface{} { +func flattenSparkSQLJobDriver(apiObject *awstypes.SparkSqlJobDriver) map[string]interface{} { if apiObject == nil { return nil } @@ -722,17 +725,17 @@ func flattenSparkSQLJobDriver(apiObject *emrcontainers.SparkSqlJobDriver) map[st tfMap := map[string]interface{}{} if v := apiObject.EntryPoint; v != nil { - tfMap["entry_point"] = aws.StringValue(v) + tfMap["entry_point"] = aws.ToString(v) } if v := apiObject.SparkSqlParameters; v != nil { - tfMap["spark_sql_parameters"] = aws.StringValue(v) + tfMap["spark_sql_parameters"] = aws.ToString(v) } return tfMap } -func flattenSparkSubmitJobDriver(apiObject *emrcontainers.SparkSubmitJobDriver) map[string]interface{} { +func flattenSparkSubmitJobDriver(apiObject *awstypes.SparkSubmitJobDriver) map[string]interface{} { if apiObject == nil { return nil } @@ -740,7 +743,7 @@ func flattenSparkSubmitJobDriver(apiObject *emrcontainers.SparkSubmitJobDriver) tfMap := map[string]interface{}{} if v := apiObject.EntryPoint; v != nil { - tfMap["entry_point"] = aws.StringValue(v) + tfMap["entry_point"] = aws.ToString(v) } if v := apiObject.EntryPointArguments; v != nil { @@ -748,16 +751,16 @@ func flattenSparkSubmitJobDriver(apiObject *emrcontainers.SparkSubmitJobDriver) } if v := apiObject.SparkSubmitParameters; v != nil { - tfMap["spark_submit_parameters"] = aws.StringValue(v) + tfMap["spark_submit_parameters"] = aws.ToString(v) } return tfMap } -func findJobTemplate(ctx context.Context, conn *emrcontainers.EMRContainers, input *emrcontainers.DescribeJobTemplateInput) (*emrcontainers.JobTemplate, error) { - output, err := conn.DescribeJobTemplateWithContext(ctx, input) +func findJobTemplate(ctx context.Context, conn *emrcontainers.Client, input *emrcontainers.DescribeJobTemplateInput) (*awstypes.JobTemplate, error) { + output, err := 
conn.DescribeJobTemplate(ctx, input) - if tfawserr.ErrCodeEquals(err, emrcontainers.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -775,7 +778,7 @@ func findJobTemplate(ctx context.Context, conn *emrcontainers.EMRContainers, inp return output.JobTemplate, nil } -func FindJobTemplateByID(ctx context.Context, conn *emrcontainers.EMRContainers, id string) (*emrcontainers.JobTemplate, error) { +func FindJobTemplateByID(ctx context.Context, conn *emrcontainers.Client, id string) (*awstypes.JobTemplate, error) { input := &emrcontainers.DescribeJobTemplateInput{ Id: aws.String(id), } diff --git a/internal/service/emrcontainers/job_template_test.go b/internal/service/emrcontainers/job_template_test.go index 8991e059b2c..ae831dac864 100644 --- a/internal/service/emrcontainers/job_template_test.go +++ b/internal/service/emrcontainers/job_template_test.go @@ -8,7 +8,8 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/service/emrcontainers" + "github.com/aws/aws-sdk-go-v2/service/emrcontainers" + awstypes "github.com/aws/aws-sdk-go-v2/service/emrcontainers/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -21,7 +22,7 @@ import ( func TestAccEMRContainersJobTemplate_basic(t *testing.T) { ctx := acctest.Context(t) - var v emrcontainers.JobTemplate + var v awstypes.JobTemplate rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_emrcontainers_job_template.test" @@ -58,7 +59,7 @@ func TestAccEMRContainersJobTemplate_basic(t *testing.T) { func TestAccEMRContainersJobTemplate_disappears(t *testing.T) { ctx := acctest.Context(t) - var v emrcontainers.JobTemplate + var v awstypes.JobTemplate rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_emrcontainers_job_template.test" @@ -84,7 +85,7 @@ func TestAccEMRContainersJobTemplate_disappears(t *testing.T) { func TestAccEMRContainersJobTemplate_tags(t *testing.T) { ctx := acctest.Context(t) - var v emrcontainers.JobTemplate + var v awstypes.JobTemplate rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_emrcontainers_job_template.test" @@ -113,7 +114,7 @@ func TestAccEMRContainersJobTemplate_tags(t *testing.T) { }) } -func testAccCheckJobTemplateExists(ctx context.Context, n string, v *emrcontainers.JobTemplate) resource.TestCheckFunc { +func testAccCheckJobTemplateExists(ctx context.Context, n string, v *awstypes.JobTemplate) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { @@ -124,7 +125,7 @@ func testAccCheckJobTemplateExists(ctx context.Context, n string, v *emrcontaine return fmt.Errorf("No EMR Containers Job Template ID is set") } - conn := acctest.Provider.Meta().(*conns.AWSClient).EMRContainersConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).EMRContainersClient(ctx) output, err := tfemrcontainers.FindJobTemplateByID(ctx, conn, rs.Primary.ID) @@ -140,7 +141,7 @@ func testAccCheckJobTemplateExists(ctx context.Context, n string, v *emrcontaine func testAccCheckJobTemplateDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).EMRContainersConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).EMRContainersClient(ctx) 
for _, rs := range s.RootModule().Resources { if rs.Type != "aws_emrcontainers_job_template" { diff --git a/internal/service/emrcontainers/sweep.go b/internal/service/emrcontainers/sweep.go index a355f85f0f8..ec821f1c7f8 100644 --- a/internal/service/emrcontainers/sweep.go +++ b/internal/service/emrcontainers/sweep.go @@ -7,8 +7,9 @@ import ( "fmt" "log" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/emrcontainers" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/emrcontainers" + awstypes "github.com/aws/aws-sdk-go-v2/service/emrcontainers/types" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/sweep" "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv1" @@ -32,7 +33,7 @@ func sweepVirtualClusters(region string) error { if err != nil { return fmt.Errorf("error getting client: %s", err) } - conn := client.EMRContainersConn(ctx) + conn := client.EMRContainersClient(ctx) input := &emrcontainers.ListVirtualClustersInput{} sweepResources := make([]sweep.Sweepable, 0) @@ -42,13 +43,13 @@ func sweepVirtualClusters(region string) error { } for _, v := range page.VirtualClusters { - if aws.StringValue(v.State) == emrcontainers.VirtualClusterStateTerminated { + if aws.ToString(v.State) == awstypes.VirtualClusterStateTerminated { continue } r := ResourceVirtualCluster() d := r.Data(nil) - d.SetId(aws.StringValue(v.Id)) + d.SetId(aws.ToString(v.Id)) sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } @@ -80,7 +81,7 @@ func sweepJobTemplates(region string) error { if err != nil { return fmt.Errorf("error getting client: %s", err) } - conn := client.EMRContainersConn(ctx) + conn := client.EMRContainersClient(ctx) input := &emrcontainers.ListJobTemplatesInput{} sweepResources := make([]sweep.Sweepable, 0) @@ -92,7 +93,7 @@ func sweepJobTemplates(region string) error { for _, v := range page.Templates { r := ResourceJobTemplate() d := r.Data(nil) - d.SetId(aws.StringValue(v.Id)) + d.SetId(aws.ToString(v.Id)) sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } diff --git a/internal/service/emrcontainers/virtual_cluster.go b/internal/service/emrcontainers/virtual_cluster.go index 8f85f7ac99f..68c105633a3 100644 --- a/internal/service/emrcontainers/virtual_cluster.go +++ b/internal/service/emrcontainers/virtual_cluster.go @@ -9,14 +9,17 @@ import ( "time" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/emrcontainers" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/emrcontainers" + awstypes "github.com/aws/aws-sdk-go-v2/service/emrcontainers/types" + "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -89,7 +92,7 @@ func ResourceVirtualCluster() 
*schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: validation.StringInSlice(emrcontainers.ContainerProviderType_Values(), false), + ValidateFunc: enum.Validate[awstypes.ContainerProviderType](), }, }, }, @@ -114,7 +117,7 @@ func ResourceVirtualCluster() *schema.Resource { func resourceVirtualClusterCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EMRContainersConn(ctx) + conn := meta.(*conns.AWSClient).EMRContainersClient(ctx) name := d.Get(names.AttrName).(string) input := &emrcontainers.CreateVirtualClusterInput{ @@ -126,13 +129,13 @@ func resourceVirtualClusterCreate(ctx context.Context, d *schema.ResourceData, m input.ContainerProvider = expandContainerProvider(v.([]interface{})[0].(map[string]interface{})) } - output, err := conn.CreateVirtualClusterWithContext(ctx, input) + output, err := conn.CreateVirtualCluster(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating EMR Containers Virtual Cluster (%s): %s", name, err) } - d.SetId(aws.StringValue(output.Id)) + d.SetId(aws.ToString(output.Id)) return append(diags, resourceVirtualClusterRead(ctx, d, meta)...) } @@ -140,7 +143,7 @@ func resourceVirtualClusterCreate(ctx context.Context, d *schema.ResourceData, m func resourceVirtualClusterRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EMRContainersConn(ctx) + conn := meta.(*conns.AWSClient).EMRContainersClient(ctx) vc, err := FindVirtualClusterByID(ctx, conn, d.Id()) @@ -177,24 +180,24 @@ func resourceVirtualClusterUpdate(ctx context.Context, d *schema.ResourceData, m func resourceVirtualClusterDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EMRContainersConn(ctx) + conn := meta.(*conns.AWSClient).EMRContainersClient(ctx) log.Printf("[INFO] Deleting EMR Containers Virtual Cluster: %s", d.Id()) - _, err := conn.DeleteVirtualClusterWithContext(ctx, &emrcontainers.DeleteVirtualClusterInput{ + _, err := conn.DeleteVirtualCluster(ctx, &emrcontainers.DeleteVirtualClusterInput{ Id: aws.String(d.Id()), }) - if tfawserr.ErrCodeEquals(err, emrcontainers.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return diags } // Not actually a validation exception - if tfawserr.ErrMessageContains(err, emrcontainers.ErrCodeValidationException, "not found") { + if tfawserr.ErrMessageContains(err, awstypes.ErrCodeValidationException, "not found") { return diags } // Not actually a validation exception - if tfawserr.ErrMessageContains(err, emrcontainers.ErrCodeValidationException, "already terminated") { + if tfawserr.ErrMessageContains(err, awstypes.ErrCodeValidationException, "already terminated") { return diags } @@ -209,12 +212,12 @@ func resourceVirtualClusterDelete(ctx context.Context, d *schema.ResourceData, m return diags } -func expandContainerProvider(tfMap map[string]interface{}) *emrcontainers.ContainerProvider { +func expandContainerProvider(tfMap map[string]interface{}) *awstypes.ContainerProvider { if tfMap == nil { return nil } - apiObject := &emrcontainers.ContainerProvider{} + apiObject := &awstypes.ContainerProvider{} if v, ok := tfMap[names.AttrID].(string); ok && v != "" { apiObject.Id = aws.String(v) @@ -231,12 +234,12 @@ func expandContainerProvider(tfMap map[string]interface{}) 
*emrcontainers.Contai return apiObject } -func expandContainerInfo(tfMap map[string]interface{}) *emrcontainers.ContainerInfo { +func expandContainerInfo(tfMap map[string]interface{}) *awstypes.ContainerInfo { if tfMap == nil { return nil } - apiObject := &emrcontainers.ContainerInfo{} + apiObject := &awstypes.ContainerInfo{} if v, ok := tfMap["eks_info"].([]interface{}); ok && len(v) > 0 { apiObject.EksInfo = expandEKSInfo(v[0].(map[string]interface{})) @@ -245,12 +248,12 @@ func expandContainerInfo(tfMap map[string]interface{}) *emrcontainers.ContainerI return apiObject } -func expandEKSInfo(tfMap map[string]interface{}) *emrcontainers.EksInfo { +func expandEKSInfo(tfMap map[string]interface{}) *awstypes.EksInfo { if tfMap == nil { return nil } - apiObject := &emrcontainers.EksInfo{} + apiObject := &awstypes.EksInfo{} if v, ok := tfMap[names.AttrNamespace].(string); ok && v != "" { apiObject.Namespace = aws.String(v) @@ -259,7 +262,7 @@ func expandEKSInfo(tfMap map[string]interface{}) *emrcontainers.EksInfo { return apiObject } -func flattenContainerProvider(apiObject *emrcontainers.ContainerProvider) map[string]interface{} { +func flattenContainerProvider(apiObject *awstypes.ContainerProvider) map[string]interface{} { if apiObject == nil { return nil } @@ -267,7 +270,7 @@ func flattenContainerProvider(apiObject *emrcontainers.ContainerProvider) map[st tfMap := map[string]interface{}{} if v := apiObject.Id; v != nil { - tfMap[names.AttrID] = aws.StringValue(v) + tfMap[names.AttrID] = aws.ToString(v) } if v := apiObject.Info; v != nil { @@ -275,13 +278,13 @@ func flattenContainerProvider(apiObject *emrcontainers.ContainerProvider) map[st } if v := apiObject.Type; v != nil { - tfMap[names.AttrType] = aws.StringValue(v) + tfMap[names.AttrType] = aws.ToString(v) } return tfMap } -func flattenContainerInfo(apiObject *emrcontainers.ContainerInfo) map[string]interface{} { +func flattenContainerInfo(apiObject *awstypes.ContainerInfo) map[string]interface{} { if apiObject == nil { return nil } @@ -295,7 +298,7 @@ func flattenContainerInfo(apiObject *emrcontainers.ContainerInfo) map[string]int return tfMap } -func flattenEKSInfo(apiObject *emrcontainers.EksInfo) map[string]interface{} { +func flattenEKSInfo(apiObject *awstypes.EksInfo) map[string]interface{} { if apiObject == nil { return nil } @@ -303,16 +306,16 @@ func flattenEKSInfo(apiObject *emrcontainers.EksInfo) map[string]interface{} { tfMap := map[string]interface{}{} if v := apiObject.Namespace; v != nil { - tfMap[names.AttrNamespace] = aws.StringValue(v) + tfMap[names.AttrNamespace] = aws.ToString(v) } return tfMap } -func findVirtualCluster(ctx context.Context, conn *emrcontainers.EMRContainers, input *emrcontainers.DescribeVirtualClusterInput) (*emrcontainers.VirtualCluster, error) { - output, err := conn.DescribeVirtualClusterWithContext(ctx, input) +func findVirtualCluster(ctx context.Context, conn *emrcontainers.Client, input *emrcontainers.DescribeVirtualClusterInput) (*awstypes.VirtualCluster, error) { + output, err := conn.DescribeVirtualCluster(ctx, input) - if tfawserr.ErrCodeEquals(err, emrcontainers.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -330,7 +333,7 @@ func findVirtualCluster(ctx context.Context, conn *emrcontainers.EMRContainers, return output.VirtualCluster, nil } -func FindVirtualClusterByID(ctx context.Context, conn *emrcontainers.EMRContainers, id string) (*emrcontainers.VirtualCluster, 
error) { +func FindVirtualClusterByID(ctx context.Context, conn *emrcontainers.Client, id string) (*awstypes.VirtualCluster, error) { input := &emrcontainers.DescribeVirtualClusterInput{ Id: aws.String(id), } @@ -341,7 +344,7 @@ func FindVirtualClusterByID(ctx context.Context, conn *emrcontainers.EMRContaine return nil, err } - if state := aws.StringValue(output.State); state == emrcontainers.VirtualClusterStateTerminated { + if state := aws.ToString(output.State); state == awstypes.VirtualClusterStateTerminated { return nil, &retry.NotFoundError{ Message: state, LastRequest: input, @@ -351,7 +354,7 @@ func FindVirtualClusterByID(ctx context.Context, conn *emrcontainers.EMRContaine return output, nil } -func statusVirtualCluster(ctx context.Context, conn *emrcontainers.EMRContainers, id string) retry.StateRefreshFunc { +func statusVirtualCluster(ctx context.Context, conn *emrcontainers.Client, id string) retry.StateRefreshFunc { return func() (interface{}, string, error) { output, err := FindVirtualClusterByID(ctx, conn, id) @@ -363,13 +366,13 @@ func statusVirtualCluster(ctx context.Context, conn *emrcontainers.EMRContainers return nil, "", err } - return output, aws.StringValue(output.State), nil + return output, aws.ToString(output.State), nil } } -func waitVirtualClusterDeleted(ctx context.Context, conn *emrcontainers.EMRContainers, id string, timeout time.Duration) (*emrcontainers.VirtualCluster, error) { +func waitVirtualClusterDeleted(ctx context.Context, conn *emrcontainers.Client, id string, timeout time.Duration) (*awstypes.VirtualCluster, error) { stateConf := &retry.StateChangeConf{ - Pending: []string{emrcontainers.VirtualClusterStateTerminating}, + Pending: []string{awstypes.VirtualClusterStateTerminating}, Target: []string{}, Refresh: statusVirtualCluster(ctx, conn, id), Timeout: timeout, @@ -378,7 +381,7 @@ func waitVirtualClusterDeleted(ctx context.Context, conn *emrcontainers.EMRConta outputRaw, err := stateConf.WaitForStateContext(ctx) - if v, ok := outputRaw.(*emrcontainers.VirtualCluster); ok { + if v, ok := outputRaw.(*awstypes.VirtualCluster); ok { return v, err } diff --git a/internal/service/emrcontainers/virtual_cluster_data_source.go b/internal/service/emrcontainers/virtual_cluster_data_source.go index 0781a864ba7..c15d7d69b3d 100644 --- a/internal/service/emrcontainers/virtual_cluster_data_source.go +++ b/internal/service/emrcontainers/virtual_cluster_data_source.go @@ -6,7 +6,7 @@ package emrcontainers import ( "context" - "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go-v2/aws" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -85,7 +85,7 @@ func DataSourceVirtualCluster() *schema.Resource { func dataSourceVirtualClusterRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EMRContainersConn(ctx) + conn := meta.(*conns.AWSClient).EMRContainersClient(ctx) ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig id := d.Get("virtual_cluster_id").(string) @@ -95,7 +95,7 @@ func dataSourceVirtualClusterRead(ctx context.Context, d *schema.ResourceData, m return sdkdiag.AppendErrorf(diags, "reading EMR Containers Virtual Cluster (%s): %s", id, err) } - d.SetId(aws.StringValue(vc.Id)) + d.SetId(aws.ToString(vc.Id)) d.Set(names.AttrARN, vc.Arn) if vc.ContainerProvider != nil { if err := d.Set("container_provider", 
[]interface{}{flattenContainerProvider(vc.ContainerProvider)}); err != nil { diff --git a/internal/service/emrcontainers/virtual_cluster_test.go b/internal/service/emrcontainers/virtual_cluster_test.go index 1a0e4e221d0..c1937dd8b5b 100644 --- a/internal/service/emrcontainers/virtual_cluster_test.go +++ b/internal/service/emrcontainers/virtual_cluster_test.go @@ -8,7 +8,8 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/service/emrcontainers" + "github.com/aws/aws-sdk-go-v2/service/emrcontainers" + awstypes "github.com/aws/aws-sdk-go-v2/service/emrcontainers/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -21,7 +22,7 @@ import ( func TestAccEMRContainersVirtualCluster_basic(t *testing.T) { ctx := acctest.Context(t) - var v emrcontainers.VirtualCluster + var v awstypes.VirtualCluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_emrcontainers_virtual_cluster.test" testExternalProviders := map[string]resource.ExternalProvider{ @@ -77,7 +78,7 @@ func TestAccEMRContainersVirtualCluster_basic(t *testing.T) { func TestAccEMRContainersVirtualCluster_disappears(t *testing.T) { ctx := acctest.Context(t) - var v emrcontainers.VirtualCluster + var v awstypes.VirtualCluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_emrcontainers_virtual_cluster.test" testExternalProviders := map[string]resource.ExternalProvider{ @@ -111,7 +112,7 @@ func TestAccEMRContainersVirtualCluster_disappears(t *testing.T) { func TestAccEMRContainersVirtualCluster_tags(t *testing.T) { ctx := acctest.Context(t) - var v emrcontainers.VirtualCluster + var v awstypes.VirtualCluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_emrcontainers_virtual_cluster.test" testExternalProviders := map[string]resource.ExternalProvider{ @@ -160,7 +161,7 @@ func TestAccEMRContainersVirtualCluster_tags(t *testing.T) { }) } -func testAccCheckVirtualClusterExists(ctx context.Context, n string, v *emrcontainers.VirtualCluster) resource.TestCheckFunc { +func testAccCheckVirtualClusterExists(ctx context.Context, n string, v *awstypes.VirtualCluster) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { @@ -171,7 +172,7 @@ func testAccCheckVirtualClusterExists(ctx context.Context, n string, v *emrconta return fmt.Errorf("No EMR Containers Virtual Cluster ID is set") } - conn := acctest.Provider.Meta().(*conns.AWSClient).EMRContainersConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).EMRContainersClient(ctx) output, err := tfemrcontainers.FindVirtualClusterByID(ctx, conn, rs.Primary.ID) @@ -187,7 +188,7 @@ func testAccCheckVirtualClusterExists(ctx context.Context, n string, v *emrconta func testAccCheckVirtualClusterDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).EMRContainersConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).EMRContainersClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_emrcontainers_virtual_cluster" { From fc116c7b7a563f1d63231d9bd14d38411ff57e33 Mon Sep 17 00:00:00 2001 From: Matt Burgess <549318+mattburgess@users.noreply.github.com> Date: Tue, 2 Jul 2024 23:15:50 +0100 Subject: [PATCH 08/38] make gen --- internal/service/emrcontainers/generate.go | 2 +- 
.../service_endpoint_resolver_gen.go | 66 ++++---- .../service_endpoints_gen_test.go | 146 +++++++++++++++--- .../emrcontainers/service_package_gen.go | 28 ++-- internal/service/emrcontainers/tags_gen.go | 33 ++-- 5 files changed, 183 insertions(+), 92 deletions(-) diff --git a/internal/service/emrcontainers/generate.go b/internal/service/emrcontainers/generate.go index 3a048572da2..9ec4e069b45 100644 --- a/internal/service/emrcontainers/generate.go +++ b/internal/service/emrcontainers/generate.go @@ -1,7 +1,7 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -//go:generate go run ../../generate/tags/main.go -ListTags -ServiceTagsMap -UpdateTags +//go:generate go run ../../generate/tags/main.go -AWSSDKVersion=2 -KVTValues -ListTags -ServiceTagsMap -SkipTypesImp -UpdateTags //go:generate go run ../../generate/servicepackage/main.go // ONLY generate directives and package declaration! Do not add anything else to this file. diff --git a/internal/service/emrcontainers/service_endpoint_resolver_gen.go b/internal/service/emrcontainers/service_endpoint_resolver_gen.go index 9f03022d390..2836eabb49a 100644 --- a/internal/service/emrcontainers/service_endpoint_resolver_gen.go +++ b/internal/service/emrcontainers/service_endpoint_resolver_gen.go @@ -6,65 +6,63 @@ import ( "context" "fmt" "net" - "net/url" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + emrcontainers_sdkv2 "github.com/aws/aws-sdk-go-v2/service/emrcontainers" + smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/errs" ) -var _ endpoints_sdkv1.Resolver = resolverSDKv1{} +var _ emrcontainers_sdkv2.EndpointResolverV2 = resolverSDKv2{} -type resolverSDKv1 struct { - ctx context.Context +type resolverSDKv2 struct { + defaultResolver emrcontainers_sdkv2.EndpointResolverV2 } -func newEndpointResolverSDKv1(ctx context.Context) resolverSDKv1 { - return resolverSDKv1{ - ctx: ctx, +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: emrcontainers_sdkv2.NewDefaultEndpointResolverV2(), } } -func (r resolverSDKv1) EndpointFor(service, region string, opts ...func(*endpoints_sdkv1.Options)) (endpoint endpoints_sdkv1.ResolvedEndpoint, err error) { - ctx := r.ctx +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params emrcontainers_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) - var opt endpoints_sdkv1.Options - opt.Set(opts...) - - useFIPS := opt.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) - defaultResolver := endpoints_sdkv1.DefaultResolver() + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } - if useFIPS { + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) - endpoint, err = defaultResolver.EndpointFor(service, region, opts...) 
+ endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) if err != nil { return endpoint, err } tflog.Debug(ctx, "endpoint resolved", map[string]any{ - "tf_aws.endpoint": endpoint.URL, + "tf_aws.endpoint": endpoint.URI.String(), }) - var endpointURL *url.URL - endpointURL, err = url.Parse(endpoint.URL) - if err != nil { - return endpoint, err - } - - hostname := endpointURL.Hostname() + hostname := endpoint.URI.Hostname() _, err = net.LookupHost(hostname) if err != nil { if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ "tf_aws.hostname": hostname, }) - opts = append(opts, func(o *endpoints_sdkv1.Options) { - o.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - }) + params.UseFIPS = aws_sdkv2.Bool(false) } else { - err = fmt.Errorf("looking up accessanalyzer endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up emrcontainers endpoint %q: %s", hostname, err) return } } else { @@ -72,5 +70,13 @@ func (r resolverSDKv1) EndpointFor(service, region string, opts ...func(*endpoin } } - return defaultResolver.EndpointFor(service, region, opts...) + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*emrcontainers_sdkv2.Options) { + return func(o *emrcontainers_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } } diff --git a/internal/service/emrcontainers/service_endpoints_gen_test.go b/internal/service/emrcontainers/service_endpoints_gen_test.go index 5acb2473bcd..6ea12550bf7 100644 --- a/internal/service/emrcontainers/service_endpoints_gen_test.go +++ b/internal/service/emrcontainers/service_endpoints_gen_test.go @@ -4,18 +4,22 @@ package emrcontainers_test import ( "context" + "errors" "fmt" "maps" "net" "net/url" "os" "path/filepath" + "reflect" "strings" "testing" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/endpoints" - emrcontainers_sdkv1 "github.com/aws/aws-sdk-go/service/emrcontainers" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + emrcontainers_sdkv2 "github.com/aws/aws-sdk-go-v2/service/emrcontainers" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" "github.com/google/go-cmp/cmp" "github.com/hashicorp/aws-sdk-go-base/v2/servicemocks" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -240,54 +244,63 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } func defaultEndpoint(region string) (url.URL, error) { - r := endpoints.DefaultResolver() + r := emrcontainers_sdkv2.NewDefaultEndpointResolverV2() - ep, err := r.EndpointFor(emrcontainers_sdkv1.EndpointsID, region) + ep, err := r.ResolveEndpoint(context.Background(), emrcontainers_sdkv2.EndpointParameters{ + Region: aws_sdkv2.String(region), + }) if err != nil { return url.URL{}, err } - url, _ := url.Parse(ep.URL) - - if url.Path == "" { - url.Path = "/" + if ep.URI.Path == "" { + ep.URI.Path = "/" } - return *url, nil + return ep.URI, nil } func defaultFIPSEndpoint(region string) (url.URL, error) { - r := endpoints.DefaultResolver() + r := emrcontainers_sdkv2.NewDefaultEndpointResolverV2() - ep, err := r.EndpointFor(emrcontainers_sdkv1.EndpointsID, region, func(opt *endpoints.Options) { - opt.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled + ep, err := r.ResolveEndpoint(context.Background(), emrcontainers_sdkv2.EndpointParameters{ 
+ Region: aws_sdkv2.String(region), + UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { return url.URL{}, err } - url, _ := url.Parse(ep.URL) - - if url.Path == "" { - url.Path = "/" + if ep.URI.Path == "" { + ep.URI.Path = "/" } - return *url, nil + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { t.Helper() - client := meta.EMRContainersConn(ctx) + client := meta.EMRContainersClient(ctx) - req, _ := client.ListVirtualClustersRequest(&emrcontainers_sdkv1.ListVirtualClustersInput{}) + var result apiCallParams - req.HTTPRequest.URL.Path = "/" - - return apiCallParams{ - endpoint: req.HTTPRequest.URL.String(), - region: aws_sdkv1.StringValue(client.Config.Region), + _, err := client.ListVirtualClusters(ctx, &emrcontainers_sdkv2.ListVirtualClustersInput{}, + func(opts *emrcontainers_sdkv2.Options) { + opts.APIOptions = append(opts.APIOptions, + addRetrieveEndpointURLMiddleware(t, &result.endpoint), + addRetrieveRegionMiddleware(&result.region), + addCancelRequestMiddleware(), + ) + }, + ) + if err == nil { + t.Fatal("Expected an error, got none") + } else if !errors.Is(err, errCancelOperation) { + t.Fatalf("Unexpected error: %s", err) } + + return result } func withNoConfig(_ *caseSetup) { @@ -466,6 +479,89 @@ func testEndpointCase(t *testing.T, region string, testcase endpointTestCase, ca } } +func addRetrieveEndpointURLMiddleware(t *testing.T, endpoint *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + retrieveEndpointURLMiddleware(t, endpoint), + middleware.After, + ) + } +} + +func retrieveEndpointURLMiddleware(t *testing.T, endpoint *string) middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Retrieve Endpoint", + func(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + t.Helper() + + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + t.Fatalf("Expected *github.com/aws/smithy-go/transport/http.Request, got %s", fullTypeName(in.Request)) + } + + url := request.URL + url.RawQuery = "" + url.Path = "/" + + *endpoint = url.String() + + return next.HandleFinalize(ctx, in) + }) +} + +func addRetrieveRegionMiddleware(region *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Serialize.Add( + retrieveRegionMiddleware(region), + middleware.After, + ) + } +} + +func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { + return middleware.SerializeMiddlewareFunc( + "Test: Retrieve Region", + func(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (middleware.SerializeOutput, middleware.Metadata, error) { + *region = awsmiddleware.GetRegion(ctx) + + return next.HandleSerialize(ctx, in) + }, + ) +} + +var errCancelOperation = fmt.Errorf("Test: Canceling request") + +func addCancelRequestMiddleware() func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + cancelRequestMiddleware(), + middleware.After, + ) + } +} + +// cancelRequestMiddleware creates a Smithy middleware that intercepts the request before sending and cancels it +func cancelRequestMiddleware() middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Cancel Requests", + func(_ context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, 
middleware.Metadata, error) { + return middleware.FinalizeOutput{}, middleware.Metadata{}, errCancelOperation + }) +} + +func fullTypeName(i interface{}) string { + return fullValueTypeName(reflect.ValueOf(i)) +} + +func fullValueTypeName(v reflect.Value) string { + if v.Kind() == reflect.Ptr { + return "*" + fullValueTypeName(reflect.Indirect(v)) + } + + requestType := v.Type() + return fmt.Sprintf("%s.%s", requestType.PkgPath(), requestType.Name()) +} + func generateSharedConfigFile(config configFile) string { var buf strings.Builder diff --git a/internal/service/emrcontainers/service_package_gen.go b/internal/service/emrcontainers/service_package_gen.go index f247f7faa4f..500ca69d480 100644 --- a/internal/service/emrcontainers/service_package_gen.go +++ b/internal/service/emrcontainers/service_package_gen.go @@ -5,10 +5,8 @@ package emrcontainers import ( "context" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" - emrcontainers_sdkv1 "github.com/aws/aws-sdk-go/service/emrcontainers" - "github.com/hashicorp/terraform-plugin-log/tflog" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + emrcontainers_sdkv2 "github.com/aws/aws-sdk-go-v2/service/emrcontainers" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -58,22 +56,14 @@ func (p *servicePackage) ServicePackageName() string { return names.EMRContainers } -// NewConn returns a new AWS SDK for Go v1 client for this service package's AWS API. -func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*emrcontainers_sdkv1.EMRContainers, error) { - sess := config[names.AttrSession].(*session_sdkv1.Session) +// NewClient returns a new AWS SDK for Go v2 client for this service package's AWS API. +func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*emrcontainers_sdkv2.Client, error) { + cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - cfg := aws_sdkv1.Config{} - - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - cfg.Endpoint = aws_sdkv1.String(endpoint) - } else { - cfg.EndpointResolver = newEndpointResolverSDKv1(ctx) - } - - return emrcontainers_sdkv1.New(sess.Copy(&cfg)), nil + return emrcontainers_sdkv2.NewFromConfig(cfg, + emrcontainers_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/emrcontainers/tags_gen.go b/internal/service/emrcontainers/tags_gen.go index fe3aa0070f9..23cc8c83739 100644 --- a/internal/service/emrcontainers/tags_gen.go +++ b/internal/service/emrcontainers/tags_gen.go @@ -5,9 +5,8 @@ import ( "context" "fmt" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/emrcontainers" - "github.com/aws/aws-sdk-go/service/emrcontainers/emrcontainersiface" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/emrcontainers" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" @@ -19,12 +18,12 @@ import ( // listTags lists emrcontainers service tags. 
// The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. -func listTags(ctx context.Context, conn emrcontainersiface.EMRContainersAPI, identifier string) (tftags.KeyValueTags, error) { +func listTags(ctx context.Context, conn *emrcontainers.Client, identifier string, optFns ...func(*emrcontainers.Options)) (tftags.KeyValueTags, error) { input := &emrcontainers.ListTagsForResourceInput{ ResourceArn: aws.String(identifier), } - output, err := conn.ListTagsForResourceWithContext(ctx, input) + output, err := conn.ListTagsForResource(ctx, input, optFns...) if err != nil { return tftags.New(ctx, nil), err @@ -36,7 +35,7 @@ func listTags(ctx context.Context, conn emrcontainersiface.EMRContainersAPI, ide // ListTags lists emrcontainers service tags and set them in Context. // It is called from outside this package. func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier string) error { - tags, err := listTags(ctx, meta.(*conns.AWSClient).EMRContainersConn(ctx), identifier) + tags, err := listTags(ctx, meta.(*conns.AWSClient).EMRContainersClient(ctx), identifier) if err != nil { return err @@ -49,21 +48,21 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri return nil } -// map[string]*string handling +// map[string]string handling // Tags returns emrcontainers service tags. -func Tags(tags tftags.KeyValueTags) map[string]*string { - return aws.StringMap(tags.Map()) +func Tags(tags tftags.KeyValueTags) map[string]string { + return tags.Map() } // KeyValueTags creates tftags.KeyValueTags from emrcontainers service tags. -func KeyValueTags(ctx context.Context, tags map[string]*string) tftags.KeyValueTags { +func KeyValueTags(ctx context.Context, tags map[string]string) tftags.KeyValueTags { return tftags.New(ctx, tags) } // getTagsIn returns emrcontainers service tags from Context. // nil is returned if there are no input tags. -func getTagsIn(ctx context.Context) map[string]*string { +func getTagsIn(ctx context.Context) map[string]string { if inContext, ok := tftags.FromContext(ctx); ok { if tags := Tags(inContext.TagsIn.UnwrapOrDefault()); len(tags) > 0 { return tags @@ -74,7 +73,7 @@ func getTagsIn(ctx context.Context) map[string]*string { } // setTagsOut sets emrcontainers service tags in Context. -func setTagsOut(ctx context.Context, tags map[string]*string) { +func setTagsOut(ctx context.Context, tags map[string]string) { if inContext, ok := tftags.FromContext(ctx); ok { inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } @@ -83,7 +82,7 @@ func setTagsOut(ctx context.Context, tags map[string]*string) { // updateTags updates emrcontainers service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. 
-func updateTags(ctx context.Context, conn emrcontainersiface.EMRContainersAPI, identifier string, oldTagsMap, newTagsMap any) error { +func updateTags(ctx context.Context, conn *emrcontainers.Client, identifier string, oldTagsMap, newTagsMap any, optFns ...func(*emrcontainers.Options)) error { oldTags := tftags.New(ctx, oldTagsMap) newTags := tftags.New(ctx, newTagsMap) @@ -94,10 +93,10 @@ func updateTags(ctx context.Context, conn emrcontainersiface.EMRContainersAPI, i if len(removedTags) > 0 { input := &emrcontainers.UntagResourceInput{ ResourceArn: aws.String(identifier), - TagKeys: aws.StringSlice(removedTags.Keys()), + TagKeys: removedTags.Keys(), } - _, err := conn.UntagResourceWithContext(ctx, input) + _, err := conn.UntagResource(ctx, input, optFns...) if err != nil { return fmt.Errorf("untagging resource (%s): %w", identifier, err) @@ -112,7 +111,7 @@ func updateTags(ctx context.Context, conn emrcontainersiface.EMRContainersAPI, i Tags: Tags(updatedTags), } - _, err := conn.TagResourceWithContext(ctx, input) + _, err := conn.TagResource(ctx, input, optFns...) if err != nil { return fmt.Errorf("tagging resource (%s): %w", identifier, err) @@ -125,5 +124,5 @@ func updateTags(ctx context.Context, conn emrcontainersiface.EMRContainersAPI, i // UpdateTags updates emrcontainers service tags. // It is called from outside this package. func (p *servicePackage) UpdateTags(ctx context.Context, meta any, identifier string, oldTags, newTags any) error { - return updateTags(ctx, meta.(*conns.AWSClient).EMRContainersConn(ctx), identifier, oldTags, newTags) + return updateTags(ctx, meta.(*conns.AWSClient).EMRContainersClient(ctx), identifier, oldTags, newTags) } From 302ddde13c1beef48f797faf0c1ba6ede0bfd358 Mon Sep 17 00:00:00 2001 From: Matt Burgess <549318+mattburgess@users.noreply.github.com> Date: Tue, 2 Jul 2024 23:17:03 +0100 Subject: [PATCH 09/38] ermcontainers sweeper: Migrate to AWS SDK v2 --- internal/service/emrcontainers/sweep.go | 55 ++++++++++++------------- 1 file changed, 26 insertions(+), 29 deletions(-) diff --git a/internal/service/emrcontainers/sweep.go b/internal/service/emrcontainers/sweep.go index ec821f1c7f8..a9ae4ae07e4 100644 --- a/internal/service/emrcontainers/sweep.go +++ b/internal/service/emrcontainers/sweep.go @@ -12,7 +12,7 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/emrcontainers/types" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/sweep" - "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv1" + "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv2" ) func RegisterSweepers() { @@ -37,13 +37,22 @@ func sweepVirtualClusters(region string) error { input := &emrcontainers.ListVirtualClustersInput{} sweepResources := make([]sweep.Sweepable, 0) - err = conn.ListVirtualClustersPagesWithContext(ctx, input, func(page *emrcontainers.ListVirtualClustersOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := emrcontainers.NewListVirtualClustersPaginator(conn, input) + + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping EMR Containers Virtual Cluster sweep for %s: %s", region, err) + return nil + } + + if err != nil { + return fmt.Errorf("error listing EMR Containers Virtual Clusters (%s): %w", region, err) } for _, v := range page.VirtualClusters { - if aws.ToString(v.State) == awstypes.VirtualClusterStateTerminated { + if v.State == 
awstypes.VirtualClusterStateTerminated { continue } @@ -54,16 +63,6 @@ func sweepVirtualClusters(region string) error { sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } - return !lastPage - }) - - if awsv1.SkipSweepError(err) { - log.Printf("[WARN] Skipping EMR Containers Virtual Cluster sweep for %s: %s", region, err) - return nil - } - - if err != nil { - return fmt.Errorf("error listing EMR Containers Virtual Clusters (%s): %w", region, err) } err = sweep.SweepOrchestrator(ctx, sweepResources) @@ -85,9 +84,18 @@ func sweepJobTemplates(region string) error { input := &emrcontainers.ListJobTemplatesInput{} sweepResources := make([]sweep.Sweepable, 0) - err = conn.ListJobTemplatesPagesWithContext(ctx, input, func(page *emrcontainers.ListJobTemplatesOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := emrcontainers.NewListJobTemplatesPaginator(conn, input) + + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping EMR Containers Job Template sweep for %s: %s", region, err) + return nil + } + + if err != nil { + return fmt.Errorf("error listing EMR Containers Job Templates (%s): %w", region, err) } for _, v := range page.Templates { @@ -97,17 +105,6 @@ func sweepJobTemplates(region string) error { sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } - - return !lastPage - }) - - if awsv1.SkipSweepError(err) { - log.Printf("[WARN] Skipping EMR Containers Job Template sweep for %s: %s", region, err) - return nil - } - - if err != nil { - return fmt.Errorf("error listing EMR Containers Job Templates (%s): %w", region, err) } err = sweep.SweepOrchestrator(ctx, sweepResources) From 9abd10aee69ddac5865c4d9ac644db667dba4b98 Mon Sep 17 00:00:00 2001 From: Matt Burgess <549318+mattburgess@users.noreply.github.com> Date: Tue, 2 Jul 2024 23:17:32 +0100 Subject: [PATCH 10/38] r/emrcontainers_job_template.go: Migrate to AWS SDK v2 --- .../service/emrcontainers/job_template.go | 49 ++++++------------- .../emrcontainers/job_template_test.go | 1 - 2 files changed, 16 insertions(+), 34 deletions(-) diff --git a/internal/service/emrcontainers/job_template.go b/internal/service/emrcontainers/job_template.go index 647b69d8c69..390060cded0 100644 --- a/internal/service/emrcontainers/job_template.go +++ b/internal/service/emrcontainers/job_template.go @@ -12,7 +12,6 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/emrcontainers" awstypes "github.com/aws/aws-sdk-go-v2/service/emrcontainers/types" - "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" @@ -133,10 +132,10 @@ func ResourceJobTemplate() *schema.Resource { }, }, "persistent_app_ui": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: enum.Validate[awstypes.PersistentAppUI](), + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[awstypes.PersistentAppUI](), }, "s3_monitoring_configuration": { Type: schema.TypeList, @@ -339,7 +338,7 @@ func resourceJobTemplateDelete(ctx context.Context, d *schema.ResourceData, meta } // Not actually a validation exception - if tfawserr.ErrMessageContains(err, awstypes.ErrCodeValidationException, "Template does not exist") { + if errs.IsAErrorMessageContains[*awstypes.ValidationException](err, 
"Template does not exist") { return diags } @@ -374,7 +373,7 @@ func expandJobTemplateData(tfMap map[string]interface{}) *awstypes.JobTemplateDa } if v, ok := tfMap["job_tags"].(map[string]interface{}); ok && len(v) > 0 { - apiObject.JobTags = flex.ExpandStringMap(v) + apiObject.JobTags = flex.ExpandStringValueMap(v) } if v, ok := tfMap["release_label"].(string); ok && v != "" { @@ -401,12 +400,12 @@ func expandConfigurationOverrides(tfMap map[string]interface{}) *awstypes.Parame return apiObject } -func expandConfigurations(tfList []interface{}) []*awstypes.Configuration { +func expandConfigurations(tfList []interface{}) []awstypes.Configuration { if len(tfList) == 0 { return nil } - var apiObjects []*awstypes.Configuration + var apiObjects []awstypes.Configuration for _, tfMapRaw := range tfList { tfMap, ok := tfMapRaw.(map[string]interface{}) @@ -417,22 +416,14 @@ func expandConfigurations(tfList []interface{}) []*awstypes.Configuration { apiObject := expandConfiguration(tfMap) - if apiObject == nil { - continue - } - apiObjects = append(apiObjects, apiObject) } return apiObjects } -func expandConfiguration(tfMap map[string]interface{}) *awstypes.Configuration { - if tfMap == nil { - return nil - } - - apiObject := &awstypes.Configuration{} +func expandConfiguration(tfMap map[string]interface{}) awstypes.Configuration { + apiObject := awstypes.Configuration{} if v, ok := tfMap["classification"].(string); ok && v != "" { apiObject.Classification = aws.String(v) @@ -443,7 +434,7 @@ func expandConfiguration(tfMap map[string]interface{}) *awstypes.Configuration { } if v, ok := tfMap[names.AttrProperties].(map[string]interface{}); ok && len(v) > 0 { - apiObject.Properties = flex.ExpandStringMap(v) + apiObject.Properties = flex.ExpandStringValueMap(v) } return apiObject @@ -461,7 +452,7 @@ func expandMonitoringConfiguration(tfMap map[string]interface{}) *awstypes.Param } if v, ok := tfMap["persistent_app_ui"].(string); ok && v != "" { - apiObject.PersistentAppUI = awstypes.PersistentAppUI(v) + apiObject.PersistentAppUI = aws.String(v) } if v, ok := tfMap["s3_monitoring_configuration"].([]interface{}); ok && len(v) > 0 { @@ -551,7 +542,7 @@ func expandSparkSubmitJobDriver(tfMap map[string]interface{}) *awstypes.SparkSub } if v, ok := tfMap["entry_point_arguments"].(*schema.Set); ok && v.Len() > 0 { - apiObject.EntryPointArguments = flex.ExpandStringSet(v) + apiObject.EntryPointArguments = flex.ExpandStringValueSet(v) } if v, ok := tfMap["spark_submit_parameters"].(string); ok && v != "" { @@ -609,7 +600,7 @@ func flattenConfigurationOverrides(apiObject *awstypes.ParametricConfigurationOv return tfMap } -func flattenConfigurations(apiObjects []*awstypes.Configuration) []interface{} { +func flattenConfigurations(apiObjects []awstypes.Configuration) []interface{} { if len(apiObjects) == 0 { return nil } @@ -617,21 +608,13 @@ func flattenConfigurations(apiObjects []*awstypes.Configuration) []interface{} { var tfList []interface{} for _, apiObject := range apiObjects { - if apiObject == nil { - continue - } - tfList = append(tfList, flattenConfiguration(apiObject)) } return tfList } -func flattenConfiguration(apiObject *awstypes.Configuration) map[string]interface{} { - if apiObject == nil { - return nil - } - +func flattenConfiguration(apiObject awstypes.Configuration) map[string]interface{} { tfMap := map[string]interface{}{} if v := apiObject.Classification; v != nil { @@ -747,7 +730,7 @@ func flattenSparkSubmitJobDriver(apiObject *awstypes.SparkSubmitJobDriver) map[s } if v := 
apiObject.EntryPointArguments; v != nil { - tfMap["entry_point_arguments"] = flex.FlattenStringSet(v) + tfMap["entry_point_arguments"] = flex.FlattenStringValueSet(v) } if v := apiObject.SparkSubmitParameters; v != nil { diff --git a/internal/service/emrcontainers/job_template_test.go b/internal/service/emrcontainers/job_template_test.go index ae831dac864..636a1a34e8b 100644 --- a/internal/service/emrcontainers/job_template_test.go +++ b/internal/service/emrcontainers/job_template_test.go @@ -8,7 +8,6 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go-v2/service/emrcontainers" awstypes "github.com/aws/aws-sdk-go-v2/service/emrcontainers/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" From d2901cf543c105817091f85688377f84ecfbd6e1 Mon Sep 17 00:00:00 2001 From: Matt Burgess <549318+mattburgess@users.noreply.github.com> Date: Thu, 11 Jul 2024 20:13:15 +0100 Subject: [PATCH 11/38] d/virtual_cluster, r/virtual_cluster: Migrate to AWS SDK v2 --- .../service/emrcontainers/virtual_cluster.go | 48 ++++++++----------- .../virtual_cluster_data_source.go | 2 +- .../emrcontainers/virtual_cluster_test.go | 1 - 3 files changed, 22 insertions(+), 29 deletions(-) diff --git a/internal/service/emrcontainers/virtual_cluster.go b/internal/service/emrcontainers/virtual_cluster.go index 68c105633a3..4c2ec118f1f 100644 --- a/internal/service/emrcontainers/virtual_cluster.go +++ b/internal/service/emrcontainers/virtual_cluster.go @@ -12,7 +12,6 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/emrcontainers" awstypes "github.com/aws/aws-sdk-go-v2/service/emrcontainers/types" - "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -89,10 +88,10 @@ func ResourceVirtualCluster() *schema.Resource { }, }, names.AttrType: { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: enum.Validate[awstypes.ContainerProviderType](), + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[awstypes.ContainerProviderType](), }, }, }, @@ -192,12 +191,12 @@ func resourceVirtualClusterDelete(ctx context.Context, d *schema.ResourceData, m } // Not actually a validation exception - if tfawserr.ErrMessageContains(err, awstypes.ErrCodeValidationException, "not found") { + if errs.IsAErrorMessageContains[*awstypes.ValidationException](err, "not found") { return diags } // Not actually a validation exception - if tfawserr.ErrMessageContains(err, awstypes.ErrCodeValidationException, "already terminated") { + if errs.IsAErrorMessageContains[*awstypes.ValidationException](err, "already terminated") { return diags } @@ -228,32 +227,28 @@ func expandContainerProvider(tfMap map[string]interface{}) *awstypes.ContainerPr } if v, ok := tfMap[names.AttrType].(string); ok && v != "" { - apiObject.Type = aws.String(v) + apiObject.Type = awstypes.ContainerProviderType(v) } return apiObject } -func expandContainerInfo(tfMap map[string]interface{}) *awstypes.ContainerInfo { +func expandContainerInfo(tfMap map[string]interface{}) awstypes.ContainerInfo { if tfMap == nil { return nil } - apiObject := &awstypes.ContainerInfo{} + apiObject := &awstypes.ContainerInfoMemberEksInfo{} if v, ok := tfMap["eks_info"].([]interface{}); ok && len(v) > 0 { - apiObject.EksInfo = 
expandEKSInfo(v[0].(map[string]interface{})) + apiObject.Value = expandEKSInfo(v[0].(map[string]interface{})) } return apiObject } -func expandEKSInfo(tfMap map[string]interface{}) *awstypes.EksInfo { - if tfMap == nil { - return nil - } - - apiObject := &awstypes.EksInfo{} +func expandEKSInfo(tfMap map[string]interface{}) awstypes.EksInfo { + apiObject := awstypes.EksInfo{} if v, ok := tfMap[names.AttrNamespace].(string); ok && v != "" { apiObject.Namespace = aws.String(v) @@ -277,22 +272,21 @@ func flattenContainerProvider(apiObject *awstypes.ContainerProvider) map[string] tfMap["info"] = []interface{}{flattenContainerInfo(v)} } - if v := apiObject.Type; v != nil { - tfMap[names.AttrType] = aws.ToString(v) - } + tfMap[names.AttrType] = string(apiObject.Type) return tfMap } -func flattenContainerInfo(apiObject *awstypes.ContainerInfo) map[string]interface{} { +func flattenContainerInfo(apiObject awstypes.ContainerInfo) map[string]interface{} { if apiObject == nil { return nil } tfMap := map[string]interface{}{} - if v := apiObject.EksInfo; v != nil { - tfMap["eks_info"] = []interface{}{flattenEKSInfo(v)} + switch v := apiObject.(type) { + case *awstypes.ContainerInfoMemberEksInfo: + tfMap["eks_info"] = []interface{}{flattenEKSInfo(&v.Value)} } return tfMap @@ -344,9 +338,9 @@ func FindVirtualClusterByID(ctx context.Context, conn *emrcontainers.Client, id return nil, err } - if state := aws.ToString(output.State); state == awstypes.VirtualClusterStateTerminated { + if output.State == awstypes.VirtualClusterStateTerminated { return nil, &retry.NotFoundError{ - Message: state, + Message: string(output.State), LastRequest: input, } } @@ -366,13 +360,13 @@ func statusVirtualCluster(ctx context.Context, conn *emrcontainers.Client, id st return nil, "", err } - return output, aws.ToString(output.State), nil + return output, string(output.State), nil } } func waitVirtualClusterDeleted(ctx context.Context, conn *emrcontainers.Client, id string, timeout time.Duration) (*awstypes.VirtualCluster, error) { stateConf := &retry.StateChangeConf{ - Pending: []string{awstypes.VirtualClusterStateTerminating}, + Pending: enum.Slice(awstypes.VirtualClusterStateTerminating), Target: []string{}, Refresh: statusVirtualCluster(ctx, conn, id), Timeout: timeout, diff --git a/internal/service/emrcontainers/virtual_cluster_data_source.go b/internal/service/emrcontainers/virtual_cluster_data_source.go index c15d7d69b3d..067dd0e04c8 100644 --- a/internal/service/emrcontainers/virtual_cluster_data_source.go +++ b/internal/service/emrcontainers/virtual_cluster_data_source.go @@ -104,7 +104,7 @@ func dataSourceVirtualClusterRead(ctx context.Context, d *schema.ResourceData, m } else { d.Set("container_provider", nil) } - d.Set(names.AttrCreatedAt, aws.TimeValue(vc.CreatedAt).String()) + d.Set(names.AttrCreatedAt, aws.ToTime(vc.CreatedAt).String()) d.Set(names.AttrName, vc.Name) d.Set(names.AttrState, vc.State) d.Set("virtual_cluster_id", vc.Id) diff --git a/internal/service/emrcontainers/virtual_cluster_test.go b/internal/service/emrcontainers/virtual_cluster_test.go index c1937dd8b5b..00fd339d298 100644 --- a/internal/service/emrcontainers/virtual_cluster_test.go +++ b/internal/service/emrcontainers/virtual_cluster_test.go @@ -8,7 +8,6 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go-v2/service/emrcontainers" awstypes "github.com/aws/aws-sdk-go-v2/service/emrcontainers/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" 
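For reference, the `ContainerInfo` handling above follows the AWS SDK for Go v2 convention for union types: the union is an interface and each variant is a `...Member...` wrapper struct holding a `Value`. A minimal sketch of the construct/unpack pattern, assuming only the `ContainerInfo`, `ContainerInfoMemberEksInfo`, and `EksInfo` types that appear in the hunks above (everything else here is illustrative):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	awstypes "github.com/aws/aws-sdk-go-v2/service/emrcontainers/types"
)

func main() {
	// Construct the union by wrapping the concrete variant in its member struct.
	var info awstypes.ContainerInfo = &awstypes.ContainerInfoMemberEksInfo{
		Value: awstypes.EksInfo{Namespace: aws.String("default")},
	}

	// Unpack it with a type switch, as flattenContainerInfo now does.
	switch v := info.(type) {
	case *awstypes.ContainerInfoMemberEksInfo:
		fmt.Println("EKS namespace:", aws.ToString(v.Value.Namespace))
	default:
		fmt.Println("unexpected ContainerInfo variant")
	}
}
```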
From fd61ba8147b542ce30532b189fd3fb3e19d2f565 Mon Sep 17 00:00:00 2001 From: Matt Burgess <549318+mattburgess@users.noreply.github.com> Date: Thu, 11 Jul 2024 21:13:31 +0100 Subject: [PATCH 12/38] Fix lint --- internal/service/emrcontainers/sweep.go | 1 - 1 file changed, 1 deletion(-) diff --git a/internal/service/emrcontainers/sweep.go b/internal/service/emrcontainers/sweep.go index a9ae4ae07e4..548707ba16d 100644 --- a/internal/service/emrcontainers/sweep.go +++ b/internal/service/emrcontainers/sweep.go @@ -62,7 +62,6 @@ func sweepVirtualClusters(region string) error { sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } - } err = sweep.SweepOrchestrator(ctx, sweepResources) From 8434e28454c53260f834aeff1d5ee3279c34cf57 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 18 Jul 2024 13:57:34 -0400 Subject: [PATCH 13/38] fsx: Simplify 'final_backup_tags' validation. --- internal/service/fsx/lustre_file_system.go | 18 ++++++------------ internal/service/fsx/ontap_volume.go | 18 ++++++------------ internal/service/fsx/openzfs_file_system.go | 18 ++++++------------ internal/service/fsx/windows_file_system.go | 18 ++++++------------ 4 files changed, 24 insertions(+), 48 deletions(-) diff --git a/internal/service/fsx/lustre_file_system.go b/internal/service/fsx/lustre_file_system.go index f67b1f5928f..19e22ceebed 100644 --- a/internal/service/fsx/lustre_file_system.go +++ b/internal/service/fsx/lustre_file_system.go @@ -143,20 +143,14 @@ func resourceLustreFileSystem() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ names.AttrKey: { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.All( - validation.StringLenBetween(1, 128), - validation.StringMatch(regexache.MustCompile(`^([\p{L}\p{Z}\p{N}_.:/=+\-@]*)$`), "must be a valid tag key"), - ), + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 128), }, names.AttrValue: { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.All( - validation.StringLenBetween(0, 128), - validation.StringMatch(regexache.MustCompile(`^([\p{L}\p{Z}\p{N}_.:/=+\-@]*)$`), "must be a valid tag value"), - ), + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(0, 128), }, }, }, diff --git a/internal/service/fsx/ontap_volume.go b/internal/service/fsx/ontap_volume.go index 0916bd40fbf..bae93bbd472 100644 --- a/internal/service/fsx/ontap_volume.go +++ b/internal/service/fsx/ontap_volume.go @@ -113,20 +113,14 @@ func resourceONTAPVolume() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ names.AttrKey: { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.All( - validation.StringLenBetween(1, 128), - validation.StringMatch(regexache.MustCompile(`^([\p{L}\p{Z}\p{N}_.:/=+\-@]*)$`), "must be a valid tag key"), - ), + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 128), }, names.AttrValue: { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.All( - validation.StringLenBetween(0, 128), - validation.StringMatch(regexache.MustCompile(`^([\p{L}\p{Z}\p{N}_.:/=+\-@]*)$`), "must be a valid tag value"), - ), + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(0, 128), }, }, }, diff --git a/internal/service/fsx/openzfs_file_system.go b/internal/service/fsx/openzfs_file_system.go index bdc30f635a6..5a95f09c182 100644 --- a/internal/service/fsx/openzfs_file_system.go +++ 
b/internal/service/fsx/openzfs_file_system.go @@ -143,20 +143,14 @@ func resourceOpenZFSFileSystem() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ names.AttrKey: { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.All( - validation.StringLenBetween(1, 128), - validation.StringMatch(regexache.MustCompile(`^([\p{L}\p{Z}\p{N}_.:/=+\-@]*)$`), "must be a valid tag key"), - ), + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 128), }, names.AttrValue: { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.All( - validation.StringLenBetween(0, 128), - validation.StringMatch(regexache.MustCompile(`^([\p{L}\p{Z}\p{N}_.:/=+\-@]*)$`), "must be a valid tag value"), - ), + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(0, 128), }, }, }, diff --git a/internal/service/fsx/windows_file_system.go b/internal/service/fsx/windows_file_system.go index 5c8e9ddc82d..3f55a656be0 100644 --- a/internal/service/fsx/windows_file_system.go +++ b/internal/service/fsx/windows_file_system.go @@ -173,20 +173,14 @@ func resourceWindowsFileSystem() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ names.AttrKey: { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.All( - validation.StringLenBetween(1, 128), - validation.StringMatch(regexache.MustCompile(`^([\p{L}\p{Z}\p{N}_.:/=+\-@]*)$`), "must be a valid tag key"), - ), + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 128), }, names.AttrValue: { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.All( - validation.StringLenBetween(0, 128), - validation.StringMatch(regexache.MustCompile(`^([\p{L}\p{Z}\p{N}_.:/=+\-@]*)$`), "must be a valid tag value"), - ), + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(0, 128), }, }, }, From 1d0b2f03b0010114bce186a6e1d733fa16196ce8 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 18 Jul 2024 14:11:56 -0400 Subject: [PATCH 14/38] r/aws_fsx_lustre_file_system: 'final_backup_tags' is a map. 
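The hand-rolled set of `key`/`value` blocks is replaced with the provider's shared tags schema, so `final_backup_tags` becomes an ordinary map attribute. For context, `tftags.TagsSchema()` is approximately the following (a sketch only; the real helper lives in internal/tags and may carry extra behaviour):

```go
package tags

import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

// TagsSchema: a plain map of string keys to string values, which is what
// allows final_backup_tags to be written as a map in configuration.
func TagsSchema() *schema.Schema {
	return &schema.Schema{
		Type:     schema.TypeMap,
		Optional: true,
		Elem:     &schema.Schema{Type: schema.TypeString},
	}
}
```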
--- internal/service/fsx/lustre_file_system.go | 25 ++--------- .../service/fsx/lustre_file_system_test.go | 44 +++++-------------- .../r/fsx_lustre_file_system.html.markdown | 9 +--- 3 files changed, 15 insertions(+), 63 deletions(-) diff --git a/internal/service/fsx/lustre_file_system.go b/internal/service/fsx/lustre_file_system.go index 19e22ceebed..b8c3ab8c44c 100644 --- a/internal/service/fsx/lustre_file_system.go +++ b/internal/service/fsx/lustre_file_system.go @@ -135,26 +135,7 @@ func resourceLustreFileSystem() *schema.Resource { validation.StringMatch(regexache.MustCompile(`^[0-9].[0-9]+$`), "must be in format x.y"), ), }, - "final_backup_tags": { - Type: schema.TypeSet, - Optional: true, - MinItems: 1, - MaxItems: 50, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrKey: { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringLenBetween(1, 128), - }, - names.AttrValue: { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringLenBetween(0, 128), - }, - }, - }, - }, + "final_backup_tags": tftags.TagsSchema(), "import_path": { Type: schema.TypeString, Optional: true, @@ -683,8 +664,8 @@ func resourceLustreFileSystemDelete(ctx context.Context, d *schema.ResourceData, SkipFinalBackup: aws.Bool(d.Get("skip_final_backup").(bool)), } - if v, ok := d.GetOk("final_backup_tags"); ok { - lustreConfig.FinalBackupTags = expandFinalBackupTags(v.(*schema.Set)) + if v, ok := d.GetOk("final_backup_tags"); ok && len(v.(map[string]interface{})) > 0 { + lustreConfig.FinalBackupTags = Tags(tftags.New(ctx, v)) } input.LustreConfiguration = lustreConfig diff --git a/internal/service/fsx/lustre_file_system_test.go b/internal/service/fsx/lustre_file_system_test.go index 3fd7479b010..89f1ec2f64f 100644 --- a/internal/service/fsx/lustre_file_system_test.go +++ b/internal/service/fsx/lustre_file_system_test.go @@ -6,7 +6,6 @@ package fsx_test import ( "context" "fmt" - "os" "testing" "github.com/YakDriver/regexache" @@ -155,15 +154,12 @@ func TestAccFSxLustreFileSystem_dataCompression(t *testing.T) { func TestAccFSxLustreFileSystem_deleteConfig(t *testing.T) { ctx := acctest.Context(t) - - if os.Getenv("FSX_CREATE_FINAL_BACKUP") != acctest.CtTrue { - t.Skip("Environment variable FSX_CREATE_FINAL_BACKUP is not set to true") - } - - var filesystem1, filesystem2 fsx.FileSystem + var filesystem fsx.FileSystem resourceName := "aws_fsx_lustre_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + acctest.SkipIfEnvVarNotSet(t, "AWS_FSX_CREATE_FINAL_BACKUP") + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), @@ -173,12 +169,10 @@ func TestAccFSxLustreFileSystem_deleteConfig(t *testing.T) { { Config: testAccLustreFileSystemConfig_deleteConfig(rName, acctest.CtKey1, acctest.CtValue1, acctest.CtKey2, acctest.CtValue2), Check: resource.ComposeTestCheckFunc( - testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem1), - resource.TestCheckResourceAttr(resourceName, "final_backup_tags.#", acctest.Ct2), - resource.TestCheckResourceAttr(resourceName, "final_backup_tags.0.key", acctest.CtKey1), - resource.TestCheckResourceAttr(resourceName, "final_backup_tags.0.value", acctest.CtValue1), - resource.TestCheckResourceAttr(resourceName, "final_backup_tags.1.key", acctest.CtKey2), - resource.TestCheckResourceAttr(resourceName, "final_backup_tags.1.value", 
acctest.CtValue2), + testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem), + resource.TestCheckResourceAttr(resourceName, "final_backup_tags.%", acctest.Ct2), + resource.TestCheckResourceAttr(resourceName, "final_backup_tags."+acctest.CtKey1, acctest.CtValue1), + resource.TestCheckResourceAttr(resourceName, "final_backup_tags."+acctest.CtKey2, acctest.CtValue2), resource.TestCheckResourceAttr(resourceName, "skip_final_backup", acctest.CtFalse), ), }, @@ -192,19 +186,6 @@ func TestAccFSxLustreFileSystem_deleteConfig(t *testing.T) { "skip_final_backup", }, }, - { - Config: testAccLustreFileSystemConfig_deleteConfig(rName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, ""), - Check: resource.ComposeTestCheckFunc( - testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem2), - testAccCheckLustreFileSystemNotRecreated(&filesystem1, &filesystem2), - resource.TestCheckResourceAttr(resourceName, "final_backup_tags.#", acctest.Ct2), - resource.TestCheckResourceAttr(resourceName, "final_backup_tags.0.key", acctest.CtKey1), - resource.TestCheckResourceAttr(resourceName, "final_backup_tags.0.value", acctest.CtValue1Updated), - resource.TestCheckResourceAttr(resourceName, "final_backup_tags.1.key", acctest.CtKey2), - resource.TestCheckResourceAttr(resourceName, "final_backup_tags.1.value", ""), - resource.TestCheckResourceAttr(resourceName, "skip_final_backup", acctest.CtFalse), - ), - }, }, }) } @@ -1412,13 +1393,10 @@ resource "aws_fsx_lustre_file_system" "test" { subnet_ids = aws_subnet.test[*].id deployment_type = "PERSISTENT_1" per_unit_storage_throughput = 50 - final_backup_tags { - key = %[1]q - value = %[2]q - } - final_backup_tags { - key = %[3]q - value = %[4]q + + final_backup_tags = { + %[1]q = %[2]q + %[3]q = %[4]q } } `, finalTagKey1, finalTagValue1, finalTagKey2, finalTagValue2)) diff --git a/website/docs/r/fsx_lustre_file_system.html.markdown b/website/docs/r/fsx_lustre_file_system.html.markdown index 99bdd9b8abe..3338f5da358 100644 --- a/website/docs/r/fsx_lustre_file_system.html.markdown +++ b/website/docs/r/fsx_lustre_file_system.html.markdown @@ -40,7 +40,7 @@ The following arguments are optional: * `deployment_type` - (Optional) - The filesystem deployment type. One of: `SCRATCH_1`, `SCRATCH_2`, `PERSISTENT_1`, `PERSISTENT_2`. * `export_path` - (Optional) S3 URI (with optional prefix) where the root of your Amazon FSx file system is exported. Can only be specified with `import_path` argument and the path must use the same Amazon S3 bucket as specified in `import_path`. Set equal to `import_path` to overwrite files on export. Defaults to `s3://{IMPORT BUCKET}/FSxLustre{CREATION TIMESTAMP}`. Only supported on `PERSISTENT_1` deployment types. * `file_system_type_version` - (Optional) Sets the Lustre version for the file system that you're creating. Valid values are 2.10 for `SCRATCH_1`, `SCRATCH_2` and `PERSISTENT_1` deployment types. Valid values for 2.12 include all deployment types. -* `final_backup_tags` - (Optional) List of tags to apply to the file system's final backup. Maximum of 50 items. See [`final_backup_tags` Block](#final_backup_tags-block) for details. +* `final_backup_tags` - (Optional) A map of tags to apply to the file system's final backup. **Note:** If the filesystem uses a Scratch deployment type, final backup during delete will always be skipped and this argument will not be used even when set. 
* `imported_file_chunk_size` - (Optional) For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. Can only be specified with `import_path` argument. Defaults to `1024`. Minimum of `1` and maximum of `512000`. Only supported on `PERSISTENT_1` deployment types. @@ -59,13 +59,6 @@ The following arguments are optional: * `tags` - (Optional) A map of tags to assign to the file system. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `weekly_maintenance_start_time` - (Optional) The preferred start time (in `d:HH:MM` format) to perform weekly maintenance, in the UTC time zone. -### `final_backup_tags` Block - -The `final_backup_tags` configuration block supports the following arguments: - -* `key` - (Required) The name of the tag. -* `value` - (Required) The value assigned to the corresponding tag key. To create a key-only tag, use an empty string as the value. - ### `log_configuration` Block The `log_configuration` configuration block supports the following arguments: From 82c1d447b61d3e7ab9d88133849b726ef374b37c Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 18 Jul 2024 14:17:21 -0400 Subject: [PATCH 15/38] r/aws_fsx_ontap_volume: 'final_backup_tags' is a map. --- internal/service/fsx/ontap_volume.go | 25 ++--------- internal/service/fsx/ontap_volume_test.go | 44 +++++-------------- website/docs/r/fsx_ontap_volume.html.markdown | 9 +--- 3 files changed, 15 insertions(+), 63 deletions(-) diff --git a/internal/service/fsx/ontap_volume.go b/internal/service/fsx/ontap_volume.go index bae93bbd472..dda4d21d64c 100644 --- a/internal/service/fsx/ontap_volume.go +++ b/internal/service/fsx/ontap_volume.go @@ -105,26 +105,7 @@ func resourceONTAPVolume() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "final_backup_tags": { - Type: schema.TypeSet, - Optional: true, - MinItems: 1, - MaxItems: 50, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrKey: { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringLenBetween(1, 128), - }, - names.AttrValue: { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringLenBetween(0, 128), - }, - }, - }, - }, + "final_backup_tags": tftags.TagsSchema(), "flexcache_endpoint_type": { Type: schema.TypeString, Computed: true, @@ -593,8 +574,8 @@ func resourceONTAPVolumeDelete(ctx context.Context, d *schema.ResourceData, meta VolumeId: aws.String(d.Id()), } - if v, ok := d.GetOk("final_backup_tags"); ok { - input.OntapConfiguration.FinalBackupTags = expandFinalBackupTags(v.(*schema.Set)) + if v, ok := d.GetOk("final_backup_tags"); ok && len(v.(map[string]interface{})) > 0 { + input.OntapConfiguration.FinalBackupTags = Tags(tftags.New(ctx, v)) } log.Printf("[DEBUG] Deleting FSx for NetApp ONTAP Volume: %s", d.Id()) diff --git a/internal/service/fsx/ontap_volume_test.go b/internal/service/fsx/ontap_volume_test.go index e91dc274b1b..e434a09d040 100644 --- a/internal/service/fsx/ontap_volume_test.go +++ b/internal/service/fsx/ontap_volume_test.go @@ -6,7 +6,6 @@ package fsx_test import ( "context" "fmt" - "os" "testing" "github.com/YakDriver/regexache" @@ -753,15 +752,12 @@ func TestAccFSxONTAPVolume_volumeStyle(t *testing.T) { func TestAccFSxONTAPVolume_deleteConfig(t *testing.T) { 
ctx := acctest.Context(t) - - if os.Getenv("FSX_CREATE_FINAL_BACKUP") != acctest.CtTrue { - t.Skip("Environment variable FSX_CREATE_FINAL_BACKUP is not set to true") - } - - var volume1, volume2 fsx.Volume + var volume fsx.Volume resourceName := "aws_fsx_ontap_volume.test" rName := fmt.Sprintf("tf_acc_test_%d", sdkacctest.RandInt()) + acctest.SkipIfEnvVarNotSet(t, "AWS_FSX_CREATE_FINAL_BACKUP") + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), @@ -771,12 +767,10 @@ func TestAccFSxONTAPVolume_deleteConfig(t *testing.T) { { Config: testAccONTAPVolumeConfig_deleteConfig(rName, acctest.CtKey1, acctest.CtValue1, acctest.CtKey2, acctest.CtValue2), Check: resource.ComposeTestCheckFunc( - testAccCheckONTAPVolumeExists(ctx, resourceName, &volume1), - resource.TestCheckResourceAttr(resourceName, "final_backup_tags.#", acctest.Ct2), - resource.TestCheckResourceAttr(resourceName, "final_backup_tags.0.key", acctest.CtKey1), - resource.TestCheckResourceAttr(resourceName, "final_backup_tags.0.value", acctest.CtValue1), - resource.TestCheckResourceAttr(resourceName, "final_backup_tags.1.key", acctest.CtKey2), - resource.TestCheckResourceAttr(resourceName, "final_backup_tags.1.value", acctest.CtValue2), + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume), + resource.TestCheckResourceAttr(resourceName, "final_backup_tags.%", acctest.Ct2), + resource.TestCheckResourceAttr(resourceName, "final_backup_tags."+acctest.CtKey1, acctest.CtValue1), + resource.TestCheckResourceAttr(resourceName, "final_backup_tags."+acctest.CtKey2, acctest.CtValue2), resource.TestCheckResourceAttr(resourceName, "skip_final_backup", acctest.CtFalse), ), }, @@ -790,19 +784,6 @@ func TestAccFSxONTAPVolume_deleteConfig(t *testing.T) { "skip_final_backup", }, }, - { - Config: testAccONTAPVolumeConfig_deleteConfig(rName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, ""), - Check: resource.ComposeTestCheckFunc( - testAccCheckONTAPVolumeExists(ctx, resourceName, &volume2), - testAccCheckONTAPVolumeNotRecreated(&volume1, &volume2), - resource.TestCheckResourceAttr(resourceName, "final_backup_tags.#", acctest.Ct2), - resource.TestCheckResourceAttr(resourceName, "final_backup_tags.0.key", acctest.CtKey1), - resource.TestCheckResourceAttr(resourceName, "final_backup_tags.0.value", acctest.CtValue1Updated), - resource.TestCheckResourceAttr(resourceName, "final_backup_tags.1.key", acctest.CtKey2), - resource.TestCheckResourceAttr(resourceName, "final_backup_tags.1.value", ""), - resource.TestCheckResourceAttr(resourceName, "skip_final_backup", acctest.CtFalse), - ), - }, }, }) } @@ -1233,13 +1214,10 @@ resource "aws_fsx_ontap_volume" "test" { skip_final_backup = false storage_efficiency_enabled = true storage_virtual_machine_id = aws_fsx_ontap_storage_virtual_machine.test.id - final_backup_tags { - key = %[2]q - value = %[3]q - } - final_backup_tags { - key = %[4]q - value = %[5]q + + final_backup_tags = { + %[2]q = %[3]q + %[4]q = %[5]q } } `, rName, finalTagKey1, finalTagValue1, finalTagKey2, finalTagValue2)) diff --git a/website/docs/r/fsx_ontap_volume.html.markdown b/website/docs/r/fsx_ontap_volume.html.markdown index 0aacdf16be3..e6c1a0d7865 100644 --- a/website/docs/r/fsx_ontap_volume.html.markdown +++ b/website/docs/r/fsx_ontap_volume.html.markdown @@ -56,7 +56,7 @@ The following arguments are optional: * `aggregate_configuration` - (Optional) The Aggregate 
configuration only applies to `FLEXGROUP` volumes. See [`aggregate_configuration` Block] for details. * `bypass_snaplock_enterprise_retention` - (Optional) Setting this to `true` allows a SnapLock administrator to delete an FSx for ONTAP SnapLock Enterprise volume with unexpired write once, read many (WORM) files. This configuration must be applied separately before attempting to delete the resource to have the desired behavior. Defaults to `false`. * `copy_tags_to_backups` - (Optional) A boolean flag indicating whether tags for the volume should be copied to backups. This value defaults to `false`. -* `final_backup_tags` - (Optional) List of tags to apply to the file system's final backup. Maximum of 50 items. See [`final_backup_tags` Block](#final_backup_tags-block) for details. +* `final_backup_tags` - (Optional) A map of tags to apply to the volume's final backup. * `junction_path` - (Optional) Specifies the location in the storage virtual machine's namespace where the volume is mounted. The junction_path must have a leading forward slash, such as `/vol3` * `ontap_volume_type` - (Optional) Specifies the type of volume, valid values are `RW`, `DP`. Default value is `RW`. These can be set by the ONTAP CLI or API. This setting is used as part of migration and replication [Migrating to Amazon FSx for NetApp ONTAP](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/migrating-fsx-ontap.html) * `security_style` - (Optional) Specifies the volume security style, Valid values are `UNIX`, `NTFS`, and `MIXED`. @@ -77,13 +77,6 @@ The `aggregate_configuration` configuration block supports the following argumen * `aggregates` - (Optional) Used to specify the names of the aggregates on which the volume will be created. Each aggregate needs to be in the format aggrX where X is the number of the aggregate. * `constituents_per_aggregate` - (Optional) Used to explicitly set the number of constituents within the FlexGroup per storage aggregate. the default value is `8`. -### `final_backup_tags` Block - -The `final_backup_tags` configuration block supports the following arguments: - -* `key` - (Required) The name of the tag. -* `value` - (Required) The value assigned to the corresponding tag key. To create a key-only tag, use an empty string as the value. - ### `snaplock_configuration` Block The `snaplock_configuration` configuration block supports the following arguments: From d8ffc608f310aa3061b44c765eb287114e0d1499 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 18 Jul 2024 14:21:13 -0400 Subject: [PATCH 16/38] r/aws_fsx_openzfs_file_system: 'final_backup_tags' is a map. 
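As with the Lustre and ONTAP resources, the delete path now feeds the configured map through `Tags(tftags.New(ctx, v))`. Conceptually that conversion produces one `fsx.Tag` per key/value pair, roughly as sketched below (illustrative only; the generated `Tags` helper in this package is the real implementation, and `fsx.Tag` is the v1 SDK type used elsewhere in these files):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/fsx"
)

// finalBackupTags mirrors what Tags(tftags.New(ctx, v)) yields from the
// Terraform map: one fsx.Tag per key/value pair.
func finalBackupTags(m map[string]interface{}) []*fsx.Tag {
	tags := make([]*fsx.Tag, 0, len(m))
	for k, v := range m {
		tags = append(tags, &fsx.Tag{
			Key:   aws.String(k),
			Value: aws.String(v.(string)),
		})
	}
	return tags
}

func main() {
	fmt.Println(finalBackupTags(map[string]interface{}{"Name": "final-backup"}))
}
```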
--- internal/service/fsx/openzfs_file_system.go | 25 ++--------- .../service/fsx/openzfs_file_system_test.go | 41 ++++--------------- .../r/fsx_openzfs_file_system.html.markdown | 9 +--- 3 files changed, 13 insertions(+), 62 deletions(-) diff --git a/internal/service/fsx/openzfs_file_system.go b/internal/service/fsx/openzfs_file_system.go index 5a95f09c182..cd83f1fdf3b 100644 --- a/internal/service/fsx/openzfs_file_system.go +++ b/internal/service/fsx/openzfs_file_system.go @@ -135,26 +135,7 @@ func resourceOpenZFSFileSystem() *schema.Resource { Computed: true, ForceNew: true, }, - "final_backup_tags": { - Type: schema.TypeSet, - Optional: true, - MinItems: 1, - MaxItems: 50, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrKey: { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringLenBetween(1, 128), - }, - names.AttrValue: { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringLenBetween(0, 128), - }, - }, - }, - }, + "final_backup_tags": tftags.TagsSchema(), names.AttrKMSKeyID: { Type: schema.TypeString, Optional: true, @@ -678,8 +659,8 @@ func resourceOpenZFSFileSystemDelete(ctx context.Context, d *schema.ResourceData input.OpenZFSConfiguration.Options = flex.ExpandStringSet(v.(*schema.Set)) } - if v, ok := d.GetOk("final_backup_tags"); ok { - input.OpenZFSConfiguration.FinalBackupTags = expandFinalBackupTags(v.(*schema.Set)) + if v, ok := d.GetOk("final_backup_tags"); ok && len(v.(map[string]interface{})) > 0 { + input.OpenZFSConfiguration.FinalBackupTags = Tags(tftags.New(ctx, v)) } log.Printf("[DEBUG] Deleting FSx for OpenZFS File System: %s", d.Id()) diff --git a/internal/service/fsx/openzfs_file_system_test.go b/internal/service/fsx/openzfs_file_system_test.go index 6bbe8b6221a..5d230f8eb03 100644 --- a/internal/service/fsx/openzfs_file_system_test.go +++ b/internal/service/fsx/openzfs_file_system_test.go @@ -6,7 +6,6 @@ package fsx_test import ( "context" "fmt" - "os" "testing" "github.com/YakDriver/regexache" @@ -1003,15 +1002,12 @@ func TestAccFSxOpenZFSFileSystem_routeTableIDs(t *testing.T) { func TestAccFSxOpenZFSFileSystem_deleteConfig(t *testing.T) { ctx := acctest.Context(t) - - if os.Getenv("FSX_CREATE_FINAL_BACKUP") != acctest.CtTrue { - t.Skip("Environment variable FSX_CREATE_FINAL_BACKUP is not set to true") - } - var filesystem fsx.FileSystem resourceName := "aws_fsx_openzfs_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + acctest.SkipIfEnvVarNotSet(t, "AWS_FSX_CREATE_FINAL_BACKUP") + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), @@ -1024,11 +1020,9 @@ func TestAccFSxOpenZFSFileSystem_deleteConfig(t *testing.T) { testAccCheckOpenZFSFileSystemExists(ctx, resourceName, &filesystem), resource.TestCheckResourceAttr(resourceName, "delete_options.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "delete_options.0", "DELETE_CHILD_VOLUMES_AND_SNAPSHOTS"), - resource.TestCheckResourceAttr(resourceName, "final_backup_tags.#", acctest.Ct2), - resource.TestCheckResourceAttr(resourceName, "final_backup_tags.0.key", acctest.CtKey1), - resource.TestCheckResourceAttr(resourceName, "final_backup_tags.0.value", acctest.CtValue1), - resource.TestCheckResourceAttr(resourceName, "final_backup_tags.1.key", acctest.CtKey2), - resource.TestCheckResourceAttr(resourceName, "final_backup_tags.1.value", acctest.CtValue2), 
+ resource.TestCheckResourceAttr(resourceName, "final_backup_tags.%", acctest.Ct2), + resource.TestCheckResourceAttr(resourceName, "final_backup_tags."+acctest.CtKey1, acctest.CtValue1), + resource.TestCheckResourceAttr(resourceName, "final_backup_tags."+acctest.CtKey2, acctest.CtValue2), resource.TestCheckResourceAttr(resourceName, "skip_final_backup", acctest.CtFalse), ), }, @@ -1043,20 +1037,6 @@ func TestAccFSxOpenZFSFileSystem_deleteConfig(t *testing.T) { "skip_final_backup", }, }, - { - Config: testAccOpenZFSFileSystemConfig_deleteConfig(rName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, ""), - Check: resource.ComposeTestCheckFunc( - testAccCheckOpenZFSFileSystemExists(ctx, resourceName, &filesystem), - resource.TestCheckResourceAttr(resourceName, "delete_options.#", acctest.Ct1), - resource.TestCheckResourceAttr(resourceName, "delete_options.0", "DELETE_CHILD_VOLUMES_AND_SNAPSHOTS"), - resource.TestCheckResourceAttr(resourceName, "final_backup_tags.#", acctest.Ct2), - resource.TestCheckResourceAttr(resourceName, "final_backup_tags.0.key", acctest.CtKey1), - resource.TestCheckResourceAttr(resourceName, "final_backup_tags.0.value", acctest.CtValue1Updated), - resource.TestCheckResourceAttr(resourceName, "final_backup_tags.1.key", acctest.CtKey2), - resource.TestCheckResourceAttr(resourceName, "final_backup_tags.1.value", ""), - resource.TestCheckResourceAttr(resourceName, "skip_final_backup", acctest.CtFalse), - ), - }, }, }) } @@ -1765,13 +1745,10 @@ resource "aws_fsx_openzfs_file_system" "test" { deployment_type = "SINGLE_AZ_1" throughput_capacity = 64 delete_options = ["DELETE_CHILD_VOLUMES_AND_SNAPSHOTS"] - final_backup_tags { - key = %[1]q - value = %[2]q - } - final_backup_tags { - key = %[3]q - value = %[4]q + + final_backup_tags = { + %[1]q = %[2]q + %[3]q = %[4]q } } `, finalTagKey1, finalTagValue1, finalTagKey2, finalTagValue2)) diff --git a/website/docs/r/fsx_openzfs_file_system.html.markdown b/website/docs/r/fsx_openzfs_file_system.html.markdown index b5030240c6e..35a528a69f3 100644 --- a/website/docs/r/fsx_openzfs_file_system.html.markdown +++ b/website/docs/r/fsx_openzfs_file_system.html.markdown @@ -41,7 +41,7 @@ The following arguments are optional: * `delete_options` - (Optional) List of delete options, which at present supports only one value that specifies whether to delete all child volumes and snapshots when the file system is deleted. Valid values: `DELETE_CHILD_VOLUMES_AND_SNAPSHOTS`. * `disk_iops_configuration` - (Optional) The SSD IOPS configuration for the Amazon FSx for OpenZFS file system. See [`disk_iops_configuration` Block](#disk_iops_configuration-block) for details. * `endpoint_ip_address_range` - (Optional) (Multi-AZ only) Specifies the IP address range in which the endpoints to access your file system will be created. -* `final_backup_tags` - (Optional) List of tags to apply to the file system's final backup. Maximum of 50 items. See [`final_backup_tags` Block](#final_backup_tags-block) for details. +* `final_backup_tags` - (Optional) A map of tags to apply to the file system's final backup. * `kms_key_id` - (Optional) ARN for the KMS Key to encrypt the file system at rest, Defaults to an AWS managed KMS Key. * `preferred_subnet_id` - (Optional) (Multi-AZ only) Required when `deployment_type` is set to `MULTI_AZ_1`. This specifies the subnet in which you want the preferred file server to be located. * `root_volume_configuration` - (Optional) The configuration for the root volume of the file system. 
All other volumes are children or the root volume. See [`root_volume_configuration` Block](#root_volume_configuration-block) for details. @@ -59,13 +59,6 @@ The `disk_iops_configuration` configuration block supports the following argumen * `iops` - (Optional) The total number of SSD IOPS provisioned for the file system. * `mode` - (Optional) Specifies whether the number of IOPS for the file system is using the system. Valid values are `AUTOMATIC` and `USER_PROVISIONED`. Default value is `AUTOMATIC`. -### `final_backup_tags` Block - -The `final_backup_tags` configuration block supports the following arguments: - -* `key` - (Required) The name of the tag. -* `value` - (Required) The value assigned to the corresponding tag key. To create a key-only tag, use an empty string as the value. - ### `root_volume_configuration` Block The `root_volume_configuration` configuration block supports the following arguments: From 5616f09b7dbf15a109b3b4fc5f7edb105716a01c Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 18 Jul 2024 14:24:33 -0400 Subject: [PATCH 17/38] r/aws_fsx_windows_file_system: 'final_backup_tags' is a map. --- internal/service/fsx/lustre_file_system.go | 26 ------------------ internal/service/fsx/windows_file_system.go | 25 +++-------------- .../service/fsx/windows_file_system_test.go | 27 +++++++------------ .../r/fsx_windows_file_system.html.markdown | 9 +------ 4 files changed, 13 insertions(+), 74 deletions(-) diff --git a/internal/service/fsx/lustre_file_system.go b/internal/service/fsx/lustre_file_system.go index b8c3ab8c44c..f530e9cff50 100644 --- a/internal/service/fsx/lustre_file_system.go +++ b/internal/service/fsx/lustre_file_system.go @@ -689,32 +689,6 @@ func resourceLustreFileSystemDelete(ctx context.Context, d *schema.ResourceData, return diags } -func expandFinalBackupTags(cfg *schema.Set) []*fsx.Tag { - tags := []*fsx.Tag{} - - for _, tag := range cfg.List() { - expandedTag := expandFinalBackupTag(tag.(map[string]interface{})) - if expandedTag != nil { - tags = append(tags, expandedTag) - } - } - - return tags -} - -func expandFinalBackupTag(cfg map[string]interface{}) *fsx.Tag { - out := fsx.Tag{} - - if v, ok := cfg[names.AttrKey].(string); ok { - out.Key = aws.String(v) - } - if v, ok := cfg[names.AttrValue].(string); ok { - out.Value = aws.String(v) - } - - return &out -} - func expandLustreRootSquashConfiguration(l []interface{}) *fsx.LustreRootSquashConfiguration { if len(l) == 0 || l[0] == nil { return nil diff --git a/internal/service/fsx/windows_file_system.go b/internal/service/fsx/windows_file_system.go index 3f55a656be0..943f31801ab 100644 --- a/internal/service/fsx/windows_file_system.go +++ b/internal/service/fsx/windows_file_system.go @@ -165,26 +165,7 @@ func resourceWindowsFileSystem() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "final_backup_tags": { - Type: schema.TypeSet, - Optional: true, - MinItems: 1, - MaxItems: 50, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrKey: { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringLenBetween(1, 128), - }, - names.AttrValue: { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringLenBetween(0, 128), - }, - }, - }, - }, + "final_backup_tags": tftags.TagsSchema(), names.AttrKMSKeyID: { Type: schema.TypeString, Optional: true, @@ -637,8 +618,8 @@ func resourceWindowsFileSystemDelete(ctx context.Context, d *schema.ResourceData }, } - if v, ok := d.GetOk("final_backup_tags"); ok { - 
input.WindowsConfiguration.FinalBackupTags = expandFinalBackupTags(v.(*schema.Set)) + if v, ok := d.GetOk("final_backup_tags"); ok && len(v.(map[string]interface{})) > 0 { + input.WindowsConfiguration.FinalBackupTags = Tags(tftags.New(ctx, v)) } log.Printf("[DEBUG] Deleting FSx for Windows File Server File System: %s", d.Id()) diff --git a/internal/service/fsx/windows_file_system_test.go b/internal/service/fsx/windows_file_system_test.go index 42b51c32f10..a29a157926a 100644 --- a/internal/service/fsx/windows_file_system_test.go +++ b/internal/service/fsx/windows_file_system_test.go @@ -6,7 +6,6 @@ package fsx_test import ( "context" "fmt" - "os" "testing" "github.com/YakDriver/regexache" @@ -434,16 +433,13 @@ func TestAccFSxWindowsFileSystem_dailyAutomaticBackupStartTime(t *testing.T) { func TestAccFSxWindowsFileSystem_deleteConfig(t *testing.T) { ctx := acctest.Context(t) - - if os.Getenv("FSX_CREATE_FINAL_BACKUP") != acctest.CtTrue { - t.Skip("Environment variable FSX_CREATE_FINAL_BACKUP is not set to true") - } - var filesystem fsx.FileSystem resourceName := "aws_fsx_windows_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) domainName := acctest.RandomDomainName() + acctest.SkipIfEnvVarNotSet(t, "AWS_FSX_CREATE_FINAL_BACKUP") + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), @@ -454,11 +450,9 @@ func TestAccFSxWindowsFileSystem_deleteConfig(t *testing.T) { Config: testAccWindowsFileSystemConfig_deleteConfig(rName, domainName, acctest.CtKey1, acctest.CtValue1, acctest.CtKey2, acctest.CtValue2), Check: resource.ComposeTestCheckFunc( testAccCheckWindowsFileSystemExists(ctx, resourceName, &filesystem), - resource.TestCheckResourceAttr(resourceName, "final_backup_tags.#", acctest.Ct2), - resource.TestCheckResourceAttr(resourceName, "final_backup_tags.0.key", acctest.CtKey1), - resource.TestCheckResourceAttr(resourceName, "final_backup_tags.0.value", acctest.CtValue1), - resource.TestCheckResourceAttr(resourceName, "final_backup_tags.1.key", acctest.CtKey2), - resource.TestCheckResourceAttr(resourceName, "final_backup_tags.1.value", acctest.CtValue2), + resource.TestCheckResourceAttr(resourceName, "final_backup_tags.%", acctest.Ct2), + resource.TestCheckResourceAttr(resourceName, "final_backup_tags."+acctest.CtKey1, acctest.CtValue1), + resource.TestCheckResourceAttr(resourceName, "final_backup_tags."+acctest.CtKey2, acctest.CtValue2), resource.TestCheckResourceAttr(resourceName, "skip_final_backup", acctest.CtFalse), ), }, @@ -1118,13 +1112,10 @@ resource "aws_fsx_windows_file_system" "test" { storage_capacity = 32 subnet_ids = [aws_subnet.test[0].id] throughput_capacity = 8 - final_backup_tags { - key = %[1]q - value = %[2]q - } - final_backup_tags { - key = %[3]q - value = %[4]q + + final_backup_tags = { + %[1]q = %[2]q + %[3]q = %[4]q } } `, finalTagKey1, finalTagValue1, finalTagKey2, finalTagValue2)) diff --git a/website/docs/r/fsx_windows_file_system.html.markdown b/website/docs/r/fsx_windows_file_system.html.markdown index 96cf67c2f5d..69a4ac941b3 100644 --- a/website/docs/r/fsx_windows_file_system.html.markdown +++ b/website/docs/r/fsx_windows_file_system.html.markdown @@ -66,7 +66,7 @@ The following arguments are optional: * `daily_automatic_backup_start_time` - (Optional) The preferred time (in `HH:MM` format) to take daily automatic backups, in the UTC time zone. 
* `deployment_type` - (Optional) Specifies the file system deployment type, valid values are `MULTI_AZ_1`, `SINGLE_AZ_1` and `SINGLE_AZ_2`. Default value is `SINGLE_AZ_1`. * `disk_iops_configuration` - (Optional) The SSD IOPS configuration for the Amazon FSx for Windows File Server file system. See [`disk_iops_configuration` Block](#disk_iops_configuration-block) for details. -* `final_backup_tags` - (Optional) List of tags to apply to the file system's final backup. Maximum of 50 items. See [`final_backup_tags` Block](#final_backup_tags-block) for details. +* `final_backup_tags` - (Optional) A map of tags to apply to the file system's final backup. * `kms_key_id` - (Optional) ARN for the KMS Key to encrypt the file system at rest. Defaults to an AWS managed KMS Key. * `preferred_subnet_id` - (Optional) Specifies the subnet in which you want the preferred file server to be located. Required for when deployment type is `MULTI_AZ_1`. * `security_group_ids` - (Optional) A list of IDs for the security groups that apply to the specified network interfaces created for file system access. These security groups will apply to all network interfaces. @@ -92,13 +92,6 @@ The `disk_iops_configuration` configuration block supports the following argumen * `iops` - (Optional) The total number of SSD IOPS provisioned for the file system. * `mode` - (Optional) Specifies whether the number of IOPS for the file system is using the system. Valid values are `AUTOMATIC` and `USER_PROVISIONED`. Default value is `AUTOMATIC`. -### `final_backup_tags` Block - -The `final_backup_tags` configuration block supports the following arguments: - -* `key` - (Required) The name of the tag. -* `value` - (Required) The value assigned to the corresponding tag key. To create a key-only tag, use an empty string as the value. - ### `self_managed_active_directory` Block The `self_managed_active_directory` configuration block supports the following arguments: From 7617d146bcb85356a2e68875deae58eb0351e115 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 18 Jul 2024 16:11:34 -0400 Subject: [PATCH 18/38] Fix 'TestAccFSxWindowsFileSystem_deleteConfig'. 
--- internal/service/fsx/windows_file_system_test.go | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/internal/service/fsx/windows_file_system_test.go b/internal/service/fsx/windows_file_system_test.go index a29a157926a..489f687c18b 100644 --- a/internal/service/fsx/windows_file_system_test.go +++ b/internal/service/fsx/windows_file_system_test.go @@ -466,18 +466,6 @@ func TestAccFSxWindowsFileSystem_deleteConfig(t *testing.T) { "skip_final_backup", }, }, - { - Config: testAccWindowsFileSystemConfig_deleteConfig(rName, domainName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, ""), - Check: resource.ComposeTestCheckFunc( - testAccCheckWindowsFileSystemExists(ctx, resourceName, &filesystem), - resource.TestCheckResourceAttr(resourceName, "final_backup_tags.#", acctest.Ct2), - resource.TestCheckResourceAttr(resourceName, "final_backup_tags.0.key", acctest.CtKey1), - resource.TestCheckResourceAttr(resourceName, "final_backup_tags.0.value", acctest.CtValue1Updated), - resource.TestCheckResourceAttr(resourceName, "final_backup_tags.1.key", acctest.CtKey2), - resource.TestCheckResourceAttr(resourceName, "final_backup_tags.1.value", ""), - resource.TestCheckResourceAttr(resourceName, "skip_final_backup", acctest.CtFalse), - ), - }, }, }) } From 8360130353768e95ee31607ebf3b24425d276bc9 Mon Sep 17 00:00:00 2001 From: Matt Burgess <549318+mattburgess@users.noreply.github.com> Date: Thu, 18 Jul 2024 21:21:58 +0100 Subject: [PATCH 19/38] ivs: Migrate to AWS SDK v2 --- go.mod | 1 + go.sum | 2 ++ internal/conns/awsclient_gen.go | 6 +++--- names/data/names_data.hcl | 2 +- names/names.go | 1 + 5 files changed, 8 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index a06ee9a2225..2a2d1ff937a 100644 --- a/go.mod +++ b/go.mod @@ -123,6 +123,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/iot v1.55.3 github.com/aws/aws-sdk-go-v2/service/iotanalytics v1.24.3 github.com/aws/aws-sdk-go-v2/service/iotevents v1.25.3 + github.com/aws/aws-sdk-go-v2/service/ivs v1.37.3 github.com/aws/aws-sdk-go-v2/service/ivschat v1.14.3 github.com/aws/aws-sdk-go-v2/service/kafka v1.35.3 github.com/aws/aws-sdk-go-v2/service/kafkaconnect v1.19.3 diff --git a/go.sum b/go.sum index 6b34fe84473..28b577daf4a 100644 --- a/go.sum +++ b/go.sum @@ -276,6 +276,8 @@ github.com/aws/aws-sdk-go-v2/service/iotanalytics v1.24.3 h1:SEt8SRvlGvnOkqDV5PJ github.com/aws/aws-sdk-go-v2/service/iotanalytics v1.24.3/go.mod h1:XDi19IK0UluaSVnm1mu2AakZKHtWjg6gksitvH7+LQw= github.com/aws/aws-sdk-go-v2/service/iotevents v1.25.3 h1:9Lao6kmD9P+yywuIn9I8hrraJ2jHIztU/GJspIxn6lA= github.com/aws/aws-sdk-go-v2/service/iotevents v1.25.3/go.mod h1:V2BDVrnP+Tn+MM1xxFI7Qcb+YPhiGgY5PUoKzrKHaCQ= +github.com/aws/aws-sdk-go-v2/service/ivs v1.37.3 h1:gBK4NPueWRWK/zma5K3Hc3/hZhoYuKw00Ed2GzOVhg8= +github.com/aws/aws-sdk-go-v2/service/ivs v1.37.3/go.mod h1:gDKrqL8b+YVQ7C/Y152MZ0AxXP6FaksuQ3dbzGCpD7Y= github.com/aws/aws-sdk-go-v2/service/ivschat v1.14.3 h1:d7y5Gs9BfO+1Jhj8y1/lZhegiJXXy/DlanzwRgYrkXM= github.com/aws/aws-sdk-go-v2/service/ivschat v1.14.3/go.mod h1:rtw6VOH+4X/TWoOKQlOC+oq/WBDJD4BqaPi930II6Mk= github.com/aws/aws-sdk-go-v2/service/kafka v1.35.3 h1:MUx27PrqicGxgsiDWo7xv/Zsl4b0X8kHCRvMpX7XrQs= diff --git a/internal/conns/awsclient_gen.go b/internal/conns/awsclient_gen.go index 803afa93c39..470ef94b8e3 100644 --- a/internal/conns/awsclient_gen.go +++ b/internal/conns/awsclient_gen.go @@ -115,6 +115,7 @@ import ( iot_sdkv2 "github.com/aws/aws-sdk-go-v2/service/iot" iotanalytics_sdkv2 "github.com/aws/aws-sdk-go-v2/service/iotanalytics" 
iotevents_sdkv2 "github.com/aws/aws-sdk-go-v2/service/iotevents" + ivs_sdkv2 "github.com/aws/aws-sdk-go-v2/service/ivs" ivschat_sdkv2 "github.com/aws/aws-sdk-go-v2/service/ivschat" kafka_sdkv2 "github.com/aws/aws-sdk-go-v2/service/kafka" kafkaconnect_sdkv2 "github.com/aws/aws-sdk-go-v2/service/kafkaconnect" @@ -221,7 +222,6 @@ import ( guardduty_sdkv1 "github.com/aws/aws-sdk-go/service/guardduty" imagebuilder_sdkv1 "github.com/aws/aws-sdk-go/service/imagebuilder" inspector_sdkv1 "github.com/aws/aws-sdk-go/service/inspector" - ivs_sdkv1 "github.com/aws/aws-sdk-go/service/ivs" kinesisanalytics_sdkv1 "github.com/aws/aws-sdk-go/service/kinesisanalytics" kinesisanalyticsv2_sdkv1 "github.com/aws/aws-sdk-go/service/kinesisanalyticsv2" kinesisvideo_sdkv1 "github.com/aws/aws-sdk-go/service/kinesisvideo" @@ -726,8 +726,8 @@ func (c *AWSClient) IAMClient(ctx context.Context) *iam_sdkv2.Client { return errs.Must(client[*iam_sdkv2.Client](ctx, c, names.IAM, make(map[string]any))) } -func (c *AWSClient) IVSConn(ctx context.Context) *ivs_sdkv1.IVS { - return errs.Must(conn[*ivs_sdkv1.IVS](ctx, c, names.IVS, make(map[string]any))) +func (c *AWSClient) IVSClient(ctx context.Context) *ivs_sdkv2.Client { + return errs.Must(client[*ivs_sdkv2.Client](ctx, c, names.IVS, make(map[string]any))) } func (c *AWSClient) IVSChatClient(ctx context.Context) *ivschat_sdkv2.Client { diff --git a/names/data/names_data.hcl b/names/data/names_data.hcl index c0a14595d88..9c05cace0dd 100644 --- a/names/data/names_data.hcl +++ b/names/data/names_data.hcl @@ -4950,7 +4950,7 @@ service "ivs" { sdk { id = "ivs" - client_version = [1] + client_version = [2] } names { diff --git a/names/names.go b/names/names.go index e57c7a8c4ee..52513d3d423 100644 --- a/names/names.go +++ b/names/names.go @@ -78,6 +78,7 @@ const ( EvidentlyEndpointID = "evidently" FMSEndpointID = "fms" GrafanaEndpointID = "grafana" + IVSEndpointID = "ivs" IVSChatEndpointID = "ivschat" IdentityStoreEndpointID = "identitystore" Inspector2EndpointID = "inspector2" From e697948e2c8812db64e11f356be0e68fac06c55d Mon Sep 17 00:00:00 2001 From: Matt Burgess <549318+mattburgess@users.noreply.github.com> Date: Thu, 18 Jul 2024 21:23:44 +0100 Subject: [PATCH 20/38] gopatch --- internal/service/ivs/channel.go | 31 ++++++----- internal/service/ivs/channel_test.go | 40 +++++++------- internal/service/ivs/find.go | 38 ++++++------- internal/service/ivs/playback_key_pair.go | 22 ++++---- .../service/ivs/playback_key_pair_test.go | 42 ++++++++------- .../service/ivs/recording_configuration.go | 53 ++++++++++--------- .../ivs/recording_configuration_test.go | 46 ++++++++-------- internal/service/ivs/status.go | 23 ++++---- .../service/ivs/stream_key_data_source.go | 6 +-- internal/service/ivs/wait.go | 37 ++++++------- 10 files changed, 178 insertions(+), 160 deletions(-) diff --git a/internal/service/ivs/channel.go b/internal/service/ivs/channel.go index 55fb8e4eea8..e490079ba86 100644 --- a/internal/service/ivs/channel.go +++ b/internal/service/ivs/channel.go @@ -10,14 +10,17 @@ import ( "time" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ivs" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/ivs" + awstypes "github.com/aws/aws-sdk-go-v2/service/ivs/types" + "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" @@ -61,7 +64,7 @@ func ResourceChannel() *schema.Resource { Type: schema.TypeString, Optional: true, Computed: true, - ValidateFunc: validation.StringInSlice(ivs.ChannelLatencyMode_Values(), false), + ValidateFunc: enum.Validate[awstypes.ChannelLatencyMode](), }, names.AttrName: { Type: schema.TypeString, @@ -85,7 +88,7 @@ func ResourceChannel() *schema.Resource { Type: schema.TypeString, Optional: true, Computed: true, - ValidateFunc: validation.StringInSlice(ivs.ChannelType_Values(), false), + ValidateFunc: enum.Validate[awstypes.ChannelType](), }, }, @@ -100,7 +103,7 @@ const ( func resourceChannelCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IVSConn(ctx) + conn := meta.(*conns.AWSClient).IVSClient(ctx) in := &ivs.CreateChannelInput{ Tags: getTagsIn(ctx), @@ -126,7 +129,7 @@ func resourceChannelCreate(ctx context.Context, d *schema.ResourceData, meta int in.Type = aws.String(v.(string)) } - out, err := conn.CreateChannelWithContext(ctx, in) + out, err := conn.CreateChannel(ctx, in) if err != nil { return create.AppendDiagError(diags, names.IVS, create.ErrActionCreating, ResNameChannel, d.Get(names.AttrName).(string), err) } @@ -135,7 +138,7 @@ func resourceChannelCreate(ctx context.Context, d *schema.ResourceData, meta int return create.AppendDiagError(diags, names.IVS, create.ErrActionCreating, ResNameChannel, d.Get(names.AttrName).(string), errors.New("empty output")) } - d.SetId(aws.StringValue(out.Channel.Arn)) + d.SetId(aws.ToString(out.Channel.Arn)) if _, err := waitChannelCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { return create.AppendDiagError(diags, names.IVS, create.ErrActionWaitingForCreation, ResNameChannel, d.Id(), err) @@ -147,7 +150,7 @@ func resourceChannelCreate(ctx context.Context, d *schema.ResourceData, meta int func resourceChannelRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IVSConn(ctx) + conn := meta.(*conns.AWSClient).IVSClient(ctx) out, err := FindChannelByID(ctx, conn, d.Id()) @@ -176,7 +179,7 @@ func resourceChannelRead(ctx context.Context, d *schema.ResourceData, meta inter func resourceChannelUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IVSConn(ctx) + conn := meta.(*conns.AWSClient).IVSClient(ctx) update := false @@ -216,7 +219,7 @@ func resourceChannelUpdate(ctx context.Context, d *schema.ResourceData, meta int log.Printf("[DEBUG] Updating IVS Channel (%s): %#v", d.Id(), in) - out, err := conn.UpdateChannelWithContext(ctx, in) + out, err := conn.UpdateChannel(ctx, in) if err != nil { return create.AppendDiagError(diags, names.IVS, create.ErrActionUpdating, ResNameChannel, d.Id(), err) } @@ -231,16 +234,16 @@ func resourceChannelUpdate(ctx context.Context, d *schema.ResourceData, meta int func resourceChannelDelete(ctx 
context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IVSConn(ctx) + conn := meta.(*conns.AWSClient).IVSClient(ctx) log.Printf("[INFO] Deleting IVS Channel %s", d.Id()) - _, err := conn.DeleteChannelWithContext(ctx, &ivs.DeleteChannelInput{ + _, err := conn.DeleteChannel(ctx, &ivs.DeleteChannelInput{ Arn: aws.String(d.Id()), }) if err != nil { - if tfawserr.ErrCodeEquals(err, ivs.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return diags } diff --git a/internal/service/ivs/channel_test.go b/internal/service/ivs/channel_test.go index 6a21746635e..2150e18ab58 100644 --- a/internal/service/ivs/channel_test.go +++ b/internal/service/ivs/channel_test.go @@ -10,22 +10,24 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ivs" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/ivs" + awstypes "github.com/aws/aws-sdk-go-v2/service/ivs/types" + "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/errs" tfivs "github.com/hashicorp/terraform-provider-aws/internal/service/ivs" "github.com/hashicorp/terraform-provider-aws/names" ) func TestAccIVSChannel_basic(t *testing.T) { ctx := acctest.Context(t) - var channel ivs.Channel + var channel awstypes.Channel resourceName := "aws_ivs_channel.test" @@ -61,7 +63,7 @@ func TestAccIVSChannel_basic(t *testing.T) { func TestAccIVSChannel_tags(t *testing.T) { ctx := acctest.Context(t) - var channel ivs.Channel + var channel awstypes.Channel resourceName := "aws_ivs_channel.test" @@ -111,7 +113,7 @@ func TestAccIVSChannel_tags(t *testing.T) { func TestAccIVSChannel_update(t *testing.T) { ctx := acctest.Context(t) - var v1, v2 ivs.Channel + var v1, v2 awstypes.Channel resourceName := "aws_ivs_channel.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -156,14 +158,14 @@ func TestAccIVSChannel_update(t *testing.T) { func TestAccIVSChannel_disappears(t *testing.T) { ctx := acctest.Context(t) - var channel ivs.Channel + var channel awstypes.Channel resourceName := "aws_ivs_channel.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) - acctest.PreCheckPartitionHasService(t, ivs.EndpointsID) + acctest.PreCheckPartitionHasService(t, names.IVSEndpointID) testAccChannelPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IVSServiceID), @@ -184,7 +186,7 @@ func TestAccIVSChannel_disappears(t *testing.T) { func TestAccIVSChannel_recordingConfiguration(t *testing.T) { ctx := acctest.Context(t) - var channel ivs.Channel + var channel awstypes.Channel bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_ivs_channel.test" recordingConfigurationResourceName := "aws_ivs_recording_configuration.test" @@ -192,7 +194,7 @@ func TestAccIVSChannel_recordingConfiguration(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { 
acctest.PreCheck(ctx, t) - acctest.PreCheckPartitionHasService(t, ivs.EndpointsID) + acctest.PreCheckPartitionHasService(t, names.IVSEndpointID) testAccChannelPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IVSServiceID), @@ -217,7 +219,7 @@ func TestAccIVSChannel_recordingConfiguration(t *testing.T) { func testAccCheckChannelDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).IVSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).IVSClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_ivs_channel" { @@ -227,9 +229,9 @@ func testAccCheckChannelDestroy(ctx context.Context) resource.TestCheckFunc { input := &ivs.GetChannelInput{ Arn: aws.String(rs.Primary.ID), } - _, err := conn.GetChannelWithContext(ctx, input) + _, err := conn.GetChannel(ctx, input) if err != nil { - if tfawserr.ErrCodeEquals(err, ivs.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return nil } @@ -243,7 +245,7 @@ func testAccCheckChannelDestroy(ctx context.Context) resource.TestCheckFunc { } } -func testAccCheckChannelExists(ctx context.Context, name string, channel *ivs.Channel) resource.TestCheckFunc { +func testAccCheckChannelExists(ctx context.Context, name string, channel *awstypes.Channel) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[name] @@ -255,7 +257,7 @@ func testAccCheckChannelExists(ctx context.Context, name string, channel *ivs.Ch return create.Error(names.IVS, create.ErrActionCheckingExistence, tfivs.ResNameChannel, name, errors.New("not set")) } - conn := acctest.Provider.Meta().(*conns.AWSClient).IVSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).IVSClient(ctx) output, err := tfivs.FindChannelByID(ctx, conn, rs.Primary.ID) @@ -270,10 +272,10 @@ func testAccCheckChannelExists(ctx context.Context, name string, channel *ivs.Ch } func testAccChannelPreCheck(ctx context.Context, t *testing.T) { - conn := acctest.Provider.Meta().(*conns.AWSClient).IVSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).IVSClient(ctx) input := &ivs.ListChannelsInput{} - _, err := conn.ListChannelsWithContext(ctx, input) + _, err := conn.ListChannels(ctx, input) if acctest.PreCheckSkipError(err) { t.Skipf("skipping acceptance testing: %s", err) @@ -284,9 +286,9 @@ func testAccChannelPreCheck(ctx context.Context, t *testing.T) { } } -func testAccCheckChannelNotRecreated(before, after *ivs.Channel) resource.TestCheckFunc { +func testAccCheckChannelNotRecreated(before, after *awstypes.Channel) resource.TestCheckFunc { return func(s *terraform.State) error { - if before, after := aws.StringValue(before.Arn), aws.StringValue(after.Arn); before != after { + if before, after := aws.ToString(before.Arn), aws.ToString(after.Arn); before != after { return create.Error(names.IVS, create.ErrActionCheckingNotRecreated, tfivs.ResNameChannel, before, errors.New("recreated")) } diff --git a/internal/service/ivs/find.go b/internal/service/ivs/find.go index d427a3c4219..561c12fddbf 100644 --- a/internal/service/ivs/find.go +++ b/internal/service/ivs/find.go @@ -6,19 +6,21 @@ package ivs import ( "context" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ivs" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/ivs" + awstypes "github.com/aws/aws-sdk-go-v2/service/ivs/types" 
+ "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) -func FindPlaybackKeyPairByID(ctx context.Context, conn *ivs.IVS, id string) (*ivs.PlaybackKeyPair, error) { +func FindPlaybackKeyPairByID(ctx context.Context, conn *ivs.Client, id string) (*awstypes.PlaybackKeyPair, error) { in := &ivs.GetPlaybackKeyPairInput{ Arn: aws.String(id), } - out, err := conn.GetPlaybackKeyPairWithContext(ctx, in) - if tfawserr.ErrCodeEquals(err, ivs.ErrCodeResourceNotFoundException) { + out, err := conn.GetPlaybackKeyPair(ctx, in) + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: in, @@ -36,12 +38,12 @@ func FindPlaybackKeyPairByID(ctx context.Context, conn *ivs.IVS, id string) (*iv return out.KeyPair, nil } -func FindRecordingConfigurationByID(ctx context.Context, conn *ivs.IVS, id string) (*ivs.RecordingConfiguration, error) { +func FindRecordingConfigurationByID(ctx context.Context, conn *ivs.Client, id string) (*awstypes.RecordingConfiguration, error) { in := &ivs.GetRecordingConfigurationInput{ Arn: aws.String(id), } - out, err := conn.GetRecordingConfigurationWithContext(ctx, in) - if tfawserr.ErrCodeEquals(err, ivs.ErrCodeResourceNotFoundException) { + out, err := conn.GetRecordingConfiguration(ctx, in) + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: in, @@ -59,13 +61,13 @@ func FindRecordingConfigurationByID(ctx context.Context, conn *ivs.IVS, id strin return out.RecordingConfiguration, nil } -func FindChannelByID(ctx context.Context, conn *ivs.IVS, arn string) (*ivs.Channel, error) { +func FindChannelByID(ctx context.Context, conn *ivs.Client, arn string) (*awstypes.Channel, error) { in := &ivs.GetChannelInput{ Arn: aws.String(arn), } - out, err := conn.GetChannelWithContext(ctx, in) + out, err := conn.GetChannel(ctx, in) if err != nil { - if tfawserr.ErrCodeEquals(err, ivs.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: in, @@ -82,12 +84,12 @@ func FindChannelByID(ctx context.Context, conn *ivs.IVS, arn string) (*ivs.Chann return out.Channel, nil } -func FindStreamKeyByChannelID(ctx context.Context, conn *ivs.IVS, channelArn string) (*ivs.StreamKey, error) { +func FindStreamKeyByChannelID(ctx context.Context, conn *ivs.Client, channelArn string) (*awstypes.StreamKey, error) { in := &ivs.ListStreamKeysInput{ ChannelArn: aws.String(channelArn), } - out, err := conn.ListStreamKeysWithContext(ctx, in) - if tfawserr.ErrCodeEquals(err, ivs.ErrCodeResourceNotFoundException) { + out, err := conn.ListStreamKeys(ctx, in) + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: in, @@ -109,12 +111,12 @@ func FindStreamKeyByChannelID(ctx context.Context, conn *ivs.IVS, channelArn str return findStreamKeyByID(ctx, conn, *streamKeyArn) } -func findStreamKeyByID(ctx context.Context, conn *ivs.IVS, id string) (*ivs.StreamKey, error) { +func findStreamKeyByID(ctx context.Context, conn *ivs.Client, id string) (*awstypes.StreamKey, error) { in := &ivs.GetStreamKeyInput{ Arn: aws.String(id), } - out, err := conn.GetStreamKeyWithContext(ctx, in) - if tfawserr.ErrCodeEquals(err, ivs.ErrCodeResourceNotFoundException) { + out, 
err := conn.GetStreamKey(ctx, in) + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: in, diff --git a/internal/service/ivs/playback_key_pair.go b/internal/service/ivs/playback_key_pair.go index fb359a6777c..168d6696dc6 100644 --- a/internal/service/ivs/playback_key_pair.go +++ b/internal/service/ivs/playback_key_pair.go @@ -9,13 +9,15 @@ import ( "log" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ivs" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/ivs" + awstypes "github.com/aws/aws-sdk-go-v2/service/ivs/types" + "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/errs" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" @@ -74,7 +76,7 @@ const ( func resourcePlaybackKeyPairCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IVSConn(ctx) + conn := meta.(*conns.AWSClient).IVSClient(ctx) in := &ivs.ImportPlaybackKeyPairInput{ PublicKeyMaterial: aws.String(d.Get(names.AttrPublicKey).(string)), @@ -85,7 +87,7 @@ func resourcePlaybackKeyPairCreate(ctx context.Context, d *schema.ResourceData, in.Name = aws.String(v.(string)) } - out, err := conn.ImportPlaybackKeyPairWithContext(ctx, in) + out, err := conn.ImportPlaybackKeyPair(ctx, in) if err != nil { return create.AppendDiagError(diags, names.IVS, create.ErrActionCreating, ResNamePlaybackKeyPair, d.Get(names.AttrName).(string), err) } @@ -94,7 +96,7 @@ func resourcePlaybackKeyPairCreate(ctx context.Context, d *schema.ResourceData, return create.AppendDiagError(diags, names.IVS, create.ErrActionCreating, ResNamePlaybackKeyPair, d.Get(names.AttrName).(string), errors.New("empty output")) } - d.SetId(aws.StringValue(out.KeyPair.Arn)) + d.SetId(aws.ToString(out.KeyPair.Arn)) if _, err := waitPlaybackKeyPairCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { return create.AppendDiagError(diags, names.IVS, create.ErrActionWaitingForCreation, ResNamePlaybackKeyPair, d.Id(), err) @@ -106,7 +108,7 @@ func resourcePlaybackKeyPairCreate(ctx context.Context, d *schema.ResourceData, func resourcePlaybackKeyPairRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IVSConn(ctx) + conn := meta.(*conns.AWSClient).IVSClient(ctx) out, err := FindPlaybackKeyPairByID(ctx, conn, d.Id()) @@ -130,15 +132,15 @@ func resourcePlaybackKeyPairRead(ctx context.Context, d *schema.ResourceData, me func resourcePlaybackKeyPairDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IVSConn(ctx) + conn := meta.(*conns.AWSClient).IVSClient(ctx) log.Printf("[INFO] Deleting IVS PlaybackKeyPair %s", d.Id()) - _, err := conn.DeletePlaybackKeyPairWithContext(ctx, &ivs.DeletePlaybackKeyPairInput{ + _, err := conn.DeletePlaybackKeyPair(ctx, 
&ivs.DeletePlaybackKeyPairInput{ Arn: aws.String(d.Id()), }) - if tfawserr.ErrCodeEquals(err, ivs.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return diags } diff --git a/internal/service/ivs/playback_key_pair_test.go b/internal/service/ivs/playback_key_pair_test.go index ea68a002aa9..7f115ecbc1a 100644 --- a/internal/service/ivs/playback_key_pair_test.go +++ b/internal/service/ivs/playback_key_pair_test.go @@ -10,15 +10,17 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ivs" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/ivs" + awstypes "github.com/aws/aws-sdk-go-v2/service/ivs/types" + "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/errs" tfivs "github.com/hashicorp/terraform-provider-aws/internal/service/ivs" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -31,7 +33,7 @@ import ( func testAccPlaybackKeyPair_basic(t *testing.T) { ctx := acctest.Context(t) - var playbackKeyPair ivs.PlaybackKeyPair + var playbackKeyPair awstypes.PlaybackKeyPair resourceName := "aws_ivs_playback_key_pair.test" privateKey := acctest.TLSECDSAPrivateKeyPEM(t, "P-384") publicKeyPEM, fingerprint := acctest.TLSECDSAPublicKeyPEM(t, privateKey) @@ -39,7 +41,7 @@ func testAccPlaybackKeyPair_basic(t *testing.T) { resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) - acctest.PreCheckPartitionHasService(t, ivs.EndpointsID) + acctest.PreCheckPartitionHasService(t, names.IVSEndpointID) testAccPlaybackKeyPairPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IVSServiceID), @@ -68,7 +70,7 @@ func testAccPlaybackKeyPair_basic(t *testing.T) { func testAccPlaybackKeyPair_update(t *testing.T) { ctx := acctest.Context(t) - var v1, v2 ivs.PlaybackKeyPair + var v1, v2 awstypes.PlaybackKeyPair rName1 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) rName2 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_ivs_playback_key_pair.test" @@ -80,7 +82,7 @@ func testAccPlaybackKeyPair_update(t *testing.T) { resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) - acctest.PreCheckPartitionHasService(t, ivs.EndpointsID) + acctest.PreCheckPartitionHasService(t, names.IVSEndpointID) testAccPlaybackKeyPairPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IVSServiceID), @@ -110,7 +112,7 @@ func testAccPlaybackKeyPair_update(t *testing.T) { func testAccPlaybackKeyPair_tags(t *testing.T) { ctx := acctest.Context(t) - var v1, v2, v3 ivs.PlaybackKeyPair + var v1, v2, v3 awstypes.PlaybackKeyPair rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_ivs_playback_key_pair.test" privateKey := acctest.TLSECDSAPrivateKeyPEM(t, "P-384") @@ -119,7 +121,7 @@ func testAccPlaybackKeyPair_tags(t *testing.T) { resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) - acctest.PreCheckPartitionHasService(t, ivs.EndpointsID) + 
acctest.PreCheckPartitionHasService(t, names.IVSEndpointID) testAccPlaybackKeyPairPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IVSServiceID), @@ -163,7 +165,7 @@ func testAccPlaybackKeyPair_tags(t *testing.T) { func testAccPlaybackKeyPair_disappears(t *testing.T) { ctx := acctest.Context(t) - var playbackkeypair ivs.PlaybackKeyPair + var playbackkeypair awstypes.PlaybackKeyPair resourceName := "aws_ivs_playback_key_pair.test" privateKey := acctest.TLSECDSAPrivateKeyPEM(t, "P-384") publicKey, _ := acctest.TLSECDSAPublicKeyPEM(t, privateKey) @@ -171,7 +173,7 @@ func testAccPlaybackKeyPair_disappears(t *testing.T) { resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) - acctest.PreCheckPartitionHasService(t, ivs.EndpointsID) + acctest.PreCheckPartitionHasService(t, names.IVSEndpointID) testAccPlaybackKeyPairPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IVSServiceID), @@ -192,7 +194,7 @@ func testAccPlaybackKeyPair_disappears(t *testing.T) { func testAccCheckPlaybackKeyPairDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).IVSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).IVSClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_ivs_playback_key_pair" { @@ -202,9 +204,9 @@ func testAccCheckPlaybackKeyPairDestroy(ctx context.Context) resource.TestCheckF input := &ivs.GetPlaybackKeyPairInput{ Arn: aws.String(rs.Primary.ID), } - _, err := conn.GetPlaybackKeyPairWithContext(ctx, input) + _, err := conn.GetPlaybackKeyPair(ctx, input) if err != nil { - if tfawserr.ErrCodeEquals(err, ivs.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return nil } return err @@ -217,7 +219,7 @@ func testAccCheckPlaybackKeyPairDestroy(ctx context.Context) resource.TestCheckF } } -func testAccCheckPlaybackKeyPairExists(ctx context.Context, name string, playbackkeypair *ivs.PlaybackKeyPair) resource.TestCheckFunc { +func testAccCheckPlaybackKeyPairExists(ctx context.Context, name string, playbackkeypair *awstypes.PlaybackKeyPair) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[name] if !ok { @@ -228,7 +230,7 @@ func testAccCheckPlaybackKeyPairExists(ctx context.Context, name string, playbac return create.Error(names.IVS, create.ErrActionCheckingExistence, tfivs.ResNamePlaybackKeyPair, name, errors.New("not set")) } - conn := acctest.Provider.Meta().(*conns.AWSClient).IVSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).IVSClient(ctx) resp, err := tfivs.FindPlaybackKeyPairByID(ctx, conn, rs.Primary.ID) if err != nil { @@ -242,10 +244,10 @@ func testAccCheckPlaybackKeyPairExists(ctx context.Context, name string, playbac } func testAccPlaybackKeyPairPreCheck(ctx context.Context, t *testing.T) { - conn := acctest.Provider.Meta().(*conns.AWSClient).IVSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).IVSClient(ctx) input := &ivs.ListPlaybackKeyPairsInput{} - _, err := conn.ListPlaybackKeyPairsWithContext(ctx, input) + _, err := conn.ListPlaybackKeyPairs(ctx, input) if acctest.PreCheckSkipError(err) { t.Skipf("skipping acceptance testing: %s", err) @@ -256,9 +258,9 @@ func testAccPlaybackKeyPairPreCheck(ctx context.Context, t *testing.T) { } } -func testAccCheckPlaybackKeyPairRecreated(before, after *ivs.PlaybackKeyPair) resource.TestCheckFunc { +func testAccCheckPlaybackKeyPairRecreated(before, after 
*awstypes.PlaybackKeyPair) resource.TestCheckFunc { return func(s *terraform.State) error { - if before, after := aws.StringValue(before.Arn), aws.StringValue(after.Arn); before == after { + if before, after := aws.ToString(before.Arn), aws.ToString(after.Arn); before == after { return fmt.Errorf("Expected Playback Key Pair IDs to change, %s", before) } diff --git a/internal/service/ivs/recording_configuration.go b/internal/service/ivs/recording_configuration.go index eecb6b35e3e..cc09d4b2a7f 100644 --- a/internal/service/ivs/recording_configuration.go +++ b/internal/service/ivs/recording_configuration.go @@ -10,14 +10,17 @@ import ( "time" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ivs" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/ivs" + awstypes "github.com/aws/aws-sdk-go-v2/service/ivs/types" + "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -103,7 +106,7 @@ func ResourceRecordingConfiguration() *schema.Resource { Type: schema.TypeString, Optional: true, Computed: true, - ValidateFunc: validation.StringInSlice(ivs.RecordingMode_Values(), false), + ValidateFunc: enum.Validate[awstypes.RecordingMode](), }, "target_interval_seconds": { Type: schema.TypeInt, @@ -127,7 +130,7 @@ const ( func resourceRecordingConfigurationCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IVSConn(ctx) + conn := meta.(*conns.AWSClient).IVSClient(ctx) in := &ivs.CreateRecordingConfigurationInput{ DestinationConfiguration: expandDestinationConfiguration(d.Get("destination_configuration").([]interface{})), @@ -145,12 +148,12 @@ func resourceRecordingConfigurationCreate(ctx context.Context, d *schema.Resourc if v, ok := d.GetOk("thumbnail_configuration"); ok { in.ThumbnailConfiguration = expandThumbnailConfiguration(v.([]interface{})) - if aws.StringValue(in.ThumbnailConfiguration.RecordingMode) == ivs.RecordingModeDisabled && in.ThumbnailConfiguration.TargetIntervalSeconds != nil { + if aws.ToString(in.ThumbnailConfiguration.RecordingMode) == awstypes.RecordingModeDisabled && in.ThumbnailConfiguration.TargetIntervalSeconds != nil { return sdkdiag.AppendErrorf(diags, "thumbnail configuration target interval cannot be set if recording_mode is \"DISABLED\"") } } - out, err := conn.CreateRecordingConfigurationWithContext(ctx, in) + out, err := conn.CreateRecordingConfiguration(ctx, in) if err != nil { return create.AppendDiagError(diags, names.IVS, create.ErrActionCreating, ResNameRecordingConfiguration, d.Get(names.AttrName).(string), err) } @@ -159,7 +162,7 @@ func resourceRecordingConfigurationCreate(ctx context.Context, d *schema.Resourc return create.AppendDiagError(diags, names.IVS, create.ErrActionCreating, ResNameRecordingConfiguration, 
d.Get(names.AttrName).(string), errors.New("empty output")) } - d.SetId(aws.StringValue(out.RecordingConfiguration.Arn)) + d.SetId(aws.ToString(out.RecordingConfiguration.Arn)) if _, err := waitRecordingConfigurationCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { return create.AppendDiagError(diags, names.IVS, create.ErrActionWaitingForCreation, ResNameRecordingConfiguration, d.Id(), err) @@ -171,7 +174,7 @@ func resourceRecordingConfigurationCreate(ctx context.Context, d *schema.Resourc func resourceRecordingConfigurationRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IVSConn(ctx) + conn := meta.(*conns.AWSClient).IVSClient(ctx) out, err := FindRecordingConfigurationByID(ctx, conn, d.Id()) @@ -205,15 +208,15 @@ func resourceRecordingConfigurationRead(ctx context.Context, d *schema.ResourceD func resourceRecordingConfigurationDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IVSConn(ctx) + conn := meta.(*conns.AWSClient).IVSClient(ctx) log.Printf("[INFO] Deleting IVS RecordingConfiguration %s", d.Id()) - _, err := conn.DeleteRecordingConfigurationWithContext(ctx, &ivs.DeleteRecordingConfigurationInput{ + _, err := conn.DeleteRecordingConfiguration(ctx, &ivs.DeleteRecordingConfigurationInput{ Arn: aws.String(d.Id()), }) - if tfawserr.ErrCodeEquals(err, ivs.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return diags } @@ -228,7 +231,7 @@ func resourceRecordingConfigurationDelete(ctx context.Context, d *schema.Resourc return diags } -func flattenDestinationConfiguration(apiObject *ivs.DestinationConfiguration) []interface{} { +func flattenDestinationConfiguration(apiObject *awstypes.DestinationConfiguration) []interface{} { if apiObject == nil { return []interface{}{} } @@ -242,7 +245,7 @@ func flattenDestinationConfiguration(apiObject *ivs.DestinationConfiguration) [] return []interface{}{m} } -func flattenS3DestinationConfiguration(apiObject *ivs.S3DestinationConfiguration) []interface{} { +func flattenS3DestinationConfiguration(apiObject *awstypes.S3DestinationConfiguration) []interface{} { if apiObject == nil { return []interface{}{} } @@ -250,13 +253,13 @@ func flattenS3DestinationConfiguration(apiObject *ivs.S3DestinationConfiguration m := map[string]interface{}{} if v := apiObject.BucketName; v != nil { - m[names.AttrBucketName] = aws.StringValue(v) + m[names.AttrBucketName] = aws.ToString(v) } return []interface{}{m} } -func flattenThumbnailConfiguration(apiObject *ivs.ThumbnailConfiguration) []interface{} { +func flattenThumbnailConfiguration(apiObject *awstypes.ThumbnailConfiguration) []interface{} { if apiObject == nil { return []interface{}{} } @@ -264,22 +267,22 @@ func flattenThumbnailConfiguration(apiObject *ivs.ThumbnailConfiguration) []inte m := map[string]interface{}{} if v := apiObject.RecordingMode; v != nil { - m["recording_mode"] = aws.StringValue(v) + m["recording_mode"] = aws.ToString(v) } if v := apiObject.TargetIntervalSeconds; v != nil { - m["target_interval_seconds"] = aws.Int64Value(v) + m["target_interval_seconds"] = aws.ToInt64(v) } return []interface{}{m} } -func expandDestinationConfiguration(vSettings []interface{}) *ivs.DestinationConfiguration { +func expandDestinationConfiguration(vSettings []interface{}) *awstypes.DestinationConfiguration { if len(vSettings) == 0 || vSettings[0] == nil { 
return nil } tfMap := vSettings[0].(map[string]interface{}) - a := &ivs.DestinationConfiguration{} + a := &awstypes.DestinationConfiguration{} if v, ok := tfMap["s3"].([]interface{}); ok && len(v) > 0 { a.S3 = expandS3DestinationConfiguration(v) @@ -288,13 +291,13 @@ func expandDestinationConfiguration(vSettings []interface{}) *ivs.DestinationCon return a } -func expandS3DestinationConfiguration(vSettings []interface{}) *ivs.S3DestinationConfiguration { +func expandS3DestinationConfiguration(vSettings []interface{}) *awstypes.S3DestinationConfiguration { if len(vSettings) == 0 || vSettings[0] == nil { return nil } tfMap := vSettings[0].(map[string]interface{}) - a := &ivs.S3DestinationConfiguration{} + a := &awstypes.S3DestinationConfiguration{} if v, ok := tfMap[names.AttrBucketName].(string); ok && v != "" { a.BucketName = aws.String(v) @@ -303,15 +306,15 @@ func expandS3DestinationConfiguration(vSettings []interface{}) *ivs.S3Destinatio return a } -func expandThumbnailConfiguration(vSettings []interface{}) *ivs.ThumbnailConfiguration { +func expandThumbnailConfiguration(vSettings []interface{}) *awstypes.ThumbnailConfiguration { if len(vSettings) == 0 || vSettings[0] == nil { return nil } - a := &ivs.ThumbnailConfiguration{} + a := &awstypes.ThumbnailConfiguration{} tfMap := vSettings[0].(map[string]interface{}) if v, ok := tfMap["recording_mode"].(string); ok && v != "" { - a.RecordingMode = aws.String(v) + a.RecordingMode = awstypes.RecordingMode(v) } if v, ok := tfMap["target_interval_seconds"].(int); ok { diff --git a/internal/service/ivs/recording_configuration_test.go b/internal/service/ivs/recording_configuration_test.go index c024ae277c6..d383c717dc8 100644 --- a/internal/service/ivs/recording_configuration_test.go +++ b/internal/service/ivs/recording_configuration_test.go @@ -10,15 +10,17 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ivs" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/ivs" + awstypes "github.com/aws/aws-sdk-go-v2/service/ivs/types" + "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/errs" tfivs "github.com/hashicorp/terraform-provider-aws/internal/service/ivs" tfs3 "github.com/hashicorp/terraform-provider-aws/internal/service/s3" "github.com/hashicorp/terraform-provider-aws/names" @@ -26,14 +28,14 @@ import ( func TestAccIVSRecordingConfiguration_basic(t *testing.T) { ctx := acctest.Context(t) - var recordingConfiguration ivs.RecordingConfiguration + var recordingConfiguration awstypes.RecordingConfiguration bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_ivs_recording_configuration.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) - acctest.PreCheckPartitionHasService(t, ivs.EndpointsID) + acctest.PreCheckPartitionHasService(t, names.IVSEndpointID) testAccRecordingConfigurationPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IVSServiceID), @@ -62,7 +64,7 @@ func 
TestAccIVSRecordingConfiguration_basic(t *testing.T) { func TestAccIVSRecordingConfiguration_update(t *testing.T) { ctx := acctest.Context(t) - var v1, v2 ivs.RecordingConfiguration + var v1, v2 awstypes.RecordingConfiguration rName1 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) bucketName1 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) rName2 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -75,7 +77,7 @@ func TestAccIVSRecordingConfiguration_update(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) - acctest.PreCheckPartitionHasService(t, ivs.EndpointsID) + acctest.PreCheckPartitionHasService(t, names.IVSEndpointID) testAccRecordingConfigurationPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IVSServiceID), @@ -113,14 +115,14 @@ func TestAccIVSRecordingConfiguration_update(t *testing.T) { func TestAccIVSRecordingConfiguration_disappears(t *testing.T) { ctx := acctest.Context(t) - var recordingconfiguration ivs.RecordingConfiguration + var recordingconfiguration awstypes.RecordingConfiguration bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_ivs_recording_configuration.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) - acctest.PreCheckPartitionHasService(t, ivs.EndpointsID) + acctest.PreCheckPartitionHasService(t, names.IVSEndpointID) testAccRecordingConfigurationPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IVSServiceID), @@ -141,7 +143,7 @@ func TestAccIVSRecordingConfiguration_disappears(t *testing.T) { func TestAccIVSRecordingConfiguration_disappears_S3Bucket(t *testing.T) { ctx := acctest.Context(t) - var recordingconfiguration ivs.RecordingConfiguration + var recordingconfiguration awstypes.RecordingConfiguration bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) parentResourceName := "aws_s3_bucket.test" resourceName := "aws_ivs_recording_configuration.test" @@ -149,7 +151,7 @@ func TestAccIVSRecordingConfiguration_disappears_S3Bucket(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) - acctest.PreCheckPartitionHasService(t, ivs.EndpointsID) + acctest.PreCheckPartitionHasService(t, names.IVSEndpointID) testAccRecordingConfigurationPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IVSServiceID), @@ -170,14 +172,14 @@ func TestAccIVSRecordingConfiguration_disappears_S3Bucket(t *testing.T) { func TestAccIVSRecordingConfiguration_tags(t *testing.T) { ctx := acctest.Context(t) - var recordingConfiguration ivs.RecordingConfiguration + var recordingConfiguration awstypes.RecordingConfiguration bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_ivs_recording_configuration.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) - acctest.PreCheckPartitionHasService(t, ivs.EndpointsID) + acctest.PreCheckPartitionHasService(t, names.IVSEndpointID) testAccRecordingConfigurationPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IVSServiceID), @@ -220,7 +222,7 @@ func TestAccIVSRecordingConfiguration_tags(t *testing.T) { func testAccCheckRecordingConfigurationDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).IVSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).IVSClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != 
"aws_ivs_recording_configuration" { @@ -230,9 +232,9 @@ func testAccCheckRecordingConfigurationDestroy(ctx context.Context) resource.Tes input := &ivs.GetRecordingConfigurationInput{ Arn: aws.String(rs.Primary.ID), } - _, err := conn.GetRecordingConfigurationWithContext(ctx, input) + _, err := conn.GetRecordingConfiguration(ctx, input) if err != nil { - if tfawserr.ErrCodeEquals(err, ivs.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return nil } return err @@ -245,7 +247,7 @@ func testAccCheckRecordingConfigurationDestroy(ctx context.Context) resource.Tes } } -func testAccCheckRecordingConfigurationExists(ctx context.Context, name string, recordingconfiguration *ivs.RecordingConfiguration) resource.TestCheckFunc { +func testAccCheckRecordingConfigurationExists(ctx context.Context, name string, recordingconfiguration *awstypes.RecordingConfiguration) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[name] if !ok { @@ -256,7 +258,7 @@ func testAccCheckRecordingConfigurationExists(ctx context.Context, name string, return create.Error(names.IVS, create.ErrActionCheckingExistence, tfivs.ResNameRecordingConfiguration, name, errors.New("not set")) } - conn := acctest.Provider.Meta().(*conns.AWSClient).IVSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).IVSClient(ctx) resp, err := tfivs.FindRecordingConfigurationByID(ctx, conn, rs.Primary.ID) @@ -270,9 +272,9 @@ func testAccCheckRecordingConfigurationExists(ctx context.Context, name string, } } -func testAccCheckRecordingConfigurationRecreated(before, after *ivs.RecordingConfiguration) resource.TestCheckFunc { +func testAccCheckRecordingConfigurationRecreated(before, after *awstypes.RecordingConfiguration) resource.TestCheckFunc { return func(s *terraform.State) error { - if before, after := aws.StringValue(before.Arn), aws.StringValue(after.Arn); before == after { + if before, after := aws.ToString(before.Arn), aws.ToString(after.Arn); before == after { return fmt.Errorf("Expected Recording Configuration IDs to change, %s", before) } @@ -281,10 +283,10 @@ func testAccCheckRecordingConfigurationRecreated(before, after *ivs.RecordingCon } func testAccRecordingConfigurationPreCheck(ctx context.Context, t *testing.T) { - conn := acctest.Provider.Meta().(*conns.AWSClient).IVSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).IVSClient(ctx) input := &ivs.ListRecordingConfigurationsInput{} - _, err := conn.ListRecordingConfigurationsWithContext(ctx, input) + _, err := conn.ListRecordingConfigurations(ctx, input) if acctest.PreCheckSkipError(err) { t.Skipf("skipping acceptance testing: %s", err) diff --git a/internal/service/ivs/status.go b/internal/service/ivs/status.go index 7e3b85b9650..252bb0da895 100644 --- a/internal/service/ivs/status.go +++ b/internal/service/ivs/status.go @@ -6,8 +6,9 @@ package ivs import ( "context" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ivs" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/ivs" + awstypes "github.com/aws/aws-sdk-go-v2/service/ivs/types" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) @@ -18,7 +19,7 @@ const ( statusUpdated = "Updated" ) -func statusPlaybackKeyPair(ctx context.Context, conn *ivs.IVS, id string) retry.StateRefreshFunc { +func statusPlaybackKeyPair(ctx context.Context, conn *ivs.Client, id string) retry.StateRefreshFunc { return func() 
(interface{}, string, error) { out, err := FindPlaybackKeyPairByID(ctx, conn, id) if tfresource.NotFound(err) { @@ -33,7 +34,7 @@ func statusPlaybackKeyPair(ctx context.Context, conn *ivs.IVS, id string) retry. } } -func statusRecordingConfiguration(ctx context.Context, conn *ivs.IVS, id string) retry.StateRefreshFunc { +func statusRecordingConfiguration(ctx context.Context, conn *ivs.Client, id string) retry.StateRefreshFunc { return func() (interface{}, string, error) { out, err := FindRecordingConfigurationByID(ctx, conn, id) if tfresource.NotFound(err) { @@ -44,11 +45,11 @@ func statusRecordingConfiguration(ctx context.Context, conn *ivs.IVS, id string) return nil, "", err } - return out, aws.StringValue(out.State), nil + return out, aws.ToString(out.State), nil } } -func statusChannel(ctx context.Context, conn *ivs.IVS, arn string, updateDetails *ivs.UpdateChannelInput) retry.StateRefreshFunc { +func statusChannel(ctx context.Context, conn *ivs.Client, arn string, updateDetails *ivs.UpdateChannelInput) retry.StateRefreshFunc { return func() (interface{}, string, error) { out, err := FindChannelByID(ctx, conn, arn) if tfresource.NotFound(err) { @@ -62,11 +63,11 @@ func statusChannel(ctx context.Context, conn *ivs.IVS, arn string, updateDetails if updateDetails == nil { return out, statusNormal, nil } else { - if (updateDetails.Authorized != nil && aws.BoolValue(updateDetails.Authorized) == aws.BoolValue(out.Authorized)) || - (updateDetails.LatencyMode != nil && aws.StringValue(updateDetails.LatencyMode) == aws.StringValue(out.LatencyMode)) || - (updateDetails.Name != nil && aws.StringValue(updateDetails.Name) == aws.StringValue(out.Name)) || - (updateDetails.RecordingConfigurationArn != nil && aws.StringValue(updateDetails.RecordingConfigurationArn) == aws.StringValue(out.RecordingConfigurationArn)) || - (updateDetails.Type != nil && aws.StringValue(updateDetails.Type) == aws.StringValue(out.Type)) { + if (updateDetails.Authorized != nil && aws.ToBool(updateDetails.Authorized) == aws.ToBool(out.Authorized)) || + (updateDetails.LatencyMode != nil && aws.ToString(updateDetails.LatencyMode) == aws.ToString(out.LatencyMode)) || + (updateDetails.Name != nil && aws.ToString(updateDetails.Name) == aws.ToString(out.Name)) || + (updateDetails.RecordingConfigurationArn != nil && aws.ToString(updateDetails.RecordingConfigurationArn) == aws.ToString(out.RecordingConfigurationArn)) || + (updateDetails.Type != nil && aws.ToString(updateDetails.Type) == aws.ToString(out.Type)) { return out, statusUpdated, nil } return out, statusChangePending, nil diff --git a/internal/service/ivs/stream_key_data_source.go b/internal/service/ivs/stream_key_data_source.go index 57e79eb06c1..9728b8b9745 100644 --- a/internal/service/ivs/stream_key_data_source.go +++ b/internal/service/ivs/stream_key_data_source.go @@ -6,7 +6,7 @@ package ivs import ( "context" - "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go-v2/aws" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -44,7 +44,7 @@ const ( func dataSourceStreamKeyRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IVSConn(ctx) + conn := meta.(*conns.AWSClient).IVSClient(ctx) channelArn := d.Get("channel_arn").(string) @@ -53,7 +53,7 @@ func dataSourceStreamKeyRead(ctx context.Context, d *schema.ResourceData, meta i return 
create.AppendDiagError(diags, names.IVS, create.ErrActionReading, DSNameStreamKey, channelArn, err) } - d.SetId(aws.StringValue(out.Arn)) + d.SetId(aws.ToString(out.Arn)) d.Set(names.AttrARN, out.Arn) d.Set("channel_arn", out.ChannelArn) diff --git a/internal/service/ivs/wait.go b/internal/service/ivs/wait.go index 4279c735999..92ceede4019 100644 --- a/internal/service/ivs/wait.go +++ b/internal/service/ivs/wait.go @@ -7,11 +7,12 @@ import ( "context" "time" - "github.com/aws/aws-sdk-go/service/ivs" + "github.com/aws/aws-sdk-go-v2/service/ivs" + awstypes "github.com/aws/aws-sdk-go-v2/service/ivs/types" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" ) -func waitPlaybackKeyPairCreated(ctx context.Context, conn *ivs.IVS, id string, timeout time.Duration) (*ivs.PlaybackKeyPair, error) { +func waitPlaybackKeyPairCreated(ctx context.Context, conn *ivs.Client, id string, timeout time.Duration) (*awstypes.PlaybackKeyPair, error) { stateConf := &retry.StateChangeConf{ Pending: []string{}, Target: []string{statusNormal}, @@ -22,14 +23,14 @@ func waitPlaybackKeyPairCreated(ctx context.Context, conn *ivs.IVS, id string, t } outputRaw, err := stateConf.WaitForStateContext(ctx) - if out, ok := outputRaw.(*ivs.PlaybackKeyPair); ok { + if out, ok := outputRaw.(*awstypes.PlaybackKeyPair); ok { return out, err } return nil, err } -func waitPlaybackKeyPairDeleted(ctx context.Context, conn *ivs.IVS, id string, timeout time.Duration) (*ivs.PlaybackKeyPair, error) { +func waitPlaybackKeyPairDeleted(ctx context.Context, conn *ivs.Client, id string, timeout time.Duration) (*awstypes.PlaybackKeyPair, error) { stateConf := &retry.StateChangeConf{ Pending: []string{statusNormal}, Target: []string{}, @@ -38,17 +39,17 @@ func waitPlaybackKeyPairDeleted(ctx context.Context, conn *ivs.IVS, id string, t } outputRaw, err := stateConf.WaitForStateContext(ctx) - if out, ok := outputRaw.(*ivs.PlaybackKeyPair); ok { + if out, ok := outputRaw.(*awstypes.PlaybackKeyPair); ok { return out, err } return nil, err } -func waitRecordingConfigurationCreated(ctx context.Context, conn *ivs.IVS, id string, timeout time.Duration) (*ivs.RecordingConfiguration, error) { +func waitRecordingConfigurationCreated(ctx context.Context, conn *ivs.Client, id string, timeout time.Duration) (*awstypes.RecordingConfiguration, error) { stateConf := &retry.StateChangeConf{ - Pending: []string{ivs.RecordingConfigurationStateCreating}, - Target: []string{ivs.RecordingConfigurationStateActive}, + Pending: []string{awstypes.RecordingConfigurationStateCreating}, + Target: []string{awstypes.RecordingConfigurationStateActive}, Refresh: statusRecordingConfiguration(ctx, conn, id), Timeout: timeout, NotFoundChecks: 20, @@ -56,30 +57,30 @@ func waitRecordingConfigurationCreated(ctx context.Context, conn *ivs.IVS, id st } outputRaw, err := stateConf.WaitForStateContext(ctx) - if out, ok := outputRaw.(*ivs.RecordingConfiguration); ok { + if out, ok := outputRaw.(*awstypes.RecordingConfiguration); ok { return out, err } return nil, err } -func waitRecordingConfigurationDeleted(ctx context.Context, conn *ivs.IVS, id string, timeout time.Duration) (*ivs.RecordingConfiguration, error) { +func waitRecordingConfigurationDeleted(ctx context.Context, conn *ivs.Client, id string, timeout time.Duration) (*awstypes.RecordingConfiguration, error) { stateConf := &retry.StateChangeConf{ - Pending: []string{ivs.RecordingConfigurationStateActive}, + Pending: []string{awstypes.RecordingConfigurationStateActive}, Target: []string{}, Refresh: 
statusRecordingConfiguration(ctx, conn, id), Timeout: timeout, } outputRaw, err := stateConf.WaitForStateContext(ctx) - if out, ok := outputRaw.(*ivs.RecordingConfiguration); ok { + if out, ok := outputRaw.(*awstypes.RecordingConfiguration); ok { return out, err } return nil, err } -func waitChannelCreated(ctx context.Context, conn *ivs.IVS, id string, timeout time.Duration) (*ivs.Channel, error) { +func waitChannelCreated(ctx context.Context, conn *ivs.Client, id string, timeout time.Duration) (*awstypes.Channel, error) { stateConf := &retry.StateChangeConf{ Pending: []string{}, Target: []string{statusNormal}, @@ -90,14 +91,14 @@ func waitChannelCreated(ctx context.Context, conn *ivs.IVS, id string, timeout t } outputRaw, err := stateConf.WaitForStateContext(ctx) - if out, ok := outputRaw.(*ivs.Channel); ok { + if out, ok := outputRaw.(*awstypes.Channel); ok { return out, err } return nil, err } -func waitChannelUpdated(ctx context.Context, conn *ivs.IVS, id string, timeout time.Duration, updateDetails *ivs.UpdateChannelInput) (*ivs.Channel, error) { +func waitChannelUpdated(ctx context.Context, conn *ivs.Client, id string, timeout time.Duration, updateDetails *ivs.UpdateChannelInput) (*awstypes.Channel, error) { stateConf := &retry.StateChangeConf{ Pending: []string{statusChangePending}, Target: []string{statusUpdated}, @@ -108,14 +109,14 @@ func waitChannelUpdated(ctx context.Context, conn *ivs.IVS, id string, timeout t } outputRaw, err := stateConf.WaitForStateContext(ctx) - if out, ok := outputRaw.(*ivs.Channel); ok { + if out, ok := outputRaw.(*awstypes.Channel); ok { return out, err } return nil, err } -func waitChannelDeleted(ctx context.Context, conn *ivs.IVS, id string, timeout time.Duration) (*ivs.Channel, error) { +func waitChannelDeleted(ctx context.Context, conn *ivs.Client, id string, timeout time.Duration) (*awstypes.Channel, error) { stateConf := &retry.StateChangeConf{ Pending: []string{statusNormal}, Target: []string{}, @@ -124,7 +125,7 @@ func waitChannelDeleted(ctx context.Context, conn *ivs.IVS, id string, timeout t } outputRaw, err := stateConf.WaitForStateContext(ctx) - if out, ok := outputRaw.(*ivs.Channel); ok { + if out, ok := outputRaw.(*awstypes.Channel); ok { return out, err } From 2ed3ec3b60b0672a4596a35adaee1db46d19e921 Mon Sep 17 00:00:00 2001 From: Matt Burgess <549318+mattburgess@users.noreply.github.com> Date: Thu, 18 Jul 2024 21:30:48 +0100 Subject: [PATCH 21/38] make gen --- internal/service/ivs/generate.go | 2 +- .../ivs/service_endpoint_resolver_gen.go | 66 ++++---- .../service/ivs/service_endpoints_gen_test.go | 146 +++++++++++++++--- internal/service/ivs/service_package_gen.go | 28 ++-- internal/service/ivs/tags_gen.go | 33 ++-- 5 files changed, 183 insertions(+), 92 deletions(-) diff --git a/internal/service/ivs/generate.go b/internal/service/ivs/generate.go index 5d9cc8ade7f..7807afc79bf 100644 --- a/internal/service/ivs/generate.go +++ b/internal/service/ivs/generate.go @@ -1,7 +1,7 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -//go:generate go run ../../generate/tags/main.go -ListTags -ServiceTagsMap -UpdateTags +//go:generate go run ../../generate/tags/main.go -AWSSDKVersion=2 -KVTValues -ListTags -ServiceTagsMap -SkipTypesImp -UpdateTags //go:generate go run ../../generate/servicepackage/main.go // ONLY generate directives and package declaration! Do not add anything else to this file. 
diff --git a/internal/service/ivs/service_endpoint_resolver_gen.go b/internal/service/ivs/service_endpoint_resolver_gen.go index 3c733fde680..58495c307fe 100644 --- a/internal/service/ivs/service_endpoint_resolver_gen.go +++ b/internal/service/ivs/service_endpoint_resolver_gen.go @@ -6,65 +6,63 @@ import ( "context" "fmt" "net" - "net/url" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + ivs_sdkv2 "github.com/aws/aws-sdk-go-v2/service/ivs" + smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/errs" ) -var _ endpoints_sdkv1.Resolver = resolverSDKv1{} +var _ ivs_sdkv2.EndpointResolverV2 = resolverSDKv2{} -type resolverSDKv1 struct { - ctx context.Context +type resolverSDKv2 struct { + defaultResolver ivs_sdkv2.EndpointResolverV2 } -func newEndpointResolverSDKv1(ctx context.Context) resolverSDKv1 { - return resolverSDKv1{ - ctx: ctx, +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: ivs_sdkv2.NewDefaultEndpointResolverV2(), } } -func (r resolverSDKv1) EndpointFor(service, region string, opts ...func(*endpoints_sdkv1.Options)) (endpoint endpoints_sdkv1.ResolvedEndpoint, err error) { - ctx := r.ctx +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params ivs_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) - var opt endpoints_sdkv1.Options - opt.Set(opts...) - - useFIPS := opt.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) - defaultResolver := endpoints_sdkv1.DefaultResolver() + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } - if useFIPS { + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) - endpoint, err = defaultResolver.EndpointFor(service, region, opts...) + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) if err != nil { return endpoint, err } tflog.Debug(ctx, "endpoint resolved", map[string]any{ - "tf_aws.endpoint": endpoint.URL, + "tf_aws.endpoint": endpoint.URI.String(), }) - var endpointURL *url.URL - endpointURL, err = url.Parse(endpoint.URL) - if err != nil { - return endpoint, err - } - - hostname := endpointURL.Hostname() + hostname := endpoint.URI.Hostname() _, err = net.LookupHost(hostname) if err != nil { if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ "tf_aws.hostname": hostname, }) - opts = append(opts, func(o *endpoints_sdkv1.Options) { - o.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - }) + params.UseFIPS = aws_sdkv2.Bool(false) } else { - err = fmt.Errorf("looking up accessanalyzer endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up ivs endpoint %q: %s", hostname, err) return } } else { @@ -72,5 +70,13 @@ func (r resolverSDKv1) EndpointFor(service, region string, opts ...func(*endpoin } } - return defaultResolver.EndpointFor(service, region, opts...) 
+ return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*ivs_sdkv2.Options) { + return func(o *ivs_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } } diff --git a/internal/service/ivs/service_endpoints_gen_test.go b/internal/service/ivs/service_endpoints_gen_test.go index 780ee904757..fb198412585 100644 --- a/internal/service/ivs/service_endpoints_gen_test.go +++ b/internal/service/ivs/service_endpoints_gen_test.go @@ -4,18 +4,22 @@ package ivs_test import ( "context" + "errors" "fmt" "maps" "net" "net/url" "os" "path/filepath" + "reflect" "strings" "testing" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/endpoints" - ivs_sdkv1 "github.com/aws/aws-sdk-go/service/ivs" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + ivs_sdkv2 "github.com/aws/aws-sdk-go-v2/service/ivs" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" "github.com/google/go-cmp/cmp" "github.com/hashicorp/aws-sdk-go-base/v2/servicemocks" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -240,54 +244,63 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } func defaultEndpoint(region string) (url.URL, error) { - r := endpoints.DefaultResolver() + r := ivs_sdkv2.NewDefaultEndpointResolverV2() - ep, err := r.EndpointFor(ivs_sdkv1.EndpointsID, region) + ep, err := r.ResolveEndpoint(context.Background(), ivs_sdkv2.EndpointParameters{ + Region: aws_sdkv2.String(region), + }) if err != nil { return url.URL{}, err } - url, _ := url.Parse(ep.URL) - - if url.Path == "" { - url.Path = "/" + if ep.URI.Path == "" { + ep.URI.Path = "/" } - return *url, nil + return ep.URI, nil } func defaultFIPSEndpoint(region string) (url.URL, error) { - r := endpoints.DefaultResolver() + r := ivs_sdkv2.NewDefaultEndpointResolverV2() - ep, err := r.EndpointFor(ivs_sdkv1.EndpointsID, region, func(opt *endpoints.Options) { - opt.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled + ep, err := r.ResolveEndpoint(context.Background(), ivs_sdkv2.EndpointParameters{ + Region: aws_sdkv2.String(region), + UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { return url.URL{}, err } - url, _ := url.Parse(ep.URL) - - if url.Path == "" { - url.Path = "/" + if ep.URI.Path == "" { + ep.URI.Path = "/" } - return *url, nil + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { t.Helper() - client := meta.IVSConn(ctx) + client := meta.IVSClient(ctx) - req, _ := client.ListChannelsRequest(&ivs_sdkv1.ListChannelsInput{}) + var result apiCallParams - req.HTTPRequest.URL.Path = "/" - - return apiCallParams{ - endpoint: req.HTTPRequest.URL.String(), - region: aws_sdkv1.StringValue(client.Config.Region), + _, err := client.ListChannels(ctx, &ivs_sdkv2.ListChannelsInput{}, + func(opts *ivs_sdkv2.Options) { + opts.APIOptions = append(opts.APIOptions, + addRetrieveEndpointURLMiddleware(t, &result.endpoint), + addRetrieveRegionMiddleware(&result.region), + addCancelRequestMiddleware(), + ) + }, + ) + if err == nil { + t.Fatal("Expected an error, got none") + } else if !errors.Is(err, errCancelOperation) { + t.Fatalf("Unexpected error: %s", err) } + + return result } func withNoConfig(_ *caseSetup) { @@ -466,6 +479,89 @@ func testEndpointCase(t *testing.T, region string, testcase endpointTestCase, ca } } +func addRetrieveEndpointURLMiddleware(t *testing.T, endpoint 
*string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + retrieveEndpointURLMiddleware(t, endpoint), + middleware.After, + ) + } +} + +func retrieveEndpointURLMiddleware(t *testing.T, endpoint *string) middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Retrieve Endpoint", + func(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + t.Helper() + + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + t.Fatalf("Expected *github.com/aws/smithy-go/transport/http.Request, got %s", fullTypeName(in.Request)) + } + + url := request.URL + url.RawQuery = "" + url.Path = "/" + + *endpoint = url.String() + + return next.HandleFinalize(ctx, in) + }) +} + +func addRetrieveRegionMiddleware(region *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Serialize.Add( + retrieveRegionMiddleware(region), + middleware.After, + ) + } +} + +func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { + return middleware.SerializeMiddlewareFunc( + "Test: Retrieve Region", + func(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (middleware.SerializeOutput, middleware.Metadata, error) { + *region = awsmiddleware.GetRegion(ctx) + + return next.HandleSerialize(ctx, in) + }, + ) +} + +var errCancelOperation = fmt.Errorf("Test: Canceling request") + +func addCancelRequestMiddleware() func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + cancelRequestMiddleware(), + middleware.After, + ) + } +} + +// cancelRequestMiddleware creates a Smithy middleware that intercepts the request before sending and cancels it +func cancelRequestMiddleware() middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Cancel Requests", + func(_ context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + return middleware.FinalizeOutput{}, middleware.Metadata{}, errCancelOperation + }) +} + +func fullTypeName(i interface{}) string { + return fullValueTypeName(reflect.ValueOf(i)) +} + +func fullValueTypeName(v reflect.Value) string { + if v.Kind() == reflect.Ptr { + return "*" + fullValueTypeName(reflect.Indirect(v)) + } + + requestType := v.Type() + return fmt.Sprintf("%s.%s", requestType.PkgPath(), requestType.Name()) +} + func generateSharedConfigFile(config configFile) string { var buf strings.Builder diff --git a/internal/service/ivs/service_package_gen.go b/internal/service/ivs/service_package_gen.go index 04946040bcd..9af4b3e36fe 100644 --- a/internal/service/ivs/service_package_gen.go +++ b/internal/service/ivs/service_package_gen.go @@ -5,10 +5,8 @@ package ivs import ( "context" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" - ivs_sdkv1 "github.com/aws/aws-sdk-go/service/ivs" - "github.com/hashicorp/terraform-plugin-log/tflog" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + ivs_sdkv2 "github.com/aws/aws-sdk-go-v2/service/ivs" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -66,22 +64,14 @@ func (p *servicePackage) ServicePackageName() string { return names.IVS } -// NewConn returns a new AWS SDK for Go v1 
client for this service package's AWS API. -func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*ivs_sdkv1.IVS, error) { - sess := config[names.AttrSession].(*session_sdkv1.Session) +// NewClient returns a new AWS SDK for Go v2 client for this service package's AWS API. +func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*ivs_sdkv2.Client, error) { + cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - cfg := aws_sdkv1.Config{} - - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - cfg.Endpoint = aws_sdkv1.String(endpoint) - } else { - cfg.EndpointResolver = newEndpointResolverSDKv1(ctx) - } - - return ivs_sdkv1.New(sess.Copy(&cfg)), nil + return ivs_sdkv2.NewFromConfig(cfg, + ivs_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/ivs/tags_gen.go b/internal/service/ivs/tags_gen.go index 05b645c5056..3facea49fe5 100644 --- a/internal/service/ivs/tags_gen.go +++ b/internal/service/ivs/tags_gen.go @@ -5,9 +5,8 @@ import ( "context" "fmt" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ivs" - "github.com/aws/aws-sdk-go/service/ivs/ivsiface" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/ivs" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" @@ -19,12 +18,12 @@ import ( // listTags lists ivs service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. -func listTags(ctx context.Context, conn ivsiface.IVSAPI, identifier string) (tftags.KeyValueTags, error) { +func listTags(ctx context.Context, conn *ivs.Client, identifier string, optFns ...func(*ivs.Options)) (tftags.KeyValueTags, error) { input := &ivs.ListTagsForResourceInput{ ResourceArn: aws.String(identifier), } - output, err := conn.ListTagsForResourceWithContext(ctx, input) + output, err := conn.ListTagsForResource(ctx, input, optFns...) if err != nil { return tftags.New(ctx, nil), err @@ -36,7 +35,7 @@ func listTags(ctx context.Context, conn ivsiface.IVSAPI, identifier string) (tft // ListTags lists ivs service tags and set them in Context. // It is called from outside this package. func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier string) error { - tags, err := listTags(ctx, meta.(*conns.AWSClient).IVSConn(ctx), identifier) + tags, err := listTags(ctx, meta.(*conns.AWSClient).IVSClient(ctx), identifier) if err != nil { return err @@ -49,21 +48,21 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri return nil } -// map[string]*string handling +// map[string]string handling // Tags returns ivs service tags. -func Tags(tags tftags.KeyValueTags) map[string]*string { - return aws.StringMap(tags.Map()) +func Tags(tags tftags.KeyValueTags) map[string]string { + return tags.Map() } // KeyValueTags creates tftags.KeyValueTags from ivs service tags. 
-func KeyValueTags(ctx context.Context, tags map[string]*string) tftags.KeyValueTags { +func KeyValueTags(ctx context.Context, tags map[string]string) tftags.KeyValueTags { return tftags.New(ctx, tags) } // getTagsIn returns ivs service tags from Context. // nil is returned if there are no input tags. -func getTagsIn(ctx context.Context) map[string]*string { +func getTagsIn(ctx context.Context) map[string]string { if inContext, ok := tftags.FromContext(ctx); ok { if tags := Tags(inContext.TagsIn.UnwrapOrDefault()); len(tags) > 0 { return tags @@ -74,7 +73,7 @@ func getTagsIn(ctx context.Context) map[string]*string { } // setTagsOut sets ivs service tags in Context. -func setTagsOut(ctx context.Context, tags map[string]*string) { +func setTagsOut(ctx context.Context, tags map[string]string) { if inContext, ok := tftags.FromContext(ctx); ok { inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } @@ -83,7 +82,7 @@ func setTagsOut(ctx context.Context, tags map[string]*string) { // updateTags updates ivs service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. -func updateTags(ctx context.Context, conn ivsiface.IVSAPI, identifier string, oldTagsMap, newTagsMap any) error { +func updateTags(ctx context.Context, conn *ivs.Client, identifier string, oldTagsMap, newTagsMap any, optFns ...func(*ivs.Options)) error { oldTags := tftags.New(ctx, oldTagsMap) newTags := tftags.New(ctx, newTagsMap) @@ -94,10 +93,10 @@ func updateTags(ctx context.Context, conn ivsiface.IVSAPI, identifier string, ol if len(removedTags) > 0 { input := &ivs.UntagResourceInput{ ResourceArn: aws.String(identifier), - TagKeys: aws.StringSlice(removedTags.Keys()), + TagKeys: removedTags.Keys(), } - _, err := conn.UntagResourceWithContext(ctx, input) + _, err := conn.UntagResource(ctx, input, optFns...) if err != nil { return fmt.Errorf("untagging resource (%s): %w", identifier, err) @@ -112,7 +111,7 @@ func updateTags(ctx context.Context, conn ivsiface.IVSAPI, identifier string, ol Tags: Tags(updatedTags), } - _, err := conn.TagResourceWithContext(ctx, input) + _, err := conn.TagResource(ctx, input, optFns...) if err != nil { return fmt.Errorf("tagging resource (%s): %w", identifier, err) @@ -125,5 +124,5 @@ func updateTags(ctx context.Context, conn ivsiface.IVSAPI, identifier string, ol // UpdateTags updates ivs service tags. // It is called from outside this package. 
func (p *servicePackage) UpdateTags(ctx context.Context, meta any, identifier string, oldTags, newTags any) error { - return updateTags(ctx, meta.(*conns.AWSClient).IVSConn(ctx), identifier, oldTags, newTags) + return updateTags(ctx, meta.(*conns.AWSClient).IVSClient(ctx), identifier, oldTags, newTags) } From b0c8e9874323d2a06cb8af17d328d97424107ead Mon Sep 17 00:00:00 2001 From: Matt Burgess <549318+mattburgess@users.noreply.github.com> Date: Thu, 18 Jul 2024 21:41:27 +0100 Subject: [PATCH 22/38] r/ivs_channel: Migrate to AWS SDK v2 --- internal/service/ivs/channel.go | 29 ++++++++++++++-------------- internal/service/ivs/channel_test.go | 1 - 2 files changed, 14 insertions(+), 16 deletions(-) diff --git a/internal/service/ivs/channel.go b/internal/service/ivs/channel.go index e490079ba86..4ae38278b67 100644 --- a/internal/service/ivs/channel.go +++ b/internal/service/ivs/channel.go @@ -13,7 +13,6 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/ivs" awstypes "github.com/aws/aws-sdk-go-v2/service/ivs/types" - "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -61,10 +60,10 @@ func ResourceChannel() *schema.Resource { Computed: true, }, "latency_mode": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: enum.Validate[awstypes.ChannelLatencyMode](), + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateDiagFunc: enum.Validate[awstypes.ChannelLatencyMode](), }, names.AttrName: { Type: schema.TypeString, @@ -85,10 +84,10 @@ func ResourceChannel() *schema.Resource { names.AttrTags: tftags.TagsSchema(), names.AttrTagsAll: tftags.TagsSchemaComputed(), names.AttrType: { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: enum.Validate[awstypes.ChannelType](), + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateDiagFunc: enum.Validate[awstypes.ChannelType](), }, }, @@ -110,11 +109,11 @@ func resourceChannelCreate(ctx context.Context, d *schema.ResourceData, meta int } if v, ok := d.GetOk("authorized"); ok { - in.Authorized = aws.Bool(v.(bool)) + in.Authorized = v.(bool) } if v, ok := d.GetOk("latency_mode"); ok { - in.LatencyMode = aws.String(v.(string)) + in.LatencyMode = awstypes.ChannelLatencyMode(v.(string)) } if v, ok := d.GetOk(names.AttrName); ok { @@ -126,7 +125,7 @@ func resourceChannelCreate(ctx context.Context, d *schema.ResourceData, meta int } if v, ok := d.GetOk(names.AttrType); ok { - in.Type = aws.String(v.(string)) + in.Type = awstypes.ChannelType(v.(string)) } out, err := conn.CreateChannel(ctx, in) @@ -189,12 +188,12 @@ func resourceChannelUpdate(ctx context.Context, d *schema.ResourceData, meta int } if d.HasChanges("authorized") { - in.Authorized = aws.Bool(d.Get("authorized").(bool)) + in.Authorized = d.Get("authorized").(bool) update = true } if d.HasChanges("latency_mode") { - in.LatencyMode = aws.String(d.Get("latency_mode").(string)) + in.LatencyMode = awstypes.ChannelLatencyMode(d.Get("latency_mode").(string)) update = true } @@ -209,7 +208,7 @@ func resourceChannelUpdate(ctx context.Context, d *schema.ResourceData, meta int } if d.HasChanges(names.AttrType) { - in.Type = aws.String(d.Get(names.AttrType).(string)) + in.Type = awstypes.ChannelType(d.Get(names.AttrType).(string)) update = true } diff --git a/internal/service/ivs/channel_test.go 
b/internal/service/ivs/channel_test.go index 2150e18ab58..dc3ca225ff3 100644 --- a/internal/service/ivs/channel_test.go +++ b/internal/service/ivs/channel_test.go @@ -13,7 +13,6 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/ivs" awstypes "github.com/aws/aws-sdk-go-v2/service/ivs/types" - "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" From 42b648c874701abe7f7f0c12460213f61a598e94 Mon Sep 17 00:00:00 2001 From: Matt Burgess <549318+mattburgess@users.noreply.github.com> Date: Thu, 18 Jul 2024 21:42:53 +0100 Subject: [PATCH 23/38] r/ivs_playback_key_pair: Migrate to AWS SDK v2 --- internal/service/ivs/playback_key_pair.go | 1 - internal/service/ivs/playback_key_pair_test.go | 1 - 2 files changed, 2 deletions(-) diff --git a/internal/service/ivs/playback_key_pair.go b/internal/service/ivs/playback_key_pair.go index 168d6696dc6..6d3e22b45ea 100644 --- a/internal/service/ivs/playback_key_pair.go +++ b/internal/service/ivs/playback_key_pair.go @@ -12,7 +12,6 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/ivs" awstypes "github.com/aws/aws-sdk-go-v2/service/ivs/types" - "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" diff --git a/internal/service/ivs/playback_key_pair_test.go b/internal/service/ivs/playback_key_pair_test.go index 7f115ecbc1a..b4f6c31700e 100644 --- a/internal/service/ivs/playback_key_pair_test.go +++ b/internal/service/ivs/playback_key_pair_test.go @@ -13,7 +13,6 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/ivs" awstypes "github.com/aws/aws-sdk-go-v2/service/ivs/types" - "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" From 9b225e26f401efe6a8bd7150f670e0ea3f0828ea Mon Sep 17 00:00:00 2001 From: Matt Burgess <549318+mattburgess@users.noreply.github.com> Date: Thu, 18 Jul 2024 21:44:25 +0100 Subject: [PATCH 24/38] r/ivs_recording_configuration: Migrate to AWS SDK v2 --- internal/service/ivs/recording_configuration.go | 17 +++++++---------- .../service/ivs/recording_configuration_test.go | 1 - 2 files changed, 7 insertions(+), 11 deletions(-) diff --git a/internal/service/ivs/recording_configuration.go b/internal/service/ivs/recording_configuration.go index cc09d4b2a7f..6cb4ec7755b 100644 --- a/internal/service/ivs/recording_configuration.go +++ b/internal/service/ivs/recording_configuration.go @@ -13,7 +13,6 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/ivs" awstypes "github.com/aws/aws-sdk-go-v2/service/ivs/types" - "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -103,10 +102,10 @@ func ResourceRecordingConfiguration() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "recording_mode": { - Type: schema.TypeString, - Optional: true, - Computed: true, - 
ValidateFunc: enum.Validate[awstypes.RecordingMode](), + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateDiagFunc: enum.Validate[awstypes.RecordingMode](), }, "target_interval_seconds": { Type: schema.TypeInt, @@ -142,13 +141,13 @@ func resourceRecordingConfigurationCreate(ctx context.Context, d *schema.Resourc } if v, ok := d.GetOk("recording_reconnect_window_seconds"); ok { - in.RecordingReconnectWindowSeconds = aws.Int64(int64(v.(int))) + in.RecordingReconnectWindowSeconds = int32(v.(int)) } if v, ok := d.GetOk("thumbnail_configuration"); ok { in.ThumbnailConfiguration = expandThumbnailConfiguration(v.([]interface{})) - if aws.ToString(in.ThumbnailConfiguration.RecordingMode) == awstypes.RecordingModeDisabled && in.ThumbnailConfiguration.TargetIntervalSeconds != nil { + if in.ThumbnailConfiguration.RecordingMode == awstypes.RecordingModeDisabled && in.ThumbnailConfiguration.TargetIntervalSeconds != nil { return sdkdiag.AppendErrorf(diags, "thumbnail configuration target interval cannot be set if recording_mode is \"DISABLED\"") } } @@ -266,9 +265,7 @@ func flattenThumbnailConfiguration(apiObject *awstypes.ThumbnailConfiguration) [ m := map[string]interface{}{} - if v := apiObject.RecordingMode; v != nil { - m["recording_mode"] = aws.ToString(v) - } + m["recording_mode"] = string(apiObject.RecordingMode) if v := apiObject.TargetIntervalSeconds; v != nil { m["target_interval_seconds"] = aws.ToInt64(v) diff --git a/internal/service/ivs/recording_configuration_test.go b/internal/service/ivs/recording_configuration_test.go index d383c717dc8..d55649af552 100644 --- a/internal/service/ivs/recording_configuration_test.go +++ b/internal/service/ivs/recording_configuration_test.go @@ -13,7 +13,6 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/ivs" awstypes "github.com/aws/aws-sdk-go-v2/service/ivs/types" - "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" From 91e9e870d55052a6fb4d110279ce3eaeeef19168 Mon Sep 17 00:00:00 2001 From: Matt Burgess <549318+mattburgess@users.noreply.github.com> Date: Thu, 18 Jul 2024 21:49:13 +0100 Subject: [PATCH 25/38] ivs: Misc fixes --- internal/service/ivs/find.go | 1 - internal/service/ivs/status.go | 9 ++++----- internal/service/ivs/wait.go | 7 ++++--- 3 files changed, 8 insertions(+), 9 deletions(-) diff --git a/internal/service/ivs/find.go b/internal/service/ivs/find.go index 561c12fddbf..c7535fbea96 100644 --- a/internal/service/ivs/find.go +++ b/internal/service/ivs/find.go @@ -9,7 +9,6 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/ivs" awstypes "github.com/aws/aws-sdk-go-v2/service/ivs/types" - "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" diff --git a/internal/service/ivs/status.go b/internal/service/ivs/status.go index 252bb0da895..5ff9f63a3fc 100644 --- a/internal/service/ivs/status.go +++ b/internal/service/ivs/status.go @@ -8,7 +8,6 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/ivs" - awstypes "github.com/aws/aws-sdk-go-v2/service/ivs/types" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" 
"github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) @@ -45,7 +44,7 @@ func statusRecordingConfiguration(ctx context.Context, conn *ivs.Client, id stri return nil, "", err } - return out, aws.ToString(out.State), nil + return out, string(out.State), nil } } @@ -63,11 +62,11 @@ func statusChannel(ctx context.Context, conn *ivs.Client, arn string, updateDeta if updateDetails == nil { return out, statusNormal, nil } else { - if (updateDetails.Authorized != nil && aws.ToBool(updateDetails.Authorized) == aws.ToBool(out.Authorized)) || - (updateDetails.LatencyMode != nil && aws.ToString(updateDetails.LatencyMode) == aws.ToString(out.LatencyMode)) || + if (updateDetails.Authorized == out.Authorized) || + (updateDetails.LatencyMode == out.LatencyMode) || (updateDetails.Name != nil && aws.ToString(updateDetails.Name) == aws.ToString(out.Name)) || (updateDetails.RecordingConfigurationArn != nil && aws.ToString(updateDetails.RecordingConfigurationArn) == aws.ToString(out.RecordingConfigurationArn)) || - (updateDetails.Type != nil && aws.ToString(updateDetails.Type) == aws.ToString(out.Type)) { + (updateDetails.Type == out.Type) { return out, statusUpdated, nil } return out, statusChangePending, nil diff --git a/internal/service/ivs/wait.go b/internal/service/ivs/wait.go index 92ceede4019..a7b2e5a26e0 100644 --- a/internal/service/ivs/wait.go +++ b/internal/service/ivs/wait.go @@ -10,6 +10,7 @@ import ( "github.com/aws/aws-sdk-go-v2/service/ivs" awstypes "github.com/aws/aws-sdk-go-v2/service/ivs/types" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-provider-aws/internal/enum" ) func waitPlaybackKeyPairCreated(ctx context.Context, conn *ivs.Client, id string, timeout time.Duration) (*awstypes.PlaybackKeyPair, error) { @@ -48,8 +49,8 @@ func waitPlaybackKeyPairDeleted(ctx context.Context, conn *ivs.Client, id string func waitRecordingConfigurationCreated(ctx context.Context, conn *ivs.Client, id string, timeout time.Duration) (*awstypes.RecordingConfiguration, error) { stateConf := &retry.StateChangeConf{ - Pending: []string{awstypes.RecordingConfigurationStateCreating}, - Target: []string{awstypes.RecordingConfigurationStateActive}, + Pending: enum.Slice(awstypes.RecordingConfigurationStateCreating), + Target: enum.Slice(awstypes.RecordingConfigurationStateActive), Refresh: statusRecordingConfiguration(ctx, conn, id), Timeout: timeout, NotFoundChecks: 20, @@ -66,7 +67,7 @@ func waitRecordingConfigurationCreated(ctx context.Context, conn *ivs.Client, id func waitRecordingConfigurationDeleted(ctx context.Context, conn *ivs.Client, id string, timeout time.Duration) (*awstypes.RecordingConfiguration, error) { stateConf := &retry.StateChangeConf{ - Pending: []string{awstypes.RecordingConfigurationStateActive}, + Pending: enum.Slice(awstypes.RecordingConfigurationStateActive), Target: []string{}, Refresh: statusRecordingConfiguration(ctx, conn, id), Timeout: timeout, From 41bff3c0616567e4c980601bccb588c9a618751f Mon Sep 17 00:00:00 2001 From: Justin Retzolk <44710313+justinretzolk@users.noreply.github.com> Date: Thu, 18 Jul 2024 16:48:02 -0500 Subject: [PATCH 26/38] Update issue_comment workflow to use new community check --- .github/workflows/issue_comment.yml | 55 ++++++++++++++--------------- 1 file changed, 27 insertions(+), 28 deletions(-) diff --git a/.github/workflows/issue_comment.yml b/.github/workflows/issue_comment.yml index bd4b2609c57..aaefc1d6d60 100644 --- a/.github/workflows/issue_comment.yml +++ 
b/.github/workflows/issue_comment.yml @@ -1,35 +1,34 @@ -name: 'Process issue_comment Events' - +name: Process issue_comment Events on: issue_comment: types: [created] -jobs: - community_check: - name: 'Community Check' - uses: ./.github/workflows/community-check.yml - secrets: inherit - with: - username: ${{ github.event.comment.user.login }} +permissions: + contents: read + issues: write + pull-requests: write - automation_labeler: - name: 'Automation Labeler' - needs: community_check +jobs: + labeler: + name: Labeler runs-on: ubuntu-latest - # Since the only step in this job requires non-maintainer, skip the job entirely if that's not met. - if: needs.community_check.outputs.maintainer == 'false' - env: - # This is a ternary that sets the variable to the assigned user's login on assigned events, - # and otherwise sets it to the username of the pull request's author. For more information: - # https://docs.github.com/en/actions/learn-github-actions/expressions#example - # - # issue_comment events are triggered by comments on issues and pull requests. Checking the - # value of github.event.issue.pull_request tells us whether the issue is an issue or is - # actually a pull request, allowing us to dynamically set the gh subcommand: - # https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#issue_comment-on-issues-only-or-pull-requests-only - COMMAND: ${{ github.event.issue.pull_request && 'pr' || 'issue' }} - GH_TOKEN: ${{ github.token }} + if: contains(github.event.issue.labels.*.name, 'stale') || contains(github.event.issue.labels.*.name, 'waiting-response') steps: - - name: 'Remove stale and waiting-response on non-maintainer comment' - # https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#issue_comment-on-issues-only-or-pull-requests-only - run: gh ${{ env.COMMAND }} edit ${{ github.event.issue.html_url }} --remove-label stale,waiting-response + - name: Checkout Community Check + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + sparse-checkout: .github/actions/community_check + + - name: Run Community Check + id: community_check + uses: ./.github/actions/community_check + with: + user_login: ${{ github.event.comment.user.login }} + maintainers: ${{ secrets.MAINTAINERS }} + + - name: Remove stale and waiting-response + if: steps.community_check.outputs.maintainer == 'false' + env: + COMMAND: ${{ github.event.issue.pull_request && 'pr' || 'issue' }} + GH_TOKEN: ${{ github.token }} + run: gh $COMMAND edit ${{ github.event.issue.html_url }} --remove-label stale,waiting-response From 5f9c5488dc18eb77ceb037799fca39c132ee46cc Mon Sep 17 00:00:00 2001 From: changelogbot Date: Thu, 18 Jul 2024 22:02:22 +0000 Subject: [PATCH 27/38] Update CHANGELOG.md for #38424 --- CHANGELOG.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 29ff6b616d1..ebb6018c465 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,12 +10,17 @@ FEATURES: * **New Resource:** `aws_grafana_workspace_service_account` ([#38101](https://github.com/hashicorp/terraform-provider-aws/issues/38101)) * **New Resource:** `aws_grafana_workspace_service_account_token` ([#38101](https://github.com/hashicorp/terraform-provider-aws/issues/38101)) * **New Resource:** `aws_rds_certificate` ([#35003](https://github.com/hashicorp/terraform-provider-aws/issues/35003)) +* **New Resource:** `aws_rekognition_stream_processor` ([#37536](https://github.com/hashicorp/terraform-provider-aws/issues/37536)) ENHANCEMENTS: * 
data-source/aws_elasticache_replication_group: Add `cluster_mode` attribute ([#38002](https://github.com/hashicorp/terraform-provider-aws/issues/38002)) * data-source/aws_lakeformation_data_lake_settings: Add `allow_full_table_external_data_access` attribute ([#34474](https://github.com/hashicorp/terraform-provider-aws/issues/34474)) * data-source/aws_msk_cluster: Add `broker_node_group_info` attribute ([#37705](https://github.com/hashicorp/terraform-provider-aws/issues/37705)) +* resource/aws_bedrockagent_agent : Add `skip_resource_in_use_check` argument ([#37586](https://github.com/hashicorp/terraform-provider-aws/issues/37586)) +* resource/aws_bedrockagent_agent_action_group: Add `action_group_executor.custom_control` argument ([#37484](https://github.com/hashicorp/terraform-provider-aws/issues/37484)) +* resource/aws_bedrockagent_agent_action_group: Add `function_schema` configuration block ([#37484](https://github.com/hashicorp/terraform-provider-aws/issues/37484)) +* resource/aws_bedrockagent_agent_alias : Add `routing_configuration.provisioned_throughput` argument ([#37520](https://github.com/hashicorp/terraform-provider-aws/issues/37520)) * resource/aws_codebuild_webhook: Add `scope_configuration` argument ([#38199](https://github.com/hashicorp/terraform-provider-aws/issues/38199)) * resource/aws_codepipeline: Add `timeout_in_minutes` argument to the `action` configuration block ([#36316](https://github.com/hashicorp/terraform-provider-aws/issues/36316)) * resource/aws_db_instance: Add `engine_lifecycle_support` argument ([#37708](https://github.com/hashicorp/terraform-provider-aws/issues/37708)) @@ -23,6 +28,10 @@ ENHANCEMENTS: * resource/aws_elasticache_replication_group: Add `cluster_mode` argument ([#38002](https://github.com/hashicorp/terraform-provider-aws/issues/38002)) * resource/aws_emrserverless_application: Add `interactive_configuration` argument ([#37889](https://github.com/hashicorp/terraform-provider-aws/issues/37889)) * resource/aws_fis_experiment_template: Add `experiment_options` configuration block ([#36900](https://github.com/hashicorp/terraform-provider-aws/issues/36900)) +* resource/aws_fsx_lustre_file_system: Add `final_backup_tags` and `skip_final_backup` arguments ([#37717](https://github.com/hashicorp/terraform-provider-aws/issues/37717)) +* resource/aws_fsx_ontap_volume: Add `final_backup_tags` argument ([#37717](https://github.com/hashicorp/terraform-provider-aws/issues/37717)) +* resource/aws_fsx_openzfs_file_system: Add `delete_options` and `final_backup_tags` arguments ([#37717](https://github.com/hashicorp/terraform-provider-aws/issues/37717)) +* resource/aws_fsx_windows_file_system: Add `final_backup_tags` argument ([#37717](https://github.com/hashicorp/terraform-provider-aws/issues/37717)) * resource/aws_imagebuilder_image_pipeline: Add `execution_role` and `workflow` arguments ([#37317](https://github.com/hashicorp/terraform-provider-aws/issues/37317)) * resource/aws_kinesis_firehose_delivery_stream: Add `secrets_manager_configuration` to `http_endpoint_configuration` ([#38245](https://github.com/hashicorp/terraform-provider-aws/issues/38245)) * resource/aws_kinesisanalyticsv2_application: Support `FLINK-1_19` as a valid value for `runtime_environment` ([#38350](https://github.com/hashicorp/terraform-provider-aws/issues/38350)) From ada73f155c3e811c18527b0ed349513510ea4c1e Mon Sep 17 00:00:00 2001 From: Anthony Wat Date: Thu, 18 Jul 2024 23:44:37 -0400 Subject: [PATCH 28/38] fix: Add missing security policy names for aws_transfer_server --- 
.changelog/38425.txt | 3 ++ internal/service/transfer/server.go | 18 +++++--- internal/service/transfer/server_test.go | 43 ++++++++++++++++++-- website/docs/r/transfer_server.html.markdown | 43 +++++++++++--------- 4 files changed, 78 insertions(+), 29 deletions(-) create mode 100644 .changelog/38425.txt diff --git a/.changelog/38425.txt b/.changelog/38425.txt new file mode 100644 index 00000000000..f74bd777515 --- /dev/null +++ b/.changelog/38425.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_transfer_server: Add supported values `TransferSecurityPolicy-FIPS-2024-05`, `TransferSecurityPolicy-Restricted-2018-11`, and `TransferSecurityPolicy-Restricted-2020-06` for the `security_policy_name` argument +``` \ No newline at end of file diff --git a/internal/service/transfer/server.go b/internal/service/transfer/server.go index 784c233d489..4ea52411d0d 100644 --- a/internal/service/transfer/server.go +++ b/internal/service/transfer/server.go @@ -1246,27 +1246,33 @@ type securityPolicyName string const ( securityPolicyName2018_11 securityPolicyName = "TransferSecurityPolicy-2018-11" securityPolicyName2020_06 securityPolicyName = "TransferSecurityPolicy-2020-06" - securityPolicyNameFIPS_2020_06 securityPolicyName = "TransferSecurityPolicy-FIPS-2020-06" - securityPolicyNameFIPS_2023_05 securityPolicyName = "TransferSecurityPolicy-FIPS-2023-05" - securityPolicyNameFIPS_2024_01 securityPolicyName = "TransferSecurityPolicy-FIPS-2024-01" securityPolicyName2022_03 securityPolicyName = "TransferSecurityPolicy-2022-03" securityPolicyName2023_05 securityPolicyName = "TransferSecurityPolicy-2023-05" securityPolicyName2024_01 securityPolicyName = "TransferSecurityPolicy-2024-01" + securityPolicyNameFIPS_2020_06 securityPolicyName = "TransferSecurityPolicy-FIPS-2020-06" + securityPolicyNameFIPS_2023_05 securityPolicyName = "TransferSecurityPolicy-FIPS-2023-05" + securityPolicyNameFIPS_2024_01 securityPolicyName = "TransferSecurityPolicy-FIPS-2024-01" + securityPolicyNameFIPS_2024_05 securityPolicyName = "TransferSecurityPolicy-FIPS-2024-05" securityPolicyNamePQ_SSH_2023_04 securityPolicyName = "TransferSecurityPolicy-PQ-SSH-Experimental-2023-04" securityPolicyNamePQ_SSH_FIPS_2023_04 securityPolicyName = "TransferSecurityPolicy-PQ-SSH-FIPS-Experimental-2023-04" + securityPolicyNameRestricted_2018_11 securityPolicyName = "TransferSecurityPolicy-Restricted-2018-11" + securityPolicyNameRestricted_2020_06 securityPolicyName = "TransferSecurityPolicy-Restricted-2020-06" ) func (securityPolicyName) Values() []securityPolicyName { return []securityPolicyName{ securityPolicyName2018_11, securityPolicyName2020_06, - securityPolicyNameFIPS_2020_06, - securityPolicyNameFIPS_2023_05, - securityPolicyNameFIPS_2024_01, securityPolicyName2022_03, securityPolicyName2023_05, securityPolicyName2024_01, + securityPolicyNameFIPS_2020_06, + securityPolicyNameFIPS_2023_05, + securityPolicyNameFIPS_2024_01, + securityPolicyNameFIPS_2024_05, securityPolicyNamePQ_SSH_2023_04, securityPolicyNamePQ_SSH_FIPS_2023_04, + securityPolicyNameRestricted_2018_11, + securityPolicyNameRestricted_2020_06, } } diff --git a/internal/service/transfer/server_test.go b/internal/service/transfer/server_test.go index 4977f0c2738..2b31ac56fea 100644 --- a/internal/service/transfer/server_test.go +++ b/internal/service/transfer/server_test.go @@ -258,6 +258,13 @@ func testAccServer_securityPolicy(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "security_policy_name", "TransferSecurityPolicy-2023-05"), ), }, + { + Config: 
testAccServerConfig_securityPolicy(rName, "TransferSecurityPolicy-2024-01"), + Check: resource.ComposeTestCheckFunc( + testAccCheckServerExists(ctx, resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "security_policy_name", "TransferSecurityPolicy-2024-01"), + ), + }, { Config: testAccServerConfig_securityPolicy(rName, "TransferSecurityPolicy-PQ-SSH-Experimental-2023-04"), Check: resource.ComposeTestCheckFunc( @@ -266,10 +273,17 @@ func testAccServer_securityPolicy(t *testing.T) { ), }, { - Config: testAccServerConfig_securityPolicy(rName, "TransferSecurityPolicy-2024-01"), + Config: testAccServerConfig_securityPolicy(rName, "TransferSecurityPolicy-Restricted-2018-11"), Check: resource.ComposeTestCheckFunc( testAccCheckServerExists(ctx, resourceName, &conf), - resource.TestCheckResourceAttr(resourceName, "security_policy_name", "TransferSecurityPolicy-2024-01"), + resource.TestCheckResourceAttr(resourceName, "security_policy_name", "TransferSecurityPolicy-Restricted-2018-11"), + ), + }, + { + Config: testAccServerConfig_securityPolicy(rName, "TransferSecurityPolicy-Restricted-2020-06"), + Check: resource.ComposeTestCheckFunc( + testAccCheckServerExists(ctx, resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "security_policy_name", "TransferSecurityPolicy-Restricted-2020-06"), ), }, }, @@ -289,10 +303,10 @@ func testAccServer_securityPolicyFIPS(t *testing.T) { CheckDestroy: testAccCheckServerDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccServerConfig_securityPolicy(rName, "TransferSecurityPolicy-FIPS-2023-05"), + Config: testAccServerConfig_securityPolicy(rName, "TransferSecurityPolicy-FIPS-2020-06"), Check: resource.ComposeTestCheckFunc( testAccCheckServerExists(ctx, resourceName, &conf), - resource.TestCheckResourceAttr(resourceName, "security_policy_name", "TransferSecurityPolicy-FIPS-2023-05"), + resource.TestCheckResourceAttr(resourceName, "security_policy_name", "TransferSecurityPolicy-FIPS-2020-06"), ), }, { @@ -301,6 +315,13 @@ func testAccServer_securityPolicyFIPS(t *testing.T) { ImportStateVerify: true, ImportStateVerifyIgnore: []string{names.AttrForceDestroy}, }, + { + Config: testAccServerConfig_securityPolicy(rName, "TransferSecurityPolicy-FIPS-2023-05"), + Check: resource.ComposeTestCheckFunc( + testAccCheckServerExists(ctx, resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "security_policy_name", "TransferSecurityPolicy-FIPS-2023-05"), + ), + }, { Config: testAccServerConfig_securityPolicy(rName, "TransferSecurityPolicy-FIPS-2024-01"), Check: resource.ComposeTestCheckFunc( @@ -308,6 +329,20 @@ func testAccServer_securityPolicyFIPS(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "security_policy_name", "TransferSecurityPolicy-FIPS-2024-01"), ), }, + { + Config: testAccServerConfig_securityPolicy(rName, "TransferSecurityPolicy-FIPS-2024-05"), + Check: resource.ComposeTestCheckFunc( + testAccCheckServerExists(ctx, resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "security_policy_name", "TransferSecurityPolicy-FIPS-2024-05"), + ), + }, + { + Config: testAccServerConfig_securityPolicy(rName, "TransferSecurityPolicy-PQ-SSH-FIPS-Experimental-2023-04"), + Check: resource.ComposeTestCheckFunc( + testAccCheckServerExists(ctx, resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "security_policy_name", "TransferSecurityPolicy-PQ-SSH-FIPS-Experimental-2023-04"), + ), + }, }, }) } diff --git a/website/docs/r/transfer_server.html.markdown 
b/website/docs/r/transfer_server.html.markdown index 1b7631f363b..8e5287c8610 100644 --- a/website/docs/r/transfer_server.html.markdown +++ b/website/docs/r/transfer_server.html.markdown @@ -132,7 +132,7 @@ This resource supports the following arguments: * `SFTP`: File transfer over SSH * `FTPS`: File transfer with TLS encryption * `FTP`: Unencrypted file transfer -* `endpoint_details` - (Optional) The virtual private cloud (VPC) endpoint settings that you want to configure for your SFTP server. See [`endpoint_details` block](#endpoint_details-block) below for details. +* `endpoint_details` - (Optional) The virtual private cloud (VPC) endpoint settings that you want to configure for your SFTP server. See [`endpoint_details` Block](#endpoint_details-block) below for details. * `endpoint_type` - (Optional) The type of endpoint that you want your SFTP server connect to. If you connect to a `VPC` (or `VPC_ENDPOINT`), your SFTP server isn't accessible over the public internet. If you want to connect your SFTP server via public internet, set `PUBLIC`. Defaults to `PUBLIC`. * `invocation_role` - (Optional) Amazon Resource Name (ARN) of the IAM role used to authenticate the user account with an `identity_provider_type` of `API_GATEWAY`. * `host_key` - (Optional) RSA, ECDSA, or ED25519 private key (e.g., as generated by the `ssh-keygen -t rsa -b 2048 -N "" -m PEM -f my-new-server-key`, `ssh-keygen -t ecdsa -b 256 -N "" -m PEM -f my-new-server-key` or `ssh-keygen -t ed25519 -N "" -f my-new-server-key` commands). @@ -145,24 +145,29 @@ This resource supports the following arguments: * `force_destroy` - (Optional) A boolean that indicates all users associated with the server should be deleted so that the Server can be destroyed without error. The default value is `false`. This option only applies to servers configured with a `SERVICE_MANAGED` `identity_provider_type`. * `post_authentication_login_banner`- (Optional) Specify a string to display when users connect to a server. This string is displayed after the user authenticates. The SFTP protocol does not support post-authentication display banners. * `pre_authentication_login_banner`- (Optional) Specify a string to display when users connect to a server. This string is displayed before the user authenticates. -* `protocol_details`- (Optional) The protocol settings that are configured for your server. See [`protocol_details` block](#protocol_details-block) below for details. -* `s3_storage_options`- (Optional) Specifies whether or not performance for your Amazon S3 directories is optimized. This is disabled by default. See [`s3_storage_options` block](#s3_storage_options-block) below for details. +* `protocol_details`- (Optional) The protocol settings that are configured for your server. See [`protocol_details` Block](#protocol_details-block) below for details. +* `s3_storage_options`- (Optional) Specifies whether or not performance for your Amazon S3 directories is optimized. This is disabled by default. See [`s3_storage_options` Block](#s3_storage_options-block) below for details. * `security_policy_name` - (Optional) Specifies the name of the security policy that is attached to the server. Default value is: `TransferSecurityPolicy-2018-11`. 
The available values are: - * `TransferSecurityPolicy-2024-01` - * `TransferSecurityPolicy-2023-05` - * `TransferSecurityPolicy-2022-03` - * `TransferSecurityPolicy-2020-06` * `TransferSecurityPolicy-2018-11` - * `TransferSecurityPolicy-FIPS-2024-01` - * `TransferSecurityPolicy-FIPS-2023-05` + * `TransferSecurityPolicy-2020-06` + * `TransferSecurityPolicy-2022-03` + * `TransferSecurityPolicy-2023-05` + * `TransferSecurityPolicy-2024-01` * `TransferSecurityPolicy-FIPS-2020-06` + * `TransferSecurityPolicy-FIPS-2023-05` + * `TransferSecurityPolicy-FIPS-2024-01` + * `TransferSecurityPolicy-FIPS-2024-05` * `TransferSecurityPolicy-PQ-SSH-Experimental-2023-04` * `TransferSecurityPolicy-PQ-SSH-FIPS-Experimental-2023-04` + * `TransferSecurityPolicy-Restricted-2018-11` + * `TransferSecurityPolicy-Restricted-2020-06` + + See [Security policies for AWS Transfer Family servers](https://docs.aws.amazon.com/transfer/latest/userguide/security-policies.html) for details. * `structured_log_destinations` - (Optional) A set of ARNs of destinations that will receive structured logs from the transfer server such as CloudWatch Log Group ARNs. If provided this enables the transfer server to emit structured logs to the specified locations. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. -* `workflow_details` - (Optional) Specifies the workflow details. See [`workflow_details` block](#workflow_details-block) below for details. +* `workflow_details` - (Optional) Specifies the workflow details. See [`workflow_details` Block](#workflow_details-block) below for details. -### `endpoint_details` block +### `endpoint_details` Block The `endpoint_details` configuration block supports the following arguments: @@ -172,7 +177,7 @@ The `endpoint_details` configuration block supports the following arguments: * `vpc_endpoint_id` - (Optional) The ID of the VPC endpoint. This property can only be used when `endpoint_type` is set to `VPC_ENDPOINT` * `vpc_id` - (Optional) The VPC ID of the virtual private cloud in which the SFTP server's endpoint will be hosted. This property can only be used when `endpoint_type` is set to `VPC`. -### `protocol_details` block +### `protocol_details` Block THe `protocol_details` configuration block supports the following arguments: @@ -181,7 +186,7 @@ THe `protocol_details` configuration block supports the following arguments: * `set_stat_option` - (Optional) Use to ignore the error that is generated when the client attempts to use `SETSTAT` on a file you are uploading to an S3 bucket. Valid values: `DEFAULT`, `ENABLE_NO_OP`. * `tls_session_resumption_mode` - (Optional) A property used with Transfer Family servers that use the FTPS protocol. Provides a mechanism to resume or share a negotiated secret key between the control and data connection for an FTPS session. Valid values: `DISABLED`, `ENABLED`, `ENFORCED`. -### `s3_storage_options` block +### `s3_storage_options` Block The `s3_storage_options` configuration block supports the following arguments: @@ -189,21 +194,21 @@ The `s3_storage_options` configuration block supports the following arguments: By default, home directory mappings have a `TYPE` of `DIRECTORY`. 
If you enable this option, you would then need to explicitly set the `HomeDirectoryMapEntry` Type to `FILE` if you want a mapping to have a file target. See [Using logical directories to simplify your Transfer Family directory structures](https://docs.aws.amazon.com/transfer/latest/userguide/logical-dir-mappings.html) for details. -### `workflow_details` block +### `workflow_details` Block The `workflow_details` configuration block supports the following arguments: -* `on_upload` - (Optional) A trigger that starts a workflow: the workflow begins to execute after a file is uploaded. See [`on_upload` block](#on_upload-block) below for details. -* `on_partial_upload` - (Optional) A trigger that starts a workflow if a file is only partially uploaded. See Workflow Detail below. See [`on_partial_upload` block](#on_partial_upload-block) below for details. +* `on_upload` - (Optional) A trigger that starts a workflow: the workflow begins to execute after a file is uploaded. See [`on_upload` Block](#on_upload-block) below for details. +* `on_partial_upload` - (Optional) A trigger that starts a workflow if a file is only partially uploaded. See Workflow Detail below. See [`on_partial_upload` Block](#on_partial_upload-block) below for details. -#### `on_upload` block +#### `on_upload` Block The `on_upload` configuration block supports the following arguments: * `execution_role` - (Required) Includes the necessary permissions for S3, EFS, and Lambda operations that Transfer can assume, so that all workflow steps can operate on the required resources. * `workflow_id` - (Required) A unique identifier for the workflow. -#### `on_partial_upload` block +#### `on_partial_upload` Block The `on_partial_upload` configuration block supports the following arguments: @@ -222,7 +227,7 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Transfer Servers using the server `id`. For example: +In Terraform v1.5.0 and later, use an [`import` Block](https://developer.hashicorp.com/terraform/language/import) to import Transfer Servers using the server `id`. For example: ```terraform import { From 40ec186c3203e83b67f580592744462c64e5bee9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 19 Jul 2024 06:43:39 +0000 Subject: [PATCH 29/38] build(deps): bump actions/setup-go from 5.0.1 to 5.0.2 Bumps [actions/setup-go](https://github.com/actions/setup-go) from 5.0.1 to 5.0.2. - [Release notes](https://github.com/actions/setup-go/releases) - [Commits](https://github.com/actions/setup-go/compare/v5.0.1...0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32) --- updated-dependencies: - dependency-name: actions/setup-go dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .github/workflows/dependencies.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/dependencies.yml b/.github/workflows/dependencies.yml index 1aecd4caccd..c854f2a5eda 100644 --- a/.github/workflows/dependencies.yml +++ b/.github/workflows/dependencies.yml @@ -28,7 +28,7 @@ jobs: uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Setup Go - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 + uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: go-version-file: .go-version From dcb29fd8dfa1b716aa7944b5de3ed363151b8142 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Fri, 19 Jul 2024 09:16:56 -0400 Subject: [PATCH 30/38] docs: use note admonition on naming standards (#38423) --- docs/naming.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/naming.md b/docs/naming.md index f8aecb9f3fb..e859a98fc28 100644 --- a/docs/naming.md +++ b/docs/naming.md @@ -82,7 +82,8 @@ A correct example is `accessanalyzer_analyzer.html.markdown`. An incorrect examp ## Mixed Caps -**NOTE:** Mixed Caps is different than camel case, Pascal case, or snake case! +!!! note + Mixed Caps is different than camel case, Pascal case, or snake case! Idiomatic Go uses [_Mixed Caps_](https://go.dev/wiki/CodeReviewComments#initialisms) for multiword names in code. Mixed caps is similar to camel case except **initialisms and abbreviations in mixed caps should be the correct, human-readable case**, such as `VPCEndpoint` not `VpcEndpoint`. After all, names in code _are for humans_. From b63cdbe0b30cdaf3591c672c9ab97eb33cd4c028 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Fri, 19 Jul 2024 09:21:41 -0400 Subject: [PATCH 31/38] chime: add voice connector sweeper (#38419) ```console % make sweep SWEEPARGS=-sweep-run=aws_chime_voice_connector WARNING: This will destroy infrastructure. Use only in development accounts. go1.22.5 test ./internal/sweep -v -sweep=us-west-2,us-east-1,us-east-2,us-west-1 -sweep-run=aws_chime_voice_connector -timeout 360m 2024/07/18 15:46:19 [DEBUG] Running Sweepers for region (us-west-2): 2024/07/18 15:46:21 Completed Sweepers for region (us-west-1) in 398.482166ms 2024/07/18 15:46:21 Sweeper Tests for region (us-west-1) ran successfully: 2024/07/18 15:46:21 - aws_chime_voice_connector ok github.com/hashicorp/terraform-provider-aws/internal/sweep 7.861s ``` --- internal/service/chime/sweep.go | 65 +++++++++++++++++++++++++++++ internal/sweep/register_gen_test.go | 2 + 2 files changed, 67 insertions(+) create mode 100644 internal/service/chime/sweep.go diff --git a/internal/service/chime/sweep.go b/internal/service/chime/sweep.go new file mode 100644 index 00000000000..c0d3c359d12 --- /dev/null +++ b/internal/service/chime/sweep.go @@ -0,0 +1,65 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package chime + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/chimesdkvoice" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-provider-aws/internal/sweep" + "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv2" + "github.com/hashicorp/terraform-provider-aws/internal/sweep/sdk" +) + +func RegisterSweepers() { + resource.AddTestSweepers("aws_chime_voice_connector", &resource.Sweeper{ + Name: "aws_chime_voice_connector", + F: sweepVoiceConnectors, + }) +} + +func sweepVoiceConnectors(region string) error { + ctx := sweep.Context(region) + client, err := sweep.SharedRegionalSweepClient(ctx, region) + if err != nil { + return fmt.Errorf("error getting client: %s", err) + } + + conn := client.ChimeSDKVoiceClient(ctx) + sweepResources := make([]sweep.Sweepable, 0) + in := &chimesdkvoice.ListVoiceConnectorsInput{} + + pages := chimesdkvoice.NewListVoiceConnectorsPaginator(conn, in) + + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping Chime Voice Connector sweep for %s: %s", region, err) + return nil + } + if err != nil { + return fmt.Errorf("error retrieving Chime Voice Connectors: %w", err) + } + + for _, vc := range page.VoiceConnectors { + id := aws.ToString(vc.VoiceConnectorId) + + r := ResourceVoiceConnector() + d := r.Data(nil) + d.SetId(id) + + log.Printf("[INFO] Deleting Chime Voice Connector: %s", id) + sweepResources = append(sweepResources, sdk.NewSweepResource(r, d, client)) + } + } + + if err := sweep.SweepOrchestrator(ctx, sweepResources); err != nil { + return fmt.Errorf("error sweeping Chime Voice Connectors for %s: %w", region, err) + } + + return nil +} diff --git a/internal/sweep/register_gen_test.go b/internal/sweep/register_gen_test.go index 4a5f5fb18f9..44f4d7df938 100644 --- a/internal/sweep/register_gen_test.go +++ b/internal/sweep/register_gen_test.go @@ -24,6 +24,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/service/batch" "github.com/hashicorp/terraform-provider-aws/internal/service/bcmdataexports" "github.com/hashicorp/terraform-provider-aws/internal/service/budgets" + "github.com/hashicorp/terraform-provider-aws/internal/service/chime" "github.com/hashicorp/terraform-provider-aws/internal/service/cloud9" "github.com/hashicorp/terraform-provider-aws/internal/service/cloudformation" "github.com/hashicorp/terraform-provider-aws/internal/service/cloudfront" @@ -183,6 +184,7 @@ func registerSweepers() { batch.RegisterSweepers() bcmdataexports.RegisterSweepers() budgets.RegisterSweepers() + chime.RegisterSweepers() cloud9.RegisterSweepers() cloudformation.RegisterSweepers() cloudfront.RegisterSweepers() From 426cff39c1601a6e571cd7041b26d3c81b1a3ab3 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Fri, 19 Jul 2024 09:42:40 -0400 Subject: [PATCH 32/38] r/aws_chime_voice_connector_group: handle NotFound error on delete The delete operation calls an update when the `connector` argument is configured, which was not handling cases where the group did not exist (deleted out of band). This change prevents this error from propagating back to the user, and instead just removes the resource from state when the group cannot be found during the delete operation. 
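For readers skimming the patch, here is a minimal, illustrative sketch of the guard described above — not the provider's actual implementation, which follows in the diff below. It uses the public AWS SDK for Go v2 Chime SDK Voice client and the standard library's `errors.As` in place of the provider's internal `errs.IsA` helper; the `deleteGroup` function name is hypothetical.

```go
package example

import (
	"context"
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/chimesdkvoice"
	awstypes "github.com/aws/aws-sdk-go-v2/service/chimesdkvoice/types"
)

// deleteGroup sketches the delete path: if the group has already been deleted
// out of band, the NotFoundException is treated as success and the connector
// update that normally precedes deletion is skipped entirely.
func deleteGroup(ctx context.Context, conn *chimesdkvoice.Client, id string) error {
	_, err := conn.GetVoiceConnectorGroup(ctx, &chimesdkvoice.GetVoiceConnectorGroupInput{
		VoiceConnectorGroupId: &id,
	})

	var nfe *awstypes.NotFoundException
	if errors.As(err, &nfe) {
		// Already gone: nothing to update or delete.
		return nil
	}
	if err != nil {
		return fmt.Errorf("reading Chime Voice Connector Group (%s): %w", id, err)
	}

	// ... remove connectors from the group, then delete the group ...
	return nil
}
```

Returning nil on NotFound keeps the delete idempotent: a group removed out of band is simply dropped from state rather than surfacing an error to the user.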
--- .changelog/36774.txt | 3 +++ internal/service/chime/voice_connector_group.go | 8 +++++++- internal/service/chime/voice_connector_group_test.go | 4 ++-- 3 files changed, 12 insertions(+), 3 deletions(-) create mode 100644 .changelog/36774.txt diff --git a/.changelog/36774.txt b/.changelog/36774.txt new file mode 100644 index 00000000000..44d53925f24 --- /dev/null +++ b/.changelog/36774.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_chime_voice_connector_group: Properly handle voice connector groups deleted out of band +``` diff --git a/internal/service/chime/voice_connector_group.go b/internal/service/chime/voice_connector_group.go index 42d80c6f73f..ef6f3042abd 100644 --- a/internal/service/chime/voice_connector_group.go +++ b/internal/service/chime/voice_connector_group.go @@ -97,7 +97,7 @@ func resourceVoiceConnectorGroupRead(ctx context.Context, d *schema.ResourceData resp, err = findVoiceConnectorGroupByID(ctx, conn, d.Id()) } - if !d.IsNewResource() && tfresource.NotFound(err) { + if !d.IsNewResource() && errs.IsA[*awstypes.NotFoundException](err) { log.Printf("[WARN] Chime Voice conector group %s not found", d.Id()) d.SetId("") return diags @@ -147,6 +147,12 @@ func resourceVoiceConnectorGroupDelete(ctx context.Context, d *schema.ResourceDa conn := meta.(*conns.AWSClient).ChimeSDKVoiceClient(ctx) if v, ok := d.GetOk("connector"); ok && v.(*schema.Set).Len() > 0 { + // Exit before attempting connector updates if the group does not exist + _, err := findVoiceConnectorGroupByID(ctx, conn, d.Id()) + if errs.IsA[*awstypes.NotFoundException](err) { + return diags + } + if err := resourceVoiceConnectorGroupUpdate(ctx, d, meta); err != nil { return err } diff --git a/internal/service/chime/voice_connector_group_test.go b/internal/service/chime/voice_connector_group_test.go index d2409f306a8..f844430b23f 100644 --- a/internal/service/chime/voice_connector_group_test.go +++ b/internal/service/chime/voice_connector_group_test.go @@ -14,8 +14,8 @@ import ( "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" tfchime "github.com/hashicorp/terraform-provider-aws/internal/service/chime" - "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -196,7 +196,7 @@ func testAccCheckVoiceConnectorGroupDestroy(ctx context.Context) resource.TestCh return tfchime.FindVoiceConnectorGroupByID(ctx, conn, rs.Primary.ID) }) - if tfresource.NotFound(err) { + if errs.IsA[*awstypes.NotFoundException](err) { continue } From 1e07f51c48c1a45b399192fb1e4c1173c8c49c00 Mon Sep 17 00:00:00 2001 From: changelogbot Date: Fri, 19 Jul 2024 14:52:28 +0000 Subject: [PATCH 33/38] Update CHANGELOG.md for #38426 --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ebb6018c465..9f62f2af1b2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -48,6 +48,7 @@ BUG FIXES: * data-source/aws_efs_access_point: Set `id` the the access point ID, not the file system ID. 
This fixes a regression introduced in [v5.58.0](https://github.com/hashicorp/terraform-provider-aws/blob/main/CHANGELOG.md#5580-july-11-2024) ([#38372](https://github.com/hashicorp/terraform-provider-aws/issues/38372)) * data-source/aws_lb_listener: Correctly set `default_action.target_group_arn` ([#37348](https://github.com/hashicorp/terraform-provider-aws/issues/37348)) +* resource/aws_chime_voice_connector_group: Properly handle voice connector groups deleted out of band ([#36774](https://github.com/hashicorp/terraform-provider-aws/issues/36774)) * resource/aws_codebuild_project: Fix unsetting `concurrent_build_limit` ([#37748](https://github.com/hashicorp/terraform-provider-aws/issues/37748)) * resource/aws_codepipeline: Mark `trigger` as Computed ([#36316](https://github.com/hashicorp/terraform-provider-aws/issues/36316)) * resource/aws_ecs_service: Change `volume_configuration.managed_ebs_volume.throughput` from `TypeString` to `TypeInt` ([#38109](https://github.com/hashicorp/terraform-provider-aws/issues/38109)) @@ -58,6 +59,7 @@ BUG FIXES: * resource/aws_lb_target_group: Use the configured `ip_address_type` value when `target_type` is `instance` ([#36423](https://github.com/hashicorp/terraform-provider-aws/issues/36423)) * resource/aws_lb_trust_store: Wait until trust store is `ACTIVE` on resource Create ([#38332](https://github.com/hashicorp/terraform-provider-aws/issues/38332)) * resource/aws_pinpoint_app: Fix `interface conversion: interface {} is nil, not map[string]interface {}` panic when `campaign_hook` is empty (`{}`) ([#38323](https://github.com/hashicorp/terraform-provider-aws/issues/38323)) +* resource/aws_transfer_server: Add supported values `TransferSecurityPolicy-FIPS-2024-05`, `TransferSecurityPolicy-Restricted-2018-11`, and `TransferSecurityPolicy-Restricted-2020-06` for the `security_policy_name` argument ([#38425](https://github.com/hashicorp/terraform-provider-aws/issues/38425)) ## 5.58.0 (July 11, 2024) From cfb6dd8db742dc1ce8954aa27a9cc0ce33db5fa3 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 19 Jul 2024 10:54:48 -0400 Subject: [PATCH 34/38] r/aws_emrcontainers_job_template: Reduce visibility. --- .../service/emrcontainers/exports_test.go | 11 +++ .../service/emrcontainers/job_template.go | 79 +++++++++---------- .../emrcontainers/service_package_gen.go | 2 +- internal/service/emrcontainers/sweep.go | 4 +- 4 files changed, 51 insertions(+), 45 deletions(-) create mode 100644 internal/service/emrcontainers/exports_test.go diff --git a/internal/service/emrcontainers/exports_test.go b/internal/service/emrcontainers/exports_test.go new file mode 100644 index 00000000000..be204ac71d3 --- /dev/null +++ b/internal/service/emrcontainers/exports_test.go @@ -0,0 +1,11 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package emrcontainers + +// Exports for use in tests only. 
+var ( + ResourceJobTemplate = resourceJobTemplate + + FindJobTemplateByID = findJobTemplateByID +) diff --git a/internal/service/emrcontainers/job_template.go b/internal/service/emrcontainers/job_template.go index 390060cded0..4ecc7c54c3f 100644 --- a/internal/service/emrcontainers/job_template.go +++ b/internal/service/emrcontainers/job_template.go @@ -30,7 +30,7 @@ import ( // @SDKResource("aws_emrcontainers_job_template", name="Job Template") // @Tags(identifierAttribute="arn") -func ResourceJobTemplate() *schema.Resource { +func resourceJobTemplate() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceJobTemplateCreate, ReadWithoutTimeout: resourceJobTemplateRead, @@ -261,7 +261,6 @@ func ResourceJobTemplate() *schema.Resource { func resourceJobTemplateCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EMRContainersClient(ctx) name := d.Get(names.AttrName).(string) @@ -292,10 +291,9 @@ func resourceJobTemplateCreate(ctx context.Context, d *schema.ResourceData, meta func resourceJobTemplateRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EMRContainersClient(ctx) - vc, err := FindJobTemplateByID(ctx, conn, d.Id()) + vc, err := findJobTemplateByID(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] EMR Containers Job Template %s not found, removing from state", d.Id()) @@ -315,8 +313,8 @@ func resourceJobTemplateRead(ctx context.Context, d *schema.ResourceData, meta i } else { d.Set("job_template_data", nil) } - d.Set(names.AttrName, vc.Name) d.Set(names.AttrKMSKeyARN, vc.KmsKeyArn) + d.Set(names.AttrName, vc.Name) setTagsOut(ctx, vc.Tags) @@ -325,7 +323,6 @@ func resourceJobTemplateRead(ctx context.Context, d *schema.ResourceData, meta i func resourceJobTemplateDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EMRContainersClient(ctx) log.Printf("[INFO] Deleting EMR Containers Job Template: %s", d.Id()) @@ -353,6 +350,41 @@ func resourceJobTemplateDelete(ctx context.Context, d *schema.ResourceData, meta return diags } +func findJobTemplate(ctx context.Context, conn *emrcontainers.Client, input *emrcontainers.DescribeJobTemplateInput) (*awstypes.JobTemplate, error) { + output, err := conn.DescribeJobTemplate(ctx, input) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil || output.JobTemplate == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output.JobTemplate, nil +} + +func findJobTemplateByID(ctx context.Context, conn *emrcontainers.Client, id string) (*awstypes.JobTemplate, error) { + input := &emrcontainers.DescribeJobTemplateInput{ + Id: aws.String(id), + } + + output, err := findJobTemplate(ctx, conn, input) + + if err != nil { + return nil, err + } + + return output, nil +} + func expandJobTemplateData(tfMap map[string]interface{}) *awstypes.JobTemplateData { if tfMap == nil { return nil @@ -739,38 +771,3 @@ func flattenSparkSubmitJobDriver(apiObject *awstypes.SparkSubmitJobDriver) map[s return tfMap } - -func findJobTemplate(ctx context.Context, conn *emrcontainers.Client, input *emrcontainers.DescribeJobTemplateInput) (*awstypes.JobTemplate, error) 
{ - output, err := conn.DescribeJobTemplate(ctx, input) - - if errs.IsA[*awstypes.ResourceNotFoundException](err) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - if output == nil || output.JobTemplate == nil { - return nil, tfresource.NewEmptyResultError(input) - } - - return output.JobTemplate, nil -} - -func FindJobTemplateByID(ctx context.Context, conn *emrcontainers.Client, id string) (*awstypes.JobTemplate, error) { - input := &emrcontainers.DescribeJobTemplateInput{ - Id: aws.String(id), - } - - output, err := findJobTemplate(ctx, conn, input) - - if err != nil { - return nil, err - } - - return output, nil -} diff --git a/internal/service/emrcontainers/service_package_gen.go b/internal/service/emrcontainers/service_package_gen.go index 500ca69d480..0dc085bbe5b 100644 --- a/internal/service/emrcontainers/service_package_gen.go +++ b/internal/service/emrcontainers/service_package_gen.go @@ -34,7 +34,7 @@ func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePac func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePackageSDKResource { return []*types.ServicePackageSDKResource{ { - Factory: ResourceJobTemplate, + Factory: resourceJobTemplate, TypeName: "aws_emrcontainers_job_template", Name: "Job Template", Tags: &types.ServicePackageResourceTags{ diff --git a/internal/service/emrcontainers/sweep.go b/internal/service/emrcontainers/sweep.go index 548707ba16d..9548964889b 100644 --- a/internal/service/emrcontainers/sweep.go +++ b/internal/service/emrcontainers/sweep.go @@ -38,7 +38,6 @@ func sweepVirtualClusters(region string) error { sweepResources := make([]sweep.Sweepable, 0) pages := emrcontainers.NewListVirtualClustersPaginator(conn, input) - for pages.HasMorePages() { page, err := pages.NextPage(ctx) @@ -84,7 +83,6 @@ func sweepJobTemplates(region string) error { sweepResources := make([]sweep.Sweepable, 0) pages := emrcontainers.NewListJobTemplatesPaginator(conn, input) - for pages.HasMorePages() { page, err := pages.NextPage(ctx) @@ -98,7 +96,7 @@ func sweepJobTemplates(region string) error { } for _, v := range page.Templates { - r := ResourceJobTemplate() + r := resourceJobTemplate() d := r.Data(nil) d.SetId(aws.ToString(v.Id)) From 9eaa75411e60c222a1f6ce2a4278f71e10232288 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 19 Jul 2024 11:00:54 -0400 Subject: [PATCH 35/38] build(deps): bump the aws-sdk-go group across 4 directories with 13 updates (#38427) Bumps the aws-sdk-go group with 11 updates in the / directory: | Package | From | To | | --- | --- | --- | | [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) | `1.54.19` | `1.54.20` | | [github.com/aws/aws-sdk-go-v2/config](https://github.com/aws/aws-sdk-go-v2) | `1.27.26` | `1.27.27` | | [github.com/aws/aws-sdk-go-v2/feature/s3/manager](https://github.com/aws/aws-sdk-go-v2) | `1.17.7` | `1.17.8` | | [github.com/aws/aws-sdk-go-v2/service/acmpca](https://github.com/aws/aws-sdk-go-v2) | `1.34.0` | `1.35.0` | | [github.com/aws/aws-sdk-go-v2/service/ec2](https://github.com/aws/aws-sdk-go-v2) | `1.170.0` | `1.171.0` | | [github.com/aws/aws-sdk-go-v2/service/firehose](https://github.com/aws/aws-sdk-go-v2) | `1.31.3` | `1.32.0` | | [github.com/aws/aws-sdk-go-v2/service/ivschat](https://github.com/aws/aws-sdk-go-v2) | `1.14.3` | `1.14.4` | | [github.com/aws/aws-sdk-go-v2/service/medialive](https://github.com/aws/aws-sdk-go-v2) | 
`1.54.3` | `1.55.0` | | [github.com/aws/aws-sdk-go-v2/service/networkfirewall](https://github.com/aws/aws-sdk-go-v2) | `1.40.3` | `1.40.4` | | [github.com/aws/aws-sdk-go-v2/service/rds](https://github.com/aws/aws-sdk-go-v2) | `1.81.4` | `1.81.5` | | [github.com/aws/aws-sdk-go-v2/service/secretsmanager](https://github.com/aws/aws-sdk-go-v2) | `1.32.3` | `1.32.4` | Bumps the aws-sdk-go group with 1 update in the /.ci/providerlint directory: [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go). Bumps the aws-sdk-go group with 1 update in the /.ci/tools directory: [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go). Bumps the aws-sdk-go group with 11 updates in the /tools/tfsdk2fw directory: | Package | From | To | | --- | --- | --- | | [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) | `1.54.19` | `1.54.20` | | [github.com/aws/aws-sdk-go-v2/config](https://github.com/aws/aws-sdk-go-v2) | `1.27.26` | `1.27.27` | | [github.com/aws/aws-sdk-go-v2/feature/s3/manager](https://github.com/aws/aws-sdk-go-v2) | `1.17.7` | `1.17.8` | | [github.com/aws/aws-sdk-go-v2/service/acmpca](https://github.com/aws/aws-sdk-go-v2) | `1.34.0` | `1.35.0` | | [github.com/aws/aws-sdk-go-v2/service/ec2](https://github.com/aws/aws-sdk-go-v2) | `1.170.0` | `1.171.0` | | [github.com/aws/aws-sdk-go-v2/service/firehose](https://github.com/aws/aws-sdk-go-v2) | `1.31.3` | `1.32.0` | | [github.com/aws/aws-sdk-go-v2/service/ivschat](https://github.com/aws/aws-sdk-go-v2) | `1.14.3` | `1.14.4` | | [github.com/aws/aws-sdk-go-v2/service/medialive](https://github.com/aws/aws-sdk-go-v2) | `1.54.3` | `1.55.0` | | [github.com/aws/aws-sdk-go-v2/service/networkfirewall](https://github.com/aws/aws-sdk-go-v2) | `1.40.3` | `1.40.4` | | [github.com/aws/aws-sdk-go-v2/service/rds](https://github.com/aws/aws-sdk-go-v2) | `1.81.4` | `1.81.5` | | [github.com/aws/aws-sdk-go-v2/service/secretsmanager](https://github.com/aws/aws-sdk-go-v2) | `1.32.3` | `1.32.4` | Updates `github.com/aws/aws-sdk-go` from 1.54.19 to 1.54.20 - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.54.19...v1.54.20) Updates `github.com/aws/aws-sdk-go-v2/config` from 1.27.26 to 1.27.27 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/config/v1.27.26...config/v1.27.27) Updates `github.com/aws/aws-sdk-go-v2/credentials` from 1.17.26 to 1.17.27 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/credentials/v1.17.26...credentials/v1.17.27) Updates `github.com/aws/aws-sdk-go-v2/feature/s3/manager` from 1.17.7 to 1.17.8 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/v1.17.8/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/v1.17.7...v1.17.8) Updates `github.com/aws/aws-sdk-go-v2/service/acmpca` from 1.34.0 to 1.35.0 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/service/s3/v1.35.0/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/s3/v1.34.0...service/s3/v1.35.0) Updates `github.com/aws/aws-sdk-go-v2/service/ec2` from 1.170.0 to 1.171.0 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/ec2/v1.170.0...service/ec2/v1.171.0) Updates 
`github.com/aws/aws-sdk-go-v2/service/firehose` from 1.31.3 to 1.32.0 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/service/s3/v1.32.0/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/s3/v1.31.3...service/s3/v1.32.0) Updates `github.com/aws/aws-sdk-go-v2/service/ivschat` from 1.14.3 to 1.14.4 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/service/pi/v1.14.4/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/mq/v1.14.3...service/pi/v1.14.4) Updates `github.com/aws/aws-sdk-go-v2/service/medialive` from 1.54.3 to 1.55.0 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/s3/v1.54.3...service/s3/v1.55.0) Updates `github.com/aws/aws-sdk-go-v2/service/networkfirewall` from 1.40.3 to 1.40.4 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/rds/v1.40.3...service/rds/v1.40.4) Updates `github.com/aws/aws-sdk-go-v2/service/rds` from 1.81.4 to 1.81.5 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/rds/v1.81.4...service/rds/v1.81.5) Updates `github.com/aws/aws-sdk-go-v2/service/secretsmanager` from 1.32.3 to 1.32.4 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/kms/v1.32.3...service/sqs/v1.32.4) Updates `github.com/aws/aws-sdk-go-v2/service/sso` from 1.22.3 to 1.22.4 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/config/v1.22.3...service/pi/v1.22.4) Updates `github.com/aws/aws-sdk-go` from 1.54.19 to 1.54.20 - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.54.19...v1.54.20) Updates `github.com/aws/aws-sdk-go` from 1.54.19 to 1.54.20 - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.54.19...v1.54.20) Updates `github.com/aws/aws-sdk-go` from 1.54.19 to 1.54.20 - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.54.19...v1.54.20) Updates `github.com/aws/aws-sdk-go-v2/config` from 1.27.26 to 1.27.27 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/config/v1.27.26...config/v1.27.27) Updates `github.com/aws/aws-sdk-go-v2/credentials` from 1.17.26 to 1.17.27 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/credentials/v1.17.26...credentials/v1.17.27) Updates `github.com/aws/aws-sdk-go-v2/feature/s3/manager` from 1.17.7 to 1.17.8 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/v1.17.8/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/v1.17.7...v1.17.8) Updates `github.com/aws/aws-sdk-go-v2/service/acmpca` from 1.34.0 to 1.35.0 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/service/s3/v1.35.0/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/s3/v1.34.0...service/s3/v1.35.0) 
Updates `github.com/aws/aws-sdk-go-v2/service/ec2` from 1.170.0 to 1.171.0 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/ec2/v1.170.0...service/ec2/v1.171.0) Updates `github.com/aws/aws-sdk-go-v2/service/firehose` from 1.31.3 to 1.32.0 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/service/s3/v1.32.0/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/s3/v1.31.3...service/s3/v1.32.0) Updates `github.com/aws/aws-sdk-go-v2/service/ivschat` from 1.14.3 to 1.14.4 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/service/pi/v1.14.4/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/mq/v1.14.3...service/pi/v1.14.4) Updates `github.com/aws/aws-sdk-go-v2/service/medialive` from 1.54.3 to 1.55.0 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/s3/v1.54.3...service/s3/v1.55.0) Updates `github.com/aws/aws-sdk-go-v2/service/networkfirewall` from 1.40.3 to 1.40.4 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/rds/v1.40.3...service/rds/v1.40.4) Updates `github.com/aws/aws-sdk-go-v2/service/rds` from 1.81.4 to 1.81.5 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/rds/v1.81.4...service/rds/v1.81.5) Updates `github.com/aws/aws-sdk-go-v2/service/secretsmanager` from 1.32.3 to 1.32.4 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/kms/v1.32.3...service/sqs/v1.32.4) Updates `github.com/aws/aws-sdk-go-v2/service/sso` from 1.22.3 to 1.22.4 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/config/v1.22.3...service/pi/v1.22.4) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/config dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/credentials dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/feature/s3/manager dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/acmpca dependency-type: direct:production update-type: version-update:semver-minor dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/ec2 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/firehose dependency-type: direct:production update-type: version-update:semver-minor dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/ivschat dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/medialive dependency-type: 
direct:production update-type: version-update:semver-minor dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/networkfirewall dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/rds dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/secretsmanager dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/sso dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go dependency-type: indirect update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go dependency-type: indirect update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/config dependency-type: indirect update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/credentials dependency-type: indirect update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/feature/s3/manager dependency-type: indirect update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/acmpca dependency-type: indirect update-type: version-update:semver-minor dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/ec2 dependency-type: indirect update-type: version-update:semver-minor dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/firehose dependency-type: indirect update-type: version-update:semver-minor dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/ivschat dependency-type: indirect update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/medialive dependency-type: indirect update-type: version-update:semver-minor dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/networkfirewall dependency-type: indirect update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/rds dependency-type: indirect update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/secretsmanager dependency-type: indirect update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/sso dependency-type: indirect update-type: version-update:semver-patch dependency-group: aws-sdk-go ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .ci/providerlint/go.mod | 2 +- .ci/providerlint/go.sum | 4 +- .../aws/aws-sdk-go/aws/endpoints/defaults.go | 56 +++++++++++++++++-- .ci/providerlint/vendor/modules.txt | 2 +- .ci/tools/go.mod | 2 +- .ci/tools/go.sum | 4 +- go.mod | 26 ++++----- go.sum | 52 ++++++++--------- tools/tfsdk2fw/go.mod | 28 +++++----- tools/tfsdk2fw/go.sum | 56 ++++++++++--------- 10 files changed, 143 insertions(+), 89 deletions(-) diff --git a/.ci/providerlint/go.mod b/.ci/providerlint/go.mod index 59febe30a6d..72979bb0e66 100644 --- a/.ci/providerlint/go.mod +++ b/.ci/providerlint/go.mod @@ -3,7 +3,7 @@ module github.com/hashicorp/terraform-provider-aws/ci/providerlint go 1.22.5 require ( - github.com/aws/aws-sdk-go v1.54.19 + github.com/aws/aws-sdk-go v1.54.20 github.com/bflad/tfproviderlint v0.29.0 github.com/hashicorp/terraform-plugin-sdk/v2 v2.34.0 golang.org/x/tools v0.13.0 diff --git a/.ci/providerlint/go.sum b/.ci/providerlint/go.sum index 5b3676e7422..37f632f9531 100644 --- a/.ci/providerlint/go.sum +++ b/.ci/providerlint/go.sum @@ -9,8 +9,8 @@ github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= -github.com/aws/aws-sdk-go v1.54.19 h1:tyWV+07jagrNiCcGRzRhdtVjQs7Vy41NwsuOcl0IbVI= -github.com/aws/aws-sdk-go v1.54.19/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= +github.com/aws/aws-sdk-go v1.54.20 h1:FZ2UcXya7bUkvkpf7TaPmiL7EubK0go1nlXGLRwEsoo= +github.com/aws/aws-sdk-go v1.54.20/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= github.com/bflad/gopaniccheck v0.1.0 h1:tJftp+bv42ouERmUMWLoUn/5bi/iQZjHPznM00cP/bU= github.com/bflad/gopaniccheck v0.1.0/go.mod h1:ZCj2vSr7EqVeDaqVsWN4n2MwdROx1YL+LFo47TSWtsA= github.com/bflad/tfproviderlint v0.29.0 h1:zxKYAAM6IZ4ace1a3LX+uzMRIMP8L+iOtEc+FP2Yoow= diff --git a/.ci/providerlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/.ci/providerlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 84dc7dc08e7..6f5247ffdb5 100644 --- a/.ci/providerlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/.ci/providerlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -13068,6 +13068,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, @@ -22522,6 +22525,9 @@ var awsPartition = partition{ }: endpoint{ Hostname: "network-firewall-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -24479,6 +24485,14 @@ var awsPartition = partition{ Region: "ca-central-1", }, }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "portal.sso.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + }, endpointKey{ Region: "eu-central-1", }: endpoint{ @@ -33621,6 +33635,20 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "tax": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: 
"tax.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, "textract": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -40969,20 +40997,40 @@ var awsusgovPartition = partition{ "directconnect": service{ Endpoints: serviceEndpoints{ endpointKey{ - Region: "us-gov-east-1", + Region: "fips-us-gov-east-1", }: endpoint{ - Hostname: "directconnect.us-gov-east-1.amazonaws.com", + Hostname: "directconnect-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, endpointKey{ - Region: "us-gov-west-1", + Region: "fips-us-gov-west-1", }: endpoint{ - Hostname: "directconnect.us-gov-west-1.amazonaws.com", + Hostname: "directconnect-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "directconnect-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "directconnect-fips.us-gov-west-1.amazonaws.com", }, }, }, diff --git a/.ci/providerlint/vendor/modules.txt b/.ci/providerlint/vendor/modules.txt index ffe99ec31c3..2cc103e9639 100644 --- a/.ci/providerlint/vendor/modules.txt +++ b/.ci/providerlint/vendor/modules.txt @@ -28,7 +28,7 @@ github.com/agext/levenshtein # github.com/apparentlymart/go-textseg/v15 v15.0.0 ## explicit; go 1.16 github.com/apparentlymart/go-textseg/v15/textseg -# github.com/aws/aws-sdk-go v1.54.19 +# github.com/aws/aws-sdk-go v1.54.20 ## explicit; go 1.19 github.com/aws/aws-sdk-go/aws/awserr github.com/aws/aws-sdk-go/aws/endpoints diff --git a/.ci/tools/go.mod b/.ci/tools/go.mod index 1fd3c8d798c..a9a940b0c0b 100644 --- a/.ci/tools/go.mod +++ b/.ci/tools/go.mod @@ -51,7 +51,7 @@ require ( github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef // indirect github.com/ashanbrown/forbidigo v1.6.0 // indirect github.com/ashanbrown/makezero v1.1.1 // indirect - github.com/aws/aws-sdk-go v1.54.19 // indirect + github.com/aws/aws-sdk-go v1.54.20 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect github.com/bgentry/speakeasy v0.1.0 // indirect diff --git a/.ci/tools/go.sum b/.ci/tools/go.sum index 0020a79d899..fdc11e35b28 100644 --- a/.ci/tools/go.sum +++ b/.ci/tools/go.sum @@ -279,8 +279,8 @@ github.com/ashanbrown/forbidigo v1.6.0/go.mod h1:Y8j9jy9ZYAEHXdu723cUlraTqbzjKF1 github.com/ashanbrown/makezero v1.1.1 h1:iCQ87C0V0vSyO+M9E/FZYbu65auqH0lnsOkf5FcB28s= github.com/ashanbrown/makezero v1.1.1/go.mod h1:i1bJLCRSCHOcOa9Y6MyF2FTfMZMFdHvxKHxgO5Z1axI= github.com/aws/aws-sdk-go v1.44.122/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= -github.com/aws/aws-sdk-go v1.54.19 h1:tyWV+07jagrNiCcGRzRhdtVjQs7Vy41NwsuOcl0IbVI= -github.com/aws/aws-sdk-go v1.54.19/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= +github.com/aws/aws-sdk-go v1.54.20 h1:FZ2UcXya7bUkvkpf7TaPmiL7EubK0go1nlXGLRwEsoo= +github.com/aws/aws-sdk-go v1.54.20/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= github.com/aws/aws-sdk-go-v2/config v1.8.3/go.mod h1:4AEiLtAb8kLs7vgw2ZV3p2VZ1+hBavOc84hqxVNpCyw= github.com/aws/aws-sdk-go-v2/credentials v1.4.3/go.mod 
h1:FNNC6nQZQUuyhq5aE5c7ata8o9e4ECGmS4lAXC7o1mQ= diff --git a/go.mod b/go.mod index 8028b705f73..d86bb13f360 100644 --- a/go.mod +++ b/go.mod @@ -6,16 +6,16 @@ require ( github.com/ProtonMail/go-crypto v1.1.0-alpha.3-proton github.com/YakDriver/go-version v0.1.0 github.com/YakDriver/regexache v0.23.0 - github.com/aws/aws-sdk-go v1.54.19 + github.com/aws/aws-sdk-go v1.54.20 github.com/aws/aws-sdk-go-v2 v1.30.3 - github.com/aws/aws-sdk-go-v2/config v1.27.26 - github.com/aws/aws-sdk-go-v2/credentials v1.17.26 + github.com/aws/aws-sdk-go-v2/config v1.27.27 + github.com/aws/aws-sdk-go-v2/credentials v1.17.27 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11 - github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.7 + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.8 github.com/aws/aws-sdk-go-v2/service/accessanalyzer v1.32.3 github.com/aws/aws-sdk-go-v2/service/account v1.19.3 github.com/aws/aws-sdk-go-v2/service/acm v1.28.4 - github.com/aws/aws-sdk-go-v2/service/acmpca v1.34.0 + github.com/aws/aws-sdk-go-v2/service/acmpca v1.35.0 github.com/aws/aws-sdk-go-v2/service/amp v1.27.3 github.com/aws/aws-sdk-go-v2/service/amplify v1.23.3 github.com/aws/aws-sdk-go-v2/service/apigateway v1.25.3 @@ -92,7 +92,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/docdbelastic v1.11.3 github.com/aws/aws-sdk-go-v2/service/drs v1.28.3 github.com/aws/aws-sdk-go-v2/service/dynamodb v1.34.3 - github.com/aws/aws-sdk-go-v2/service/ec2 v1.170.0 + github.com/aws/aws-sdk-go-v2/service/ec2 v1.171.0 github.com/aws/aws-sdk-go-v2/service/ecr v1.30.3 github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.25.3 github.com/aws/aws-sdk-go-v2/service/ecs v1.44.3 @@ -107,7 +107,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/eventbridge v1.33.3 github.com/aws/aws-sdk-go-v2/service/evidently v1.21.3 github.com/aws/aws-sdk-go-v2/service/finspace v1.26.3 - github.com/aws/aws-sdk-go-v2/service/firehose v1.31.3 + github.com/aws/aws-sdk-go-v2/service/firehose v1.32.0 github.com/aws/aws-sdk-go-v2/service/fis v1.26.3 github.com/aws/aws-sdk-go-v2/service/fms v1.35.3 github.com/aws/aws-sdk-go-v2/service/glacier v1.24.3 @@ -124,7 +124,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/iot v1.55.3 github.com/aws/aws-sdk-go-v2/service/iotanalytics v1.24.3 github.com/aws/aws-sdk-go-v2/service/iotevents v1.25.3 - github.com/aws/aws-sdk-go-v2/service/ivschat v1.14.3 + github.com/aws/aws-sdk-go-v2/service/ivschat v1.14.4 github.com/aws/aws-sdk-go-v2/service/kafka v1.35.3 github.com/aws/aws-sdk-go-v2/service/kafkaconnect v1.19.3 github.com/aws/aws-sdk-go-v2/service/kendra v1.52.3 @@ -140,14 +140,14 @@ require ( github.com/aws/aws-sdk-go-v2/service/m2 v1.15.3 github.com/aws/aws-sdk-go-v2/service/mediaconnect v1.32.0 github.com/aws/aws-sdk-go-v2/service/mediaconvert v1.57.3 - github.com/aws/aws-sdk-go-v2/service/medialive v1.54.3 + github.com/aws/aws-sdk-go-v2/service/medialive v1.55.0 github.com/aws/aws-sdk-go-v2/service/mediapackage v1.32.3 github.com/aws/aws-sdk-go-v2/service/mediapackagev2 v1.14.3 github.com/aws/aws-sdk-go-v2/service/mediastore v1.22.3 github.com/aws/aws-sdk-go-v2/service/mq v1.25.3 github.com/aws/aws-sdk-go-v2/service/mwaa v1.29.4 github.com/aws/aws-sdk-go-v2/service/neptunegraph v1.10.3 - github.com/aws/aws-sdk-go-v2/service/networkfirewall v1.40.3 + github.com/aws/aws-sdk-go-v2/service/networkfirewall v1.40.4 github.com/aws/aws-sdk-go-v2/service/networkmonitor v1.5.3 github.com/aws/aws-sdk-go-v2/service/oam v1.13.3 github.com/aws/aws-sdk-go-v2/service/opensearchserverless v1.13.3 @@ -162,7 +162,7 @@ require ( 
github.com/aws/aws-sdk-go-v2/service/qldb v1.23.3 github.com/aws/aws-sdk-go-v2/service/ram v1.27.3 github.com/aws/aws-sdk-go-v2/service/rbin v1.18.3 - github.com/aws/aws-sdk-go-v2/service/rds v1.81.4 + github.com/aws/aws-sdk-go-v2/service/rds v1.81.5 github.com/aws/aws-sdk-go-v2/service/redshift v1.46.4 github.com/aws/aws-sdk-go-v2/service/redshiftdata v1.27.3 github.com/aws/aws-sdk-go-v2/service/redshiftserverless v1.20.3 @@ -179,7 +179,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/s3control v1.46.3 github.com/aws/aws-sdk-go-v2/service/scheduler v1.10.3 github.com/aws/aws-sdk-go-v2/service/schemas v1.26.3 - github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.32.3 + github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.32.4 github.com/aws/aws-sdk-go-v2/service/securityhub v1.51.3 github.com/aws/aws-sdk-go-v2/service/securitylake v1.16.3 github.com/aws/aws-sdk-go-v2/service/serverlessapplicationrepository v1.22.3 @@ -196,7 +196,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/ssmcontacts v1.24.3 github.com/aws/aws-sdk-go-v2/service/ssmincidents v1.32.3 github.com/aws/aws-sdk-go-v2/service/ssmsap v1.15.3 - github.com/aws/aws-sdk-go-v2/service/sso v1.22.3 + github.com/aws/aws-sdk-go-v2/service/sso v1.22.4 github.com/aws/aws-sdk-go-v2/service/ssoadmin v1.27.3 github.com/aws/aws-sdk-go-v2/service/sts v1.30.3 github.com/aws/aws-sdk-go-v2/service/swf v1.25.3 diff --git a/go.sum b/go.sum index 398244426a4..fe837faff37 100644 --- a/go.sum +++ b/go.sum @@ -22,20 +22,20 @@ github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/aws/aws-sdk-go v1.54.19 h1:tyWV+07jagrNiCcGRzRhdtVjQs7Vy41NwsuOcl0IbVI= -github.com/aws/aws-sdk-go v1.54.19/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= +github.com/aws/aws-sdk-go v1.54.20 h1:FZ2UcXya7bUkvkpf7TaPmiL7EubK0go1nlXGLRwEsoo= +github.com/aws/aws-sdk-go v1.54.20/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= github.com/aws/aws-sdk-go-v2 v1.30.3 h1:jUeBtG0Ih+ZIFH0F4UkmL9w3cSpaMv9tYYDbzILP8dY= github.com/aws/aws-sdk-go-v2 v1.30.3/go.mod h1:nIQjQVp5sfpQcTc9mPSr1B0PaWK5ByX9MOoDadSN4lc= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3 h1:tW1/Rkad38LA15X4UQtjXZXNKsCgkshC3EbmcUmghTg= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3/go.mod h1:UbnqO+zjqk3uIt9yCACHJ9IVNhyhOCnYk8yA19SAWrM= -github.com/aws/aws-sdk-go-v2/config v1.27.26 h1:T1kAefbKuNum/AbShMsZEro6eRkeOT8YILfE9wyjAYQ= -github.com/aws/aws-sdk-go-v2/config v1.27.26/go.mod h1:ivWHkAWFrw/nxty5Fku7soTIVdqZaZ7dw+tc5iGW3GA= -github.com/aws/aws-sdk-go-v2/credentials v1.17.26 h1:tsm8g/nJxi8+/7XyJJcP2dLrnK/5rkFp6+i2nhmz5fk= -github.com/aws/aws-sdk-go-v2/credentials v1.17.26/go.mod h1:3vAM49zkIa3q8WT6o9Ve5Z0vdByDMwmdScO0zvThTgI= +github.com/aws/aws-sdk-go-v2/config v1.27.27 h1:HdqgGt1OAP0HkEDDShEl0oSYa9ZZBSOmKpdpsDMdO90= +github.com/aws/aws-sdk-go-v2/config v1.27.27/go.mod h1:MVYamCg76dFNINkZFu4n4RjDixhVr51HLj4ErWzrVwg= +github.com/aws/aws-sdk-go-v2/credentials v1.17.27 h1:2raNba6gr2IfA0eqqiP2XiQ0UVOpGPgDSi0I9iAP+UI= +github.com/aws/aws-sdk-go-v2/credentials v1.17.27/go.mod h1:gniiwbGahQByxan6YjQUMcW4Aov6bLC3m+evgcoN4r4= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11 h1:KreluoV8FZDEtI6Co2xuNk/UqI9iwMrOx/87PBNIKqw= github.com/aws/aws-sdk-go-v2/feature/ec2/imds 
v1.16.11/go.mod h1:SeSUYBLsMYFoRvHE0Tjvn7kbxaUhl75CJi1sbfhMxkU= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.7 h1:kNemAUX+bJFBSfPkGVZ8HFOKIadjLoI2Ua1ZKivhGSo= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.7/go.mod h1:71S2C1g/Zjn+ANmyoOqJ586OrPF9uC9iiHt9ZAT+MOw= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.8 h1:u1KOU1S15ufyZqmH/rA3POkiRH6EcDANHj2xHRzq+zc= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.8/go.mod h1:WPv2FRnkIOoDv/8j2gSUsI4qDc7392w5anFB/I89GZ8= github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15 h1:SoNJ4RlFEQEbtDcCEt+QG56MY4fm4W8rYirAmq+/DdU= github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15/go.mod h1:U9ke74k1n2bf+RIgoX1SXFed1HLs51OgUSs+Ph0KJP8= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15 h1:C6WHdGnTDIYETAm5iErQUiVNsclNx9qbJVPIt03B6bI= @@ -50,8 +50,8 @@ github.com/aws/aws-sdk-go-v2/service/account v1.19.3 h1:w/ZZ69+nzIYoussDQvIqyezI github.com/aws/aws-sdk-go-v2/service/account v1.19.3/go.mod h1:s7hT4ZWjp8GoSr0z8d5ZsJ8k+C2g4AsknLtmQaJgp0c= github.com/aws/aws-sdk-go-v2/service/acm v1.28.4 h1:wiW1Y6/1lysA0eJZRq0I53YYKuV9MNAzL15z2eZRlEE= github.com/aws/aws-sdk-go-v2/service/acm v1.28.4/go.mod h1:bzjymHHRhexkSMIvUHMpKydo9U82bmqQ5ru0IzYM8m8= -github.com/aws/aws-sdk-go-v2/service/acmpca v1.34.0 h1:CCaeK/FqBo/fmhSSqY0K8buep/ELBDEWc8IoOjf2piM= -github.com/aws/aws-sdk-go-v2/service/acmpca v1.34.0/go.mod h1:vDUysl9ROGF6GAsl1OgTg6xHDnw391hCc5+IYg2U/GQ= +github.com/aws/aws-sdk-go-v2/service/acmpca v1.35.0 h1:GZ7eaCsYZar0pOQPzBJeP8ImFEzDpPFbJ52JCiF9HQ4= +github.com/aws/aws-sdk-go-v2/service/acmpca v1.35.0/go.mod h1:vDUysl9ROGF6GAsl1OgTg6xHDnw391hCc5+IYg2U/GQ= github.com/aws/aws-sdk-go-v2/service/amp v1.27.3 h1:o1cMErMp45oKZ2ScvBOdVXYhvu6FdUcz0Xn+JpDd408= github.com/aws/aws-sdk-go-v2/service/amp v1.27.3/go.mod h1:TuSBSV1IedYHHrC4A3bW84WjQXNSzc6XasgvuDRDb4E= github.com/aws/aws-sdk-go-v2/service/amplify v1.23.3 h1:Plmg9O8/Pt4SKvPtUfSqCfv+SSSllouzlISFcvHK4bM= @@ -204,8 +204,8 @@ github.com/aws/aws-sdk-go-v2/service/drs v1.28.3 h1:ss4Ib/kWbYA4pveQtSOluDE/Kf0e github.com/aws/aws-sdk-go-v2/service/drs v1.28.3/go.mod h1:tjzPl3EOCkojHm9Q4y+Kuq7GGSJJw/P0UIqc4eHvtFI= github.com/aws/aws-sdk-go-v2/service/dynamodb v1.34.3 h1:nEhZKd1JQ4EB1tekcqW1oIVpDC1ZFrjrp/cLC5MXjFQ= github.com/aws/aws-sdk-go-v2/service/dynamodb v1.34.3/go.mod h1:q9vzW3Xr1KEXa8n4waHiFt1PrppNDlMymlYP+xpsFbY= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.170.0 h1:zPwhEYn3Y83mnnr9QG+i6NTiAbVbcJe6RpCSJKHIQNE= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.170.0/go.mod h1:9KdiRVKTZyPRTlbX3i41FxTV+5OatZ7xOJCN4lleX7g= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.171.0 h1:r398oizT1O8AdQGpnxOMOIstEAAb3PPW5QZsL8w4Ujc= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.171.0/go.mod h1:9KdiRVKTZyPRTlbX3i41FxTV+5OatZ7xOJCN4lleX7g= github.com/aws/aws-sdk-go-v2/service/ecr v1.30.3 h1:+v2hv29pWaVDASIScHuUhDC93nqJGVlGf6cujrJMHZE= github.com/aws/aws-sdk-go-v2/service/ecr v1.30.3/go.mod h1:RhaP7Wil0+uuuhiE4FzOOEFZwkmFAk1ZflXzK+O3ptU= github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.25.3 h1:n2eqzO9VabUkd77b88Hos6OEtbGohB/TRrtXLTZi38Y= @@ -234,8 +234,8 @@ github.com/aws/aws-sdk-go-v2/service/evidently v1.21.3 h1:bAuNjv1PmyZvjojnXlozw6 github.com/aws/aws-sdk-go-v2/service/evidently v1.21.3/go.mod h1:EtC1+tObvVB/l/c9Dh6IILA/r/cu9Pc17S870zRihq4= github.com/aws/aws-sdk-go-v2/service/finspace v1.26.3 h1:Y8VS/XHyeJ1cxSCtmvUOFLqfNIl9rASWOE/gsrydGFw= github.com/aws/aws-sdk-go-v2/service/finspace v1.26.3/go.mod h1:xbE7o+ADq+h0DeKA/05618ox75wY/jtoZTF9XuvSvnI= -github.com/aws/aws-sdk-go-v2/service/firehose 
v1.31.3 h1:BMYs3DZYSIaIDhkPSsAUeobQ7Z0ipNRJSiFTP2C4RWE= -github.com/aws/aws-sdk-go-v2/service/firehose v1.31.3/go.mod h1:8rN4JsVXcCHl/f4hwOWVuy+iQ5iolXOdSX+QFYZyubw= +github.com/aws/aws-sdk-go-v2/service/firehose v1.32.0 h1:1ovnU04ZuvpaqJUGmqrcwJ9xZViHmdJpZQ0NUqMT5co= +github.com/aws/aws-sdk-go-v2/service/firehose v1.32.0/go.mod h1:8rN4JsVXcCHl/f4hwOWVuy+iQ5iolXOdSX+QFYZyubw= github.com/aws/aws-sdk-go-v2/service/fis v1.26.3 h1:NwddG0xUTBM2zoq4D8rotQmT2Z/S8IGM+D2wYzKFSQs= github.com/aws/aws-sdk-go-v2/service/fis v1.26.3/go.mod h1:QmdVf0N/vrhckZLHK4x+f+u9EUuMhetsRgu1rjU1eL0= github.com/aws/aws-sdk-go-v2/service/fms v1.35.3 h1:QeYAz3JhpkTxkS+fifDBfmgWFdSRBI21MQzN2bCO1xo= @@ -278,8 +278,8 @@ github.com/aws/aws-sdk-go-v2/service/iotanalytics v1.24.3 h1:SEt8SRvlGvnOkqDV5PJ github.com/aws/aws-sdk-go-v2/service/iotanalytics v1.24.3/go.mod h1:XDi19IK0UluaSVnm1mu2AakZKHtWjg6gksitvH7+LQw= github.com/aws/aws-sdk-go-v2/service/iotevents v1.25.3 h1:9Lao6kmD9P+yywuIn9I8hrraJ2jHIztU/GJspIxn6lA= github.com/aws/aws-sdk-go-v2/service/iotevents v1.25.3/go.mod h1:V2BDVrnP+Tn+MM1xxFI7Qcb+YPhiGgY5PUoKzrKHaCQ= -github.com/aws/aws-sdk-go-v2/service/ivschat v1.14.3 h1:d7y5Gs9BfO+1Jhj8y1/lZhegiJXXy/DlanzwRgYrkXM= -github.com/aws/aws-sdk-go-v2/service/ivschat v1.14.3/go.mod h1:rtw6VOH+4X/TWoOKQlOC+oq/WBDJD4BqaPi930II6Mk= +github.com/aws/aws-sdk-go-v2/service/ivschat v1.14.4 h1:isEOoNaq94Wh+x1tt8ScUeV+oQUH2I+5zjCmqCkUa/g= +github.com/aws/aws-sdk-go-v2/service/ivschat v1.14.4/go.mod h1:rtw6VOH+4X/TWoOKQlOC+oq/WBDJD4BqaPi930II6Mk= github.com/aws/aws-sdk-go-v2/service/kafka v1.35.3 h1:MUx27PrqicGxgsiDWo7xv/Zsl4b0X8kHCRvMpX7XrQs= github.com/aws/aws-sdk-go-v2/service/kafka v1.35.3/go.mod h1:mBWO7tOHjEvfZ88cUBhCfViO9vclCumFcTeiR1cB4IA= github.com/aws/aws-sdk-go-v2/service/kafkaconnect v1.19.3 h1:jJyh5SN/b78UZjIsVqM8/N5GQsD12sEvM2g5bVsFVhg= @@ -310,8 +310,8 @@ github.com/aws/aws-sdk-go-v2/service/mediaconnect v1.32.0 h1:Y79CoATONI7M7deTCC5 github.com/aws/aws-sdk-go-v2/service/mediaconnect v1.32.0/go.mod h1:6cpEF3W3oCNX9shBj9N3lrehYdxLuzDbYZdhOiaoN94= github.com/aws/aws-sdk-go-v2/service/mediaconvert v1.57.3 h1:1ls4o+377rEfTuZ4YaqDrSo75qpC1ySv8m2FfVk23tw= github.com/aws/aws-sdk-go-v2/service/mediaconvert v1.57.3/go.mod h1:JAiHALb6LfTclPNBdUUTL8xmDZcwBCTbSVgJEkgiIv4= -github.com/aws/aws-sdk-go-v2/service/medialive v1.54.3 h1:9aDpYGrfgFjfvzOdAfMcEdGbWa3l/1RjGtOr4On9Kd4= -github.com/aws/aws-sdk-go-v2/service/medialive v1.54.3/go.mod h1:49kVyWdlOWpusFyzDrmxCG9PqXlKtpKmHYoTv5h1O5k= +github.com/aws/aws-sdk-go-v2/service/medialive v1.55.0 h1:H1cqEGQ5rFpTPZawnOH8txXm2+KksldmlaIrAGBifOc= +github.com/aws/aws-sdk-go-v2/service/medialive v1.55.0/go.mod h1:49kVyWdlOWpusFyzDrmxCG9PqXlKtpKmHYoTv5h1O5k= github.com/aws/aws-sdk-go-v2/service/mediapackage v1.32.3 h1:fBtklFkqk6QhJBzSBgNJiwWySt1RvspmvCvY+giXgdI= github.com/aws/aws-sdk-go-v2/service/mediapackage v1.32.3/go.mod h1:BejXbLdRRWr6uMl4wZrz3iAcJDVgJu3EEstqDq8wxEE= github.com/aws/aws-sdk-go-v2/service/mediapackagev2 v1.14.3 h1:ytQ77lC/wrYatbiLSZlYSpgjzvtgXBey0xxRsBA4swY= @@ -324,8 +324,8 @@ github.com/aws/aws-sdk-go-v2/service/mwaa v1.29.4 h1:lptYTP7Br5zll9USf2aKY1ZlN69 github.com/aws/aws-sdk-go-v2/service/mwaa v1.29.4/go.mod h1:mtgvj3nNI+LiRNT07JaHbTh6E/y8QRrClvd+/GMhMS4= github.com/aws/aws-sdk-go-v2/service/neptunegraph v1.10.3 h1:XEbvRhi+ELazJaqh8k0KgTZrAgXM3rmR0hsGPTIpUIo= github.com/aws/aws-sdk-go-v2/service/neptunegraph v1.10.3/go.mod h1:tfCOS8E/SwIkqHHGgpwRZTly3ZQxcsORZPEVBKMkbx4= -github.com/aws/aws-sdk-go-v2/service/networkfirewall v1.40.3 
h1:hb3i/o9ouQj6RZjykyGI1koOfp22/ZMuWpuPfeu+zNE= -github.com/aws/aws-sdk-go-v2/service/networkfirewall v1.40.3/go.mod h1:Y9mINPJv+o9q8Ztr5/PRh2C1Iynik64IhPzwe2ERGqQ= +github.com/aws/aws-sdk-go-v2/service/networkfirewall v1.40.4 h1:NxceAAooi4+0/rIKXKqFUOVoT1rrVZKo6sLFuyITWPs= +github.com/aws/aws-sdk-go-v2/service/networkfirewall v1.40.4/go.mod h1:Y9mINPJv+o9q8Ztr5/PRh2C1Iynik64IhPzwe2ERGqQ= github.com/aws/aws-sdk-go-v2/service/networkmonitor v1.5.3 h1:I+m+rITTdVA9BNJeuCzYgMQjqbUE10xcY0OqgBvFEFE= github.com/aws/aws-sdk-go-v2/service/networkmonitor v1.5.3/go.mod h1:R+4X5haYg3eRWYb99y+m1UhlVjFrHNlcfl3WES5e1oQ= github.com/aws/aws-sdk-go-v2/service/oam v1.13.3 h1:KCbGN36Q/qQ27mv+/4BSax0q6/KSAxh3K3R+gRhNHwg= @@ -354,8 +354,8 @@ github.com/aws/aws-sdk-go-v2/service/ram v1.27.3 h1:MoQ0up3IiE2fl0+qySx3Lb0swK6G github.com/aws/aws-sdk-go-v2/service/ram v1.27.3/go.mod h1:XymSCzlSx2QjdvU/KdV/+niPQBZRC1A8luPDFz3pjyg= github.com/aws/aws-sdk-go-v2/service/rbin v1.18.3 h1:5Izo7ZI9zrvG9VLpJdnDl97gNyCFr310RtriuKIJgFk= github.com/aws/aws-sdk-go-v2/service/rbin v1.18.3/go.mod h1:GlAG1tgrchQnNlO/fxXLmmF6t+v+9fQMNHNdW7Zc8Zc= -github.com/aws/aws-sdk-go-v2/service/rds v1.81.4 h1:tBtjOMKyEWLvsO6HaX6A+0A0V1gKcU2aSZKQXw6MSCM= -github.com/aws/aws-sdk-go-v2/service/rds v1.81.4/go.mod h1:j27FNXhbbHXC3ExFsJkoxq2Y+4dQypf8KFX1IkgwVvM= +github.com/aws/aws-sdk-go-v2/service/rds v1.81.5 h1:0vEV6OFcCInf/G98MIwwNJM21cd0g+8/jcxXNE40pJA= +github.com/aws/aws-sdk-go-v2/service/rds v1.81.5/go.mod h1:j27FNXhbbHXC3ExFsJkoxq2Y+4dQypf8KFX1IkgwVvM= github.com/aws/aws-sdk-go-v2/service/redshift v1.46.4 h1:wNBruTRRDfBv2Pz3Mvw6JIJS7ujfTd1ztCG5pIlrfRk= github.com/aws/aws-sdk-go-v2/service/redshift v1.46.4/go.mod h1:AhuwOvTE4nMwWfJQNZ2khZGV9yXexB2MjNYtCuLQA4s= github.com/aws/aws-sdk-go-v2/service/redshiftdata v1.27.3 h1:rtX1ZHGPpqbQGZlPuN1u7nA+0zjq0DB7QTVNlYY/gfw= @@ -388,8 +388,8 @@ github.com/aws/aws-sdk-go-v2/service/scheduler v1.10.3 h1:gmpU7E0ntMzXr+yQQIXbii github.com/aws/aws-sdk-go-v2/service/scheduler v1.10.3/go.mod h1:jnQp5kPPvEgPmVPm0h/XZPmlx7DQ0pqUiISRO4s6U3s= github.com/aws/aws-sdk-go-v2/service/schemas v1.26.3 h1:ZJW2OQNpkR8P7URtISmF8twpvz2V0tUN/OgMenlxkao= github.com/aws/aws-sdk-go-v2/service/schemas v1.26.3/go.mod h1:QcRvTKZ9cBv6TlZECUStXI1z1qlCMWKpPi/ZefknVpQ= -github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.32.3 h1:ilavrucVBQHYnMjD2KmZQDCU1fuluQb0l9zRigGNVEc= -github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.32.3/go.mod h1:TKKN7IQoM7uTnyuFm9bm9cw5P//ZYTl4m3htBWQ1G/c= +github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.32.4 h1:NgRFYyFpiMD62y4VPXh4DosPFbZd4vdMVBWKk0VmWXc= +github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.32.4/go.mod h1:TKKN7IQoM7uTnyuFm9bm9cw5P//ZYTl4m3htBWQ1G/c= github.com/aws/aws-sdk-go-v2/service/securityhub v1.51.3 h1:tFzkGJZKDWgwGDSQXwxZK7Bm3NzlKOW6KwNr14xXZqc= github.com/aws/aws-sdk-go-v2/service/securityhub v1.51.3/go.mod h1:MfWlz2hEZ2O0XdyBBJNtF6qUZwpHtvc892BU7gludBw= github.com/aws/aws-sdk-go-v2/service/securitylake v1.16.3 h1:7isk2tSNmVbm2f8epPfokkHjjWfwS46IpNNmI+rarUo= @@ -422,8 +422,8 @@ github.com/aws/aws-sdk-go-v2/service/ssmincidents v1.32.3 h1:IXODiFsgKoyW7QVWWHo github.com/aws/aws-sdk-go-v2/service/ssmincidents v1.32.3/go.mod h1:JvtI6itHlTxyGew0oT7xYNbF7OA767givRMsCuBFK5k= github.com/aws/aws-sdk-go-v2/service/ssmsap v1.15.3 h1:vBcoorWl+c4r5un837H8fhLoS0Kc8SKlGBHpyq7KM9w= github.com/aws/aws-sdk-go-v2/service/ssmsap v1.15.3/go.mod h1:Mq0FruBai8A9f7fpzjcfD+S+y0I4DkZTygb3HxuqDB4= -github.com/aws/aws-sdk-go-v2/service/sso v1.22.3 h1:Fv1vD2L65Jnp5QRsdiM64JvUM4Xe+E0JyVsRQKv6IeA= 
-github.com/aws/aws-sdk-go-v2/service/sso v1.22.3/go.mod h1:ooyCOXjvJEsUw7x+ZDHeISPMhtwI3ZCB7ggFMcFfWLU= +github.com/aws/aws-sdk-go-v2/service/sso v1.22.4 h1:BXx0ZIxvrJdSgSvKTZ+yRBeSqqgPM89VPlulEcl37tM= +github.com/aws/aws-sdk-go-v2/service/sso v1.22.4/go.mod h1:ooyCOXjvJEsUw7x+ZDHeISPMhtwI3ZCB7ggFMcFfWLU= github.com/aws/aws-sdk-go-v2/service/ssoadmin v1.27.3 h1:pBE7FzR3AUpauidRUITPlDWTQ4hHktI649xZt3e/wKM= github.com/aws/aws-sdk-go-v2/service/ssoadmin v1.27.3/go.mod h1:EyoPT+dUT5zqspxSub9KHDWOZyIP30bPgIavBvGGVz0= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4 h1:yiwVzJW2ZxZTurVbYWA7QOrAaCYQR72t0wrSBfoesUE= diff --git a/tools/tfsdk2fw/go.mod b/tools/tfsdk2fw/go.mod index 65b12ffb4a1..797605f08d9 100644 --- a/tools/tfsdk2fw/go.mod +++ b/tools/tfsdk2fw/go.mod @@ -18,13 +18,13 @@ require ( github.com/agext/levenshtein v1.2.3 // indirect github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect github.com/armon/go-radix v1.0.0 // indirect - github.com/aws/aws-sdk-go v1.54.19 // indirect + github.com/aws/aws-sdk-go v1.54.20 // indirect github.com/aws/aws-sdk-go-v2 v1.30.3 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3 // indirect - github.com/aws/aws-sdk-go-v2/config v1.27.26 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.17.26 // indirect + github.com/aws/aws-sdk-go-v2/config v1.27.27 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.27 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11 // indirect - github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.7 // indirect + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.8 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15 // indirect github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect @@ -32,7 +32,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/accessanalyzer v1.32.3 // indirect github.com/aws/aws-sdk-go-v2/service/account v1.19.3 // indirect github.com/aws/aws-sdk-go-v2/service/acm v1.28.4 // indirect - github.com/aws/aws-sdk-go-v2/service/acmpca v1.34.0 // indirect + github.com/aws/aws-sdk-go-v2/service/acmpca v1.35.0 // indirect github.com/aws/aws-sdk-go-v2/service/amp v1.27.3 // indirect github.com/aws/aws-sdk-go-v2/service/amplify v1.23.3 // indirect github.com/aws/aws-sdk-go-v2/service/apigateway v1.25.3 // indirect @@ -58,6 +58,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/bedrockagent v1.16.0 // indirect github.com/aws/aws-sdk-go-v2/service/budgets v1.25.3 // indirect github.com/aws/aws-sdk-go-v2/service/chatbot v1.4.3 // indirect + github.com/aws/aws-sdk-go-v2/service/chime v1.32.3 // indirect github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines v1.18.3 // indirect github.com/aws/aws-sdk-go-v2/service/chimesdkvoice v1.17.3 // indirect github.com/aws/aws-sdk-go-v2/service/cleanrooms v1.14.3 // indirect @@ -95,6 +96,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/databasemigrationservice v1.40.3 // indirect github.com/aws/aws-sdk-go-v2/service/databrew v1.31.3 // indirect github.com/aws/aws-sdk-go-v2/service/dataexchange v1.30.3 // indirect + github.com/aws/aws-sdk-go-v2/service/datapipeline v1.23.3 // indirect github.com/aws/aws-sdk-go-v2/service/datasync v1.40.3 // indirect github.com/aws/aws-sdk-go-v2/service/datazone v1.13.2 // indirect github.com/aws/aws-sdk-go-v2/service/dax v1.21.3 // indirect @@ -107,7 +109,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/docdbelastic v1.11.3 // indirect github.com/aws/aws-sdk-go-v2/service/drs 
v1.28.3 // indirect github.com/aws/aws-sdk-go-v2/service/dynamodb v1.34.3 // indirect - github.com/aws/aws-sdk-go-v2/service/ec2 v1.170.0 // indirect + github.com/aws/aws-sdk-go-v2/service/ec2 v1.171.0 // indirect github.com/aws/aws-sdk-go-v2/service/ecr v1.30.3 // indirect github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.25.3 // indirect github.com/aws/aws-sdk-go-v2/service/ecs v1.44.3 // indirect @@ -122,7 +124,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/eventbridge v1.33.3 // indirect github.com/aws/aws-sdk-go-v2/service/evidently v1.21.3 // indirect github.com/aws/aws-sdk-go-v2/service/finspace v1.26.3 // indirect - github.com/aws/aws-sdk-go-v2/service/firehose v1.31.3 // indirect + github.com/aws/aws-sdk-go-v2/service/firehose v1.32.0 // indirect github.com/aws/aws-sdk-go-v2/service/fis v1.26.3 // indirect github.com/aws/aws-sdk-go-v2/service/fms v1.35.3 // indirect github.com/aws/aws-sdk-go-v2/service/glacier v1.24.3 // indirect @@ -144,7 +146,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/iot v1.55.3 // indirect github.com/aws/aws-sdk-go-v2/service/iotanalytics v1.24.3 // indirect github.com/aws/aws-sdk-go-v2/service/iotevents v1.25.3 // indirect - github.com/aws/aws-sdk-go-v2/service/ivschat v1.14.3 // indirect + github.com/aws/aws-sdk-go-v2/service/ivschat v1.14.4 // indirect github.com/aws/aws-sdk-go-v2/service/kafka v1.35.3 // indirect github.com/aws/aws-sdk-go-v2/service/kafkaconnect v1.19.3 // indirect github.com/aws/aws-sdk-go-v2/service/kendra v1.52.3 // indirect @@ -160,14 +162,14 @@ require ( github.com/aws/aws-sdk-go-v2/service/m2 v1.15.3 // indirect github.com/aws/aws-sdk-go-v2/service/mediaconnect v1.32.0 // indirect github.com/aws/aws-sdk-go-v2/service/mediaconvert v1.57.3 // indirect - github.com/aws/aws-sdk-go-v2/service/medialive v1.54.3 // indirect + github.com/aws/aws-sdk-go-v2/service/medialive v1.55.0 // indirect github.com/aws/aws-sdk-go-v2/service/mediapackage v1.32.3 // indirect github.com/aws/aws-sdk-go-v2/service/mediapackagev2 v1.14.3 // indirect github.com/aws/aws-sdk-go-v2/service/mediastore v1.22.3 // indirect github.com/aws/aws-sdk-go-v2/service/mq v1.25.3 // indirect github.com/aws/aws-sdk-go-v2/service/mwaa v1.29.4 // indirect github.com/aws/aws-sdk-go-v2/service/neptunegraph v1.10.3 // indirect - github.com/aws/aws-sdk-go-v2/service/networkfirewall v1.40.3 // indirect + github.com/aws/aws-sdk-go-v2/service/networkfirewall v1.40.4 // indirect github.com/aws/aws-sdk-go-v2/service/networkmonitor v1.5.3 // indirect github.com/aws/aws-sdk-go-v2/service/oam v1.13.3 // indirect github.com/aws/aws-sdk-go-v2/service/opensearchserverless v1.13.3 // indirect @@ -182,7 +184,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/qldb v1.23.3 // indirect github.com/aws/aws-sdk-go-v2/service/ram v1.27.3 // indirect github.com/aws/aws-sdk-go-v2/service/rbin v1.18.3 // indirect - github.com/aws/aws-sdk-go-v2/service/rds v1.81.4 // indirect + github.com/aws/aws-sdk-go-v2/service/rds v1.81.5 // indirect github.com/aws/aws-sdk-go-v2/service/redshift v1.46.4 // indirect github.com/aws/aws-sdk-go-v2/service/redshiftdata v1.27.3 // indirect github.com/aws/aws-sdk-go-v2/service/redshiftserverless v1.20.3 // indirect @@ -199,7 +201,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/s3control v1.46.3 // indirect github.com/aws/aws-sdk-go-v2/service/scheduler v1.10.3 // indirect github.com/aws/aws-sdk-go-v2/service/schemas v1.26.3 // indirect - github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.32.3 // indirect + 
github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.32.4 // indirect github.com/aws/aws-sdk-go-v2/service/securityhub v1.51.3 // indirect github.com/aws/aws-sdk-go-v2/service/securitylake v1.16.3 // indirect github.com/aws/aws-sdk-go-v2/service/serverlessapplicationrepository v1.22.3 // indirect @@ -216,7 +218,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/ssmcontacts v1.24.3 // indirect github.com/aws/aws-sdk-go-v2/service/ssmincidents v1.32.3 // indirect github.com/aws/aws-sdk-go-v2/service/ssmsap v1.15.3 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.22.3 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.22.4 // indirect github.com/aws/aws-sdk-go-v2/service/ssoadmin v1.27.3 // indirect github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4 // indirect github.com/aws/aws-sdk-go-v2/service/sts v1.30.3 // indirect diff --git a/tools/tfsdk2fw/go.sum b/tools/tfsdk2fw/go.sum index 15537020e1b..8d47b0c2ed6 100644 --- a/tools/tfsdk2fw/go.sum +++ b/tools/tfsdk2fw/go.sum @@ -22,20 +22,20 @@ github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/aws/aws-sdk-go v1.54.19 h1:tyWV+07jagrNiCcGRzRhdtVjQs7Vy41NwsuOcl0IbVI= -github.com/aws/aws-sdk-go v1.54.19/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= +github.com/aws/aws-sdk-go v1.54.20 h1:FZ2UcXya7bUkvkpf7TaPmiL7EubK0go1nlXGLRwEsoo= +github.com/aws/aws-sdk-go v1.54.20/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= github.com/aws/aws-sdk-go-v2 v1.30.3 h1:jUeBtG0Ih+ZIFH0F4UkmL9w3cSpaMv9tYYDbzILP8dY= github.com/aws/aws-sdk-go-v2 v1.30.3/go.mod h1:nIQjQVp5sfpQcTc9mPSr1B0PaWK5ByX9MOoDadSN4lc= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3 h1:tW1/Rkad38LA15X4UQtjXZXNKsCgkshC3EbmcUmghTg= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3/go.mod h1:UbnqO+zjqk3uIt9yCACHJ9IVNhyhOCnYk8yA19SAWrM= -github.com/aws/aws-sdk-go-v2/config v1.27.26 h1:T1kAefbKuNum/AbShMsZEro6eRkeOT8YILfE9wyjAYQ= -github.com/aws/aws-sdk-go-v2/config v1.27.26/go.mod h1:ivWHkAWFrw/nxty5Fku7soTIVdqZaZ7dw+tc5iGW3GA= -github.com/aws/aws-sdk-go-v2/credentials v1.17.26 h1:tsm8g/nJxi8+/7XyJJcP2dLrnK/5rkFp6+i2nhmz5fk= -github.com/aws/aws-sdk-go-v2/credentials v1.17.26/go.mod h1:3vAM49zkIa3q8WT6o9Ve5Z0vdByDMwmdScO0zvThTgI= +github.com/aws/aws-sdk-go-v2/config v1.27.27 h1:HdqgGt1OAP0HkEDDShEl0oSYa9ZZBSOmKpdpsDMdO90= +github.com/aws/aws-sdk-go-v2/config v1.27.27/go.mod h1:MVYamCg76dFNINkZFu4n4RjDixhVr51HLj4ErWzrVwg= +github.com/aws/aws-sdk-go-v2/credentials v1.17.27 h1:2raNba6gr2IfA0eqqiP2XiQ0UVOpGPgDSi0I9iAP+UI= +github.com/aws/aws-sdk-go-v2/credentials v1.17.27/go.mod h1:gniiwbGahQByxan6YjQUMcW4Aov6bLC3m+evgcoN4r4= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11 h1:KreluoV8FZDEtI6Co2xuNk/UqI9iwMrOx/87PBNIKqw= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11/go.mod h1:SeSUYBLsMYFoRvHE0Tjvn7kbxaUhl75CJi1sbfhMxkU= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.7 h1:kNemAUX+bJFBSfPkGVZ8HFOKIadjLoI2Ua1ZKivhGSo= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.7/go.mod h1:71S2C1g/Zjn+ANmyoOqJ586OrPF9uC9iiHt9ZAT+MOw= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.8 h1:u1KOU1S15ufyZqmH/rA3POkiRH6EcDANHj2xHRzq+zc= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.8/go.mod 
h1:WPv2FRnkIOoDv/8j2gSUsI4qDc7392w5anFB/I89GZ8= github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15 h1:SoNJ4RlFEQEbtDcCEt+QG56MY4fm4W8rYirAmq+/DdU= github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15/go.mod h1:U9ke74k1n2bf+RIgoX1SXFed1HLs51OgUSs+Ph0KJP8= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15 h1:C6WHdGnTDIYETAm5iErQUiVNsclNx9qbJVPIt03B6bI= @@ -50,8 +50,8 @@ github.com/aws/aws-sdk-go-v2/service/account v1.19.3 h1:w/ZZ69+nzIYoussDQvIqyezI github.com/aws/aws-sdk-go-v2/service/account v1.19.3/go.mod h1:s7hT4ZWjp8GoSr0z8d5ZsJ8k+C2g4AsknLtmQaJgp0c= github.com/aws/aws-sdk-go-v2/service/acm v1.28.4 h1:wiW1Y6/1lysA0eJZRq0I53YYKuV9MNAzL15z2eZRlEE= github.com/aws/aws-sdk-go-v2/service/acm v1.28.4/go.mod h1:bzjymHHRhexkSMIvUHMpKydo9U82bmqQ5ru0IzYM8m8= -github.com/aws/aws-sdk-go-v2/service/acmpca v1.34.0 h1:CCaeK/FqBo/fmhSSqY0K8buep/ELBDEWc8IoOjf2piM= -github.com/aws/aws-sdk-go-v2/service/acmpca v1.34.0/go.mod h1:vDUysl9ROGF6GAsl1OgTg6xHDnw391hCc5+IYg2U/GQ= +github.com/aws/aws-sdk-go-v2/service/acmpca v1.35.0 h1:GZ7eaCsYZar0pOQPzBJeP8ImFEzDpPFbJ52JCiF9HQ4= +github.com/aws/aws-sdk-go-v2/service/acmpca v1.35.0/go.mod h1:vDUysl9ROGF6GAsl1OgTg6xHDnw391hCc5+IYg2U/GQ= github.com/aws/aws-sdk-go-v2/service/amp v1.27.3 h1:o1cMErMp45oKZ2ScvBOdVXYhvu6FdUcz0Xn+JpDd408= github.com/aws/aws-sdk-go-v2/service/amp v1.27.3/go.mod h1:TuSBSV1IedYHHrC4A3bW84WjQXNSzc6XasgvuDRDb4E= github.com/aws/aws-sdk-go-v2/service/amplify v1.23.3 h1:Plmg9O8/Pt4SKvPtUfSqCfv+SSSllouzlISFcvHK4bM= @@ -102,6 +102,8 @@ github.com/aws/aws-sdk-go-v2/service/budgets v1.25.3 h1:BfuKcgSyNTzS2N57JSM4uQ/d github.com/aws/aws-sdk-go-v2/service/budgets v1.25.3/go.mod h1:QJ119U4g137qbYZRXqFxtvyARMT88athXWt9gYcRBjM= github.com/aws/aws-sdk-go-v2/service/chatbot v1.4.3 h1:BFVoEcC9czVq0/KHdNheLtPUGjBvu133EfgIF0hO3SI= github.com/aws/aws-sdk-go-v2/service/chatbot v1.4.3/go.mod h1:9jB/CYDhmh+LPD3iRNnu4Zj+9A3AMoBQkxPp1j8reSs= +github.com/aws/aws-sdk-go-v2/service/chime v1.32.3 h1:teC5V+ROwSQM81JEcpM5X1B+YCnf/auLo88wO4h8/iU= +github.com/aws/aws-sdk-go-v2/service/chime v1.32.3/go.mod h1:3C8KIz5+H5gCLKTvQIrZtofV5J6E7NyRCOBvaViDzO8= github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines v1.18.3 h1:NY/98Ry+J3xzQXaH9uy8KXya6JiOnoXjFqGLL7aKHLw= github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines v1.18.3/go.mod h1:AC5wH108q+kaTSjuQoKoKCH4fxGKoteUMRPb0wLYzGI= github.com/aws/aws-sdk-go-v2/service/chimesdkvoice v1.17.3 h1:e8mAmTy94SOhD/KdTRpocBj6+KOyxjQg7JYN1oBjT08= @@ -176,6 +178,8 @@ github.com/aws/aws-sdk-go-v2/service/databrew v1.31.3 h1:tFFs24+oIWlHLbTyluhnQIH github.com/aws/aws-sdk-go-v2/service/databrew v1.31.3/go.mod h1:WP7xXB608MyVv3yFzduKlLeYmU0AxMo7zeF9Cuwbvwc= github.com/aws/aws-sdk-go-v2/service/dataexchange v1.30.3 h1:GndlSdjdgcW1r+mGL635+6ZlwXgdu/663aHHyBJ6Jtk= github.com/aws/aws-sdk-go-v2/service/dataexchange v1.30.3/go.mod h1:xUxKkSfH4sCQixoxh3pYc7C4N+OH2POgS0dhkOzR+u8= +github.com/aws/aws-sdk-go-v2/service/datapipeline v1.23.3 h1:kA26fZh30b6kOZZIkxr/1M4f4TnIsXBw3RcHEFuFxcs= +github.com/aws/aws-sdk-go-v2/service/datapipeline v1.23.3/go.mod h1:9Z4AiKwAlu2eXOPFEDfkLV/wTpI9o2FX09M4l6E4VE4= github.com/aws/aws-sdk-go-v2/service/datasync v1.40.3 h1:ZrKMl8jsL5YHurOLf0YVLb7JBYxGtqQQAknJ5g4MTz4= github.com/aws/aws-sdk-go-v2/service/datasync v1.40.3/go.mod h1:+ObRlRcKO/p38yJSkpVZKlCU3t9PqXMORXC+xTkb9NU= github.com/aws/aws-sdk-go-v2/service/datazone v1.13.2 h1:9l6JiWZz/2Sp3ne9E/AXECwnzi7NASQUJnQ7xts/8oA= @@ -200,8 +204,8 @@ github.com/aws/aws-sdk-go-v2/service/drs v1.28.3 h1:ss4Ib/kWbYA4pveQtSOluDE/Kf0e 
github.com/aws/aws-sdk-go-v2/service/drs v1.28.3/go.mod h1:tjzPl3EOCkojHm9Q4y+Kuq7GGSJJw/P0UIqc4eHvtFI= github.com/aws/aws-sdk-go-v2/service/dynamodb v1.34.3 h1:nEhZKd1JQ4EB1tekcqW1oIVpDC1ZFrjrp/cLC5MXjFQ= github.com/aws/aws-sdk-go-v2/service/dynamodb v1.34.3/go.mod h1:q9vzW3Xr1KEXa8n4waHiFt1PrppNDlMymlYP+xpsFbY= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.170.0 h1:zPwhEYn3Y83mnnr9QG+i6NTiAbVbcJe6RpCSJKHIQNE= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.170.0/go.mod h1:9KdiRVKTZyPRTlbX3i41FxTV+5OatZ7xOJCN4lleX7g= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.171.0 h1:r398oizT1O8AdQGpnxOMOIstEAAb3PPW5QZsL8w4Ujc= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.171.0/go.mod h1:9KdiRVKTZyPRTlbX3i41FxTV+5OatZ7xOJCN4lleX7g= github.com/aws/aws-sdk-go-v2/service/ecr v1.30.3 h1:+v2hv29pWaVDASIScHuUhDC93nqJGVlGf6cujrJMHZE= github.com/aws/aws-sdk-go-v2/service/ecr v1.30.3/go.mod h1:RhaP7Wil0+uuuhiE4FzOOEFZwkmFAk1ZflXzK+O3ptU= github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.25.3 h1:n2eqzO9VabUkd77b88Hos6OEtbGohB/TRrtXLTZi38Y= @@ -230,8 +234,8 @@ github.com/aws/aws-sdk-go-v2/service/evidently v1.21.3 h1:bAuNjv1PmyZvjojnXlozw6 github.com/aws/aws-sdk-go-v2/service/evidently v1.21.3/go.mod h1:EtC1+tObvVB/l/c9Dh6IILA/r/cu9Pc17S870zRihq4= github.com/aws/aws-sdk-go-v2/service/finspace v1.26.3 h1:Y8VS/XHyeJ1cxSCtmvUOFLqfNIl9rASWOE/gsrydGFw= github.com/aws/aws-sdk-go-v2/service/finspace v1.26.3/go.mod h1:xbE7o+ADq+h0DeKA/05618ox75wY/jtoZTF9XuvSvnI= -github.com/aws/aws-sdk-go-v2/service/firehose v1.31.3 h1:BMYs3DZYSIaIDhkPSsAUeobQ7Z0ipNRJSiFTP2C4RWE= -github.com/aws/aws-sdk-go-v2/service/firehose v1.31.3/go.mod h1:8rN4JsVXcCHl/f4hwOWVuy+iQ5iolXOdSX+QFYZyubw= +github.com/aws/aws-sdk-go-v2/service/firehose v1.32.0 h1:1ovnU04ZuvpaqJUGmqrcwJ9xZViHmdJpZQ0NUqMT5co= +github.com/aws/aws-sdk-go-v2/service/firehose v1.32.0/go.mod h1:8rN4JsVXcCHl/f4hwOWVuy+iQ5iolXOdSX+QFYZyubw= github.com/aws/aws-sdk-go-v2/service/fis v1.26.3 h1:NwddG0xUTBM2zoq4D8rotQmT2Z/S8IGM+D2wYzKFSQs= github.com/aws/aws-sdk-go-v2/service/fis v1.26.3/go.mod h1:QmdVf0N/vrhckZLHK4x+f+u9EUuMhetsRgu1rjU1eL0= github.com/aws/aws-sdk-go-v2/service/fms v1.35.3 h1:QeYAz3JhpkTxkS+fifDBfmgWFdSRBI21MQzN2bCO1xo= @@ -274,8 +278,8 @@ github.com/aws/aws-sdk-go-v2/service/iotanalytics v1.24.3 h1:SEt8SRvlGvnOkqDV5PJ github.com/aws/aws-sdk-go-v2/service/iotanalytics v1.24.3/go.mod h1:XDi19IK0UluaSVnm1mu2AakZKHtWjg6gksitvH7+LQw= github.com/aws/aws-sdk-go-v2/service/iotevents v1.25.3 h1:9Lao6kmD9P+yywuIn9I8hrraJ2jHIztU/GJspIxn6lA= github.com/aws/aws-sdk-go-v2/service/iotevents v1.25.3/go.mod h1:V2BDVrnP+Tn+MM1xxFI7Qcb+YPhiGgY5PUoKzrKHaCQ= -github.com/aws/aws-sdk-go-v2/service/ivschat v1.14.3 h1:d7y5Gs9BfO+1Jhj8y1/lZhegiJXXy/DlanzwRgYrkXM= -github.com/aws/aws-sdk-go-v2/service/ivschat v1.14.3/go.mod h1:rtw6VOH+4X/TWoOKQlOC+oq/WBDJD4BqaPi930II6Mk= +github.com/aws/aws-sdk-go-v2/service/ivschat v1.14.4 h1:isEOoNaq94Wh+x1tt8ScUeV+oQUH2I+5zjCmqCkUa/g= +github.com/aws/aws-sdk-go-v2/service/ivschat v1.14.4/go.mod h1:rtw6VOH+4X/TWoOKQlOC+oq/WBDJD4BqaPi930II6Mk= github.com/aws/aws-sdk-go-v2/service/kafka v1.35.3 h1:MUx27PrqicGxgsiDWo7xv/Zsl4b0X8kHCRvMpX7XrQs= github.com/aws/aws-sdk-go-v2/service/kafka v1.35.3/go.mod h1:mBWO7tOHjEvfZ88cUBhCfViO9vclCumFcTeiR1cB4IA= github.com/aws/aws-sdk-go-v2/service/kafkaconnect v1.19.3 h1:jJyh5SN/b78UZjIsVqM8/N5GQsD12sEvM2g5bVsFVhg= @@ -306,8 +310,8 @@ github.com/aws/aws-sdk-go-v2/service/mediaconnect v1.32.0 h1:Y79CoATONI7M7deTCC5 github.com/aws/aws-sdk-go-v2/service/mediaconnect v1.32.0/go.mod h1:6cpEF3W3oCNX9shBj9N3lrehYdxLuzDbYZdhOiaoN94= 
github.com/aws/aws-sdk-go-v2/service/mediaconvert v1.57.3 h1:1ls4o+377rEfTuZ4YaqDrSo75qpC1ySv8m2FfVk23tw= github.com/aws/aws-sdk-go-v2/service/mediaconvert v1.57.3/go.mod h1:JAiHALb6LfTclPNBdUUTL8xmDZcwBCTbSVgJEkgiIv4= -github.com/aws/aws-sdk-go-v2/service/medialive v1.54.3 h1:9aDpYGrfgFjfvzOdAfMcEdGbWa3l/1RjGtOr4On9Kd4= -github.com/aws/aws-sdk-go-v2/service/medialive v1.54.3/go.mod h1:49kVyWdlOWpusFyzDrmxCG9PqXlKtpKmHYoTv5h1O5k= +github.com/aws/aws-sdk-go-v2/service/medialive v1.55.0 h1:H1cqEGQ5rFpTPZawnOH8txXm2+KksldmlaIrAGBifOc= +github.com/aws/aws-sdk-go-v2/service/medialive v1.55.0/go.mod h1:49kVyWdlOWpusFyzDrmxCG9PqXlKtpKmHYoTv5h1O5k= github.com/aws/aws-sdk-go-v2/service/mediapackage v1.32.3 h1:fBtklFkqk6QhJBzSBgNJiwWySt1RvspmvCvY+giXgdI= github.com/aws/aws-sdk-go-v2/service/mediapackage v1.32.3/go.mod h1:BejXbLdRRWr6uMl4wZrz3iAcJDVgJu3EEstqDq8wxEE= github.com/aws/aws-sdk-go-v2/service/mediapackagev2 v1.14.3 h1:ytQ77lC/wrYatbiLSZlYSpgjzvtgXBey0xxRsBA4swY= @@ -320,8 +324,8 @@ github.com/aws/aws-sdk-go-v2/service/mwaa v1.29.4 h1:lptYTP7Br5zll9USf2aKY1ZlN69 github.com/aws/aws-sdk-go-v2/service/mwaa v1.29.4/go.mod h1:mtgvj3nNI+LiRNT07JaHbTh6E/y8QRrClvd+/GMhMS4= github.com/aws/aws-sdk-go-v2/service/neptunegraph v1.10.3 h1:XEbvRhi+ELazJaqh8k0KgTZrAgXM3rmR0hsGPTIpUIo= github.com/aws/aws-sdk-go-v2/service/neptunegraph v1.10.3/go.mod h1:tfCOS8E/SwIkqHHGgpwRZTly3ZQxcsORZPEVBKMkbx4= -github.com/aws/aws-sdk-go-v2/service/networkfirewall v1.40.3 h1:hb3i/o9ouQj6RZjykyGI1koOfp22/ZMuWpuPfeu+zNE= -github.com/aws/aws-sdk-go-v2/service/networkfirewall v1.40.3/go.mod h1:Y9mINPJv+o9q8Ztr5/PRh2C1Iynik64IhPzwe2ERGqQ= +github.com/aws/aws-sdk-go-v2/service/networkfirewall v1.40.4 h1:NxceAAooi4+0/rIKXKqFUOVoT1rrVZKo6sLFuyITWPs= +github.com/aws/aws-sdk-go-v2/service/networkfirewall v1.40.4/go.mod h1:Y9mINPJv+o9q8Ztr5/PRh2C1Iynik64IhPzwe2ERGqQ= github.com/aws/aws-sdk-go-v2/service/networkmonitor v1.5.3 h1:I+m+rITTdVA9BNJeuCzYgMQjqbUE10xcY0OqgBvFEFE= github.com/aws/aws-sdk-go-v2/service/networkmonitor v1.5.3/go.mod h1:R+4X5haYg3eRWYb99y+m1UhlVjFrHNlcfl3WES5e1oQ= github.com/aws/aws-sdk-go-v2/service/oam v1.13.3 h1:KCbGN36Q/qQ27mv+/4BSax0q6/KSAxh3K3R+gRhNHwg= @@ -350,8 +354,8 @@ github.com/aws/aws-sdk-go-v2/service/ram v1.27.3 h1:MoQ0up3IiE2fl0+qySx3Lb0swK6G github.com/aws/aws-sdk-go-v2/service/ram v1.27.3/go.mod h1:XymSCzlSx2QjdvU/KdV/+niPQBZRC1A8luPDFz3pjyg= github.com/aws/aws-sdk-go-v2/service/rbin v1.18.3 h1:5Izo7ZI9zrvG9VLpJdnDl97gNyCFr310RtriuKIJgFk= github.com/aws/aws-sdk-go-v2/service/rbin v1.18.3/go.mod h1:GlAG1tgrchQnNlO/fxXLmmF6t+v+9fQMNHNdW7Zc8Zc= -github.com/aws/aws-sdk-go-v2/service/rds v1.81.4 h1:tBtjOMKyEWLvsO6HaX6A+0A0V1gKcU2aSZKQXw6MSCM= -github.com/aws/aws-sdk-go-v2/service/rds v1.81.4/go.mod h1:j27FNXhbbHXC3ExFsJkoxq2Y+4dQypf8KFX1IkgwVvM= +github.com/aws/aws-sdk-go-v2/service/rds v1.81.5 h1:0vEV6OFcCInf/G98MIwwNJM21cd0g+8/jcxXNE40pJA= +github.com/aws/aws-sdk-go-v2/service/rds v1.81.5/go.mod h1:j27FNXhbbHXC3ExFsJkoxq2Y+4dQypf8KFX1IkgwVvM= github.com/aws/aws-sdk-go-v2/service/redshift v1.46.4 h1:wNBruTRRDfBv2Pz3Mvw6JIJS7ujfTd1ztCG5pIlrfRk= github.com/aws/aws-sdk-go-v2/service/redshift v1.46.4/go.mod h1:AhuwOvTE4nMwWfJQNZ2khZGV9yXexB2MjNYtCuLQA4s= github.com/aws/aws-sdk-go-v2/service/redshiftdata v1.27.3 h1:rtX1ZHGPpqbQGZlPuN1u7nA+0zjq0DB7QTVNlYY/gfw= @@ -384,8 +388,8 @@ github.com/aws/aws-sdk-go-v2/service/scheduler v1.10.3 h1:gmpU7E0ntMzXr+yQQIXbii github.com/aws/aws-sdk-go-v2/service/scheduler v1.10.3/go.mod h1:jnQp5kPPvEgPmVPm0h/XZPmlx7DQ0pqUiISRO4s6U3s= 
github.com/aws/aws-sdk-go-v2/service/schemas v1.26.3 h1:ZJW2OQNpkR8P7URtISmF8twpvz2V0tUN/OgMenlxkao= github.com/aws/aws-sdk-go-v2/service/schemas v1.26.3/go.mod h1:QcRvTKZ9cBv6TlZECUStXI1z1qlCMWKpPi/ZefknVpQ= -github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.32.3 h1:ilavrucVBQHYnMjD2KmZQDCU1fuluQb0l9zRigGNVEc= -github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.32.3/go.mod h1:TKKN7IQoM7uTnyuFm9bm9cw5P//ZYTl4m3htBWQ1G/c= +github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.32.4 h1:NgRFYyFpiMD62y4VPXh4DosPFbZd4vdMVBWKk0VmWXc= +github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.32.4/go.mod h1:TKKN7IQoM7uTnyuFm9bm9cw5P//ZYTl4m3htBWQ1G/c= github.com/aws/aws-sdk-go-v2/service/securityhub v1.51.3 h1:tFzkGJZKDWgwGDSQXwxZK7Bm3NzlKOW6KwNr14xXZqc= github.com/aws/aws-sdk-go-v2/service/securityhub v1.51.3/go.mod h1:MfWlz2hEZ2O0XdyBBJNtF6qUZwpHtvc892BU7gludBw= github.com/aws/aws-sdk-go-v2/service/securitylake v1.16.3 h1:7isk2tSNmVbm2f8epPfokkHjjWfwS46IpNNmI+rarUo= @@ -418,8 +422,8 @@ github.com/aws/aws-sdk-go-v2/service/ssmincidents v1.32.3 h1:IXODiFsgKoyW7QVWWHo github.com/aws/aws-sdk-go-v2/service/ssmincidents v1.32.3/go.mod h1:JvtI6itHlTxyGew0oT7xYNbF7OA767givRMsCuBFK5k= github.com/aws/aws-sdk-go-v2/service/ssmsap v1.15.3 h1:vBcoorWl+c4r5un837H8fhLoS0Kc8SKlGBHpyq7KM9w= github.com/aws/aws-sdk-go-v2/service/ssmsap v1.15.3/go.mod h1:Mq0FruBai8A9f7fpzjcfD+S+y0I4DkZTygb3HxuqDB4= -github.com/aws/aws-sdk-go-v2/service/sso v1.22.3 h1:Fv1vD2L65Jnp5QRsdiM64JvUM4Xe+E0JyVsRQKv6IeA= -github.com/aws/aws-sdk-go-v2/service/sso v1.22.3/go.mod h1:ooyCOXjvJEsUw7x+ZDHeISPMhtwI3ZCB7ggFMcFfWLU= +github.com/aws/aws-sdk-go-v2/service/sso v1.22.4 h1:BXx0ZIxvrJdSgSvKTZ+yRBeSqqgPM89VPlulEcl37tM= +github.com/aws/aws-sdk-go-v2/service/sso v1.22.4/go.mod h1:ooyCOXjvJEsUw7x+ZDHeISPMhtwI3ZCB7ggFMcFfWLU= github.com/aws/aws-sdk-go-v2/service/ssoadmin v1.27.3 h1:pBE7FzR3AUpauidRUITPlDWTQ4hHktI649xZt3e/wKM= github.com/aws/aws-sdk-go-v2/service/ssoadmin v1.27.3/go.mod h1:EyoPT+dUT5zqspxSub9KHDWOZyIP30bPgIavBvGGVz0= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4 h1:yiwVzJW2ZxZTurVbYWA7QOrAaCYQR72t0wrSBfoesUE= From a78723d3f8ffdffb4b4fac2bc76fe22bddea14a8 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 19 Jul 2024 11:06:40 -0400 Subject: [PATCH 36/38] r/aws_emrcontainers_virtual_cluster: Reduce visibility. --- .../service/emrcontainers/exports_test.go | 6 +- .../emrcontainers/service_package_gen.go | 2 +- internal/service/emrcontainers/sweep.go | 2 +- .../service/emrcontainers/virtual_cluster.go | 161 +++++++++--------- .../virtual_cluster_data_source.go | 2 +- 5 files changed, 86 insertions(+), 87 deletions(-) diff --git a/internal/service/emrcontainers/exports_test.go b/internal/service/emrcontainers/exports_test.go index be204ac71d3..56da566c74a 100644 --- a/internal/service/emrcontainers/exports_test.go +++ b/internal/service/emrcontainers/exports_test.go @@ -5,7 +5,9 @@ package emrcontainers // Exports for use in tests only. 
var ( - ResourceJobTemplate = resourceJobTemplate + ResourceJobTemplate = resourceJobTemplate + ResourceVirtualCluster = resourceVirtualCluster - FindJobTemplateByID = findJobTemplateByID + FindJobTemplateByID = findJobTemplateByID + FindVirtualClusterByID = findVirtualClusterByID ) diff --git a/internal/service/emrcontainers/service_package_gen.go b/internal/service/emrcontainers/service_package_gen.go index 0dc085bbe5b..7ed7dcfc290 100644 --- a/internal/service/emrcontainers/service_package_gen.go +++ b/internal/service/emrcontainers/service_package_gen.go @@ -42,7 +42,7 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceVirtualCluster, + Factory: resourceVirtualCluster, TypeName: "aws_emrcontainers_virtual_cluster", Name: "Virtual Cluster", Tags: &types.ServicePackageResourceTags{ diff --git a/internal/service/emrcontainers/sweep.go b/internal/service/emrcontainers/sweep.go index 9548964889b..24fa1296fbd 100644 --- a/internal/service/emrcontainers/sweep.go +++ b/internal/service/emrcontainers/sweep.go @@ -55,7 +55,7 @@ func sweepVirtualClusters(region string) error { continue } - r := ResourceVirtualCluster() + r := resourceVirtualCluster() d := r.Data(nil) d.SetId(aws.ToString(v.Id)) diff --git a/internal/service/emrcontainers/virtual_cluster.go b/internal/service/emrcontainers/virtual_cluster.go index 4c2ec118f1f..7e6e48bf9c2 100644 --- a/internal/service/emrcontainers/virtual_cluster.go +++ b/internal/service/emrcontainers/virtual_cluster.go @@ -28,7 +28,7 @@ import ( // @SDKResource("aws_emrcontainers_virtual_cluster", name="Virtual Cluster") // @Tags(identifierAttribute="arn") -func ResourceVirtualCluster() *schema.Resource { +func resourceVirtualCluster() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceVirtualClusterCreate, ReadWithoutTimeout: resourceVirtualClusterRead, @@ -115,7 +115,6 @@ func ResourceVirtualCluster() *schema.Resource { func resourceVirtualClusterCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EMRContainersClient(ctx) name := d.Get(names.AttrName).(string) @@ -141,10 +140,9 @@ func resourceVirtualClusterCreate(ctx context.Context, d *schema.ResourceData, m func resourceVirtualClusterRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EMRContainersClient(ctx) - vc, err := FindVirtualClusterByID(ctx, conn, d.Id()) + vc, err := findVirtualClusterByID(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] EMR Containers Virtual Cluster %s not found, removing from state", d.Id()) @@ -178,7 +176,6 @@ func resourceVirtualClusterUpdate(ctx context.Context, d *schema.ResourceData, m func resourceVirtualClusterDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EMRContainersClient(ctx) log.Printf("[INFO] Deleting EMR Containers Virtual Cluster: %s", d.Id()) @@ -204,13 +201,89 @@ func resourceVirtualClusterDelete(ctx context.Context, d *schema.ResourceData, m return sdkdiag.AppendErrorf(diags, "deleting EMR Containers Virtual Cluster (%s): %s", d.Id(), err) } - if _, err = waitVirtualClusterDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { + if _, err := waitVirtualClusterDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != 
nil { return sdkdiag.AppendErrorf(diags, "waiting for EMR Containers Virtual Cluster (%s) delete: %s", d.Id(), err) } return diags } +func findVirtualCluster(ctx context.Context, conn *emrcontainers.Client, input *emrcontainers.DescribeVirtualClusterInput) (*awstypes.VirtualCluster, error) { + output, err := conn.DescribeVirtualCluster(ctx, input) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil || output.VirtualCluster == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output.VirtualCluster, nil +} + +func findVirtualClusterByID(ctx context.Context, conn *emrcontainers.Client, id string) (*awstypes.VirtualCluster, error) { + input := &emrcontainers.DescribeVirtualClusterInput{ + Id: aws.String(id), + } + + output, err := findVirtualCluster(ctx, conn, input) + + if err != nil { + return nil, err + } + + if output.State == awstypes.VirtualClusterStateTerminated { + return nil, &retry.NotFoundError{ + Message: string(output.State), + LastRequest: input, + } + } + + return output, nil +} + +func statusVirtualCluster(ctx context.Context, conn *emrcontainers.Client, id string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findVirtualClusterByID(ctx, conn, id) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, string(output.State), nil + } +} + +func waitVirtualClusterDeleted(ctx context.Context, conn *emrcontainers.Client, id string, timeout time.Duration) (*awstypes.VirtualCluster, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.VirtualClusterStateTerminating), + Target: []string{}, + Refresh: statusVirtualCluster(ctx, conn, id), + Timeout: timeout, + Delay: 1 * time.Minute, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if v, ok := outputRaw.(*awstypes.VirtualCluster); ok { + return v, err + } + + return nil, err +} + func expandContainerProvider(tfMap map[string]interface{}) *awstypes.ContainerProvider { if tfMap == nil { return nil @@ -305,79 +378,3 @@ func flattenEKSInfo(apiObject *awstypes.EksInfo) map[string]interface{} { return tfMap } - -func findVirtualCluster(ctx context.Context, conn *emrcontainers.Client, input *emrcontainers.DescribeVirtualClusterInput) (*awstypes.VirtualCluster, error) { - output, err := conn.DescribeVirtualCluster(ctx, input) - - if errs.IsA[*awstypes.ResourceNotFoundException](err) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - if output == nil || output.VirtualCluster == nil { - return nil, tfresource.NewEmptyResultError(input) - } - - return output.VirtualCluster, nil -} - -func FindVirtualClusterByID(ctx context.Context, conn *emrcontainers.Client, id string) (*awstypes.VirtualCluster, error) { - input := &emrcontainers.DescribeVirtualClusterInput{ - Id: aws.String(id), - } - - output, err := findVirtualCluster(ctx, conn, input) - - if err != nil { - return nil, err - } - - if output.State == awstypes.VirtualClusterStateTerminated { - return nil, &retry.NotFoundError{ - Message: string(output.State), - LastRequest: input, - } - } - - return output, nil -} - -func statusVirtualCluster(ctx context.Context, conn *emrcontainers.Client, id string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - output, err := 
FindVirtualClusterByID(ctx, conn, id) - - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return output, string(output.State), nil - } -} - -func waitVirtualClusterDeleted(ctx context.Context, conn *emrcontainers.Client, id string, timeout time.Duration) (*awstypes.VirtualCluster, error) { - stateConf := &retry.StateChangeConf{ - Pending: enum.Slice(awstypes.VirtualClusterStateTerminating), - Target: []string{}, - Refresh: statusVirtualCluster(ctx, conn, id), - Timeout: timeout, - Delay: 1 * time.Minute, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if v, ok := outputRaw.(*awstypes.VirtualCluster); ok { - return v, err - } - - return nil, err -} diff --git a/internal/service/emrcontainers/virtual_cluster_data_source.go b/internal/service/emrcontainers/virtual_cluster_data_source.go index 067dd0e04c8..92925e6deea 100644 --- a/internal/service/emrcontainers/virtual_cluster_data_source.go +++ b/internal/service/emrcontainers/virtual_cluster_data_source.go @@ -89,7 +89,7 @@ func dataSourceVirtualClusterRead(ctx context.Context, d *schema.ResourceData, m ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig id := d.Get("virtual_cluster_id").(string) - vc, err := FindVirtualClusterByID(ctx, conn, id) + vc, err := findVirtualClusterByID(ctx, conn, id) if err != nil { return sdkdiag.AppendErrorf(diags, "reading EMR Containers Virtual Cluster (%s): %s", id, err) From 14021f4c8196e827ed561e0b0475c1a33f6b2cf6 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 19 Jul 2024 11:08:07 -0400 Subject: [PATCH 37/38] d/aws_emrcontainers_virtual_cluster: Reduce visibility. --- internal/service/emrcontainers/service_package_gen.go | 4 +++- .../emrcontainers/virtual_cluster_data_source.go | 11 ++++------- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/internal/service/emrcontainers/service_package_gen.go b/internal/service/emrcontainers/service_package_gen.go index 7ed7dcfc290..87ce0530473 100644 --- a/internal/service/emrcontainers/service_package_gen.go +++ b/internal/service/emrcontainers/service_package_gen.go @@ -25,8 +25,10 @@ func (p *servicePackage) FrameworkResources(ctx context.Context) []*types.Servic func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePackageSDKDataSource { return []*types.ServicePackageSDKDataSource{ { - Factory: DataSourceVirtualCluster, + Factory: dataSourceVirtualCluster, TypeName: "aws_emrcontainers_virtual_cluster", + Name: "Virtual Cluster", + Tags: &types.ServicePackageResourceTags{}, }, } } diff --git a/internal/service/emrcontainers/virtual_cluster_data_source.go b/internal/service/emrcontainers/virtual_cluster_data_source.go index 92925e6deea..58b51966847 100644 --- a/internal/service/emrcontainers/virtual_cluster_data_source.go +++ b/internal/service/emrcontainers/virtual_cluster_data_source.go @@ -15,8 +15,9 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKDataSource("aws_emrcontainers_virtual_cluster") -func DataSourceVirtualCluster() *schema.Resource { +// @SDKDataSource("aws_emrcontainers_virtual_cluster", name="Virtual Cluster") +// @Tags +func dataSourceVirtualCluster() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceVirtualClusterRead, @@ -84,9 +85,7 @@ func DataSourceVirtualCluster() *schema.Resource { func dataSourceVirtualClusterRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := 
meta.(*conns.AWSClient).EMRContainersClient(ctx) - ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig id := d.Get("virtual_cluster_id").(string) vc, err := findVirtualClusterByID(ctx, conn, id) @@ -109,9 +108,7 @@ func dataSourceVirtualClusterRead(ctx context.Context, d *schema.ResourceData, m d.Set(names.AttrState, vc.State) d.Set("virtual_cluster_id", vc.Id) - if err := d.Set(names.AttrTags, KeyValueTags(ctx, vc.Tags).IgnoreAWS().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return sdkdiag.AppendErrorf(diags, "setting tags: %s", err) - } + setTagsOut(ctx, vc.Tags) return diags } From 11f8df3f8b37b0f8ae334d7d61159965db94b37f Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Fri, 19 Jul 2024 11:12:17 -0400 Subject: [PATCH 38/38] r/aws_elastictranscoder_[pipeline|preset]: handle NotFound errors on delete ```console % make testacc PKG=elastictranscoder make: Verifying source code with gofmt... ==> Checking that code complies with gofmt requirements... TF_ACC=1 go1.22.5 test ./internal/service/elastictranscoder/... -v -count 1 -parallel 20 -timeout 360m --- PASS: TestEndpointConfiguration (0.66s) --- PASS: TestEndpointConfiguration/no_config (0.03s) --- PASS: TestEndpointConfiguration/package_name_endpoint_config (0.04s) --- PASS: TestEndpointConfiguration/package_name_endpoint_config_overrides_base_envvar (0.04s) --- PASS: TestEndpointConfiguration/service_aws_envvar_overrides_service_config_file (0.03s) --- PASS: TestEndpointConfiguration/use_fips_config_with_package_name_endpoint_config (0.04s) --- PASS: TestEndpointConfiguration/package_name_endpoint_config_overrides_service_config_file (0.04s) --- PASS: TestEndpointConfiguration/package_name_endpoint_config_overrides_base_config_file (0.05s) --- PASS: TestEndpointConfiguration/service_aws_envvar_overrides_base_envvar (0.03s) --- PASS: TestEndpointConfiguration/base_endpoint_envvar (0.03s) --- PASS: TestEndpointConfiguration/base_endpoint_config_file (0.03s) --- PASS: TestEndpointConfiguration/package_name_endpoint_config_overrides_aws_service_envvar (0.04s) --- PASS: TestEndpointConfiguration/service_aws_envvar_overrides_base_config_file (0.03s) --- PASS: TestEndpointConfiguration/base_endpoint_envvar_overrides_service_config_file (0.03s) --- PASS: TestEndpointConfiguration/use_fips_config (0.03s) --- PASS: TestEndpointConfiguration/service_aws_envvar (0.03s) --- PASS: TestEndpointConfiguration/base_endpoint_envvar_overrides_base_config_file (0.03s) --- PASS: TestEndpointConfiguration/service_config_file (0.03s) --- PASS: TestEndpointConfiguration/service_config_file_overrides_base_config_file (0.03s) === CONT TestAccElasticTranscoderPipeline_basic === CONT TestAccElasticTranscoderPreset_video_noCodec === CONT TestAccElasticTranscoderPipeline_withPermissions === CONT TestAccElasticTranscoderPreset_Video_frameRate === CONT TestAccElasticTranscoderPreset_full === CONT TestAccElasticTranscoderPipeline_notifications === CONT TestAccElasticTranscoderPreset_description === CONT TestAccElasticTranscoderPreset_AudioCodecOptions_empty === CONT TestAccElasticTranscoderPreset_disappears === CONT TestAccElasticTranscoderPreset_audio_noBitRate === CONT TestAccElasticTranscoderPipeline_withContent === CONT TestAccElasticTranscoderPreset_basic === CONT TestAccElasticTranscoderPipeline_kmsKey === CONT TestAccElasticTranscoderPipeline_disappears --- PASS: TestAccElasticTranscoderPreset_disappears (18.36s) --- PASS: TestAccElasticTranscoderPreset_Video_frameRate (21.60s) --- PASS: TestAccElasticTranscoderPreset_description (23.73s) --- 
PASS: TestAccElasticTranscoderPreset_basic (23.86s) --- PASS: TestAccElasticTranscoderPreset_audio_noBitRate (23.93s) --- PASS: TestAccElasticTranscoderPipeline_disappears (25.19s) --- PASS: TestAccElasticTranscoderPipeline_basic (26.98s) --- PASS: TestAccElasticTranscoderPipeline_withPermissions (28.32s) --- PASS: TestAccElasticTranscoderPreset_AudioCodecOptions_empty (28.55s) --- PASS: TestAccElasticTranscoderPreset_video_noCodec (30.26s) --- PASS: TestAccElasticTranscoderPreset_full (34.48s) --- PASS: TestAccElasticTranscoderPipeline_notifications (37.32s) --- PASS: TestAccElasticTranscoderPipeline_withContent (37.32s) --- PASS: TestAccElasticTranscoderPipeline_kmsKey (48.26s) PASS ok github.com/hashicorp/terraform-provider-aws/internal/service/elastictranscoder 54.746s ``` --- .changelog/38018.txt | 6 ++++++ internal/service/elastictranscoder/pipeline.go | 3 +++ internal/service/elastictranscoder/preset.go | 3 +++ 3 files changed, 12 insertions(+) create mode 100644 .changelog/38018.txt diff --git a/.changelog/38018.txt b/.changelog/38018.txt new file mode 100644 index 00000000000..4e061ce95b4 --- /dev/null +++ b/.changelog/38018.txt @@ -0,0 +1,6 @@ +```release-note:bug +resource/aws_elastictranscoder_pipeline: Properly handle NotFound exceptions during deletion +``` +```release-note:bug +resource/aws_elastictranscoder_preset: Properly handle NotFound exceptions during deletion +``` diff --git a/internal/service/elastictranscoder/pipeline.go b/internal/service/elastictranscoder/pipeline.go index d380e6f2786..9e88687992e 100644 --- a/internal/service/elastictranscoder/pipeline.go +++ b/internal/service/elastictranscoder/pipeline.go @@ -537,6 +537,9 @@ func resourcePipelineDelete(ctx context.Context, d *schema.ResourceData, meta in Id: aws.String(d.Id()), }) if err != nil { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return diags + } return sdkdiag.AppendErrorf(diags, "deleting Elastic Transcoder Pipeline: %s", err) } return diags diff --git a/internal/service/elastictranscoder/preset.go b/internal/service/elastictranscoder/preset.go index de6cd7f6322..5cfe8fec701 100644 --- a/internal/service/elastictranscoder/preset.go +++ b/internal/service/elastictranscoder/preset.go @@ -929,6 +929,9 @@ func resourcePresetDelete(ctx context.Context, d *schema.ResourceData, meta inte }) if err != nil { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return diags + } return sdkdiag.AppendErrorf(diags, "deleting Elastic Transcoder Preset: %s", err) }