diff --git a/.changelog/32530.txt b/.changelog/32530.txt new file mode 100644 index 00000000000..c092c4bd830 --- /dev/null +++ b/.changelog/32530.txt @@ -0,0 +1,11 @@ +```release-note:enhancement +resource/aws_fsx_ontap_volume: Add `copy_tags_to_backups` and `snapshot_policy` arguments +``` + +```release-note:enhancement +resource/aws_fsx_ontap_volume: Add `bypass_snaplock_enterprise_retention` argument and `snaplock_configuration` configuration block to support [SnapLock](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/snaplock.html) +``` + +```release-note:enhancement +resource/aws_fsx_openzfs_volume: Add `delete_volume_options` argument +``` \ No newline at end of file diff --git a/internal/service/fsx/lustre_file_system.go b/internal/service/fsx/lustre_file_system.go index dc5f1784182..a9ddb842a8c 100644 --- a/internal/service/fsx/lustre_file_system.go +++ b/internal/service/fsx/lustre_file_system.go @@ -541,7 +541,7 @@ func resourceLustreFileSystemUpdate(ctx context.Context, d *schema.ResourceData, return sdkdiag.AppendErrorf(diags, "waiting for FSx for Lustre File System (%s) update: %s", d.Id(), err) } - if _, err := waitAdministrativeActionCompleted(ctx, conn, d.Id(), fsx.AdministrativeActionTypeFileSystemUpdate, d.Timeout(schema.TimeoutUpdate)); err != nil { + if _, err := waitFileSystemAdministrativeActionCompleted(ctx, conn, d.Id(), fsx.AdministrativeActionTypeFileSystemUpdate, d.Timeout(schema.TimeoutUpdate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for FSx for Lustre File System (%s) administrative action (%s) complete: %s", d.Id(), fsx.AdministrativeActionTypeFileSystemUpdate, err) } } @@ -658,14 +658,6 @@ func logStateFunc(v interface{}) string { return value } -func findFileSystemByID(ctx context.Context, conn *fsx.FSx, id string) (*fsx.FileSystem, error) { - input := &fsx.DescribeFileSystemsInput{ - FileSystemIds: aws.StringSlice([]string{id}), - } - - return findFileSystem(ctx, conn, input, 
tfslices.PredicateTrue[*fsx.FileSystem]()) -} - func FindLustreFileSystemByID(ctx context.Context, conn *fsx.FSx, id string) (*fsx.FileSystem, error) { output, err := findFileSystemByIDAndType(ctx, conn, id, fsx.FileSystemTypeLustre) @@ -680,6 +672,14 @@ func FindLustreFileSystemByID(ctx context.Context, conn *fsx.FSx, id string) (*f return output, nil } +func findFileSystemByID(ctx context.Context, conn *fsx.FSx, id string) (*fsx.FileSystem, error) { + input := &fsx.DescribeFileSystemsInput{ + FileSystemIds: aws.StringSlice([]string{id}), + } + + return findFileSystem(ctx, conn, input, tfslices.PredicateTrue[*fsx.FileSystem]()) +} + func findFileSystemByIDAndType(ctx context.Context, conn *fsx.FSx, fsID, fsType string) (*fsx.FileSystem, error) { input := &fsx.DescribeFileSystemsInput{ FileSystemIds: aws.StringSlice([]string{fsID}), @@ -761,7 +761,7 @@ func waitFileSystemCreated(ctx context.Context, conn *fsx.FSx, id string, timeou if output, ok := outputRaw.(*fsx.FileSystem); ok { if status, details := aws.StringValue(output.Lifecycle), output.FailureDetails; status == fsx.FileSystemLifecycleFailed && details != nil { - tfresource.SetLastError(err, errors.New(aws.StringValue(output.FailureDetails.Message))) + tfresource.SetLastError(err, errors.New(aws.StringValue(details.Message))) } return output, err @@ -823,7 +823,7 @@ func waitFileSystemDeleted(ctx context.Context, conn *fsx.FSx, id string, timeou if output, ok := outputRaw.(*fsx.FileSystem); ok { if status, details := aws.StringValue(output.Lifecycle), output.FailureDetails; status == fsx.FileSystemLifecycleFailed && details != nil { - tfresource.SetLastError(err, errors.New(aws.StringValue(output.FailureDetails.Message))) + tfresource.SetLastError(err, errors.New(aws.StringValue(details.Message))) } return output, err @@ -832,7 +832,7 @@ func waitFileSystemDeleted(ctx context.Context, conn *fsx.FSx, id string, timeou return nil, err } -func findAdministrativeAction(ctx context.Context, conn *fsx.FSx, fsID, 
actionType string) (*fsx.AdministrativeAction, error) { +func findFileSystemAdministrativeAction(ctx context.Context, conn *fsx.FSx, fsID, actionType string) (*fsx.AdministrativeAction, error) { output, err := findFileSystemByID(ctx, conn, fsID) if err != nil { @@ -853,9 +853,9 @@ func findAdministrativeAction(ctx context.Context, conn *fsx.FSx, fsID, actionTy return &fsx.AdministrativeAction{Status: aws.String(fsx.StatusCompleted)}, nil } -func statusAdministrativeAction(ctx context.Context, conn *fsx.FSx, fsID, actionType string) retry.StateRefreshFunc { +func statusFileSystemAdministrativeAction(ctx context.Context, conn *fsx.FSx, fsID, actionType string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - output, err := findAdministrativeAction(ctx, conn, fsID, actionType) + output, err := findFileSystemAdministrativeAction(ctx, conn, fsID, actionType) if tfresource.NotFound(err) { return nil, "", nil @@ -869,11 +869,11 @@ func statusAdministrativeAction(ctx context.Context, conn *fsx.FSx, fsID, action } } -func waitAdministrativeActionCompleted(ctx context.Context, conn *fsx.FSx, fsID, actionType string, timeout time.Duration) (*fsx.AdministrativeAction, error) { //nolint:unparam +func waitFileSystemAdministrativeActionCompleted(ctx context.Context, conn *fsx.FSx, fsID, actionType string, timeout time.Duration) (*fsx.AdministrativeAction, error) { //nolint:unparam stateConf := &retry.StateChangeConf{ Pending: []string{fsx.StatusInProgress, fsx.StatusPending}, Target: []string{fsx.StatusCompleted, fsx.StatusUpdatedOptimizing}, - Refresh: statusAdministrativeAction(ctx, conn, fsID, actionType), + Refresh: statusFileSystemAdministrativeAction(ctx, conn, fsID, actionType), Timeout: timeout, Delay: 30 * time.Second, } diff --git a/internal/service/fsx/ontap_file_system.go b/internal/service/fsx/ontap_file_system.go index 07adec5097d..e65d87065b7 100644 --- a/internal/service/fsx/ontap_file_system.go +++ 
b/internal/service/fsx/ontap_file_system.go @@ -412,7 +412,7 @@ func resourceONTAPFileSystemUpdate(ctx context.Context, d *schema.ResourceData, return sdkdiag.AppendErrorf(diags, "waiting for FSx for NetApp ONTAP File System (%s) update: %s", d.Id(), err) } - if _, err := waitAdministrativeActionCompleted(ctx, conn, d.Id(), fsx.AdministrativeActionTypeFileSystemUpdate, d.Timeout(schema.TimeoutUpdate)); err != nil { + if _, err := waitFileSystemAdministrativeActionCompleted(ctx, conn, d.Id(), fsx.AdministrativeActionTypeFileSystemUpdate, d.Timeout(schema.TimeoutUpdate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for FSx for NetApp ONTAP File System (%s) administrative action (%s) complete: %s", d.Id(), fsx.AdministrativeActionTypeFileSystemUpdate, err) } } diff --git a/internal/service/fsx/ontap_volume.go b/internal/service/fsx/ontap_volume.go index f5d81e2eba9..6ef01fa1627 100644 --- a/internal/service/fsx/ontap_volume.go +++ b/internal/service/fsx/ontap_volume.go @@ -5,6 +5,8 @@ package fsx import ( "context" + "errors" + "fmt" "log" "time" @@ -13,10 +15,12 @@ import ( "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" @@ -25,15 +29,16 @@ import ( // @SDKResource("aws_fsx_ontap_volume", name="ONTAP Volume") // @Tags(identifierAttribute="arn") -func 
ResourceOntapVolume() *schema.Resource { +func ResourceONTAPVolume() *schema.Resource { return &schema.Resource{ - CreateWithoutTimeout: resourceOntapVolumeCreate, - ReadWithoutTimeout: resourceOntapVolumeRead, - UpdateWithoutTimeout: resourceOntapVolumeUpdate, - DeleteWithoutTimeout: resourceOntapVolumeDelete, + CreateWithoutTimeout: resourceONTAPVolumeCreate, + ReadWithoutTimeout: resourceONTAPVolumeRead, + UpdateWithoutTimeout: resourceONTAPVolumeUpdate, + DeleteWithoutTimeout: resourceONTAPVolumeDelete, Importer: &schema.ResourceImporter{ StateContext: func(ctx context.Context, d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + d.Set("bypass_snaplock_enterprise_retention", false) d.Set("skip_final_backup", false) return []*schema.ResourceData{d}, nil @@ -51,6 +56,16 @@ func ResourceOntapVolume() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "bypass_snaplock_enterprise_retention": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "copy_tags_to_backups": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, "file_system_id": { Type: schema.TypeString, Computed: true, @@ -93,6 +108,139 @@ func ResourceOntapVolume() *schema.Resource { Optional: true, Default: false, }, + "snaplock_configuration": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "audit_log_volume": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "autocommit_period": { + Type: schema.TypeList, + Optional: true, + Computed: true, + DiffSuppressFunc: verify.SuppressMissingOptionalConfigurationBlock, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice(fsx.AutocommitPeriodType_Values(), false), + }, + "value": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: 
validation.IntBetween(1, 65535), + }, + }, + }, + }, + "privileged_delete": { + Type: schema.TypeString, + Optional: true, + Default: fsx.PrivilegedDeleteDisabled, + ValidateFunc: validation.StringInSlice(fsx.PrivilegedDelete_Values(), false), + }, + "retention_period": { + Type: schema.TypeList, + Optional: true, + Computed: true, + DiffSuppressFunc: verify.SuppressMissingOptionalConfigurationBlock, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "default_retention": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice(fsx.RetentionPeriodType_Values(), false), + }, + "value": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 65535), + }, + }, + }, + }, + "maximum_retention": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice(fsx.RetentionPeriodType_Values(), false), + }, + "value": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 65535), + }, + }, + }, + }, + "minimum_retention": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice(fsx.RetentionPeriodType_Values(), false), + }, + "value": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 65535), + }, + }, + }, + }, + }, + }, + }, + "snaplock_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: 
validation.StringInSlice(fsx.SnaplockType_Values(), false), + }, + "volume_append_mode_enabled": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + }, + }, + }, + "snapshot_policy": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringLenBetween(1, 255), + }, "storage_efficiency_enabled": { Type: schema.TypeBool, Optional: true, @@ -138,92 +286,117 @@ func ResourceOntapVolume() *schema.Resource { ValidateFunc: validation.StringInSlice(fsx.VolumeType_Values(), false), }, }, + CustomizeDiff: verify.SetTagsDiff, } } -func resourceOntapVolumeCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { +func resourceONTAPVolumeCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).FSxConn(ctx) - name := d.Get("name").(string) - input := &fsx.CreateVolumeInput{ - Name: aws.String(name), - OntapConfiguration: &fsx.CreateOntapVolumeConfiguration{ - SizeInMegabytes: aws.Int64(int64(d.Get("size_in_megabytes").(int))), - StorageVirtualMachineId: aws.String(d.Get("storage_virtual_machine_id").(string)), - }, - Tags: getTagsIn(ctx), - VolumeType: aws.String(d.Get("volume_type").(string)), + ontapConfig := &fsx.CreateOntapVolumeConfiguration{ + SizeInMegabytes: aws.Int64(int64(d.Get("size_in_megabytes").(int))), + StorageVirtualMachineId: aws.String(d.Get("storage_virtual_machine_id").(string)), + } + + if v, ok := d.GetOk("copy_tags_to_backups"); ok { + ontapConfig.CopyTagsToBackups = aws.Bool(v.(bool)) } if v, ok := d.GetOk("junction_path"); ok { - input.OntapConfiguration.JunctionPath = aws.String(v.(string)) + ontapConfig.JunctionPath = aws.String(v.(string)) } if v, ok := d.GetOk("ontap_volume_type"); ok { - input.OntapConfiguration.OntapVolumeType = aws.String(v.(string)) + ontapConfig.OntapVolumeType = aws.String(v.(string)) } if v, ok := d.GetOk("security_style"); ok { - 
input.OntapConfiguration.SecurityStyle = aws.String(v.(string)) + ontapConfig.SecurityStyle = aws.String(v.(string)) + } + + if v, ok := d.GetOk("snaplock_configuration"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + ontapConfig.SnaplockConfiguration = expandCreateSnaplockConfiguration(v.([]interface{})[0].(map[string]interface{})) + } + + if v, ok := d.GetOk("snapshot_policy"); ok { + ontapConfig.SnapshotPolicy = aws.String(v.(string)) } if v, ok := d.GetOkExists("storage_efficiency_enabled"); ok { - input.OntapConfiguration.StorageEfficiencyEnabled = aws.Bool(v.(bool)) + ontapConfig.StorageEfficiencyEnabled = aws.Bool(v.(bool)) } - if v, ok := d.GetOk("tiering_policy"); ok { - input.OntapConfiguration.TieringPolicy = expandOntapVolumeTieringPolicy(v.([]interface{})) + if v, ok := d.GetOk("tiering_policy"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + ontapConfig.TieringPolicy = expandTieringPolicy(v.([]interface{})[0].(map[string]interface{})) } - result, err := conn.CreateVolumeWithContext(ctx, input) + name := d.Get("name").(string) + input := &fsx.CreateVolumeInput{ + Name: aws.String(name), + OntapConfiguration: ontapConfig, + Tags: getTagsIn(ctx), + VolumeType: aws.String(d.Get("volume_type").(string)), + } + + output, err := conn.CreateVolumeWithContext(ctx, input) if err != nil { - return sdkdiag.AppendErrorf(diags, "creating FSx ONTAP Volume (%s): %s", name, err) + return sdkdiag.AppendErrorf(diags, "creating FSx for NetApp ONTAP Volume (%s): %s", name, err) } - d.SetId(aws.StringValue(result.Volume.VolumeId)) + d.SetId(aws.StringValue(output.Volume.VolumeId)) if _, err := waitVolumeCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for FSx ONTAP Volume (%s) create: %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "waiting for FSx for NetApp ONTAP Volume (%s) create: %s", d.Id(), err) } - return append(diags, 
resourceOntapVolumeRead(ctx, d, meta)...) + return append(diags, resourceONTAPVolumeRead(ctx, d, meta)...) } -func resourceOntapVolumeRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { +func resourceONTAPVolumeRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).FSxConn(ctx) - volume, err := FindVolumeByID(ctx, conn, d.Id()) + volume, err := FindONTAPVolumeByID(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { - log.Printf("[WARN] FSx ONTAP Volume (%s) not found, removing from state", d.Id()) + log.Printf("[WARN] FSx for NetApp ONTAP Volume (%s) not found, removing from state", d.Id()) d.SetId("") return diags } if err != nil { - return sdkdiag.AppendErrorf(diags, "reading FSx ONTAP Volume (%s): %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "reading FSx for NetApp ONTAP Volume (%s): %s", d.Id(), err) } ontapConfig := volume.OntapConfiguration - if ontapConfig == nil { - return sdkdiag.AppendErrorf(diags, "reading FSx ONTAP Volume (%s): empty ONTAP configuration", d.Id()) - } d.Set("arn", volume.ResourceARN) - d.Set("name", volume.Name) + d.Set("copy_tags_to_backups", ontapConfig.CopyTagsToBackups) d.Set("file_system_id", volume.FileSystemId) d.Set("junction_path", ontapConfig.JunctionPath) + d.Set("name", volume.Name) d.Set("ontap_volume_type", ontapConfig.OntapVolumeType) d.Set("security_style", ontapConfig.SecurityStyle) d.Set("size_in_megabytes", ontapConfig.SizeInMegabytes) + if ontapConfig.SnaplockConfiguration != nil { + if err := d.Set("snaplock_configuration", []interface{}{flattenSnaplockConfiguration(ontapConfig.SnaplockConfiguration)}); err != nil { + return diag.Errorf("setting snaplock_configuration: %s", err) + } + } else { + d.Set("snaplock_configuration", nil) + } + d.Set("snapshot_policy", ontapConfig.SnapshotPolicy) d.Set("storage_efficiency_enabled", ontapConfig.StorageEfficiencyEnabled) 
d.Set("storage_virtual_machine_id", ontapConfig.StorageVirtualMachineId) - if err := d.Set("tiering_policy", flattenOntapVolumeTieringPolicy(ontapConfig.TieringPolicy)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting tiering_policy: %s", err) + if ontapConfig.TieringPolicy != nil { + if err := d.Set("tiering_policy", []interface{}{flattenTieringPolicy(ontapConfig.TieringPolicy)}); err != nil { + return diag.Errorf("setting tiering_policy: %s", err) + } + } else { + d.Set("tiering_policy", nil) } d.Set("uuid", ontapConfig.UUID) d.Set("volume_type", volume.VolumeType) @@ -231,59 +404,83 @@ func resourceOntapVolumeRead(ctx context.Context, d *schema.ResourceData, meta i return diags } -func resourceOntapVolumeUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { +func resourceONTAPVolumeUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).FSxConn(ctx) - if d.HasChangesExcept("tags_all", "tags") { - input := &fsx.UpdateVolumeInput{ - ClientRequestToken: aws.String(id.UniqueId()), - OntapConfiguration: &fsx.UpdateOntapVolumeConfiguration{}, - VolumeId: aws.String(d.Id()), + if d.HasChangesExcept("tags", "tags_all") { + ontapConfig := &fsx.UpdateOntapVolumeConfiguration{} + + if d.HasChange("copy_tags_to_backups") { + ontapConfig.CopyTagsToBackups = aws.Bool(d.Get("copy_tags_to_backups").(bool)) } if d.HasChange("junction_path") { - input.OntapConfiguration.JunctionPath = aws.String(d.Get("junction_path").(string)) + ontapConfig.JunctionPath = aws.String(d.Get("junction_path").(string)) } if d.HasChange("security_style") { - input.OntapConfiguration.SecurityStyle = aws.String(d.Get("security_style").(string)) + ontapConfig.SecurityStyle = aws.String(d.Get("security_style").(string)) } if d.HasChange("size_in_megabytes") { - input.OntapConfiguration.SizeInMegabytes = aws.Int64(int64(d.Get("size_in_megabytes").(int))) + 
ontapConfig.SizeInMegabytes = aws.Int64(int64(d.Get("size_in_megabytes").(int))) + } + + if d.HasChange("snaplock_configuration") { + if v, ok := d.GetOk("snaplock_configuration"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + ontapConfig.SnaplockConfiguration = expandUpdateSnaplockConfiguration(v.([]interface{})[0].(map[string]interface{})) + } + } + + if d.HasChange("snapshot_policy") { + ontapConfig.SnapshotPolicy = aws.String(d.Get("snapshot_policy").(string)) } if d.HasChange("storage_efficiency_enabled") { - input.OntapConfiguration.StorageEfficiencyEnabled = aws.Bool(d.Get("storage_efficiency_enabled").(bool)) + ontapConfig.StorageEfficiencyEnabled = aws.Bool(d.Get("storage_efficiency_enabled").(bool)) } if d.HasChange("tiering_policy") { - input.OntapConfiguration.TieringPolicy = expandOntapVolumeTieringPolicy(d.Get("tiering_policy").([]interface{})) + if v, ok := d.GetOk("tiering_policy"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + ontapConfig.TieringPolicy = expandTieringPolicy(v.([]interface{})[0].(map[string]interface{})) + } } + input := &fsx.UpdateVolumeInput{ + ClientRequestToken: aws.String(id.UniqueId()), + OntapConfiguration: ontapConfig, + VolumeId: aws.String(d.Id()), + } + + startTime := time.Now() _, err := conn.UpdateVolumeWithContext(ctx, input) if err != nil { - return sdkdiag.AppendErrorf(diags, "updating FSx ONTAP Volume (%s): %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "updating FSx for NetApp ONTAP Volume (%s): %s", d.Id(), err) + } + + if _, err := waitVolumeUpdated(ctx, conn, d.Id(), startTime, d.Timeout(schema.TimeoutUpdate)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for FSx for NetApp ONTAP Volume (%s) update: %s", d.Id(), err) } - if _, err := waitVolumeUpdated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for FSx ONTAP Volume (%s) update: %s", d.Id(), err) + if _, err := 
waitVolumeAdministrativeActionCompleted(ctx, conn, d.Id(), fsx.AdministrativeActionTypeVolumeUpdate, d.Timeout(schema.TimeoutUpdate)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for FSx for NetApp ONTAP Volume (%s) administrative action (%s) complete: %s", d.Id(), fsx.AdministrativeActionTypeVolumeUpdate, err) } } - return append(diags, resourceOntapVolumeRead(ctx, d, meta)...) + return append(diags, resourceONTAPVolumeRead(ctx, d, meta)...) } -func resourceOntapVolumeDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { +func resourceONTAPVolumeDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).FSxConn(ctx) - log.Printf("[DEBUG] Deleting FSx ONTAP Volume: %s", d.Id()) + log.Printf("[DEBUG] Deleting FSx for NetApp ONTAP Volume: %s", d.Id()) _, err := conn.DeleteVolumeWithContext(ctx, &fsx.DeleteVolumeInput{ OntapConfiguration: &fsx.DeleteVolumeOntapConfiguration{ - SkipFinalBackup: aws.Bool(d.Get("skip_final_backup").(bool)), + BypassSnaplockEnterpriseRetention: aws.Bool(d.Get("bypass_snaplock_enterprise_retention").(bool)), + SkipFinalBackup: aws.Bool(d.Get("skip_final_backup").(bool)), }, VolumeId: aws.String(d.Id()), }) @@ -293,53 +490,501 @@ func resourceOntapVolumeDelete(ctx context.Context, d *schema.ResourceData, meta } if err != nil { - return sdkdiag.AppendErrorf(diags, "deleting FSx ONTAP Volume (%s): %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "deleting FSx for NetApp ONTAP Volume (%s): %s", d.Id(), err) } if _, err := waitVolumeDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for FSx ONTAP Volume (%s) delete: %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "waiting for FSx for NetApp ONTAP Volume (%s) delete: %s", d.Id(), err) } return diags } -func expandOntapVolumeTieringPolicy(cfg []interface{}) *fsx.TieringPolicy { - 
if len(cfg) < 1 { +const minTieringPolicyCoolingPeriod = 2 + +func expandTieringPolicy(tfMap map[string]interface{}) *fsx.TieringPolicy { + if tfMap == nil { + return nil + } + + apiObject := &fsx.TieringPolicy{} + + // Cooling period only accepts a minimum of 2 but int will return 0 not nil if unset. + // Therefore we only set it if it is 2 or more. + if v, ok := tfMap["cooling_period"].(int); ok && v >= minTieringPolicyCoolingPeriod { + apiObject.CoolingPeriod = aws.Int64(int64(v)) + } + + if v, ok := tfMap["name"].(string); ok && v != "" { + apiObject.Name = aws.String(v) + } + + return apiObject +} + +func flattenTieringPolicy(apiObject *fsx.TieringPolicy) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.CoolingPeriod; v != nil { + if v := aws.Int64Value(v); v >= minTieringPolicyCoolingPeriod { + tfMap["cooling_period"] = v + } + } + + if v := apiObject.Name; v != nil { + tfMap["name"] = aws.StringValue(v) + } + + return tfMap +} + +func expandCreateSnaplockConfiguration(tfMap map[string]interface{}) *fsx.CreateSnaplockConfiguration { + if tfMap == nil { + return nil + } + + apiObject := &fsx.CreateSnaplockConfiguration{} + + if v, ok := tfMap["audit_log_volume"].(bool); ok && v { + apiObject.AuditLogVolume = aws.Bool(v) + } + + if v, ok := tfMap["autocommit_period"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.AutocommitPeriod = expandAutocommitPeriod(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["privileged_delete"].(string); ok && v != "" { + apiObject.PrivilegedDelete = aws.String(v) + } + + if v, ok := tfMap["retention_period"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.RetentionPeriod = expandSnaplockRetentionPeriod(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["snaplock_type"].(string); ok && v != "" { + apiObject.SnaplockType = aws.String(v) + } + + if v, ok := tfMap["volume_append_mode_enabled"].(bool); ok && v { + 
apiObject.VolumeAppendModeEnabled = aws.Bool(v) + } + + return apiObject +} + +func expandUpdateSnaplockConfiguration(tfMap map[string]interface{}) *fsx.UpdateSnaplockConfiguration { + if tfMap == nil { + return nil + } + + apiObject := &fsx.UpdateSnaplockConfiguration{} + + if v, ok := tfMap["audit_log_volume"].(bool); ok && v { + apiObject.AuditLogVolume = aws.Bool(v) + } + + if v, ok := tfMap["autocommit_period"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.AutocommitPeriod = expandAutocommitPeriod(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["privileged_delete"].(string); ok && v != "" { + apiObject.PrivilegedDelete = aws.String(v) + } + + if v, ok := tfMap["retention_period"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.RetentionPeriod = expandSnaplockRetentionPeriod(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["volume_append_mode_enabled"].(bool); ok && v { + apiObject.VolumeAppendModeEnabled = aws.Bool(v) + } + + return apiObject +} + +func expandAutocommitPeriod(tfMap map[string]interface{}) *fsx.AutocommitPeriod { + if tfMap == nil { + return nil + } + + apiObject := &fsx.AutocommitPeriod{} + + if v, ok := tfMap["type"].(string); ok && v != "" { + apiObject.Type = aws.String(v) + } + + if v, ok := tfMap["value"].(int); ok && v != 0 { + apiObject.Value = aws.Int64(int64(v)) + } + + return apiObject +} + +func expandSnaplockRetentionPeriod(tfMap map[string]interface{}) *fsx.SnaplockRetentionPeriod { + if tfMap == nil { + return nil + } + + apiObject := &fsx.SnaplockRetentionPeriod{} + + if v, ok := tfMap["default_retention"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.DefaultRetention = expandRetentionPeriod(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["maximum_retention"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.MaximumRetention = expandRetentionPeriod(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["minimum_retention"].([]interface{}); 
ok && len(v) > 0 && v[0] != nil { + apiObject.MinimumRetention = expandRetentionPeriod(v[0].(map[string]interface{})) + } + + return apiObject +} + +func expandRetentionPeriod(tfMap map[string]interface{}) *fsx.RetentionPeriod { + if tfMap == nil { + return nil + } + + apiObject := &fsx.RetentionPeriod{} + + if v, ok := tfMap["type"].(string); ok && v != "" { + apiObject.Type = aws.String(v) + } + + if v, ok := tfMap["value"].(int); ok && v != 0 { + apiObject.Value = aws.Int64(int64(v)) + } + + return apiObject +} + +func flattenSnaplockConfiguration(apiObject *fsx.SnaplockConfiguration) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.AuditLogVolume; v != nil { + tfMap["audit_log_volume"] = aws.BoolValue(v) + } + + if v := apiObject.AutocommitPeriod; v != nil { + tfMap["autocommit_period"] = []interface{}{flattenAutocommitPeriod(v)} + } + + if v := apiObject.PrivilegedDelete; v != nil { + tfMap["privileged_delete"] = aws.StringValue(v) + } + + if v := apiObject.RetentionPeriod; v != nil { + tfMap["retention_period"] = []interface{}{flattenSnaplockRetentionPeriod(v)} + } + + if v := apiObject.SnaplockType; v != nil { + tfMap["snaplock_type"] = aws.StringValue(v) + } + + if v := apiObject.VolumeAppendModeEnabled; v != nil { + tfMap["volume_append_mode_enabled"] = aws.BoolValue(v) + } + + return tfMap +} + +func flattenAutocommitPeriod(apiObject *fsx.AutocommitPeriod) map[string]interface{} { + if apiObject == nil { return nil } - conf := cfg[0].(map[string]interface{}) + tfMap := map[string]interface{}{} + + if v := apiObject.Type; v != nil { + tfMap["type"] = aws.StringValue(v) + } + + if v := apiObject.Value; v != nil { + tfMap["value"] = aws.Int64Value(v) + } + + return tfMap +} + +func flattenSnaplockRetentionPeriod(apiObject *fsx.SnaplockRetentionPeriod) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := 
apiObject.DefaultRetention; v != nil { + tfMap["default_retention"] = []interface{}{flattenRetentionPeriod(v)} + } + + if v := apiObject.MaximumRetention; v != nil { + tfMap["maximum_retention"] = []interface{}{flattenRetentionPeriod(v)} + } + + if v := apiObject.MinimumRetention; v != nil { + tfMap["minimum_retention"] = []interface{}{flattenRetentionPeriod(v)} + } + + return tfMap +} + +func flattenRetentionPeriod(apiObject *fsx.RetentionPeriod) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.Type; v != nil { + tfMap["type"] = aws.StringValue(v) + } + + if v := apiObject.Value; v != nil { + tfMap["value"] = aws.Int64Value(v) + } + + return tfMap +} + +func FindONTAPVolumeByID(ctx context.Context, conn *fsx.FSx, id string) (*fsx.Volume, error) { + output, err := findVolumeByIDAndType(ctx, conn, id, fsx.VolumeTypeOntap) + + if err != nil { + return nil, err + } + + if output.OntapConfiguration == nil { + return nil, tfresource.NewEmptyResultError(nil) + } + + return output, nil +} + +func findVolumeByID(ctx context.Context, conn *fsx.FSx, id string) (*fsx.Volume, error) { + input := &fsx.DescribeVolumesInput{ + VolumeIds: aws.StringSlice([]string{id}), + } + + return findVolume(ctx, conn, input, tfslices.PredicateTrue[*fsx.Volume]()) +} + +func findVolumeByIDAndType(ctx context.Context, conn *fsx.FSx, volID, volType string) (*fsx.Volume, error) { + input := &fsx.DescribeVolumesInput{ + VolumeIds: aws.StringSlice([]string{volID}), + } + filter := func(fs *fsx.Volume) bool { + return aws.StringValue(fs.VolumeType) == volType + } + + return findVolume(ctx, conn, input, filter) +} + +func findVolume(ctx context.Context, conn *fsx.FSx, input *fsx.DescribeVolumesInput, filter tfslices.Predicate[*fsx.Volume]) (*fsx.Volume, error) { + output, err := findVolumes(ctx, conn, input, filter) + + if err != nil { + return nil, err + } + + return tfresource.AssertSinglePtrResult(output) +} + +func 
findVolumes(ctx context.Context, conn *fsx.FSx, input *fsx.DescribeVolumesInput, filter tfslices.Predicate[*fsx.Volume]) ([]*fsx.Volume, error) { + var output []*fsx.Volume + + err := conn.DescribeVolumesPagesWithContext(ctx, input, func(page *fsx.DescribeVolumesOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + + for _, v := range page.Volumes { + if v != nil && filter(v) { + output = append(output, v) + } + } + + return !lastPage + }) + + if tfawserr.ErrCodeEquals(err, fsx.ErrCodeVolumeNotFound) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } - out := fsx.TieringPolicy{} + if err != nil { + return nil, err + } + + return output, nil +} + +func statusVolume(ctx context.Context, conn *fsx.FSx, id string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findVolumeByID(ctx, conn, id) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } - //Cooling period only accepts a minimum of 2 but int will return 0 not nil if unset - //Therefore we only set it if it is 2 or more - if v, ok := conf["cooling_period"].(int); ok && v >= 2 { - out.CoolingPeriod = aws.Int64(int64(v)) + return output, aws.StringValue(output.Lifecycle), nil } +} - if v, ok := conf["name"].(string); ok { - out.Name = aws.String(v) +func waitVolumeCreated(ctx context.Context, conn *fsx.FSx, id string, timeout time.Duration) (*fsx.Volume, error) { //nolint:unparam + stateConf := &retry.StateChangeConf{ + Pending: []string{fsx.VolumeLifecycleCreating, fsx.VolumeLifecyclePending}, + Target: []string{fsx.VolumeLifecycleCreated, fsx.VolumeLifecycleMisconfigured, fsx.VolumeLifecycleAvailable}, + Refresh: statusVolume(ctx, conn, id), + Timeout: timeout, + Delay: 30 * time.Second, } - return &out + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*fsx.Volume); ok { + if status, reason := aws.StringValue(output.Lifecycle), 
output.LifecycleTransitionReason; status == fsx.VolumeLifecycleFailed && reason != nil { + tfresource.SetLastError(err, errors.New(aws.StringValue(reason.Message))) + } + + return output, err + } + + return nil, err } -func flattenOntapVolumeTieringPolicy(rs *fsx.TieringPolicy) []interface{} { - if rs == nil { - return []interface{}{} +func waitVolumeUpdated(ctx context.Context, conn *fsx.FSx, id string, startTime time.Time, timeout time.Duration) (*fsx.Volume, error) { //nolint:unparam + stateConf := &retry.StateChangeConf{ + Pending: []string{fsx.VolumeLifecyclePending}, + Target: []string{fsx.VolumeLifecycleCreated, fsx.VolumeLifecycleMisconfigured, fsx.VolumeLifecycleAvailable}, + Refresh: statusVolume(ctx, conn, id), + Timeout: timeout, + Delay: 150 * time.Second, } - minCoolingPeriod := 2 + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*fsx.Volume); ok { + switch status := aws.StringValue(output.Lifecycle); status { + case fsx.VolumeLifecycleFailed: + // Report any failed non-VOLUME_UPDATE administrative actions. + // See https://docs.aws.amazon.com/fsx/latest/APIReference/API_AdministrativeAction.html#FSx-Type-AdministrativeAction-AdministrativeActionType. + administrativeActions := tfslices.Filter(output.AdministrativeActions, func(v *fsx.AdministrativeAction) bool { + return v != nil && aws.StringValue(v.Status) == fsx.StatusFailed && aws.StringValue(v.AdministrativeActionType) != fsx.AdministrativeActionTypeVolumeUpdate && v.FailureDetails != nil && startTime.Before(aws.TimeValue(v.RequestTime)) + }) + administrativeActionsError := errors.Join(tfslices.ApplyToAll(administrativeActions, func(v *fsx.AdministrativeAction) error { + return fmt.Errorf("%s: %s", aws.StringValue(v.AdministrativeActionType), aws.StringValue(v.FailureDetails.Message)) + })...) 
+ + if reason := output.LifecycleTransitionReason; reason != nil { + if message := aws.StringValue(reason.Message); administrativeActionsError != nil { + tfresource.SetLastError(err, fmt.Errorf("%s: %w", message, administrativeActionsError)) + } else { + tfresource.SetLastError(err, errors.New(message)) + } + } else { + tfresource.SetLastError(err, administrativeActionsError) + } + } - m := make(map[string]interface{}) - if aws.Int64Value(rs.CoolingPeriod) >= int64(minCoolingPeriod) { - m["cooling_period"] = aws.Int64Value(rs.CoolingPeriod) + return output, err } - if rs.Name != nil { - m["name"] = aws.StringValue(rs.Name) + return nil, err +} + +func waitVolumeDeleted(ctx context.Context, conn *fsx.FSx, id string, timeout time.Duration) (*fsx.Volume, error) { //nolint:unparam + stateConf := &retry.StateChangeConf{ + Pending: []string{fsx.VolumeLifecycleCreated, fsx.VolumeLifecycleMisconfigured, fsx.VolumeLifecycleAvailable, fsx.VolumeLifecycleDeleting}, + Target: []string{}, + Refresh: statusVolume(ctx, conn, id), + Timeout: timeout, + Delay: 30 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*fsx.Volume); ok { + if status, reason := aws.StringValue(output.Lifecycle), output.LifecycleTransitionReason; status == fsx.VolumeLifecycleFailed && reason != nil { + tfresource.SetLastError(err, errors.New(aws.StringValue(reason.Message))) + } + + return output, err + } + + return nil, err +} + +func findVolumeAdministrativeAction(ctx context.Context, conn *fsx.FSx, volID, actionType string) (*fsx.AdministrativeAction, error) { + output, err := findVolumeByID(ctx, conn, volID) + + if err != nil { + return nil, err + } + + for _, v := range output.AdministrativeActions { + if v == nil { + continue + } + + if aws.StringValue(v.AdministrativeActionType) == actionType { + return v, nil + } + } + + // If the administrative action isn't found, assume it's complete. 
+	return &fsx.AdministrativeAction{Status: aws.String(fsx.StatusCompleted)}, nil +} + +func statusVolumeAdministrativeAction(ctx context.Context, conn *fsx.FSx, volID, actionType string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findVolumeAdministrativeAction(ctx, conn, volID, actionType) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, aws.StringValue(output.Status), nil + } +} + +func waitVolumeAdministrativeActionCompleted(ctx context.Context, conn *fsx.FSx, volID, actionType string, timeout time.Duration) (*fsx.AdministrativeAction, error) { //nolint:unparam + stateConf := &retry.StateChangeConf{ + Pending: []string{fsx.StatusInProgress, fsx.StatusPending}, + Target: []string{fsx.StatusCompleted, fsx.StatusUpdatedOptimizing}, + Refresh: statusVolumeAdministrativeAction(ctx, conn, volID, actionType), + Timeout: timeout, + Delay: 30 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*fsx.AdministrativeAction); ok { + if status, details := aws.StringValue(output.Status), output.FailureDetails; status == fsx.StatusFailed && details != nil { + tfresource.SetLastError(err, errors.New(aws.StringValue(details.Message))) + } + + return output, err }
PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOntapVolumeDestroy(ctx), + CheckDestroy: testAccCheckONTAPVolumeDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccONTAPVolumeConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckOntapVolumeExists(ctx, resourceName, &volume), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume), acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "fsx", regexache.MustCompile(`volume/fs-.+/fsvol-.+`)), + resource.TestCheckResourceAttr(resourceName, "bypass_snaplock_enterprise_retention", "false"), + resource.TestCheckResourceAttr(resourceName, "copy_tags_to_backups", "false"), resource.TestCheckResourceAttrSet(resourceName, "file_system_id"), resource.TestCheckResourceAttr(resourceName, "junction_path", fmt.Sprintf("/%[1]s", rName)), resource.TestCheckResourceAttr(resourceName, "ontap_volume_type", "RW"), @@ -44,24 +46,26 @@ func TestAccFSxOntapVolume_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "security_style", ""), resource.TestCheckResourceAttr(resourceName, "size_in_megabytes", "1024"), resource.TestCheckResourceAttr(resourceName, "skip_final_backup", "false"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.#", "0"), + resource.TestCheckResourceAttr(resourceName, "snapshot_policy", "default"), resource.TestCheckResourceAttr(resourceName, "storage_efficiency_enabled", "true"), resource.TestCheckResourceAttrSet(resourceName, "storage_virtual_machine_id"), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttr(resourceName, "tiering_policy.#", "1"), resource.TestCheckResourceAttrSet(resourceName, "uuid"), resource.TestCheckResourceAttr(resourceName, "volume_type", 
"ONTAP"), ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"skip_final_backup"}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, }, }, }) } -func TestAccFSxOntapVolume_disappears(t *testing.T) { +func TestAccFSxONTAPVolume_disappears(t *testing.T) { ctx := acctest.Context(t) var volume fsx.Volume resourceName := "aws_fsx_ontap_volume.test" @@ -71,13 +75,13 @@ func TestAccFSxOntapVolume_disappears(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOntapVolumeDestroy(ctx), + CheckDestroy: testAccCheckONTAPVolumeDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccONTAPVolumeConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckOntapVolumeExists(ctx, resourceName, &volume), - acctest.CheckResourceDisappears(ctx, acctest.Provider, tffsx.ResourceOntapVolume(), resourceName), + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume), + acctest.CheckResourceDisappears(ctx, acctest.Provider, tffsx.ResourceONTAPVolume(), resourceName), ), ExpectNonEmptyPlan: true, }, @@ -85,45 +89,44 @@ func TestAccFSxOntapVolume_disappears(t *testing.T) { }) } -func TestAccFSxOntapVolume_name(t *testing.T) { +func TestAccFSxONTAPVolume_copyTagsToBackups(t *testing.T) { ctx := acctest.Context(t) var volume1, volume2 fsx.Volume resourceName := "aws_fsx_ontap_volume.test" rName := fmt.Sprintf("tf_acc_test_%d", sdkacctest.RandInt()) - rName2 := fmt.Sprintf("tf_acc_test_%d", sdkacctest.RandInt()) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - 
CheckDestroy: testAccCheckOntapVolumeDestroy(ctx), + CheckDestroy: testAccCheckONTAPVolumeDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccONTAPVolumeConfig_basic(rName), + Config: testAccONTAPVolumeConfig_copyTagsToBackups(rName, true), Check: resource.ComposeTestCheckFunc( - testAccCheckOntapVolumeExists(ctx, resourceName, &volume1), - resource.TestCheckResourceAttr(resourceName, "name", rName), + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume1), + resource.TestCheckResourceAttr(resourceName, "copy_tags_to_backups", "true"), ), }, { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"skip_final_backup"}, + ImportStateVerifyIgnore: []string{"bypass_snaplock_enterprise_retention", "skip_final_backup"}, }, { - Config: testAccONTAPVolumeConfig_basic(rName2), + Config: testAccONTAPVolumeConfig_copyTagsToBackups(rName, false), Check: resource.ComposeTestCheckFunc( - testAccCheckOntapVolumeExists(ctx, resourceName, &volume2), - testAccCheckOntapVolumeRecreated(&volume1, &volume2), - resource.TestCheckResourceAttr(resourceName, "name", rName2), + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume2), + testAccCheckONTAPVolumeNotRecreated(&volume1, &volume2), + resource.TestCheckResourceAttr(resourceName, "copy_tags_to_backups", "false"), ), }, }, }) } -func TestAccFSxOntapVolume_junctionPath(t *testing.T) { +func TestAccFSxONTAPVolume_junctionPath(t *testing.T) { ctx := acctest.Context(t) var volume1, volume2 fsx.Volume resourceName := "aws_fsx_ontap_volume.test" @@ -135,12 +138,12 @@ func TestAccFSxOntapVolume_junctionPath(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOntapVolumeDestroy(ctx), + CheckDestroy: testAccCheckONTAPVolumeDestroy(ctx), Steps: []resource.TestStep{ { 
Config: testAccONTAPVolumeConfig_junctionPath(rName, jPath1), Check: resource.ComposeTestCheckFunc( - testAccCheckOntapVolumeExists(ctx, resourceName, &volume1), + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume1), resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "junction_path", jPath1), ), @@ -149,13 +152,13 @@ func TestAccFSxOntapVolume_junctionPath(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"skip_final_backup"}, + ImportStateVerifyIgnore: []string{"bypass_snaplock_enterprise_retention", "skip_final_backup"}, }, { Config: testAccONTAPVolumeConfig_junctionPath(rName, jPath2), Check: resource.ComposeTestCheckFunc( - testAccCheckOntapVolumeExists(ctx, resourceName, &volume2), - testAccCheckOntapVolumeNotRecreated(&volume1, &volume2), + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume2), + testAccCheckONTAPVolumeNotRecreated(&volume1, &volume2), resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "junction_path", jPath2), ), @@ -164,7 +167,45 @@ func TestAccFSxOntapVolume_junctionPath(t *testing.T) { }) } -func TestAccFSxOntapVolume_ontapVolumeType(t *testing.T) { +func TestAccFSxONTAPVolume_name(t *testing.T) { + ctx := acctest.Context(t) + var volume1, volume2 fsx.Volume + resourceName := "aws_fsx_ontap_volume.test" + rName := fmt.Sprintf("tf_acc_test_%d", sdkacctest.RandInt()) + rName2 := fmt.Sprintf("tf_acc_test_%d", sdkacctest.RandInt()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckONTAPVolumeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccONTAPVolumeConfig_basic(rName), + Check: 
resource.ComposeTestCheckFunc( + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume1), + resource.TestCheckResourceAttr(resourceName, "name", rName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"bypass_snaplock_enterprise_retention", "skip_final_backup"}, + }, + { + Config: testAccONTAPVolumeConfig_basic(rName2), + Check: resource.ComposeTestCheckFunc( + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume2), + testAccCheckONTAPVolumeRecreated(&volume1, &volume2), + resource.TestCheckResourceAttr(resourceName, "name", rName2), + ), + }, + }, + }) +} + +func TestAccFSxONTAPVolume_ontapVolumeType(t *testing.T) { ctx := acctest.Context(t) var volume fsx.Volume resourceName := "aws_fsx_ontap_volume.test" @@ -174,13 +215,12 @@ func TestAccFSxOntapVolume_ontapVolumeType(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOntapVolumeDestroy(ctx), + CheckDestroy: testAccCheckONTAPVolumeDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccONTAPVolumeConfig_ontapVolumeTypeDP(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckOntapVolumeExists(ctx, resourceName, &volume), - resource.TestCheckResourceAttr(resourceName, "name", rName), + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume), resource.TestCheckResourceAttr(resourceName, "ontap_volume_type", "DP"), ), }, @@ -188,13 +228,13 @@ func TestAccFSxOntapVolume_ontapVolumeType(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"skip_final_backup"}, + ImportStateVerifyIgnore: []string{"bypass_snaplock_enterprise_retention", "skip_final_backup"}, }, }, }) } -func TestAccFSxOntapVolume_securityStyle(t *testing.T) { +func 
TestAccFSxONTAPVolume_securityStyle(t *testing.T) { ctx := acctest.Context(t) var volume1, volume2, volume3 fsx.Volume resourceName := "aws_fsx_ontap_volume.test" @@ -204,12 +244,12 @@ func TestAccFSxOntapVolume_securityStyle(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOntapVolumeDestroy(ctx), + CheckDestroy: testAccCheckONTAPVolumeDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccONTAPVolumeConfig_securityStyle(rName, "UNIX"), Check: resource.ComposeTestCheckFunc( - testAccCheckOntapVolumeExists(ctx, resourceName, &volume1), + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume1), resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "security_style", "UNIX"), ), @@ -218,13 +258,13 @@ func TestAccFSxOntapVolume_securityStyle(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"skip_final_backup"}, + ImportStateVerifyIgnore: []string{"bypass_snaplock_enterprise_retention", "skip_final_backup"}, }, { Config: testAccONTAPVolumeConfig_securityStyle(rName, "NTFS"), Check: resource.ComposeTestCheckFunc( - testAccCheckOntapVolumeExists(ctx, resourceName, &volume2), - testAccCheckOntapVolumeNotRecreated(&volume1, &volume2), + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume2), + testAccCheckONTAPVolumeNotRecreated(&volume1, &volume2), resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "security_style", "NTFS"), ), @@ -232,8 +272,8 @@ func TestAccFSxOntapVolume_securityStyle(t *testing.T) { { Config: testAccONTAPVolumeConfig_securityStyle(rName, "MIXED"), Check: resource.ComposeTestCheckFunc( - testAccCheckOntapVolumeExists(ctx, resourceName, &volume3), - 
testAccCheckOntapVolumeNotRecreated(&volume1, &volume3), + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume3), + testAccCheckONTAPVolumeNotRecreated(&volume1, &volume3), resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "security_style", "MIXED"), ), @@ -242,7 +282,7 @@ func TestAccFSxOntapVolume_securityStyle(t *testing.T) { }) } -func TestAccFSxOntapVolume_size(t *testing.T) { +func TestAccFSxONTAPVolume_size(t *testing.T) { ctx := acctest.Context(t) var volume1, volume2 fsx.Volume resourceName := "aws_fsx_ontap_volume.test" @@ -254,12 +294,12 @@ func TestAccFSxOntapVolume_size(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOntapVolumeDestroy(ctx), + CheckDestroy: testAccCheckONTAPVolumeDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccONTAPVolumeConfig_size(rName, size1), Check: resource.ComposeTestCheckFunc( - testAccCheckOntapVolumeExists(ctx, resourceName, &volume1), + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume1), resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "size_in_megabytes", fmt.Sprint(size1)), ), @@ -268,13 +308,13 @@ func TestAccFSxOntapVolume_size(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"skip_final_backup"}, + ImportStateVerifyIgnore: []string{"bypass_snaplock_enterprise_retention", "skip_final_backup"}, }, { Config: testAccONTAPVolumeConfig_size(rName, size2), Check: resource.ComposeTestCheckFunc( - testAccCheckOntapVolumeExists(ctx, resourceName, &volume2), - testAccCheckOntapVolumeNotRecreated(&volume1, &volume2), + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume2), + 
testAccCheckONTAPVolumeNotRecreated(&volume1, &volume2), resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "size_in_megabytes", fmt.Sprint(size2)), ), @@ -283,7 +323,126 @@ func TestAccFSxOntapVolume_size(t *testing.T) { }) } -func TestAccFSxOntapVolume_storageEfficiency(t *testing.T) { +func TestAccFSxONTAPVolume_snaplock(t *testing.T) { + ctx := acctest.Context(t) + var volume1 /*, volume2*/ fsx.Volume + resourceName := "aws_fsx_ontap_volume.test" + rName := fmt.Sprintf("tf_acc_test_%d", sdkacctest.RandInt()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckONTAPVolumeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccONTAPVolumeConfig_snaplockCreate(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume1), + resource.TestCheckResourceAttr(resourceName, "bypass_snaplock_enterprise_retention", "true"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.audit_log_volume", "false"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.autocommit_period.#", "1"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.autocommit_period.0.type", "NONE"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.autocommit_period.0.value", "0"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.privileged_delete", "DISABLED"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.#", "1"), + resource.TestCheckResourceAttr(resourceName, 
"snaplock_configuration.0.retention_period.0.default_retention.#", "1"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.0.default_retention.0.type", "YEARS"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.0.default_retention.0.value", "0"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.0.maximum_retention.#", "1"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.0.maximum_retention.0.type", "YEARS"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.0.maximum_retention.0.value", "30"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.0.minimum_retention.#", "1"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.0.minimum_retention.0.type", "YEARS"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.0.minimum_retention.0.value", "0"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.snaplock_type", "ENTERPRISE"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.volume_append_mode_enabled", "false"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"bypass_snaplock_enterprise_retention", "skip_final_backup"}, + }, + /* + See https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/how-snaplock-works.html#snaplock-audit-log-volume. + > The minimum retention period for a SnapLock audit log volume is six months. Until this retention period expires, the SnapLock audit log volume and the SVM and file system that are associated with it can't be deleted even if the volume was created in SnapLock Enterprise mode. 
+ + { + Config: testAccONTAPVolumeConfig_snaplockUpdate(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume2), + testAccCheckONTAPVolumeNotRecreated(&volume1, &volume2), + resource.TestCheckResourceAttr(resourceName, "bypass_snaplock_enterprise_retention", "true"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.audit_log_volume", "true"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.autocommit_period.#", "1"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.autocommit_period.0.type", "DAYS"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.autocommit_period.0.value", "14"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.privileged_delete", "PERMANENTLY_DISABLED"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.#", "1"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.0.default_retention.#", "1"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.0.default_retention.0.type", "DAYS"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.0.default_retention.0.value", "30"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.0.maximum_retention.#", "1"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.0.maximum_retention.0.type", "MONTHS"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.0.maximum_retention.0.value", "9"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.0.minimum_retention.#", "1"), + resource.TestCheckResourceAttr(resourceName, 
"snaplock_configuration.0.retention_period.0.minimum_retention.0.type", "HOURS"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.retention_period.0.minimum_retention.0.value", "24"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.snaplock_type", "ENTERPRISE"), + resource.TestCheckResourceAttr(resourceName, "snaplock_configuration.0.volume_append_mode_enabled", "true"), + ), + }, + */ + }, + }) +} + +func TestAccFSxONTAPVolume_snapshotPolicy(t *testing.T) { + ctx := acctest.Context(t) + var volume1, volume2 fsx.Volume + resourceName := "aws_fsx_ontap_volume.test" + rName := fmt.Sprintf("tf_acc_test_%d", sdkacctest.RandInt()) + policy1 := "default" + policy2 := "none" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckONTAPVolumeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccONTAPVolumeConfig_snapshotPolicy(rName, policy1), + Check: resource.ComposeTestCheckFunc( + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume1), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "snapshot_policy", fmt.Sprint(policy1)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"bypass_snaplock_enterprise_retention", "skip_final_backup"}, + }, + { + Config: testAccONTAPVolumeConfig_snapshotPolicy(rName, policy2), + Check: resource.ComposeTestCheckFunc( + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume2), + testAccCheckONTAPVolumeNotRecreated(&volume1, &volume2), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "snapshot_policy", fmt.Sprint(policy2)), + ), + }, + }, + 
}) +} + +func TestAccFSxONTAPVolume_storageEfficiency(t *testing.T) { ctx := acctest.Context(t) var volume1, volume2 fsx.Volume resourceName := "aws_fsx_ontap_volume.test" @@ -293,12 +452,12 @@ func TestAccFSxOntapVolume_storageEfficiency(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOntapVolumeDestroy(ctx), + CheckDestroy: testAccCheckONTAPVolumeDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccONTAPVolumeConfig_storageEfficiency(rName, true), Check: resource.ComposeTestCheckFunc( - testAccCheckOntapVolumeExists(ctx, resourceName, &volume1), + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume1), resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "storage_efficiency_enabled", "true"), ), @@ -307,13 +466,13 @@ func TestAccFSxOntapVolume_storageEfficiency(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"skip_final_backup"}, + ImportStateVerifyIgnore: []string{"bypass_snaplock_enterprise_retention", "skip_final_backup"}, }, { Config: testAccONTAPVolumeConfig_storageEfficiency(rName, false), Check: resource.ComposeTestCheckFunc( - testAccCheckOntapVolumeExists(ctx, resourceName, &volume2), - testAccCheckOntapVolumeNotRecreated(&volume1, &volume2), + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume2), + testAccCheckONTAPVolumeNotRecreated(&volume1, &volume2), resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "storage_efficiency_enabled", "false"), ), @@ -322,7 +481,7 @@ func TestAccFSxOntapVolume_storageEfficiency(t *testing.T) { }) } -func TestAccFSxOntapVolume_tags(t *testing.T) { +func TestAccFSxONTAPVolume_tags(t *testing.T) { ctx := 
acctest.Context(t) var volume1, volume2, volume3 fsx.Volume resourceName := "aws_fsx_ontap_volume.test" @@ -332,12 +491,12 @@ func TestAccFSxOntapVolume_tags(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOntapVolumeDestroy(ctx), + CheckDestroy: testAccCheckONTAPVolumeDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccONTAPVolumeConfig_tags1(rName, "key1", "value1"), Check: resource.ComposeTestCheckFunc( - testAccCheckOntapVolumeExists(ctx, resourceName, &volume1), + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume1), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), ), @@ -346,13 +505,13 @@ func TestAccFSxOntapVolume_tags(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"skip_final_backup"}, + ImportStateVerifyIgnore: []string{"bypass_snaplock_enterprise_retention", "skip_final_backup"}, }, { Config: testAccONTAPVolumeConfig_tags2(rName, "key1", "value1updated", "key2", "value2"), Check: resource.ComposeTestCheckFunc( - testAccCheckOntapVolumeExists(ctx, resourceName, &volume2), - testAccCheckOntapVolumeNotRecreated(&volume1, &volume2), + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume2), + testAccCheckONTAPVolumeNotRecreated(&volume1, &volume2), resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"), resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), @@ -361,8 +520,8 @@ func TestAccFSxOntapVolume_tags(t *testing.T) { { Config: testAccONTAPVolumeConfig_tags1(rName, "key2", "value2"), Check: resource.ComposeTestCheckFunc( - testAccCheckOntapVolumeExists(ctx, resourceName, &volume3), 
- testAccCheckOntapVolumeNotRecreated(&volume2, &volume3), + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume3), + testAccCheckONTAPVolumeNotRecreated(&volume2, &volume3), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), ), @@ -371,7 +530,7 @@ func TestAccFSxOntapVolume_tags(t *testing.T) { }) } -func TestAccFSxOntapVolume_tieringPolicy(t *testing.T) { +func TestAccFSxONTAPVolume_tieringPolicy(t *testing.T) { ctx := acctest.Context(t) var volume1, volume2, volume3, volume4 fsx.Volume resourceName := "aws_fsx_ontap_volume.test" @@ -381,12 +540,12 @@ func TestAccFSxOntapVolume_tieringPolicy(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOntapVolumeDestroy(ctx), + CheckDestroy: testAccCheckONTAPVolumeDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccONTAPVolumeConfig_tieringPolicyNoCooling(rName, "NONE"), Check: resource.ComposeTestCheckFunc( - testAccCheckOntapVolumeExists(ctx, resourceName, &volume1), + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume1), resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "tiering_policy.0.name", "NONE"), ), @@ -395,13 +554,13 @@ func TestAccFSxOntapVolume_tieringPolicy(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"skip_final_backup"}, + ImportStateVerifyIgnore: []string{"bypass_snaplock_enterprise_retention", "skip_final_backup"}, }, { Config: testAccONTAPVolumeConfig_tieringPolicy(rName, "SNAPSHOT_ONLY", 10), Check: resource.ComposeTestCheckFunc( - testAccCheckOntapVolumeExists(ctx, resourceName, &volume2), - testAccCheckOntapVolumeNotRecreated(&volume1, &volume2), + 
testAccCheckONTAPVolumeExists(ctx, resourceName, &volume2), + testAccCheckONTAPVolumeNotRecreated(&volume1, &volume2), resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "tiering_policy.0.name", "SNAPSHOT_ONLY"), resource.TestCheckResourceAttr(resourceName, "tiering_policy.0.cooling_period", "10"), @@ -410,8 +569,8 @@ func TestAccFSxOntapVolume_tieringPolicy(t *testing.T) { { Config: testAccONTAPVolumeConfig_tieringPolicy(rName, "AUTO", 60), Check: resource.ComposeTestCheckFunc( - testAccCheckOntapVolumeExists(ctx, resourceName, &volume3), - testAccCheckOntapVolumeNotRecreated(&volume1, &volume3), + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume3), + testAccCheckONTAPVolumeNotRecreated(&volume1, &volume3), resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "tiering_policy.0.name", "AUTO"), resource.TestCheckResourceAttr(resourceName, "tiering_policy.0.cooling_period", "60"), @@ -420,8 +579,8 @@ func TestAccFSxOntapVolume_tieringPolicy(t *testing.T) { { Config: testAccONTAPVolumeConfig_tieringPolicyNoCooling(rName, "ALL"), Check: resource.ComposeTestCheckFunc( - testAccCheckOntapVolumeExists(ctx, resourceName, &volume4), - testAccCheckOntapVolumeNotRecreated(&volume1, &volume4), + testAccCheckONTAPVolumeExists(ctx, resourceName, &volume4), + testAccCheckONTAPVolumeNotRecreated(&volume1, &volume4), resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "tiering_policy.0.name", "ALL"), ), @@ -430,7 +589,7 @@ func TestAccFSxOntapVolume_tieringPolicy(t *testing.T) { }) } -func testAccCheckOntapVolumeExists(ctx context.Context, n string, v *fsx.Volume) resource.TestCheckFunc { +func testAccCheckONTAPVolumeExists(ctx context.Context, n string, v *fsx.Volume) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { @@ -439,7 +598,7 @@ func 
testAccCheckOntapVolumeExists(ctx context.Context, n string, v *fsx.Volume) conn := acctest.Provider.Meta().(*conns.AWSClient).FSxConn(ctx) - output, err := tffsx.FindVolumeByID(ctx, conn, rs.Primary.ID) + output, err := tffsx.FindONTAPVolumeByID(ctx, conn, rs.Primary.ID) if err != nil { return err @@ -451,7 +610,7 @@ func testAccCheckOntapVolumeExists(ctx context.Context, n string, v *fsx.Volume) } } -func testAccCheckOntapVolumeDestroy(ctx context.Context) resource.TestCheckFunc { +func testAccCheckONTAPVolumeDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).FSxConn(ctx) @@ -460,41 +619,43 @@ func testAccCheckOntapVolumeDestroy(ctx context.Context) resource.TestCheckFunc continue } - volume, err := tffsx.FindVolumeByID(ctx, conn, rs.Primary.ID) + _, err := tffsx.FindONTAPVolumeByID(ctx, conn, rs.Primary.ID) if tfresource.NotFound(err) { continue } - if volume != nil { - return fmt.Errorf("FSx ONTAP Volume (%s) still exists", rs.Primary.ID) + if err != nil { + return err } + + return fmt.Errorf("FSx for NetApp ONTAP Volume %s still exists", rs.Primary.ID) } return nil } } -func testAccCheckOntapVolumeNotRecreated(i, j *fsx.Volume) resource.TestCheckFunc { +func testAccCheckONTAPVolumeNotRecreated(i, j *fsx.Volume) resource.TestCheckFunc { return func(s *terraform.State) error { if aws.StringValue(i.VolumeId) != aws.StringValue(j.VolumeId) { - return fmt.Errorf("FSx ONTAP Volume (%s) recreated", aws.StringValue(i.VolumeId)) + return fmt.Errorf("FSx for NetApp ONTAP Volume (%s) recreated", aws.StringValue(i.VolumeId)) } return nil } } -func testAccCheckOntapVolumeRecreated(i, j *fsx.Volume) resource.TestCheckFunc { +func testAccCheckONTAPVolumeRecreated(i, j *fsx.Volume) resource.TestCheckFunc { return func(s *terraform.State) error { if aws.StringValue(i.VolumeId) == aws.StringValue(j.VolumeId) { - return fmt.Errorf("FSx ONTAP Volume (%s) not recreated", 
aws.StringValue(i.VolumeId)) + return fmt.Errorf("FSx for NetApp ONTAP Volume (%s) not recreated", aws.StringValue(i.VolumeId)) } return nil } } -func testAccOntapVolumeConfig_base(rName string) string { +func testAccONTAPVolumeConfig_base(rName string) string { return acctest.ConfigCompose(acctest.ConfigVPCWithSubnets(rName, 2), fmt.Sprintf(` resource "aws_fsx_ontap_file_system" "test" { storage_capacity = 1024 @@ -516,7 +677,7 @@ resource "aws_fsx_ontap_storage_virtual_machine" "test" { } func testAccONTAPVolumeConfig_basic(rName string) string { - return acctest.ConfigCompose(testAccOntapVolumeConfig_base(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccONTAPVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_ontap_volume" "test" { name = %[1]q junction_path = "/%[1]s" @@ -527,8 +688,21 @@ resource "aws_fsx_ontap_volume" "test" { `, rName)) } -func testAccONTAPVolumeConfig_junctionPath(rName string, junctionPath string) string { - return acctest.ConfigCompose(testAccOntapVolumeConfig_base(rName), fmt.Sprintf(` +func testAccONTAPVolumeConfig_copyTagsToBackups(rName string, copyTagsToBackups bool) string { + return acctest.ConfigCompose(testAccONTAPVolumeConfig_base(rName), fmt.Sprintf(` +resource "aws_fsx_ontap_volume" "test" { + name = %[1]q + junction_path = "/%[1]s" + size_in_megabytes = 1024 + storage_efficiency_enabled = true + storage_virtual_machine_id = aws_fsx_ontap_storage_virtual_machine.test.id + copy_tags_to_backups = %[2]t +} +`, rName, copyTagsToBackups)) +} + +func testAccONTAPVolumeConfig_junctionPath(rName, junctionPath string) string { + return acctest.ConfigCompose(testAccONTAPVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_ontap_volume" "test" { name = %[1]q junction_path = %[2]q @@ -540,7 +714,7 @@ resource "aws_fsx_ontap_volume" "test" { } func testAccONTAPVolumeConfig_ontapVolumeTypeDP(rName string) string { - return acctest.ConfigCompose(testAccOntapVolumeConfig_base(rName), fmt.Sprintf(` + return 
acctest.ConfigCompose(testAccONTAPVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_ontap_volume" "test" { name = %[1]q ontap_volume_type = "DP" @@ -551,8 +725,8 @@ resource "aws_fsx_ontap_volume" "test" { `, rName)) } -func testAccONTAPVolumeConfig_securityStyle(rName string, securityStyle string) string { - return acctest.ConfigCompose(testAccOntapVolumeConfig_base(rName), fmt.Sprintf(` +func testAccONTAPVolumeConfig_securityStyle(rName, securityStyle string) string { + return acctest.ConfigCompose(testAccONTAPVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_ontap_volume" "test" { name = %[1]q junction_path = "/%[1]s" @@ -565,7 +739,7 @@ resource "aws_fsx_ontap_volume" "test" { } func testAccONTAPVolumeConfig_size(rName string, size int) string { - return acctest.ConfigCompose(testAccOntapVolumeConfig_base(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccONTAPVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_ontap_volume" "test" { name = %[1]q junction_path = "/%[1]s" @@ -576,8 +750,84 @@ resource "aws_fsx_ontap_volume" "test" { `, rName, size)) } +func testAccONTAPVolumeConfig_snaplockCreate(rName string) string { + return acctest.ConfigCompose(testAccONTAPVolumeConfig_base(rName), fmt.Sprintf(` +resource "aws_fsx_ontap_volume" "test" { + name = %[1]q + junction_path = "/%[1]s" + size_in_megabytes = 1024 + storage_efficiency_enabled = true + storage_virtual_machine_id = aws_fsx_ontap_storage_virtual_machine.test.id + + snaplock_configuration { + snaplock_type = "ENTERPRISE" + } + + bypass_snaplock_enterprise_retention = true +} +`, rName)) +} + +/* +func testAccONTAPVolumeConfig_snaplockUpdate(rName string) string { + return acctest.ConfigCompose(testAccONTAPVolumeConfig_base(rName), fmt.Sprintf(` +resource "aws_fsx_ontap_volume" "test" { + name = %[1]q + junction_path = "/snaplock_audit_log" + size_in_megabytes = 1024 + storage_efficiency_enabled = true + storage_virtual_machine_id = 
aws_fsx_ontap_storage_virtual_machine.test.id + + snaplock_configuration { + audit_log_volume = true + privileged_delete = "PERMANENTLY_DISABLED" + snaplock_type = "ENTERPRISE" + volume_append_mode_enabled = true + + autocommit_period { + type = "DAYS" + value = 14 + } + + retention_period { + default_retention { + type = "DAYS" + value = 30 + } + + maximum_retention { + type = "MONTHS" + value = 9 + } + + minimum_retention { + type = "HOURS" + value = 24 + } + } + } + + bypass_snaplock_enterprise_retention = true +} +`, rName)) +} +*/ + +func testAccONTAPVolumeConfig_snapshotPolicy(rName, snapshotPolicy string) string { + return acctest.ConfigCompose(testAccONTAPVolumeConfig_base(rName), fmt.Sprintf(` +resource "aws_fsx_ontap_volume" "test" { + name = %[1]q + junction_path = "/%[1]s" + size_in_megabytes = 1024 + snapshot_policy = %[2]q + storage_efficiency_enabled = true + storage_virtual_machine_id = aws_fsx_ontap_storage_virtual_machine.test.id +} +`, rName, snapshotPolicy)) +} + func testAccONTAPVolumeConfig_storageEfficiency(rName string, storageEfficiencyEnabled bool) string { - return acctest.ConfigCompose(testAccOntapVolumeConfig_base(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccONTAPVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_ontap_volume" "test" { name = %[1]q junction_path = "/%[1]s" @@ -588,8 +838,8 @@ resource "aws_fsx_ontap_volume" "test" { `, rName, storageEfficiencyEnabled)) } -func testAccONTAPVolumeConfig_tieringPolicy(rName string, policy string, coolingPeriod int) string { - return acctest.ConfigCompose(testAccOntapVolumeConfig_base(rName), fmt.Sprintf(` +func testAccONTAPVolumeConfig_tieringPolicy(rName, policy string, coolingPeriod int) string { + return acctest.ConfigCompose(testAccONTAPVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_ontap_volume" "test" { name = %[1]q junction_path = "/%[1]s" @@ -605,8 +855,8 @@ resource "aws_fsx_ontap_volume" "test" { `, rName, policy, coolingPeriod)) } -func 
testAccONTAPVolumeConfig_tieringPolicyNoCooling(rName string, policy string) string { - return acctest.ConfigCompose(testAccOntapVolumeConfig_base(rName), fmt.Sprintf(` +func testAccONTAPVolumeConfig_tieringPolicyNoCooling(rName, policy string) string { + return acctest.ConfigCompose(testAccONTAPVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_ontap_volume" "test" { name = %[1]q junction_path = "/%[1]s" @@ -622,7 +872,7 @@ resource "aws_fsx_ontap_volume" "test" { } func testAccONTAPVolumeConfig_tags1(rName, tagKey1, tagValue1 string) string { - return acctest.ConfigCompose(testAccOntapVolumeConfig_base(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccONTAPVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_ontap_volume" "test" { name = %[1]q junction_path = "/%[1]s" @@ -638,7 +888,7 @@ resource "aws_fsx_ontap_volume" "test" { } func testAccONTAPVolumeConfig_tags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { - return acctest.ConfigCompose(testAccOntapVolumeConfig_base(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccONTAPVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_ontap_volume" "test" { name = %[1]q junction_path = "/%[1]s" diff --git a/internal/service/fsx/openzfs_file_system.go b/internal/service/fsx/openzfs_file_system.go index f2a453c370c..c02c32af978 100644 --- a/internal/service/fsx/openzfs_file_system.go +++ b/internal/service/fsx/openzfs_file_system.go @@ -16,7 +16,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -386,8 +385,8 @@ func resourceOpenZFSFileSystemCreate(ctx context.Context, d 
*schema.ResourceData } if v, ok := d.GetOk("disk_iops_configuration"); ok { - inputC.OpenZFSConfiguration.DiskIopsConfiguration = expandOpenZFSFileDiskIopsConfiguration(v.([]interface{})) - inputB.OpenZFSConfiguration.DiskIopsConfiguration = expandOpenZFSFileDiskIopsConfiguration(v.([]interface{})) + inputC.OpenZFSConfiguration.DiskIopsConfiguration = expandDiskIopsConfiguration(v.([]interface{})) + inputB.OpenZFSConfiguration.DiskIopsConfiguration = expandDiskIopsConfiguration(v.([]interface{})) } if v, ok := d.GetOk("endpoint_ip_address_range"); ok { @@ -406,8 +405,8 @@ func resourceOpenZFSFileSystemCreate(ctx context.Context, d *schema.ResourceData } if v, ok := d.GetOk("root_volume_configuration"); ok { - inputC.OpenZFSConfiguration.RootVolumeConfiguration = expandOpenZFSRootVolumeConfiguration(v.([]interface{})) - inputB.OpenZFSConfiguration.RootVolumeConfiguration = expandOpenZFSRootVolumeConfiguration(v.([]interface{})) + inputC.OpenZFSConfiguration.RootVolumeConfiguration = expandOpenZFSCreateRootVolumeConfiguration(v.([]interface{})) + inputB.OpenZFSConfiguration.RootVolumeConfiguration = expandOpenZFSCreateRootVolumeConfiguration(v.([]interface{})) } if v, ok := d.GetOk("route_table_ids"); ok { @@ -482,7 +481,7 @@ func resourceOpenZFSFileSystemRead(ctx context.Context, d *schema.ResourceData, d.Set("copy_tags_to_volumes", openZFSConfig.CopyTagsToVolumes) d.Set("daily_automatic_backup_start_time", openZFSConfig.DailyAutomaticBackupStartTime) d.Set("deployment_type", openZFSConfig.DeploymentType) - if err := d.Set("disk_iops_configuration", flattenOpenZFSFileDiskIopsConfiguration(openZFSConfig.DiskIopsConfiguration)); err != nil { + if err := d.Set("disk_iops_configuration", flattenDiskIopsConfiguration(openZFSConfig.DiskIopsConfiguration)); err != nil { return sdkdiag.AppendErrorf(diags, "setting disk_iops_configuration: %s", err) } d.Set("dns_name", filesystem.DNSName) @@ -503,13 +502,13 @@ func resourceOpenZFSFileSystemRead(ctx context.Context, d 
*schema.ResourceData, setTagsOut(ctx, filesystem.Tags) - rootVolume, err := FindVolumeByID(ctx, conn, rootVolumeID) + rootVolume, err := FindOpenZFSVolumeByID(ctx, conn, rootVolumeID) if err != nil { return sdkdiag.AppendErrorf(diags, "reading FSx for OpenZFS File System (%s) root volume (%s): %s", d.Id(), rootVolumeID, err) } - if err := d.Set("root_volume_configuration", flattenOpenZFSRootVolumeConfiguration(rootVolume)); err != nil { + if err := d.Set("root_volume_configuration", flattenOpenZFSFileSystemRootVolume(rootVolume)); err != nil { return sdkdiag.AppendErrorf(diags, "setting root_volume_configuration: %s", err) } @@ -544,7 +543,7 @@ func resourceOpenZFSFileSystemUpdate(ctx context.Context, d *schema.ResourceData } if d.HasChange("disk_iops_configuration") { - input.OpenZFSConfiguration.DiskIopsConfiguration = expandOpenZFSFileDiskIopsConfiguration(d.Get("disk_iops_configuration").([]interface{})) + input.OpenZFSConfiguration.DiskIopsConfiguration = expandDiskIopsConfiguration(d.Get("disk_iops_configuration").([]interface{})) } if d.HasChange("route_table_ids") { @@ -583,7 +582,7 @@ func resourceOpenZFSFileSystemUpdate(ctx context.Context, d *schema.ResourceData return sdkdiag.AppendErrorf(diags, "waiting for FSx for OpenZFS File System (%s) update: %s", d.Id(), err) } - if _, err := waitAdministrativeActionCompleted(ctx, conn, d.Id(), fsx.AdministrativeActionTypeFileSystemUpdate, d.Timeout(schema.TimeoutUpdate)); err != nil { + if _, err := waitFileSystemAdministrativeActionCompleted(ctx, conn, d.Id(), fsx.AdministrativeActionTypeFileSystemUpdate, d.Timeout(schema.TimeoutUpdate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for FSx for OpenZFS File System (%s) administrative action (%s) complete: %s", d.Id(), fsx.AdministrativeActionTypeFileSystemUpdate, err) } @@ -591,22 +590,23 @@ func resourceOpenZFSFileSystemUpdate(ctx context.Context, d *schema.ResourceData rootVolumeID := d.Get("root_volume_id").(string) input := 
&fsx.UpdateVolumeInput{ ClientRequestToken: aws.String(id.UniqueId()), - OpenZFSConfiguration: expandOpenZFSUpdateRootVolumeConfiguration(d.Get("root_volume_configuration").([]interface{})), + OpenZFSConfiguration: expandUpdateOpenZFSVolumeConfiguration(d.Get("root_volume_configuration").([]interface{})), VolumeId: aws.String(rootVolumeID), } + startTime := time.Now() _, err := conn.UpdateVolumeWithContext(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating FSx for OpenZFS Root Volume (%s): %s", rootVolumeID, err) } - if _, err := waitVolumeUpdated(ctx, conn, rootVolumeID, d.Timeout(schema.TimeoutUpdate)); err != nil { + if _, err := waitVolumeUpdated(ctx, conn, rootVolumeID, startTime, d.Timeout(schema.TimeoutUpdate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for FSx for OpenZFS Root Volume (%s) update: %s", rootVolumeID, err) } - if _, err := waitAdministrativeActionCompleted(ctx, conn, d.Id(), fsx.AdministrativeActionTypeVolumeUpdate, d.Timeout(schema.TimeoutUpdate)); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for FSx for OpenZFS File System (%s) administrative action (%s) complete: %s", d.Id(), fsx.AdministrativeActionTypeVolumeUpdate, err) + if _, err := waitVolumeAdministrativeActionCompleted(ctx, conn, rootVolumeID, fsx.AdministrativeActionTypeVolumeUpdate, d.Timeout(schema.TimeoutUpdate)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for FSx for OpenZFS Volume (%s) administrative action (%s) complete: %s", rootVolumeID, fsx.AdministrativeActionTypeVolumeUpdate, err) } } } @@ -638,7 +638,7 @@ func resourceOpenZFSFileSystemDelete(ctx context.Context, d *schema.ResourceData return diags } -func expandOpenZFSFileDiskIopsConfiguration(cfg []interface{}) *fsx.DiskIopsConfiguration { +func expandDiskIopsConfiguration(cfg []interface{}) *fsx.DiskIopsConfiguration { if len(cfg) < 1 { return nil } @@ -658,7 +658,7 @@ func expandOpenZFSFileDiskIopsConfiguration(cfg []interface{}) *fsx.DiskIopsConf 
return &out } -func expandOpenZFSRootVolumeConfiguration(cfg []interface{}) *fsx.OpenZFSCreateRootVolumeConfiguration { +func expandOpenZFSCreateRootVolumeConfiguration(cfg []interface{}) *fsx.OpenZFSCreateRootVolumeConfiguration { if len(cfg) < 1 { return nil } @@ -684,17 +684,17 @@ func expandOpenZFSRootVolumeConfiguration(cfg []interface{}) *fsx.OpenZFSCreateR } if v, ok := conf["user_and_group_quotas"]; ok { - out.UserAndGroupQuotas = expandOpenZFSUserAndGroupQuotas(v.(*schema.Set).List()) + out.UserAndGroupQuotas = expandOpenZFSUserOrGroupQuotas(v.(*schema.Set).List()) } if v, ok := conf["nfs_exports"].([]interface{}); ok { - out.NfsExports = expandOpenZFSNFSExports(v) + out.NfsExports = expandOpenZFSNfsExports(v) } return &out } -func expandOpenZFSUpdateRootVolumeConfiguration(cfg []interface{}) *fsx.UpdateOpenZFSVolumeConfiguration { +func expandUpdateOpenZFSVolumeConfiguration(cfg []interface{}) *fsx.UpdateOpenZFSVolumeConfiguration { if len(cfg) < 1 { return nil } @@ -716,102 +716,17 @@ func expandOpenZFSUpdateRootVolumeConfiguration(cfg []interface{}) *fsx.UpdateOp } if v, ok := conf["user_and_group_quotas"]; ok { - out.UserAndGroupQuotas = expandOpenZFSUserAndGroupQuotas(v.(*schema.Set).List()) + out.UserAndGroupQuotas = expandOpenZFSUserOrGroupQuotas(v.(*schema.Set).List()) } if v, ok := conf["nfs_exports"].([]interface{}); ok { - out.NfsExports = expandOpenZFSNFSExports(v) + out.NfsExports = expandOpenZFSNfsExports(v) } return &out } -func expandOpenZFSUserAndGroupQuotas(cfg []interface{}) []*fsx.OpenZFSUserOrGroupQuota { - quotas := []*fsx.OpenZFSUserOrGroupQuota{} - - for _, quota := range cfg { - expandedQuota := expandOpenZFSUserAndGroupQuota(quota.(map[string]interface{})) - if expandedQuota != nil { - quotas = append(quotas, expandedQuota) - } - } - - return quotas -} - -func expandOpenZFSUserAndGroupQuota(conf map[string]interface{}) *fsx.OpenZFSUserOrGroupQuota { - if len(conf) < 1 { - return nil - } - - out := fsx.OpenZFSUserOrGroupQuota{} - - 
if v, ok := conf["id"].(int); ok { - out.Id = aws.Int64(int64(v)) - } - - if v, ok := conf["storage_capacity_quota_gib"].(int); ok { - out.StorageCapacityQuotaGiB = aws.Int64(int64(v)) - } - - if v, ok := conf["type"].(string); ok { - out.Type = aws.String(v) - } - - return &out -} - -func expandOpenZFSNFSExports(cfg []interface{}) []*fsx.OpenZFSNfsExport { - exports := []*fsx.OpenZFSNfsExport{} - - for _, export := range cfg { - expandedExport := expandOpenZFSNFSExport(export.(map[string]interface{})) - if expandedExport != nil { - exports = append(exports, expandedExport) - } - } - - return exports -} - -func expandOpenZFSNFSExport(cfg map[string]interface{}) *fsx.OpenZFSNfsExport { - out := fsx.OpenZFSNfsExport{} - - if v, ok := cfg["client_configurations"]; ok { - out.ClientConfigurations = expandOpenZFSClinetConfigurations(v.(*schema.Set).List()) - } - - return &out -} - -func expandOpenZFSClinetConfigurations(cfg []interface{}) []*fsx.OpenZFSClientConfiguration { - configurations := []*fsx.OpenZFSClientConfiguration{} - - for _, configuration := range cfg { - expandedConfiguration := expandOpenZFSClientConfiguration(configuration.(map[string]interface{})) - if expandedConfiguration != nil { - configurations = append(configurations, expandedConfiguration) - } - } - - return configurations -} - -func expandOpenZFSClientConfiguration(conf map[string]interface{}) *fsx.OpenZFSClientConfiguration { - out := fsx.OpenZFSClientConfiguration{} - - if v, ok := conf["clients"].(string); ok && len(v) > 0 { - out.Clients = aws.String(v) - } - - if v, ok := conf["options"].([]interface{}); ok { - out.Options = flex.ExpandStringList(v) - } - - return &out -} - -func flattenOpenZFSFileDiskIopsConfiguration(rs *fsx.DiskIopsConfiguration) []interface{} { +func flattenDiskIopsConfiguration(rs *fsx.DiskIopsConfiguration) []interface{} { if rs == nil { return []interface{}{} } @@ -827,7 +742,7 @@ func flattenOpenZFSFileDiskIopsConfiguration(rs *fsx.DiskIopsConfiguration) []in 
return []interface{}{m} } -func flattenOpenZFSRootVolumeConfiguration(rs *fsx.Volume) []interface{} { +func flattenOpenZFSFileSystemRootVolume(rs *fsx.Volume) []interface{} { if rs == nil { return []interface{}{} } @@ -840,7 +755,7 @@ func flattenOpenZFSRootVolumeConfiguration(rs *fsx.Volume) []interface{} { m["data_compression_type"] = aws.StringValue(rs.OpenZFSConfiguration.DataCompressionType) } if rs.OpenZFSConfiguration.NfsExports != nil { - m["nfs_exports"] = flattenOpenZFSFileNFSExports(rs.OpenZFSConfiguration.NfsExports) + m["nfs_exports"] = flattenOpenZFSNfsExports(rs.OpenZFSConfiguration.NfsExports) } if rs.OpenZFSConfiguration.ReadOnly != nil { m["read_only"] = aws.BoolValue(rs.OpenZFSConfiguration.ReadOnly) @@ -849,69 +764,12 @@ func flattenOpenZFSRootVolumeConfiguration(rs *fsx.Volume) []interface{} { m["record_size_kib"] = aws.Int64Value(rs.OpenZFSConfiguration.RecordSizeKiB) } if rs.OpenZFSConfiguration.UserAndGroupQuotas != nil { - m["user_and_group_quotas"] = flattenOpenZFSFileUserAndGroupQuotas(rs.OpenZFSConfiguration.UserAndGroupQuotas) + m["user_and_group_quotas"] = flattenOpenZFSUserOrGroupQuotas(rs.OpenZFSConfiguration.UserAndGroupQuotas) } return []interface{}{m} } -func flattenOpenZFSFileNFSExports(rs []*fsx.OpenZFSNfsExport) []map[string]interface{} { - exports := make([]map[string]interface{}, 0) - - for _, export := range rs { - if export != nil { - cfg := make(map[string]interface{}) - cfg["client_configurations"] = flattenOpenZFSClientConfigurations(export.ClientConfigurations) - exports = append(exports, cfg) - } - } - - if len(exports) > 0 { - return exports - } - - return nil -} - -func flattenOpenZFSClientConfigurations(rs []*fsx.OpenZFSClientConfiguration) []map[string]interface{} { - configurations := make([]map[string]interface{}, 0) - - for _, configuration := range rs { - if configuration != nil { - cfg := make(map[string]interface{}) - cfg["clients"] = aws.StringValue(configuration.Clients) - cfg["options"] = 
flex.FlattenStringList(configuration.Options) - configurations = append(configurations, cfg) - } - } - - if len(configurations) > 0 { - return configurations - } - - return nil -} - -func flattenOpenZFSFileUserAndGroupQuotas(rs []*fsx.OpenZFSUserOrGroupQuota) []map[string]interface{} { - quotas := make([]map[string]interface{}, 0) - - for _, quota := range rs { - if quota != nil { - cfg := make(map[string]interface{}) - cfg["id"] = aws.Int64Value(quota.Id) - cfg["storage_capacity_quota_gib"] = aws.Int64Value(quota.StorageCapacityQuotaGiB) - cfg["type"] = aws.StringValue(quota.Type) - quotas = append(quotas, cfg) - } - } - - if len(quotas) > 0 { - return quotas - } - - return nil -} - func FindOpenZFSFileSystemByID(ctx context.Context, conn *fsx.FSx, id string) (*fsx.FileSystem, error) { output, err := findFileSystemByIDAndType(ctx, conn, id, fsx.FileSystemTypeOpenzfs) @@ -925,52 +783,3 @@ func FindOpenZFSFileSystemByID(ctx context.Context, conn *fsx.FSx, id string) (* return output, nil } - -func FindVolumeByID(ctx context.Context, conn *fsx.FSx, id string) (*fsx.Volume, error) { - input := &fsx.DescribeVolumesInput{ - VolumeIds: aws.StringSlice([]string{id}), - } - - return findVolume(ctx, conn, input) -} - -func findVolume(ctx context.Context, conn *fsx.FSx, input *fsx.DescribeVolumesInput) (*fsx.Volume, error) { - output, err := findVolumes(ctx, conn, input) - - if err != nil { - return nil, err - } - - return tfresource.AssertSinglePtrResult(output) -} - -func findVolumes(ctx context.Context, conn *fsx.FSx, input *fsx.DescribeVolumesInput) ([]*fsx.Volume, error) { - var output []*fsx.Volume - - err := conn.DescribeVolumesPagesWithContext(ctx, input, func(page *fsx.DescribeVolumesOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } - - for _, v := range page.Volumes { - if v != nil { - output = append(output, v) - } - } - - return !lastPage - }) - - if tfawserr.ErrCodeEquals(err, fsx.ErrCodeVolumeNotFound) { - return nil, &retry.NotFoundError{ 
- LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - return output, nil -} diff --git a/internal/service/fsx/openzfs_volume.go b/internal/service/fsx/openzfs_volume.go index 11691f108f8..e71216f4390 100644 --- a/internal/service/fsx/openzfs_volume.go +++ b/internal/service/fsx/openzfs_volume.go @@ -27,14 +27,19 @@ import ( // @SDKResource("aws_fsx_openzfs_volume", name="OpenZFS Volume") // @Tags(identifierAttribute="arn") -func ResourceOpenzfsVolume() *schema.Resource { +func ResourceOpenZFSVolume() *schema.Resource { return &schema.Resource{ - CreateWithoutTimeout: resourceOpenzfsVolumeCreate, - ReadWithoutTimeout: resourceOpenzfsVolumeRead, - UpdateWithoutTimeout: resourceOpenzfsVolumeUpdate, - DeleteWithoutTimeout: resourceOpenzfsVolumeDelete, + CreateWithoutTimeout: resourceOpenZFSVolumeCreate, + ReadWithoutTimeout: resourceOpenZFSVolumeRead, + UpdateWithoutTimeout: resourceOpenZFSVolumeUpdate, + DeleteWithoutTimeout: resourceOpenZFSVolumeDelete, + Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, + StateContext: func(ctx context.Context, d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + d.Set("delete_volume_options", nil) + + return []*schema.ResourceData{d}, nil + }, }, Timeouts: &schema.ResourceTimeout{ @@ -59,6 +64,15 @@ func ResourceOpenzfsVolume() *schema.Resource { Default: "NONE", ValidateFunc: validation.StringInSlice(fsx.OpenZFSDataCompressionType_Values(), false), }, + "delete_volume_options": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice(fsx.DeleteFileSystemOpenZFSOption_Values(), false), + }, + }, "name": { Type: schema.TypeString, Required: true, @@ -185,6 +199,7 @@ func ResourceOpenzfsVolume() *schema.Resource { Type: schema.TypeString, Default: fsx.VolumeTypeOpenzfs, Optional: true, + ForceNew: true, ValidateFunc: 
validation.StringInSlice(fsx.VolumeType_Values(), false), }, }, @@ -193,218 +208,216 @@ func ResourceOpenzfsVolume() *schema.Resource { } } -func resourceOpenzfsVolumeCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { +func resourceOpenZFSVolumeCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).FSxConn(ctx) - input := &fsx.CreateVolumeInput{ - ClientRequestToken: aws.String(id.UniqueId()), - Name: aws.String(d.Get("name").(string)), - VolumeType: aws.String(d.Get("volume_type").(string)), - OpenZFSConfiguration: &fsx.CreateOpenZFSVolumeConfiguration{ - ParentVolumeId: aws.String(d.Get("parent_volume_id").(string)), - }, - Tags: getTagsIn(ctx), + openzfsConfig := &fsx.CreateOpenZFSVolumeConfiguration{ + ParentVolumeId: aws.String(d.Get("parent_volume_id").(string)), } if v, ok := d.GetOk("copy_tags_to_snapshots"); ok { - input.OpenZFSConfiguration.CopyTagsToSnapshots = aws.Bool(v.(bool)) + openzfsConfig.CopyTagsToSnapshots = aws.Bool(v.(bool)) } if v, ok := d.GetOk("data_compression_type"); ok { - input.OpenZFSConfiguration.DataCompressionType = aws.String(v.(string)) + openzfsConfig.DataCompressionType = aws.String(v.(string)) } if v, ok := d.GetOk("nfs_exports"); ok { - input.OpenZFSConfiguration.NfsExports = expandOpenzfsVolumeNFSExports(v.([]interface{})) + openzfsConfig.NfsExports = expandOpenZFSNfsExports(v.([]interface{})) + } + + if v, ok := d.GetOk("origin_snapshot"); ok { + openzfsConfig.OriginSnapshot = expandCreateOpenZFSOriginSnapshotConfiguration(v.([]interface{})) } if v, ok := d.GetOk("read_only"); ok { - input.OpenZFSConfiguration.ReadOnly = aws.Bool(v.(bool)) + openzfsConfig.ReadOnly = aws.Bool(v.(bool)) } if v, ok := d.GetOk("record_size_kib"); ok { - input.OpenZFSConfiguration.RecordSizeKiB = aws.Int64(int64(v.(int))) + openzfsConfig.RecordSizeKiB = aws.Int64(int64(v.(int))) } if v, ok := 
d.GetOk("storage_capacity_quota_gib"); ok { - input.OpenZFSConfiguration.StorageCapacityQuotaGiB = aws.Int64(int64(v.(int))) + openzfsConfig.StorageCapacityQuotaGiB = aws.Int64(int64(v.(int))) } if v, ok := d.GetOk("storage_capacity_reservation_gib"); ok { - input.OpenZFSConfiguration.StorageCapacityReservationGiB = aws.Int64(int64(v.(int))) + openzfsConfig.StorageCapacityReservationGiB = aws.Int64(int64(v.(int))) } if v, ok := d.GetOk("user_and_group_quotas"); ok { - input.OpenZFSConfiguration.UserAndGroupQuotas = expandOpenzfsVolumeUserAndGroupQuotas(v.(*schema.Set).List()) + openzfsConfig.UserAndGroupQuotas = expandOpenZFSUserOrGroupQuotas(v.(*schema.Set).List()) } - if v, ok := d.GetOk("origin_snapshot"); ok { - input.OpenZFSConfiguration.OriginSnapshot = expandOpenzfsCreateVolumeOriginSnapshot(v.([]interface{})) - - log.Printf("[DEBUG] Creating FSx OpenZFS Volume: %s", input) - result, err := conn.CreateVolumeWithContext(ctx, input) - - if err != nil { - return sdkdiag.AppendErrorf(diags, "creating FSx OpenZFS Volume from snapshot: %s", err) - } - - d.SetId(aws.StringValue(result.Volume.VolumeId)) - } else { - log.Printf("[DEBUG] Creating FSx OpenZFS Volume: %s", input) - result, err := conn.CreateVolumeWithContext(ctx, input) + name := d.Get("name").(string) + input := &fsx.CreateVolumeInput{ + ClientRequestToken: aws.String(id.UniqueId()), + Name: aws.String(name), + OpenZFSConfiguration: openzfsConfig, + Tags: getTagsIn(ctx), + VolumeType: aws.String(d.Get("volume_type").(string)), + } - if err != nil { - return sdkdiag.AppendErrorf(diags, "creating FSx OpenZFS Volume: %s", err) - } + output, err := conn.CreateVolumeWithContext(ctx, input) - d.SetId(aws.StringValue(result.Volume.VolumeId)) + if err != nil { + return sdkdiag.AppendErrorf(diags, "creating FSx for OpenZFS Volume (%s): %s", name, err) } + d.SetId(aws.StringValue(output.Volume.VolumeId)) + if _, err := waitVolumeCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { - return 
sdkdiag.AppendErrorf(diags, "waiting for FSx OpenZFS Volume(%s) create: %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "waiting for FSx for OpenZFS Volume (%s) create: %s", d.Id(), err) } - return append(diags, resourceOpenzfsVolumeRead(ctx, d, meta)...) + return append(diags, resourceOpenZFSVolumeRead(ctx, d, meta)...) } -func resourceOpenzfsVolumeRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { +func resourceOpenZFSVolumeRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).FSxConn(ctx) - volume, err := FindVolumeByID(ctx, conn, d.Id()) + volume, err := FindOpenZFSVolumeByID(ctx, conn, d.Id()) + if !d.IsNewResource() && tfresource.NotFound(err) { - log.Printf("[WARN] FSx OpenZFS volume (%s) not found, removing from state", d.Id()) + log.Printf("[WARN] FSx for OpenZFS Volume (%s) not found, removing from state", d.Id()) d.SetId("") return diags } if err != nil { - return sdkdiag.AppendErrorf(diags, "reading FSx OpenZFS Volume (%s): %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "reading FSx for OpenZFS Volume (%s): %s", d.Id(), err) } openzfsConfig := volume.OpenZFSConfiguration - if volume.OntapConfiguration != nil { - return sdkdiag.AppendErrorf(diags, "expected FSx OpeZFS Volume, found FSx ONTAP Volume: %s", d.Id()) - } - - if openzfsConfig == nil { - return sdkdiag.AppendErrorf(diags, "describing FSx OpenZFS Volume (%s): empty Openzfs configuration", d.Id()) - } - d.Set("arn", volume.ResourceARN) d.Set("copy_tags_to_snapshots", openzfsConfig.CopyTagsToSnapshots) d.Set("data_compression_type", openzfsConfig.DataCompressionType) d.Set("name", volume.Name) + if err := d.Set("nfs_exports", flattenOpenZFSNfsExports(openzfsConfig.NfsExports)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting nfs_exports: %s", err) + } + if err := d.Set("origin_snapshot", 
flattenOpenZFSOriginSnapshotConfiguration(openzfsConfig.OriginSnapshot)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting origin_snapshot: %s", err) + } d.Set("parent_volume_id", openzfsConfig.ParentVolumeId) d.Set("read_only", openzfsConfig.ReadOnly) d.Set("record_size_kib", openzfsConfig.RecordSizeKiB) d.Set("storage_capacity_quota_gib", openzfsConfig.StorageCapacityQuotaGiB) d.Set("storage_capacity_reservation_gib", openzfsConfig.StorageCapacityReservationGiB) - d.Set("volume_type", volume.VolumeType) - - if err := d.Set("origin_snapshot", flattenOpenzfsVolumeOriginSnapshot(openzfsConfig.OriginSnapshot)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting nfs_exports: %s", err) - } - - if err := d.Set("nfs_exports", flattenOpenzfsVolumeNFSExports(openzfsConfig.NfsExports)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting nfs_exports: %s", err) - } - - if err := d.Set("user_and_group_quotas", flattenOpenzfsVolumeUserAndGroupQuotas(openzfsConfig.UserAndGroupQuotas)); err != nil { + if err := d.Set("user_and_group_quotas", flattenOpenZFSUserOrGroupQuotas(openzfsConfig.UserAndGroupQuotas)); err != nil { return sdkdiag.AppendErrorf(diags, "setting user_and_group_quotas: %s", err) } + d.Set("volume_type", volume.VolumeType) return diags } -func resourceOpenzfsVolumeUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { +func resourceOpenZFSVolumeUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).FSxConn(ctx) - if d.HasChangesExcept("tags_all", "tags") { - input := &fsx.UpdateVolumeInput{ - ClientRequestToken: aws.String(id.UniqueId()), - VolumeId: aws.String(d.Id()), - OpenZFSConfiguration: &fsx.UpdateOpenZFSVolumeConfiguration{}, - } + if d.HasChangesExcept("tags", "tags_all") { + openzfsConfig := &fsx.UpdateOpenZFSVolumeConfiguration{} if d.HasChange("data_compression_type") { - 
input.OpenZFSConfiguration.DataCompressionType = aws.String(d.Get("data_compression_type").(string)) - } - - if d.HasChange("name") { - input.Name = aws.String(d.Get("name").(string)) + openzfsConfig.DataCompressionType = aws.String(d.Get("data_compression_type").(string)) } if d.HasChange("nfs_exports") { - input.OpenZFSConfiguration.NfsExports = expandOpenzfsVolumeNFSExports(d.Get("nfs_exports").([]interface{})) + openzfsConfig.NfsExports = expandOpenZFSNfsExports(d.Get("nfs_exports").([]interface{})) } if d.HasChange("read_only") { - input.OpenZFSConfiguration.ReadOnly = aws.Bool(d.Get("read_only").(bool)) + openzfsConfig.ReadOnly = aws.Bool(d.Get("read_only").(bool)) } if d.HasChange("record_size_kib") { - input.OpenZFSConfiguration.RecordSizeKiB = aws.Int64(int64(d.Get("record_size_kib").(int))) + openzfsConfig.RecordSizeKiB = aws.Int64(int64(d.Get("record_size_kib").(int))) } if d.HasChange("storage_capacity_quota_gib") { - input.OpenZFSConfiguration.StorageCapacityQuotaGiB = aws.Int64(int64(d.Get("storage_capacity_quota_gib").(int))) + openzfsConfig.StorageCapacityQuotaGiB = aws.Int64(int64(d.Get("storage_capacity_quota_gib").(int))) } if d.HasChange("storage_capacity_reservation_gib") { - input.OpenZFSConfiguration.StorageCapacityReservationGiB = aws.Int64(int64(d.Get("storage_capacity_reservation_gib").(int))) + openzfsConfig.StorageCapacityReservationGiB = aws.Int64(int64(d.Get("storage_capacity_reservation_gib").(int))) } if d.HasChange("user_and_group_quotas") { - input.OpenZFSConfiguration.UserAndGroupQuotas = expandOpenzfsVolumeUserAndGroupQuotas(d.Get("user_and_group_quotas").(*schema.Set).List()) + openzfsConfig.UserAndGroupQuotas = expandOpenZFSUserOrGroupQuotas(d.Get("user_and_group_quotas").(*schema.Set).List()) } + input := &fsx.UpdateVolumeInput{ + ClientRequestToken: aws.String(id.UniqueId()), + OpenZFSConfiguration: openzfsConfig, + VolumeId: aws.String(d.Id()), + } + + if d.HasChange("name") { + input.Name = 
aws.String(d.Get("name").(string)) + } + + startTime := time.Now() _, err := conn.UpdateVolumeWithContext(ctx, input) if err != nil { - return sdkdiag.AppendErrorf(diags, "updating FSx OpenZFS Volume (%s): %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "updating FSx for OpenZFS Volume (%s): %s", d.Id(), err) + } + + if _, err := waitVolumeUpdated(ctx, conn, d.Id(), startTime, d.Timeout(schema.TimeoutUpdate)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for FSx for OpenZFS Volume (%s) update: %s", d.Id(), err) } - if _, err := waitVolumeUpdated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for FSx OpenZFS Volume (%s) update: %s", d.Id(), err) + if _, err := waitVolumeAdministrativeActionCompleted(ctx, conn, d.Id(), fsx.AdministrativeActionTypeVolumeUpdate, d.Timeout(schema.TimeoutUpdate)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for FSx for OpenZFS Volume (%s) administrative action (%s) complete: %s", d.Id(), fsx.AdministrativeActionTypeVolumeUpdate, err) } } - return append(diags, resourceOpenzfsVolumeRead(ctx, d, meta)...) + return append(diags, resourceOpenZFSVolumeRead(ctx, d, meta)...) 
} -func resourceOpenzfsVolumeDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { +func resourceOpenZFSVolumeDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).FSxConn(ctx) - log.Printf("[DEBUG] Deleting FSx OpenZFS Volume: %s", d.Id()) - _, err := conn.DeleteVolumeWithContext(ctx, &fsx.DeleteVolumeInput{ + input := &fsx.DeleteVolumeInput{ VolumeId: aws.String(d.Id()), - }) + } + + if v, ok := d.GetOk("delete_volume_options"); ok && len(v.([]interface{})) > 0 { + input.OpenZFSConfiguration = &fsx.DeleteVolumeOpenZFSConfiguration{ + Options: flex.ExpandStringList(v.([]interface{})), + } + } + + log.Printf("[DEBUG] Deleting FSx for OpenZFS Volume: %s", d.Id()) + _, err := conn.DeleteVolumeWithContext(ctx, input) if tfawserr.ErrCodeEquals(err, fsx.ErrCodeVolumeNotFound) { return diags } if err != nil { - return sdkdiag.AppendErrorf(diags, "deleting FSx OpenZFS Volume (%s): %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "deleting FSx for OpenZFS Volume (%s): %s", d.Id(), err) } if _, err := waitVolumeDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for FSx OpenZFS Volume (%s) delete: %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "waiting for FSx for OpenZFS Volume (%s) delete: %s", d.Id(), err) } return diags } -func expandOpenzfsVolumeUserAndGroupQuotas(cfg []interface{}) []*fsx.OpenZFSUserOrGroupQuota { +func expandOpenZFSUserOrGroupQuotas(cfg []interface{}) []*fsx.OpenZFSUserOrGroupQuota { quotas := []*fsx.OpenZFSUserOrGroupQuota{} for _, quota := range cfg { - expandedQuota := expandOpenzfsVolumeUserAndGroupQuota(quota.(map[string]interface{})) + expandedQuota := expandOpenZFSUserOrGroupQuota(quota.(map[string]interface{})) if expandedQuota != nil { quotas = append(quotas, expandedQuota) } @@ -413,7 +426,7 @@ func 
expandOpenzfsVolumeUserAndGroupQuotas(cfg []interface{}) []*fsx.OpenZFSUser return quotas } -func expandOpenzfsVolumeUserAndGroupQuota(conf map[string]interface{}) *fsx.OpenZFSUserOrGroupQuota { +func expandOpenZFSUserOrGroupQuota(conf map[string]interface{}) *fsx.OpenZFSUserOrGroupQuota { if len(conf) < 1 { return nil } @@ -435,11 +448,11 @@ func expandOpenzfsVolumeUserAndGroupQuota(conf map[string]interface{}) *fsx.Open return &out } -func expandOpenzfsVolumeNFSExports(cfg []interface{}) []*fsx.OpenZFSNfsExport { +func expandOpenZFSNfsExports(cfg []interface{}) []*fsx.OpenZFSNfsExport { // nosemgrep:ci.caps4-in-func-name exports := []*fsx.OpenZFSNfsExport{} for _, export := range cfg { - expandedExport := expandOpenzfsVolumeNFSExport(export.(map[string]interface{})) + expandedExport := expandOpenZFSNfsExport(export.(map[string]interface{})) if expandedExport != nil { exports = append(exports, expandedExport) } @@ -448,21 +461,21 @@ func expandOpenzfsVolumeNFSExports(cfg []interface{}) []*fsx.OpenZFSNfsExport { return exports } -func expandOpenzfsVolumeNFSExport(cfg map[string]interface{}) *fsx.OpenZFSNfsExport { +func expandOpenZFSNfsExport(cfg map[string]interface{}) *fsx.OpenZFSNfsExport { // nosemgrep:ci.caps4-in-func-name out := fsx.OpenZFSNfsExport{} if v, ok := cfg["client_configurations"]; ok { - out.ClientConfigurations = expandOpenzfsVolumeClinetConfigurations(v.(*schema.Set).List()) + out.ClientConfigurations = expandOpenZFSClientConfigurations(v.(*schema.Set).List()) } return &out } -func expandOpenzfsVolumeClinetConfigurations(cfg []interface{}) []*fsx.OpenZFSClientConfiguration { +func expandOpenZFSClientConfigurations(cfg []interface{}) []*fsx.OpenZFSClientConfiguration { configurations := []*fsx.OpenZFSClientConfiguration{} for _, configuration := range cfg { - expandedConfiguration := expandOpenzfsVolumeClientConfiguration(configuration.(map[string]interface{})) + expandedConfiguration := 
expandOpenZFSClientConfiguration(configuration.(map[string]interface{})) if expandedConfiguration != nil { configurations = append(configurations, expandedConfiguration) } @@ -471,7 +484,7 @@ func expandOpenzfsVolumeClinetConfigurations(cfg []interface{}) []*fsx.OpenZFSCl return configurations } -func expandOpenzfsVolumeClientConfiguration(conf map[string]interface{}) *fsx.OpenZFSClientConfiguration { +func expandOpenZFSClientConfiguration(conf map[string]interface{}) *fsx.OpenZFSClientConfiguration { out := fsx.OpenZFSClientConfiguration{} if v, ok := conf["clients"].(string); ok && len(v) > 0 { @@ -485,7 +498,7 @@ func expandOpenzfsVolumeClientConfiguration(conf map[string]interface{}) *fsx.Op return &out } -func expandOpenzfsCreateVolumeOriginSnapshot(cfg []interface{}) *fsx.CreateOpenZFSOriginSnapshotConfiguration { +func expandCreateOpenZFSOriginSnapshotConfiguration(cfg []interface{}) *fsx.CreateOpenZFSOriginSnapshotConfiguration { if len(cfg) < 1 { return nil } @@ -505,13 +518,13 @@ func expandOpenzfsCreateVolumeOriginSnapshot(cfg []interface{}) *fsx.CreateOpenZ return &out } -func flattenOpenzfsVolumeNFSExports(rs []*fsx.OpenZFSNfsExport) []map[string]interface{} { +func flattenOpenZFSNfsExports(rs []*fsx.OpenZFSNfsExport) []map[string]interface{} { // nosemgrep:ci.caps4-in-func-name exports := make([]map[string]interface{}, 0) for _, export := range rs { if export != nil { cfg := make(map[string]interface{}) - cfg["client_configurations"] = flattenOpenzfsVolumeClientConfigurations(export.ClientConfigurations) + cfg["client_configurations"] = flattenOpenZFSClientConfigurations(export.ClientConfigurations) exports = append(exports, cfg) } } @@ -523,7 +536,7 @@ func flattenOpenzfsVolumeNFSExports(rs []*fsx.OpenZFSNfsExport) []map[string]int return nil } -func flattenOpenzfsVolumeClientConfigurations(rs []*fsx.OpenZFSClientConfiguration) []map[string]interface{} { +func flattenOpenZFSClientConfigurations(rs []*fsx.OpenZFSClientConfiguration) 
[]map[string]interface{} { configurations := make([]map[string]interface{}, 0) for _, configuration := range rs { @@ -542,7 +555,7 @@ func flattenOpenzfsVolumeClientConfigurations(rs []*fsx.OpenZFSClientConfigurati return nil } -func flattenOpenzfsVolumeUserAndGroupQuotas(rs []*fsx.OpenZFSUserOrGroupQuota) []map[string]interface{} { +func flattenOpenZFSUserOrGroupQuotas(rs []*fsx.OpenZFSUserOrGroupQuota) []map[string]interface{} { quotas := make([]map[string]interface{}, 0) for _, quota := range rs { @@ -562,7 +575,7 @@ func flattenOpenzfsVolumeUserAndGroupQuotas(rs []*fsx.OpenZFSUserOrGroupQuota) [ return nil } -func flattenOpenzfsVolumeOriginSnapshot(rs *fsx.OpenZFSOriginSnapshotConfiguration) []interface{} { +func flattenOpenZFSOriginSnapshotConfiguration(rs *fsx.OpenZFSOriginSnapshotConfiguration) []interface{} { if rs == nil { return []interface{}{} } @@ -577,3 +590,17 @@ func flattenOpenzfsVolumeOriginSnapshot(rs *fsx.OpenZFSOriginSnapshotConfigurati return []interface{}{m} } + +func FindOpenZFSVolumeByID(ctx context.Context, conn *fsx.FSx, id string) (*fsx.Volume, error) { + output, err := findVolumeByIDAndType(ctx, conn, id, fsx.VolumeTypeOpenzfs) + + if err != nil { + return nil, err + } + + if output.OpenZFSConfiguration == nil { + return nil, tfresource.NewEmptyResultError(nil) + } + + return output, nil +} diff --git a/internal/service/fsx/openzfs_volume_test.go b/internal/service/fsx/openzfs_volume_test.go index 0a4a4006843..cce5ec5cf00 100644 --- a/internal/service/fsx/openzfs_volume_test.go +++ b/internal/service/fsx/openzfs_volume_test.go @@ -20,7 +20,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) -func TestAccFSxOpenzfsVolume_basic(t *testing.T) { +func TestAccFSxOpenZFSVolume_basic(t *testing.T) { ctx := acctest.Context(t) var volume fsx.Volume resourceName := "aws_fsx_openzfs_volume.test" @@ -30,15 +30,16 @@ func TestAccFSxOpenzfsVolume_basic(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); 
acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOpenzfsVolumeDestroy(ctx), + CheckDestroy: testAccCheckOpenZFSVolumeDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccOpenZFSVolumeConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsVolumeExists(ctx, resourceName, &volume), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckOpenZFSVolumeExists(ctx, resourceName, &volume), acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "fsx", regexache.MustCompile(`volume/fs-.+/fsvol-.+`)), resource.TestCheckResourceAttr(resourceName, "copy_tags_to_snapshots", "false"), resource.TestCheckResourceAttr(resourceName, "data_compression_type", "NONE"), + resource.TestCheckResourceAttr(resourceName, "delete_volume_options.#", "0"), resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "nfs_exports.#", "1"), resource.TestCheckResourceAttr(resourceName, "nfs_exports.0.client_configurations.#", "1"), @@ -60,10 +61,33 @@ func TestAccFSxOpenzfsVolume_basic(t *testing.T) { }) } -func TestAccFSxOpenzfsVolume_parentVolume(t *testing.T) { +func TestAccFSxOpenZFSVolume_disappears(t *testing.T) { + ctx := acctest.Context(t) + var volume fsx.Volume + resourceName := "aws_fsx_openzfs_volume.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckOpenZFSVolumeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccOpenZFSVolumeConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckOpenZFSVolumeExists(ctx, 
resourceName, &volume), + acctest.CheckResourceDisappears(ctx, acctest.Provider, tffsx.ResourceOpenZFSVolume(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccFSxOpenZFSVolume_parentVolume(t *testing.T) { ctx := acctest.Context(t) var volume, volume2 fsx.Volume - var volumeId string resourceName := "aws_fsx_openzfs_volume.test" resourceName2 := "aws_fsx_openzfs_volume.test2" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -73,17 +97,16 @@ func TestAccFSxOpenzfsVolume_parentVolume(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOpenzfsVolumeDestroy(ctx), + CheckDestroy: testAccCheckOpenZFSVolumeDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccOpenZFSVolumeConfig_parent(rName, rName2), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsVolumeExists(ctx, resourceName, &volume), - testAccCheckOpenzfsVolumeExists(ctx, resourceName2, &volume2), - testAccCheckOpenzfsVolumeGetID(resourceName, &volumeId), + testAccCheckOpenZFSVolumeExists(ctx, resourceName, &volume), + testAccCheckOpenZFSVolumeExists(ctx, resourceName2, &volume2), acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "fsx", regexache.MustCompile(`volume/fs-.+/fsvol-.+`)), acctest.MatchResourceAttrRegionalARN(resourceName2, "arn", "fsx", regexache.MustCompile(`volume/fs-.+/fsvol-.+`)), - resource.TestCheckResourceAttrPtr(resourceName2, "parent_volume_id", &volumeId), + resource.TestCheckResourceAttrPair(resourceName2, "parent_volume_id", resourceName, "id"), ), }, { @@ -95,7 +118,7 @@ func TestAccFSxOpenzfsVolume_parentVolume(t *testing.T) { }) } -func TestAccFSxOpenzfsVolume_tags(t *testing.T) { +func TestAccFSxOpenZFSVolume_tags(t *testing.T) { ctx := acctest.Context(t) var volume1, volume2, volume3 fsx.Volume resourceName := 
"aws_fsx_openzfs_volume.test" @@ -105,12 +128,12 @@ func TestAccFSxOpenzfsVolume_tags(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOpenzfsVolumeDestroy(ctx), + CheckDestroy: testAccCheckOpenZFSVolumeDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccOpenZFSVolumeConfig_tags1(rName, "key1", "value1"), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsVolumeExists(ctx, resourceName, &volume1), + testAccCheckOpenZFSVolumeExists(ctx, resourceName, &volume1), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), ), @@ -123,8 +146,8 @@ func TestAccFSxOpenzfsVolume_tags(t *testing.T) { { Config: testAccOpenZFSVolumeConfig_tags2(rName, "key1", "value1updated", "key2", "value2"), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsVolumeExists(ctx, resourceName, &volume2), - testAccCheckOpenzfsVolumeNotRecreated(&volume1, &volume2), + testAccCheckOpenZFSVolumeExists(ctx, resourceName, &volume2), + testAccCheckOpenZFSVolumeNotRecreated(&volume1, &volume2), resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"), resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), @@ -133,8 +156,8 @@ func TestAccFSxOpenzfsVolume_tags(t *testing.T) { { Config: testAccOpenZFSVolumeConfig_tags1(rName, "key2", "value2"), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsVolumeExists(ctx, resourceName, &volume3), - testAccCheckOpenzfsVolumeNotRecreated(&volume2, &volume3), + testAccCheckOpenZFSVolumeExists(ctx, resourceName, &volume3), + testAccCheckOpenZFSVolumeNotRecreated(&volume2, &volume3), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), 
resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), ), @@ -143,7 +166,7 @@ func TestAccFSxOpenzfsVolume_tags(t *testing.T) { }) } -func TestAccFSxOpenzfsVolume_copyTags(t *testing.T) { +func TestAccFSxOpenZFSVolume_copyTags(t *testing.T) { ctx := acctest.Context(t) var volume1, volume2 fsx.Volume resourceName := "aws_fsx_openzfs_volume.test" @@ -153,36 +176,44 @@ func TestAccFSxOpenzfsVolume_copyTags(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOpenzfsVolumeDestroy(ctx), + CheckDestroy: testAccCheckOpenZFSVolumeDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccOpenZFSVolumeConfig_copyTags(rName, "key1", "value1", "true"), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsVolumeExists(ctx, resourceName, &volume1), + testAccCheckOpenZFSVolumeExists(ctx, resourceName, &volume1), + resource.TestCheckResourceAttr(resourceName, "copy_tags_to_snapshots", "true"), + resource.TestCheckResourceAttr(resourceName, "delete_volume_options.#", "1"), + resource.TestCheckResourceAttr(resourceName, "delete_volume_options.0", "DELETE_CHILD_VOLUMES_AND_SNAPSHOTS"), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), - resource.TestCheckResourceAttr(resourceName, "copy_tags_to_snapshots", "true"), ), }, { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "delete_volume_options", + }, }, { Config: testAccOpenZFSVolumeConfig_copyTags(rName, "key1", "value1", "false"), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsVolumeExists(ctx, resourceName, &volume2), - testAccCheckOpenzfsVolumeRecreated(&volume1, &volume2), - resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), + 
testAccCheckOpenZFSVolumeExists(ctx, resourceName, &volume2), + testAccCheckOpenZFSVolumeRecreated(&volume1, &volume2), resource.TestCheckResourceAttr(resourceName, "copy_tags_to_snapshots", "false"), + resource.TestCheckResourceAttr(resourceName, "delete_volume_options.#", "1"), + resource.TestCheckResourceAttr(resourceName, "delete_volume_options.0", "DELETE_CHILD_VOLUMES_AND_SNAPSHOTS"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), ), }, }, }) } -func TestAccFSxOpenzfsVolume_name(t *testing.T) { +func TestAccFSxOpenZFSVolume_name(t *testing.T) { ctx := acctest.Context(t) var volume1, volume2 fsx.Volume resourceName := "aws_fsx_openzfs_volume.test" @@ -193,12 +224,12 @@ func TestAccFSxOpenzfsVolume_name(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOpenzfsVolumeDestroy(ctx), + CheckDestroy: testAccCheckOpenZFSVolumeDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccOpenZFSVolumeConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsVolumeExists(ctx, resourceName, &volume1), + testAccCheckOpenZFSVolumeExists(ctx, resourceName, &volume1), resource.TestCheckResourceAttr(resourceName, "name", rName), ), }, @@ -210,8 +241,8 @@ func TestAccFSxOpenzfsVolume_name(t *testing.T) { { Config: testAccOpenZFSVolumeConfig_basic(rName2), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsVolumeExists(ctx, resourceName, &volume2), - testAccCheckOpenzfsVolumeNotRecreated(&volume1, &volume2), + testAccCheckOpenZFSVolumeExists(ctx, resourceName, &volume2), + testAccCheckOpenZFSVolumeNotRecreated(&volume1, &volume2), resource.TestCheckResourceAttr(resourceName, "name", rName2), ), }, @@ -219,7 +250,7 @@ func TestAccFSxOpenzfsVolume_name(t 
*testing.T) { }) } -func TestAccFSxOpenzfsVolume_dataCompressionType(t *testing.T) { +func TestAccFSxOpenZFSVolume_dataCompressionType(t *testing.T) { ctx := acctest.Context(t) var volume1, volume2 fsx.Volume resourceName := "aws_fsx_openzfs_volume.test" @@ -229,12 +260,12 @@ func TestAccFSxOpenzfsVolume_dataCompressionType(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOpenzfsVolumeDestroy(ctx), + CheckDestroy: testAccCheckOpenZFSVolumeDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccOpenZFSVolumeConfig_dataCompression(rName, "ZSTD"), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsVolumeExists(ctx, resourceName, &volume1), + testAccCheckOpenZFSVolumeExists(ctx, resourceName, &volume1), resource.TestCheckResourceAttr(resourceName, "data_compression_type", "ZSTD"), ), }, @@ -246,8 +277,8 @@ func TestAccFSxOpenzfsVolume_dataCompressionType(t *testing.T) { { Config: testAccOpenZFSVolumeConfig_dataCompression(rName, "NONE"), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsVolumeExists(ctx, resourceName, &volume2), - testAccCheckOpenzfsVolumeNotRecreated(&volume1, &volume2), + testAccCheckOpenZFSVolumeExists(ctx, resourceName, &volume2), + testAccCheckOpenZFSVolumeNotRecreated(&volume1, &volume2), resource.TestCheckResourceAttr(resourceName, "data_compression_type", "NONE"), ), }, @@ -255,7 +286,7 @@ func TestAccFSxOpenzfsVolume_dataCompressionType(t *testing.T) { }) } -func TestAccFSxOpenzfsVolume_readOnly(t *testing.T) { +func TestAccFSxOpenZFSVolume_readOnly(t *testing.T) { ctx := acctest.Context(t) var volume1, volume2 fsx.Volume resourceName := "aws_fsx_openzfs_volume.test" @@ -265,12 +296,12 @@ func TestAccFSxOpenzfsVolume_readOnly(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); 
acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOpenzfsVolumeDestroy(ctx), + CheckDestroy: testAccCheckOpenZFSVolumeDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccOpenZFSVolumeConfig_readOnly(rName, "false"), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsVolumeExists(ctx, resourceName, &volume1), + testAccCheckOpenZFSVolumeExists(ctx, resourceName, &volume1), resource.TestCheckResourceAttr(resourceName, "read_only", "false"), ), }, @@ -282,8 +313,8 @@ func TestAccFSxOpenzfsVolume_readOnly(t *testing.T) { { Config: testAccOpenZFSVolumeConfig_readOnly(rName, "true"), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsVolumeExists(ctx, resourceName, &volume2), - testAccCheckOpenzfsVolumeNotRecreated(&volume1, &volume2), + testAccCheckOpenZFSVolumeExists(ctx, resourceName, &volume2), + testAccCheckOpenZFSVolumeNotRecreated(&volume1, &volume2), resource.TestCheckResourceAttr(resourceName, "read_only", "true"), ), }, @@ -291,7 +322,7 @@ func TestAccFSxOpenzfsVolume_readOnly(t *testing.T) { }) } -func TestAccFSxOpenzfsVolume_recordSizeKib(t *testing.T) { +func TestAccFSxOpenZFSVolume_recordSizeKib(t *testing.T) { ctx := acctest.Context(t) var volume1, volume2 fsx.Volume resourceName := "aws_fsx_openzfs_volume.test" @@ -301,12 +332,12 @@ func TestAccFSxOpenzfsVolume_recordSizeKib(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOpenzfsVolumeDestroy(ctx), + CheckDestroy: testAccCheckOpenZFSVolumeDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccOpenZFSVolumeConfig_recordSizeKib(rName, 8), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsVolumeExists(ctx, resourceName, 
&volume1), + testAccCheckOpenZFSVolumeExists(ctx, resourceName, &volume1), resource.TestCheckResourceAttr(resourceName, "record_size_kib", "8"), ), }, @@ -318,8 +349,8 @@ func TestAccFSxOpenzfsVolume_recordSizeKib(t *testing.T) { { Config: testAccOpenZFSVolumeConfig_recordSizeKib(rName, 1024), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsVolumeExists(ctx, resourceName, &volume2), - testAccCheckOpenzfsVolumeNotRecreated(&volume1, &volume2), + testAccCheckOpenZFSVolumeExists(ctx, resourceName, &volume2), + testAccCheckOpenZFSVolumeNotRecreated(&volume1, &volume2), resource.TestCheckResourceAttr(resourceName, "record_size_kib", "1024"), ), }, @@ -327,7 +358,7 @@ func TestAccFSxOpenzfsVolume_recordSizeKib(t *testing.T) { }) } -func TestAccFSxOpenzfsVolume_storageCapacity(t *testing.T) { +func TestAccFSxOpenZFSVolume_storageCapacity(t *testing.T) { ctx := acctest.Context(t) var volume1, volume2 fsx.Volume resourceName := "aws_fsx_openzfs_volume.test" @@ -337,12 +368,12 @@ func TestAccFSxOpenzfsVolume_storageCapacity(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOpenzfsVolumeDestroy(ctx), + CheckDestroy: testAccCheckOpenZFSVolumeDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccOpenZFSVolumeConfig_storageCapacity(rName, 30, 20), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsVolumeExists(ctx, resourceName, &volume1), + testAccCheckOpenZFSVolumeExists(ctx, resourceName, &volume1), resource.TestCheckResourceAttr(resourceName, "storage_capacity_quota_gib", "30"), resource.TestCheckResourceAttr(resourceName, "storage_capacity_reservation_gib", "20"), ), @@ -355,8 +386,8 @@ func TestAccFSxOpenzfsVolume_storageCapacity(t *testing.T) { { Config: testAccOpenZFSVolumeConfig_storageCapacity(rName, 40, 30), Check: 
resource.ComposeTestCheckFunc( - testAccCheckOpenzfsVolumeExists(ctx, resourceName, &volume2), - testAccCheckOpenzfsVolumeNotRecreated(&volume1, &volume2), + testAccCheckOpenZFSVolumeExists(ctx, resourceName, &volume2), + testAccCheckOpenZFSVolumeNotRecreated(&volume1, &volume2), resource.TestCheckResourceAttr(resourceName, "storage_capacity_quota_gib", "40"), resource.TestCheckResourceAttr(resourceName, "storage_capacity_reservation_gib", "30"), ), @@ -365,7 +396,7 @@ func TestAccFSxOpenzfsVolume_storageCapacity(t *testing.T) { }) } -func TestAccFSxOpenzfsVolume_nfsExports(t *testing.T) { +func TestAccFSxOpenZFSVolume_nfsExports(t *testing.T) { ctx := acctest.Context(t) var volume1, volume2 fsx.Volume resourceName := "aws_fsx_openzfs_volume.test" @@ -375,12 +406,12 @@ func TestAccFSxOpenzfsVolume_nfsExports(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOpenzfsVolumeDestroy(ctx), + CheckDestroy: testAccCheckOpenZFSVolumeDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccOpenZFSVolumeConfig_nfsExports1(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsVolumeExists(ctx, resourceName, &volume1), + testAccCheckOpenZFSVolumeExists(ctx, resourceName, &volume1), resource.TestCheckResourceAttr(resourceName, "nfs_exports.#", "1"), resource.TestCheckResourceAttr(resourceName, "nfs_exports.0.client_configurations.#", "1"), resource.TestCheckResourceAttr(resourceName, "nfs_exports.0.client_configurations.0.clients", "10.0.1.0/24"), @@ -397,8 +428,8 @@ func TestAccFSxOpenzfsVolume_nfsExports(t *testing.T) { { Config: testAccOpenZFSVolumeConfig_nfsExports2(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsVolumeExists(ctx, resourceName, &volume2), - testAccCheckOpenzfsVolumeNotRecreated(&volume1, &volume2), + 
testAccCheckOpenZFSVolumeExists(ctx, resourceName, &volume2), + testAccCheckOpenZFSVolumeNotRecreated(&volume1, &volume2), resource.TestCheckResourceAttr(resourceName, "nfs_exports.#", "1"), resource.TestCheckResourceAttr(resourceName, "nfs_exports.0.client_configurations.#", "2"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "nfs_exports.0.client_configurations.*", map[string]string{ @@ -417,7 +448,7 @@ func TestAccFSxOpenzfsVolume_nfsExports(t *testing.T) { }) } -func TestAccFSxOpenzfsVolume_userAndGroupQuotas(t *testing.T) { +func TestAccFSxOpenZFSVolume_userAndGroupQuotas(t *testing.T) { ctx := acctest.Context(t) var volume1, volume2 fsx.Volume resourceName := "aws_fsx_openzfs_volume.test" @@ -427,12 +458,12 @@ func TestAccFSxOpenzfsVolume_userAndGroupQuotas(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOpenzfsVolumeDestroy(ctx), + CheckDestroy: testAccCheckOpenZFSVolumeDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccOpenZFSVolumeConfig_userAndGroupQuotas1(rName, 256), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsVolumeExists(ctx, resourceName, &volume1), + testAccCheckOpenZFSVolumeExists(ctx, resourceName, &volume1), resource.TestCheckResourceAttr(resourceName, "user_and_group_quotas.#", "1"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "user_and_group_quotas.*", map[string]string{ "id": "10", @@ -449,8 +480,8 @@ func TestAccFSxOpenzfsVolume_userAndGroupQuotas(t *testing.T) { { Config: testAccOpenZFSVolumeConfig_userAndGroupQuotas2(rName, 128, 1024), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsVolumeExists(ctx, resourceName, &volume2), - testAccCheckOpenzfsVolumeNotRecreated(&volume1, &volume2), + testAccCheckOpenZFSVolumeExists(ctx, resourceName, &volume2), + 
testAccCheckOpenZFSVolumeNotRecreated(&volume1, &volume2), resource.TestCheckResourceAttr(resourceName, "user_and_group_quotas.#", "4"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "user_and_group_quotas.*", map[string]string{ "id": "10", @@ -478,31 +509,28 @@ func TestAccFSxOpenzfsVolume_userAndGroupQuotas(t *testing.T) { }) } -func testAccCheckOpenzfsVolumeExists(ctx context.Context, resourceName string, volume *fsx.Volume) resource.TestCheckFunc { +func testAccCheckOpenZFSVolumeExists(ctx context.Context, n string, v *fsx.Volume) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[resourceName] + rs, ok := s.RootModule().Resources[n] if !ok { - return fmt.Errorf("Not found: %s", resourceName) + return fmt.Errorf("Not found: %s", n) } conn := acctest.Provider.Meta().(*conns.AWSClient).FSxConn(ctx) - volume1, err := tffsx.FindVolumeByID(ctx, conn, rs.Primary.ID) + output, err := tffsx.FindOpenZFSVolumeByID(ctx, conn, rs.Primary.ID) + if err != nil { return err } - if volume == nil { - return fmt.Errorf("FSx OpenZFS Volume (%s) not found", rs.Primary.ID) - } - - *volume = *volume1 + *v = *output return nil } } -func testAccCheckOpenzfsVolumeDestroy(ctx context.Context) resource.TestCheckFunc { +func testAccCheckOpenZFSVolumeDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).FSxConn(ctx) @@ -511,85 +539,59 @@ func testAccCheckOpenzfsVolumeDestroy(ctx context.Context) resource.TestCheckFun continue } - volume, err := tffsx.FindVolumeByID(ctx, conn, rs.Primary.ID) + _, err := tffsx.FindOpenZFSVolumeByID(ctx, conn, rs.Primary.ID) + if tfresource.NotFound(err) { continue } - if volume != nil { - return fmt.Errorf("FSx OpenZFS Volume (%s) still exists", rs.Primary.ID) + if err != nil { + return err } - } - return nil - } -} -func testAccCheckOpenzfsVolumeGetID(resourceName string, volumeId *string) 
resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[resourceName] - if !ok { - return fmt.Errorf("Not found: %s", resourceName) + return fmt.Errorf("FSx for OpenZFS Volume %s still exists", rs.Primary.ID) } - - *volumeId = rs.Primary.ID - return nil } } -func testAccCheckOpenzfsVolumeNotRecreated(i, j *fsx.Volume) resource.TestCheckFunc { +func testAccCheckOpenZFSVolumeNotRecreated(i, j *fsx.Volume) resource.TestCheckFunc { return func(s *terraform.State) error { if aws.StringValue(i.VolumeId) != aws.StringValue(j.VolumeId) { - return fmt.Errorf("FSx OpenZFS Volume (%s) recreated", aws.StringValue(i.VolumeId)) + return fmt.Errorf("FSx for OpenZFS Volume (%s) recreated", aws.StringValue(i.VolumeId)) } return nil } } -func testAccCheckOpenzfsVolumeRecreated(i, j *fsx.Volume) resource.TestCheckFunc { +func testAccCheckOpenZFSVolumeRecreated(i, j *fsx.Volume) resource.TestCheckFunc { return func(s *terraform.State) error { if aws.StringValue(i.VolumeId) == aws.StringValue(j.VolumeId) { - return fmt.Errorf("FSx OpenZFS Volume (%s) not recreated", aws.StringValue(i.VolumeId)) + return fmt.Errorf("FSx for OpenZFS Volume (%s) not recreated", aws.StringValue(i.VolumeId)) } return nil } } -func testAccOpenzfsVolumeBaseConfig(rName string) string { - return acctest.ConfigCompose(acctest.ConfigAvailableAZsNoOptIn(), fmt.Sprintf(` -data "aws_partition" "current" {} - -resource "aws_vpc" "test" { - cidr_block = "10.0.0.0/16" - - tags = { - Name = %[1]q - } -} - -resource "aws_subnet" "test1" { - vpc_id = aws_vpc.test.id - cidr_block = "10.0.1.0/24" - availability_zone = data.aws_availability_zones.available.names[0] - - tags = { - Name = %[1]q - } -} - +func testAccOpenZFSVolumeConfig_base(rName string) string { + return acctest.ConfigCompose(acctest.ConfigVPCWithSubnets(rName, 1), fmt.Sprintf(` resource "aws_fsx_openzfs_file_system" "test" { storage_capacity = 64 - subnet_ids = [aws_subnet.test1.id] + subnet_ids = 
aws_subnet.test[*].id deployment_type = "SINGLE_AZ_1" throughput_capacity = 64 + + tags = { + Name = %[1]q + } } `, rName)) } func testAccOpenZFSVolumeConfig_basic(rName string) string { - return acctest.ConfigCompose(testAccOpenzfsVolumeBaseConfig(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccOpenZFSVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_openzfs_volume" "test" { name = %[1]q parent_volume_id = aws_fsx_openzfs_file_system.test.root_volume_id @@ -598,7 +600,7 @@ resource "aws_fsx_openzfs_volume" "test" { } func testAccOpenZFSVolumeConfig_parent(rName, rName2 string) string { - return acctest.ConfigCompose(testAccOpenzfsVolumeBaseConfig(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccOpenZFSVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_openzfs_volume" "test" { name = %[1]q parent_volume_id = aws_fsx_openzfs_file_system.test.root_volume_id @@ -612,7 +614,7 @@ resource "aws_fsx_openzfs_volume" "test2" { } func testAccOpenZFSVolumeConfig_tags1(rName, tagKey1, tagValue1 string) string { - return acctest.ConfigCompose(testAccOpenzfsVolumeBaseConfig(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccOpenZFSVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_openzfs_volume" "test" { name = %[1]q parent_volume_id = aws_fsx_openzfs_file_system.test.root_volume_id @@ -625,7 +627,7 @@ resource "aws_fsx_openzfs_volume" "test" { } func testAccOpenZFSVolumeConfig_tags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { - return acctest.ConfigCompose(testAccOpenzfsVolumeBaseConfig(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccOpenZFSVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_openzfs_volume" "test" { name = %[1]q parent_volume_id = aws_fsx_openzfs_file_system.test.root_volume_id @@ -640,7 +642,7 @@ resource "aws_fsx_openzfs_volume" "test" { } func testAccOpenZFSVolumeConfig_copyTags(rName, tagKey1, tagValue1, copyTags string) string { - return 
acctest.ConfigCompose(testAccOpenzfsVolumeBaseConfig(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccOpenZFSVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_openzfs_volume" "test" { name = %[1]q parent_volume_id = aws_fsx_openzfs_file_system.test.root_volume_id @@ -649,12 +651,14 @@ resource "aws_fsx_openzfs_volume" "test" { tags = { %[2]q = %[3]q } + + delete_volume_options = ["DELETE_CHILD_VOLUMES_AND_SNAPSHOTS"] } `, rName, tagKey1, tagValue1, copyTags)) } func testAccOpenZFSVolumeConfig_dataCompression(rName, dType string) string { - return acctest.ConfigCompose(testAccOpenzfsVolumeBaseConfig(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccOpenZFSVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_openzfs_volume" "test" { name = %[1]q parent_volume_id = aws_fsx_openzfs_file_system.test.root_volume_id @@ -664,7 +668,7 @@ resource "aws_fsx_openzfs_volume" "test" { } func testAccOpenZFSVolumeConfig_readOnly(rName, readOnly string) string { - return acctest.ConfigCompose(testAccOpenzfsVolumeBaseConfig(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccOpenZFSVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_openzfs_volume" "test" { name = %[1]q parent_volume_id = aws_fsx_openzfs_file_system.test.root_volume_id @@ -674,7 +678,7 @@ resource "aws_fsx_openzfs_volume" "test" { } func testAccOpenZFSVolumeConfig_recordSizeKib(rName string, recordSizeKib int) string { - return acctest.ConfigCompose(testAccOpenzfsVolumeBaseConfig(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccOpenZFSVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_openzfs_volume" "test" { name = %[1]q parent_volume_id = aws_fsx_openzfs_file_system.test.root_volume_id @@ -684,7 +688,7 @@ resource "aws_fsx_openzfs_volume" "test" { } func testAccOpenZFSVolumeConfig_storageCapacity(rName string, storageQuota, storageReservation int) string { - return acctest.ConfigCompose(testAccOpenzfsVolumeBaseConfig(rName), fmt.Sprintf(` + return 
acctest.ConfigCompose(testAccOpenZFSVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_openzfs_volume" "test" { name = %[1]q parent_volume_id = aws_fsx_openzfs_file_system.test.root_volume_id @@ -695,7 +699,7 @@ resource "aws_fsx_openzfs_volume" "test" { } func testAccOpenZFSVolumeConfig_nfsExports1(rName string) string { - return acctest.ConfigCompose(testAccOpenzfsVolumeBaseConfig(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccOpenZFSVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_openzfs_volume" "test" { name = %[1]q parent_volume_id = aws_fsx_openzfs_file_system.test.root_volume_id @@ -711,7 +715,7 @@ resource "aws_fsx_openzfs_volume" "test" { } func testAccOpenZFSVolumeConfig_nfsExports2(rName string) string { - return acctest.ConfigCompose(testAccOpenzfsVolumeBaseConfig(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccOpenZFSVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_openzfs_volume" "test" { name = %[1]q parent_volume_id = aws_fsx_openzfs_file_system.test.root_volume_id @@ -730,7 +734,7 @@ resource "aws_fsx_openzfs_volume" "test" { } func testAccOpenZFSVolumeConfig_userAndGroupQuotas1(rName string, quotaSize int) string { - return acctest.ConfigCompose(testAccOpenzfsVolumeBaseConfig(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccOpenZFSVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_openzfs_volume" "test" { name = %[1]q parent_volume_id = aws_fsx_openzfs_file_system.test.root_volume_id @@ -744,7 +748,7 @@ resource "aws_fsx_openzfs_volume" "test" { } func testAccOpenZFSVolumeConfig_userAndGroupQuotas2(rName string, userQuota, groupQuota int) string { - return acctest.ConfigCompose(testAccOpenzfsVolumeBaseConfig(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccOpenZFSVolumeConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_openzfs_volume" "test" { name = %[1]q parent_volume_id = aws_fsx_openzfs_file_system.test.root_volume_id diff --git 
a/internal/service/fsx/service_package_gen.go b/internal/service/fsx/service_package_gen.go index bd51a355f99..a3a7ca036b9 100644 --- a/internal/service/fsx/service_package_gen.go +++ b/internal/service/fsx/service_package_gen.go @@ -102,7 +102,7 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceOntapVolume, + Factory: ResourceONTAPVolume, TypeName: "aws_fsx_ontap_volume", Name: "ONTAP Volume", Tags: &types.ServicePackageResourceTags{ @@ -126,7 +126,7 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceOpenzfsVolume, + Factory: ResourceOpenZFSVolume, TypeName: "aws_fsx_openzfs_volume", Name: "OpenZFS Volume", Tags: &types.ServicePackageResourceTags{ diff --git a/internal/service/fsx/status.go b/internal/service/fsx/status.go index 5ffe34db105..84848f7cb96 100644 --- a/internal/service/fsx/status.go +++ b/internal/service/fsx/status.go @@ -43,22 +43,6 @@ func statusFileCache(ctx context.Context, conn *fsx.FSx, id string) retry.StateR } } -func statusVolume(ctx context.Context, conn *fsx.FSx, id string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - output, err := FindVolumeByID(ctx, conn, id) - - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return output, aws.StringValue(output.Lifecycle), nil - } -} - func statusSnapshot(ctx context.Context, conn *fsx.FSx, id string) retry.StateRefreshFunc { return func() (interface{}, string, error) { output, err := FindSnapshotByID(ctx, conn, id) diff --git a/internal/service/fsx/sweep.go b/internal/service/fsx/sweep.go index 7fe2bd1681c..d088424cb05 100644 --- a/internal/service/fsx/sweep.go +++ b/internal/service/fsx/sweep.go @@ -291,7 +291,7 @@ func sweepONTAPVolumes(region string) error { continue } - r := ResourceOntapVolume() + r := ResourceONTAPVolume() d := r.Data(nil) d.SetId(aws.StringValue(v.VolumeId)) 
d.Set("skip_final_backup", true) @@ -393,7 +393,7 @@ func sweepOpenZFSVolume(region string) error { continue } - r := ResourceOpenzfsVolume() + r := ResourceOpenZFSVolume() d := r.Data(nil) d.SetId(aws.StringValue(v.VolumeId)) diff --git a/internal/service/fsx/wait.go b/internal/service/fsx/wait.go index 86cab76105c..69a98e51658 100644 --- a/internal/service/fsx/wait.go +++ b/internal/service/fsx/wait.go @@ -115,72 +115,6 @@ func waitFileCacheDeleted(ctx context.Context, conn *fsx.FSx, id string, timeout return nil, err } -func waitVolumeCreated(ctx context.Context, conn *fsx.FSx, id string, timeout time.Duration) (*fsx.Volume, error) { //nolint:unparam - stateConf := &retry.StateChangeConf{ - Pending: []string{fsx.VolumeLifecycleCreating, fsx.VolumeLifecyclePending}, - Target: []string{fsx.VolumeLifecycleCreated, fsx.VolumeLifecycleMisconfigured, fsx.VolumeLifecycleAvailable}, - Refresh: statusVolume(ctx, conn, id), - Timeout: timeout, - Delay: 30 * time.Second, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*fsx.Volume); ok { - if status, details := aws.StringValue(output.Lifecycle), output.LifecycleTransitionReason; status == fsx.VolumeLifecycleFailed && details != nil { - tfresource.SetLastError(err, errors.New(aws.StringValue(output.LifecycleTransitionReason.Message))) - } - - return output, err - } - - return nil, err -} - -func waitVolumeUpdated(ctx context.Context, conn *fsx.FSx, id string, timeout time.Duration) (*fsx.Volume, error) { //nolint:unparam - stateConf := &retry.StateChangeConf{ - Pending: []string{fsx.VolumeLifecyclePending}, - Target: []string{fsx.VolumeLifecycleCreated, fsx.VolumeLifecycleMisconfigured, fsx.VolumeLifecycleAvailable}, - Refresh: statusVolume(ctx, conn, id), - Timeout: timeout, - Delay: 150 * time.Second, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*fsx.Volume); ok { - if status, details := aws.StringValue(output.Lifecycle), 
output.LifecycleTransitionReason; status == fsx.VolumeLifecycleFailed && details != nil { - tfresource.SetLastError(err, errors.New(aws.StringValue(output.LifecycleTransitionReason.Message))) - } - - return output, err - } - - return nil, err -} - -func waitVolumeDeleted(ctx context.Context, conn *fsx.FSx, id string, timeout time.Duration) (*fsx.Volume, error) { //nolint:unparam - stateConf := &retry.StateChangeConf{ - Pending: []string{fsx.VolumeLifecycleCreated, fsx.VolumeLifecycleMisconfigured, fsx.VolumeLifecycleAvailable, fsx.VolumeLifecycleDeleting}, - Target: []string{}, - Refresh: statusVolume(ctx, conn, id), - Timeout: timeout, - Delay: 30 * time.Second, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*fsx.Volume); ok { - if status, details := aws.StringValue(output.Lifecycle), output.LifecycleTransitionReason; status == fsx.VolumeLifecycleFailed && details != nil { - tfresource.SetLastError(err, errors.New(aws.StringValue(output.LifecycleTransitionReason.Message))) - } - - return output, err - } - - return nil, err -} - func waitSnapshotCreated(ctx context.Context, conn *fsx.FSx, id string, timeout time.Duration) (*fsx.Snapshot, error) { stateConf := &retry.StateChangeConf{ Pending: []string{fsx.SnapshotLifecycleCreating, fsx.SnapshotLifecyclePending}, diff --git a/internal/service/fsx/windows_file_system.go b/internal/service/fsx/windows_file_system.go index bb6f3189d8c..5151a304ba8 100644 --- a/internal/service/fsx/windows_file_system.go +++ b/internal/service/fsx/windows_file_system.go @@ -488,7 +488,7 @@ func resourceWindowsFileSystemUpdate(ctx context.Context, d *schema.ResourceData return sdkdiag.AppendErrorf(diags, "associating FSx for Windows File Server File System (%s) aliases: %s", d.Id(), err) } - if _, err := waitAdministrativeActionCompleted(ctx, conn, d.Id(), fsx.AdministrativeActionTypeFileSystemAliasAssociation, d.Timeout(schema.TimeoutUpdate)); err != nil { + if _, err := 
waitFileSystemAdministrativeActionCompleted(ctx, conn, d.Id(), fsx.AdministrativeActionTypeFileSystemAliasAssociation, d.Timeout(schema.TimeoutUpdate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for FSx for Windows File Server File System (%s) administrative action (%s) complete: %s", d.Id(), fsx.AdministrativeActionTypeFileSystemAliasAssociation, err) } } @@ -505,7 +505,7 @@ func resourceWindowsFileSystemUpdate(ctx context.Context, d *schema.ResourceData return sdkdiag.AppendErrorf(diags, "disassociating FSx for Windows File Server File System (%s) aliases: %s", d.Id(), err) } - if _, err := waitAdministrativeActionCompleted(ctx, conn, d.Id(), fsx.AdministrativeActionTypeFileSystemAliasDisassociation, d.Timeout(schema.TimeoutUpdate)); err != nil { + if _, err := waitFileSystemAdministrativeActionCompleted(ctx, conn, d.Id(), fsx.AdministrativeActionTypeFileSystemAliasDisassociation, d.Timeout(schema.TimeoutUpdate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for FSx for Windows File Server File System (%s) administrative action (%s) complete: %s", d.Id(), fsx.AdministrativeActionTypeFileSystemAliasDisassociation, err) } } @@ -535,7 +535,7 @@ func resourceWindowsFileSystemUpdate(ctx context.Context, d *schema.ResourceData return sdkdiag.AppendErrorf(diags, "waiting for FSx Windows File Server File System (%s) update: %s", d.Id(), err) } - if _, err := waitAdministrativeActionCompleted(ctx, conn, d.Id(), fsx.AdministrativeActionTypeFileSystemUpdate, d.Timeout(schema.TimeoutUpdate)); err != nil { + if _, err := waitFileSystemAdministrativeActionCompleted(ctx, conn, d.Id(), fsx.AdministrativeActionTypeFileSystemUpdate, d.Timeout(schema.TimeoutUpdate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for FSx Windows File Server File System (%s) administrative action (%s) complete: %s", d.Id(), fsx.AdministrativeActionTypeFileSystemUpdate, err) } } @@ -591,7 +591,7 @@ func resourceWindowsFileSystemUpdate(ctx context.Context, d 
*schema.ResourceData return sdkdiag.AppendErrorf(diags, "waiting for FSx Windows File Server File System (%s) update: %s", d.Id(), err) } - if _, err := waitAdministrativeActionCompleted(ctx, conn, d.Id(), fsx.AdministrativeActionTypeFileSystemUpdate, d.Timeout(schema.TimeoutUpdate)); err != nil { + if _, err := waitFileSystemAdministrativeActionCompleted(ctx, conn, d.Id(), fsx.AdministrativeActionTypeFileSystemUpdate, d.Timeout(schema.TimeoutUpdate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for FSx Windows File Server File System (%s) administrative action (%s) complete: %s", d.Id(), fsx.AdministrativeActionTypeFileSystemUpdate, err) } } diff --git a/website/docs/r/fsx_ontap_file_system.html.markdown b/website/docs/r/fsx_ontap_file_system.html.markdown index 3ce1a2d55bc..c262d366b88 100644 --- a/website/docs/r/fsx_ontap_file_system.html.markdown +++ b/website/docs/r/fsx_ontap_file_system.html.markdown @@ -36,7 +36,7 @@ This resource supports the following arguments: * `kms_key_id` - (Optional) ARN for the KMS Key to encrypt the file system at rest, Defaults to an AWS managed KMS Key. * `automatic_backup_retention_days` - (Optional) The number of days to retain automatic backups. Setting this to 0 disables automatic backups. You can retain automatic backups for a maximum of 90 days. * `daily_automatic_backup_start_time` - (Optional) A recurring daily time, in the format HH:MM. HH is the zero-padded hour of the day (0-23), and MM is the zero-padded minute of the hour. For example, 05:00 specifies 5 AM daily. Requires `automatic_backup_retention_days` to be set. -* `disk_iops_configuration` - (Optional) The SSD IOPS configuration for the Amazon FSx for NetApp ONTAP file system. See [Disk Iops Configuration](#disk-iops-configuration) Below. +* `disk_iops_configuration` - (Optional) The SSD IOPS configuration for the Amazon FSx for NetApp ONTAP file system. See [Disk Iops Configuration](#disk-iops-configuration) below. 
* `endpoint_ip_address_range` - (Optional) Specifies the IP address range in which the endpoints to access your file system will be created. By default, Amazon FSx selects an unused IP address range for you from the 198.19.* range. * `storage_type` - (Optional) - The filesystem storage type. defaults to `SSD`. * `fsx_admin_password` - (Optional) The ONTAP administrative password for the fsxadmin user that you can use to administer your file system using the ONTAP CLI and REST API. diff --git a/website/docs/r/fsx_ontap_volume.html.markdown b/website/docs/r/fsx_ontap_volume.html.markdown index 4c155daf313..ab734089c3a 100644 --- a/website/docs/r/fsx_ontap_volume.html.markdown +++ b/website/docs/r/fsx_ontap_volume.html.markdown @@ -49,18 +49,46 @@ resource "aws_fsx_ontap_volume" "test" { This resource supports the following arguments: * `name` - (Required) The name of the Volume. You can use a maximum of 203 alphanumeric characters, plus the underscore (_) special character. +* `bypass_snaplock_enterprise_retention` - (Optional) Setting this to `true` allows a SnapLock administrator to delete an FSx for ONTAP SnapLock Enterprise volume with unexpired write once, read many (WORM) files. This configuration must be applied separately before attempting to delete the resource to have the desired behavior. Defaults to `false`. +* `copy_tags_to_backups` - (Optional) A boolean flag indicating whether tags for the volume should be copied to backups. This value defaults to `false`. * `junction_path` - (Optional) Specifies the location in the storage virtual machine's namespace where the volume is mounted. The junction_path must have a leading forward slash, such as `/vol3` * `ontap_volume_type` - (Optional) Specifies the type of volume, valid values are `RW`, `DP`. Default value is `RW`. These can be set by the ONTAP CLI or API. 
This setting is used as part of migration and replication [Migrating to Amazon FSx for NetApp ONTAP](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/migrating-fsx-ontap.html) * `security_style` - (Optional) Specifies the volume security style, Valid values are `UNIX`, `NTFS`, and `MIXED`. * `size_in_megabytes` - (Required) Specifies the size of the volume, in megabytes (MB), that you are creating. * `skip_final_backup` - (Optional) When enabled, will skip the default final backup taken when the volume is deleted. This configuration must be applied separately before attempting to delete the resource to have the desired behavior. Defaults to `false`. +* `snaplock_configuration` - (Optional) The SnapLock configuration for an FSx for ONTAP volume. See [SnapLock Configuration](#snaplock-configuration) below. +* `snapshot_policy` - (Optional) Specifies the snapshot policy for the volume. See [snapshot policies](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/snapshots-ontap.html#snapshot-policies) in the Amazon FSx ONTAP User Guide * `storage_efficiency_enabled` - (Optional) Set to true to enable deduplication, compression, and compaction storage efficiency features on the volume. * `storage_virtual_machine_id` - (Required) Specifies the storage virtual machine in which to create the volume. * `tags` - (Optional) A map of tags to assign to the volume. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +* `tiering_policy` - (Optional) The data tiering policy for an FSx for ONTAP volume. See [Tiering Policy](#tiering-policy) below. -### tiering_policy +### SnapLock Configuration -The `tiering_policy` configuration block supports the following arguments: +* `audit_log_volume` - (Optional) Enables or disables the audit log volume for an FSx for ONTAP SnapLock volume. 
The default value is `false`. +* `autocommit_period` - (Optional) The configuration object for setting the autocommit period of files in an FSx for ONTAP SnapLock volume. See [Autocommit Period](#autocommit-period) below. +* `privileged_delete` - (Optional) Enables, disables, or permanently disables privileged delete on an FSx for ONTAP SnapLock Enterprise volume. Valid values: `DISABLED`, `ENABLED`, `PERMANENTLY_DISABLED`. The default value is `DISABLED`. +* `retention_period` - (Optional) The retention period of an FSx for ONTAP SnapLock volume. See [SnapLock Retention Period](#snaplock-retention-period) below. +* `snaplock_type` - (Required) Specifies the retention mode of an FSx for ONTAP SnapLock volume. After it is set, it can't be changed. Valid values: `COMPLIANCE`, `ENTERPRISE`. +* `volume_append_mode_enabled` - (Optional) Enables or disables volume-append mode on an FSx for ONTAP SnapLock volume. The default value is `false`. + +### Autocommit Period + +* `type` - (Required) The type of time for the autocommit period of a file in an FSx for ONTAP SnapLock volume. Setting this value to `NONE` disables autocommit. Valid values: `MINUTES`, `HOURS`, `DAYS`, `MONTHS`, `YEARS`, `NONE`. +* `value` - (Optional) The amount of time for the autocommit period of a file in an FSx for ONTAP SnapLock volume. + +### SnapLock Retention Period + +* `default_retention` - (Required) The retention period assigned to a write once, read many (WORM) file by default if an explicit retention period is not set for an FSx for ONTAP SnapLock volume. The default retention period must be greater than or equal to the minimum retention period and less than or equal to the maximum retention period. See [Retention Period](#retention-period) below. +* `maximum_retention` - (Required) The longest retention period that can be assigned to a WORM file on an FSx for ONTAP SnapLock volume. See [Retention Period](#retention-period) below. 
+* `minimum_retention` - (Required) The shortest retention period that can be assigned to a WORM file on an FSx for ONTAP SnapLock volume. See [Retention Period](#retention-period) below. + +### Retention Period + +* `type` - (Required) The type of time for the retention period of an FSx for ONTAP SnapLock volume. Set it to one of the valid types. If you set it to `INFINITE`, the files are retained forever. If you set it to `UNSPECIFIED`, the files are retained until you set an explicit retention period. Valid values: `SECONDS`, `MINUTES`, `HOURS`, `DAYS`, `MONTHS`, `YEARS`, `INFINITE`, `UNSPECIFIED`. +* `value` - (Optional) The amount of time for the retention period of a file in an FSx for ONTAP SnapLock volume. + +### Tiering Policy * `name` - (Required) Specifies the tiering policy for the ONTAP volume for moving data to the capacity pool storage. Valid values are `SNAPSHOT_ONLY`, `AUTO`, `ALL`, `NONE`. Default value is `SNAPSHOT_ONLY`. * `cooling_period` - (Optional) Specifies the number of days that user data in a volume must remain inactive before it is considered "cold" and moved to the capacity pool. Used with `AUTO` and `SNAPSHOT_ONLY` tiering policies only. Valid values are whole numbers between 2 and 183. Default values are 31 days for `AUTO` and 2 days for `SNAPSHOT_ONLY`. diff --git a/website/docs/r/fsx_openzfs_volume.html.markdown b/website/docs/r/fsx_openzfs_volume.html.markdown index bb6b71adeb1..40b7d4def58 100644 --- a/website/docs/r/fsx_openzfs_volume.html.markdown +++ b/website/docs/r/fsx_openzfs_volume.html.markdown @@ -29,6 +29,7 @@ This resource supports the following arguments: * `origin_snapshot` - (Optional) The ARN of the source snapshot to create the volume from. * `copy_tags_to_snapshots` - (Optional) A boolean flag indicating whether tags for the file system should be copied to snapshots. The default value is false. * `data_compression_type` - (Optional) Method used to compress the data on the volume. 
Valid values are `NONE` or `ZSTD`. Child volumes that don't specify compression option will inherit from parent volume. This option on file system applies to the root volume. +* `delete_volume_options` - (Optional) Whether to delete all child volumes and snapshots. Valid values: `DELETE_CHILD_VOLUMES_AND_SNAPSHOTS`. This configuration must be applied separately before attempting to delete the resource to have the desired behavior. * `nfs_exports` - (Optional) NFS export configuration for the root volume. Exactly 1 item. See [NFS Exports](#nfs-exports) Below. * `read_only` - (Optional) specifies whether the volume is read-only. Default is false. * `record_size_kib` - (Optional) The record size of an OpenZFS volume, in kibibytes (KiB). Valid values are `4`, `8`, `16`, `32`, `64`, `128`, `256`, `512`, or `1024` KiB. The default is `128` KiB.