From db0b1ab98d1a7fdbb645a5c1d6729da81059a92d Mon Sep 17 00:00:00 2001 From: AWS SDK for Go v2 automation user Date: Fri, 15 Mar 2024 18:21:39 +0000 Subject: [PATCH] Regenerated Clients --- .../01d13318dedf4a8b86a12928a86d4385.json | 8 + .../68d33d6c9b3844de8b0afd66b0961e69.json | 8 + .../698e90ef43a341a89c0aa557dcd7fb00.json | 8 + .../818f206affcd4f9d950f25dd0cfde2cc.json | 8 + .../84459f7a0648494d843b8097d9bf846d.json | 8 + .../8f0fe62a63624abf83c0578669f3591c.json | 8 + .../cd0ad7fee82a4255999963afeaa72ff0.json | 8 + .../ff0092594e4d408eb9ecb048b3bbfe2a.json | 8 + .../backup/api_op_ListBackupJobSummaries.go | 9 +- service/backup/api_op_ListBackupJobs.go | 8 +- .../api_op_ListRecoveryPointsByResource.go | 7 + service/backup/api_op_StopBackupJob.go | 5 +- service/backup/deserializers.go | 9 + .../backup/internal/endpoints/endpoints.go | 3 + service/backup/serializers.go | 4 + service/backup/types/types.go | 6 +- service/codebuild/api_op_CreateFleet.go | 7 + service/codebuild/api_op_UpdateFleet.go | 7 + service/codebuild/deserializers.go | 9 + service/codebuild/serializers.go | 10 + service/codebuild/types/enums.go | 18 + service/codebuild/types/types.go | 7 + .../internal/endpoints/endpoints.go | 6 + .../internal/endpoints/endpoints.go | 6 + .../internal/endpoints/endpoints.go | 15 + .../connect/api_op_CreateSecurityProfile.go | 8 + .../connect/api_op_UpdateSecurityProfile.go | 8 + service/connect/deserializers.go | 50 ++ service/connect/serializers.go | 111 ++++ service/connect/types/types.go | 61 ++ service/ec2/deserializers.go | 605 ++++++++++++++++++ service/ec2/types/types.go | 103 ++- .../internal/endpoints/endpoints.go | 3 + .../internal/endpoints/endpoints.go | 6 + .../internal/endpoints/endpoints.go | 3 + .../internal/endpoints/endpoints.go | 3 + service/kinesisanalyticsv2/types/enums.go | 2 + service/kinesisanalyticsv2/types/types.go | 24 +- service/s3/api_op_AbortMultipartUpload.go | 2 +- service/s3/api_op_CompleteMultipartUpload.go | 6 +- service/s3/api_op_CopyObject.go | 43 +- service/s3/api_op_CreateBucket.go | 19 +- service/s3/api_op_CreateMultipartUpload.go | 2 +- service/s3/api_op_DeleteBucket.go | 2 +- service/s3/api_op_DeleteBucketPolicy.go | 2 +- service/s3/api_op_DeleteObject.go | 29 +- service/s3/api_op_DeleteObjects.go | 2 +- .../api_op_GetBucketLifecycleConfiguration.go | 18 +- service/s3/api_op_GetBucketPolicy.go | 2 +- service/s3/api_op_GetObject.go | 2 +- service/s3/api_op_GetObjectAttributes.go | 2 +- service/s3/api_op_HeadBucket.go | 10 +- service/s3/api_op_HeadObject.go | 2 +- service/s3/api_op_ListMultipartUploads.go | 10 +- service/s3/api_op_ListObjects.go | 6 +- service/s3/api_op_ListObjectsV2.go | 6 +- service/s3/api_op_ListParts.go | 7 +- .../api_op_PutBucketLifecycleConfiguration.go | 16 +- service/s3/api_op_PutBucketPolicy.go | 2 +- service/s3/api_op_PutObject.go | 2 +- service/s3/api_op_RestoreObject.go | 34 +- service/s3/api_op_UploadPart.go | 2 +- service/s3/api_op_UploadPartCopy.go | 4 +- service/s3/types/types.go | 23 +- service/sagemaker/api_op_CreateDomain.go | 6 +- .../sagemaker/api_op_CreateFeatureGroup.go | 4 +- service/sagemaker/types/enums.go | 164 +++++ service/sagemaker/types/types.go | 17 +- .../signer/internal/endpoints/endpoints.go | 8 + .../textract/internal/endpoints/endpoints.go | 5 + .../api_op_UpdateDevice.go | 4 - service/workspacesthinclient/doc.go | 2 +- service/workspacesthinclient/serializers.go | 5 - 73 files changed, 1496 insertions(+), 161 deletions(-) create mode 100644 .changelog/01d13318dedf4a8b86a12928a86d4385.json 
create mode 100644 .changelog/68d33d6c9b3844de8b0afd66b0961e69.json create mode 100644 .changelog/698e90ef43a341a89c0aa557dcd7fb00.json create mode 100644 .changelog/818f206affcd4f9d950f25dd0cfde2cc.json create mode 100644 .changelog/84459f7a0648494d843b8097d9bf846d.json create mode 100644 .changelog/8f0fe62a63624abf83c0578669f3591c.json create mode 100644 .changelog/cd0ad7fee82a4255999963afeaa72ff0.json create mode 100644 .changelog/ff0092594e4d408eb9ecb048b3bbfe2a.json diff --git a/.changelog/01d13318dedf4a8b86a12928a86d4385.json b/.changelog/01d13318dedf4a8b86a12928a86d4385.json new file mode 100644 index 00000000000..5a408eb4b3b --- /dev/null +++ b/.changelog/01d13318dedf4a8b86a12928a86d4385.json @@ -0,0 +1,8 @@ +{ + "id": "01d13318-dedf-4a8b-86a1-2928a86d4385", + "type": "feature", + "description": "This release adds Hierarchy based Access Control fields to Security Profile public APIs and adds support for UserAttributeFilter to SearchUsers API.", + "modules": [ + "service/connect" + ] +} \ No newline at end of file diff --git a/.changelog/68d33d6c9b3844de8b0afd66b0961e69.json b/.changelog/68d33d6c9b3844de8b0afd66b0961e69.json new file mode 100644 index 00000000000..977dde06ec1 --- /dev/null +++ b/.changelog/68d33d6c9b3844de8b0afd66b0961e69.json @@ -0,0 +1,8 @@ +{ + "id": "68d33d6c-9b38-44de-8b0a-fd66b0961e69", + "type": "documentation", + "description": "Documentation updates for Amazon S3.", + "modules": [ + "service/s3" + ] +} \ No newline at end of file diff --git a/.changelog/698e90ef43a341a89c0aa557dcd7fb00.json b/.changelog/698e90ef43a341a89c0aa557dcd7fb00.json new file mode 100644 index 00000000000..7ed2977073f --- /dev/null +++ b/.changelog/698e90ef43a341a89c0aa557dcd7fb00.json @@ -0,0 +1,8 @@ +{ + "id": "698e90ef-43a3-41a8-9c0a-a557dcd7fb00", + "type": "feature", + "description": "AWS CodeBuild now supports overflow behavior on Reserved Capacity.", + "modules": [ + "service/codebuild" + ] +} \ No newline at end of file diff --git a/.changelog/818f206affcd4f9d950f25dd0cfde2cc.json b/.changelog/818f206affcd4f9d950f25dd0cfde2cc.json new file mode 100644 index 00000000000..ca0ab4fb43f --- /dev/null +++ b/.changelog/818f206affcd4f9d950f25dd0cfde2cc.json @@ -0,0 +1,8 @@ +{ + "id": "818f206a-ffcd-4f9d-950f-25dd0cfde2cc", + "type": "feature", + "description": "Adds m6i, m6id, m7i, c6i, c6id, c7i, r6i r6id, r7i, p5 instance type support to Sagemaker Notebook Instances and miscellaneous wording fixes for previous Sagemaker documentation.", + "modules": [ + "service/sagemaker" + ] +} \ No newline at end of file diff --git a/.changelog/84459f7a0648494d843b8097d9bf846d.json b/.changelog/84459f7a0648494d843b8097d9bf846d.json new file mode 100644 index 00000000000..d7cb24ce1c8 --- /dev/null +++ b/.changelog/84459f7a0648494d843b8097d9bf846d.json @@ -0,0 +1,8 @@ +{ + "id": "84459f7a-0648-494d-843b-8097d9bf846d", + "type": "feature", + "description": "Add media accelerator and neuron device information on the describe instance types API.", + "modules": [ + "service/ec2" + ] +} \ No newline at end of file diff --git a/.changelog/8f0fe62a63624abf83c0578669f3591c.json b/.changelog/8f0fe62a63624abf83c0578669f3591c.json new file mode 100644 index 00000000000..06d086a6669 --- /dev/null +++ b/.changelog/8f0fe62a63624abf83c0578669f3591c.json @@ -0,0 +1,8 @@ +{ + "id": "8f0fe62a-6362-4abf-83c0-578669f3591c", + "type": "feature", + "description": "Removed unused parameter kmsKeyArn from UpdateDeviceRequest", + "modules": [ + "service/workspacesthinclient" + ] +} \ No newline at end of file diff --git 
a/.changelog/cd0ad7fee82a4255999963afeaa72ff0.json b/.changelog/cd0ad7fee82a4255999963afeaa72ff0.json new file mode 100644 index 00000000000..f8e7596b818 --- /dev/null +++ b/.changelog/cd0ad7fee82a4255999963afeaa72ff0.json @@ -0,0 +1,8 @@ +{ + "id": "cd0ad7fe-e82a-4255-9999-63afeaa72ff0", + "type": "feature", + "description": "Support for Flink 1.18 in Managed Service for Apache Flink", + "modules": [ + "service/kinesisanalyticsv2" + ] +} \ No newline at end of file diff --git a/.changelog/ff0092594e4d408eb9ecb048b3bbfe2a.json b/.changelog/ff0092594e4d408eb9ecb048b3bbfe2a.json new file mode 100644 index 00000000000..01355c616cd --- /dev/null +++ b/.changelog/ff0092594e4d408eb9ecb048b3bbfe2a.json @@ -0,0 +1,8 @@ +{ + "id": "ff009259-4e4d-408e-b9ec-b048b3bbfe2a", + "type": "feature", + "description": "This release introduces a boolean attribute ManagedByAWSBackupOnly as part of ListRecoveryPointsByResource api to filter the recovery points based on ownership. This attribute can be used to filter out the recovery points protected by AWSBackup.", + "modules": [ + "service/backup" + ] +} \ No newline at end of file diff --git a/service/backup/api_op_ListBackupJobSummaries.go b/service/backup/api_op_ListBackupJobSummaries.go index cf70d053e9f..ce7132bab8e 100644 --- a/service/backup/api_op_ListBackupJobSummaries.go +++ b/service/backup/api_op_ListBackupJobSummaries.go @@ -77,7 +77,14 @@ type ListBackupJobSummariesInput struct { // This parameter returns the job count for jobs with the specified state. The the // value ANY returns count of all states. AGGREGATE_ALL aggregates job counts for - // all states and returns the sum. + // all states and returns the sum. Completed with issues is a status found only in + // the Backup console. For API, this status refers to jobs with a state of + // COMPLETED and a MessageCategory with a value other than SUCCESS ; that is, the + // status is completed but comes with a status message. To obtain the job count for + // Completed with issues , run two GET requests, and subtract the second, smaller + // number: GET + // /audit/backup-job-summaries?AggregationPeriod=FOURTEEN_DAYS&State=COMPLETED GET + // /audit/backup-job-summaries?AggregationPeriod=FOURTEEN_DAYS&MessageCategory=SUCCESS&State=COMPLETED State types.BackupJobStatus noSmithyDocumentSerde diff --git a/service/backup/api_op_ListBackupJobs.go b/service/backup/api_op_ListBackupJobs.go index a3d4c152583..682fc9338ec 100644 --- a/service/backup/api_op_ListBackupJobs.go +++ b/service/backup/api_op_ListBackupJobs.go @@ -90,7 +90,13 @@ type ListBackupJobsInput struct { // - VirtualMachine for virtual machines ByResourceType *string - // Returns only backup jobs that are in the specified state. + // Returns only backup jobs that are in the specified state. Completed with issues + // is a status found only in the Backup console. For API, this status refers to + // jobs with a state of COMPLETED and a MessageCategory with a value other than + // SUCCESS ; that is, the status is completed but comes with a status message. To + // obtain the job count for Completed with issues , run two GET requests, and + // subtract the second, smaller number: GET /backup-jobs/?state=COMPLETED GET + // /backup-jobs/?messageCategory=SUCCESS&state=COMPLETED ByState types.BackupJobState // The maximum number of items to be returned. 
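For reference, a minimal sketch (not part of this patch) of the two-request computation that the ListBackupJobSummaries documentation above describes, written against the regenerated backup client. Enum constant names such as types.AggregationPeriodFourteenDays and the exact shape of BackupJobSummary.Count are assumptions to verify against your SDK version.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/backup"
	"github.com/aws/aws-sdk-go-v2/service/backup/types"
)

// sumCounts totals the per-window job counts in a summaries response.
// Count is assumed to be a plain int32 here; dereference it instead if your
// SDK version models it as *int32.
func sumCounts(out *backup.ListBackupJobSummariesOutput) int32 {
	var total int32
	for _, s := range out.BackupJobSummaries {
		total += s.Count
	}
	return total
}

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := backup.NewFromConfig(cfg)

	// First request: every COMPLETED job over the last fourteen days.
	completed, err := client.ListBackupJobSummaries(context.TODO(), &backup.ListBackupJobSummariesInput{
		AggregationPeriod: types.AggregationPeriodFourteenDays,
		State:             types.BackupJobStatusCompleted,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Second request: only the COMPLETED jobs whose MessageCategory is SUCCESS.
	succeeded, err := client.ListBackupJobSummaries(context.TODO(), &backup.ListBackupJobSummariesInput{
		AggregationPeriod: types.AggregationPeriodFourteenDays,
		State:             types.BackupJobStatusCompleted,
		MessageCategory:   aws.String("SUCCESS"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// "Completed with issues" is the difference between the two counts.
	fmt.Println("Completed with issues:", sumCounts(completed)-sumCounts(succeeded))
}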
diff --git a/service/backup/api_op_ListRecoveryPointsByResource.go b/service/backup/api_op_ListRecoveryPointsByResource.go index f63698f49ee..447dddacc91 100644 --- a/service/backup/api_op_ListRecoveryPointsByResource.go +++ b/service/backup/api_op_ListRecoveryPointsByResource.go @@ -37,6 +37,13 @@ type ListRecoveryPointsByResourceInput struct { // This member is required. ResourceArn *string + // This attribute filters recovery points based on ownership. If this is set to + // TRUE , the response will contain recovery points associated with the selected + // resources that are managed by Backup. If this is set to FALSE , the response + // will contain all recovery points associated with the selected resource. Type: + // Boolean + ManagedByAWSBackupOnly bool + // The maximum number of items to be returned. Amazon RDS requires a value of at // least 20. MaxResults *int32 diff --git a/service/backup/api_op_StopBackupJob.go b/service/backup/api_op_StopBackupJob.go index 07008d934cc..c8be2fd908c 100644 --- a/service/backup/api_op_StopBackupJob.go +++ b/service/backup/api_op_StopBackupJob.go @@ -12,8 +12,9 @@ import ( // Attempts to cancel a job to create a one-time backup of a resource. This action // is not supported for the following services: Amazon FSx for Windows File Server, -// Amazon FSx for Lustre, FSx for ONTAP , Amazon FSx for OpenZFS, Amazon DocumentDB -// (with MongoDB compatibility), Amazon RDS, Amazon Aurora, and Amazon Neptune. +// Amazon FSx for Lustre, Amazon FSx for NetApp ONTAP , Amazon FSx for OpenZFS, +// Amazon DocumentDB (with MongoDB compatibility), Amazon RDS, Amazon Aurora, and +// Amazon Neptune. func (c *Client) StopBackupJob(ctx context.Context, params *StopBackupJobInput, optFns ...func(*Options)) (*StopBackupJobOutput, error) { if params == nil { params = &StopBackupJobInput{} diff --git a/service/backup/deserializers.go b/service/backup/deserializers.go index 5dbb578b2aa..702c5e68d38 100644 --- a/service/backup/deserializers.go +++ b/service/backup/deserializers.go @@ -19849,6 +19849,15 @@ func awsRestjson1_deserializeDocumentRecoveryPointByResource(v **types.RecoveryP sv.StatusMessage = ptr.String(jtv) } + case "VaultType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected VaultType to be of type string, got %T instead", value) + } + sv.VaultType = types.VaultType(jtv) + } + default: _, _ = key, value diff --git a/service/backup/internal/endpoints/endpoints.go b/service/backup/internal/endpoints/endpoints.go index efd393822ca..ed710bde32d 100644 --- a/service/backup/internal/endpoints/endpoints.go +++ b/service/backup/internal/endpoints/endpoints.go @@ -175,6 +175,9 @@ var defaultPartitions = endpoints.Partitions{ endpoints.EndpointKey{ Region: "ca-central-1", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ca-west-1", + }: endpoints.Endpoint{}, endpoints.EndpointKey{ Region: "eu-central-1", }: endpoints.Endpoint{}, diff --git a/service/backup/serializers.go b/service/backup/serializers.go index 177553c0fa4..85fc756ef5a 100644 --- a/service/backup/serializers.go +++ b/service/backup/serializers.go @@ -4736,6 +4736,10 @@ func awsRestjson1_serializeOpHttpBindingsListRecoveryPointsByResourceInput(v *Li return fmt.Errorf("unsupported serialization of nil %T", v) } + if v.ManagedByAWSBackupOnly { + encoder.SetQuery("managedByAWSBackupOnly").Boolean(v.ManagedByAWSBackupOnly) + } + if v.MaxResults != nil { encoder.SetQuery("maxResults").Integer(*v.MaxResults) } diff --git a/service/backup/types/types.go 
b/service/backup/types/types.go index 54250c6bc4d..4d1b32d208b 100644 --- a/service/backup/types/types.go +++ b/service/backup/types/types.go @@ -986,7 +986,8 @@ type FrameworkControl struct { // The scope of a control. The control scope defines what the control will // evaluate. Three examples of control scopes are: a specific backup plan, all - // backup plans with a specific tag, or all backup plans. + // backup plans with a specific tag, or all backup plans. For more information, see + // ControlScope . (https://docs.aws.amazon.com/aws-backup/latest/devguide/API_ControlScope.html) ControlScope *ControlScope noSmithyDocumentSerde @@ -1289,6 +1290,9 @@ type RecoveryPointByResource struct { // A message explaining the reason of the recovery point deletion failure. StatusMessage *string + // This is the type of vault in which the described recovery point is stored. + VaultType VaultType + noSmithyDocumentSerde } diff --git a/service/codebuild/api_op_CreateFleet.go b/service/codebuild/api_op_CreateFleet.go index 947ad93c93a..a1c79635fd1 100644 --- a/service/codebuild/api_op_CreateFleet.go +++ b/service/codebuild/api_op_CreateFleet.go @@ -96,6 +96,13 @@ type CreateFleetInput struct { // This member is required. Name *string + // The compute fleet overflow behavior. + // - For overflow behavior QUEUE , your overflow builds need to wait on the + // existing fleet instance to become available. + // - For overflow behavior ON_DEMAND , your overflow builds run on CodeBuild + // on-demand. + OverflowBehavior types.FleetOverflowBehavior + // The scaling configuration of the compute fleet. ScalingConfiguration *types.ScalingConfigurationInput diff --git a/service/codebuild/api_op_UpdateFleet.go b/service/codebuild/api_op_UpdateFleet.go index d3dcd533f1f..9a532baebe8 100644 --- a/service/codebuild/api_op_UpdateFleet.go +++ b/service/codebuild/api_op_UpdateFleet.go @@ -90,6 +90,13 @@ type UpdateFleetInput struct { // in the CodeBuild user guide. EnvironmentType types.EnvironmentType + // The compute fleet overflow behavior. + // - For overflow behavior QUEUE , your overflow builds need to wait on the + // existing fleet instance to become available. + // - For overflow behavior ON_DEMAND , your overflow builds run on CodeBuild + // on-demand. + OverflowBehavior types.FleetOverflowBehavior + // The scaling configuration of the compute fleet. 
ScalingConfiguration *types.ScalingConfigurationInput diff --git a/service/codebuild/deserializers.go b/service/codebuild/deserializers.go index 310df9f27cb..d1f8f5e2334 100644 --- a/service/codebuild/deserializers.go +++ b/service/codebuild/deserializers.go @@ -8332,6 +8332,15 @@ func awsAwsjson11_deserializeDocumentFleet(v **types.Fleet, value interface{}) e sv.Name = ptr.String(jtv) } + case "overflowBehavior": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected FleetOverflowBehavior to be of type string, got %T instead", value) + } + sv.OverflowBehavior = types.FleetOverflowBehavior(jtv) + } + case "scalingConfiguration": if err := awsAwsjson11_deserializeDocumentScalingConfigurationOutput(&sv.ScalingConfiguration, value); err != nil { return err diff --git a/service/codebuild/serializers.go b/service/codebuild/serializers.go index 070bbe79844..5906fe501c1 100644 --- a/service/codebuild/serializers.go +++ b/service/codebuild/serializers.go @@ -3804,6 +3804,11 @@ func awsAwsjson11_serializeOpDocumentCreateFleetInput(v *CreateFleetInput, value ok.String(*v.Name) } + if len(v.OverflowBehavior) > 0 { + ok := object.Key("overflowBehavior") + ok.String(string(v.OverflowBehavior)) + } + if v.ScalingConfiguration != nil { ok := object.Key("scalingConfiguration") if err := awsAwsjson11_serializeDocumentScalingConfigurationInput(v.ScalingConfiguration, ok); err != nil { @@ -5081,6 +5086,11 @@ func awsAwsjson11_serializeOpDocumentUpdateFleetInput(v *UpdateFleetInput, value ok.String(string(v.EnvironmentType)) } + if len(v.OverflowBehavior) > 0 { + ok := object.Key("overflowBehavior") + ok.String(string(v.OverflowBehavior)) + } + if v.ScalingConfiguration != nil { ok := object.Key("scalingConfiguration") if err := awsAwsjson11_serializeDocumentScalingConfigurationInput(v.ScalingConfiguration, ok); err != nil { diff --git a/service/codebuild/types/enums.go b/service/codebuild/types/enums.go index 90c4b1fd35e..e495b6458b9 100644 --- a/service/codebuild/types/enums.go +++ b/service/codebuild/types/enums.go @@ -352,6 +352,24 @@ func (FleetContextCode) Values() []FleetContextCode { } } +type FleetOverflowBehavior string + +// Enum values for FleetOverflowBehavior +const ( + FleetOverflowBehaviorQueue FleetOverflowBehavior = "QUEUE" + FleetOverflowBehaviorOnDemand FleetOverflowBehavior = "ON_DEMAND" +) + +// Values returns all known values for FleetOverflowBehavior. Note that this can +// be expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (FleetOverflowBehavior) Values() []FleetOverflowBehavior { + return []FleetOverflowBehavior{ + "QUEUE", + "ON_DEMAND", + } +} + type FleetScalingMetricType string // Enum values for FleetScalingMetricType diff --git a/service/codebuild/types/types.go b/service/codebuild/types/types.go index f607575efdf..d4dabe23c6a 100644 --- a/service/codebuild/types/types.go +++ b/service/codebuild/types/types.go @@ -808,6 +808,13 @@ type Fleet struct { // The name of the compute fleet. Name *string + // The compute fleet overflow behavior. + // - For overflow behavior QUEUE , your overflow builds need to wait on the + // existing fleet instance to become available. + // - For overflow behavior ON_DEMAND , your overflow builds run on CodeBuild + // on-demand. + OverflowBehavior FleetOverflowBehavior + // The scaling configuration of the compute fleet. 
ScalingConfiguration *ScalingConfigurationOutput diff --git a/service/cognitoidentity/internal/endpoints/endpoints.go b/service/cognitoidentity/internal/endpoints/endpoints.go index 53c49d1905b..fd8cbb661cd 100644 --- a/service/cognitoidentity/internal/endpoints/endpoints.go +++ b/service/cognitoidentity/internal/endpoints/endpoints.go @@ -169,6 +169,9 @@ var defaultPartitions = endpoints.Partitions{ endpoints.EndpointKey{ Region: "eu-central-1", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-central-2", + }: endpoints.Endpoint{}, endpoints.EndpointKey{ Region: "eu-north-1", }: endpoints.Endpoint{}, @@ -223,6 +226,9 @@ var defaultPartitions = endpoints.Partitions{ endpoints.EndpointKey{ Region: "il-central-1", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "me-central-1", + }: endpoints.Endpoint{}, endpoints.EndpointKey{ Region: "me-south-1", }: endpoints.Endpoint{}, diff --git a/service/cognitoidentityprovider/internal/endpoints/endpoints.go b/service/cognitoidentityprovider/internal/endpoints/endpoints.go index 8e059d9d98c..72501e5745b 100644 --- a/service/cognitoidentityprovider/internal/endpoints/endpoints.go +++ b/service/cognitoidentityprovider/internal/endpoints/endpoints.go @@ -169,6 +169,9 @@ var defaultPartitions = endpoints.Partitions{ endpoints.EndpointKey{ Region: "eu-central-1", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-central-2", + }: endpoints.Endpoint{}, endpoints.EndpointKey{ Region: "eu-north-1", }: endpoints.Endpoint{}, @@ -223,6 +226,9 @@ var defaultPartitions = endpoints.Partitions{ endpoints.EndpointKey{ Region: "il-central-1", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "me-central-1", + }: endpoints.Endpoint{}, endpoints.EndpointKey{ Region: "me-south-1", }: endpoints.Endpoint{}, diff --git a/service/comprehendmedical/internal/endpoints/endpoints.go b/service/comprehendmedical/internal/endpoints/endpoints.go index eef0cd5ece1..75a07564cc7 100644 --- a/service/comprehendmedical/internal/endpoints/endpoints.go +++ b/service/comprehendmedical/internal/endpoints/endpoints.go @@ -145,12 +145,27 @@ var defaultPartitions = endpoints.Partitions{ endpoints.EndpointKey{ Region: "ca-central-1", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ca-central-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "comprehendmedical-fips.ca-central-1.amazonaws.com", + }, endpoints.EndpointKey{ Region: "eu-west-1", }: endpoints.Endpoint{}, endpoints.EndpointKey{ Region: "eu-west-2", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "fips-ca-central-1", + }: endpoints.Endpoint{ + Hostname: "comprehendmedical-fips.ca-central-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ca-central-1", + }, + Deprecated: aws.TrueTernary, + }, endpoints.EndpointKey{ Region: "fips-us-east-1", }: endpoints.Endpoint{ diff --git a/service/connect/api_op_CreateSecurityProfile.go b/service/connect/api_op_CreateSecurityProfile.go index f17fc29ab6a..70685f33455 100644 --- a/service/connect/api_op_CreateSecurityProfile.go +++ b/service/connect/api_op_CreateSecurityProfile.go @@ -40,6 +40,10 @@ type CreateSecurityProfileInput struct { // This member is required. SecurityProfileName *string + // The identifier of the hierarchy group that a security profile uses to restrict + // access to resources in Amazon Connect. + AllowedAccessControlHierarchyGroupId *string + // The list of tags that a security profile uses to restrict access to resources // in Amazon Connect. 
AllowedAccessControlTags map[string]string @@ -51,6 +55,10 @@ type CreateSecurityProfileInput struct { // The description of the security profile. Description *string + // The list of resources that a security profile applies hierarchy restrictions to + // in Amazon Connect. Following are acceptable ResourceNames: User . + HierarchyRestrictedResources []string + // Permissions assigned to the security profile. For a list of valid permissions, // see List of security profile permissions (https://docs.aws.amazon.com/connect/latest/adminguide/security-profile-list.html) // . diff --git a/service/connect/api_op_UpdateSecurityProfile.go b/service/connect/api_op_UpdateSecurityProfile.go index e6cd6ee9c30..4e788177efb 100644 --- a/service/connect/api_op_UpdateSecurityProfile.go +++ b/service/connect/api_op_UpdateSecurityProfile.go @@ -40,6 +40,10 @@ type UpdateSecurityProfileInput struct { // This member is required. SecurityProfileId *string + // The identifier of the hierarchy group that a security profile uses to restrict + // access to resources in Amazon Connect. + AllowedAccessControlHierarchyGroupId *string + // The list of tags that a security profile uses to restrict access to resources // in Amazon Connect. AllowedAccessControlTags map[string]string @@ -51,6 +55,10 @@ type UpdateSecurityProfileInput struct { // The description of the security profile. Description *string + // The list of resources that a security profile applies hierarchy restrictions to + // in Amazon Connect. Following are acceptable ResourceNames: User . + HierarchyRestrictedResources []string + // The permissions granted to a security profile. For a list of valid permissions, // see List of security profile permissions (https://docs.aws.amazon.com/connect/latest/adminguide/security-profile-list.html) // . 
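A minimal sketch of exercising the new hierarchy-based access control fields on CreateSecurityProfile shown above, assuming default credential resolution; the instance ID, profile name, and hierarchy group ID are hypothetical placeholders.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/connect"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := connect.NewFromConfig(cfg)

	_, err = client.CreateSecurityProfile(context.TODO(), &connect.CreateSecurityProfileInput{
		InstanceId:          aws.String("11111111-2222-3333-4444-555555555555"), // placeholder
		SecurityProfileName: aws.String("HierarchyRestrictedAgents"),            // placeholder
		// Restrict resource access to a single hierarchy group.
		AllowedAccessControlHierarchyGroupId: aws.String("66666666-7777-8888-9999-000000000000"), // placeholder
		// Per the documentation above, User is the acceptable ResourceName.
		HierarchyRestrictedResources: []string{"User"},
	})
	if err != nil {
		log.Fatal(err)
	}
}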
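Similarly, for the CodeBuild fleet hunks earlier in this patch, a minimal sketch of creating a reserved-capacity fleet with the new overflow behavior; the fleet name is a placeholder and the compute/environment enum values are illustrative choices, not requirements of the new field.

package examples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/codebuild"
	"github.com/aws/aws-sdk-go-v2/service/codebuild/types"
)

// createOnDemandOverflowFleet creates a reserved-capacity fleet whose overflow
// builds run on CodeBuild on-demand instead of queueing for a fleet instance.
func createOnDemandOverflowFleet(ctx context.Context, client *codebuild.Client) error {
	_, err := client.CreateFleet(ctx, &codebuild.CreateFleetInput{
		Name:             aws.String("reserved-capacity-fleet"), // placeholder
		BaseCapacity:     aws.Int32(2),
		ComputeType:      types.ComputeTypeBuildGeneral1Small,
		EnvironmentType:  types.EnvironmentTypeLinuxContainer,
		OverflowBehavior: types.FleetOverflowBehaviorOnDemand,
	})
	return err
}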
diff --git a/service/connect/deserializers.go b/service/connect/deserializers.go index e68d45e13f2..c12b7deb3e7 100644 --- a/service/connect/deserializers.go +++ b/service/connect/deserializers.go @@ -42454,6 +42454,42 @@ func awsRestjson1_deserializeDocumentHierarchyPathReference(v **types.HierarchyP return nil } +func awsRestjson1_deserializeDocumentHierarchyRestrictedResourceList(v *[]string, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []string + if *v == nil { + cv = []string{} + } else { + cv = *v + } + + for _, value := range shape { + var col string + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected HierarchyRestrictedResourceName to be of type string, got %T instead", value) + } + col = jtv + } + cv = append(cv, col) + + } + *v = cv + return nil +} + func awsRestjson1_deserializeDocumentHierarchyStructure(v **types.HierarchyStructure, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -50070,6 +50106,15 @@ func awsRestjson1_deserializeDocumentSecurityProfile(v **types.SecurityProfile, for key, value := range shape { switch key { + case "AllowedAccessControlHierarchyGroupId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected HierarchyGroupId to be of type string, got %T instead", value) + } + sv.AllowedAccessControlHierarchyGroupId = ptr.String(jtv) + } + case "AllowedAccessControlTags": if err := awsRestjson1_deserializeDocumentAllowedAccessControlTags(&sv.AllowedAccessControlTags, value); err != nil { return err @@ -50093,6 +50138,11 @@ func awsRestjson1_deserializeDocumentSecurityProfile(v **types.SecurityProfile, sv.Description = ptr.String(jtv) } + case "HierarchyRestrictedResources": + if err := awsRestjson1_deserializeDocumentHierarchyRestrictedResourceList(&sv.HierarchyRestrictedResources, value); err != nil { + return err + } + case "Id": if value != nil { jtv, ok := value.(string) diff --git a/service/connect/serializers.go b/service/connect/serializers.go index cf1d313adfd..c11f15f3939 100644 --- a/service/connect/serializers.go +++ b/service/connect/serializers.go @@ -3639,6 +3639,11 @@ func awsRestjson1_serializeOpDocumentCreateSecurityProfileInput(v *CreateSecurit object := value.Object() defer object.Close() + if v.AllowedAccessControlHierarchyGroupId != nil { + ok := object.Key("AllowedAccessControlHierarchyGroupId") + ok.String(*v.AllowedAccessControlHierarchyGroupId) + } + if v.AllowedAccessControlTags != nil { ok := object.Key("AllowedAccessControlTags") if err := awsRestjson1_serializeDocumentAllowedAccessControlTags(v.AllowedAccessControlTags, ok); err != nil { @@ -3658,6 +3663,13 @@ func awsRestjson1_serializeOpDocumentCreateSecurityProfileInput(v *CreateSecurit ok.String(*v.Description) } + if v.HierarchyRestrictedResources != nil { + ok := object.Key("HierarchyRestrictedResources") + if err := awsRestjson1_serializeDocumentHierarchyRestrictedResourceList(v.HierarchyRestrictedResources, ok); err != nil { + return err + } + } + if v.Permissions != nil { ok := object.Key("Permissions") if err := awsRestjson1_serializeDocumentPermissionsList(v.Permissions, ok); err != nil { @@ -20757,6 +20769,11 @@ func awsRestjson1_serializeOpDocumentUpdateSecurityProfileInput(v *UpdateSecurit object := value.Object() defer object.Close() + if 
v.AllowedAccessControlHierarchyGroupId != nil { + ok := object.Key("AllowedAccessControlHierarchyGroupId") + ok.String(*v.AllowedAccessControlHierarchyGroupId) + } + if v.AllowedAccessControlTags != nil { ok := object.Key("AllowedAccessControlTags") if err := awsRestjson1_serializeDocumentAllowedAccessControlTags(v.AllowedAccessControlTags, ok); err != nil { @@ -20776,6 +20793,13 @@ func awsRestjson1_serializeOpDocumentUpdateSecurityProfileInput(v *UpdateSecurit ok.String(*v.Description) } + if v.HierarchyRestrictedResources != nil { + ok := object.Key("HierarchyRestrictedResources") + if err := awsRestjson1_serializeDocumentHierarchyRestrictedResourceList(v.HierarchyRestrictedResources, ok); err != nil { + return err + } + } + if v.Permissions != nil { ok := object.Key("Permissions") if err := awsRestjson1_serializeDocumentPermissionsList(v.Permissions, ok); err != nil { @@ -22201,6 +22225,40 @@ func awsRestjson1_serializeDocumentAssignContactCategoryActionDefinition(v *type return nil } +func awsRestjson1_serializeDocumentAttributeAndCondition(v *types.AttributeAndCondition, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.HierarchyGroupCondition != nil { + ok := object.Key("HierarchyGroupCondition") + if err := awsRestjson1_serializeDocumentHierarchyGroupCondition(v.HierarchyGroupCondition, ok); err != nil { + return err + } + } + + if v.TagConditions != nil { + ok := object.Key("TagConditions") + if err := awsRestjson1_serializeDocumentTagAndConditionList(v.TagConditions, ok); err != nil { + return err + } + } + + return nil +} + +func awsRestjson1_serializeDocumentAttributeOrConditionList(v []types.AttributeAndCondition, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsRestjson1_serializeDocumentAttributeAndCondition(&v[i], av); err != nil { + return err + } + } + return nil +} + func awsRestjson1_serializeDocumentAttributes(v map[string]string, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -22461,6 +22519,41 @@ func awsRestjson1_serializeDocumentControlPlaneTagFilter(v *types.ControlPlaneTa return nil } +func awsRestjson1_serializeDocumentControlPlaneUserAttributeFilter(v *types.ControlPlaneUserAttributeFilter, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AndCondition != nil { + ok := object.Key("AndCondition") + if err := awsRestjson1_serializeDocumentAttributeAndCondition(v.AndCondition, ok); err != nil { + return err + } + } + + if v.HierarchyGroupCondition != nil { + ok := object.Key("HierarchyGroupCondition") + if err := awsRestjson1_serializeDocumentHierarchyGroupCondition(v.HierarchyGroupCondition, ok); err != nil { + return err + } + } + + if v.OrConditions != nil { + ok := object.Key("OrConditions") + if err := awsRestjson1_serializeDocumentAttributeOrConditionList(v.OrConditions, ok); err != nil { + return err + } + } + + if v.TagCondition != nil { + ok := object.Key("TagCondition") + if err := awsRestjson1_serializeDocumentTagCondition(v.TagCondition, ok); err != nil { + return err + } + } + + return nil +} + func awsRestjson1_serializeDocumentCreateCaseActionDefinition(v *types.CreateCaseActionDefinition, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -23363,6 +23456,17 @@ func awsRestjson1_serializeDocumentHierarchyLevelUpdate(v *types.HierarchyLevelU return nil } +func 
awsRestjson1_serializeDocumentHierarchyRestrictedResourceList(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + func awsRestjson1_serializeDocumentHierarchyStructureUpdate(v *types.HierarchyStructureUpdate, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -25919,6 +26023,13 @@ func awsRestjson1_serializeDocumentUserSearchFilter(v *types.UserSearchFilter, v } } + if v.UserAttributeFilter != nil { + ok := object.Key("UserAttributeFilter") + if err := awsRestjson1_serializeDocumentControlPlaneUserAttributeFilter(v.UserAttributeFilter, ok); err != nil { + return err + } + } + return nil } diff --git a/service/connect/types/types.go b/service/connect/types/types.go index 0a49f8b6cec..1796f677db5 100644 --- a/service/connect/types/types.go +++ b/service/connect/types/types.go @@ -282,6 +282,18 @@ type Attribute struct { noSmithyDocumentSerde } +// A list of conditions which would be applied together with an AND condition. +type AttributeAndCondition struct { + + // A leaf node condition which can be used to specify a hierarchy group condition. + HierarchyGroupCondition *HierarchyGroupCondition + + // A leaf node condition which can be used to specify a tag condition. + TagConditions []TagCondition + + noSmithyDocumentSerde +} + // Has audio-specific configurations as the operating parameter for Echo Reduction. type AudioFeatures struct { @@ -814,6 +826,34 @@ type ControlPlaneTagFilter struct { noSmithyDocumentSerde } +// An object that can be used to specify Tag conditions or Hierarchy Group +// conditions inside the SearchFilter . This accepts an OR of AND (List of List) +// input where: +// - The top level list specifies conditions that need to be applied with OR +// operator +// - The inner list specifies conditions that need to be applied with AND +// operator. +// +// Only one field can be populated. Maximum number of allowed Tag conditions is +// 25. Maximum number of allowed Hierarchy Group conditions is 20. +type ControlPlaneUserAttributeFilter struct { + + // A list of conditions which would be applied together with an AND condition. + AndCondition *AttributeAndCondition + + // A leaf node condition which can be used to specify a hierarchy group condition. + HierarchyGroupCondition *HierarchyGroupCondition + + // A list of conditions which would be applied together with an OR condition. + OrConditions []AttributeAndCondition + + // A leaf node condition which can be used to specify a tag condition, for + // example, HAVE BPO = 123 . + TagCondition *TagCondition + + noSmithyDocumentSerde +} + // The CreateCase action definition. type CreateCaseActionDefinition struct { @@ -4380,6 +4420,10 @@ type SecurityKey struct { // Contains information about a security profile. type SecurityProfile struct { + // The identifier of the hierarchy group that a security profile uses to restrict + // access to resources in Amazon Connect. + AllowedAccessControlHierarchyGroupId *string + // The list of tags that a security profile uses to restrict access to resources // in Amazon Connect. AllowedAccessControlTags map[string]string @@ -4390,6 +4434,10 @@ type SecurityProfile struct { // The description of the security profile. Description *string + // The list of resources that a security profile applies hierarchy restrictions to + // in Amazon Connect. Following are acceptable ResourceNames: User . 
+ HierarchyRestrictedResources []string + // The identifier for the security profile. Id *string @@ -5318,6 +5366,19 @@ type UserSearchFilter struct { // - Inner list specifies conditions that need to be applied with AND operator. TagFilter *ControlPlaneTagFilter + // An object that can be used to specify Tag conditions or Hierarchy Group + // conditions inside the SearchFilter. This accepts an OR of AND (List of List) + // input where: + // - The top level list specifies conditions that need to be applied with OR + // operator. + // - The inner list specifies conditions that need to be applied with AND + // operator. + // Only one field can be populated. This object can’t be used along with + // TagFilter. Request can either contain TagFilter or UserAttributeFilter if + // SearchFilter is specified, combination of both is not supported and such request + // will throw AccessDeniedException. + UserAttributeFilter *ControlPlaneUserAttributeFilter + noSmithyDocumentSerde } diff --git a/service/ec2/deserializers.go b/service/ec2/deserializers.go index af45acc39e6..cdb4c28ebd3 100644 --- a/service/ec2/deserializers.go +++ b/service/ec2/deserializers.go @@ -86950,6 +86950,12 @@ func awsEc2query_deserializeDocumentInstanceTypeInfo(v **types.InstanceTypeInfo, sv.InstanceType = types.InstanceType(xtv) } + case strings.EqualFold("mediaAcceleratorInfo", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsEc2query_deserializeDocumentMediaAcceleratorInfo(&sv.MediaAcceleratorInfo, nodeDecoder); err != nil { + return err + } + case strings.EqualFold("memoryInfo", t.Name.Local): nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) if err := awsEc2query_deserializeDocumentMemoryInfo(&sv.MemoryInfo, nodeDecoder); err != nil { @@ -86962,6 +86968,12 @@ func awsEc2query_deserializeDocumentInstanceTypeInfo(v **types.InstanceTypeInfo, return err } + case strings.EqualFold("neuronInfo", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsEc2query_deserializeDocumentNeuronInfo(&sv.NeuronInfo, nodeDecoder); err != nil { + return err + } + case strings.EqualFold("nitroEnclavesSupport", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -99504,6 +99516,271 @@ func awsEc2query_deserializeDocumentManagedPrefixListSetUnwrapped(v *[]types.Man *v = sv return nil } +func awsEc2query_deserializeDocumentMediaAcceleratorInfo(v **types.MediaAcceleratorInfo, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.MediaAcceleratorInfo + if *v == nil { + sv = &types.MediaAcceleratorInfo{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("accelerators", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsEc2query_deserializeDocumentMediaDeviceInfoList(&sv.Accelerators, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("totalMediaMemoryInMiB", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.TotalMediaMemoryInMiB = ptr.Int32(int32(i64)) + } + + default: + // Do nothing and ignore the unexpected tag element + err = 
decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsEc2query_deserializeDocumentMediaDeviceInfo(v **types.MediaDeviceInfo, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.MediaDeviceInfo + if *v == nil { + sv = &types.MediaDeviceInfo{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("count", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.Count = ptr.Int32(int32(i64)) + } + + case strings.EqualFold("manufacturer", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Manufacturer = ptr.String(xtv) + } + + case strings.EqualFold("memoryInfo", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsEc2query_deserializeDocumentMediaDeviceMemoryInfo(&sv.MemoryInfo, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("name", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Name = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsEc2query_deserializeDocumentMediaDeviceInfoList(v *[]types.MediaDeviceInfo, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.MediaDeviceInfo + if *v == nil { + sv = make([]types.MediaDeviceInfo, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("item", t.Name.Local): + var col types.MediaDeviceInfo + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsEc2query_deserializeDocumentMediaDeviceInfo(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsEc2query_deserializeDocumentMediaDeviceInfoListUnwrapped(v *[]types.MediaDeviceInfo, decoder smithyxml.NodeDecoder) error { + var sv []types.MediaDeviceInfo + if *v == nil { + sv = make([]types.MediaDeviceInfo, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.MediaDeviceInfo + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsEc2query_deserializeDocumentMediaDeviceInfo(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsEc2query_deserializeDocumentMediaDeviceMemoryInfo(v **types.MediaDeviceMemoryInfo, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.MediaDeviceMemoryInfo + if *v == nil { + 
sv = &types.MediaDeviceMemoryInfo{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("sizeInMiB", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.SizeInMiB = ptr.Int32(int32(i64)) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsEc2query_deserializeDocumentMemoryGiBPerVCpu(v **types.MemoryGiBPerVCpu, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -103964,6 +104241,334 @@ func awsEc2query_deserializeDocumentNetworkNodesListUnwrapped(v *[]string, decod *v = sv return nil } +func awsEc2query_deserializeDocumentNeuronDeviceCoreInfo(v **types.NeuronDeviceCoreInfo, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.NeuronDeviceCoreInfo + if *v == nil { + sv = &types.NeuronDeviceCoreInfo{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("count", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.Count = ptr.Int32(int32(i64)) + } + + case strings.EqualFold("version", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.Version = ptr.Int32(int32(i64)) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsEc2query_deserializeDocumentNeuronDeviceInfo(v **types.NeuronDeviceInfo, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.NeuronDeviceInfo + if *v == nil { + sv = &types.NeuronDeviceInfo{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("coreInfo", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsEc2query_deserializeDocumentNeuronDeviceCoreInfo(&sv.CoreInfo, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("count", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.Count = ptr.Int32(int32(i64)) + } + + case strings.EqualFold("memoryInfo", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := 
awsEc2query_deserializeDocumentNeuronDeviceMemoryInfo(&sv.MemoryInfo, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("name", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Name = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsEc2query_deserializeDocumentNeuronDeviceInfoList(v *[]types.NeuronDeviceInfo, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.NeuronDeviceInfo + if *v == nil { + sv = make([]types.NeuronDeviceInfo, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("item", t.Name.Local): + var col types.NeuronDeviceInfo + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsEc2query_deserializeDocumentNeuronDeviceInfo(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsEc2query_deserializeDocumentNeuronDeviceInfoListUnwrapped(v *[]types.NeuronDeviceInfo, decoder smithyxml.NodeDecoder) error { + var sv []types.NeuronDeviceInfo + if *v == nil { + sv = make([]types.NeuronDeviceInfo, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.NeuronDeviceInfo + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsEc2query_deserializeDocumentNeuronDeviceInfo(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsEc2query_deserializeDocumentNeuronDeviceMemoryInfo(v **types.NeuronDeviceMemoryInfo, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.NeuronDeviceMemoryInfo + if *v == nil { + sv = &types.NeuronDeviceMemoryInfo{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("sizeInMiB", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.SizeInMiB = ptr.Int32(int32(i64)) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsEc2query_deserializeDocumentNeuronInfo(v **types.NeuronInfo, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.NeuronInfo + if *v == nil { + sv = &types.NeuronInfo{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { 
+ case strings.EqualFold("neuronDevices", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsEc2query_deserializeDocumentNeuronDeviceInfoList(&sv.NeuronDevices, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("totalNeuronDeviceMemoryInMiB", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.TotalNeuronDeviceMemoryInMiB = ptr.Int32(int32(i64)) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsEc2query_deserializeDocumentNitroTpmInfo(v **types.NitroTpmInfo, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) diff --git a/service/ec2/types/types.go b/service/ec2/types/types.go index 45a6e23810c..f2aba2a5c5d 100644 --- a/service/ec2/types/types.go +++ b/service/ec2/types/types.go @@ -7380,12 +7380,18 @@ type InstanceTypeInfo struct { // in the Amazon EC2 User Guide. InstanceType InstanceType + // Describes the media accelerator settings for the instance type. + MediaAcceleratorInfo *MediaAcceleratorInfo + // Describes the memory for the instance type. MemoryInfo *MemoryInfo // Describes the network settings for the instance type. NetworkInfo *NetworkInfo + // Describes the Neuron accelerator settings for the instance type. + NeuronInfo *NeuronInfo + // Indicates whether Nitro Enclaves is supported. NitroEnclavesSupport NitroEnclavesSupport @@ -10080,6 +10086,46 @@ type ManagedPrefixList struct { noSmithyDocumentSerde } +// Describes the media accelerators for the instance type. +type MediaAcceleratorInfo struct { + + // Describes the media accelerators for the instance type. + Accelerators []MediaDeviceInfo + + // The total size of the memory for the media accelerators for the instance type, + // in MiB. + TotalMediaMemoryInMiB *int32 + + noSmithyDocumentSerde +} + +// Describes the media accelerators for the instance type. +type MediaDeviceInfo struct { + + // The number of media accelerators for the instance type. + Count *int32 + + // The manufacturer of the media accelerator. + Manufacturer *string + + // Describes the memory available to the media accelerator. + MemoryInfo *MediaDeviceMemoryInfo + + // The name of the media accelerator. + Name *string + + noSmithyDocumentSerde +} + +// Describes the memory available to the media accelerator. +type MediaDeviceMemoryInfo struct { + + // The size of the memory available to each media accelerator, in MiB. + SizeInMiB *int32 + + noSmithyDocumentSerde +} + // The minimum and maximum amount of memory per vCPU, in GiB. type MemoryGiBPerVCpu struct { @@ -11195,6 +11241,58 @@ type NetworkInterfacePrivateIpAddress struct { noSmithyDocumentSerde } +// Describes the cores available to the neuron accelerator. +type NeuronDeviceCoreInfo struct { + + // The number of cores available to the neuron accelerator. + Count *int32 + + // The version of the neuron accelerator. + Version *int32 + + noSmithyDocumentSerde +} + +// Describes the neuron accelerators for the instance type. +type NeuronDeviceInfo struct { + + // Describes the cores available to each neuron accelerator. + CoreInfo *NeuronDeviceCoreInfo + + // The number of neuron accelerators for the instance type. 
+ Count *int32 + + // Describes the memory available to each neuron accelerator. + MemoryInfo *NeuronDeviceMemoryInfo + + // The name of the neuron accelerator. + Name *string + + noSmithyDocumentSerde +} + +// Describes the memory available to the neuron accelerator. +type NeuronDeviceMemoryInfo struct { + + // The size of the memory available to the neuron accelerator, in MiB. + SizeInMiB *int32 + + noSmithyDocumentSerde +} + +// Describes the neuron accelerators for the instance type. +type NeuronInfo struct { + + // Describes the neuron accelerators for the instance type. + NeuronDevices []NeuronDeviceInfo + + // The total size of the memory for the neuron accelerators for the instance type, + // in MiB. + TotalNeuronDeviceMemoryInMiB *int32 + + noSmithyDocumentSerde +} + // Describes a DHCP configuration option. type NewDhcpConfiguration struct { @@ -14680,7 +14778,10 @@ type SpotFleetRequestConfigData struct { // The launch specifications for the Spot Fleet request. If you specify // LaunchSpecifications , you can't specify LaunchTemplateConfigs . If you include - // On-Demand capacity in your request, you must use LaunchTemplateConfigs . + // On-Demand capacity in your request, you must use LaunchTemplateConfigs . If an + // AMI specified in a launch specification is deregistered or disabled, no new + // instances can be launched from the AMI. For fleets of type maintain , the target + // capacity will not be maintained. LaunchSpecifications []SpotFleetLaunchSpecification // The launch template and overrides. If you specify LaunchTemplateConfigs , you diff --git a/service/emrcontainers/internal/endpoints/endpoints.go b/service/emrcontainers/internal/endpoints/endpoints.go index 4c7f065bd2d..ec1109a4e4b 100644 --- a/service/emrcontainers/internal/endpoints/endpoints.go +++ b/service/emrcontainers/internal/endpoints/endpoints.go @@ -184,6 +184,9 @@ var defaultPartitions = endpoints.Partitions{ endpoints.EndpointKey{ Region: "eu-south-1", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-south-2", + }: endpoints.Endpoint{}, endpoints.EndpointKey{ Region: "eu-west-1", }: endpoints.Endpoint{}, diff --git a/service/identitystore/internal/endpoints/endpoints.go b/service/identitystore/internal/endpoints/endpoints.go index 637c5141462..663341b10ca 100644 --- a/service/identitystore/internal/endpoints/endpoints.go +++ b/service/identitystore/internal/endpoints/endpoints.go @@ -157,6 +157,9 @@ var defaultPartitions = endpoints.Partitions{ endpoints.EndpointKey{ Region: "ap-south-1", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-south-2", + }: endpoints.Endpoint{}, endpoints.EndpointKey{ Region: "ap-southeast-1", }: endpoints.Endpoint{}, @@ -184,6 +187,9 @@ var defaultPartitions = endpoints.Partitions{ endpoints.EndpointKey{ Region: "eu-south-1", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-south-2", + }: endpoints.Endpoint{}, endpoints.EndpointKey{ Region: "eu-west-1", }: endpoints.Endpoint{}, diff --git a/service/kinesisanalytics/internal/endpoints/endpoints.go b/service/kinesisanalytics/internal/endpoints/endpoints.go index 514f747fc6f..532b7364d77 100644 --- a/service/kinesisanalytics/internal/endpoints/endpoints.go +++ b/service/kinesisanalytics/internal/endpoints/endpoints.go @@ -175,6 +175,9 @@ var defaultPartitions = endpoints.Partitions{ endpoints.EndpointKey{ Region: "ca-central-1", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ca-west-1", + }: endpoints.Endpoint{}, endpoints.EndpointKey{ Region: "eu-central-1", }: 
endpoints.Endpoint{}, diff --git a/service/kinesisanalyticsv2/internal/endpoints/endpoints.go b/service/kinesisanalyticsv2/internal/endpoints/endpoints.go index 8ce004efae9..f5ceff5f73c 100644 --- a/service/kinesisanalyticsv2/internal/endpoints/endpoints.go +++ b/service/kinesisanalyticsv2/internal/endpoints/endpoints.go @@ -175,6 +175,9 @@ var defaultPartitions = endpoints.Partitions{ endpoints.EndpointKey{ Region: "ca-central-1", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ca-west-1", + }: endpoints.Endpoint{}, endpoints.EndpointKey{ Region: "eu-central-1", }: endpoints.Endpoint{}, diff --git a/service/kinesisanalyticsv2/types/enums.go b/service/kinesisanalyticsv2/types/enums.go index 7837c66cd17..83ce65739ca 100644 --- a/service/kinesisanalyticsv2/types/enums.go +++ b/service/kinesisanalyticsv2/types/enums.go @@ -225,6 +225,7 @@ const ( RuntimeEnvironmentZeppelinFlink20 RuntimeEnvironment = "ZEPPELIN-FLINK-2_0" RuntimeEnvironmentFlink115 RuntimeEnvironment = "FLINK-1_15" RuntimeEnvironmentZeppelinFlink30 RuntimeEnvironment = "ZEPPELIN-FLINK-3_0" + RuntimeEnvironmentFlink118 RuntimeEnvironment = "FLINK-1_18" ) // Values returns all known values for RuntimeEnvironment. Note that this can be @@ -241,6 +242,7 @@ func (RuntimeEnvironment) Values() []RuntimeEnvironment { "ZEPPELIN-FLINK-2_0", "FLINK-1_15", "ZEPPELIN-FLINK-3_0", + "FLINK-1_18", } } diff --git a/service/kinesisanalyticsv2/types/types.go b/service/kinesisanalyticsv2/types/types.go index 899c9c34221..09c2658d693 100644 --- a/service/kinesisanalyticsv2/types/types.go +++ b/service/kinesisanalyticsv2/types/types.go @@ -409,8 +409,8 @@ type CatalogConfigurationUpdate struct { // Describes an application's checkpointing configuration. Checkpointing is the // process of persisting application state for fault tolerance. For more -// information, see Checkpoints for Fault Tolerance (https://ci.apache.org/projects/flink/flink-docs-release-1.8/concepts/programming-model.html#checkpoints-for-fault-tolerance) -// in the Apache Flink Documentation (https://ci.apache.org/projects/flink/flink-docs-release-1.8/) +// information, see Checkpoints for Fault Tolerance (https://nightlies.apache.org/flink/flink-docs-release-1.18/docs/dev/datastream/fault-tolerance/checkpointing/#enabling-and-configuring-checkpointing) +// in the Apache Flink Documentation (https://nightlies.apache.org/flink/flink-docs-release-1.18/) // . type CheckpointConfiguration struct { @@ -442,8 +442,8 @@ type CheckpointConfiguration struct { // Describes the minimum time in milliseconds after a checkpoint operation // completes that a new checkpoint operation can start. If a checkpoint operation // takes longer than the CheckpointInterval , the application otherwise performs - // continual checkpoint operations. For more information, see Tuning Checkpointing (https://ci.apache.org/projects/flink/flink-docs-release-1.8/ops/state/large_state_tuning.html#tuning-checkpointing) - // in the Apache Flink Documentation (https://ci.apache.org/projects/flink/flink-docs-release-1.8/) + // continual checkpoint operations. For more information, see Tuning Checkpointing (https://nightlies.apache.org/flink/flink-docs-release-1.18/docs/ops/state/large_state_tuning/#tuning-checkpointing) + // in the Apache Flink Documentation (https://nightlies.apache.org/flink/flink-docs-release-1.18/) // . 
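A minimal sketch of selecting the new FLINK-1_18 runtime value added above at application creation; the application name, role ARN, and client are placeholders:

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kinesisanalyticsv2"
	"github.com/aws/aws-sdk-go-v2/service/kinesisanalyticsv2/types"
)

func createFlink118App(ctx context.Context, client *kinesisanalyticsv2.Client) error {
	_, err := client.CreateApplication(ctx, &kinesisanalyticsv2.CreateApplicationInput{
		ApplicationName:      aws.String("example-app"),                            // placeholder
		ServiceExecutionRole: aws.String("arn:aws:iam::123456789012:role/example"), // placeholder
		RuntimeEnvironment:   types.RuntimeEnvironmentFlink118,                     // the new "FLINK-1_18" value
	})
	return err
}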
If CheckpointConfiguration.ConfigurationType is DEFAULT , the application will // use a MinPauseBetweenCheckpoints value of 5000, even if this value is set using // this API or in application code. @@ -778,8 +778,8 @@ type FlinkApplicationConfiguration struct { // Describes an application's checkpointing configuration. Checkpointing is the // process of persisting application state for fault tolerance. For more - // information, see Checkpoints for Fault Tolerance (https://ci.apache.org/projects/flink/flink-docs-release-1.8/concepts/programming-model.html#checkpoints-for-fault-tolerance) - // in the Apache Flink Documentation (https://ci.apache.org/projects/flink/flink-docs-release-1.8/) + // information, see Checkpoints for Fault Tolerance (https://nightlies.apache.org/flink/flink-docs-release-1.18/docs/dev/datastream/fault-tolerance/checkpointing/#enabling-and-configuring-checkpointing) + // in the Apache Flink Documentation (https://nightlies.apache.org/flink/flink-docs-release-1.18/) // . CheckpointConfiguration *CheckpointConfiguration @@ -803,8 +803,8 @@ type FlinkApplicationConfigurationDescription struct { CheckpointConfigurationDescription *CheckpointConfigurationDescription // The job plan for an application. For more information about the job plan, see - // Jobs and Scheduling (https://ci.apache.org/projects/flink/flink-docs-release-1.8/internals/job_scheduling.html) - // in the Apache Flink Documentation (https://ci.apache.org/projects/flink/flink-docs-release-1.8/) + // Jobs and Scheduling (https://nightlies.apache.org/flink/flink-docs-release-1.18/internals/job_scheduling.html) + // in the Apache Flink Documentation (https://nightlies.apache.org/flink/flink-docs-release-1.18/) // . To retrieve the job plan for the application, use the // DescribeApplicationRequest$IncludeAdditionalDetails parameter of the // DescribeApplication operation. @@ -849,8 +849,8 @@ type FlinkRunConfiguration struct { // skip a state that cannot be mapped to the new program. This will happen if the // program is updated between snapshots to remove stateful parameters, and state // data in the snapshot no longer corresponds to valid application data. For more - // information, see Allowing Non-Restored State (https://ci.apache.org/projects/flink/flink-docs-release-1.8/ops/state/savepoints.html#allowing-non-restored-state) - // in the Apache Flink documentation (https://ci.apache.org/projects/flink/flink-docs-release-1.8/) + // information, see Allowing Non-Restored State (https://nightlies.apache.org/flink/flink-docs-release-1.18/docs/ops/state/savepoints/#allowing-non-restored-state) + // in the Apache Flink documentation (https://nightlies.apache.org/flink/flink-docs-release-1.18/) // . This value defaults to false . If you update your application without // specifying this parameter, AllowNonRestoredState will be set to false , even if // it was previously set to true . @@ -1592,8 +1592,8 @@ type OutputUpdate struct { // Describes parameters for how a Managed Service for Apache Flink application // executes multiple tasks simultaneously. For more information about parallelism, -// see Parallel Execution (https://ci.apache.org/projects/flink/flink-docs-release-1.8/dev/parallel.html) -// in the Apache Flink Documentation (https://ci.apache.org/projects/flink/flink-docs-release-1.8/) +// see Parallel Execution (https://nightlies.apache.org/flink/flink-docs-release-1.18/dev/parallel.html) +// in the Apache Flink Documentation (https://nightlies.apache.org/flink/flink-docs-release-1.18/) // . 
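A sketch of the checkpointing behavior described above, assuming the surrounding application-configuration plumbing: with ConfigurationType DEFAULT the service overrides these fields (MinPauseBetweenCheckpoints is pinned to 5000 ms), so CUSTOM is required for explicit values to take effect.

package example

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kinesisanalyticsv2/types"
)

func customCheckpointing() *types.FlinkApplicationConfiguration {
	return &types.FlinkApplicationConfiguration{
		CheckpointConfiguration: &types.CheckpointConfiguration{
			ConfigurationType:          types.ConfigurationTypeCustom, // DEFAULT would ignore the fields below
			CheckpointingEnabled:       aws.Bool(true),
			CheckpointInterval:         aws.Int64(60000), // illustrative 60 s interval
			MinPauseBetweenCheckpoints: aws.Int64(5000),
		},
	}
}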
type ParallelismConfiguration struct { diff --git a/service/s3/api_op_AbortMultipartUpload.go b/service/s3/api_op_AbortMultipartUpload.go index f0f5464d0c3..c71060e0822 100644 --- a/service/s3/api_op_AbortMultipartUpload.go +++ b/service/s3/api_op_AbortMultipartUpload.go @@ -75,7 +75,7 @@ type AbortMultipartUploadInput struct { // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not // supported. Directory bucket names must be unique in the chosen Availability // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for - // example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket + // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) // in the Amazon S3 User Guide. Access points - When you use this action with an // access point, you must provide the alias of the access point in place of the diff --git a/service/s3/api_op_CompleteMultipartUpload.go b/service/s3/api_op_CompleteMultipartUpload.go index abe00fe527a..8f89d780eed 100644 --- a/service/s3/api_op_CompleteMultipartUpload.go +++ b/service/s3/api_op_CompleteMultipartUpload.go @@ -38,8 +38,8 @@ import ( // the request as appropriate). If the condition persists, the SDKs throw an // exception (or, for the SDKs that don't use exceptions, they return an error). // Note that if CompleteMultipartUpload fails, applications should be prepared to -// retry the failed requests. For more information, see Amazon S3 Error Best -// Practices (https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html) +// retry any failed requests (including 500 error responses). For more information, +// see Amazon S3 Error Best Practices (https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html) // . You can't use Content-Type: application/x-www-form-urlencoded for the // CompleteMultipartUpload requests. Also, if you don't provide a Content-Type // header, CompleteMultipartUpload can still return a 200 OK response. For more @@ -118,7 +118,7 @@ type CompleteMultipartUploadInput struct { // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not // supported. Directory bucket names must be unique in the chosen Availability // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for - // example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket + // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) // in the Amazon S3 User Guide. Access points - When you use this action with an // access point, you must provide the alias of the access point in place of the diff --git a/service/s3/api_op_CopyObject.go b/service/s3/api_op_CopyObject.go index 3e335574e74..c7990bab1de 100644 --- a/service/s3/api_op_CopyObject.go +++ b/service/s3/api_op_CopyObject.go @@ -31,9 +31,12 @@ import ( // endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) // in the Amazon S3 User Guide. Both the Region that you want to copy the object // from and the Region that you want to copy the object to must be enabled for your -// account. Amazon S3 transfer acceleration does not support cross-Region copies. 
-// If you request a cross-Region copy using a transfer acceleration endpoint, you -// get a 400 Bad Request error. For more information, see Transfer Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) +// account. For more information about how to enable a Region for your account, see +// Enable or disable a Region for standalone accounts (https://docs.aws.amazon.com/accounts/latest/reference/manage-acct-regions.html#manage-acct-regions-enable-standalone) +// in the Amazon Web Services Account Management Guide. Amazon S3 transfer +// acceleration does not support cross-Region copies. If you request a cross-Region +// copy using a transfer acceleration endpoint, you get a 400 Bad Request error. +// For more information, see Transfer Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) // . Authentication and authorization All CopyObject requests must be // authenticated and signed by using IAM credentials (access key ID and secret // access key for the IAM identities). All headers with the x-amz- prefix, @@ -51,7 +54,7 @@ import ( // - If the source object is in a general purpose bucket, you must have // s3:GetObject permission to read the source object that is being copied. // - If the destination bucket is a general purpose bucket, you must have -// s3:PubObject permission to write the object copy to the destination bucket. +// s3:PutObject permission to write the object copy to the destination bucket. // - Directory bucket permissions - You must have permissions in a bucket policy // or an IAM identity-based policy based on the source and destination bucket types // in a CopyObject operation. @@ -84,24 +87,26 @@ import ( // - If the error occurs during the copy operation, the error response is // embedded in the 200 OK response. For example, in a cross-region copy, you may // encounter throttling and receive a 200 OK response. For more information, see -// Resolve the Error 200 response when copying objects to Amazon S3 . The 200 OK -// status code means the copy was accepted, but it doesn't mean the copy is -// complete. Another example is when you disconnect from Amazon S3 before the copy -// is complete, Amazon S3 might cancel the copy and you may receive a 200 OK -// response. You must stay connected to Amazon S3 until the entire response is -// successfully received and processed. If you call this API operation directly, -// make sure to design your application to parse the content of the response and -// handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this -// condition. The SDKs detect the embedded error and apply error handling per your -// configuration settings (including automatically retrying the request as -// appropriate). If the condition persists, the SDKs throw an exception (or, for -// the SDKs that don't use exceptions, they return an error). +// Resolve the Error 200 response when copying objects to Amazon S3 (https://repost.aws/knowledge-center/s3-resolve-200-internalerror) +// . The 200 OK status code means the copy was accepted, but it doesn't mean the +// copy is complete. Another example is when you disconnect from Amazon S3 before +// the copy is complete, Amazon S3 might cancel the copy and you may receive a +// 200 OK response. You must stay connected to Amazon S3 until the entire +// response is successfully received and processed. 
If you call this API operation +// directly, make sure to design your application to parse the content of the +// response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs +// handle this condition. The SDKs detect the embedded error and apply error +// handling per your configuration settings (including automatically retrying the +// request as appropriate). If the condition persists, the SDKs throw an exception +// (or, for the SDKs that don't use exceptions, they return an error). // // Charge The copy request charge is based on the storage class and Region that // you specify for the destination object. The request can also result in a data // retrieval charge for the source if the source storage class bills for data -// retrieval. For pricing information, see Amazon S3 pricing (http://aws.amazon.com/s3/pricing/) -// . HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// retrieval. If the copy source is in a different region, the data transfer is +// billed to the copy source account. For pricing information, see Amazon S3 +// pricing (http://aws.amazon.com/s3/pricing/) . HTTP Host header syntax Directory +// buckets - The HTTP Host header syntax is // Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are // related to CopyObject : // - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) @@ -128,7 +133,7 @@ type CopyObjectInput struct { // the format Bucket_name.s3express-az_id.region.amazonaws.com . Path-style // requests are not supported. Directory bucket names must be unique in the chosen // Availability Zone. Bucket names must follow the format - // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 + // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 // ). For information about bucket naming restrictions, see Directory bucket // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) // in the Amazon S3 User Guide. Access points - When you use this action with an diff --git a/service/s3/api_op_CreateBucket.go b/service/s3/api_op_CreateBucket.go index 78963086962..b39244bcfe6 100644 --- a/service/s3/api_op_CreateBucket.go +++ b/service/s3/api_op_CreateBucket.go @@ -55,11 +55,18 @@ import ( // required. // - S3 Object Ownership - If your CreateBucket request includes the // x-amz-object-ownership header, then the s3:PutBucketOwnershipControls -// permission is required. If your CreateBucket request sets BucketOwnerEnforced -// for Amazon S3 Object Ownership and specifies a bucket ACL that provides access -// to an external Amazon Web Services account, your request fails with a 400 -// error and returns the InvalidBucketAcLWithObjectOwnership error code. For more -// information, see Setting Object Ownership on an existing bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-ownership-existing-bucket.html) +// permission is required. To set an ACL on a bucket as part of a CreateBucket +// request, you must explicitly set S3 Object Ownership for the bucket to a +// different value than the default, BucketOwnerEnforced . Additionally, if your +// desired bucket ACL grants public access, you must first create the bucket +// (without the bucket ACL) and then explicitly disable Block Public Access on the +// bucket before using PutBucketAcl to set the ACL. If you try to create a bucket +// with a public ACL, the request will fail. 
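A minimal CopyObject sketch, assuming placeholder bucket and key names and a pre-built client. The caller needs s3:GetObject on the source and s3:PutObject on the destination, and the SDK surfaces an error embedded in a 200 OK response as an ordinary err, per the note above:

package example

import (
	"context"
	"net/url"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func copyObject(ctx context.Context, client *s3.Client) error {
	// CopySource is "source-bucket/source-key" with the key URL-encoded.
	src := "amzn-s3-demo-source-bucket/" + url.PathEscape("source key.png")
	_, err := client.CopyObject(ctx, &s3.CopyObjectInput{
		Bucket:     aws.String("amzn-s3-demo-destination-bucket"),
		Key:        aws.String("destination-key.png"),
		CopySource: aws.String(src),
	})
	return err
}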
For the majority of modern use cases +// in S3, we recommend that you keep all Block Public Access settings enabled and +// keep ACLs disabled. If you would like to share data with users outside of your +// account, you can use bucket policies as needed. For more information, see +// Controlling ownership of objects and disabling ACLs for your bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// and Blocking public access to your Amazon S3 storage (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-control-block-public-access.html) // in the Amazon S3 User Guide. // - S3 Block Public Access - If your specific use case requires granting public // access to your S3 resources, you can disable Block Public Access. Specifically, @@ -115,7 +122,7 @@ type CreateBucketInput struct { // https://s3express-control.region_code.amazonaws.com/bucket-name . // Virtual-hosted-style requests aren't supported. Directory bucket names must be // unique in the chosen Availability Zone. Bucket names must also follow the format - // bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 + // bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 // ). For information about bucket naming restrictions, see Directory bucket // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) // in the Amazon S3 User Guide diff --git a/service/s3/api_op_CreateMultipartUpload.go b/service/s3/api_op_CreateMultipartUpload.go index 07954e89ef9..c083c32d8ef 100644 --- a/service/s3/api_op_CreateMultipartUpload.go +++ b/service/s3/api_op_CreateMultipartUpload.go @@ -169,7 +169,7 @@ type CreateMultipartUploadInput struct { // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not // supported. Directory bucket names must be unique in the chosen Availability // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for - // example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket + // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) // in the Amazon S3 User Guide. Access points - When you use this action with an // access point, you must provide the alias of the access point in place of the diff --git a/service/s3/api_op_DeleteBucket.go b/service/s3/api_op_DeleteBucket.go index cd61f94b427..30e1381bd67 100644 --- a/service/s3/api_op_DeleteBucket.go +++ b/service/s3/api_op_DeleteBucket.go @@ -64,7 +64,7 @@ type DeleteBucketInput struct { // format https://s3express-control.region_code.amazonaws.com/bucket-name . // Virtual-hosted-style requests aren't supported. Directory bucket names must be // unique in the chosen Availability Zone. Bucket names must also follow the format - // bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 + // bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 // ). 
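For the rare case that genuinely needs a public bucket ACL, a sketch of the sequence described above; the bucket name is a placeholder and the bucket Region is assumed to be the client's default (otherwise set CreateBucketConfiguration):

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func createBucketThenAcl(ctx context.Context, client *s3.Client) error {
	bucket := aws.String("amzn-s3-demo-bucket")
	// 1. Create the bucket with ACLs enabled (non-default ownership) but no ACL.
	if _, err := client.CreateBucket(ctx, &s3.CreateBucketInput{
		Bucket:          bucket,
		ObjectOwnership: types.ObjectOwnershipBucketOwnerPreferred,
	}); err != nil {
		return err
	}
	// 2. Explicitly lift Block Public Access on this bucket.
	if _, err := client.DeletePublicAccessBlock(ctx, &s3.DeletePublicAccessBlockInput{Bucket: bucket}); err != nil {
		return err
	}
	// 3. Only then set the public ACL in a separate call.
	_, err := client.PutBucketAcl(ctx, &s3.PutBucketAclInput{
		Bucket: bucket,
		ACL:    types.BucketCannedACLPublicRead,
	})
	return err
}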
For information about bucket naming restrictions, see Directory bucket // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) // in the Amazon S3 User Guide diff --git a/service/s3/api_op_DeleteBucketPolicy.go b/service/s3/api_op_DeleteBucketPolicy.go index e8ecb07e661..b8e1f56a143 100644 --- a/service/s3/api_op_DeleteBucketPolicy.go +++ b/service/s3/api_op_DeleteBucketPolicy.go @@ -73,7 +73,7 @@ type DeleteBucketPolicyInput struct { // https://s3express-control.region_code.amazonaws.com/bucket-name . // Virtual-hosted-style requests aren't supported. Directory bucket names must be // unique in the chosen Availability Zone. Bucket names must also follow the format - // bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 + // bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 // ). For information about bucket naming restrictions, see Directory bucket // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) // in the Amazon S3 User Guide diff --git a/service/s3/api_op_DeleteObject.go b/service/s3/api_op_DeleteObject.go index 9eb33d7e252..c1e5ff73a93 100644 --- a/service/s3/api_op_DeleteObject.go +++ b/service/s3/api_op_DeleteObject.go @@ -16,13 +16,25 @@ import ( // Removes an object from a bucket. The behavior depends on the bucket's // versioning state: // -// - If versioning is enabled, the operation removes the null version (if there -// is one) of an object and inserts a delete marker, which becomes the latest -// version of the object. If there isn't a null version, Amazon S3 does not remove -// any objects but will still respond that the command was successful. +// - If bucket versioning is not enabled, the operation permanently deletes the +// object. // -// - If versioning is suspended or not enabled, the operation permanently -// deletes the object. +// - If bucket versioning is enabled, the operation inserts a delete marker, +// which becomes the current version of the object. To permanently delete an object +// in a versioned bucket, you must include the object’s versionId in the request. +// For more information about versioning-enabled buckets, see Deleting object +// versions from a versioning-enabled bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeletingObjectVersions.html) +// . +// +// - If bucket versioning is suspended, the operation removes the object that +// has a null versionId , if there is one, and inserts a delete marker that +// becomes the current version of the object. If there isn't an object with a null +// versionId , and all versions of the object have a versionId , Amazon S3 does +// not remove the object and only inserts a delete marker. To permanently delete an +// object that has a versionId , you must include the object’s versionId in the +// request. For more information about versioning-suspended buckets, see +// Deleting objects from versioning-suspended buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeletingObjectsfromVersioningSuspendedBuckets.html) +// . // // - Directory buckets - S3 Versioning isn't enabled and supported for directory // buckets. For this API operation, only the null value of the version ID is @@ -59,7 +71,8 @@ import ( // - s3:DeleteObject - To delete an object from a bucket, you must always have // the s3:DeleteObject permission. 
// - s3:DeleteObjectVersion - To delete a specific version of an object from a -// versiong-enabled bucket, you must have the s3:DeleteObjectVersion permission. +// versioning-enabled bucket, you must have the s3:DeleteObjectVersion +// permission. // - Directory bucket permissions - To grant access to this API operation on a // directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) // API operation for session-based authorization. Specifically, you grant the @@ -100,7 +113,7 @@ type DeleteObjectInput struct { // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not // supported. Directory bucket names must be unique in the chosen Availability // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for - // example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket + // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) // in the Amazon S3 User Guide. Access points - When you use this action with an // access point, you must provide the alias of the access point in place of the diff --git a/service/s3/api_op_DeleteObjects.go b/service/s3/api_op_DeleteObjects.go index 2d0cd7bfb34..05f82cf7566 100644 --- a/service/s3/api_op_DeleteObjects.go +++ b/service/s3/api_op_DeleteObjects.go @@ -107,7 +107,7 @@ type DeleteObjectsInput struct { // requests in the format Bucket_name.s3express-az_id.region.amazonaws.com . // Path-style requests are not supported. Directory bucket names must be unique in // the chosen Availability Zone. Bucket names must follow the format - // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 + // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 // ). For information about bucket naming restrictions, see Directory bucket // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) // in the Amazon S3 User Guide. Access points - When you use this action with an diff --git a/service/s3/api_op_GetBucketLifecycleConfiguration.go b/service/s3/api_op_GetBucketLifecycleConfiguration.go index 9a9c35a1070..ddcbbe09674 100644 --- a/service/s3/api_op_GetBucketLifecycleConfiguration.go +++ b/service/s3/api_op_GetBucketLifecycleConfiguration.go @@ -16,13 +16,17 @@ import ( // This operation is not supported by directory buckets. Bucket lifecycle // configuration now supports specifying a lifecycle rule using an object key name -// prefix, one or more object tags, or a combination of both. Accordingly, this -// section describes the latest API. The response describes the new filter element -// that you can use to specify a filter to select a subset of objects to which the -// rule applies. If you are using a previous version of the lifecycle -// configuration, it still works. For the earlier action, see GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html) -// . Returns the lifecycle configuration information set on the bucket. For -// information about lifecycle configuration, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) +// prefix, one or more object tags, object size, or any combination of these. +// Accordingly, this section describes the latest API. 
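A sketch of the permanent-delete path on a versioning-enabled bucket described above: passing VersionId (which requires s3:DeleteObjectVersion) removes that version, while omitting it only inserts a delete marker. Names are placeholders.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func deleteObjectVersion(ctx context.Context, client *s3.Client, versionID string) error {
	_, err := client.DeleteObject(ctx, &s3.DeleteObjectInput{
		Bucket:    aws.String("amzn-s3-demo-bucket"), // placeholder
		Key:       aws.String("example-key"),         // placeholder
		VersionId: aws.String(versionID),             // omit to create a delete marker instead
	})
	return err
}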
The previous version of the
+// API supported filtering based only on an object key name prefix, which is
+// supported for backward compatibility. For the related API description, see
+// GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html)
+// . The response describes the new filter element that you can use to specify a
+// filter to select a subset of objects to which the rule applies. If you are
+// using a previous version of the lifecycle configuration, it still works.
+// Returns the lifecycle configuration information set on the bucket. For
+// information about lifecycle configuration, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html)
 // . To use this operation, you must have permission to perform the
 // s3:GetLifecycleConfiguration action. The bucket owner has this permission, by
 // default. The bucket owner can grant this permission to others. For more
diff --git a/service/s3/api_op_GetBucketPolicy.go b/service/s3/api_op_GetBucketPolicy.go
index 7f64c3c4d40..c2f98f93695 100644
--- a/service/s3/api_op_GetBucketPolicy.go
+++ b/service/s3/api_op_GetBucketPolicy.go
@@ -76,7 +76,7 @@ type GetBucketPolicyInput struct {
 // format https://s3express-control.region_code.amazonaws.com/bucket-name .
 // Virtual-hosted-style requests aren't supported. Directory bucket names must be
 // unique in the chosen Availability Zone. Bucket names must also follow the format
-// bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3
+// bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
 // ). For information about bucket naming restrictions, see Directory bucket
 // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
 // in the Amazon S3 User Guide Access points - When you use this API operation with
diff --git a/service/s3/api_op_GetObject.go b/service/s3/api_op_GetObject.go
index adc3849e6ed..a64f5964e11 100644
--- a/service/s3/api_op_GetObject.go
+++ b/service/s3/api_op_GetObject.go
@@ -132,7 +132,7 @@ type GetObjectInput struct {
 // the format Bucket_name.s3express-az_id.region.amazonaws.com . Path-style
 // requests are not supported. Directory bucket names must be unique in the chosen
 // Availability Zone. Bucket names must follow the format
-// bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3
+// bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
 // ). For information about bucket naming restrictions, see Directory bucket
 // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
 // in the Amazon S3 User Guide. Access points - When you use this action with an
diff --git a/service/s3/api_op_GetObjectAttributes.go b/service/s3/api_op_GetObjectAttributes.go
index 98bd9ca85a6..dd1b9257cdb 100644
--- a/service/s3/api_op_GetObjectAttributes.go
+++ b/service/s3/api_op_GetObjectAttributes.go
@@ -126,7 +126,7 @@ type GetObjectAttributesInput struct {
 // requests in the format Bucket_name.s3express-az_id.region.amazonaws.com .
 // Path-style requests are not supported. Directory bucket names must be unique in
 // the chosen Availability Zone.
Bucket names must follow the format - // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 + // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 // ). For information about bucket naming restrictions, see Directory bucket // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) // in the Amazon S3 User Guide. Access points - When you use this action with an diff --git a/service/s3/api_op_HeadBucket.go b/service/s3/api_op_HeadBucket.go index 3b4f933a874..5f5958916a9 100644 --- a/service/s3/api_op_HeadBucket.go +++ b/service/s3/api_op_HeadBucket.go @@ -22,9 +22,9 @@ import ( // you have permission to access it. If the bucket does not exist or you do not // have permission to access it, the HEAD request returns a generic 400 Bad Request // , 403 Forbidden or 404 Not Found code. A message body is not included, so you -// cannot determine the exception beyond these error codes. Directory buckets - You -// must make requests for this API operation to the Zonal endpoint. These endpoints -// support virtual-hosted-style requests in the format +// cannot determine the exception beyond these HTTP response codes. Directory +// buckets - You must make requests for this API operation to the Zonal endpoint. +// These endpoints support virtual-hosted-style requests in the format // https://bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests // are not supported. For more information, see Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) // in the Amazon S3 User Guide. Authentication and authorization All HeadBucket @@ -77,7 +77,7 @@ type HeadBucketInput struct { // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not // supported. Directory bucket names must be unique in the chosen Availability // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for - // example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket + // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) // in the Amazon S3 User Guide. Access points - When you use this action with an // access point, you must provide the alias of the access point in place of the @@ -127,7 +127,7 @@ type HeadBucketOutput struct { // The name of the location where the bucket will be created. For directory // buckets, the AZ ID of the Availability Zone where the bucket is created. An - // example AZ ID value is usw2-az2 . This functionality is only supported by + // example AZ ID value is usw2-az1 . This functionality is only supported by // directory buckets. BucketLocationName *string diff --git a/service/s3/api_op_HeadObject.go b/service/s3/api_op_HeadObject.go index bc83debb27b..5b7e9b6c351 100644 --- a/service/s3/api_op_HeadObject.go +++ b/service/s3/api_op_HeadObject.go @@ -122,7 +122,7 @@ type HeadObjectInput struct { // requests in the format Bucket_name.s3express-az_id.region.amazonaws.com . // Path-style requests are not supported. Directory bucket names must be unique in // the chosen Availability Zone. Bucket names must follow the format - // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 + // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 // ). 
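Because HeadBucket returns no message body, the HTTP status code is the only signal to branch on, as noted above; a sketch using the SDK's wrapped response error (bucket name is a placeholder):

package example

import (
	"context"
	"errors"

	"github.com/aws/aws-sdk-go-v2/aws"
	awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func bucketStatus(ctx context.Context, client *s3.Client) (int, error) {
	_, err := client.HeadBucket(ctx, &s3.HeadBucketInput{Bucket: aws.String("amzn-s3-demo-bucket")})
	if err == nil {
		return 200, nil
	}
	var re *awshttp.ResponseError
	if errors.As(err, &re) {
		return re.HTTPStatusCode(), err // 400, 403, or 404 per the doc above
	}
	return 0, err
}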
For information about bucket naming restrictions, see Directory bucket // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) // in the Amazon S3 User Guide. Access points - When you use this action with an diff --git a/service/s3/api_op_ListMultipartUploads.go b/service/s3/api_op_ListMultipartUploads.go index b8ccff8c209..183773651a0 100644 --- a/service/s3/api_op_ListMultipartUploads.go +++ b/service/s3/api_op_ListMultipartUploads.go @@ -101,7 +101,7 @@ type ListMultipartUploadsInput struct { // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not // supported. Directory bucket names must be unique in the chosen Availability // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for - // example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket + // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) // in the Amazon S3 User Guide. Access points - When you use this action with an // access point, you must provide the alias of the access point in place of the @@ -258,8 +258,12 @@ type ListMultipartUploadsOutput struct { // request. This functionality is not supported for directory buckets. RequestCharged types.RequestCharged - // Upload ID after which listing began. This functionality is not supported for - // directory buckets. + // Together with key-marker, specifies the multipart upload after which listing + // should begin. If key-marker is not specified, the upload-id-marker parameter is + // ignored. Otherwise, any multipart uploads for a key equal to the key-marker + // might be included in the list only if they have an upload ID lexicographically + // greater than the specified upload-id-marker . This functionality is not + // supported for directory buckets. UploadIdMarker *string // Container for elements related to a particular multipart upload. A response can diff --git a/service/s3/api_op_ListObjects.go b/service/s3/api_op_ListObjects.go index 2a83f60d719..9a3bf3e0244 100644 --- a/service/s3/api_op_ListObjects.go +++ b/service/s3/api_op_ListObjects.go @@ -48,7 +48,7 @@ type ListObjectsInput struct { // requests in the format Bucket_name.s3express-az_id.region.amazonaws.com . // Path-style requests are not supported. Directory bucket names must be unique in // the chosen Availability Zone. Bucket names must follow the format - // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 + // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 // ). For information about bucket naming restrictions, see Directory bucket // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) // in the Amazon S3 User Guide. Access points - When you use this action with an @@ -141,7 +141,9 @@ type ListObjectsOutput struct { // MaxKeys value. Delimiter *string - // Encoding type used by Amazon S3 to encode object keys in the response. + // Encoding type used by Amazon S3 to encode object keys in the response. If using + // url , non-ASCII characters used in an object's key name will be URL encoded. For + // example, the object test_file(3).png will appear as test_file%283%29.png. 
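A pagination sketch for the key-marker/upload-id-marker pairing described above; the bucket is a placeholder, and the pointer-typed IsTruncated assumes the SDK generation this patch targets:

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func listAllUploads(ctx context.Context, client *s3.Client) error {
	in := &s3.ListMultipartUploadsInput{Bucket: aws.String("amzn-s3-demo-bucket")}
	for {
		out, err := client.ListMultipartUploads(ctx, in)
		if err != nil {
			return err
		}
		// ... process out.Uploads ...
		if out.IsTruncated == nil || !*out.IsTruncated {
			return nil
		}
		// Feed both markers back; upload-id-marker is ignored without key-marker.
		in.KeyMarker = out.NextKeyMarker
		in.UploadIdMarker = out.NextUploadIdMarker
	}
}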
EncodingType types.EncodingType // A flag that indicates whether Amazon S3 returned all of the results that diff --git a/service/s3/api_op_ListObjectsV2.go b/service/s3/api_op_ListObjectsV2.go index 4c59d09f058..ee09d3cbde6 100644 --- a/service/s3/api_op_ListObjectsV2.go +++ b/service/s3/api_op_ListObjectsV2.go @@ -84,7 +84,7 @@ type ListObjectsV2Input struct { // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not // supported. Directory bucket names must be unique in the chosen Availability // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for - // example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket + // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) // in the Amazon S3 User Guide. Access points - When you use this action with an // access point, you must provide the alias of the access point in place of the @@ -122,7 +122,9 @@ type ListObjectsV2Input struct { // in the Amazon S3 User Guide. Delimiter *string - // Encoding type used by Amazon S3 to encode object keys in the response. + // Encoding type used by Amazon S3 to encode object keys in the response. If using + // url , non-ASCII characters used in an object's key name will be URL encoded. For + // example, the object test_file(3).png will appear as test_file%283%29.png. EncodingType types.EncodingType // The account ID of the expected bucket owner. If the account ID that you provide diff --git a/service/s3/api_op_ListParts.go b/service/s3/api_op_ListParts.go index 22bd1692d31..3f3946d935c 100644 --- a/service/s3/api_op_ListParts.go +++ b/service/s3/api_op_ListParts.go @@ -86,7 +86,7 @@ type ListPartsInput struct { // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not // supported. Directory bucket names must be unique in the chosen Availability // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for - // example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket + // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) // in the Amazon S3 User Guide. Access points - When you use this action with an // access point, you must provide the alias of the access point in place of the @@ -225,9 +225,8 @@ type ListPartsOutput struct { // returned as the object owner for all the parts. Owner *types.Owner - // When a list is truncated, this element specifies the last part in the list, as - // well as the value to use for the part-number-marker request parameter in a - // subsequent request. + // Specifies the part after which listing should begin. Only parts with higher + // part numbers will be listed. PartNumberMarker *string // Container for elements related to a particular part. A response can contain diff --git a/service/s3/api_op_PutBucketLifecycleConfiguration.go b/service/s3/api_op_PutBucketLifecycleConfiguration.go index 143b68cf584..88096fdd132 100644 --- a/service/s3/api_op_PutBucketLifecycleConfiguration.go +++ b/service/s3/api_op_PutBucketLifecycleConfiguration.go @@ -22,21 +22,19 @@ import ( // lifecycle configuration. 
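A sketch of requesting URL-encoded keys and decoding them client side, per the EncodingType note above; url.QueryUnescape is used on the assumption that S3's encoding is query-style (it also folds "+" back to a space):

package example

import (
	"context"
	"fmt"
	"net/url"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func listDecodedKeys(ctx context.Context, client *s3.Client) error {
	out, err := client.ListObjectsV2(ctx, &s3.ListObjectsV2Input{
		Bucket:       aws.String("amzn-s3-demo-bucket"), // placeholder
		EncodingType: types.EncodingTypeUrl,
	})
	if err != nil {
		return err
	}
	for _, obj := range out.Contents {
		key, err := url.QueryUnescape(aws.ToString(obj.Key)) // "test_file%283%29.png" -> "test_file(3).png"
		if err != nil {
			return err
		}
		fmt.Println(key)
	}
	return nil
}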
For information about lifecycle configuration, see // Managing your storage lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html) // . Bucket lifecycle configuration now supports specifying a lifecycle rule using -// an object key name prefix, one or more object tags, or a combination of both. -// Accordingly, this section describes the latest API. The previous version of the -// API supported filtering based only on an object key name prefix, which is -// supported for backward compatibility. For the related API description, see -// PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html) +// an object key name prefix, one or more object tags, object size, or any +// combination of these. Accordingly, this section describes the latest API. The +// previous version of the API supported filtering based only on an object key name +// prefix, which is supported for backward compatibility. For the related API +// description, see PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html) // . Rules You specify the lifecycle configuration in your request body. The // lifecycle configuration is specified as XML consisting of one or more rules. An // Amazon S3 Lifecycle configuration can have up to 1,000 rules. This limit is not // adjustable. Each rule consists of the following: -// // - A filter identifying a subset of objects to which the rule applies. The -// filter can be based on a key name prefix, object tags, or a combination of both. -// +// filter can be based on a key name prefix, object tags, object size, or any +// combination of these. // - A status indicating whether the rule is in effect. -// // - One or more lifecycle transition and expiration actions that you want // Amazon S3 to perform on the objects identified by the filter. If the state of // your bucket is versioning-enabled or versioning-suspended, you can have many diff --git a/service/s3/api_op_PutBucketPolicy.go b/service/s3/api_op_PutBucketPolicy.go index cf6dbec6e06..88e3f2633fe 100644 --- a/service/s3/api_op_PutBucketPolicy.go +++ b/service/s3/api_op_PutBucketPolicy.go @@ -79,7 +79,7 @@ type PutBucketPolicyInput struct { // https://s3express-control.region_code.amazonaws.com/bucket-name . // Virtual-hosted-style requests aren't supported. Directory bucket names must be // unique in the chosen Availability Zone. Bucket names must also follow the format - // bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 + // bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 // ). For information about bucket naming restrictions, see Directory bucket // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) // in the Amazon S3 User Guide diff --git a/service/s3/api_op_PutObject.go b/service/s3/api_op_PutObject.go index 57e1a5470fc..d57e0026ef7 100644 --- a/service/s3/api_op_PutObject.go +++ b/service/s3/api_op_PutObject.go @@ -111,7 +111,7 @@ type PutObjectInput struct { // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not // supported. Directory bucket names must be unique in the chosen Availability // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for - // example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket + // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). 
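A single-rule sketch of the filter shapes listed above, using a prefix filter; tag, object-size, and And filters substitute the other LifecycleRuleFilter member types. Names and the 30-day window are placeholders, and the pointer helpers assume the SDK generation this patch targets:

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func putLogExpiryRule(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketLifecycleConfiguration(ctx, &s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String("amzn-s3-demo-bucket"),
		LifecycleConfiguration: &types.BucketLifecycleConfiguration{
			Rules: []types.LifecycleRule{{
				ID:     aws.String("expire-logs"),
				Status: types.ExpirationStatusEnabled,
				// Exactly one of Prefix, Tag, ObjectSizeGreaterThan,
				// ObjectSizeLessThan, or And per rule.
				Filter:     &types.LifecycleRuleFilterMemberPrefix{Value: "logs/"},
				Expiration: &types.LifecycleExpiration{Days: aws.Int32(30)},
			}},
		},
	})
	return err
}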
For information about bucket // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) // in the Amazon S3 User Guide. Access points - When you use this action with an // access point, you must provide the alias of the access point in place of the diff --git a/service/s3/api_op_RestoreObject.go b/service/s3/api_op_RestoreObject.go index e6e974d1a3b..3b6aad85b8b 100644 --- a/service/s3/api_op_RestoreObject.go +++ b/service/s3/api_op_RestoreObject.go @@ -17,7 +17,6 @@ import ( // This operation is not supported by directory buckets. Restores an archived copy // of an object back into Amazon S3 This functionality is not supported for Amazon // S3 on Outposts. This action performs the following types of requests: -// - select - Perform a select query on an archived object // - restore an archive - Restore an archived object // // For more information about the S3 structure in the request body, see the @@ -28,36 +27,6 @@ import ( // - Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) // in the Amazon S3 User Guide // -// Define the SQL expression for the SELECT type of restoration for your query in -// the request body's SelectParameters structure. You can use expressions like the -// following examples. -// - The following expression returns all records from the specified object. -// SELECT * FROM Object -// - Assuming that you are not using any headers for data stored in the object, -// you can specify columns with positional headers. SELECT s._1, s._2 FROM -// Object s WHERE s._3 > 100 -// - If you have headers and you set the fileHeaderInfo in the CSV structure in -// the request body to USE , you can specify headers in the query. (If you set -// the fileHeaderInfo field to IGNORE , the first row is skipped for the query.) -// You cannot mix ordinal positions with header column names. SELECT s.Id, -// s.FirstName, s.SSN FROM S3Object s -// -// When making a select request, you can also do the following: -// - To expedite your queries, specify the Expedited tier. For more information -// about tiers, see "Restoring Archives," later in this topic. -// - Specify details about the data serialization format of both the input -// object that is being queried and the serialization of the CSV-encoded query -// results. -// -// The following are additional important facts about the select feature: -// - The output results are new Amazon S3 objects. Unlike archive retrievals, -// they are stored until explicitly deleted-manually or through a lifecycle -// configuration. -// - You can issue more than one select request on the same Amazon S3 object. -// Amazon S3 doesn't duplicate requests, so avoid issuing duplicate requests. -// - Amazon S3 accepts a select request even if the object has already been -// restored. A select request doesn’t return error response 409 . -// // Permissions To use this operation, you must have permissions to perform the // s3:RestoreObject action. The bucket owner has this permission by default and can // grant this permission to others. For more information about permissions, see @@ -141,8 +110,7 @@ import ( // // - Code: RestoreAlreadyInProgress // -// - Cause: Object restore is already in progress. (This error does not apply to -// SELECT type requests.) +// - Cause: Object restore is already in progress. 
// // - HTTP Status Code: 409 Conflict // diff --git a/service/s3/api_op_UploadPart.go b/service/s3/api_op_UploadPart.go index 34d59aab1ba..ff731979498 100644 --- a/service/s3/api_op_UploadPart.go +++ b/service/s3/api_op_UploadPart.go @@ -132,7 +132,7 @@ type UploadPartInput struct { // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not // supported. Directory bucket names must be unique in the chosen Availability // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for - // example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket + // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) // in the Amazon S3 User Guide. Access points - When you use this action with an // access point, you must provide the alias of the access point in place of the diff --git a/service/s3/api_op_UploadPartCopy.go b/service/s3/api_op_UploadPartCopy.go index c9ccf3f056e..d42dc60cd3d 100644 --- a/service/s3/api_op_UploadPartCopy.go +++ b/service/s3/api_op_UploadPartCopy.go @@ -53,7 +53,7 @@ import ( // - If the source object is in a general purpose bucket, you must have the // s3:GetObject permission to read the source object that is being copied. // - If the destination bucket is a general purpose bucket, you must have the -// s3:PubObject permission to write the object copy to the destination bucket. +// s3:PutObject permission to write the object copy to the destination bucket. // For information about permissions required to use the multipart upload API, see // Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) // in the Amazon S3 User Guide. @@ -124,7 +124,7 @@ type UploadPartCopyInput struct { // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not // supported. Directory bucket names must be unique in the chosen Availability // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for - // example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket + // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) // in the Amazon S3 User Guide. Access points - When you use this action with an // access point, you must provide the alias of the access point in place of the diff --git a/service/s3/types/types.go b/service/s3/types/types.go index d3f7593fe76..4299b57cc68 100644 --- a/service/s3/types/types.go +++ b/service/s3/types/types.go @@ -1280,8 +1280,15 @@ type ExistingObjectReplication struct { noSmithyDocumentSerde } -// Specifies the Amazon S3 object key name to filter on and whether to filter on -// the suffix or prefix of the key name. +// Specifies the Amazon S3 object key name to filter on. An object key name is the +// name assigned to an object in your Amazon S3 bucket. You specify whether to +// filter on the suffix or prefix of the object key name. A prefix is a specific +// string of characters at the beginning of an object key name, which you can use +// to organize objects. For example, you can start the key names of related objects +// with a prefix, such as 2023- or engineering/ . 
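Backing up to RestoreObject above: a sketch of the remaining restore-an-archive request type; re-issuing it while a restore is underway yields the RestoreAlreadyInProgress 409 listed there. The names, one-day window, and Standard tier are placeholders.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func restoreArchived(ctx context.Context, client *s3.Client) error {
	_, err := client.RestoreObject(ctx, &s3.RestoreObjectInput{
		Bucket: aws.String("amzn-s3-demo-bucket"),
		Key:    aws.String("archived-key"),
		RestoreRequest: &types.RestoreRequest{
			Days:                 aws.Int32(1),
			GlacierJobParameters: &types.GlacierJobParameters{Tier: types.TierStandard},
		},
	})
	return err
}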
Then, you can use FilterRule to +// find objects in a bucket with key names that have the same prefix. A suffix is +// similar to a prefix, but it is at the end of the object key name instead of at +// the beginning. type FilterRule struct { // The object key name prefix or suffix identifying one or more objects to which @@ -1783,7 +1790,9 @@ type LifecycleRuleAndOperator struct { } // The Filter is used to identify objects that a Lifecycle Rule applies to. A -// Filter must have exactly one of Prefix , Tag , or And specified. +// Filter can have exactly one of Prefix , Tag , ObjectSizeGreaterThan , +// ObjectSizeLessThan , or And specified. If the Filter element is left empty, the +// Lifecycle Rule applies to all objects in the bucket. // // The following types satisfy this interface: // @@ -1855,8 +1864,8 @@ func (*LifecycleRuleFilterMemberTag) isLifecycleRuleFilter() {} type LocationInfo struct { // The name of the location where the bucket will be created. For directory - // buckets, the AZ ID of the Availability Zone where the bucket will be created. An - // example AZ ID value is usw2-az2 . + // buckets, the name of the location is the AZ ID of the Availability Zone where + // the bucket will be created. An example AZ ID value is usw2-az1 . Name *string // The type of location where the bucket will be created. @@ -3137,8 +3146,8 @@ type ServerSideEncryptionByDefault struct { // Amazon Web Services Key Management Service (KMS) customer Amazon Web Services // KMS key ID to use for the default encryption. This parameter is allowed if and - // only if SSEAlgorithm is set to aws:kms . You can specify the key ID, key alias, - // or the Amazon Resource Name (ARN) of the KMS key. + // only if SSEAlgorithm is set to aws:kms or aws:kms:dsse . You can specify the key + // ID, key alias, or the Amazon Resource Name (ARN) of the KMS key. // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab // - Key ARN: // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab diff --git a/service/sagemaker/api_op_CreateDomain.go b/service/sagemaker/api_op_CreateDomain.go index f14a25b3e4f..c469fb4b6e5 100644 --- a/service/sagemaker/api_op_CreateDomain.go +++ b/service/sagemaker/api_op_CreateDomain.go @@ -114,9 +114,9 @@ type CreateDomainInput struct { // Deprecated: This property is deprecated, use KmsKeyId instead. HomeEfsFileSystemKmsKeyId *string - // SageMaker uses Amazon Web Services KMS to encrypt the EFS volume attached to - // the domain with an Amazon Web Services managed key by default. For more control, - // specify a customer managed key. + // SageMaker uses Amazon Web Services KMS to encrypt EFS and EBS volumes attached + // to the domain with an Amazon Web Services managed key by default. For more + // control, specify a customer managed key. KmsKeyId *string // Tags to associated with the Domain. Each tag consists of a key and an optional diff --git a/service/sagemaker/api_op_CreateFeatureGroup.go b/service/sagemaker/api_op_CreateFeatureGroup.go index 40a5f4e9e8e..a1229a8bc3f 100644 --- a/service/sagemaker/api_op_CreateFeatureGroup.go +++ b/service/sagemaker/api_op_CreateFeatureGroup.go @@ -64,8 +64,8 @@ type CreateFeatureGroupInput struct { // The name of the FeatureGroup . The name must be unique within an Amazon Web // Services Region in an Amazon Web Services account. The name: // - Must start and end with an alphanumeric character. - // - Can only contain alphanumeric character and hyphens. Spaces are not - // allowed. 
+ // - Can only include alphanumeric characters, underscores, and hyphens. Spaces + // are not allowed. // // This member is required. FeatureGroupName *string diff --git a/service/sagemaker/types/enums.go b/service/sagemaker/types/enums.go index 443d885e4e2..62c61d40ed8 100644 --- a/service/sagemaker/types/enums.go +++ b/service/sagemaker/types/enums.go @@ -2885,6 +2885,88 @@ const ( InstanceTypeMlInf124xlarge InstanceType = "ml.inf1.24xlarge" InstanceTypeMlP4d24xlarge InstanceType = "ml.p4d.24xlarge" InstanceTypeMlP4de24xlarge InstanceType = "ml.p4de.24xlarge" + InstanceTypeMlP548xlarge InstanceType = "ml.p5.48xlarge" + InstanceTypeMlM6iLarge InstanceType = "ml.m6i.large" + InstanceTypeMlM6iXlarge InstanceType = "ml.m6i.xlarge" + InstanceTypeMlM6i2xlarge InstanceType = "ml.m6i.2xlarge" + InstanceTypeMlM6i4xlarge InstanceType = "ml.m6i.4xlarge" + InstanceTypeMlM6i8xlarge InstanceType = "ml.m6i.8xlarge" + InstanceTypeMlM6i12xlarge InstanceType = "ml.m6i.12xlarge" + InstanceTypeMlM6i16xlarge InstanceType = "ml.m6i.16xlarge" + InstanceTypeMlM6i24xlarge InstanceType = "ml.m6i.24xlarge" + InstanceTypeMlM6i32xlarge InstanceType = "ml.m6i.32xlarge" + InstanceTypeMlM7iLarge InstanceType = "ml.m7i.large" + InstanceTypeMlM7iXlarge InstanceType = "ml.m7i.xlarge" + InstanceTypeMlM7i2xlarge InstanceType = "ml.m7i.2xlarge" + InstanceTypeMlM7i4xlarge InstanceType = "ml.m7i.4xlarge" + InstanceTypeMlM7i8xlarge InstanceType = "ml.m7i.8xlarge" + InstanceTypeMlM7i12xlarge InstanceType = "ml.m7i.12xlarge" + InstanceTypeMlM7i16xlarge InstanceType = "ml.m7i.16xlarge" + InstanceTypeMlM7i24xlarge InstanceType = "ml.m7i.24xlarge" + InstanceTypeMlM7i48xlarge InstanceType = "ml.m7i.48xlarge" + InstanceTypeMlC6iLarge InstanceType = "ml.c6i.large" + InstanceTypeMlC6iXlarge InstanceType = "ml.c6i.xlarge" + InstanceTypeMlC6i2xlarge InstanceType = "ml.c6i.2xlarge" + InstanceTypeMlC6i4xlarge InstanceType = "ml.c6i.4xlarge" + InstanceTypeMlC6i8xlarge InstanceType = "ml.c6i.8xlarge" + InstanceTypeMlC6i12xlarge InstanceType = "ml.c6i.12xlarge" + InstanceTypeMlC6i16xlarge InstanceType = "ml.c6i.16xlarge" + InstanceTypeMlC6i24xlarge InstanceType = "ml.c6i.24xlarge" + InstanceTypeMlC6i32xlarge InstanceType = "ml.c6i.32xlarge" + InstanceTypeMlC7iLarge InstanceType = "ml.c7i.large" + InstanceTypeMlC7iXlarge InstanceType = "ml.c7i.xlarge" + InstanceTypeMlC7i2xlarge InstanceType = "ml.c7i.2xlarge" + InstanceTypeMlC7i4xlarge InstanceType = "ml.c7i.4xlarge" + InstanceTypeMlC7i8xlarge InstanceType = "ml.c7i.8xlarge" + InstanceTypeMlC7i12xlarge InstanceType = "ml.c7i.12xlarge" + InstanceTypeMlC7i16xlarge InstanceType = "ml.c7i.16xlarge" + InstanceTypeMlC7i24xlarge InstanceType = "ml.c7i.24xlarge" + InstanceTypeMlC7i48xlarge InstanceType = "ml.c7i.48xlarge" + InstanceTypeMlR6iLarge InstanceType = "ml.r6i.large" + InstanceTypeMlR6iXlarge InstanceType = "ml.r6i.xlarge" + InstanceTypeMlR6i2xlarge InstanceType = "ml.r6i.2xlarge" + InstanceTypeMlR6i4xlarge InstanceType = "ml.r6i.4xlarge" + InstanceTypeMlR6i8xlarge InstanceType = "ml.r6i.8xlarge" + InstanceTypeMlR6i12xlarge InstanceType = "ml.r6i.12xlarge" + InstanceTypeMlR6i16xlarge InstanceType = "ml.r6i.16xlarge" + InstanceTypeMlR6i24xlarge InstanceType = "ml.r6i.24xlarge" + InstanceTypeMlR6i32xlarge InstanceType = "ml.r6i.32xlarge" + InstanceTypeMlR7iLarge InstanceType = "ml.r7i.large" + InstanceTypeMlR7iXlarge InstanceType = "ml.r7i.xlarge" + InstanceTypeMlR7i2xlarge InstanceType = "ml.r7i.2xlarge" + InstanceTypeMlR7i4xlarge InstanceType = "ml.r7i.4xlarge" + InstanceTypeMlR7i8xlarge 
diff --git a/service/sagemaker/types/enums.go b/service/sagemaker/types/enums.go
index 443d885e4e2..62c61d40ed8 100644
--- a/service/sagemaker/types/enums.go
+++ b/service/sagemaker/types/enums.go
@@ -2885,6 +2885,88 @@ const (
 	InstanceTypeMlInf124xlarge InstanceType = "ml.inf1.24xlarge"
 	InstanceTypeMlP4d24xlarge InstanceType = "ml.p4d.24xlarge"
 	InstanceTypeMlP4de24xlarge InstanceType = "ml.p4de.24xlarge"
+	InstanceTypeMlP548xlarge InstanceType = "ml.p5.48xlarge"
+	InstanceTypeMlM6iLarge InstanceType = "ml.m6i.large"
+	InstanceTypeMlM6iXlarge InstanceType = "ml.m6i.xlarge"
+	InstanceTypeMlM6i2xlarge InstanceType = "ml.m6i.2xlarge"
+	InstanceTypeMlM6i4xlarge InstanceType = "ml.m6i.4xlarge"
+	InstanceTypeMlM6i8xlarge InstanceType = "ml.m6i.8xlarge"
+	InstanceTypeMlM6i12xlarge InstanceType = "ml.m6i.12xlarge"
+	InstanceTypeMlM6i16xlarge InstanceType = "ml.m6i.16xlarge"
+	InstanceTypeMlM6i24xlarge InstanceType = "ml.m6i.24xlarge"
+	InstanceTypeMlM6i32xlarge InstanceType = "ml.m6i.32xlarge"
+	InstanceTypeMlM7iLarge InstanceType = "ml.m7i.large"
+	InstanceTypeMlM7iXlarge InstanceType = "ml.m7i.xlarge"
+	InstanceTypeMlM7i2xlarge InstanceType = "ml.m7i.2xlarge"
+	InstanceTypeMlM7i4xlarge InstanceType = "ml.m7i.4xlarge"
+	InstanceTypeMlM7i8xlarge InstanceType = "ml.m7i.8xlarge"
+	InstanceTypeMlM7i12xlarge InstanceType = "ml.m7i.12xlarge"
+	InstanceTypeMlM7i16xlarge InstanceType = "ml.m7i.16xlarge"
+	InstanceTypeMlM7i24xlarge InstanceType = "ml.m7i.24xlarge"
+	InstanceTypeMlM7i48xlarge InstanceType = "ml.m7i.48xlarge"
+	InstanceTypeMlC6iLarge InstanceType = "ml.c6i.large"
+	InstanceTypeMlC6iXlarge InstanceType = "ml.c6i.xlarge"
+	InstanceTypeMlC6i2xlarge InstanceType = "ml.c6i.2xlarge"
+	InstanceTypeMlC6i4xlarge InstanceType = "ml.c6i.4xlarge"
+	InstanceTypeMlC6i8xlarge InstanceType = "ml.c6i.8xlarge"
+	InstanceTypeMlC6i12xlarge InstanceType = "ml.c6i.12xlarge"
+	InstanceTypeMlC6i16xlarge InstanceType = "ml.c6i.16xlarge"
+	InstanceTypeMlC6i24xlarge InstanceType = "ml.c6i.24xlarge"
+	InstanceTypeMlC6i32xlarge InstanceType = "ml.c6i.32xlarge"
+	InstanceTypeMlC7iLarge InstanceType = "ml.c7i.large"
+	InstanceTypeMlC7iXlarge InstanceType = "ml.c7i.xlarge"
+	InstanceTypeMlC7i2xlarge InstanceType = "ml.c7i.2xlarge"
+	InstanceTypeMlC7i4xlarge InstanceType = "ml.c7i.4xlarge"
+	InstanceTypeMlC7i8xlarge InstanceType = "ml.c7i.8xlarge"
+	InstanceTypeMlC7i12xlarge InstanceType = "ml.c7i.12xlarge"
+	InstanceTypeMlC7i16xlarge InstanceType = "ml.c7i.16xlarge"
+	InstanceTypeMlC7i24xlarge InstanceType = "ml.c7i.24xlarge"
+	InstanceTypeMlC7i48xlarge InstanceType = "ml.c7i.48xlarge"
+	InstanceTypeMlR6iLarge InstanceType = "ml.r6i.large"
+	InstanceTypeMlR6iXlarge InstanceType = "ml.r6i.xlarge"
+	InstanceTypeMlR6i2xlarge InstanceType = "ml.r6i.2xlarge"
+	InstanceTypeMlR6i4xlarge InstanceType = "ml.r6i.4xlarge"
+	InstanceTypeMlR6i8xlarge InstanceType = "ml.r6i.8xlarge"
+	InstanceTypeMlR6i12xlarge InstanceType = "ml.r6i.12xlarge"
+	InstanceTypeMlR6i16xlarge InstanceType = "ml.r6i.16xlarge"
+	InstanceTypeMlR6i24xlarge InstanceType = "ml.r6i.24xlarge"
+	InstanceTypeMlR6i32xlarge InstanceType = "ml.r6i.32xlarge"
+	InstanceTypeMlR7iLarge InstanceType = "ml.r7i.large"
+	InstanceTypeMlR7iXlarge InstanceType = "ml.r7i.xlarge"
+	InstanceTypeMlR7i2xlarge InstanceType = "ml.r7i.2xlarge"
+	InstanceTypeMlR7i4xlarge InstanceType = "ml.r7i.4xlarge"
+	InstanceTypeMlR7i8xlarge InstanceType = "ml.r7i.8xlarge"
+	InstanceTypeMlR7i12xlarge InstanceType = "ml.r7i.12xlarge"
+	InstanceTypeMlR7i16xlarge InstanceType = "ml.r7i.16xlarge"
+	InstanceTypeMlR7i24xlarge InstanceType = "ml.r7i.24xlarge"
+	InstanceTypeMlR7i48xlarge InstanceType = "ml.r7i.48xlarge"
+	InstanceTypeMlM6idLarge InstanceType = "ml.m6id.large"
+	InstanceTypeMlM6idXlarge InstanceType = "ml.m6id.xlarge"
+	InstanceTypeMlM6id2xlarge InstanceType = "ml.m6id.2xlarge"
+	InstanceTypeMlM6id4xlarge InstanceType = "ml.m6id.4xlarge"
+	InstanceTypeMlM6id8xlarge InstanceType = "ml.m6id.8xlarge"
+	InstanceTypeMlM6id12xlarge InstanceType = "ml.m6id.12xlarge"
+	InstanceTypeMlM6id16xlarge InstanceType = "ml.m6id.16xlarge"
+	InstanceTypeMlM6id24xlarge InstanceType = "ml.m6id.24xlarge"
+	InstanceTypeMlM6id32xlarge InstanceType = "ml.m6id.32xlarge"
+	InstanceTypeMlC6idLarge InstanceType = "ml.c6id.large"
+	InstanceTypeMlC6idXlarge InstanceType = "ml.c6id.xlarge"
+	InstanceTypeMlC6id2xlarge InstanceType = "ml.c6id.2xlarge"
+	InstanceTypeMlC6id4xlarge InstanceType = "ml.c6id.4xlarge"
+	InstanceTypeMlC6id8xlarge InstanceType = "ml.c6id.8xlarge"
+	InstanceTypeMlC6id12xlarge InstanceType = "ml.c6id.12xlarge"
+	InstanceTypeMlC6id16xlarge InstanceType = "ml.c6id.16xlarge"
+	InstanceTypeMlC6id24xlarge InstanceType = "ml.c6id.24xlarge"
+	InstanceTypeMlC6id32xlarge InstanceType = "ml.c6id.32xlarge"
+	InstanceTypeMlR6idLarge InstanceType = "ml.r6id.large"
+	InstanceTypeMlR6idXlarge InstanceType = "ml.r6id.xlarge"
+	InstanceTypeMlR6id2xlarge InstanceType = "ml.r6id.2xlarge"
+	InstanceTypeMlR6id4xlarge InstanceType = "ml.r6id.4xlarge"
+	InstanceTypeMlR6id8xlarge InstanceType = "ml.r6id.8xlarge"
+	InstanceTypeMlR6id12xlarge InstanceType = "ml.r6id.12xlarge"
+	InstanceTypeMlR6id16xlarge InstanceType = "ml.r6id.16xlarge"
+	InstanceTypeMlR6id24xlarge InstanceType = "ml.r6id.24xlarge"
+	InstanceTypeMlR6id32xlarge InstanceType = "ml.r6id.32xlarge"
 )

 // Values returns all known values for InstanceType. Note that this can be
@@ -2967,6 +3049,88 @@ func (InstanceType) Values() []InstanceType {
 		"ml.inf1.24xlarge",
 		"ml.p4d.24xlarge",
 		"ml.p4de.24xlarge",
+		"ml.p5.48xlarge",
+		"ml.m6i.large",
+		"ml.m6i.xlarge",
+		"ml.m6i.2xlarge",
+		"ml.m6i.4xlarge",
+		"ml.m6i.8xlarge",
+		"ml.m6i.12xlarge",
+		"ml.m6i.16xlarge",
+		"ml.m6i.24xlarge",
+		"ml.m6i.32xlarge",
+		"ml.m7i.large",
+		"ml.m7i.xlarge",
+		"ml.m7i.2xlarge",
+		"ml.m7i.4xlarge",
+		"ml.m7i.8xlarge",
+		"ml.m7i.12xlarge",
+		"ml.m7i.16xlarge",
+		"ml.m7i.24xlarge",
+		"ml.m7i.48xlarge",
+		"ml.c6i.large",
+		"ml.c6i.xlarge",
+		"ml.c6i.2xlarge",
+		"ml.c6i.4xlarge",
+		"ml.c6i.8xlarge",
+		"ml.c6i.12xlarge",
+		"ml.c6i.16xlarge",
+		"ml.c6i.24xlarge",
+		"ml.c6i.32xlarge",
+		"ml.c7i.large",
+		"ml.c7i.xlarge",
+		"ml.c7i.2xlarge",
+		"ml.c7i.4xlarge",
+		"ml.c7i.8xlarge",
+		"ml.c7i.12xlarge",
+		"ml.c7i.16xlarge",
+		"ml.c7i.24xlarge",
+		"ml.c7i.48xlarge",
+		"ml.r6i.large",
+		"ml.r6i.xlarge",
+		"ml.r6i.2xlarge",
+		"ml.r6i.4xlarge",
+		"ml.r6i.8xlarge",
+		"ml.r6i.12xlarge",
+		"ml.r6i.16xlarge",
+		"ml.r6i.24xlarge",
+		"ml.r6i.32xlarge",
+		"ml.r7i.large",
+		"ml.r7i.xlarge",
+		"ml.r7i.2xlarge",
+		"ml.r7i.4xlarge",
+		"ml.r7i.8xlarge",
+		"ml.r7i.12xlarge",
+		"ml.r7i.16xlarge",
+		"ml.r7i.24xlarge",
+		"ml.r7i.48xlarge",
+		"ml.m6id.large",
+		"ml.m6id.xlarge",
+		"ml.m6id.2xlarge",
+		"ml.m6id.4xlarge",
+		"ml.m6id.8xlarge",
+		"ml.m6id.12xlarge",
+		"ml.m6id.16xlarge",
+		"ml.m6id.24xlarge",
+		"ml.m6id.32xlarge",
+		"ml.c6id.large",
+		"ml.c6id.xlarge",
+		"ml.c6id.2xlarge",
+		"ml.c6id.4xlarge",
+		"ml.c6id.8xlarge",
+		"ml.c6id.12xlarge",
+		"ml.c6id.16xlarge",
+		"ml.c6id.24xlarge",
+		"ml.c6id.32xlarge",
+		"ml.r6id.large",
+		"ml.r6id.xlarge",
+		"ml.r6id.2xlarge",
+		"ml.r6id.4xlarge",
+		"ml.r6id.8xlarge",
+		"ml.r6id.12xlarge",
+		"ml.r6id.16xlarge",
+		"ml.r6id.24xlarge",
+		"ml.r6id.32xlarge",
 	}
 }
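[Editor's note: a short sketch of how the newly added constants are consumed; per the changelog these types were added for notebook instances, so CreateNotebookInstance is used here. The notebook name and role ARN are placeholders.]

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sagemaker"
	"github.com/aws/aws-sdk-go-v2/service/sagemaker/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := sagemaker.NewFromConfig(cfg)

	// Request a notebook instance on one of the newly added m7i types.
	_, err = client.CreateNotebookInstance(context.TODO(), &sagemaker.CreateNotebookInstanceInput{
		NotebookInstanceName: aws.String("example-notebook"), // placeholder
		InstanceType:         types.InstanceTypeMlM7iXlarge,
		RoleArn:              aws.String("arn:aws:iam::111122223333:role/ExampleSageMakerRole"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
}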
diff --git a/service/sagemaker/types/types.go b/service/sagemaker/types/types.go
index 4014ced7f4b..e563d80506d 100644
--- a/service/sagemaker/types/types.go
+++ b/service/sagemaker/types/types.go
@@ -2999,9 +2999,11 @@ type ContainerDefinition struct {
 	// ContainerHostName parameter of every ContainerDefinition in that pipeline.
 	ContainerHostname *string

-	// The environment variables to set in the Docker container. Each key and value in
-	// the Environment string to string map can have length of up to 1024. We support
-	// up to 16 entries in the map.
+	// The environment variables to set in the Docker container. The maximum length of
+	// each key and value in the Environment map is 1024 bytes. The maximum length of
+	// all keys and values in the map, combined, is 32 KB. If you pass multiple
+	// containers to a CreateModel request, then the maximum length of all of their
+	// maps, combined, is also 32 KB.
 	Environment map[string]string

 	// The path where inference code is stored. This can be either in Amazon EC2
@@ -4954,7 +4956,10 @@ type FailStepMetadata struct {
 type FeatureDefinition struct {

 	// The name of a feature. The type must be a string. FeatureName cannot be any of
-	// the following: is_deleted , write_time , api_invocation_time .
+	// the following: is_deleted , write_time , api_invocation_time . The name:
+	//   - Must start and end with an alphanumeric character.
+	//   - Can only include alphanumeric characters, underscores, and hyphens. Spaces
+	//   are not allowed.
 	//
 	// This member is required.
 	FeatureName *string
@@ -13461,8 +13466,8 @@ type S3ModelDataSource struct {
 	noSmithyDocumentSerde
 }

-// The Amazon Simple Storage (Amazon S3) location and and security configuration
-// for OfflineStore .
+// The Amazon Simple Storage (Amazon S3) location and security configuration for
+// OfflineStore .
 type S3StorageConfig struct {

 	// The S3 URI, or location in Amazon S3, of OfflineStore . S3 URIs have a format
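[Editor's note: the revised Environment limits above can be validated before building a ContainerDefinition. A minimal sketch; how the service counts the combined size (separators, encoding) is an assumption here, and this simply sums raw key and value bytes.]

package main

import "fmt"

// checkEnvironment enforces the documented ContainerDefinition limits:
// each key and value at most 1024 bytes, and all keys and values in the
// map at most 32 KB combined.
func checkEnvironment(env map[string]string) error {
	total := 0
	for k, v := range env {
		if len(k) > 1024 || len(v) > 1024 {
			return fmt.Errorf("entry %q exceeds the 1024-byte key/value limit", k)
		}
		total += len(k) + len(v)
	}
	if total > 32*1024 {
		return fmt.Errorf("environment totals %d bytes, above the 32 KB cap", total)
	}
	return nil
}

func main() {
	env := map[string]string{"MODEL_SERVER_WORKERS": "2"}
	fmt.Println(checkEnvironment(env)) // <nil> for this small map
}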
diff --git a/service/signer/internal/endpoints/endpoints.go b/service/signer/internal/endpoints/endpoints.go
index 2dfe51e71f1..5f7df02b054 100644
--- a/service/signer/internal/endpoints/endpoints.go
+++ b/service/signer/internal/endpoints/endpoints.go
@@ -630,5 +630,13 @@ var defaultPartitions = endpoints.Partitions{
 		},
 		RegionRegex:    partitionRegexp.AwsUsGov,
 		IsRegionalized: true,
+		Endpoints: endpoints.Endpoints{
+			endpoints.EndpointKey{
+				Region: "us-gov-east-1",
+			}: endpoints.Endpoint{},
+			endpoints.EndpointKey{
+				Region: "us-gov-west-1",
+			}: endpoints.Endpoint{},
+		},
 	},
 }
diff --git a/service/textract/internal/endpoints/endpoints.go b/service/textract/internal/endpoints/endpoints.go
index e4069b4af7d..49c01a08b06 100644
--- a/service/textract/internal/endpoints/endpoints.go
+++ b/service/textract/internal/endpoints/endpoints.go
@@ -310,6 +310,11 @@ var defaultPartitions = endpoints.Partitions{
 		},
 		RegionRegex:    partitionRegexp.AwsIso,
 		IsRegionalized: true,
+		Endpoints: endpoints.Endpoints{
+			endpoints.EndpointKey{
+				Region: "us-iso-east-1",
+			}: endpoints.Endpoint{},
+		},
 	},
 	{
 		ID: "aws-iso-b",
diff --git a/service/workspacesthinclient/api_op_UpdateDevice.go b/service/workspacesthinclient/api_op_UpdateDevice.go
index 96d579c5d7a..f1470bab584 100644
--- a/service/workspacesthinclient/api_op_UpdateDevice.go
+++ b/service/workspacesthinclient/api_op_UpdateDevice.go
@@ -37,10 +37,6 @@ type UpdateDeviceInput struct {
 	// The ID of the software set to apply.
 	DesiredSoftwareSetId *string

-	// The Amazon Resource Name (ARN) of the Key Management Service key to use for the
-	// update.
-	KmsKeyArn *string
-
 	// The name of the device to update.
 	Name *string

diff --git a/service/workspacesthinclient/doc.go b/service/workspacesthinclient/doc.go
index 3256a5f9de8..e303c1baab3 100644
--- a/service/workspacesthinclient/doc.go
+++ b/service/workspacesthinclient/doc.go
@@ -3,7 +3,7 @@
 // Package workspacesthinclient provides the API client, operations, and parameter
 // types for Amazon WorkSpaces Thin Client.
 //
-// Amazon WorkSpaces Thin Client is a affordable device built to work with Amazon
+// Amazon WorkSpaces Thin Client is an affordable device built to work with Amazon
 // Web Services End User Computing (EUC) virtual desktops to provide users with a
 // complete cloud desktop solution. WorkSpaces Thin Client is a compact device
 // designed to connect up to two monitors and USB devices like a keyboard, mouse,
diff --git a/service/workspacesthinclient/serializers.go b/service/workspacesthinclient/serializers.go
index 89a694466fb..520ad47aa08 100644
--- a/service/workspacesthinclient/serializers.go
+++ b/service/workspacesthinclient/serializers.go
@@ -1067,11 +1067,6 @@ func awsRestjson1_serializeOpDocumentUpdateDeviceInput(v *UpdateDeviceInput, value smithyjson.Value) error {
 		ok.String(*v.DesiredSoftwareSetId)
 	}

-	if v.KmsKeyArn != nil {
-		ok := object.Key("kmsKeyArn")
-		ok.String(*v.KmsKeyArn)
-	}
-
 	if v.Name != nil {
 		ok := object.Key("name")
 		ok.String(*v.Name)
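[Editor's note: with kmsKeyArn removed from both the input shape and the serializer above, an UpdateDevice call now carries only the remaining fields. A minimal sketch; the device ID and name are placeholders.]

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/workspacesthinclient"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := workspacesthinclient.NewFromConfig(cfg)

	// Rename a device; no KMS key ARN is (or can be) supplied anymore.
	_, err = client.UpdateDevice(context.TODO(), &workspacesthinclient.UpdateDeviceInput{
		Id:   aws.String("device-0123456789abcdef0"), // placeholder device ID
		Name: aws.String("front-desk-terminal"),      // placeholder name
	})
	if err != nil {
		log.Fatal(err)
	}
}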