From 021d53ccfdd95072d18c896310f5a02dd7931415 Mon Sep 17 00:00:00 2001 From: aws-sdk-go-automation <43143561+aws-sdk-go-automation@users.noreply.github.com> Date: Wed, 23 Feb 2022 11:20:47 -0800 Subject: [PATCH] Release v1.43.5 (2022-02-23) (#4293) Release v1.43.5 (2022-02-23) === ### Service Client Updates * `service/lambda`: Updates service API, documentation, and waiters * Lambda releases .NET 6 managed runtime to be available in all commercial regions. * `service/textract`: Updates service API * `service/transfer`: Updates service API and documentation * The file input selection feature provides the ability to use either the originally uploaded file or the output file from the previous workflow step, enabling customers to make multiple copies of the original file while keeping the source file intact for file archival. --- CHANGELOG.md | 10 + aws/endpoints/defaults.go | 207 +++++++++++++++++++ aws/version.go | 2 +- models/apis/lambda/2015-03-31/api-2.json | 1 + models/apis/lambda/2015-03-31/docs-2.json | 10 +- models/apis/lambda/2015-03-31/waiters-2.json | 56 ++++- models/apis/textract/2018-06-27/api-2.json | 11 +- models/apis/transfer/2018-11-05/api-2.json | 21 +- models/apis/transfer/2018-11-05/docs-2.json | 23 ++- models/endpoints/endpoints.json | 136 ++++++++++++ service/lambda/api.go | 28 ++- service/lambda/lambdaiface/interface.go | 6 + service/lambda/waiters.go | 112 ++++++++++ service/textract/api.go | 20 ++ service/transfer/api.go | 116 +++++++---- 15 files changed, 689 insertions(+), 70 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b7bfa43bd9b..f9cfa73c4a9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,13 @@ +Release v1.43.5 (2022-02-23) +=== + +### Service Client Updates +* `service/lambda`: Updates service API, documentation, and waiters + * Lambda releases .NET 6 managed runtime to be available in all commercial regions. +* `service/textract`: Updates service API +* `service/transfer`: Updates service API and documentation + * The file input selection feature provides the ability to use either the originally uploaded file or the output file from the previous workflow step, enabling customers to make multiple copies of the original file while keeping the source file intact for file archival. 
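
As a quick illustration of what this release adds, the following is a minimal sketch (not part of the generated SDK) that moves a function to the new `dotnet6` managed runtime and then waits on it with one of the new `GetFunction`-based waiters; the function name is a placeholder and error handling is abbreviated.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lambda"
)

func main() {
	// Shared session using the default credential and region chain.
	sess := session.Must(session.NewSession())
	svc := lambda.New(sess)

	// "my-function" is a placeholder name, not something defined by this release.
	name := aws.String("my-function")

	// Switch the function to the .NET 6 managed runtime added in v1.43.5.
	_, err := svc.UpdateFunctionConfiguration(&lambda.UpdateFunctionConfigurationInput{
		FunctionName: name,
		Runtime:      aws.String(lambda.RuntimeDotnet6),
	})
	if err != nil {
		log.Fatal(err)
	}

	// The new V2 waiters poll GetFunction rather than GetFunctionConfiguration.
	if err := svc.WaitUntilFunctionUpdatedV2(&lambda.GetFunctionInput{FunctionName: name}); err != nil {
		log.Fatal(err)
	}
	log.Println("function update complete")
}
```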
+ Release v1.43.4 (2022-02-22) === diff --git a/aws/endpoints/defaults.go b/aws/endpoints/defaults.go index 6b99e8db8c2..d81b107f167 100644 --- a/aws/endpoints/defaults.go +++ b/aws/endpoints/defaults.go @@ -1669,6 +1669,147 @@ var awsPartition = partition{ }, }, }, + "api.tunneling.iot": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-west-2.amazonaws.com", + }, + }, + }, "apigateway": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -21963,6 +22104,16 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "api.tunneling.iot": service{ + Endpoints: serviceEndpoints{ + 
endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "apigateway": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -23471,6 +23622,14 @@ var awsusgovPartition = partition{ }, }, "acm": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm.{region}.{dnsSuffix}", + }, + }, Endpoints: serviceEndpoints{ endpointKey{ Region: "us-gov-east-1", @@ -23761,6 +23920,54 @@ var awsusgovPartition = partition{ }, }, }, + "api.tunneling.iot": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, "apigateway": service{ Endpoints: serviceEndpoints{ endpointKey{ diff --git a/aws/version.go b/aws/version.go index 7a53b789d3e..ec125eca874 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.43.4" +const SDKVersion = "1.43.5" diff --git a/models/apis/lambda/2015-03-31/api-2.json b/models/apis/lambda/2015-03-31/api-2.json index be8a5f8f5c4..8032cd60005 100644 --- a/models/apis/lambda/2015-03-31/api-2.json +++ b/models/apis/lambda/2015-03-31/api-2.json @@ -3235,6 +3235,7 @@ "dotnetcore2.0", "dotnetcore2.1", "dotnetcore3.1", + "dotnet6", "nodejs4.3-edge", "go1.x", "ruby2.5", diff --git a/models/apis/lambda/2015-03-31/docs-2.json b/models/apis/lambda/2015-03-31/docs-2.json index 7e647a254ea..80f9da66617 100644 --- a/models/apis/lambda/2015-03-31/docs-2.json +++ b/models/apis/lambda/2015-03-31/docs-2.json @@ -57,7 +57,7 @@ "UpdateAlias": "

Updates the configuration of a Lambda function alias.",
"UpdateCodeSigningConfig": "Update the code signing configuration. Changes to the code signing configuration take effect the next time a user tries to deploy a code package to the function.",
"UpdateEventSourceMapping": "Updates an event source mapping. You can change the function that Lambda invokes, or pause invocation and resume later from the same location. For details about how to configure different event sources, see the following topics. The following error handling options are only available for stream sources (DynamoDB and Kinesis): For information about which configuration parameters apply to each event source, see the following topics.",
- "UpdateFunctionCode": "Updates a Lambda function's code. If code signing is enabled for the function, the code package must be signed by a trusted publisher. For more information, see Configuring code signing. The function's code is locked when you publish a version. You can't modify the code of a published version, only the unpublished version. For a function defined as a container image, Lambda resolves the image tag to an image digest. In Amazon ECR, if you update the image tag to a new image, Lambda does not automatically update the function.",
+ "UpdateFunctionCode": "Updates a Lambda function's code. If code signing is enabled for the function, the code package must be signed by a trusted publisher. For more information, see Configuring code signing. If the function's package type is Image, you must specify the code package in ImageUri as the URI of a container image in the Amazon ECR registry. If the function's package type is Zip, you must specify the deployment package as a .zip file archive. Enter the Amazon S3 bucket and key of the code .zip file location. You can also provide the function code inline using the ZipFile field. The code in the deployment package must be compatible with the target instruction set architecture of the function (x86-64 or arm64). The function's code is locked when you publish a version. You can't modify the code of a published version, only the unpublished version. For a function defined as a container image, Lambda resolves the image tag to an image digest. In Amazon ECR, if you update the image tag to a new image, Lambda does not automatically update the function.",
"UpdateFunctionConfiguration": "Modify the version-specific settings of a Lambda function. When you update a function, Lambda provisions an instance of the function and its supporting resources. If your function connects to a VPC, this process can take a minute. During this time, you can't modify the function, but you can still invoke it. The LastUpdateStatus, LastUpdateStatusReason, and LastUpdateStatusReasonCode fields in the response from GetFunctionConfiguration indicate when the update is complete and the function is processing events with the new configuration. For more information, see Function States. These settings can vary between versions of a function and are locked when you publish a version. You can't modify the configuration of a published version, only the unpublished version. To configure function concurrency, use PutFunctionConcurrency. To grant invoke permissions to an account or Amazon Web Services service, use AddPermission.",
"UpdateFunctionEventInvokeConfig": "Updates the configuration for asynchronous invocation for a function, version, or alias. To configure options for asynchronous invocation, use PutFunctionEventInvokeConfig."
},
@@ -204,7 +204,7 @@
"InvocationRequest$Payload": "The JSON that you want to provide to your Lambda function as input. You can enter the JSON directly. For example, --payload '{ \"key\": \"value\" }'. You can also specify a file path. For example, --payload file://payload.json.",
"InvocationResponse$Payload": "The response from the function, or an error object.",
"LayerVersionContentInput$ZipFile": "The base64-encoded contents of the layer archive. Amazon Web Services SDK and Amazon Web Services CLI clients handle the encoding for you.",
- "UpdateFunctionCodeRequest$ZipFile": "The base64-encoded contents of the deployment package. Amazon Web Services SDK and Amazon Web Services CLI clients handle the encoding for you."
+ "UpdateFunctionCodeRequest$ZipFile": "The base64-encoded contents of the deployment package. Amazon Web Services SDK and Amazon Web Services CLI clients handle the encoding for you. Use only with a function defined with a .zip file archive deployment package."
}
},
"BlobStream": {
@@ -1606,7 +1606,7 @@
"refs": {
"FunctionCode$S3Bucket": "An Amazon S3 bucket in the same Amazon Web Services Region as your function. The bucket can be in a different Amazon Web Services account.",
"LayerVersionContentInput$S3Bucket": "The Amazon S3 bucket of the layer archive.",
- "UpdateFunctionCodeRequest$S3Bucket": "An Amazon S3 bucket in the same Amazon Web Services Region as your function. The bucket can be in a different Amazon Web Services account."
+ "UpdateFunctionCodeRequest$S3Bucket": "An Amazon S3 bucket in the same Amazon Web Services Region as your function. The bucket can be in a different Amazon Web Services account. Use only with a function defined with a .zip file archive deployment package."
}
},
"S3Key": {
@@ -1614,7 +1614,7 @@
"refs": {
"FunctionCode$S3Key": "The Amazon S3 key of the deployment package.",
"LayerVersionContentInput$S3Key": "The Amazon S3 key of the layer archive.",
- "UpdateFunctionCodeRequest$S3Key": "The Amazon S3 key of the deployment package."
+ "UpdateFunctionCodeRequest$S3Key": "The Amazon S3 key of the deployment package. Use only with a function defined with a .zip file archive deployment package."
}
},
"S3ObjectVersion": {
@@ -1850,7 +1850,7 @@
"UnsupportedMediaTypeException$message": null,
"UpdateAliasRequest$RevisionId": "Only update the alias if the revision ID matches the ID that's specified. Use this option to avoid modifying an alias that has changed since you last read it.",
"UpdateEventSourceMappingRequest$UUID": "The identifier of the event source mapping.",
- "UpdateFunctionCodeRequest$ImageUri": "URI of a container image in the Amazon ECR registry.",
+ "UpdateFunctionCodeRequest$ImageUri": "URI of a container image in the Amazon ECR registry. Do not use for a function defined with a .zip file archive.",
"UpdateFunctionCodeRequest$RevisionId": "Only update the function if the revision ID matches the ID that's specified. Use this option to avoid modifying a function that has changed since you last read it.",
"UpdateFunctionConfigurationRequest$RevisionId": "Only update the function if the revision ID matches the ID that's specified. Use this option to avoid modifying a function that has changed since you last read it.
" } diff --git a/models/apis/lambda/2015-03-31/waiters-2.json b/models/apis/lambda/2015-03-31/waiters-2.json index af39bfc169c..b4c18f64310 100644 --- a/models/apis/lambda/2015-03-31/waiters-2.json +++ b/models/apis/lambda/2015-03-31/waiters-2.json @@ -22,7 +22,7 @@ "delay": 5, "maxAttempts": 60, "operation": "GetFunctionConfiguration", - "description": "Waits for the function's State to be Active.", + "description": "Waits for the function's State to be Active. This waiter uses GetFunctionConfiguration API. This should be used after new function creation.", "acceptors": [ { "state": "success", @@ -48,7 +48,7 @@ "delay": 5, "maxAttempts": 60, "operation": "GetFunctionConfiguration", - "description": "Waits for the function's LastUpdateStatus to be Successful.", + "description": "Waits for the function's LastUpdateStatus to be Successful. This waiter uses GetFunctionConfiguration API. This should be used after function updates.", "acceptors": [ { "state": "success", @@ -69,6 +69,58 @@ "expected": "InProgress" } ] + }, + "FunctionActiveV2": { + "delay": 1, + "maxAttempts": 300, + "operation": "GetFunction", + "description": "Waits for the function's State to be Active. This waiter uses GetFunction API. This should be used after new function creation.", + "acceptors": [ + { + "state": "success", + "matcher": "path", + "argument": "Configuration.State", + "expected": "Active" + }, + { + "state": "failure", + "matcher": "path", + "argument": "Configuration.State", + "expected": "Failed" + }, + { + "state": "retry", + "matcher": "path", + "argument": "Configuration.State", + "expected": "Pending" + } + ] + }, + "FunctionUpdatedV2": { + "delay": 1, + "maxAttempts": 300, + "operation": "GetFunction", + "description": "Waits for the function's LastUpdateStatus to be Successful. This waiter uses GetFunction API. 
This should be used after function updates.", + "acceptors": [ + { + "state": "success", + "matcher": "path", + "argument": "Configuration.LastUpdateStatus", + "expected": "Successful" + }, + { + "state": "failure", + "matcher": "path", + "argument": "Configuration.LastUpdateStatus", + "expected": "Failed" + }, + { + "state": "retry", + "matcher": "path", + "argument": "Configuration.LastUpdateStatus", + "expected": "InProgress" + } + ] } } } diff --git a/models/apis/textract/2018-06-27/api-2.json b/models/apis/textract/2018-06-27/api-2.json index 66e4a519491..ef352b38554 100644 --- a/models/apis/textract/2018-06-27/api-2.json +++ b/models/apis/textract/2018-06-27/api-2.json @@ -324,7 +324,9 @@ "WORD", "TABLE", "CELL", - "SELECTION_ELEMENT" + "SELECTION_ELEMENT", + "MERGED_CELL", + "TITLE" ] }, "BoundingBox":{ @@ -404,7 +406,8 @@ "type":"string", "enum":[ "KEY", - "VALUE" + "VALUE", + "COLUMN_HEADER" ] }, "EntityTypes":{ @@ -800,7 +803,9 @@ "enum":[ "VALUE", "CHILD", - "COMPLEX_FEATURES" + "COMPLEX_FEATURES", + "MERGED_CELL", + "TITLE" ] }, "RoleArn":{ diff --git a/models/apis/transfer/2018-11-05/api-2.json b/models/apis/transfer/2018-11-05/api-2.json index 593fd1303a4..f48ca227604 100644 --- a/models/apis/transfer/2018-11-05/api-2.json +++ b/models/apis/transfer/2018-11-05/api-2.json @@ -554,7 +554,8 @@ "members":{ "Name":{"shape":"WorkflowStepName"}, "DestinationFileLocation":{"shape":"InputFileLocation"}, - "OverwriteExisting":{"shape":"OverwriteExisting"} + "OverwriteExisting":{"shape":"OverwriteExisting"}, + "SourceFileLocation":{"shape":"SourceFileLocation"} } }, "CreateAccessRequest":{ @@ -666,7 +667,8 @@ "members":{ "Name":{"shape":"WorkflowStepName"}, "Target":{"shape":"CustomStepTarget"}, - "TimeoutSeconds":{"shape":"CustomStepTimeoutSeconds"} + "TimeoutSeconds":{"shape":"CustomStepTimeoutSeconds"}, + "SourceFileLocation":{"shape":"SourceFileLocation"} } }, "CustomStepStatus":{ @@ -721,7 +723,8 @@ "DeleteStepDetails":{ "type":"structure", "members":{ - "Name":{"shape":"WorkflowStepName"} + "Name":{"shape":"WorkflowStepName"}, + "SourceFileLocation":{"shape":"SourceFileLocation"} } }, "DeleteUserRequest":{ @@ -967,9 +970,9 @@ }, "EfsPath":{ "type":"string", - "max":100, + "max":65536, "min":1, - "pattern":"^(\\/|(\\/(?!\\.)+[^$#<>;`|&?{}^*/\\n]+){1,4})$" + "pattern":"^[^\\x00]+$" }, "EndpointDetails":{ "type":"structure", @@ -1663,6 +1666,11 @@ "min":3, "pattern":"^[\\w-]*$" }, + "SourceFileLocation":{ + "type":"string", + "max":256, + "pattern":"^\\$\\{(\\w+.)+\\w+\\}$" + }, "SourceIp":{ "type":"string", "max":32, @@ -1769,7 +1777,8 @@ "type":"structure", "members":{ "Name":{"shape":"WorkflowStepName"}, - "Tags":{"shape":"S3Tags"} + "Tags":{"shape":"S3Tags"}, + "SourceFileLocation":{"shape":"SourceFileLocation"} } }, "TagValue":{ diff --git a/models/apis/transfer/2018-11-05/docs-2.json b/models/apis/transfer/2018-11-05/docs-2.json index 34bb50ed206..a763f50325f 100644 --- a/models/apis/transfer/2018-11-05/docs-2.json +++ b/models/apis/transfer/2018-11-05/docs-2.json @@ -439,7 +439,7 @@ } }, "HomeDirectoryMapEntry": { - "base": "

Represents an object that contains entries and targets for HomeDirectoryMappings. The following is an Entry and Target pair example for chroot. [ { \"Entry:\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ] If the target of a logical directory entry does not exist in Amazon S3 or EFS, the entry is ignored. As a workaround, you can use the Amazon S3 API or EFS API to create 0 byte objects as place holders for your directory. If using the CLI, use the s3api or efsapi call instead of s3 or efs so you can use the put-object operation. For example, you use the following: aws s3api put-object --bucket bucketname --key path/to/folder/. Make sure that the end of the key name ends in a / for it to be considered a folder.",
+ "base": "Represents an object that contains entries and targets for HomeDirectoryMappings. The following is an Entry and Target pair example for chroot. [ { \"Entry:\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ]",
"refs": {
"HomeDirectoryMappings$member": null
}
@@ -447,12 +447,12 @@
"HomeDirectoryMappings": {
"base": null,
"refs": {
- "CreateAccessRequest$HomeDirectoryMappings": "Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry and Target pair, where Entry shows how the path is made visible and Target is the actual Amazon S3 or Amazon EFS path. If you only specify a target, it is displayed as is. You also must ensure that your Amazon Web Services Identity and Access Management (IAM) role provides access to paths in Target. This value can only be set when HomeDirectoryType is set to LOGICAL. The following is an Entry and Target pair example. [ { \"Entry\": \"/directory1\", \"Target\": \"/bucket_name/home/mydirectory\" } ] In most cases, you can use this value instead of the session policy to lock down your user to the designated home directory (\"chroot\"). To do this, you can set Entry to / and set Target to the HomeDirectory parameter value. The following is an Entry and Target pair example for chroot. [ { \"Entry:\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ] If the target of a logical directory entry does not exist in Amazon S3 or EFS, the entry is ignored. As a workaround, you can use the Amazon S3 API or EFS API to create 0 byte objects as place holders for your directory. If using the CLI, use the s3api or efsapi call instead of s3 or efs so you can use the put-object operation. For example, you use the following: aws s3api put-object --bucket bucketname --key path/to/folder/. Make sure that the end of the key name ends in a / for it to be considered a folder.",
- "CreateUserRequest$HomeDirectoryMappings": "Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry and Target pair, where Entry shows how the path is made visible and Target is the actual Amazon S3 or Amazon EFS path. If you only specify a target, it is displayed as is. You also must ensure that your Amazon Web Services Identity and Access Management (IAM) role provides access to paths in Target. This value can only be set when HomeDirectoryType is set to LOGICAL. The following is an Entry and Target pair example. [ { \"Entry\": \"/directory1\", \"Target\": \"/bucket_name/home/mydirectory\" } ] In most cases, you can use this value instead of the session policy to lock your user down to the designated home directory (\"chroot\"). To do this, you can set Entry to / and set Target to the HomeDirectory parameter value. The following is an Entry and Target pair example for chroot. [ { \"Entry:\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ] If the target of a logical directory entry does not exist in Amazon S3 or EFS, the entry is ignored. As a workaround, you can use the Amazon S3 API or EFS API to create 0 byte objects as place holders for your directory. If using the CLI, use the s3api or efsapi call instead of s3 or efs so you can use the put-object operation. For example, you use the following: aws s3api put-object --bucket bucketname --key path/to/folder/. Make sure that the end of the key name ends in a / for it to be considered a folder.",
+ "CreateAccessRequest$HomeDirectoryMappings": "Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry and Target pair, where Entry shows how the path is made visible and Target is the actual Amazon S3 or Amazon EFS path. If you only specify a target, it is displayed as is. You also must ensure that your Amazon Web Services Identity and Access Management (IAM) role provides access to paths in Target. This value can only be set when HomeDirectoryType is set to LOGICAL. The following is an Entry and Target pair example. [ { \"Entry\": \"/directory1\", \"Target\": \"/bucket_name/home/mydirectory\" } ] In most cases, you can use this value instead of the session policy to lock down your user to the designated home directory (\"chroot\"). To do this, you can set Entry to / and set Target to the HomeDirectory parameter value. The following is an Entry and Target pair example for chroot. [ { \"Entry:\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ]",
+ "CreateUserRequest$HomeDirectoryMappings": "Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry and Target pair, where Entry shows how the path is made visible and Target is the actual Amazon S3 or Amazon EFS path. If you only specify a target, it is displayed as is. You also must ensure that your Amazon Web Services Identity and Access Management (IAM) role provides access to paths in Target. This value can only be set when HomeDirectoryType is set to LOGICAL. The following is an Entry and Target pair example. [ { \"Entry\": \"/directory1\", \"Target\": \"/bucket_name/home/mydirectory\" } ] In most cases, you can use this value instead of the session policy to lock your user down to the designated home directory (\"chroot\"). To do this, you can set Entry to / and set Target to the HomeDirectory parameter value. The following is an Entry and Target pair example for chroot. [ { \"Entry:\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ]",
"DescribedAccess$HomeDirectoryMappings": "Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry and Target pair, where Entry shows how the path is made visible and Target is the actual Amazon S3 or Amazon EFS path. If you only specify a target, it is displayed as is. You also must ensure that your Amazon Web Services Identity and Access Management (IAM) role provides access to paths in Target. This value can only be set when HomeDirectoryType is set to LOGICAL. In most cases, you can use this value instead of the session policy to lock down the associated access to the designated home directory (\"chroot\"). To do this, you can set Entry to '/' and set Target to the HomeDirectory parameter value.",
"DescribedUser$HomeDirectoryMappings": "Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry and Target pair, where Entry shows how the path is made visible and Target is the actual Amazon S3 or Amazon EFS path. If you only specify a target, it is displayed as is. You also must ensure that your Amazon Web Services Identity and Access Management (IAM) role provides access to paths in Target. This value can only be set when HomeDirectoryType is set to LOGICAL. In most cases, you can use this value instead of the session policy to lock your user down to the designated home directory (\"chroot\"). To do this, you can set Entry to '/' and set Target to the HomeDirectory parameter value.",
- "UpdateAccessRequest$HomeDirectoryMappings": "Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry and Target pair, where Entry shows how the path is made visible and Target is the actual Amazon S3 or Amazon EFS path. If you only specify a target, it is displayed as is. You also must ensure that your Amazon Web Services Identity and Access Management (IAM) role provides access to paths in Target. This value can only be set when HomeDirectoryType is set to LOGICAL. The following is an Entry and Target pair example. [ { \"Entry\": \"/directory1\", \"Target\": \"/bucket_name/home/mydirectory\" } ] In most cases, you can use this value instead of the session policy to lock down your user to the designated home directory (\"chroot\"). To do this, you can set Entry to / and set Target to the HomeDirectory parameter value. The following is an Entry and Target pair example for chroot. [ { \"Entry:\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ] If the target of a logical directory entry does not exist in Amazon S3 or EFS, the entry is ignored. As a workaround, you can use the Amazon S3 API or EFS API to create 0 byte objects as place holders for your directory. If using the CLI, use the s3api or efsapi call instead of s3 or efs so you can use the put-object operation. For example, you use the following: aws s3api put-object --bucket bucketname --key path/to/folder/. Make sure that the end of the key name ends in a / for it to be considered a folder.",
- "UpdateUserRequest$HomeDirectoryMappings": "Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry and Target pair, where Entry shows how the path is made visible and Target is the actual Amazon S3 or Amazon EFS path. If you only specify a target, it is displayed as is. You also must ensure that your Amazon Web Services Identity and Access Management (IAM) role provides access to paths in Target. This value can only be set when HomeDirectoryType is set to LOGICAL. The following is an Entry and Target pair example. [ { \"Entry\": \"/directory1\", \"Target\": \"/bucket_name/home/mydirectory\" } ] In most cases, you can use this value instead of the session policy to lock down your user to the designated home directory (\"chroot\"). To do this, you can set Entry to '/' and set Target to the HomeDirectory parameter value. The following is an Entry and Target pair example for chroot. [ { \"Entry:\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ] If the target of a logical directory entry does not exist in Amazon S3 or EFS, the entry is ignored. As a workaround, you can use the Amazon S3 API or EFS API to create 0 byte objects as place holders for your directory. If using the CLI, use the s3api or efsapi call instead of s3 or efs so you can use the put-object operation. For example, you use the following: aws s3api put-object --bucket bucketname --key path/to/folder/. Make sure that the end of the key name ends in a / for it to be considered a folder."
+ "UpdateAccessRequest$HomeDirectoryMappings": "Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry and Target pair, where Entry shows how the path is made visible and Target is the actual Amazon S3 or Amazon EFS path. If you only specify a target, it is displayed as is. You also must ensure that your Amazon Web Services Identity and Access Management (IAM) role provides access to paths in Target. This value can only be set when HomeDirectoryType is set to LOGICAL. The following is an Entry and Target pair example. [ { \"Entry\": \"/directory1\", \"Target\": \"/bucket_name/home/mydirectory\" } ] In most cases, you can use this value instead of the session policy to lock down your user to the designated home directory (\"chroot\"). To do this, you can set Entry to / and set Target to the HomeDirectory parameter value. The following is an Entry and Target pair example for chroot. [ { \"Entry:\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ]",
+ "UpdateUserRequest$HomeDirectoryMappings": "Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry and Target pair, where Entry shows how the path is made visible and Target is the actual Amazon S3 or Amazon EFS path. If you only specify a target, it is displayed as is. You also must ensure that your Amazon Web Services Identity and Access Management (IAM) role provides access to paths in Target. This value can only be set when HomeDirectoryType is set to LOGICAL. The following is an Entry and Target pair example. [ { \"Entry\": \"/directory1\", \"Target\": \"/bucket_name/home/mydirectory\" } ] In most cases, you can use this value instead of the session policy to lock down your user to the designated home directory (\"chroot\"). To do this, you can set Entry to '/' and set Target to the HomeDirectory parameter value. The following is an Entry and Target pair example for chroot. [ { \"Entry:\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ]"
}
},
"HomeDirectoryType": {
@@ -510,7 +510,7 @@
"InputFileLocation": {
"base": "Specifies the location for the file being copied. Only applicable for the Copy type of workflow steps.",
"refs": {
- "CopyStepDetails$DestinationFileLocation": null
+ "CopyStepDetails$DestinationFileLocation": "Specifies the location for the file being copied. Only applicable for Copy type workflow steps. Use ${Transfer:username} in this field to parametrize the destination prefix by username."
}
},
"InternalServiceError": {
@@ -703,7 +703,7 @@
"InvalidRequestException$Message": null,
"ResourceExistsException$Message": null,
"ResourceNotFoundException$Message": null,
- "TestIdentityProviderResponse$Message": "A message that indicates whether the test was successful or not."
+ "TestIdentityProviderResponse$Message": "A message that indicates whether the test was successful or not. If an empty string is returned, the most likely cause is that the authentication failed due to an incorrect username or password."
}
},
"NextToken": {
@@ -1059,6 +1059,15 @@
"UserDetails$SessionId": "The system-assigned unique identifier for a session that corresponds to the workflow."
}
},
+ "SourceFileLocation": {
+ "base": null,
+ "refs": {
+ "CopyStepDetails$SourceFileLocation": "Specifies which file to use as input to the workflow step: either the output from the previous step, or the originally uploaded file for the workflow.",
+ "CustomStepDetails$SourceFileLocation": "Specifies which file to use as input to the workflow step: either the output from the previous step, or the originally uploaded file for the workflow.",
+ "DeleteStepDetails$SourceFileLocation": "Specifies which file to use as input to the workflow step: either the output from the previous step, or the originally uploaded file for the workflow.",
+ "TagStepDetails$SourceFileLocation": "Specifies which file to use as input to the workflow step: either the output from the previous step, or the originally uploaded file for the workflow.
" + } + }, "SourceIp": { "base": null, "refs": { diff --git a/models/endpoints/endpoints.json b/models/endpoints/endpoints.json index 54a395e30da..cfae92934a4 100644 --- a/models/endpoints/endpoints.json +++ b/models/endpoints/endpoints.json @@ -977,6 +977,94 @@ } } }, + "api.tunneling.iot" : { + "defaults" : { + "variants" : [ { + "hostname" : "api.tunneling.iot-fips.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "api.tunneling.iot-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "api.tunneling.iot-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "api.tunneling.iot-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "api.tunneling.iot-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "api.tunneling.iot-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "api.tunneling.iot-fips.us-west-2.amazonaws.com" + }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "api.tunneling.iot-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "api.tunneling.iot-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "api.tunneling.iot-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "api.tunneling.iot-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, "apigateway" : { "endpoints" : { "af-south-1" : { }, @@ -12851,6 +12939,12 @@ "cn-northwest-1" : { } } }, + "api.tunneling.iot" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, "apigateway" : { "endpoints" : { "cn-north-1" : { }, @@ -13852,6 +13946,12 @@ } }, "acm" : { + "defaults" : { + "variants" : [ { + "hostname" : "acm.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, "endpoints" : { "us-gov-east-1" : { "credentialScope" : { @@ -14055,6 +14155,42 @@ } } }, + "api.tunneling.iot" : { + "defaults" : { + "variants" : [ { + "hostname" : "api.tunneling.iot-fips.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "api.tunneling.iot-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "api.tunneling.iot-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "api.tunneling.iot-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : 
"api.tunneling.iot-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, "apigateway" : { "endpoints" : { "us-gov-east-1" : { }, diff --git a/service/lambda/api.go b/service/lambda/api.go index 3899a182285..dba65ef2896 100644 --- a/service/lambda/api.go +++ b/service/lambda/api.go @@ -6024,6 +6024,18 @@ func (c *Lambda) UpdateFunctionCodeRequest(input *UpdateFunctionCodeInput) (req // the code package must be signed by a trusted publisher. For more information, // see Configuring code signing (https://docs.aws.amazon.com/lambda/latest/dg/configuration-trustedcode.html). // +// If the function's package type is Image, you must specify the code package +// in ImageUri as the URI of a container image (https://docs.aws.amazon.com/lambda/latest/dg/lambda-images.html) +// in the Amazon ECR registry. +// +// If the function's package type is Zip, you must specify the deployment package +// as a .zip file archive (https://docs.aws.amazon.com/lambda/latest/dg/gettingstarted-package.html#gettingstarted-package-zip). +// Enter the Amazon S3 bucket and key of the code .zip file location. You can +// also provide the function code inline using the ZipFile field. +// +// The code in the deployment package must be compatible with the target instruction +// set architecture of the function (x86-64 or arm64). +// // The function's code is locked when you publish a version. You can't modify // the code of a published version, only the unpublished version. // @@ -17875,7 +17887,8 @@ type UpdateFunctionCodeInput struct { // FunctionName is a required field FunctionName *string `location:"uri" locationName:"FunctionName" min:"1" type:"string" required:"true"` - // URI of a container image in the Amazon ECR registry. + // URI of a container image in the Amazon ECR registry. Do not use for a function + // defined with a .zip file archive. ImageUri *string `type:"string"` // Set to true to publish a new version of the function after updating the code. @@ -17888,17 +17901,20 @@ type UpdateFunctionCodeInput struct { RevisionId *string `type:"string"` // An Amazon S3 bucket in the same Amazon Web Services Region as your function. - // The bucket can be in a different Amazon Web Services account. + // The bucket can be in a different Amazon Web Services account. Use only with + // a function defined with a .zip file archive deployment package. S3Bucket *string `min:"3" type:"string"` - // The Amazon S3 key of the deployment package. + // The Amazon S3 key of the deployment package. Use only with a function defined + // with a .zip file archive deployment package. S3Key *string `min:"1" type:"string"` // For versioned objects, the version of the deployment package object to use. S3ObjectVersion *string `min:"1" type:"string"` // The base64-encoded contents of the deployment package. Amazon Web Services - // SDK and Amazon Web Services CLI clients handle the encoding for you. + // SDK and Amazon Web Services CLI clients handle the encoding for you. Use + // only with a function defined with a .zip file archive deployment package. 
// // ZipFile is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by UpdateFunctionCodeInput's @@ -18815,6 +18831,9 @@ const ( // RuntimeDotnetcore31 is a Runtime enum value RuntimeDotnetcore31 = "dotnetcore3.1" + // RuntimeDotnet6 is a Runtime enum value + RuntimeDotnet6 = "dotnet6" + // RuntimeNodejs43Edge is a Runtime enum value RuntimeNodejs43Edge = "nodejs4.3-edge" @@ -18856,6 +18875,7 @@ func Runtime_Values() []string { RuntimeDotnetcore20, RuntimeDotnetcore21, RuntimeDotnetcore31, + RuntimeDotnet6, RuntimeNodejs43Edge, RuntimeGo1X, RuntimeRuby25, diff --git a/service/lambda/lambdaiface/interface.go b/service/lambda/lambdaiface/interface.go index 2271210d63f..bf1ea7cb361 100644 --- a/service/lambda/lambdaiface/interface.go +++ b/service/lambda/lambdaiface/interface.go @@ -325,11 +325,17 @@ type LambdaAPI interface { WaitUntilFunctionActive(*lambda.GetFunctionConfigurationInput) error WaitUntilFunctionActiveWithContext(aws.Context, *lambda.GetFunctionConfigurationInput, ...request.WaiterOption) error + WaitUntilFunctionActiveV2(*lambda.GetFunctionInput) error + WaitUntilFunctionActiveV2WithContext(aws.Context, *lambda.GetFunctionInput, ...request.WaiterOption) error + WaitUntilFunctionExists(*lambda.GetFunctionInput) error WaitUntilFunctionExistsWithContext(aws.Context, *lambda.GetFunctionInput, ...request.WaiterOption) error WaitUntilFunctionUpdated(*lambda.GetFunctionConfigurationInput) error WaitUntilFunctionUpdatedWithContext(aws.Context, *lambda.GetFunctionConfigurationInput, ...request.WaiterOption) error + + WaitUntilFunctionUpdatedV2(*lambda.GetFunctionInput) error + WaitUntilFunctionUpdatedV2WithContext(aws.Context, *lambda.GetFunctionInput, ...request.WaiterOption) error } var _ LambdaAPI = (*lambda.Lambda)(nil) diff --git a/service/lambda/waiters.go b/service/lambda/waiters.go index e9ec3d44482..411f5220421 100644 --- a/service/lambda/waiters.go +++ b/service/lambda/waiters.go @@ -65,6 +65,62 @@ func (c *Lambda) WaitUntilFunctionActiveWithContext(ctx aws.Context, input *GetF return w.WaitWithContext(ctx) } +// WaitUntilFunctionActiveV2 uses the AWS Lambda API operation +// GetFunction to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *Lambda) WaitUntilFunctionActiveV2(input *GetFunctionInput) error { + return c.WaitUntilFunctionActiveV2WithContext(aws.BackgroundContext(), input) +} + +// WaitUntilFunctionActiveV2WithContext is an extended version of WaitUntilFunctionActiveV2. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *Lambda) WaitUntilFunctionActiveV2WithContext(ctx aws.Context, input *GetFunctionInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilFunctionActiveV2", + MaxAttempts: 300, + Delay: request.ConstantWaiterDelay(1 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathWaiterMatch, Argument: "Configuration.State", + Expected: "Active", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathWaiterMatch, Argument: "Configuration.State", + Expected: "Failed", + }, + { + State: request.RetryWaiterState, + Matcher: request.PathWaiterMatch, Argument: "Configuration.State", + Expected: "Pending", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *GetFunctionInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetFunctionRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + // WaitUntilFunctionExists uses the AWS Lambda API operation // GetFunction to wait for a condition to be met before returning. // If the condition is not met within the max attempt window, an error will @@ -171,3 +227,59 @@ func (c *Lambda) WaitUntilFunctionUpdatedWithContext(ctx aws.Context, input *Get return w.WaitWithContext(ctx) } + +// WaitUntilFunctionUpdatedV2 uses the AWS Lambda API operation +// GetFunction to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *Lambda) WaitUntilFunctionUpdatedV2(input *GetFunctionInput) error { + return c.WaitUntilFunctionUpdatedV2WithContext(aws.BackgroundContext(), input) +} + +// WaitUntilFunctionUpdatedV2WithContext is an extended version of WaitUntilFunctionUpdatedV2. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Lambda) WaitUntilFunctionUpdatedV2WithContext(ctx aws.Context, input *GetFunctionInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilFunctionUpdatedV2", + MaxAttempts: 300, + Delay: request.ConstantWaiterDelay(1 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathWaiterMatch, Argument: "Configuration.LastUpdateStatus", + Expected: "Successful", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathWaiterMatch, Argument: "Configuration.LastUpdateStatus", + Expected: "Failed", + }, + { + State: request.RetryWaiterState, + Matcher: request.PathWaiterMatch, Argument: "Configuration.LastUpdateStatus", + Expected: "InProgress", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *GetFunctionInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetFunctionRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) 
+ + return w.WaitWithContext(ctx) +} diff --git a/service/textract/api.go b/service/textract/api.go index 46335448397..8aaf3eb63a5 100644 --- a/service/textract/api.go +++ b/service/textract/api.go @@ -5332,6 +5332,12 @@ const ( // BlockTypeSelectionElement is a BlockType enum value BlockTypeSelectionElement = "SELECTION_ELEMENT" + + // BlockTypeMergedCell is a BlockType enum value + BlockTypeMergedCell = "MERGED_CELL" + + // BlockTypeTitle is a BlockType enum value + BlockTypeTitle = "TITLE" ) // BlockType_Values returns all elements of the BlockType enum @@ -5344,6 +5350,8 @@ func BlockType_Values() []string { BlockTypeTable, BlockTypeCell, BlockTypeSelectionElement, + BlockTypeMergedCell, + BlockTypeTitle, } } @@ -5369,6 +5377,9 @@ const ( // EntityTypeValue is a EntityType enum value EntityTypeValue = "VALUE" + + // EntityTypeColumnHeader is a EntityType enum value + EntityTypeColumnHeader = "COLUMN_HEADER" ) // EntityType_Values returns all elements of the EntityType enum @@ -5376,6 +5387,7 @@ func EntityType_Values() []string { return []string{ EntityTypeKey, EntityTypeValue, + EntityTypeColumnHeader, } } @@ -5428,6 +5440,12 @@ const ( // RelationshipTypeComplexFeatures is a RelationshipType enum value RelationshipTypeComplexFeatures = "COMPLEX_FEATURES" + + // RelationshipTypeMergedCell is a RelationshipType enum value + RelationshipTypeMergedCell = "MERGED_CELL" + + // RelationshipTypeTitle is a RelationshipType enum value + RelationshipTypeTitle = "TITLE" ) // RelationshipType_Values returns all elements of the RelationshipType enum @@ -5436,6 +5454,8 @@ func RelationshipType_Values() []string { RelationshipTypeValue, RelationshipTypeChild, RelationshipTypeComplexFeatures, + RelationshipTypeMergedCell, + RelationshipTypeTitle, } } diff --git a/service/transfer/api.go b/service/transfer/api.go index 15b88690cff..9462d16cb61 100644 --- a/service/transfer/api.go +++ b/service/transfer/api.go @@ -3677,8 +3677,9 @@ func (s *ConflictException) RequestID() string { type CopyStepDetails struct { _ struct{} `type:"structure"` - // Specifies the location for the file being copied. Only applicable for the - // Copy type of workflow steps. + // Specifies the location for the file being copied. Only applicable for Copy + // type workflow steps. Use ${Transfer:username} in this field to parametrize + // the destination prefix by username. DestinationFileLocation *InputFileLocation `type:"structure"` // The name of the step, used as an identifier. @@ -3687,6 +3688,17 @@ type CopyStepDetails struct { // A flag that indicates whether or not to overwrite an existing file of the // same name. The default is FALSE. OverwriteExisting *string `type:"string" enum:"OverwriteExisting"` + + // Specifies which file to use as input to the workflow step: either the output + // from the previous step, or the originally uploaded file for the workflow. + // + // * Enter ${previous.file} to use the previous file as the input. In this + // case, this workflow step uses the output file from the previous workflow + // step as input. This is the default value. + // + // * Enter ${original.file} to use the originally-uploaded file location + // as input for this step. + SourceFileLocation *string `type:"string"` } // String returns the string representation. @@ -3740,6 +3752,12 @@ func (s *CopyStepDetails) SetOverwriteExisting(v string) *CopyStepDetails { return s } +// SetSourceFileLocation sets the SourceFileLocation field's value. 
+func (s *CopyStepDetails) SetSourceFileLocation(v string) *CopyStepDetails { + s.SourceFileLocation = &v + return s +} + type CreateAccessInput struct { _ struct{} `type:"structure"` @@ -3788,14 +3806,6 @@ type CreateAccessInput struct { // The following is an Entry and Target pair example for chroot. // // [ { "Entry:": "/", "Target": "/bucket_name/home/mydirectory" } ] - // - // If the target of a logical directory entry does not exist in Amazon S3 or - // EFS, the entry is ignored. As a workaround, you can use the Amazon S3 API - // or EFS API to create 0 byte objects as place holders for your directory. - // If using the CLI, use the s3api or efsapi call instead of s3 or efs so you - // can use the put-object operation. For example, you use the following: aws - // s3api put-object --bucket bucketname --key path/to/folder/. Make sure that - // the end of the key name ends in a / for it to be considered a folder. HomeDirectoryMappings []*HomeDirectoryMapEntry `min:"1" type:"list"` // The type of landing directory (folder) you want your users' home directory @@ -4388,14 +4398,6 @@ type CreateUserInput struct { // The following is an Entry and Target pair example for chroot. // // [ { "Entry:": "/", "Target": "/bucket_name/home/mydirectory" } ] - // - // If the target of a logical directory entry does not exist in Amazon S3 or - // EFS, the entry is ignored. As a workaround, you can use the Amazon S3 API - // or EFS API to create 0 byte objects as place holders for your directory. - // If using the CLI, use the s3api or efsapi call instead of s3 or efs so you - // can use the put-object operation. For example, you use the following: aws - // s3api put-object --bucket bucketname --key path/to/folder/. Make sure that - // the end of the key name ends in a / for it to be considered a folder. HomeDirectoryMappings []*HomeDirectoryMapEntry `min:"1" type:"list"` // The type of landing directory (folder) you want your users' home directory @@ -4817,6 +4819,17 @@ type CustomStepDetails struct { // The name of the step, used as an identifier. Name *string `type:"string"` + // Specifies which file to use as input to the workflow step: either the output + // from the previous step, or the originally uploaded file for the workflow. + // + // * Enter ${previous.file} to use the previous file as the input. In this + // case, this workflow step uses the output file from the previous workflow + // step as input. This is the default value. + // + // * Enter ${original.file} to use the originally-uploaded file location + // as input for this step. + SourceFileLocation *string `type:"string"` + // The ARN for the lambda function that is being called. Target *string `type:"string"` @@ -4861,6 +4874,12 @@ func (s *CustomStepDetails) SetName(v string) *CustomStepDetails { return s } +// SetSourceFileLocation sets the SourceFileLocation field's value. +func (s *CustomStepDetails) SetSourceFileLocation(v string) *CustomStepDetails { + s.SourceFileLocation = &v + return s +} + // SetTarget sets the Target field's value. func (s *CustomStepDetails) SetTarget(v string) *CustomStepDetails { s.Target = &v @@ -5158,6 +5177,17 @@ type DeleteStepDetails struct { // The name of the step, used as an identifier. Name *string `type:"string"` + + // Specifies which file to use as input to the workflow step: either the output + // from the previous step, or the originally uploaded file for the workflow. + // + // * Enter ${previous.file} to use the previous file as the input. 
In this + // case, this workflow step uses the output file from the previous workflow + // step as input. This is the default value. + // + // * Enter ${original.file} to use the originally-uploaded file location + // as input for this step. + SourceFileLocation *string `type:"string"` } // String returns the string representation. @@ -5184,6 +5214,12 @@ func (s *DeleteStepDetails) SetName(v string) *DeleteStepDetails { return s } +// SetSourceFileLocation sets the SourceFileLocation field's value. +func (s *DeleteStepDetails) SetSourceFileLocation(v string) *DeleteStepDetails { + s.SourceFileLocation = &v + return s +} + type DeleteUserInput struct { _ struct{} `type:"structure"` @@ -7105,14 +7141,6 @@ func (s *FileLocation) SetS3FileLocation(v *S3FileLocation) *FileLocation { // The following is an Entry and Target pair example for chroot. // // [ { "Entry:": "/", "Target": "/bucket_name/home/mydirectory" } ] -// -// If the target of a logical directory entry does not exist in Amazon S3 or -// EFS, the entry is ignored. As a workaround, you can use the Amazon S3 API -// or EFS API to create 0 byte objects as place holders for your directory. -// If using the CLI, use the s3api or efsapi call instead of s3 or efs so you -// can use the put-object operation. For example, you use the following: aws -// s3api put-object --bucket bucketname --key path/to/folder/. Make sure that -// the end of the key name ends in a / for it to be considered a folder. type HomeDirectoryMapEntry struct { _ struct{} `type:"structure"` @@ -10068,6 +10096,17 @@ type TagStepDetails struct { // The name of the step, used as an identifier. Name *string `type:"string"` + // Specifies which file to use as input to the workflow step: either the output + // from the previous step, or the originally uploaded file for the workflow. + // + // * Enter ${previous.file} to use the previous file as the input. In this + // case, this workflow step uses the output file from the previous workflow + // step as input. This is the default value. + // + // * Enter ${original.file} to use the originally-uploaded file location + // as input for this step. + SourceFileLocation *string `type:"string"` + // Array that contains from 1 to 10 key/value pairs. Tags []*S3Tag `min:"1" type:"list"` } @@ -10119,6 +10158,12 @@ func (s *TagStepDetails) SetName(v string) *TagStepDetails { return s } +// SetSourceFileLocation sets the SourceFileLocation field's value. +func (s *TagStepDetails) SetSourceFileLocation(v string) *TagStepDetails { + s.SourceFileLocation = &v + return s +} + // SetTags sets the Tags field's value. func (s *TagStepDetails) SetTags(v []*S3Tag) *TagStepDetails { s.Tags = v @@ -10235,6 +10280,9 @@ type TestIdentityProviderOutput struct { _ struct{} `type:"structure"` // A message that indicates whether the test was successful or not. + // + // If an empty string is returned, the most likely cause is that the authentication + // failed due to an incorrect username or password. Message *string `type:"string"` // The response that is returned from your API Gateway. @@ -10501,14 +10549,6 @@ type UpdateAccessInput struct { // The following is an Entry and Target pair example for chroot. // // [ { "Entry:": "/", "Target": "/bucket_name/home/mydirectory" } ] - // - // If the target of a logical directory entry does not exist in Amazon S3 or - // EFS, the entry is ignored. As a workaround, you can use the Amazon S3 API - // or EFS API to create 0 byte objects as place holders for your directory. 
- // If using the CLI, use the s3api or efsapi call instead of s3 or efs so you - // can use the put-object operation. For example, you use the following: aws - // s3api put-object --bucket bucketname --key path/to/folder/. Make sure that - // the end of the key name ends in a / for it to be considered a folder. HomeDirectoryMappings []*HomeDirectoryMapEntry `min:"1" type:"list"` // The type of landing directory (folder) you want your users' home directory @@ -11047,14 +11087,6 @@ type UpdateUserInput struct { // The following is an Entry and Target pair example for chroot. // // [ { "Entry:": "/", "Target": "/bucket_name/home/mydirectory" } ] - // - // If the target of a logical directory entry does not exist in Amazon S3 or - // EFS, the entry is ignored. As a workaround, you can use the Amazon S3 API - // or EFS API to create 0 byte objects as place holders for your directory. - // If using the CLI, use the s3api or efsapi call instead of s3 or efs so you - // can use the put-object operation. For example, you use the following: aws - // s3api put-object --bucket bucketname --key path/to/folder/. Make sure that - // the end of the key name ends in a / for it to be considered a folder. HomeDirectoryMappings []*HomeDirectoryMapEntry `min:"1" type:"list"` // The type of landing directory (folder) you want your users' home directory
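
To make the new Transfer workflow field concrete, here is a minimal sketch (not generated SDK code) of a CreateWorkflow call that archives the originally uploaded file and then tags the copy it produced; the bucket name, key prefix, step names, and description are placeholders, and the ${original.file} / ${previous.file} values are the SourceFileLocation settings documented in this patch.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/transfer"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := transfer.New(sess)

	out, err := svc.CreateWorkflow(&transfer.CreateWorkflowInput{
		Description: aws.String("archive original upload, then tag the copy"), // placeholder
		Steps: []*transfer.WorkflowStep{
			{
				// Copy the file as it was originally uploaded, leaving the source intact.
				Type: aws.String(transfer.WorkflowStepTypeCopy),
				CopyStepDetails: &transfer.CopyStepDetails{
					Name:               aws.String("archive-original"),
					SourceFileLocation: aws.String("${original.file}"),
					DestinationFileLocation: &transfer.InputFileLocation{
						S3FileLocation: &transfer.S3InputFileLocation{
							Bucket: aws.String("example-archive-bucket"), // placeholder bucket
							Key:    aws.String("archive/"),
						},
					},
				},
			},
			{
				// Tag the output of the previous step; ${previous.file} is the default input.
				Type: aws.String(transfer.WorkflowStepTypeTag),
				TagStepDetails: &transfer.TagStepDetails{
					Name:               aws.String("mark-archived"),
					SourceFileLocation: aws.String("${previous.file}"),
					Tags: []*transfer.S3Tag{
						{Key: aws.String("archived"), Value: aws.String("true")},
					},
				},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("created workflow", aws.StringValue(out.WorkflowId))
}
```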