diff --git a/codegen/sdk-codegen/aws-models/glue.json b/codegen/sdk-codegen/aws-models/glue.json index d40a524e23f..139eed251d2 100644 --- a/codegen/sdk-codegen/aws-models/glue.json +++ b/codegen/sdk-codegen/aws-models/glue.json @@ -6986,6 +6986,12 @@ "traits": { "smithy.api#documentation": "
Specifies Apache Iceberg data store targets.
" } + }, + "HudiTargets": { + "target": "com.amazonaws.glue#HudiTargetList", + "traits": { + "smithy.api#documentation": "Specifies Apache Hudi data store targets.
" + } } }, "traits": { @@ -8123,7 +8129,7 @@ "WorkerType": { "target": "com.amazonaws.glue#WorkerType", "traits": { - "smithy.api#documentation": "The type of predefined worker that is allocated when a job runs. Accepts a value of\n Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.
\nFor the Standard
worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X
worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X
worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.025X
worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
For the Z.2X
worker type, each worker maps to 2 M-DPU (8vCPU, 64 GB of memory, 128 GB disk), and provides up to 8 Ray workers based on the\n autoscaler.
The type of predefined worker that is allocated when a job runs. Accepts a value of\n G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.
\nFor the G.1X
worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost effective way to run most jobs.
For the G.2X
worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost effective way to run most jobs.
For the G.4X
worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).
For the G.8X
worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X
worker type.
For the G.025X
worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
For the Z.2X
worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.
The type of predefined worker that is allocated for use with the session. Accepts a value of Standard, G.1X, G.2X, or G.025X.
\nFor the Standard
worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X
worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X
worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.025X
worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
The type of predefined worker that is allocated when a job runs. Accepts a value of\n G.1X, G.2X, G.4X, or G.8X for Spark jobs. Accepts the value Z.2X for Ray notebooks.
\nFor the G.1X
worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost effective way to run most jobs.
For the G.2X
worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost effective way to run most jobs.
For the G.4X
worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).
For the G.8X
worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X
worker type.
For the Z.2X
worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.
An array of Amazon S3 location strings for Hudi, each indicating the root folder with which the metadata files for a Hudi table resides. The Hudi folder may be located in a child folder of the root folder.
\nThe crawler will scan all folders underneath a path for a Hudi folder.
" + } + }, + "ConnectionName": { + "target": "com.amazonaws.glue#ConnectionName", + "traits": { + "smithy.api#documentation": "The name of the connection to use to connect to the Hudi target. If your Hudi files are stored in buckets that require VPC authorization, you can set their connection properties here.
" + } + }, + "Exclusions": { + "target": "com.amazonaws.glue#PathList", + "traits": { + "smithy.api#documentation": "A list of glob patterns used to exclude from the crawl.\n For more information, see Catalog Tables with a Crawler.
" + } + }, + "MaximumTraversalDepth": { + "target": "com.amazonaws.glue#NullableInteger", + "traits": { + "smithy.api#documentation": "The maximum depth of Amazon S3 paths that the crawler can traverse to discover the Hudi metadata folder in your Amazon S3 path. Used to limit the crawler run time.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Specifies an Apache Hudi data source.
" + } + }, "com.amazonaws.glue#HudiTargetCompressionType": { "type": "enum", "members": { @@ -19770,6 +19808,12 @@ } } }, + "com.amazonaws.glue#HudiTargetList": { + "type": "list", + "member": { + "target": "com.amazonaws.glue#HudiTarget" + } + }, "com.amazonaws.glue#IcebergInput": { "type": "structure", "members": { @@ -20663,7 +20707,7 @@ "WorkerType": { "target": "com.amazonaws.glue#WorkerType", "traits": { - "smithy.api#documentation": "The type of predefined worker that is allocated when a job runs. Accepts a value of\n Standard, G.1X, G.2X, G.4X, G.8X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.
\nFor the Standard
worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X
worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost effective way to run most jobs.
For the G.2X
worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost effective way to run most jobs.
For the G.4X
worker type, each worker maps to 4 DPU (16 vCPU, 64 GB of memory, 256 GB disk), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).
For the G.8X
worker type, each worker maps to 8 DPU (32 vCPU, 128 GB of memory, 512 GB disk), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X
worker type.
For the G.025X
worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
For the Z.2X
worker type, each worker maps to 2 M-DPU (8vCPU, 64 GB of memory, 128 GB disk), and provides a default of 8 Ray workers (1 per vCPU).
The type of predefined worker that is allocated when a job runs. Accepts a value of\n G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.
\nFor the G.1X
worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost effective way to run most jobs.
For the G.2X
worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost effective way to run most jobs.
For the G.4X
worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).
For the G.8X
worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X
worker type.
For the G.025X
worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
For the Z.2X
worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.
The type of predefined worker that is allocated when a job runs. Accepts a value of\n Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.
\nFor the Standard
worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X
worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X
worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.025X
worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
For the Z.2X
worker type, each worker maps to 2 M-DPU (8vCPU, 64 GB of memory, 128 GB disk), and provides up to 8 Ray workers (one per vCPU) based on the\n autoscaler.
The type of predefined worker that is allocated when a job runs. Accepts a value of\n G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.
\nFor the G.1X
worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost effective way to run most jobs.
For the G.2X
worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost effective way to run most jobs.
For the G.4X
worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).
For the G.8X
worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X
worker type.
For the G.025X
worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
For the Z.2X
worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.
The type of predefined worker that is allocated when a job runs. Accepts a value of\n Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.
\nFor the Standard
worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X
worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X
worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.025X
worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
For the Z.2X
worker type, each worker maps to 2 M-DPU (8vCPU, 64 GB of memory, 128 GB disk), and provides up to 8 Ray workers based on the\n autoscaler.
The type of predefined worker that is allocated when a job runs. Accepts a value of\n G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.
\nFor the G.1X
worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost effective way to run most jobs.
For the G.2X
worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost effective way to run most jobs.
For the G.4X
worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).
For the G.8X
worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X
worker type.
For the G.025X
worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
For the Z.2X
worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.
The type of predefined worker that is allocated when a job runs. Accepts a value of\n Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.
\nFor the Standard
worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X
worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X
worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.025X
worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
For the Z.2X
worker type, each worker maps to 2 M-DPU (8vCPU, 64 GB of memory, 128 GB disk), and provides up to 8 Ray workers (one per vCPU) based on the\n autoscaler.
The type of predefined worker that is allocated when a job runs. Accepts a value of\n G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.
\nFor the G.1X
worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost effective way to run most jobs.
For the G.2X
worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost effective way to run most jobs.
For the G.4X
worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).
For the G.8X
worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X
worker type.
For the G.025X
worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
For the Z.2X
worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.
The system-generated identifier of the blue/green deployment.
" + "smithy.api#documentation": "The unique identifier of the blue/green deployment.
" } }, "BlueGreenDeploymentName": { @@ -2337,7 +2337,7 @@ "Status": { "target": "com.amazonaws.rds#BlueGreenDeploymentStatus", "traits": { - "smithy.api#documentation": "The status of the blue/green deployment.
\nValues:
\n\n PROVISIONING
- Resources are being created in the green environment.
\n AVAILABLE
- Resources are available in the green environment.
\n SWITCHOVER_IN_PROGRESS
- The deployment is being switched from the blue environment to the \n green environment.
\n SWITCHOVER_COMPLETED
- Switchover from the blue environment to the green environment is complete.
\n INVALID_CONFIGURATION
- Resources in the green environment are invalid, so switchover isn't possible.
\n SWITCHOVER_FAILED
- Switchover was attempted but failed.
\n DELETING
- The blue/green deployment is being deleted.
The status of the blue/green deployment.
\nValid Values:
\n\n PROVISIONING
- Resources are being created in the green environment.
\n AVAILABLE
- Resources are available in the green environment.
\n SWITCHOVER_IN_PROGRESS
- The deployment is being switched from the blue environment to the \n green environment.
\n SWITCHOVER_COMPLETED
- Switchover from the blue environment to the green environment is complete.
\n INVALID_CONFIGURATION
- Resources in the green environment are invalid, so switchover isn't possible.
\n SWITCHOVER_FAILED
- Switchover was attempted but failed.
\n DELETING
- The blue/green deployment is being deleted.
Specifies the time when the blue/green deployment was created, in Universal Coordinated Time (UTC).
" + "smithy.api#documentation": "The time when the blue/green deployment was created, in Universal Coordinated Time\n (UTC).
" } }, "DeleteTime": { "target": "com.amazonaws.rds#TStamp", "traits": { - "smithy.api#documentation": "Specifies the time when the blue/green deployment was deleted, in Universal Coordinated Time (UTC).
" + "smithy.api#documentation": "The time when the blue/green deployment was deleted, in Universal Coordinated Time\n (UTC).
" } }, "TagList": { @@ -2363,7 +2363,7 @@ } }, "traits": { - "smithy.api#documentation": "Contains the details about a blue/green deployment.
\nFor more information, see Using Amazon RDS Blue/Green Deployments \n for database updates in the Amazon RDS User Guide and \n \n Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora \n User Guide.
" + "smithy.api#documentation": "Details about a blue/green deployment.
\nFor more information, see Using Amazon RDS\n Blue/Green Deployments for database updates in the Amazon RDS User\n Guide and Using Amazon RDS\n Blue/Green Deployments for database updates in the Amazon Aurora\n User Guide.
" } }, "com.amazonaws.rds#BlueGreenDeploymentAlreadyExistsFault": { @@ -2444,12 +2444,12 @@ "Status": { "target": "com.amazonaws.rds#BlueGreenDeploymentTaskStatus", "traits": { - "smithy.api#documentation": "The status of the blue/green deployment task.
\nValues:
\n\n PENDING
- The resources are being prepared for deployment.
\n IN_PROGRESS
- The resource is being deployed.
\n COMPLETED
- The resource has been deployed.
\n FAILED
- Deployment of the resource failed.
The status of the blue/green deployment task.
\nValid Values:
\n\n PENDING
- The resource is being prepared for deployment.
\n IN_PROGRESS
- The resource is being deployed.
\n COMPLETED
- The resource has been deployed.
\n FAILED
- Deployment of the resource failed.
Contains the details about a task for a blue/green deployment.
\nFor more information, see Using Amazon RDS Blue/Green Deployments \n for database updates in the Amazon RDS User Guide and \n \n Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora \n User Guide.
" + "smithy.api#documentation": "Details about a task for a blue/green deployment.
\nFor more information, see Using Amazon RDS\n Blue/Green Deployments for database updates in the Amazon RDS User\n Guide and Using Amazon RDS\n Blue/Green Deployments for database updates in the Amazon Aurora\n User Guide.
" } }, "com.amazonaws.rds#BlueGreenDeploymentTaskList": { @@ -4116,7 +4116,7 @@ "DBName": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "The meaning of this parameter differs depending on the database engine.
\nThe name of the database to create when the primary DB instance of the Aurora MySQL DB cluster is\n created. If you don't specify a value, Amazon RDS doesn't create a database in the DB cluster.
\nConstraints:
\nMust contain 1 to 64 alphanumeric characters.
\nCan't be a word reserved by the database engine.
\nThe name of the database to create when the primary DB instance of the Aurora PostgreSQL DB cluster is\n created.
\nDefault: postgres
\n
Constraints:
\nMust contain 1 to 63 alphanumeric characters.
\nMust begin with a letter.\n Subsequent characters can be letters, underscores, or digits\n (0 to 9).
\nCan't be a word reserved by the database engine.
\nThe Oracle System ID (SID) of the created RDS Custom DB instance.
\nDefault: ORCL
\n
Constraints:
\nMust contain 1 to 8 alphanumeric characters.
\nMust contain a letter.
\nCan't be a word reserved by the database engine.
\nNot applicable. Must be null.
\nThe name of the database to create when the DB instance is created. If you don't specify a value, Amazon RDS doesn't create a database in the DB instance.
\nConstraints:
\nMust contain 1 to 64 letters or numbers.
\nMust begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).
\nCan't be a word reserved by the database engine.
\nThe name of the database to create when the DB instance is created. If you don't specify a value, Amazon RDS doesn't create a database in the DB instance.
\nConstraints:
\nMust contain 1 to 64 letters or numbers.
\nMust begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).
\nCan't be a word reserved by the database engine.
\nThe Oracle System ID (SID) of the created DB instance.
\nDefault: ORCL
\n
Constraints:
\nCan't be longer than 8 characters.
\nCan't be a word reserved by the database engine, such as the string NULL
.
The name of the database to create when the DB instance is created.
\nDefault: postgres
\n
Constraints:
\nMust contain 1 to 63 letters, numbers, or underscores.
\nMust begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).
\nCan't be a word reserved by the database engine.
\nNot applicable. Must be null.
\nThe meaning of this parameter differs according to the database engine you use.
\n\n MySQL\n
\nThe name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance.
\nConstraints:
\nMust contain 1 to 64 letters or numbers.
\nMust begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).
\nCan't be a word reserved by the specified database engine
\n\n MariaDB\n
\nThe name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance.
\nConstraints:
\nMust contain 1 to 64 letters or numbers.
\nMust begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).
\nCan't be a word reserved by the specified database engine
\n\n PostgreSQL\n
\nThe name of the database to create when the DB instance is created. If this parameter isn't specified, a database named postgres
\n is created in the DB instance.
Constraints:
\nMust contain 1 to 63 letters, numbers, or underscores.
\nMust begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).
\nCan't be a word reserved by the specified database engine
\n\n Oracle\n
\nThe Oracle System ID (SID) of the created DB instance. If you don't specify a value, \n the default value is ORCL
. You can't specify the \n string null
, or any other reserved word, for DBName
.
Default: ORCL
\n
Constraints:
\nCan't be longer than 8 characters
\n\n Amazon RDS Custom for Oracle\n
\nThe Oracle System ID (SID) of the created RDS Custom DB instance.\n If you don't specify a value, the default value is ORCL
for non-CDBs and\n RDSCDB
for CDBs.
Default: ORCL
\n
Constraints:
\nIt must contain 1 to 8 alphanumeric characters.
\nIt must contain a letter.
\nIt can't be a word reserved by the database engine.
\n\n Amazon RDS Custom for SQL Server\n
\nNot applicable. Must be null.
\n\n SQL Server\n
\nNot applicable. Must be null.
\n\n Amazon Aurora MySQL\n
\nThe name of the database to create when the primary DB instance of the Aurora MySQL DB cluster is\n created. If this parameter isn't specified for an Aurora MySQL DB cluster, no database is created \n in the DB cluster.
\nConstraints:
\nIt must contain 1 to 64 alphanumeric characters.
\nIt can't be a word reserved by the database engine.
\n\n Amazon Aurora PostgreSQL\n
\nThe name of the database to create when the primary DB instance of the Aurora PostgreSQL DB cluster is\n created. If this parameter isn't specified for an Aurora PostgreSQL DB cluster, \n a database named postgres
is created in the DB cluster.
Constraints:
\nIt must contain 1 to 63 alphanumeric characters.
\nIt must begin with a letter.\n Subsequent characters can be letters, underscores, or digits\n (0 to 9).
\nIt can't be a word reserved by the\n database engine.
\nThe CA certificate identifier to use for the DB instance's server certificate.
\nThis setting doesn't apply to RDS Custom DB instances.
\nFor more information, see Using SSL/TLS to encrypt a connection to a DB \n instance in the Amazon RDS User Guide and \n \n Using SSL/TLS to encrypt a connection to a DB cluster in the Amazon Aurora \n User Guide.
" } + }, + "DBSystemId": { + "target": "com.amazonaws.rds#String", + "traits": { + "smithy.api#documentation": "The Oracle system identifier (SID), which is the name of the Oracle database instance that \n manages your database files. In this context, the term \"Oracle database instance\" refers exclusively \n to the system global area (SGA) and Oracle background processes. If you don't specify a SID, \n the value defaults to RDSCDB
. The Oracle SID is also the name of your CDB.
The meaning of this parameter differs depending on the database engine.
\nFor RDS for MariaDB, Microsoft SQL Server, MySQL, and PostgreSQL - The name of the initial database specified for this DB instance when it was created, if one was provided. This same name is returned for the life of the DB instance.
\nFor RDS for Oracle - The Oracle System ID (SID) of the created DB instance. This value is only returned when the object returned is an Oracle DB instance.
\nContains the initial database name that you provided (if required) when you created\n the DB instance. This name is returned for the life of your DB instance. For an RDS for\n Oracle CDB instance, the name identifies the PDB rather than the CDB.
" } }, "Endpoint": { @@ -9371,6 +9377,12 @@ "traits": { "smithy.api#documentation": "Specifies the storage throughput for the DB snapshot.
" } + }, + "DBSystemId": { + "target": "com.amazonaws.rds#String", + "traits": { + "smithy.api#documentation": "The Oracle system identifier (SID), which is the name of the Oracle database instance that \n manages your database files. The Oracle SID is also the name of your CDB.
" + } } }, "traits": { @@ -9714,7 +9726,7 @@ } ], "traits": { - "smithy.api#documentation": "Deletes a blue/green deployment.
\nFor more information, see Using Amazon RDS Blue/Green Deployments \n for database updates in the Amazon RDS User Guide and \n \n Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora \n User Guide.
" + "smithy.api#documentation": "Deletes a blue/green deployment.
\nFor more information, see Using Amazon RDS\n Blue/Green Deployments for database updates in the Amazon RDS User\n Guide and Using Amazon RDS\n Blue/Green Deployments for database updates in the Amazon Aurora\n User Guide.
" } }, "com.amazonaws.rds#DeleteBlueGreenDeploymentRequest": { @@ -9723,14 +9735,14 @@ "BlueGreenDeploymentIdentifier": { "target": "com.amazonaws.rds#BlueGreenDeploymentIdentifier", "traits": { - "smithy.api#documentation": "The blue/green deployment identifier of the deployment to be deleted. This parameter isn't case-sensitive.
\nConstraints:\n
\nMust match an existing blue/green deployment identifier.
\nThe unique identifier of the blue/green deployment to delete. This parameter isn't\n case-sensitive.
\nConstraints:\n
\nMust match an existing blue/green deployment identifier.
\nA value that indicates whether to delete the resources in the green environment. You\n can't specify this option if the blue/green deployment status\n is SWITCHOVER_COMPLETED
.
Specifies whether to delete the resources in the green environment. You can't specify\n this option if the blue/green deployment status is\n SWITCHOVER_COMPLETED
.
Returns information about blue/green deployments.
\nFor more information, see Using Amazon RDS Blue/Green Deployments \n for database updates in the Amazon RDS User Guide and \n \n Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora \n User Guide.
", + "smithy.api#documentation": "Describes one or more blue/green deployments.
\nFor more information, see Using Amazon RDS Blue/Green Deployments \n for database updates in the Amazon RDS User Guide and \n \n Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora \n User Guide.
", "smithy.api#paginated": { "inputToken": "Marker", "outputToken": "Marker", @@ -10590,25 +10602,25 @@ "BlueGreenDeploymentIdentifier": { "target": "com.amazonaws.rds#BlueGreenDeploymentIdentifier", "traits": { - "smithy.api#documentation": "The blue/green deployment identifier. If this parameter is specified, information from only the \n specific blue/green deployment is returned. This parameter isn't case-sensitive.
\nConstraints:
\nIf supplied, must match an existing blue/green deployment identifier.
\nThe blue/green deployment identifier. If you specify this parameter, the response only\n includes information about the specific blue/green deployment. This parameter isn't\n case-sensitive.
\nConstraints:
\nMust match an existing blue/green deployment identifier.
\nA filter that specifies one or more blue/green deployments to describe.
\nSupported filters:
\n\n blue-green-deployment-identifier
- Accepts system-generated\n identifiers for blue/green deployments. The results list only includes\n information about the blue/green deployments with the specified\n identifiers.
\n blue-green-deployment-name
- Accepts user-supplied names for blue/green deployments. \n The results list only includes information about the blue/green deployments with the \n specified names.
\n source
- Accepts source databases for a blue/green deployment. \n The results list only includes information about the blue/green deployments with \n the specified source databases.
\n target
- Accepts target databases for a blue/green deployment. \n The results list only includes information about the blue/green deployments with \n the specified target databases.
A filter that specifies one or more blue/green deployments to describe.
\nValid Values:
\n\n blue-green-deployment-identifier
- Accepts system-generated\n identifiers for blue/green deployments. The results list only includes\n information about the blue/green deployments with the specified\n identifiers.
\n blue-green-deployment-name
- Accepts user-supplied names for blue/green deployments. \n The results list only includes information about the blue/green deployments with the \n specified names.
\n source
- Accepts source databases for a blue/green deployment. \n The results list only includes information about the blue/green deployments with \n the specified source databases.
\n target
- Accepts target databases for a blue/green deployment. \n The results list only includes information about the blue/green deployments with \n the specified target databases.
An optional pagination token provided by a previous DescribeBlueGreenDeployments
request.\n If this parameter is specified, the response includes only records beyond the marker,\n up to the value specified by MaxRecords
.
An optional pagination token provided by a previous\n DescribeBlueGreenDeployments
request. If you specify this parameter,\n the response only includes records beyond the marker, up to the value specified by\n MaxRecords
.
The maximum number of records to include in the response.\n If more records exist than the specified MaxRecords
value,\n a pagination token called a marker is included in the response so you can retrieve the remaining results.
Default: 100
\nConstraints: Minimum 20, maximum 100.
" + "smithy.api#documentation": "The maximum number of records to include in the response.\n If more records exist than the specified MaxRecords
value,\n a pagination token called a marker is included in the response so you can retrieve the remaining results.
Default: 100
\nConstraints:
\nMust be a minimum of 20.
\nCan't exceed 100.
\nContains a list of blue/green deployments for the user.
" + "smithy.api#documentation": "A list of blue/green deployments in the current account and Amazon Web Services Region.
" } }, "Marker": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "A pagination token that can be used in a later DescribeBlueGreenDeployments request.
" + "smithy.api#documentation": "A pagination token that can be used in a later\n DescribeBlueGreenDeployments
request.
Switches over a blue/green deployment.
\nBefore you switch over, production traffic is routed to the databases in the blue environment. \n After you switch over, production traffic is routed to the databases in the green environment.
\nFor more information, see Using Amazon RDS Blue/Green Deployments \n for database updates in the Amazon RDS User Guide and \n \n Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora \n User Guide.
" + "smithy.api#documentation": "Switches over a blue/green deployment.
\nBefore you switch over, production traffic is routed to the databases in the blue environment. \n After you switch over, production traffic is routed to the databases in the green environment.
\nFor more information, see Using Amazon RDS\n Blue/Green Deployments for database updates in the Amazon RDS User\n Guide and Using Amazon RDS\n Blue/Green Deployments for database updates in the Amazon Aurora\n User Guide.
" } }, "com.amazonaws.rds#SwitchoverBlueGreenDeploymentRequest": { @@ -23122,14 +23134,14 @@ "BlueGreenDeploymentIdentifier": { "target": "com.amazonaws.rds#BlueGreenDeploymentIdentifier", "traits": { - "smithy.api#documentation": "The blue/green deployment identifier.
\nConstraints:
\nMust match an existing blue/green deployment identifier.
\nThe unique identifier of the blue/green deployment.
\nConstraints:
\nMust match an existing blue/green deployment identifier.
\nThe amount of time, in seconds, for the switchover to complete. The default is 300.
\nIf the switchover takes longer than the specified duration, then any changes are rolled back, \n and no changes are made to the environments.
" + "smithy.api#documentation": "The amount of time, in seconds, for the switchover to complete.
\nDefault: 300
\nIf the switchover takes longer than the specified duration, then any changes are rolled back, \n and no changes are made to the environments.
" } } }, diff --git a/codegen/sdk-codegen/aws-models/workspaces.json b/codegen/sdk-codegen/aws-models/workspaces.json index 7f6382ffb93..2750668137f 100644 --- a/codegen/sdk-codegen/aws-models/workspaces.json +++ b/codegen/sdk-codegen/aws-models/workspaces.json @@ -239,6 +239,9 @@ "smithy.api#documentation": "The identifier of the connection alias association. You use the connection identifier in\n the DNS TXT record when you're configuring your DNS routing policies.
" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.workspaces#AssociateIpGroups": { @@ -297,7 +300,10 @@ }, "com.amazonaws.workspaces#AssociateIpGroupsResult": { "type": "structure", - "members": {} + "members": {}, + "traits": { + "smithy.api#output": {} + } }, "com.amazonaws.workspaces#AssociationStatus": { "type": "enum", @@ -387,7 +393,10 @@ }, "com.amazonaws.workspaces#AuthorizeIpRulesResult": { "type": "structure", - "members": {} + "members": {}, + "traits": { + "smithy.api#output": {} + } }, "com.amazonaws.workspaces#AwsAccount": { "type": "string", @@ -1043,6 +1052,9 @@ "smithy.api#documentation": "The identifier of the image.
" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.workspaces#CreateConnectClientAddIn": { @@ -1112,6 +1124,9 @@ "smithy.api#documentation": "The client add-in identifier.
" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.workspaces#CreateConnectionAlias": { @@ -1176,6 +1191,9 @@ "smithy.api#documentation": "The identifier of the connection alias.
" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.workspaces#CreateIpGroup": { @@ -1249,6 +1267,9 @@ "smithy.api#documentation": "The identifier of the group.
" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.workspaces#CreateStandbyWorkspaces": { @@ -1317,6 +1338,9 @@ "smithy.api#documentation": "Information about the standby WorkSpace that was created.
" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.workspaces#CreateTags": { @@ -1366,7 +1390,10 @@ }, "com.amazonaws.workspaces#CreateTagsResult": { "type": "structure", - "members": {} + "members": {}, + "traits": { + "smithy.api#output": {} + } }, "com.amazonaws.workspaces#CreateUpdatedWorkspaceImage": { "type": "operation", @@ -1447,6 +1474,9 @@ "smithy.api#documentation": "The identifier of the new updated WorkSpace image.
" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.workspaces#CreateWorkspaceBundle": { @@ -1537,6 +1567,9 @@ "WorkspaceBundle": { "target": "com.amazonaws.workspaces#WorkspaceBundle" } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.workspaces#CreateWorkspaceImage": { @@ -1660,6 +1693,9 @@ "smithy.api#documentation": "The identifier of the Amazon Web Services account that owns the image.
" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.workspaces#CreateWorkspaces": { @@ -1712,6 +1748,9 @@ "smithy.api#documentation": "Information about the WorkSpaces that were created.
\nBecause this operation is asynchronous, the identifier returned is not immediately\n available for use with other operations. For example, if you call DescribeWorkspaces before the WorkSpace is created, the information returned\n can be incomplete.
" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.workspaces#DedicatedTenancyCidrRangeList": { @@ -1996,7 +2035,10 @@ }, "com.amazonaws.workspaces#DeleteClientBrandingResult": { "type": "structure", - "members": {} + "members": {}, + "traits": { + "smithy.api#output": {} + } }, "com.amazonaws.workspaces#DeleteConnectClientAddIn": { "type": "operation", @@ -2045,7 +2087,10 @@ }, "com.amazonaws.workspaces#DeleteConnectClientAddInResult": { "type": "structure", - "members": {} + "members": {}, + "traits": { + "smithy.api#output": {} + } }, "com.amazonaws.workspaces#DeleteConnectionAlias": { "type": "operation", @@ -2096,7 +2141,10 @@ }, "com.amazonaws.workspaces#DeleteConnectionAliasResult": { "type": "structure", - "members": {} + "members": {}, + "traits": { + "smithy.api#output": {} + } }, "com.amazonaws.workspaces#DeleteIpGroup": { "type": "operation", @@ -2141,7 +2189,10 @@ }, "com.amazonaws.workspaces#DeleteIpGroupResult": { "type": "structure", - "members": {} + "members": {}, + "traits": { + "smithy.api#output": {} + } }, "com.amazonaws.workspaces#DeleteTags": { "type": "operation", @@ -2187,7 +2238,10 @@ }, "com.amazonaws.workspaces#DeleteTagsResult": { "type": "structure", - "members": {} + "members": {}, + "traits": { + "smithy.api#output": {} + } }, "com.amazonaws.workspaces#DeleteWorkspaceBundle": { "type": "operation", @@ -2231,7 +2285,10 @@ }, "com.amazonaws.workspaces#DeleteWorkspaceBundleResult": { "type": "structure", - "members": {} + "members": {}, + "traits": { + "smithy.api#output": {} + } }, "com.amazonaws.workspaces#DeleteWorkspaceImage": { "type": "operation", @@ -2273,7 +2330,10 @@ }, "com.amazonaws.workspaces#DeleteWorkspaceImageResult": { "type": "structure", - "members": {} + "members": {}, + "traits": { + "smithy.api#output": {} + } }, "com.amazonaws.workspaces#DeregisterWorkspaceDirectory": { "type": "operation", @@ -2321,7 +2381,10 @@ }, "com.amazonaws.workspaces#DeregisterWorkspaceDirectoryResult": { "type": 
"structure", - "members": {} + "members": {}, + "traits": { + "smithy.api#output": {} + } }, "com.amazonaws.workspaces#DescribeAccount": { "type": "operation", @@ -2386,6 +2449,9 @@ "smithy.api#documentation": "The token to use to retrieve the next page of results. This value is null when there are\n no more results to return.
" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.workspaces#DescribeAccountRequest": { @@ -2410,6 +2476,9 @@ "smithy.api#documentation": "The IP address range, specified as an IPv4 CIDR block, used for the management network\n interface.
\nThe management network interface is connected to a secure Amazon WorkSpaces management\n network. It is used for interactive streaming of the WorkSpace desktop to Amazon WorkSpaces\n clients, and to allow Amazon WorkSpaces to manage the WorkSpace.
" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.workspaces#DescribeClientBranding": { @@ -2489,6 +2558,9 @@ "smithy.api#documentation": "The branding information for Web access.
" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.workspaces#DescribeClientProperties": { @@ -2538,6 +2610,9 @@ "smithy.api#documentation": "Information about the specified Amazon WorkSpaces clients.
" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.workspaces#DescribeConnectClientAddIns": { @@ -2605,6 +2680,9 @@ "smithy.api#documentation": "The token to use to retrieve the next page of results. This value is null when there are\n no more results to return.
" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.workspaces#DescribeConnectionAliasPermissions": { @@ -2681,6 +2759,9 @@ "smithy.api#documentation": "The token to use to retrieve the next page of results. This value is null when there are\n no more results to return.
" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.workspaces#DescribeConnectionAliases": { @@ -2753,6 +2834,9 @@ "smithy.api#documentation": "The token to use to retrieve the next page of results. This value is null when there are\n no more results to return.
" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.workspaces#DescribeIpGroups": { @@ -2816,6 +2900,9 @@ "smithy.api#documentation": "The token to use to retrieve the next page of results. This value is null when there are\n no more results to return.
" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.workspaces#DescribeTags": { @@ -2859,6 +2946,9 @@ "smithy.api#documentation": "The tags.
" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.workspaces#DescribeWorkspaceBundles": { @@ -2924,6 +3014,9 @@ "smithy.api#documentation": "The token to use to retrieve the next page of results. This value is null when there are no more \n results to return. This token is valid for one day and must be used within that time\n frame.
" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.workspaces#DescribeWorkspaceDirectories": { @@ -2989,6 +3082,9 @@ "smithy.api#documentation": "The token to use to retrieve the next page of results. This value is null when there are\n no more results to return.
" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.workspaces#DescribeWorkspaceImagePermissions": { @@ -3062,6 +3158,9 @@ "smithy.api#documentation": "The token to use to retrieve the next page of results. This value is null when there are\n no more results to return.
" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.workspaces#DescribeWorkspaceImages": { @@ -3128,6 +3227,9 @@ "smithy.api#documentation": "The token to use to retrieve the next page of results. This value is null when there are\n no more results to return.
" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.workspaces#DescribeWorkspaceSnapshots": { @@ -3183,6 +3285,9 @@ "smithy.api#documentation": "Information about the snapshots that can be used to restore a WorkSpace. These snapshots\n include both the root volume and the user volume.
" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.workspaces#DescribeWorkspaces": { @@ -3263,6 +3368,9 @@ "smithy.api#documentation": "The token to use to retrieve the next page of results. This value is null when there are\n no more results to return.
" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.workspaces#DescribeWorkspacesRequest": { @@ -3324,6 +3432,9 @@ "smithy.api#documentation": "The token to use to retrieve the next page of results. This value is null when there are\n no more results to return.
" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.workspaces#Description": { @@ -3406,7 +3517,10 @@ }, "com.amazonaws.workspaces#DisassociateConnectionAliasResult": { "type": "structure", - "members": {} + "members": {}, + "traits": { + "smithy.api#output": {} + } }, "com.amazonaws.workspaces#DisassociateIpGroups": { "type": "operation", @@ -3458,7 +3572,10 @@ }, "com.amazonaws.workspaces#DisassociateIpGroupsResult": { "type": "structure", - "members": {} + "members": {}, + "traits": { + "smithy.api#output": {} + } }, "com.amazonaws.workspaces#DnsIpAddresses": { "type": "list", @@ -3754,6 +3871,9 @@ "smithy.api#documentation": "The branding information configured for web access.
" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.workspaces#ImportWorkspaceImage": { @@ -3845,6 +3965,9 @@ "smithy.api#documentation": "The identifier of the WorkSpace image.
" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.workspaces#InvalidParameterValuesException": { @@ -4131,6 +4254,9 @@ "smithy.api#documentation": "The token to use to retrieve the next page of results. This value is null when there are\n no more results to return.
" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.workspaces#LogUploadEnum": { @@ -4243,6 +4369,9 @@ "smithy.api#documentation": "The new identifier of the WorkSpace that is being migrated. If the migration does not\n succeed, the target WorkSpace ID will not be used, and the WorkSpace will still have the\n original WorkSpace ID.
" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.workspaces#ModificationResourceEnum": { @@ -4362,7 +4491,10 @@ }, "com.amazonaws.workspaces#ModifyAccountResult": { "type": "structure", - "members": {} + "members": {}, + "traits": { + "smithy.api#output": {} + } }, "com.amazonaws.workspaces#ModifyCertificateBasedAuthProperties": { "type": "operation", @@ -4419,7 +4551,10 @@ }, "com.amazonaws.workspaces#ModifyCertificateBasedAuthPropertiesResult": { "type": "structure", - "members": {} + "members": {}, + "traits": { + "smithy.api#output": {} + } }, "com.amazonaws.workspaces#ModifyClientProperties": { "type": "operation", @@ -4468,7 +4603,10 @@ }, "com.amazonaws.workspaces#ModifyClientPropertiesResult": { "type": "structure", - "members": {} + "members": {}, + "traits": { + "smithy.api#output": {} + } }, "com.amazonaws.workspaces#ModifySamlProperties": { "type": "operation", @@ -4525,7 +4663,10 @@ }, "com.amazonaws.workspaces#ModifySamlPropertiesResult": { "type": "structure", - "members": {} + "members": {}, + "traits": { + "smithy.api#output": {} + } }, "com.amazonaws.workspaces#ModifySelfservicePermissions": { "type": "operation", @@ -4574,7 +4715,10 @@ }, "com.amazonaws.workspaces#ModifySelfservicePermissionsResult": { "type": "structure", - "members": {} + "members": {}, + "traits": { + "smithy.api#output": {} + } }, "com.amazonaws.workspaces#ModifyWorkspaceAccessProperties": { "type": "operation", @@ -4620,7 +4764,10 @@ }, "com.amazonaws.workspaces#ModifyWorkspaceAccessPropertiesResult": { "type": "structure", - "members": {} + "members": {}, + "traits": { + "smithy.api#output": {} + } }, "com.amazonaws.workspaces#ModifyWorkspaceCreationProperties": { "type": "operation", @@ -4672,7 +4819,10 @@ }, "com.amazonaws.workspaces#ModifyWorkspaceCreationPropertiesResult": { "type": "structure", - "members": {} + "members": {}, + "traits": { + "smithy.api#output": {} + } }, "com.amazonaws.workspaces#ModifyWorkspaceProperties": { "type": 
"operation", @@ -4733,7 +4883,10 @@ }, "com.amazonaws.workspaces#ModifyWorkspacePropertiesResult": { "type": "structure", - "members": {} + "members": {}, + "traits": { + "smithy.api#output": {} + } }, "com.amazonaws.workspaces#ModifyWorkspaceState": { "type": "operation", @@ -4785,7 +4938,10 @@ }, "com.amazonaws.workspaces#ModifyWorkspaceStateResult": { "type": "structure", - "members": {} + "members": {}, + "traits": { + "smithy.api#output": {} + } }, "com.amazonaws.workspaces#NonEmptyString": { "type": "string", @@ -4997,6 +5153,9 @@ "smithy.api#documentation": "Information about the WorkSpaces that could not be rebooted.
" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.workspaces#RebuildRequest": { @@ -5067,6 +5226,9 @@ "smithy.api#documentation": "Information about the WorkSpace that could not be rebuilt.
" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.workspaces#ReconnectEnum": { @@ -5182,7 +5344,10 @@ }, "com.amazonaws.workspaces#RegisterWorkspaceDirectoryResult": { "type": "structure", - "members": {} + "members": {}, + "traits": { + "smithy.api#output": {} + } }, "com.amazonaws.workspaces#RegistrationCode": { "type": "string", @@ -5379,7 +5544,10 @@ }, "com.amazonaws.workspaces#RestoreWorkspaceResult": { "type": "structure", - "members": {} + "members": {}, + "traits": { + "smithy.api#output": {} + } }, "com.amazonaws.workspaces#RevokeIpRules": { "type": "operation", @@ -5431,7 +5599,10 @@ }, "com.amazonaws.workspaces#RevokeIpRulesResult": { "type": "structure", - "members": {} + "members": {}, + "traits": { + "smithy.api#output": {} + } }, "com.amazonaws.workspaces#RootStorage": { "type": "structure", @@ -5722,6 +5893,9 @@ "smithy.api#documentation": "Information about the WorkSpaces that could not be started.
" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.workspaces#StopRequest": { @@ -5786,6 +5960,9 @@ "smithy.api#documentation": "Information about the WorkSpaces that could not be stopped.
" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.workspaces#SubnetId": { @@ -5958,6 +6135,9 @@ "smithy.api#documentation": "Information about the WorkSpaces that could not be terminated.
" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.workspaces#Timestamp": { @@ -6046,7 +6226,10 @@ }, "com.amazonaws.workspaces#UpdateConnectClientAddInResult": { "type": "structure", - "members": {} + "members": {}, + "traits": { + "smithy.api#output": {} + } }, "com.amazonaws.workspaces#UpdateConnectionAliasPermission": { "type": "operation", @@ -6107,7 +6290,10 @@ }, "com.amazonaws.workspaces#UpdateConnectionAliasPermissionResult": { "type": "structure", - "members": {} + "members": {}, + "traits": { + "smithy.api#output": {} + } }, "com.amazonaws.workspaces#UpdateDescription": { "type": "string", @@ -6192,7 +6378,10 @@ }, "com.amazonaws.workspaces#UpdateRulesOfIpGroupResult": { "type": "structure", - "members": {} + "members": {}, + "traits": { + "smithy.api#output": {} + } }, "com.amazonaws.workspaces#UpdateWorkspaceBundle": { "type": "operation", @@ -6245,7 +6434,10 @@ }, "com.amazonaws.workspaces#UpdateWorkspaceBundleResult": { "type": "structure", - "members": {} + "members": {}, + "traits": { + "smithy.api#output": {} + } }, "com.amazonaws.workspaces#UpdateWorkspaceImagePermission": { "type": "operation", @@ -6307,7 +6499,10 @@ }, "com.amazonaws.workspaces#UpdateWorkspaceImagePermissionResult": { "type": "structure", - "members": {} + "members": {}, + "traits": { + "smithy.api#output": {} + } }, "com.amazonaws.workspaces#UserName": { "type": "string", @@ -6404,7 +6599,7 @@ "VolumeEncryptionKey": { "target": "com.amazonaws.workspaces#VolumeEncryptionKey", "traits": { - "smithy.api#documentation": "The symmetric KMS key used to encrypt data stored on your WorkSpace.\n Amazon WorkSpaces does not support asymmetric KMS keys.
" + "smithy.api#documentation": "The ARN of the symmetric KMS key used to encrypt data stored on your WorkSpace.\n Amazon WorkSpaces does not support asymmetric KMS keys.
" } }, "UserVolumeEncryptionEnabled": { @@ -7177,7 +7372,7 @@ "VolumeEncryptionKey": { "target": "com.amazonaws.workspaces#VolumeEncryptionKey", "traits": { - "smithy.api#documentation": "The symmetric KMS key used to encrypt data stored on your WorkSpace.\n Amazon WorkSpaces does not support asymmetric KMS keys.
" + "smithy.api#documentation": "The ARN of the symmetric KMS key used to encrypt data stored on your WorkSpace.\n Amazon WorkSpaces does not support asymmetric KMS keys.
" } }, "UserVolumeEncryptionEnabled": {