From 694be8764ee72b4792a50b3659b039a1914a2b38 Mon Sep 17 00:00:00 2001 From: aws-sdk-go-automation <43143561+aws-sdk-go-automation@users.noreply.github.com> Date: Thu, 21 Jul 2022 11:28:49 -0700 Subject: [PATCH] Release v1.44.60 (2022-07-21) (#4488) Release v1.44.60 (2022-07-21) === ### Service Client Updates * `service/athena`: Updates service API, documentation, and paginators * This feature allows customers to retrieve runtime statistics for completed queries * `service/dms`: Updates service documentation * Documentation updates for Database Migration Service (DMS). * `service/docdb`: Updates service API and documentation * Enable copy-on-write restore type * `service/ec2-instance-connect`: Updates service API and documentation * `service/frauddetector`: Updates service API and documentation * `service/iotsitewise`: Updates service API, documentation, and paginators * `service/kendra`: Updates service API and documentation * Amazon Kendra now provides Oauth2 support for SharePoint Online. For more information, see https://docs.aws.amazon.com/kendra/latest/dg/data-source-sharepoint.html * `service/monitoring`: Updates service API and documentation * Adding support for the suppression of Composite Alarm actions * `service/network-firewall`: Updates service API and documentation * `service/rds`: Updates service API, documentation, waiters, paginators, and examples * Adds support for creating an RDS Proxy for an RDS for MariaDB database. 
--- CHANGELOG.md | 21 + aws/version.go | 2 +- models/apis/athena/2017-05-18/api-2.json | 88 + models/apis/athena/2017-05-18/docs-2.json | 85 +- .../apis/athena/2017-05-18/paginators-1.json | 5 + models/apis/dms/2016-01-01/docs-2.json | 4 +- models/apis/docdb/2014-10-31/api-2.json | 5 + models/apis/docdb/2014-10-31/docs-2.json | 5 + .../2018-04-02/api-2.json | 13 +- .../2018-04-02/docs-2.json | 6 + .../apis/frauddetector/2019-11-15/api-2.json | 147 +- .../apis/frauddetector/2019-11-15/docs-2.json | 150 +- models/apis/iotsitewise/2019-12-02/api-2.json | 255 +++ .../apis/iotsitewise/2019-12-02/docs-2.json | 140 ++ .../iotsitewise/2019-12-02/paginators-1.json | 6 + models/apis/kendra/2019-02-03/api-2.json | 10 +- models/apis/kendra/2019-02-03/docs-2.json | 16 +- models/apis/monitoring/2010-08-01/api-2.json | 35 +- models/apis/monitoring/2010-08-01/docs-2.json | 28 +- .../network-firewall/2020-11-12/api-2.json | 64 +- .../network-firewall/2020-11-12/docs-2.json | 63 + models/apis/rds/2014-10-31/docs-2.json | 144 +- service/athena/api.go | 582 +++++++ service/athena/athenaiface/interface.go | 7 + service/cloudwatch/api.go | 135 +- service/databasemigrationservice/api.go | 8 +- service/docdb/api.go | 61 + service/ec2instanceconnect/api.go | 70 + service/ec2instanceconnect/errors.go | 7 + service/ec2instanceconnect/examples_test.go | 2 + service/frauddetector/api.go | 759 ++++++++- service/iotsitewise/api.go | 1489 ++++++++++++++++- .../iotsitewise/iotsitewiseiface/interface.go | 15 + service/kendra/api.go | 57 +- service/networkfirewall/api.go | 264 +++ service/rds/api.go | 614 ++++--- 36 files changed, 4926 insertions(+), 436 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1020f14cf3a..fe35b5a7041 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,24 @@ +Release v1.44.60 (2022-07-21) +=== + +### Service Client Updates +* `service/athena`: Updates service API, documentation, and paginators + * This feature allows customers to retrieve runtime statistics 
for completed queries +* `service/dms`: Updates service documentation + * Documentation updates for Database Migration Service (DMS). +* `service/docdb`: Updates service API and documentation + * Enable copy-on-write restore type +* `service/ec2-instance-connect`: Updates service API and documentation +* `service/frauddetector`: Updates service API and documentation +* `service/iotsitewise`: Updates service API, documentation, and paginators +* `service/kendra`: Updates service API and documentation + * Amazon Kendra now provides Oauth2 support for SharePoint Online. For more information, see https://docs.aws.amazon.com/kendra/latest/dg/data-source-sharepoint.html +* `service/monitoring`: Updates service API and documentation + * Adding support for the suppression of Composite Alarm actions +* `service/network-firewall`: Updates service API and documentation +* `service/rds`: Updates service API, documentation, waiters, paginators, and examples + * Adds support for creating an RDS Proxy for an RDS for MariaDB database. 
+ Release v1.44.59 (2022-07-20) === diff --git a/aws/version.go b/aws/version.go index 9d0b08aff5f..0a0afae2b3a 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.44.59" +const SDKVersion = "1.44.60" diff --git a/models/apis/athena/2017-05-18/api-2.json b/models/apis/athena/2017-05-18/api-2.json index 1230522bd56..a624ccfe94b 100644 --- a/models/apis/athena/2017-05-18/api-2.json +++ b/models/apis/athena/2017-05-18/api-2.json @@ -239,6 +239,19 @@ {"shape":"InvalidRequestException"} ] }, + "GetQueryRuntimeStatistics":{ + "name":"GetQueryRuntimeStatistics", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetQueryRuntimeStatisticsInput"}, + "output":{"shape":"GetQueryRuntimeStatisticsOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"} + ] + }, "GetTableMetadata":{ "name":"GetTableMetadata", "http":{ @@ -982,6 +995,19 @@ "NextToken":{"shape":"Token"} } }, + "GetQueryRuntimeStatisticsInput":{ + "type":"structure", + "required":["QueryExecutionId"], + "members":{ + "QueryExecutionId":{"shape":"QueryExecutionId"} + } + }, + "GetQueryRuntimeStatisticsOutput":{ + "type":"structure", + "members":{ + "QueryRuntimeStatistics":{"shape":"QueryRuntimeStatistics"} + } + }, "GetTableMetadataInput":{ "type":"structure", "required":[ @@ -1402,6 +1428,64 @@ "AthenaError":{"shape":"AthenaError"} } }, + "QueryRuntimeStatistics":{ + "type":"structure", + "members":{ + "Timeline":{"shape":"QueryRuntimeStatisticsTimeline"}, + "Rows":{"shape":"QueryRuntimeStatisticsRows"}, + "OutputStage":{"shape":"QueryStage"} + } + }, + "QueryRuntimeStatisticsRows":{ + "type":"structure", + "members":{ + "InputRows":{"shape":"Long"}, + "InputBytes":{"shape":"Long"}, + "OutputBytes":{"shape":"Long"}, + "OutputRows":{"shape":"Long"} + } + }, + "QueryRuntimeStatisticsTimeline":{ + "type":"structure", + "members":{ + 
"QueryQueueTimeInMillis":{"shape":"Long"}, + "QueryPlanningTimeInMillis":{"shape":"Long"}, + "EngineExecutionTimeInMillis":{"shape":"Long"}, + "ServiceProcessingTimeInMillis":{"shape":"Long"}, + "TotalExecutionTimeInMillis":{"shape":"Long"} + } + }, + "QueryStage":{ + "type":"structure", + "members":{ + "StageId":{"shape":"Long"}, + "State":{"shape":"String"}, + "OutputBytes":{"shape":"Long"}, + "OutputRows":{"shape":"Long"}, + "InputBytes":{"shape":"Long"}, + "InputRows":{"shape":"Long"}, + "ExecutionTime":{"shape":"Long"}, + "QueryStagePlan":{"shape":"QueryStagePlanNode"}, + "SubStages":{"shape":"QueryStages"} + } + }, + "QueryStagePlanNode":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"}, + "Identifier":{"shape":"String"}, + "Children":{"shape":"QueryStagePlanNodes"}, + "RemoteSources":{"shape":"StringList"} + } + }, + "QueryStagePlanNodes":{ + "type":"list", + "member":{"shape":"QueryStagePlanNode"} + }, + "QueryStages":{ + "type":"list", + "member":{"shape":"QueryStage"} + }, "QueryString":{ "type":"string", "max":262144, @@ -1516,6 +1600,10 @@ } }, "String":{"type":"string"}, + "StringList":{ + "type":"list", + "member":{"shape":"String"} + }, "TableMetadata":{ "type":"structure", "required":["Name"], diff --git a/models/apis/athena/2017-05-18/docs-2.json b/models/apis/athena/2017-05-18/docs-2.json index da171605efc..76a3c931409 100644 --- a/models/apis/athena/2017-05-18/docs-2.json +++ b/models/apis/athena/2017-05-18/docs-2.json @@ -19,6 +19,7 @@ "GetPreparedStatement": "

Retrieves the prepared statement with the specified name from the specified workgroup.

", "GetQueryExecution": "

Returns information about a single execution of a query if you have access to the workgroup in which the query ran. Each time a query executes, information about the query execution is saved with a unique ID.

", "GetQueryResults": "

Streams the results of a single query execution specified by QueryExecutionId from the Athena query results location in Amazon S3. For more information, see Query Results in the Amazon Athena User Guide. This request does not execute the query but returns results. Use StartQueryExecution to run a query.

To stream query results successfully, the IAM principal with permission to call GetQueryResults also must have permissions to the Amazon S3 GetObject action for the Athena query results location.

IAM principals with permission to the Amazon S3 GetObject action for the query results location are able to retrieve query results from Amazon S3 even if permission to the GetQueryResults action is denied. To restrict user or role access, ensure that Amazon S3 permissions to the Athena query location are denied.

", + "GetQueryRuntimeStatistics": "

Returns query execution runtime statistics related to a single execution of a query if you have access to the workgroup in which the query ran. The query execution runtime statistics are returned only when QueryExecutionStatus$State is in a SUCCEEDED or FAILED state.

", "GetTableMetadata": "

Returns table metadata for the specified catalog, database, and table.

", "GetWorkGroup": "

Returns information about the workgroup with the specified name.

", "ListDataCatalogs": "

Lists the data catalogs in the current Amazon Web Services account.

", @@ -484,6 +485,16 @@ "refs": { } }, + "GetQueryRuntimeStatisticsInput": { + "base": null, + "refs": { + } + }, + "GetQueryRuntimeStatisticsOutput": { + "base": null, + "refs": { + } + }, "GetTableMetadataInput": { "base": null, "refs": { @@ -633,7 +644,22 @@ "QueryExecutionStatistics$TotalExecutionTimeInMillis": "

The number of milliseconds that Athena took to run the query.

", "QueryExecutionStatistics$QueryQueueTimeInMillis": "

The number of milliseconds that the query was in your query queue waiting for resources. Note that if transient errors occur, Athena might automatically add the query back to the queue.

", "QueryExecutionStatistics$QueryPlanningTimeInMillis": "

The number of milliseconds that Athena took to plan the query processing flow. This includes the time spent retrieving table partitions from the data source. Note that because the query engine performs the query planning, query planning time is a subset of engine processing time.

", - "QueryExecutionStatistics$ServiceProcessingTimeInMillis": "

The number of milliseconds that Athena took to finalize and publish the query results after the query engine finished running the query.

" + "QueryExecutionStatistics$ServiceProcessingTimeInMillis": "

The number of milliseconds that Athena took to finalize and publish the query results after the query engine finished running the query.

", + "QueryRuntimeStatisticsRows$InputRows": "

The number of rows read to execute the query.

", + "QueryRuntimeStatisticsRows$InputBytes": "

The number of bytes read to execute the query.

", + "QueryRuntimeStatisticsRows$OutputBytes": "

The number of bytes returned by the query.

", + "QueryRuntimeStatisticsRows$OutputRows": "

The number of rows returned by the query.

", + "QueryRuntimeStatisticsTimeline$QueryQueueTimeInMillis": "

The number of milliseconds that the query was in your query queue waiting for resources. Note that if transient errors occur, Athena might automatically add the query back to the queue.

", + "QueryRuntimeStatisticsTimeline$QueryPlanningTimeInMillis": "

The number of milliseconds that Athena took to plan the query processing flow. This includes the time spent retrieving table partitions from the data source. Note that because the query engine performs the query planning, query planning time is a subset of engine processing time.

", + "QueryRuntimeStatisticsTimeline$EngineExecutionTimeInMillis": "

The number of milliseconds that the query took to execute.

", + "QueryRuntimeStatisticsTimeline$ServiceProcessingTimeInMillis": "

The number of milliseconds that Athena took to finalize and publish the query results after the query engine finished running the query.

", + "QueryRuntimeStatisticsTimeline$TotalExecutionTimeInMillis": "

The number of milliseconds that Athena took to run the query.

", + "QueryStage$StageId": "

The identifier for a stage.

", + "QueryStage$OutputBytes": "

The number of bytes output from the stage after execution.

", + "QueryStage$OutputRows": "

The number of rows output from the stage after execution.

", + "QueryStage$InputBytes": "

The number of bytes input into the stage for execution.

", + "QueryStage$InputRows": "

The number of rows input into the stage for execution.

", + "QueryStage$ExecutionTime": "

Time taken to execute this stage.

" } }, "MaxDataCatalogsCount": { @@ -822,6 +848,7 @@ "refs": { "GetQueryExecutionInput$QueryExecutionId": "

The unique ID of the query execution.

", "GetQueryResultsInput$QueryExecutionId": "

The unique ID of the query execution.

", + "GetQueryRuntimeStatisticsInput$QueryExecutionId": "

The unique ID of the query execution.

", "QueryExecution$QueryExecutionId": "

The unique identifier for each query execution.

", "QueryExecutionIdList$member": null, "StartQueryExecutionOutput$QueryExecutionId": "

The unique ID of the query that ran as a result of this request.

", @@ -860,6 +887,50 @@ "QueryExecution$Status": "

The completion date, current state, submission time, and state change reason (if applicable) for the query execution.

" } }, + "QueryRuntimeStatistics": { + "base": "

The query execution timeline, statistics on input and output rows and bytes, and the different query stages that form the query execution plan.

", + "refs": { + "GetQueryRuntimeStatisticsOutput$QueryRuntimeStatistics": "

Runtime statistics about the query execution.

" + } + }, + "QueryRuntimeStatisticsRows": { + "base": "

Statistics such as input rows and bytes read by the query, rows and bytes output by the query, and the number of rows written by the query.

", + "refs": { + "QueryRuntimeStatistics$Rows": null + } + }, + "QueryRuntimeStatisticsTimeline": { + "base": "

Timeline statistics such as query queue time, planning time, execution time, service processing time, and total execution time.

", + "refs": { + "QueryRuntimeStatistics$Timeline": null + } + }, + "QueryStage": { + "base": "

Stage statistics such as input and output rows and bytes, execution time and stage state. This information also includes substages and the query stage plan.

", + "refs": { + "QueryRuntimeStatistics$OutputStage": "

Stage statistics such as input and output rows and bytes, execution time, and stage state. This information also includes substages and the query stage plan.

", + "QueryStages$member": null + } + }, + "QueryStagePlanNode": { + "base": "

Stage plan information such as name, identifier, sub plans, and remote sources.

", + "refs": { + "QueryStage$QueryStagePlan": "

Stage plan information such as name, identifier, sub plans, and source stages.

", + "QueryStagePlanNodes$member": null + } + }, + "QueryStagePlanNodes": { + "base": null, + "refs": { + "QueryStagePlanNode$Children": "

Stage plan information such as name, identifier, sub plans, and remote sources of child plan nodes.

" + } + }, + "QueryStages": { + "base": null, + "refs": { + "QueryStage$SubStages": "

List of sub query stages that form this stage execution plan.

" + } + }, "QueryString": { "base": null, "refs": { @@ -980,7 +1051,17 @@ "ColumnInfo$Type": "

The data type of the column.

", "EncryptionConfiguration$KmsKey": "

For SSE_KMS and CSE_KMS, this is the KMS key ARN or ID.

", "QueryExecutionStatistics$DataManifestLocation": "

The location and file name of a data manifest file. The manifest file is saved to the Athena query results location in Amazon S3. The manifest file tracks files that the query wrote to Amazon S3. If the query fails, the manifest file also tracks files that the query intended to write. The manifest is useful for identifying orphaned files resulting from a failed query. For more information, see Working with Query Results, Output Files, and Query History in the Amazon Athena User Guide.

", - "QueryExecutionStatus$StateChangeReason": "

Further detail about the status of the query.

" + "QueryExecutionStatus$StateChangeReason": "

Further detail about the status of the query.

", + "QueryStage$State": "

State of the stage after query execution.

", + "QueryStagePlanNode$Name": "

Name of the query stage plan that describes the operation this stage is performing as part of query execution.

", + "QueryStagePlanNode$Identifier": "

Information about the operation this query stage plan node is performing.

", + "StringList$member": null + } + }, + "StringList": { + "base": null, + "refs": { + "QueryStagePlanNode$RemoteSources": "

Source plan node IDs.

" } }, "TableMetadata": { diff --git a/models/apis/athena/2017-05-18/paginators-1.json b/models/apis/athena/2017-05-18/paginators-1.json index d813cda77ae..305dbd384d5 100644 --- a/models/apis/athena/2017-05-18/paginators-1.json +++ b/models/apis/athena/2017-05-18/paginators-1.json @@ -17,6 +17,11 @@ "output_token": "NextToken", "result_key": "DatabaseList" }, + "ListEngineVersions": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken" + }, "ListNamedQueries": { "input_token": "NextToken", "limit_key": "MaxResults", diff --git a/models/apis/dms/2016-01-01/docs-2.json b/models/apis/dms/2016-01-01/docs-2.json index 428db295af6..7fbc32b95cd 100644 --- a/models/apis/dms/2016-01-01/docs-2.json +++ b/models/apis/dms/2016-01-01/docs-2.json @@ -1968,7 +1968,7 @@ "Connection$EndpointIdentifier": "

The identifier of the endpoint. Identifiers must begin with a letter and must contain only ASCII letters, digits, and hyphens. They can't end with a hyphen or contain two consecutive hyphens.

", "Connection$ReplicationInstanceIdentifier": "

The replication instance identifier. This parameter is stored as a lowercase string.

", "CreateEndpointMessage$EndpointIdentifier": "

The database endpoint identifier. Identifiers must begin with a letter and must contain only ASCII letters, digits, and hyphens. They can't end with a hyphen, or contain two consecutive hyphens.

", - "CreateEndpointMessage$EngineName": "

The type of engine for the endpoint. Valid values, depending on the EndpointType value, include \"mysql\", \"oracle\", \"postgres\", \"mariadb\", \"aurora\", \"aurora-postgresql\", \"opensearch\", \"redshift\", \"s3\", \"db2\", \"azuredb\", \"sybase\", \"dynamodb\", \"mongodb\", \"kinesis\", \"kafka\", \"elasticsearch\", \"docdb\", \"sqlserver\", and \"neptune\".

", + "CreateEndpointMessage$EngineName": "

The type of engine for the endpoint. Valid values, depending on the EndpointType value, include \"mysql\", \"oracle\", \"postgres\", \"mariadb\", \"aurora\", \"aurora-postgresql\", \"opensearch\", \"redshift\", \"s3\", \"db2\", \"db2-zos\", \"azuredb\", \"sybase\", \"dynamodb\", \"mongodb\", \"kinesis\", \"kafka\", \"elasticsearch\", \"docdb\", \"sqlserver\", \"neptune\", and \"babelfish\".

", "CreateEndpointMessage$Username": "

The user name to be used to log in to the endpoint database.

", "CreateEndpointMessage$ServerName": "

The name of the server where the endpoint database resides.

", "CreateEndpointMessage$DatabaseName": "

The name of the endpoint database. For a MySQL source or target endpoint, do not specify DatabaseName. To migrate to a specific database, use this setting and targetDbType.

", @@ -2393,7 +2393,7 @@ "SybaseSettings$SecretsManagerSecretId": "

The full ARN, partial ARN, or friendly name of the SecretsManagerSecret that contains the SAP SAE endpoint connection details.

", "TableStatistics$SchemaName": "

The schema name.

", "TableStatistics$TableName": "

The name of the table.

", - "TableStatistics$TableState": "

The state of the tables described.

Valid states: Table does not exist | Before load | Full load | Table completed | Table cancelled | Table error | Table all | Table updates | Table is being reloaded

", + "TableStatistics$TableState": "

The state of the tables described.

Valid states: Table does not exist | Before load | Full load | Table completed | Table cancelled | Table error | Table is being reloaded

", "TableStatistics$ValidationState": "

The validation state of the table.

This parameter can have the following values:

", "TableStatistics$ValidationStateDetails": "

Additional details about the state of validation.

", "TableToReload$SchemaName": "

The schema name of the table to be reloaded.

", diff --git a/models/apis/docdb/2014-10-31/api-2.json b/models/apis/docdb/2014-10-31/api-2.json index 958a61699cc..6b7ddaa68bd 100644 --- a/models/apis/docdb/2014-10-31/api-2.json +++ b/models/apis/docdb/2014-10-31/api-2.json @@ -1202,6 +1202,7 @@ "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, "Tags":{"shape":"TagList"}, "DBClusterIdentifier":{"shape":"String"}, + "CopyTagsToSnapshot":{"shape":"BooleanOptional"}, "PromotionTier":{"shape":"IntegerOptional"}, "EnablePerformanceInsights":{"shape":"BooleanOptional"}, "PerformanceInsightsKMSKeyId":{"shape":"String"} @@ -1305,6 +1306,7 @@ "DbClusterResourceId":{"shape":"String"}, "DBClusterArn":{"shape":"String"}, "AssociatedRoles":{"shape":"DBClusterRoles"}, + "CloneGroupId":{"shape":"String"}, "ClusterCreateTime":{"shape":"TStamp"}, "EnabledCloudwatchLogsExports":{"shape":"LogTypeList"}, "DeletionProtection":{"shape":"Boolean"} @@ -1571,6 +1573,7 @@ "KmsKeyId":{"shape":"String"}, "DbiResourceId":{"shape":"String"}, "CACertificateIdentifier":{"shape":"String"}, + "CopyTagsToSnapshot":{"shape":"BooleanOptional"}, "PromotionTier":{"shape":"IntegerOptional"}, "DBInstanceArn":{"shape":"String"}, "EnabledCloudwatchLogsExports":{"shape":"LogTypeList"} @@ -2558,6 +2561,7 @@ "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, "NewDBInstanceIdentifier":{"shape":"String"}, "CACertificateIdentifier":{"shape":"String"}, + "CopyTagsToSnapshot":{"shape":"BooleanOptional"}, "PromotionTier":{"shape":"IntegerOptional"}, "EnablePerformanceInsights":{"shape":"BooleanOptional"}, "PerformanceInsightsKMSKeyId":{"shape":"String"} @@ -2859,6 +2863,7 @@ ], "members":{ "DBClusterIdentifier":{"shape":"String"}, + "RestoreType":{"shape":"String"}, "SourceDBClusterIdentifier":{"shape":"String"}, "RestoreToTime":{"shape":"TStamp"}, "UseLatestRestorableTime":{"shape":"Boolean"}, diff --git a/models/apis/docdb/2014-10-31/docs-2.json b/models/apis/docdb/2014-10-31/docs-2.json index 8093117c078..1bde6821075 100644 --- 
a/models/apis/docdb/2014-10-31/docs-2.json +++ b/models/apis/docdb/2014-10-31/docs-2.json @@ -159,10 +159,12 @@ "CreateDBClusterMessage$StorageEncrypted": "

Specifies whether the cluster is encrypted.

", "CreateDBClusterMessage$DeletionProtection": "

Specifies whether this cluster can be deleted. If DeletionProtection is enabled, the cluster cannot be deleted unless it is modified and DeletionProtection is disabled. DeletionProtection protects clusters from being accidentally deleted.

", "CreateDBInstanceMessage$AutoMinorVersionUpgrade": "

This parameter does not apply to Amazon DocumentDB. Amazon DocumentDB does not perform minor version upgrades regardless of the value set.

Default: false

", + "CreateDBInstanceMessage$CopyTagsToSnapshot": "

A value that indicates whether to copy tags from the DB instance to snapshots of the DB instance. By default, tags are not copied.

", "CreateDBInstanceMessage$EnablePerformanceInsights": "

A value that indicates whether to enable Performance Insights for the DB Instance. For more information, see Using Amazon Performance Insights.

", "CreateEventSubscriptionMessage$Enabled": "

A Boolean value; set to true to activate the subscription, set to false to create the subscription but not active it.

", "CreateGlobalClusterMessage$DeletionProtection": "

The deletion protection setting for the new global cluster. The global cluster can't be deleted when deletion protection is enabled.

", "CreateGlobalClusterMessage$StorageEncrypted": "

The storage encryption setting for the new global cluster.

", + "DBInstance$CopyTagsToSnapshot": "

A value that indicates whether to copy tags from the DB instance to snapshots of the DB instance. By default, tags are not copied.

", "DescribeDBEngineVersionsMessage$ListSupportedCharacterSets": "

If this parameter is specified and the requested engine supports the CharacterSetName parameter for CreateDBInstance, the response includes a list of supported character sets for each engine version.

", "DescribeDBEngineVersionsMessage$ListSupportedTimezones": "

If this parameter is specified and the requested engine supports the TimeZone parameter for CreateDBInstance, the response includes a list of supported time zones for each engine version.

", "DescribeOrderableDBInstanceOptionsMessage$Vpc": "

The virtual private cloud (VPC) filter value. Specify this parameter to show only the available VPC or non-VPC offerings.

", @@ -170,6 +172,7 @@ "GlobalCluster$DeletionProtection": "

The deletion protection setting for the new global cluster.

", "ModifyDBClusterMessage$DeletionProtection": "

Specifies whether this cluster can be deleted. If DeletionProtection is enabled, the cluster cannot be deleted unless it is modified and DeletionProtection is disabled. DeletionProtection protects clusters from being accidentally deleted.

", "ModifyDBInstanceMessage$AutoMinorVersionUpgrade": "

This parameter does not apply to Amazon DocumentDB. Amazon DocumentDB does not perform minor version upgrades regardless of the value set.

", + "ModifyDBInstanceMessage$CopyTagsToSnapshot": "

A value that indicates whether to copy all tags from the DB instance to snapshots of the DB instance. By default, tags are not copied.

", "ModifyDBInstanceMessage$EnablePerformanceInsights": "

A value that indicates whether to enable Performance Insights for the DB Instance. For more information, see Using Amazon Performance Insights.

", "ModifyEventSubscriptionMessage$Enabled": "

A Boolean value; set to true to activate the subscription.

", "ModifyGlobalClusterMessage$DeletionProtection": "

Indicates if the global cluster has deletion protection enabled. The global cluster can't be deleted when deletion protection is enabled.

", @@ -1446,6 +1449,7 @@ "DBCluster$KmsKeyId": "

If StorageEncrypted is true, the KMS key identifier for the encrypted cluster.

", "DBCluster$DbClusterResourceId": "

The Amazon Web Services Region-unique, immutable identifier for the cluster. This identifier is found in CloudTrail log entries whenever the KMS key for the cluster is accessed.

", "DBCluster$DBClusterArn": "

The Amazon Resource Name (ARN) for the cluster.

", + "DBCluster$CloneGroupId": "

Identifies the clone group to which the DB cluster is associated.

", "DBClusterMember$DBInstanceIdentifier": "

Specifies the instance identifier for this member of the cluster.

", "DBClusterMember$DBClusterParameterGroupStatus": "

Specifies the status of the cluster parameter group for this member of the DB cluster.

", "DBClusterMessage$Marker": "

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

", @@ -1640,6 +1644,7 @@ "RestoreDBClusterFromSnapshotMessage$DBSubnetGroupName": "

The name of the subnet group to use for the new cluster.

Constraints: If provided, must match the name of an existing DBSubnetGroup.

Example: mySubnetgroup

", "RestoreDBClusterFromSnapshotMessage$KmsKeyId": "

The KMS key identifier to use when restoring an encrypted cluster from a DB snapshot or cluster snapshot.

The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption key. If you are restoring a cluster with the same Amazon Web Services account that owns the KMS encryption key used to encrypt the new cluster, then you can use the KMS key alias instead of the ARN for the KMS encryption key.

If you do not specify a value for the KmsKeyId parameter, then the following occurs:

", "RestoreDBClusterToPointInTimeMessage$DBClusterIdentifier": "

The name of the new cluster to be created.

Constraints:

", + "RestoreDBClusterToPointInTimeMessage$RestoreType": "

The type of restore to be performed. You can specify one of the following values:

Constraints: You can't specify copy-on-write if the engine version of the source DB cluster is earlier than 1.11.

If you don't specify a RestoreType value, then the new DB cluster is restored as a full copy of the source DB cluster.

", "RestoreDBClusterToPointInTimeMessage$SourceDBClusterIdentifier": "

The identifier of the source cluster from which to restore.

Constraints:

", "RestoreDBClusterToPointInTimeMessage$DBSubnetGroupName": "

The subnet group name to use for the new cluster.

Constraints: If provided, must match the name of an existing DBSubnetGroup.

Example: mySubnetgroup

", "RestoreDBClusterToPointInTimeMessage$KmsKeyId": "

The KMS key identifier to use when restoring an encrypted cluster from an encrypted cluster.

The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption key. If you are restoring a cluster with the same Amazon Web Services account that owns the KMS encryption key used to encrypt the new cluster, then you can use the KMS key alias instead of the ARN for the KMS encryption key.

You can restore to a new cluster and encrypt the new cluster with an KMS key that is different from the KMS key used to encrypt the source cluster. The new DB cluster is encrypted with the KMS key identified by the KmsKeyId parameter.

If you do not specify a value for the KmsKeyId parameter, then the following occurs:

If DBClusterIdentifier refers to a cluster that is not encrypted, then the restore request is rejected.

", diff --git a/models/apis/ec2-instance-connect/2018-04-02/api-2.json b/models/apis/ec2-instance-connect/2018-04-02/api-2.json index 26c1734a4f5..2e8577d6406 100644 --- a/models/apis/ec2-instance-connect/2018-04-02/api-2.json +++ b/models/apis/ec2-instance-connect/2018-04-02/api-2.json @@ -27,7 +27,8 @@ {"shape":"ServiceException"}, {"shape":"ThrottlingException"}, {"shape":"EC2InstanceNotFoundException"}, - {"shape":"EC2InstanceStateInvalidException"} + {"shape":"EC2InstanceStateInvalidException"}, + {"shape":"EC2InstanceUnavailableException"} ] }, "SendSerialConsoleSSHPublicKey":{ @@ -48,7 +49,8 @@ {"shape":"EC2InstanceTypeInvalidException"}, {"shape":"SerialConsoleSessionLimitExceededException"}, {"shape":"SerialConsoleSessionUnavailableException"}, - {"shape":"EC2InstanceStateInvalidException"} + {"shape":"EC2InstanceStateInvalidException"}, + {"shape":"EC2InstanceUnavailableException"} ] } }, @@ -87,6 +89,13 @@ }, "exception":true }, + "EC2InstanceUnavailableException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "exception":true + }, "InstanceId":{ "type":"string", "max":32, diff --git a/models/apis/ec2-instance-connect/2018-04-02/docs-2.json b/models/apis/ec2-instance-connect/2018-04-02/docs-2.json index a7da98023a7..bab3ed21f4e 100644 --- a/models/apis/ec2-instance-connect/2018-04-02/docs-2.json +++ b/models/apis/ec2-instance-connect/2018-04-02/docs-2.json @@ -32,6 +32,11 @@ "refs": { } }, + "EC2InstanceUnavailableException": { + "base": "

The instance is currently unavailable. Wait a few minutes and try again.

", + "refs": { + } + }, "InstanceId": { "base": null, "refs": { @@ -117,6 +122,7 @@ "EC2InstanceNotFoundException$Message": null, "EC2InstanceStateInvalidException$Message": null, "EC2InstanceTypeInvalidException$Message": null, + "EC2InstanceUnavailableException$Message": null, "InvalidArgsException$Message": null, "SerialConsoleAccessDisabledException$Message": null, "SerialConsoleSessionLimitExceededException$Message": null, diff --git a/models/apis/frauddetector/2019-11-15/api-2.json b/models/apis/frauddetector/2019-11-15/api-2.json index e8e8725be0a..43bf76093cd 100644 --- a/models/apis/frauddetector/2019-11-15/api-2.json +++ b/models/apis/frauddetector/2019-11-15/api-2.json @@ -1102,6 +1102,32 @@ } }, "shapes":{ + "ATIMetricDataPoint":{ + "type":"structure", + "members":{ + "cr":{"shape":"float"}, + "adr":{"shape":"float"}, + "threshold":{"shape":"float"}, + "atodr":{"shape":"float"} + } + }, + "ATIMetricDataPointsList":{ + "type":"list", + "member":{"shape":"ATIMetricDataPoint"} + }, + "ATIModelPerformance":{ + "type":"structure", + "members":{ + "asi":{"shape":"float"} + } + }, + "ATITrainingMetricsValue":{ + "type":"structure", + "members":{ + "metricDataPoints":{"shape":"ATIMetricDataPointsList"}, + "modelPerformance":{"shape":"ATIModelPerformance"} + } + }, "AccessDeniedException":{ "type":"structure", "required":["message"], @@ -1110,6 +1136,31 @@ }, "exception":true }, + "AggregatedLogOddsMetric":{ + "type":"structure", + "required":[ + "variableNames", + "aggregatedVariablesImportance" + ], + "members":{ + "variableNames":{"shape":"ListOfStrings"}, + "aggregatedVariablesImportance":{"shape":"float"} + } + }, + "AggregatedVariablesImpactExplanation":{ + "type":"structure", + "members":{ + "eventVariableNames":{"shape":"ListOfStrings"}, + "relativeImpact":{"shape":"string"}, + "logOddsImpact":{"shape":"float"} + } + }, + "AggregatedVariablesImportanceMetrics":{ + "type":"structure", + "members":{ + 
"logOddsMetrics":{"shape":"ListOfAggregatedLogOddsMetrics"} + } + }, "AsyncJobStatus":{ "type":"string", "enum":[ @@ -2338,7 +2389,6 @@ }, "LabelSchema":{ "type":"structure", - "required":["labelMapper"], "members":{ "labelMapper":{"shape":"labelMapper"}, "unlabeledEventsTreatment":{"shape":"UnlabeledEventsTreatment"} @@ -2367,6 +2417,14 @@ "nextToken":{"shape":"string"} } }, + "ListOfAggregatedLogOddsMetrics":{ + "type":"list", + "member":{"shape":"AggregatedLogOddsMetric"} + }, + "ListOfAggregatedVariablesImpactExplanations":{ + "type":"list", + "member":{"shape":"AggregatedVariablesImpactExplanation"} + }, "ListOfEvaluatedExternalModels":{ "type":"list", "member":{"shape":"EvaluatedExternalModel"} @@ -2536,7 +2594,8 @@ "type":"string", "enum":[ "ONLINE_FRAUD_INSIGHTS", - "TRANSACTION_FRAUD_INSIGHTS" + "TRANSACTION_FRAUD_INSIGHTS", + "ACCOUNT_TAKEOVER_INSIGHTS" ] }, "ModelVersion":{ @@ -2567,7 +2626,8 @@ "trainingResult":{"shape":"TrainingResult"}, "lastUpdatedTime":{"shape":"time"}, "createdTime":{"shape":"time"}, - "arn":{"shape":"fraudDetectorArn"} + "arn":{"shape":"fraudDetectorArn"}, + "trainingResultV2":{"shape":"TrainingResultV2"} } }, "ModelVersionEvaluation":{ @@ -2597,6 +2657,32 @@ "member":{"shape":"string"}, "min":1 }, + "OFIMetricDataPoint":{ + "type":"structure", + "members":{ + "fpr":{"shape":"float"}, + "precision":{"shape":"float"}, + "tpr":{"shape":"float"}, + "threshold":{"shape":"float"} + } + }, + "OFIMetricDataPointsList":{ + "type":"list", + "member":{"shape":"OFIMetricDataPoint"} + }, + "OFIModelPerformance":{ + "type":"structure", + "members":{ + "auc":{"shape":"float"} + } + }, + "OFITrainingMetricsValue":{ + "type":"structure", + "members":{ + "metricDataPoints":{"shape":"OFIMetricDataPointsList"}, + "modelPerformance":{"shape":"OFIModelPerformance"} + } + }, "Outcome":{ "type":"structure", "members":{ @@ -2620,7 +2706,8 @@ "PredictionExplanations":{ "type":"structure", "members":{ - 
"variableImpactExplanations":{"shape":"listOfVariableImpactExplanations"} + "variableImpactExplanations":{"shape":"listOfVariableImpactExplanations"}, + "aggregatedVariablesImpactExplanations":{"shape":"ListOfAggregatedVariablesImpactExplanations"} } }, "PredictionTimeRange":{ @@ -2848,6 +2935,32 @@ "members":{ } }, + "TFIMetricDataPoint":{ + "type":"structure", + "members":{ + "fpr":{"shape":"float"}, + "precision":{"shape":"float"}, + "tpr":{"shape":"float"}, + "threshold":{"shape":"float"} + } + }, + "TFIMetricDataPointsList":{ + "type":"list", + "member":{"shape":"TFIMetricDataPoint"} + }, + "TFIModelPerformance":{ + "type":"structure", + "members":{ + "auc":{"shape":"float"} + } + }, + "TFITrainingMetricsValue":{ + "type":"structure", + "members":{ + "metricDataPoints":{"shape":"TFIMetricDataPointsList"}, + "modelPerformance":{"shape":"TFIModelPerformance"} + } + }, "Tag":{ "type":"structure", "required":[ @@ -2891,10 +3004,7 @@ }, "TrainingDataSchema":{ "type":"structure", - "required":[ - "modelVariables", - "labelSchema" - ], + "required":["modelVariables"], "members":{ "modelVariables":{"shape":"ListOfStrings"}, "labelSchema":{"shape":"LabelSchema"} @@ -2914,6 +3024,14 @@ "metricDataPoints":{"shape":"metricDataPointsList"} } }, + "TrainingMetricsV2":{ + "type":"structure", + "members":{ + "ofi":{"shape":"OFITrainingMetricsValue"}, + "tfi":{"shape":"TFITrainingMetricsValue"}, + "ati":{"shape":"ATITrainingMetricsValue"} + } + }, "TrainingResult":{ "type":"structure", "members":{ @@ -2922,6 +3040,15 @@ "variableImportanceMetrics":{"shape":"VariableImportanceMetrics"} } }, + "TrainingResultV2":{ + "type":"structure", + "members":{ + "dataValidationMetrics":{"shape":"DataValidationMetrics"}, + "trainingMetricsV2":{"shape":"TrainingMetricsV2"}, + "variableImportanceMetrics":{"shape":"VariableImportanceMetrics"}, + "aggregatedVariablesImportanceMetrics":{"shape":"AggregatedVariablesImportanceMetrics"} + } + }, "UnlabeledEventsTreatment":{ "type":"string", 
"enum":[ @@ -3312,7 +3439,7 @@ "labelMapper":{ "type":"map", "key":{"shape":"string"}, - "value":{"shape":"NonEmptyListOfStrings"} + "value":{"shape":"ListOfStrings"} }, "labelsMaxResults":{ "type":"integer", @@ -3420,7 +3547,7 @@ }, "variableValue":{ "type":"string", - "max":1024, + "max":8192, "min":1, "sensitive":true }, diff --git a/models/apis/frauddetector/2019-11-15/docs-2.json b/models/apis/frauddetector/2019-11-15/docs-2.json index dc323dce326..f46903a67ff 100644 --- a/models/apis/frauddetector/2019-11-15/docs-2.json +++ b/models/apis/frauddetector/2019-11-15/docs-2.json @@ -72,11 +72,53 @@ "UpdateVariable": "

Updates a variable.

" }, "shapes": { + "ATIMetricDataPoint": { + "base": "

The Account Takeover Insights (ATI) model performance metrics data points.

", + "refs": { + "ATIMetricDataPointsList$member": null + } + }, + "ATIMetricDataPointsList": { + "base": null, + "refs": { + "ATITrainingMetricsValue$metricDataPoints": "

The model's performance metrics data points.

" + } + }, + "ATIModelPerformance": { + "base": "

The Account Takeover Insights (ATI) model performance score.

", + "refs": { + "ATITrainingMetricsValue$modelPerformance": "

The model's overall performance scores.

" + } + }, + "ATITrainingMetricsValue": { + "base": "

The Account Takeover Insights (ATI) model training metric details.

", + "refs": { + "TrainingMetricsV2$ati": "

The Account Takeover Insights (ATI) model training metric details.

" + } + }, "AccessDeniedException": { "base": "

An exception indicating Amazon Fraud Detector does not have the needed permissions. This can occur if you submit a request, such as PutExternalModel, that specifies a role that is not in your account.

", "refs": { } }, + "AggregatedLogOddsMetric": { + "base": "

The log odds metric details.

Account Takeover Insights (ATI) model uses event variables from the login data you provide to continuously calculate a set of variables (aggregated variables) based on historical events. For example, your ATI model might calculate the number of times a user has logged in using the same IP address. In this case, event variables used to derive the aggregated variables are IP address and user.

", + "refs": { + "ListOfAggregatedLogOddsMetrics$member": null + } + }, + "AggregatedVariablesImpactExplanation": { + "base": "

The details of the impact of aggregated variables on the prediction score.

Account Takeover Insights (ATI) model uses the login data you provide to continuously calculate a set of variables (aggregated variables) based on historical events. For example, the model might calculate the number of times a user has logged in using the same IP address. In this case, event variables used to derive the aggregated variables are IP address and user.

", + "refs": { + "ListOfAggregatedVariablesImpactExplanations$member": null + } + }, + "AggregatedVariablesImportanceMetrics": { + "base": "

The details of the relative importance of the aggregated variables.

Account Takeover Insights (ATI) model uses event variables from the login data you provide to continuously calculate a set of variables (aggregated variables) based on historical events. For example, your ATI model might calculate the number of times a user has logged in using the same IP address. In this case, event variables used to derive the aggregated variables are IP address and user.

", + "refs": { + "TrainingResultV2$aggregatedVariablesImportanceMetrics": "

The variable importance metrics of the aggregated variables.

Account Takeover Insights (ATI) model uses event variables from the login data you provide to continuously calculate a set of variables (aggregated variables) based on historical events. For example, your ATI model might calculate the number of times a user has logged in using the same IP address. In this case, event variables used to derive the aggregated variables are IP address and user.

" + } + }, "AsyncJobStatus": { "base": null, "refs": { @@ -277,9 +319,10 @@ } }, "DataValidationMetrics": { - "base": "

The model training validation messages.

", + "base": "

The model training data validation metrics.

", "refs": { - "TrainingResult$dataValidationMetrics": "

The validation metrics.

" + "TrainingResult$dataValidationMetrics": "

The validation metrics.

", + "TrainingResultV2$dataValidationMetrics": null } }, "DeleteAuditHistory": { @@ -921,6 +964,18 @@ "refs": { } }, + "ListOfAggregatedLogOddsMetrics": { + "base": null, + "refs": { + "AggregatedVariablesImportanceMetrics$logOddsMetrics": "

List of variables' metrics.

" + } + }, + "ListOfAggregatedVariablesImpactExplanations": { + "base": null, + "refs": { + "PredictionExplanations$aggregatedVariablesImpactExplanations": "

The details of the aggregated variables impact on the prediction score.

Account Takeover Insights (ATI) model uses event variables from the login data you provide to continuously calculate a set of variables (aggregated variables) based on historical events. For example, your ATI model might calculate the number of times a user has logged in using the same IP address. In this case, event variables used to derive the aggregated variables are IP address and user.

" + } + }, "ListOfEvaluatedExternalModels": { "base": null, "refs": { @@ -986,6 +1041,8 @@ "ListOfStrings": { "base": null, "refs": { + "AggregatedLogOddsMetric$variableNames": "

The names of all the variables.

", + "AggregatedVariablesImpactExplanation$eventVariableNames": "

The names of all the event variables that were used to derive the aggregated variables.

", "CreateDetectorVersionRequest$externalModelEndpoints": "

The Amazon Sagemaker model endpoints to include in the detector version.

", "EvaluatedRule$outcomes": "

The rule outcome.

", "EventType$eventVariables": "

The event type event variables.

", @@ -995,7 +1052,8 @@ "PutEventTypeRequest$labels": "

The event type labels.

", "RuleResult$outcomes": "

The outcomes of the matched rule, based on the rule execution mode.

", "TrainingDataSchema$modelVariables": "

The training data schema variables.

", - "UpdateDetectorVersionRequest$externalModelEndpoints": "

The Amazon SageMaker model endpoints to include in the detector version.

" + "UpdateDetectorVersionRequest$externalModelEndpoints": "

The Amazon SageMaker model endpoints to include in the detector version.

", + "labelMapper$value": null } }, "ListTagsForResourceRequest": { @@ -1159,8 +1217,31 @@ "PutEventTypeRequest$eventVariables": "

The event type variables.

", "PutEventTypeRequest$entityTypes": "

The entity type for the event type. Example entity types: customer, merchant, account.

", "RuleDetail$outcomes": "

The rule outcomes.

", - "UpdateRuleVersionRequest$outcomes": "

The outcomes.

", - "labelMapper$value": null + "UpdateRuleVersionRequest$outcomes": "

The outcomes.

" + } + }, + "OFIMetricDataPoint": { + "base": "

The Online Fraud Insights (OFI) model performance metrics data points.

", + "refs": { + "OFIMetricDataPointsList$member": null + } + }, + "OFIMetricDataPointsList": { + "base": null, + "refs": { + "OFITrainingMetricsValue$metricDataPoints": "

The model's performance metrics data points.

" + } + }, + "OFIModelPerformance": { + "base": "

The Online Fraud Insights (OFI) model performance score.

", + "refs": { + "OFITrainingMetricsValue$modelPerformance": "

The model's overall performance score.

" + } + }, + "OFITrainingMetricsValue": { + "base": "

The Online Fraud Insights (OFI) model training metric details.

", + "refs": { + "TrainingMetricsV2$ofi": "

The Online Fraud Insights (OFI) model training metric details.

" } }, "Outcome": { @@ -1335,6 +1416,30 @@ "refs": { } }, + "TFIMetricDataPoint": { + "base": "

The performance metrics data points for Transaction Fraud Insights (TFI) model.

", + "refs": { + "TFIMetricDataPointsList$member": null + } + }, + "TFIMetricDataPointsList": { + "base": null, + "refs": { + "TFITrainingMetricsValue$metricDataPoints": "

The model's performance metrics data points.

" + } + }, + "TFIModelPerformance": { + "base": "

The Transaction Fraud Insights (TFI) model performance score.

", + "refs": { + "TFITrainingMetricsValue$modelPerformance": "

The model performance score.

" + } + }, + "TFITrainingMetricsValue": { + "base": "

The Transaction Fraud Insights (TFI) model training metric details.

", + "refs": { + "TrainingMetricsV2$tfi": "

The Transaction Fraud Insights (TFI) model training metric details.

" + } + }, "Tag": { "base": "

A key and value pair.

", "refs": { @@ -1384,12 +1489,24 @@ "TrainingResult$trainingMetrics": "

The training metric details.

" } }, + "TrainingMetricsV2": { + "base": "

The training metrics details.

", + "refs": { + "TrainingResultV2$trainingMetricsV2": "

The training metric details.

" + } + }, "TrainingResult": { "base": "

The training result details.

", "refs": { "ModelVersionDetail$trainingResult": "

The training results.

" } }, + "TrainingResultV2": { + "base": "

The training result details.

", + "refs": { + "ModelVersionDetail$trainingResultV2": "

The training result details. The details include the relative importance of the variables.

" + } + }, "UnlabeledEventsTreatment": { "base": null, "refs": { @@ -1544,7 +1661,8 @@ "VariableImportanceMetrics": { "base": "

The variable importance metrics details.

", "refs": { - "TrainingResult$variableImportanceMetrics": "

The variable importance metrics.

" + "TrainingResult$variableImportanceMetrics": "

The variable importance metrics.

", + "TrainingResultV2$variableImportanceMetrics": null } }, "VariableList": { @@ -1662,7 +1780,7 @@ "fileValidationMessageList": { "base": null, "refs": { - "DataValidationMetrics$fileLevelMessages": "

The file-specific model training validation messages.

" + "DataValidationMetrics$fileLevelMessages": "

The file-specific model training data validation messages.

" } }, "filterString": { @@ -1674,12 +1792,29 @@ "float": { "base": null, "refs": { + "ATIMetricDataPoint$cr": "

The challenge rate. This indicates the percentage of login events that the model recommends to challenge such as one-time password, multi-factor authentication, and investigations.

", + "ATIMetricDataPoint$adr": "

The anomaly discovery rate. This metric quantifies the percentage of anomalies that can be detected by the model at the selected score threshold. A lower score threshold increases the percentage of anomalies captured by the model, but would also require challenging a larger percentage of login events, leading to a higher customer friction.

", + "ATIMetricDataPoint$threshold": "

The model's threshold that specifies an acceptable fraud capture rate. For example, a threshold of 500 means any model score 500 or above is labeled as fraud.

", + "ATIMetricDataPoint$atodr": "

The account takeover discovery rate. This metric quantifies the percentage of account compromise events that can be detected by the model at the selected score threshold. This metric is only available if 50 or more entities with at least one labeled account takeover event are present in the ingested dataset.

", + "ATIModelPerformance$asi": "

The anomaly separation index (ASI) score. This metric summarizes the overall ability of the model to separate anomalous activities from the normal behavior. Depending on the business, a large fraction of these anomalous activities can be malicious and correspond to the account takeover attacks. A model with no separability power will have the lowest possible ASI score of 0.5, whereas a model with a high separability power will have the highest possible ASI score of 1.0.

", + "AggregatedLogOddsMetric$aggregatedVariablesImportance": "

The relative importance of the variables in the list to the other event variable.

", + "AggregatedVariablesImpactExplanation$logOddsImpact": "

The raw, uninterpreted value represented as log-odds of the fraud. These values are usually between -10 and +10, but range from -infinity to +infinity.

", "LogOddsMetric$variableImportance": "

The relative importance of the variable. For more information, see Model variable importance.

", "MetricDataPoint$fpr": "

The false positive rate. This is the percentage of total legitimate events that are incorrectly predicted as fraud.

", "MetricDataPoint$precision": "

The percentage of fraud events correctly predicted as fraudulent as compared to all events predicted as fraudulent.

", "MetricDataPoint$tpr": "

The true positive rate. This is the percentage of total fraud the model detects. Also known as capture rate.

", "MetricDataPoint$threshold": "

The model threshold that specifies an acceptable fraud capture rate. For example, a threshold of 500 means any model score 500 or above is labeled as fraud.

", "ModelPredictionMap$value": null, + "OFIMetricDataPoint$fpr": "

The false positive rate. This is the percentage of total legitimate events that are incorrectly predicted as fraud.

", + "OFIMetricDataPoint$precision": "

The percentage of fraud events correctly predicted as fraudulent as compared to all events predicted as fraudulent.

", + "OFIMetricDataPoint$tpr": "

The true positive rate. This is the percentage of total fraud the model detects. Also known as capture rate.

", + "OFIMetricDataPoint$threshold": "

The model threshold that specifies an acceptable fraud capture rate. For example, a threshold of 500 means any model score 500 or above is labeled as fraud.

", + "OFIModelPerformance$auc": "

The area under the curve (auc). This summarizes the true positive rate (tpr) and false positive rate (FPR) across all possible model score thresholds.

", + "TFIMetricDataPoint$fpr": "

The false positive rate. This is the percentage of total legitimate events that are incorrectly predicted as fraud.

", + "TFIMetricDataPoint$precision": "

The percentage of fraud events correctly predicted as fraudulent as compared to all events predicted as fraudulent.

", + "TFIMetricDataPoint$tpr": "

The true positive rate. This is the percentage of total fraud the model detects. Also known as capture rate.

", + "TFIMetricDataPoint$threshold": "

The model threshold that specifies an acceptable fraud capture rate. For example, a threshold of 500 means any model score 500 or above is labeled as fraud.

", + "TFIModelPerformance$auc": "

The area under the curve (auc). This summarizes the true positive rate (tpr) and false positive rate (FPR) across all possible model score thresholds.

", "TrainingMetrics$auc": "

The area under the curve. This summarizes true positive rate (TPR) and false positive rate (FPR) across all possible model score thresholds. A model with no predictive power has an AUC of 0.5, whereas a perfect model has a score of 1.0.

", "VariableImpactExplanation$logOddsImpact": "

The raw, uninterpreted value represented as log-odds of the fraud. These values are usually between -10 and +10, but range from -infinity to +infinity.

" } @@ -1950,6 +2085,7 @@ "base": null, "refs": { "AccessDeniedException$message": null, + "AggregatedVariablesImpactExplanation$relativeImpact": "

The relative impact of the aggregated variables in terms of magnitude on the prediction scores.

", "BatchCreateVariableError$name": "

The name.

", "BatchCreateVariableError$message": "

The error message.

", "BatchGetVariableError$name": "

The error name.

", diff --git a/models/apis/iotsitewise/2019-12-02/api-2.json b/models/apis/iotsitewise/2019-12-02/api-2.json index e26ba18b982..72ecb1779b7 100644 --- a/models/apis/iotsitewise/2019-12-02/api-2.json +++ b/models/apis/iotsitewise/2019-12-02/api-2.json @@ -205,6 +205,26 @@ ], "endpoint":{"hostPrefix":"api."} }, + "CreateBulkImportJob":{ + "name":"CreateBulkImportJob", + "http":{ + "method":"POST", + "requestUri":"/jobs", + "responseCode":202 + }, + "input":{"shape":"CreateBulkImportJobRequest"}, + "output":{"shape":"CreateBulkImportJobResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"}, + {"shape":"ConflictingOperationException"} + ], + "endpoint":{"hostPrefix":"data."} + }, "CreateDashboard":{ "name":"CreateDashboard", "http":{ @@ -478,6 +498,22 @@ ], "endpoint":{"hostPrefix":"api."} }, + "DescribeBulkImportJob":{ + "name":"DescribeBulkImportJob", + "http":{ + "method":"GET", + "requestUri":"/jobs/{jobId}" + }, + "input":{"shape":"DescribeBulkImportJobRequest"}, + "output":{"shape":"DescribeBulkImportJobResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "endpoint":{"hostPrefix":"data."} + }, "DescribeDashboard":{ "name":"DescribeDashboard", "http":{ @@ -805,6 +841,22 @@ ], "endpoint":{"hostPrefix":"api."} }, + "ListBulkImportJobs":{ + "name":"ListBulkImportJobs", + "http":{ + "method":"GET", + "requestUri":"/jobs" + }, + "input":{"shape":"ListBulkImportJobsRequest"}, + "output":{"shape":"ListBulkImportJobsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalFailureException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "endpoint":{"hostPrefix":"data."} + }, 
"ListDashboards":{ "name":"ListDashboards", "http":{ @@ -2120,6 +2172,11 @@ "errorEntries":{"shape":"BatchPutAssetPropertyErrorEntries"} } }, + "Bucket":{ + "type":"string", + "max":63, + "min":3 + }, "CapabilityConfiguration":{ "type":"string", "max":104857600, @@ -2146,6 +2203,23 @@ "min":36, "pattern":"\\S{36,64}" }, + "ColumnName":{ + "type":"string", + "enum":[ + "ALIAS", + "ASSET_ID", + "PROPERTY_ID", + "DATA_TYPE", + "TIMESTAMP_SECONDS", + "TIMESTAMP_NANO_OFFSET", + "QUALITY", + "VALUE" + ] + }, + "ColumnNames":{ + "type":"list", + "member":{"shape":"ColumnName"} + }, "CompositeModelProperty":{ "type":"structure", "required":[ @@ -2301,6 +2375,36 @@ "assetStatus":{"shape":"AssetStatus"} } }, + "CreateBulkImportJobRequest":{ + "type":"structure", + "required":[ + "jobName", + "jobRoleArn", + "files", + "errorReportLocation", + "jobConfiguration" + ], + "members":{ + "jobName":{"shape":"Name"}, + "jobRoleArn":{"shape":"ARN"}, + "files":{"shape":"Files"}, + "errorReportLocation":{"shape":"ErrorReportLocation"}, + "jobConfiguration":{"shape":"JobConfiguration"} + } + }, + "CreateBulkImportJobResponse":{ + "type":"structure", + "required":[ + "jobId", + "jobName", + "jobStatus" + ], + "members":{ + "jobId":{"shape":"ID"}, + "jobName":{"shape":"Name"}, + "jobStatus":{"shape":"JobStatus"} + } + }, "CreateDashboardRequest":{ "type":"structure", "required":[ @@ -2422,6 +2526,12 @@ "projectArn":{"shape":"ARN"} } }, + "Csv":{ + "type":"structure", + "members":{ + "columnNames":{"shape":"ColumnNames"} + } + }, "CustomerManagedS3Storage":{ "type":"structure", "required":[ @@ -2777,6 +2887,42 @@ "assetDescription":{"shape":"Description"} } }, + "DescribeBulkImportJobRequest":{ + "type":"structure", + "required":["jobId"], + "members":{ + "jobId":{ + "shape":"ID", + "location":"uri", + "locationName":"jobId" + } + } + }, + "DescribeBulkImportJobResponse":{ + "type":"structure", + "required":[ + "jobId", + "jobName", + "jobStatus", + "jobRoleArn", + "files", + 
"errorReportLocation", + "jobConfiguration", + "jobCreationDate", + "jobLastUpdateDate" + ], + "members":{ + "jobId":{"shape":"ID"}, + "jobName":{"shape":"Name"}, + "jobStatus":{"shape":"JobStatus"}, + "jobRoleArn":{"shape":"ARN"}, + "files":{"shape":"Files"}, + "errorReportLocation":{"shape":"ErrorReportLocation"}, + "jobConfiguration":{"shape":"JobConfiguration"}, + "jobCreationDate":{"shape":"Timestamp"}, + "jobLastUpdateDate":{"shape":"Timestamp"} + } + }, "DescribeDashboardRequest":{ "type":"structure", "required":["dashboardId"], @@ -3161,6 +3307,17 @@ } }, "ErrorMessage":{"type":"string"}, + "ErrorReportLocation":{ + "type":"structure", + "required":[ + "bucket", + "prefix" + ], + "members":{ + "bucket":{"shape":"Bucket"}, + "prefix":{"shape":"String"} + } + }, "ExceptionMessage":{"type":"string"}, "Expression":{ "type":"string", @@ -3182,6 +3339,28 @@ "type":"list", "member":{"shape":"ExpressionVariable"} }, + "File":{ + "type":"structure", + "required":[ + "bucket", + "key" + ], + "members":{ + "bucket":{"shape":"Bucket"}, + "key":{"shape":"String"}, + "versionId":{"shape":"String"} + } + }, + "FileFormat":{ + "type":"structure", + "members":{ + "csv":{"shape":"Csv"} + } + }, + "Files":{ + "type":"list", + "member":{"shape":"File"} + }, "ForwardingConfig":{ "type":"structure", "required":["state"], @@ -3644,6 +3823,41 @@ "error":{"httpStatusCode":400}, "exception":true }, + "JobConfiguration":{ + "type":"structure", + "required":["fileFormat"], + "members":{ + "fileFormat":{"shape":"FileFormat"} + } + }, + "JobStatus":{ + "type":"string", + "enum":[ + "PENDING", + "CANCELLED", + "RUNNING", + "COMPLETED", + "FAILED", + "COMPLETED_WITH_FAILURES" + ] + }, + "JobSummaries":{ + "type":"list", + "member":{"shape":"JobSummary"} + }, + "JobSummary":{ + "type":"structure", + "required":[ + "id", + "name", + "status" + ], + "members":{ + "id":{"shape":"ID"}, + "name":{"shape":"Name"}, + "status":{"shape":"JobStatus"} + } + }, "KmsKeyId":{ "type":"string", 
"max":2048, @@ -3845,6 +4059,46 @@ "nextToken":{"shape":"NextToken"} } }, + "ListBulkImportJobsFilter":{ + "type":"string", + "enum":[ + "ALL", + "PENDING", + "RUNNING", + "CANCELLED", + "FAILED", + "COMPLETED_WITH_FAILURES", + "COMPLETED" + ] + }, + "ListBulkImportJobsRequest":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"NextToken", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "location":"querystring", + "locationName":"maxResults" + }, + "filter":{ + "shape":"ListBulkImportJobsFilter", + "location":"querystring", + "locationName":"filter" + } + } + }, + "ListBulkImportJobsResponse":{ + "type":"structure", + "required":["jobSummaries"], + "members":{ + "jobSummaries":{"shape":"JobSummaries"}, + "nextToken":{"shape":"NextToken"} + } + }, "ListDashboardsRequest":{ "type":"structure", "required":["projectId"], @@ -4478,6 +4732,7 @@ "MULTI_LAYER_STORAGE" ] }, + "String":{"type":"string"}, "TagKey":{ "type":"string", "max":128, diff --git a/models/apis/iotsitewise/2019-12-02/docs-2.json b/models/apis/iotsitewise/2019-12-02/docs-2.json index 005fdbaa566..d046d162e24 100644 --- a/models/apis/iotsitewise/2019-12-02/docs-2.json +++ b/models/apis/iotsitewise/2019-12-02/docs-2.json @@ -13,6 +13,7 @@ "CreateAccessPolicy": "

Creates an access policy that grants the specified identity (Amazon Web Services SSO user, Amazon Web Services SSO group, or IAM user) access to the specified IoT SiteWise Monitor portal or project resource.

", "CreateAsset": "

Creates an asset from an existing asset model. For more information, see Creating assets in the IoT SiteWise User Guide.

", "CreateAssetModel": "

Creates an asset model from specified property and hierarchy definitions. You create assets from asset models. With asset models, you can easily create assets of the same type that have standardized definitions. Each asset created from a model inherits the asset model's property and hierarchy definitions. For more information, see Defining asset models in the IoT SiteWise User Guide.

", + "CreateBulkImportJob": "

This API operation is in preview release for IoT SiteWise and is subject to change. We recommend that you use this operation only with test data, and not in production environments.

Defines a job to ingest data to IoT SiteWise from Amazon S3. For more information, see Create a bulk import job (CLI) in the Amazon Simple Storage Service User Guide.

You must enable IoT SiteWise to export data to Amazon S3 before you create a bulk import job. For more information about how to configure storage settings, see PutStorageConfiguration.

", "CreateDashboard": "

Creates a dashboard in an IoT SiteWise Monitor project.

", "CreateGateway": "

Creates a gateway, which is a virtual or edge device that delivers industrial data streams from local servers to IoT SiteWise. For more information, see Ingesting data using a gateway in the IoT SiteWise User Guide.

", "CreatePortal": "

Creates a portal, which can contain projects and dashboards. IoT SiteWise Monitor uses Amazon Web Services SSO or IAM to authenticate portal users and manage user permissions.

Before you can sign in to a new portal, you must add at least one identity to that portal. For more information, see Adding or removing portal administrators in the IoT SiteWise User Guide.

", @@ -29,6 +30,7 @@ "DescribeAsset": "

Retrieves information about an asset.

", "DescribeAssetModel": "

Retrieves information about an asset model.

", "DescribeAssetProperty": "

Retrieves information about an asset property.

When you call this operation for an attribute property, this response includes the default attribute value that you define in the asset model. If you update the default value in the model, this operation's response includes the new default value.

This operation doesn't return the value of the asset property. To get the value of an asset property, use GetAssetPropertyValue.

", + "DescribeBulkImportJob": "

This API operation is in preview release for IoT SiteWise and is subject to change. We recommend that you use this operation only with test data, and not in production environments.

Retrieves information about a bulk import job request. For more information, see Describe a bulk import job (CLI) in the Amazon Simple Storage Service User Guide.

", "DescribeDashboard": "

Retrieves information about a dashboard.

", "DescribeDefaultEncryptionConfiguration": "

Retrieves information about the default encryption configuration for the Amazon Web Services account in the default or specified Region. For more information, see Key management in the IoT SiteWise User Guide.

", "DescribeGateway": "

Retrieves information about a gateway.

", @@ -49,6 +51,7 @@ "ListAssetRelationships": "

Retrieves a paginated list of asset relationships for an asset. You can use this operation to identify an asset's root asset and all associated assets between that asset and its root.

", "ListAssets": "

Retrieves a paginated list of asset summaries.

You can use this operation to do the following:

You can't use this operation to list all assets. To retrieve summaries for all of your assets, use ListAssetModels to get all of your asset model IDs. Then, use ListAssets to get all assets for each asset model.

", "ListAssociatedAssets": "

Retrieves a paginated list of associated assets.

You can use this operation to do the following:

", + "ListBulkImportJobs": "

This API operation is in preview release for IoT SiteWise and is subject to change. We recommend that you use this operation only with test data, and not in production environments.

Retrieves a paginated list of bulk import job requests. For more information, see List bulk import jobs (CLI) in the Amazon Simple Storage Service User Guide.

", "ListDashboards": "

Retrieves a paginated list of dashboards for an IoT SiteWise Monitor project.

", "ListGateways": "

Retrieves a paginated list of gateways.

", "ListPortals": "

Retrieves a paginated list of IoT SiteWise Monitor portals.

", @@ -83,6 +86,7 @@ "CreateAccessPolicyResponse$accessPolicyArn": "

The ARN of the access policy, which has the following format.

arn:${Partition}:iotsitewise:${Region}:${Account}:access-policy/${AccessPolicyId}

", "CreateAssetModelResponse$assetModelArn": "

The ARN of the asset model, which has the following format.

arn:${Partition}:iotsitewise:${Region}:${Account}:asset-model/${AssetModelId}

", "CreateAssetResponse$assetArn": "

The ARN of the asset, which has the following format.

arn:${Partition}:iotsitewise:${Region}:${Account}:asset/${AssetId}

", + "CreateBulkImportJobRequest$jobRoleArn": "

The ARN of the IAM role that allows IoT SiteWise to read Amazon S3 data.

", "CreateDashboardResponse$dashboardArn": "

The ARN of the dashboard, which has the following format.

arn:${Partition}:iotsitewise:${Region}:${Account}:dashboard/${DashboardId}

", "CreateGatewayResponse$gatewayArn": "

The ARN of the gateway, which has the following format.

arn:${Partition}:iotsitewise:${Region}:${Account}:gateway/${GatewayId}

", "CreatePortalRequest$roleArn": "

The ARN of a service role that allows the portal's users to access your IoT SiteWise resources on your behalf. For more information, see Using service roles for IoT SiteWise Monitor in the IoT SiteWise User Guide.

", @@ -93,6 +97,7 @@ "DescribeAccessPolicyResponse$accessPolicyArn": "

The ARN of the access policy, which has the following format.

arn:${Partition}:iotsitewise:${Region}:${Account}:access-policy/${AccessPolicyId}

", "DescribeAssetModelResponse$assetModelArn": "

The ARN of the asset model, which has the following format.

arn:${Partition}:iotsitewise:${Region}:${Account}:asset-model/${AssetModelId}

", "DescribeAssetResponse$assetArn": "

The ARN of the asset, which has the following format.

arn:${Partition}:iotsitewise:${Region}:${Account}:asset/${AssetId}

", + "DescribeBulkImportJobResponse$jobRoleArn": "

The ARN of the IAM role that allows IoT SiteWise to read Amazon S3 data.

", "DescribeDashboardResponse$dashboardArn": "

The ARN of the dashboard, which has the following format.

arn:${Partition}:iotsitewise:${Region}:${Account}:dashboard/${DashboardId}

", "DescribeDefaultEncryptionConfigurationResponse$kmsKeyArn": "

The key ARN of the customer managed key used for KMS encryption if you use KMS_BASED_ENCRYPTION.

", "DescribeGatewayResponse$gatewayArn": "

The ARN of the gateway, which has the following format.

arn:${Partition}:iotsitewise:${Region}:${Account}:gateway/${GatewayId}

", @@ -777,6 +782,13 @@ "refs": { } }, + "Bucket": { + "base": null, + "refs": { + "ErrorReportLocation$bucket": "

The name of the Amazon S3 bucket to which errors associated with the bulk import job are sent.

", + "File$bucket": "

The name of the Amazon S3 bucket from which data is imported.

" + } + }, "CapabilityConfiguration": { "base": null, "refs": { @@ -833,6 +845,18 @@ "UpdateProjectRequest$clientToken": "

A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

" } }, + "ColumnName": { + "base": null, + "refs": { + "ColumnNames$member": null + } + }, + "ColumnNames": { + "base": null, + "refs": { + "Csv$columnNames": "

The column names specified in the .csv file.

" + } + }, "CompositeModelProperty": { "base": "

Contains information about a composite model property on an asset.

", "refs": { @@ -908,6 +932,16 @@ "refs": { } }, + "CreateBulkImportJobRequest": { + "base": null, + "refs": { + } + }, + "CreateBulkImportJobResponse": { + "base": null, + "refs": { + } + }, "CreateDashboardRequest": { "base": null, "refs": { @@ -948,6 +982,12 @@ "refs": { } }, + "Csv": { + "base": "

A .csv file.

", + "refs": { + "FileFormat$csv": "

The .csv file format.

" + } + }, "CustomerManagedS3Storage": { "base": "

Contains information about a customer managed Amazon S3 bucket.

", "refs": { @@ -1090,6 +1130,16 @@ "refs": { } }, + "DescribeBulkImportJobRequest": { + "base": null, + "refs": { + } + }, + "DescribeBulkImportJobResponse": { + "base": null, + "refs": { + } + }, "DescribeDashboardRequest": { "base": null, "refs": { @@ -1323,6 +1373,13 @@ "UnauthorizedException$message": null } }, + "ErrorReportLocation": { + "base": "

The Amazon S3 destination where errors associated with the job creation request are saved.

", + "refs": { + "CreateBulkImportJobRequest$errorReportLocation": "

The Amazon S3 destination where errors associated with the job creation request are saved.

", + "DescribeBulkImportJobResponse$errorReportLocation": "

The Amazon S3 destination where errors associated with the job creation request are saved.

" + } + }, "ExceptionMessage": { "base": null, "refs": { @@ -1349,6 +1406,25 @@ "Transform$variables": "

The list of variables used in the expression.

" } }, + "File": { + "base": "

The file in Amazon S3 where your data is saved.

", + "refs": { + "Files$member": null + } + }, + "FileFormat": { + "base": "

The file format of the data.

", + "refs": { + "JobConfiguration$fileFormat": "

The file format of the data in Amazon S3.

" + } + }, + "Files": { + "base": null, + "refs": { + "CreateBulkImportJobRequest$files": "

The files in the specified Amazon S3 bucket that contain your data.

", + "DescribeBulkImportJobResponse$files": "

The files in the specified Amazon S3 bucket that contain your data.

" + } + }, "ForwardingConfig": { "base": "

The forwarding configuration for a given property.

", "refs": { @@ -1501,6 +1577,7 @@ "CreateAssetModelResponse$assetModelId": "

The ID of the asset model. You can use this ID when you call other IoT SiteWise APIs.

", "CreateAssetRequest$assetModelId": "

The ID of the asset model from which to create the asset.

", "CreateAssetResponse$assetId": "

The ID of the asset. This ID uniquely identifies the asset within IoT SiteWise and can be used with other IoT SiteWise APIs.

", + "CreateBulkImportJobResponse$jobId": "

The ID of the job.

", "CreateDashboardRequest$projectId": "

The ID of the project in which to create the dashboard.

", "CreateDashboardResponse$dashboardId": "

The ID of the dashboard.

", "CreateGatewayResponse$gatewayId": "

The ID of the gateway device. You can use this ID when you call other IoT SiteWise APIs.

", @@ -1528,6 +1605,8 @@ "DescribeAssetRequest$assetId": "

The ID of the asset.

", "DescribeAssetResponse$assetId": "

The ID of the asset.

", "DescribeAssetResponse$assetModelId": "

The ID of the asset model that was used to create the asset.

", + "DescribeBulkImportJobRequest$jobId": "

The ID of the job.

", + "DescribeBulkImportJobResponse$jobId": "

The ID of the job.

", "DescribeDashboardRequest$dashboardId": "

The ID of the dashboard.

", "DescribeDashboardResponse$dashboardId": "

The ID of the dashboard.

", "DescribeDashboardResponse$projectId": "

The ID of the project that the dashboard is in.

", @@ -1561,6 +1640,7 @@ "IDs$member": null, "Image$id": "

The ID of an existing image. Specify this parameter to keep an existing image.

", "ImageLocation$id": "

The ID of the image.

", + "JobSummary$id": "

The ID of the job.

", "ListAccessPoliciesRequest$resourceId": "

The ID of the resource. This parameter is required if you specify resourceType.

", "ListAssetRelationshipsRequest$assetId": "

The ID of the asset.

", "ListAssetsRequest$assetModelId": "

The ID of the asset model by which to filter the list of assets. This parameter is required if you choose ALL for filter.

", @@ -1698,6 +1778,33 @@ "refs": { } }, + "JobConfiguration": { + "base": "

Contains the configuration information of a job, such as the file format used to save data in Amazon S3.

", + "refs": { + "CreateBulkImportJobRequest$jobConfiguration": "

Contains the configuration information of a job, such as the file format used to save data in Amazon S3.

", + "DescribeBulkImportJobResponse$jobConfiguration": "

Contains the configuration information of a job, such as the file format used to save data in Amazon S3.

" + } + }, + "JobStatus": { + "base": null, + "refs": { + "CreateBulkImportJobResponse$jobStatus": "

The status of the bulk import job can be one of the following values.

", + "DescribeBulkImportJobResponse$jobStatus": "

The status of the bulk import job can be one of the following values.

", + "JobSummary$status": "

The status of the bulk import job can be one of the following values.

" + } + }, + "JobSummaries": { + "base": null, + "refs": { + "ListBulkImportJobsResponse$jobSummaries": "

One or more job summaries to list.

" + } + }, + "JobSummary": { + "base": "

Contains the job summary information.

", + "refs": { + "JobSummaries$member": null + } + }, "KmsKeyId": { "base": null, "refs": { @@ -1765,6 +1872,22 @@ "refs": { } }, + "ListBulkImportJobsFilter": { + "base": null, + "refs": { + "ListBulkImportJobsRequest$filter": "

You can use a filter to select the bulk import jobs that you want to retrieve.

" + } + }, + "ListBulkImportJobsRequest": { + "base": null, + "refs": { + } + }, + "ListBulkImportJobsResponse": { + "base": null, + "refs": { + } + }, "ListDashboardsRequest": { "base": null, "refs": { @@ -1877,6 +2000,7 @@ "ListAssetRelationshipsRequest$maxResults": "

The maximum number of results to return for each paginated request.

", "ListAssetsRequest$maxResults": "

The maximum number of results to return for each paginated request.

Default: 50

", "ListAssociatedAssetsRequest$maxResults": "

The maximum number of results to return for each paginated request.

Default: 50

", + "ListBulkImportJobsRequest$maxResults": "

The maximum number of results to return for each paginated request.

", "ListDashboardsRequest$maxResults": "

The maximum number of results to return for each paginated request.

Default: 50

", "ListGatewaysRequest$maxResults": "

The maximum number of results to return for each paginated request.

Default: 50

", "ListPortalsRequest$maxResults": "

The maximum number of results to return for each paginated request.

Default: 50

", @@ -1966,6 +2090,8 @@ "CompositeModelProperty$type": "

The type of the composite model that defines this property.

", "CreateAssetModelRequest$assetModelName": "

A unique, friendly name for the asset model.

", "CreateAssetRequest$assetName": "

A unique, friendly name for the asset.

", + "CreateBulkImportJobRequest$jobName": "

The unique name that helps identify the job request.

", + "CreateBulkImportJobResponse$jobName": "

The unique name that helps identify the job request.

", "CreateDashboardRequest$dashboardName": "

A friendly name for the dashboard.

", "CreateGatewayRequest$gatewayName": "

A unique, friendly name for the gateway.

", "CreatePortalRequest$portalName": "

A friendly name for the portal.

", @@ -1974,12 +2100,14 @@ "DescribeAssetModelResponse$assetModelName": "

The name of the asset model.

", "DescribeAssetPropertyResponse$assetName": "

The name of the asset.

", "DescribeAssetResponse$assetName": "

The name of the asset.

", + "DescribeBulkImportJobResponse$jobName": "

The unique name that helps identify the job request.

", "DescribeDashboardResponse$dashboardName": "

The name of the dashboard.

", "DescribeGatewayResponse$gatewayName": "

The name of the gateway.

", "DescribePortalResponse$portalName": "

The name of the portal.

", "DescribeProjectResponse$projectName": "

The name of the project.

", "DescribeTimeSeriesResponse$dataTypeSpec": "

The data type of the structure for this time series. This parameter is required for time series that have the STRUCT data type.

The options for this parameter depend on the type of the composite model in which you created the asset property that is associated with your time series. Use AWS/ALARM_STATE for alarm state in alarm composite models.

", "GatewaySummary$gatewayName": "

The name of the asset.

", + "JobSummary$name": "

The unique name that helps identify the job request.

", "PortalSummary$name": "

The name of the portal.

", "ProjectSummary$name": "

The name of the project.

", "Property$name": "

The name of the property.

", @@ -2017,6 +2145,8 @@ "ListAssetsResponse$nextToken": "

The token for the next set of results, or null if there are no additional results.

", "ListAssociatedAssetsRequest$nextToken": "

The token to be used for the next set of paginated results.

", "ListAssociatedAssetsResponse$nextToken": "

The token for the next set of results, or null if there are no additional results.

", + "ListBulkImportJobsRequest$nextToken": "

The token to be used for the next set of paginated results.

", + "ListBulkImportJobsResponse$nextToken": "

The token for the next set of results, or null if there are no additional results.

", "ListDashboardsRequest$nextToken": "

The token to be used for the next set of paginated results.

", "ListDashboardsResponse$nextToken": "

The token for the next set of results, or null if there are no additional results.

", "ListGatewaysRequest$nextToken": "

The token to be used for the next set of paginated results.

", @@ -2345,6 +2475,14 @@ "PutStorageConfigurationResponse$storageType": "

The storage tier that you specified for your data. The storageType parameter can be one of the following values:

" } }, + "String": { + "base": null, + "refs": { + "ErrorReportLocation$prefix": "

Amazon S3 uses the prefix as a folder name to organize data in the bucket. Each Amazon S3 object has a key that is its unique identifier in the bucket. Each object in a bucket has exactly one key. The prefix must end with a forward slash (/). For more information, see Organizing objects using prefixes in the Amazon Simple Storage Service User Guide.

", + "File$key": "

The key of the Amazon S3 object that contains your data. Each object has a key that is a unique identifier. Each object has exactly one key.

", + "File$versionId": "

The version ID to identify a specific version of the Amazon S3 object that contains your data.

" + } + }, "TagKey": { "base": null, "refs": { @@ -2464,6 +2602,8 @@ "DescribeAssetModelResponse$assetModelLastUpdateDate": "

The date the asset model was last updated, in Unix epoch time.

", "DescribeAssetResponse$assetCreationDate": "

The date the asset was created, in Unix epoch time.

", "DescribeAssetResponse$assetLastUpdateDate": "

The date the asset was last updated, in Unix epoch time.

", + "DescribeBulkImportJobResponse$jobCreationDate": "

The date the job was created, in Unix epoch time.

", + "DescribeBulkImportJobResponse$jobLastUpdateDate": "

The date the job was last updated, in Unix epoch time.

", "DescribeDashboardResponse$dashboardCreationDate": "

The date the dashboard was created, in Unix epoch time.

", "DescribeDashboardResponse$dashboardLastUpdateDate": "

The date the dashboard was last updated, in Unix epoch time.

", "DescribeGatewayResponse$creationDate": "

The date the gateway was created, in Unix epoch time.

", diff --git a/models/apis/iotsitewise/2019-12-02/paginators-1.json b/models/apis/iotsitewise/2019-12-02/paginators-1.json index f136f3af528..56e894096b4 100644 --- a/models/apis/iotsitewise/2019-12-02/paginators-1.json +++ b/models/apis/iotsitewise/2019-12-02/paginators-1.json @@ -62,6 +62,12 @@ "limit_key": "maxResults", "result_key": "assetSummaries" }, + "ListBulkImportJobs": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "jobSummaries" + }, "ListDashboards": { "input_token": "nextToken", "output_token": "nextToken", diff --git a/models/apis/kendra/2019-02-03/api-2.json b/models/apis/kendra/2019-02-03/api-2.json index 309aa33276f..59ae6823832 100644 --- a/models/apis/kendra/2019-02-03/api-2.json +++ b/models/apis/kendra/2019-02-03/api-2.json @@ -4572,9 +4572,17 @@ "FieldMappings":{"shape":"DataSourceToIndexFieldMappingList"}, "DocumentTitleFieldName":{"shape":"DataSourceFieldName"}, "DisableLocalGroups":{"shape":"Boolean"}, - "SslCertificateS3Path":{"shape":"S3Path"} + "SslCertificateS3Path":{"shape":"S3Path"}, + "AuthenticationType":{"shape":"SharePointOnlineAuthenticationType"} } }, + "SharePointOnlineAuthenticationType":{ + "type":"string", + "enum":[ + "HTTP_BASIC", + "OAUTH2" + ] + }, "SharePointUrlList":{ "type":"list", "member":{"shape":"Url"}, diff --git a/models/apis/kendra/2019-02-03/docs-2.json b/models/apis/kendra/2019-02-03/docs-2.json index 3b93d2fb69e..53a2c63b7f3 100644 --- a/models/apis/kendra/2019-02-03/docs-2.json +++ b/models/apis/kendra/2019-02-03/docs-2.json @@ -8,7 +8,7 @@ "BatchGetDocumentStatus": "

Returns the indexing status for one or more documents submitted with the BatchPutDocument API.

When you use the BatchPutDocument API, documents are indexed asynchronously. You can use the BatchGetDocumentStatus API to get the current status of a list of documents so that you can determine if they have been successfully indexed.

You can also use the BatchGetDocumentStatus API to check the status of the BatchDeleteDocument API. When a document is deleted from the index, Amazon Kendra returns NOT_FOUND as the status.

", "BatchPutDocument": "

Adds one or more documents to an index.

The BatchPutDocument API enables you to ingest inline documents or a set of documents stored in an Amazon S3 bucket. Use this API to ingest your text and unstructured text into an index, add custom attributes to the documents, and to attach an access control list to the documents added to the index.

The documents are indexed asynchronously. You can see the progress of the batch using Amazon Web Services CloudWatch. Any error messages related to processing the batch are sent to your Amazon Web Services CloudWatch log.

For an example of ingesting inline documents using Python and Java SDKs, see Adding files directly to an index.

", "ClearQuerySuggestions": "

Clears existing query suggestions from an index.

This deletes existing suggestions only, not the queries in the query log. After you clear suggestions, Amazon Kendra learns new suggestions based on new queries added to the query log from the time you cleared suggestions. If you do not see any new suggestions, then please allow Amazon Kendra to collect enough queries to learn new suggestions.

ClearQuerySuggestions is currently not supported in the Amazon Web Services GovCloud (US-West) region.

", - "CreateAccessControlConfiguration": "

Creates an access configuration for your documents. This includes user and group access information for your documents. This is useful for user context filtering, where search results are filtered based on the user or their group access to documents.

You can use this to re-configure your existing document level access control without indexing all of your documents again. For example, your index contains top-secret company documents that only certain employees or users should access. One of these users leaves the company or switches to a team that should be blocked from access to top-secret documents. Your documents in your index still give this user access to top-secret documents due to the user having access at the time your documents were indexed. You can create a specific access control configuration for this user with deny access. You can later update the access control configuration to allow access in the case the user returns to the company and re-joins the 'top-secret' team. You can re-configure access control for your documents circumstances change.

To apply your access control configuration to certain documents, you call the BatchPutDocument API with the AccessControlConfigurationId included in the Document object. If you use an S3 bucket as a data source, you update the .metadata.json with the AccessControlConfigurationId and synchronize your data source. Amazon Kendra currently only supports access control configuration for S3 data sources and documents indexed using the BatchPutDocument API.

", + "CreateAccessControlConfiguration": "

Creates an access configuration for your documents. This includes user and group access information for your documents. This is useful for user context filtering, where search results are filtered based on the user or their group access to documents.

You can use this to re-configure your existing document level access control without indexing all of your documents again. For example, your index contains top-secret company documents that only certain employees or users should access. One of these users leaves the company or switches to a team that should be blocked from accessing top-secret documents. The user still has access to top-secret documents because the user had access when your documents were previously indexed. You can create a specific access control configuration for the user with deny access. You can later update the access control configuration to allow access if the user returns to the company and re-joins the 'top-secret' team. You can re-configure access control for your documents as circumstances change.

To apply your access control configuration to certain documents, you call the BatchPutDocument API with the AccessControlConfigurationId included in the Document object. If you use an S3 bucket as a data source, you update the .metadata.json with the AccessControlConfigurationId and synchronize your data source. Amazon Kendra currently only supports access control configuration for S3 data sources and documents indexed using the BatchPutDocument API.

", "CreateDataSource": "

Creates a data source that you want to use with an Amazon Kendra index.

You specify a name, data source connector type and description for your data source. You also specify configuration information for the data source connector.

CreateDataSource is a synchronous operation. The operation returns 200 if the data source was successfully created. Otherwise, an exception is raised.

Amazon S3 and custom data sources are the only supported data sources in the Amazon Web Services GovCloud (US-West) region.

For an example of creating an index and data source using the Python SDK, see Getting started with Python SDK. For an example of creating an index and data source using the Java SDK, see Getting started with Java SDK.

", "CreateExperience": "

Creates an Amazon Kendra experience such as a search application. For more information on creating a search application experience, including using the Python and Java SDKs, see Building a search experience with no code.

", "CreateFaq": "

Creates an new set of frequently asked question (FAQ) questions and answers.

Adding FAQs to an index is an asynchronous operation.

For an example of adding an FAQ to an index using Python and Java SDKs, see Using your FAQ file.

", @@ -55,7 +55,7 @@ "SubmitFeedback": "

Enables you to provide feedback to Amazon Kendra to improve the performance of your index.

SubmitFeedback is currently not supported in the Amazon Web Services GovCloud (US-West) region.

", "TagResource": "

Adds the specified tag to the specified index, FAQ, or data source resource. If the tag already exists, the existing value is replaced with the new value.

", "UntagResource": "

Removes a tag from an index, FAQ, or a data source.

", - "UpdateAccessControlConfiguration": "

Updates an access control configuration for your documents in an index. This includes user and group access information for your documents. This is useful for user context filtering, where search results are filtered based on the user or their group access to documents.

You can update an access control configuration you created without indexing all of your documents again. For example, your index contains top-secret company documents that only certain employees or users should access. You created an 'allow' access control configuration for one user who recently joined the 'top-secret' team, switching from a team with 'deny' access to top-secret documents. However, the user suddenly returns to their previous team and should no longer have access to top secret documents. You can update the access control configuration to re-configure access control for your documents as circumstances change.

You call the BatchPutDocument API to apply the updated access control configuration, with the AccessControlConfigurationId included in the Document object. If you use an S3 bucket as a data source, you synchronize your data source to apply the the AccessControlConfigurationId in the .metadata.json file. Amazon Kendra currently only supports access control configuration for S3 data sources and documents indexed using the BatchPutDocument API.

", + "UpdateAccessControlConfiguration": "

Updates an access control configuration for your documents in an index. This includes user and group access information for your documents. This is useful for user context filtering, where search results are filtered based on the user or their group access to documents.

You can update an access control configuration you created without indexing all of your documents again. For example, your index contains top-secret company documents that only certain employees or users should access. You created an 'allow' access control configuration for one user who recently joined the 'top-secret' team, switching from a team with 'deny' access to top-secret documents. However, the user suddenly returns to their previous team and should no longer have access to top secret documents. You can update the access control configuration to re-configure access control for your documents as circumstances change.

You call the BatchPutDocument API to apply the updated access control configuration, with the AccessControlConfigurationId included in the Document object. If you use an S3 bucket as a data source, you synchronize your data source to apply the AccessControlConfigurationId in the .metadata.json file. Amazon Kendra currently only supports access control configuration for S3 data sources and documents indexed using the BatchPutDocument API.

", "UpdateDataSource": "

Updates an existing Amazon Kendra data source.

", "UpdateExperience": "

Updates your Amazon Kendra experience such as a search application. For more information on creating a search application experience, see Building a search experience with no code.

", "UpdateIndex": "

Updates an existing Amazon Kendra index.

", @@ -2977,7 +2977,7 @@ "QuipConfiguration$SecretArn": "

The Amazon Resource Name (ARN) of an Secrets Manager secret that contains the key-value pairs that are required to connect to your Quip. The secret must contain a JSON structure with the following keys:

", "SalesforceConfiguration$SecretArn": "

The Amazon Resource Name (ARN) of an Secrets Managersecret that contains the key/value pairs required to connect to your Salesforce instance. The secret must contain a JSON structure with the following keys:

", "ServiceNowConfiguration$SecretArn": "

The Amazon Resource Name (ARN) of the Secrets Manager secret that contains the user name and password required to connect to the ServiceNow instance. You can also provide OAuth authentication credentials of user name, password, client ID, and client secret. For more information, see Authentication for a ServiceNow data source.

", - "SharePointConfiguration$SecretArn": "

The Amazon Resource Name (ARN) of an Secrets Manager secret that contains the user name and password required to connect to the SharePoint instance. If you use SharePoint Server, you also need to provide the sever domain name as part of the credentials. For more information, see Using a Microsoft SharePoint Data Source.

", + "SharePointConfiguration$SecretArn": "

The Amazon Resource Name (ARN) of an Secrets Manager secret that contains the user name and password required to connect to the SharePoint instance. If you use SharePoint Server, you also need to provide the server domain name as part of the credentials. For more information, see Using a Microsoft SharePoint Data Source.

You can also provide OAuth authentication credentials of user name, password, client ID, and client secret. For more information, see Authentication for a SharePoint data source.

", "SlackConfiguration$SecretArn": "

The Amazon Resource Name (ARN) of an Secrets Manager secret that contains the key-value pairs required to connect to your Slack workspace team. The secret must contain a JSON structure with the following keys:

" } }, @@ -3065,6 +3065,12 @@ "DataSourceConfiguration$SharePointConfiguration": "

Provides the configuration information to connect to Microsoft SharePoint as your data source.

" } }, + "SharePointOnlineAuthenticationType": { + "base": null, + "refs": { + "SharePointConfiguration$AuthenticationType": "

Whether you want to connect to SharePoint using basic authentication of user name and password, or OAuth authentication of user name, password, client ID, and client secret. You can use OAuth authentication for SharePoint Online.

" + } + }, "SharePointUrlList": { "base": null, "refs": { @@ -3231,8 +3237,8 @@ "JiraStatus$member": null, "JsonTokenTypeConfiguration$UserNameAttributeField": "

The user name attribute field.

", "JsonTokenTypeConfiguration$GroupAttributeField": "

The group attribute field.

", - "ListAccessControlConfigurationsRequest$NextToken": "

If the previous response was incomplete (because there is more data to retrieve), Amazon Kendra returns a pagination token in the response. You can use this pagination token to retrieve the next set of access control configurations.

", - "ListAccessControlConfigurationsResponse$NextToken": "

If the response is truncated, Amazon Kendra returns this token that you can use in the subsequent request to retrieve the next set of access control configurations.

", + "ListAccessControlConfigurationsRequest$NextToken": "

If the previous response was incomplete (because there's more data to retrieve), Amazon Kendra returns a pagination token in the response. You can use this pagination token to retrieve the next set of access control configurations.

", + "ListAccessControlConfigurationsResponse$NextToken": "

If the response is truncated, Amazon Kendra returns this token, which you can use in the subsequent request to retrieve the next set of access control configurations.

", "PrivateChannelFilter$member": null, "Project$member": null, "PublicChannelFilter$member": null, diff --git a/models/apis/monitoring/2010-08-01/api-2.json b/models/apis/monitoring/2010-08-01/api-2.json index 7da31990144..54382095239 100644 --- a/models/apis/monitoring/2010-08-01/api-2.json +++ b/models/apis/monitoring/2010-08-01/api-2.json @@ -583,6 +583,19 @@ "min":1 }, "ActionsEnabled":{"type":"boolean"}, + "ActionsSuppressedBy":{ + "type":"string", + "enum":[ + "WaitPeriod", + "ExtensionPeriod", + "Alarm" + ] + }, + "ActionsSuppressedReason":{ + "type":"string", + "max":1024, + "min":0 + }, "AlarmArn":{ "type":"string", "max":1600, @@ -750,7 +763,13 @@ "StateReason":{"shape":"StateReason"}, "StateReasonData":{"shape":"StateReasonData"}, "StateUpdatedTimestamp":{"shape":"Timestamp"}, - "StateValue":{"shape":"StateValue"} + "StateValue":{"shape":"StateValue"}, + "StateTransitionedTimestamp":{"shape":"Timestamp"}, + "ActionsSuppressedBy":{"shape":"ActionsSuppressedBy"}, + "ActionsSuppressedReason":{"shape":"ActionsSuppressedReason"}, + "ActionsSuppressor":{"shape":"AlarmArn"}, + "ActionsSuppressorWaitPeriod":{"shape":"SuppressorPeriod"}, + "ActionsSuppressorExtensionPeriod":{"shape":"SuppressorPeriod"} }, "xmlOrder":[ "ActionsEnabled", @@ -765,7 +784,13 @@ "StateReason", "StateReasonData", "StateUpdatedTimestamp", - "StateValue" + "StateValue", + "StateTransitionedTimestamp", + "ActionsSuppressedBy", + "ActionsSuppressedReason", + "ActionsSuppressor", + "ActionsSuppressorWaitPeriod", + "ActionsSuppressorExtensionPeriod" ] }, "CompositeAlarms":{ @@ -1966,7 +1991,10 @@ "AlarmRule":{"shape":"AlarmRule"}, "InsufficientDataActions":{"shape":"ResourceList"}, "OKActions":{"shape":"ResourceList"}, - "Tags":{"shape":"TagList"} + "Tags":{"shape":"TagList"}, + "ActionsSuppressor":{"shape":"AlarmArn"}, + "ActionsSuppressorWaitPeriod":{"shape":"SuppressorPeriod"}, + "ActionsSuppressorExtensionPeriod":{"shape":"SuppressorPeriod"} } }, "PutDashboardInput":{ @@ -2278,6 +2306,7 
@@ "type":"integer", "min":1 }, + "SuppressorPeriod":{"type":"integer"}, "Tag":{ "type":"structure", "required":[ diff --git a/models/apis/monitoring/2010-08-01/docs-2.json b/models/apis/monitoring/2010-08-01/docs-2.json index ef2157f1847..1347be0880f 100644 --- a/models/apis/monitoring/2010-08-01/docs-2.json +++ b/models/apis/monitoring/2010-08-01/docs-2.json @@ -61,11 +61,25 @@ "PutMetricAlarmInput$ActionsEnabled": "

Indicates whether actions should be executed during any changes to the alarm state. The default is TRUE.

" } }, + "ActionsSuppressedBy": { + "base": null, + "refs": { + "CompositeAlarm$ActionsSuppressedBy": "

When the value is ALARM, it means that the actions are suppressed because the suppressor alarm is in ALARM. When the value is WaitPeriod, it means that the actions are suppressed because the composite alarm is waiting for the suppressor alarm to go into the ALARM state. The maximum waiting time is as specified in ActionsSuppressorWaitPeriod. After this time, the composite alarm performs its actions. When the value is ExtensionPeriod, it means that the actions are suppressed because the composite alarm is waiting after the suppressor alarm went out of the ALARM state. The maximum waiting time is as specified in ActionsSuppressorExtensionPeriod. After this time, the composite alarm performs its actions.

" + } + }, + "ActionsSuppressedReason": { + "base": null, + "refs": { + "CompositeAlarm$ActionsSuppressedReason": "

Captures the reason for action suppression.

" + } + }, "AlarmArn": { "base": null, "refs": { "CompositeAlarm$AlarmArn": "

The Amazon Resource Name (ARN) of the alarm.

", - "MetricAlarm$AlarmArn": "

The Amazon Resource Name (ARN) of the alarm.

" + "CompositeAlarm$ActionsSuppressor": "

Actions will be suppressed if the suppressor alarm is in the ALARM state. ActionsSuppressor can be an AlarmName or an Amazon Resource Name (ARN) from an existing alarm.

", + "MetricAlarm$AlarmArn": "

The Amazon Resource Name (ARN) of the alarm.

", + "PutCompositeAlarmInput$ActionsSuppressor": "

Actions will be suppressed if the suppressor alarm is in the ALARM state. ActionsSuppressor can be an AlarmName or an Amazon Resource Name (ARN) from an existing alarm.

" } }, "AlarmDescription": { @@ -1568,6 +1582,15 @@ "MetricDatum$StorageResolution": "

Valid values are 1 and 60. Setting this to 1 specifies this metric as a high-resolution metric, so that CloudWatch stores the metric with sub-minute resolution down to one second. Setting this to 60 specifies this metric as a regular-resolution metric, which CloudWatch stores at 1-minute resolution. Currently, high resolution is available only for custom metrics. For more information about high-resolution metrics, see High-Resolution Metrics in the Amazon CloudWatch User Guide.

This field is optional, if you do not specify it the default of 60 is used.

" } }, + "SuppressorPeriod": { + "base": null, + "refs": { + "CompositeAlarm$ActionsSuppressorWaitPeriod": "

The maximum time in seconds that the composite alarm waits for the suppressor alarm to go into the ALARM state. After this time, the composite alarm performs its actions.

WaitPeriod is required only when ActionsSuppressor is specified.

", + "CompositeAlarm$ActionsSuppressorExtensionPeriod": "

The maximum time in seconds that the composite alarm waits after the suppressor alarm goes out of the ALARM state. After this time, the composite alarm performs its actions.

ExtensionPeriod is required only when ActionsSuppressor is specified.

", + "PutCompositeAlarmInput$ActionsSuppressorWaitPeriod": "

The maximum time in seconds that the composite alarm waits for the suppressor alarm to go into the ALARM state. After this time, the composite alarm performs its actions.

WaitPeriod is required only when ActionsSuppressor is specified.

", + "PutCompositeAlarmInput$ActionsSuppressorExtensionPeriod": "

The maximum time in seconds that the composite alarm waits after the suppressor alarm goes out of the ALARM state. After this time, the composite alarm performs its actions.

ExtensionPeriod is required only when ActionsSuppressor is specified.

" + } + }, "Tag": { "base": "

A key-value pair associated with a CloudWatch resource.

", "refs": { @@ -1626,7 +1649,8 @@ "refs": { "AlarmHistoryItem$Timestamp": "

The time stamp for the alarm history item.

", "CompositeAlarm$AlarmConfigurationUpdatedTimestamp": "

The time stamp of the last update to the alarm configuration.

", - "CompositeAlarm$StateUpdatedTimestamp": "

The time stamp of the last update to the alarm state.

", + "CompositeAlarm$StateUpdatedTimestamp": "

Tracks the timestamp of any state update, even if StateValue doesn't change.

", + "CompositeAlarm$StateTransitionedTimestamp": "

The timestamp of the last change to the alarm's StateValue.

", "Datapoint$Timestamp": "

The time stamp used for the data point.

", "DescribeAlarmHistoryInput$StartDate": "

The starting date to retrieve alarm history.

", "DescribeAlarmHistoryInput$EndDate": "

The ending date to retrieve alarm history.

", diff --git a/models/apis/network-firewall/2020-11-12/api-2.json b/models/apis/network-firewall/2020-11-12/api-2.json index f5779aff4be..fbf73da1883 100644 --- a/models/apis/network-firewall/2020-11-12/api-2.json +++ b/models/apis/network-firewall/2020-11-12/api-2.json @@ -606,12 +606,32 @@ "member":{"shape":"AzSubnet"} }, "Boolean":{"type":"boolean"}, + "CIDRCount":{ + "type":"integer", + "max":1000000, + "min":0 + }, + "CIDRSummary":{ + "type":"structure", + "members":{ + "AvailableCIDRCount":{"shape":"CIDRCount"}, + "UtilizedCIDRCount":{"shape":"CIDRCount"}, + "IPSetReferences":{"shape":"IPSetMetadataMap"} + } + }, + "CapacityUsageSummary":{ + "type":"structure", + "members":{ + "CIDRs":{"shape":"CIDRSummary"} + } + }, "CollectionMember_String":{"type":"string"}, "ConfigurationSyncState":{ "type":"string", "enum":[ "PENDING", - "IN_SYNC" + "IN_SYNC", + "CAPACITY_CONSTRAINED" ] }, "CreateFirewallPolicyRequest":{ @@ -1027,7 +1047,8 @@ "members":{ "Status":{"shape":"FirewallStatusValue"}, "ConfigurationSyncStateSummary":{"shape":"ConfigurationSyncState"}, - "SyncStates":{"shape":"SyncStates"} + "SyncStates":{"shape":"SyncStates"}, + "CapacityUsageSummary":{"shape":"CapacityUsageSummary"} } }, "FirewallStatusValue":{ @@ -1091,6 +1112,35 @@ "Definition":{"shape":"VariableDefinitionList"} } }, + "IPSetArn":{"type":"string"}, + "IPSetMetadata":{ + "type":"structure", + "members":{ + "ResolvedCIDRCount":{"shape":"CIDRCount"} + } + }, + "IPSetMetadataMap":{ + "type":"map", + "key":{"shape":"IPSetArn"}, + "value":{"shape":"IPSetMetadata"} + }, + "IPSetReference":{ + "type":"structure", + "members":{ + "ReferenceArn":{"shape":"ResourceArn"} + } + }, + "IPSetReferenceMap":{ + "type":"map", + "key":{"shape":"IPSetReferenceName"}, + "value":{"shape":"IPSetReference"} + }, + "IPSetReferenceName":{ + "type":"string", + "max":32, + "min":1, + "pattern":"^[A-Za-z][A-Za-z0-9_]*$" + }, "IPSets":{ "type":"map", "key":{"shape":"RuleVariableName"}, @@ -1314,7 +1364,8 @@ 
"type":"string", "enum":[ "PENDING", - "IN_SYNC" + "IN_SYNC", + "CAPACITY_CONSTRAINED" ] }, "PolicyString":{ @@ -1397,6 +1448,12 @@ "members":{ } }, + "ReferenceSets":{ + "type":"structure", + "members":{ + "IPSetReferences":{"shape":"IPSetReferenceMap"} + } + }, "ResourceArn":{ "type":"string", "max":256, @@ -1467,6 +1524,7 @@ "required":["RulesSource"], "members":{ "RuleVariables":{"shape":"RuleVariables"}, + "ReferenceSets":{"shape":"ReferenceSets"}, "RulesSource":{"shape":"RulesSource"}, "StatefulRuleOptions":{"shape":"StatefulRuleOptions"} } diff --git a/models/apis/network-firewall/2020-11-12/docs-2.json b/models/apis/network-firewall/2020-11-12/docs-2.json index 4a5669f5d03..4d99b2e6af9 100644 --- a/models/apis/network-firewall/2020-11-12/docs-2.json +++ b/models/apis/network-firewall/2020-11-12/docs-2.json @@ -138,6 +138,26 @@ "UpdateSubnetChangeProtectionResponse$SubnetChangeProtection": "

A setting indicating whether the firewall is protected against changes to the subnet associations. Use this setting to protect against accidentally modifying the subnet associations for a firewall that is in use. When you create a firewall, the operation initializes this setting to TRUE.

" } }, + "CIDRCount": { + "base": null, + "refs": { + "CIDRSummary$AvailableCIDRCount": "

The number of CIDR blocks available for use by the IP set references in a firewall.

", + "CIDRSummary$UtilizedCIDRCount": "

The number of CIDR blocks used by the IP set references in a firewall.

", + "IPSetMetadata$ResolvedCIDRCount": "

Describes the total number of CIDR blocks currently in use by the IP set references in a firewall. To determine how many CIDR blocks are available for you to use in a firewall, you can call AvailableCIDRCount.

" + } + }, + "CIDRSummary": { + "base": "

Summarizes the CIDR blocks used by the IP set references in a firewall. Network Firewall calculates the number of CIDRs by taking an aggregated count of all CIDRs used by the IP sets you are referencing.

", + "refs": { + "CapacityUsageSummary$CIDRs": "

Describes the capacity usage of the CIDR blocks used by the IP set references in a firewall.

" + } + }, + "CapacityUsageSummary": { + "base": "

The capacity usage summary of the resources used by the ReferenceSets in a firewall.

", + "refs": { + "FirewallStatus$CapacityUsageSummary": "

Describes the capacity usage of the resources contained in a firewall's reference sets. Network Firewall calculates the capacity usage by taking an aggregated count of all of the resources used by all of the reference sets in a firewall.

" + } + }, "CollectionMember_String": { "base": null, "refs": { @@ -490,6 +510,42 @@ "IPSets$value": null } }, + "IPSetArn": { + "base": null, + "refs": { + "IPSetMetadataMap$key": null + } + }, + "IPSetMetadata": { + "base": "

General information about the IP set.

", + "refs": { + "IPSetMetadataMap$value": null + } + }, + "IPSetMetadataMap": { + "base": null, + "refs": { + "CIDRSummary$IPSetReferences": "

The list of the IP set references used by a firewall.

" + } + }, + "IPSetReference": { + "base": "

Configures one or more IP set references for a Suricata-compatible rule group. This is used in CreateRuleGroup or UpdateRuleGroup. An IP set reference is a rule variable that references a resource that you create and manage in another Amazon Web Services service, such as an Amazon VPC prefix list. Network Firewall IP set references enable you to dynamically update the contents of your rules. When you create, update, or delete the IP set you are referencing in your rule, Network Firewall automatically updates the rule's content with the changes. For more information about IP set references in Network Firewall, see Using IP set references in the Network Firewall Developer Guide.

Network Firewall currently supports only Amazon VPC prefix lists as IP set references.

", + "refs": { + "IPSetReferenceMap$value": null + } + }, + "IPSetReferenceMap": { + "base": null, + "refs": { + "ReferenceSets$IPSetReferences": "

The list of IP set references.

" + } + }, + "IPSetReferenceName": { + "base": null, + "refs": { + "IPSetReferenceMap$key": null + } + }, "IPSets": { "base": null, "refs": { @@ -768,6 +824,12 @@ "refs": { } }, + "ReferenceSets": { + "base": "

Contains a set of IP set references.

", + "refs": { + "RuleGroup$ReferenceSets": "

The list of a rule group's reference sets.

" + } + }, "ResourceArn": { "base": null, "refs": { @@ -797,6 +859,7 @@ "FirewallMetadata$FirewallArn": "

The Amazon Resource Name (ARN) of the firewall.

", "FirewallPolicyMetadata$Arn": "

The Amazon Resource Name (ARN) of the firewall policy.

", "FirewallPolicyResponse$FirewallPolicyArn": "

The Amazon Resource Name (ARN) of the firewall policy.

If this response is for a create request that had DryRun set to TRUE, then this ARN is a placeholder that isn't attached to a valid resource.

", + "IPSetReference$ReferenceArn": "

The Amazon Resource Name (ARN) of the resource that you are referencing in your rule group.

", "ListTagsForResourceRequest$ResourceArn": "

The Amazon Resource Name (ARN) of the resource.

", "PutResourcePolicyRequest$ResourceArn": "

The Amazon Resource Name (ARN) of the account that you want to share rule groups and firewall policies with.

", "RuleGroupMetadata$Arn": "

The Amazon Resource Name (ARN) of the rule group.

", diff --git a/models/apis/rds/2014-10-31/docs-2.json b/models/apis/rds/2014-10-31/docs-2.json index b71f11e0f03..54ce53b7d01 100644 --- a/models/apis/rds/2014-10-31/docs-2.json +++ b/models/apis/rds/2014-10-31/docs-2.json @@ -8,28 +8,28 @@ "AddTagsToResource": "

Adds metadata tags to an Amazon RDS resource. These tags can also be used with cost allocation reporting to track cost associated with Amazon RDS resources, or used in a Condition statement in an IAM policy for Amazon RDS.

For an overview on tagging Amazon RDS resources, see Tagging Amazon RDS Resources.

", "ApplyPendingMaintenanceAction": "

Applies a pending maintenance action to a resource (for example, to a DB instance).

", "AuthorizeDBSecurityGroupIngress": "

Enables ingress to a DBSecurityGroup using one of two forms of authorization. First, EC2 or VPC security groups can be added to the DBSecurityGroup if the application using the database is running on EC2 or VPC instances. Second, IP ranges are available if the application accessing your database is running on the internet. Required parameters for this API are one of CIDR range, EC2SecurityGroupId for VPC, or (EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId for non-VPC).

You can't authorize ingress from an EC2 security group in one Amazon Web Services Region to an Amazon RDS DB instance in another. You can't authorize ingress from a VPC security group in one VPC to an Amazon RDS DB instance in another.

For an overview of CIDR ranges, go to the Wikipedia Tutorial.

", - "BacktrackDBCluster": "

Backtracks a DB cluster to a specific time, without creating a new DB cluster.

For more information on backtracking, see Backtracking an Aurora DB Cluster in the Amazon Aurora User Guide.

This action only applies to Aurora MySQL DB clusters.

", + "BacktrackDBCluster": "

Backtracks a DB cluster to a specific time, without creating a new DB cluster.

For more information on backtracking, see Backtracking an Aurora DB Cluster in the Amazon Aurora User Guide.

This action applies only to Aurora MySQL DB clusters.

", "CancelExportTask": "

Cancels an export task in progress that is exporting a snapshot to Amazon S3. Any data that has already been written to the S3 bucket isn't removed.

", "CopyDBClusterParameterGroup": "

Copies the specified DB cluster parameter group.

", - "CopyDBClusterSnapshot": "

Copies a snapshot of a DB cluster.

To copy a DB cluster snapshot from a shared manual DB cluster snapshot, SourceDBClusterSnapshotIdentifier must be the Amazon Resource Name (ARN) of the shared DB cluster snapshot.

You can copy an encrypted DB cluster snapshot from another Amazon Web Services Region. In that case, the Amazon Web Services Region where you call the CopyDBClusterSnapshot action is the destination Amazon Web Services Region for the encrypted DB cluster snapshot to be copied to. To copy an encrypted DB cluster snapshot from another Amazon Web Services Region, you must provide the following values:

To cancel the copy operation once it is in progress, delete the target DB cluster snapshot identified by TargetDBClusterSnapshotIdentifier while that DB cluster snapshot is in \"copying\" status.

For more information on copying encrypted Amazon Aurora DB cluster snapshots from one Amazon Web Services Region to another, see Copying a Snapshot in the Amazon Aurora User Guide.

For more information on Amazon Aurora DB clusters, see What is Amazon Aurora? in the Amazon Aurora User Guide.

For more information on Multi-AZ DB clusters, see Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.

", + "CopyDBClusterSnapshot": "

Copies a snapshot of a DB cluster.

To copy a DB cluster snapshot from a shared manual DB cluster snapshot, SourceDBClusterSnapshotIdentifier must be the Amazon Resource Name (ARN) of the shared DB cluster snapshot.

You can copy an encrypted DB cluster snapshot from another Amazon Web Services Region. In that case, the Amazon Web Services Region where you call the CopyDBClusterSnapshot operation is the destination Amazon Web Services Region for the encrypted DB cluster snapshot to be copied to. To copy an encrypted DB cluster snapshot from another Amazon Web Services Region, you must provide the following values:

To cancel the copy operation once it is in progress, delete the target DB cluster snapshot identified by TargetDBClusterSnapshotIdentifier while that DB cluster snapshot is in \"copying\" status.

For more information on copying encrypted Amazon Aurora DB cluster snapshots from one Amazon Web Services Region to another, see Copying a Snapshot in the Amazon Aurora User Guide.

For more information on Amazon Aurora DB clusters, see What is Amazon Aurora? in the Amazon Aurora User Guide.

For more information on Multi-AZ DB clusters, see Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.

", "CopyDBParameterGroup": "

Copies the specified DB parameter group.

", - "CopyDBSnapshot": "

Copies the specified DB snapshot. The source DB snapshot must be in the available state.

You can copy a snapshot from one Amazon Web Services Region to another. In that case, the Amazon Web Services Region where you call the CopyDBSnapshot action is the destination Amazon Web Services Region for the DB snapshot copy.

This command doesn't apply to RDS Custom.

For more information about copying snapshots, see Copying a DB Snapshot in the Amazon RDS User Guide.

", + "CopyDBSnapshot": "

Copies the specified DB snapshot. The source DB snapshot must be in the available state.

You can copy a snapshot from one Amazon Web Services Region to another. In that case, the Amazon Web Services Region where you call the CopyDBSnapshot operation is the destination Amazon Web Services Region for the DB snapshot copy.

This command doesn't apply to RDS Custom.

For more information about copying snapshots, see Copying a DB Snapshot in the Amazon RDS User Guide.

", "CopyOptionGroup": "

Copies the specified option group.

", "CreateCustomDBEngineVersion": "

Creates a custom DB engine version (CEV). A CEV is a binary volume snapshot of a database engine and specific AMI. The supported engines are the following:

Amazon RDS, which is a fully managed service, supplies the Amazon Machine Image (AMI) and database software. The Amazon RDS database software is preinstalled, so you need only select a DB engine and version, and create your database. With Amazon RDS Custom for Oracle, you upload your database installation files in Amazon S3.

When you create a custom engine version, you specify the files in a JSON document called a CEV manifest. This document describes installation .zip files stored in Amazon S3. RDS Custom creates your CEV from the installation files that you provided. This service model is called Bring Your Own Media (BYOM).

Creation takes approximately two hours. If creation fails, RDS Custom issues RDS-EVENT-0196 with the message Creation failed for custom engine version, and includes details about the failure. For example, the event prints missing files.

After you create the CEV, it is available for use. You can create multiple CEVs, and create multiple RDS Custom instances from any CEV. You can also change the status of a CEV to make it available or inactive.

The MediaImport service that imports files from Amazon S3 to create CEVs isn't integrated with Amazon Web Services CloudTrail. If you turn on data logging for Amazon RDS in CloudTrail, calls to the CreateCustomDbEngineVersion event aren't logged. However, you might see calls from the API gateway that accesses your Amazon S3 bucket. These calls originate from the MediaImport service for the CreateCustomDbEngineVersion event.

For more information, see Creating a CEV in the Amazon RDS User Guide.

", - "CreateDBCluster": "

Creates a new Amazon Aurora DB cluster or Multi-AZ DB cluster.

You can use the ReplicationSourceIdentifier parameter to create an Amazon Aurora DB cluster as a read replica of another DB cluster or Amazon RDS MySQL or PostgreSQL DB instance. For cross-Region replication where the DB cluster identified by ReplicationSourceIdentifier is encrypted, also specify the PreSignedUrl parameter.

For more information on Amazon Aurora, see What is Amazon Aurora? in the Amazon Aurora User Guide.

For more information on Multi-AZ DB clusters, see Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.

", - "CreateDBClusterEndpoint": "

Creates a new custom endpoint and associates it with an Amazon Aurora DB cluster.

This action only applies to Aurora DB clusters.

", - "CreateDBClusterParameterGroup": "

Creates a new DB cluster parameter group.

Parameters in a DB cluster parameter group apply to all of the instances in a DB cluster.

A DB cluster parameter group is initially created with the default parameters for the database engine used by instances in the DB cluster. To provide custom values for any of the parameters, you must modify the group after creating it using ModifyDBClusterParameterGroup. Once you've created a DB cluster parameter group, you need to associate it with your DB cluster using ModifyDBCluster.

When you associate a new DB cluster parameter group with a running Aurora DB cluster, reboot the DB instances in the DB cluster without failover for the new DB cluster parameter group and associated settings to take effect.

When you associate a new DB cluster parameter group with a running Multi-AZ DB cluster, reboot the DB cluster without failover for the new DB cluster parameter group and associated settings to take effect.

After you create a DB cluster parameter group, you should wait at least 5 minutes before creating your first DB cluster that uses that DB cluster parameter group as the default parameter group. This allows Amazon RDS to fully complete the create action before the DB cluster parameter group is used as the default for a new DB cluster. This is especially important for parameters that are critical when creating the default database for a DB cluster, such as the character set for the default database defined by the character_set_database parameter. You can use the Parameter Groups option of the Amazon RDS console or the DescribeDBClusterParameters action to verify that your DB cluster parameter group has been created or modified.

For more information on Amazon Aurora, see What is Amazon Aurora? in the Amazon Aurora User Guide.

For more information on Multi-AZ DB clusters, see Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.

", + "CreateDBCluster": "

Creates a new Amazon Aurora DB cluster or Multi-AZ DB cluster.

You can use the ReplicationSourceIdentifier parameter to create an Amazon Aurora DB cluster as a read replica of another DB cluster or Amazon RDS MySQL or PostgreSQL DB instance.

For more information on Amazon Aurora, see What is Amazon Aurora? in the Amazon Aurora User Guide.

For more information on Multi-AZ DB clusters, see Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.

", + "CreateDBClusterEndpoint": "

Creates a new custom endpoint and associates it with an Amazon Aurora DB cluster.

This action applies only to Aurora DB clusters.

", + "CreateDBClusterParameterGroup": "

Creates a new DB cluster parameter group.

Parameters in a DB cluster parameter group apply to all of the instances in a DB cluster.

A DB cluster parameter group is initially created with the default parameters for the database engine used by instances in the DB cluster. To provide custom values for any of the parameters, you must modify the group after creating it using ModifyDBClusterParameterGroup. Once you've created a DB cluster parameter group, you need to associate it with your DB cluster using ModifyDBCluster.

When you associate a new DB cluster parameter group with a running Aurora DB cluster, reboot the DB instances in the DB cluster without failover for the new DB cluster parameter group and associated settings to take effect.

When you associate a new DB cluster parameter group with a running Multi-AZ DB cluster, reboot the DB cluster without failover for the new DB cluster parameter group and associated settings to take effect.

After you create a DB cluster parameter group, you should wait at least 5 minutes before creating your first DB cluster that uses that DB cluster parameter group as the default parameter group. This allows Amazon RDS to fully complete the create action before the DB cluster parameter group is used as the default for a new DB cluster. This is especially important for parameters that are critical when creating the default database for a DB cluster, such as the character set for the default database defined by the character_set_database parameter. You can use the Parameter Groups option of the Amazon RDS console or the DescribeDBClusterParameters operation to verify that your DB cluster parameter group has been created or modified.

For more information on Amazon Aurora, see What is Amazon Aurora? in the Amazon Aurora User Guide.

For more information on Multi-AZ DB clusters, see Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.

", "CreateDBClusterSnapshot": "

Creates a snapshot of a DB cluster.

For more information on Amazon Aurora, see What is Amazon Aurora? in the Amazon Aurora User Guide.

For more information on Multi-AZ DB clusters, see Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.

", - "CreateDBInstance": "

Creates a new DB instance.

", - "CreateDBInstanceReadReplica": "

Creates a new DB instance that acts as a read replica for an existing source DB instance. You can create a read replica for a DB instance running MySQL, MariaDB, Oracle, PostgreSQL, or SQL Server. For more information, see Working with Read Replicas in the Amazon RDS User Guide.

Amazon Aurora doesn't support this action. Call the CreateDBInstance action to create a DB instance for an Aurora DB cluster.

All read replica DB instances are created with backups disabled. All other DB instance attributes (including DB security groups and DB parameter groups) are inherited from the source DB instance, except as specified.

Your source DB instance must have backup retention enabled.

", + "CreateDBInstance": "

Creates a new DB instance.

The new DB instance can be an RDS DB instance, or it can be a DB instance in an Aurora DB cluster. For an Aurora DB cluster, you can call this operation multiple times to add more than one DB instance to the cluster.

For more information about creating an RDS DB instance, see Creating an Amazon RDS DB instance in the Amazon RDS User Guide.

For more information about creating a DB instance in an Aurora DB cluster, see Creating an Amazon Aurora DB cluster in the Amazon Aurora User Guide.

", + "CreateDBInstanceReadReplica": "

Creates a new DB instance that acts as a read replica for an existing source DB instance. You can create a read replica for a DB instance running MySQL, MariaDB, Oracle, PostgreSQL, or SQL Server. For more information, see Working with Read Replicas in the Amazon RDS User Guide.

Amazon Aurora doesn't support this operation. Call the CreateDBInstance operation to create a DB instance for an Aurora DB cluster.

All read replica DB instances are created with backups disabled. All other DB instance attributes (including DB security groups and DB parameter groups) are inherited from the source DB instance, except as specified.

Your source DB instance must have backup retention enabled.

", "CreateDBParameterGroup": "

Creates a new DB parameter group.

A DB parameter group is initially created with the default parameters for the database engine used by the DB instance. To provide custom values for any of the parameters, you must modify the group after creating it using ModifyDBParameterGroup. Once you've created a DB parameter group, you need to associate it with your DB instance using ModifyDBInstance. When you associate a new DB parameter group with a running DB instance, you need to reboot the DB instance without failover for the new DB parameter group and associated settings to take effect.

This command doesn't apply to RDS Custom.

After you create a DB parameter group, you should wait at least 5 minutes before creating your first DB instance that uses that DB parameter group as the default parameter group. This allows Amazon RDS to fully complete the create action before the parameter group is used as the default for a new DB instance. This is especially important for parameters that are critical when creating the default database for a DB instance, such as the character set for the default database defined by the character_set_database parameter. You can use the Parameter Groups option of the Amazon RDS console or the DescribeDBParameters command to verify that your DB parameter group has been created or modified.

", "CreateDBProxy": "

Creates a new DB proxy.

", "CreateDBProxyEndpoint": "

Creates a DBProxyEndpoint. Only applies to proxies that are associated with Aurora DB clusters. You can use DB proxy endpoints to specify read/write or read-only access to the DB cluster. You can also use DB proxy endpoints to access a DB proxy through a different VPC than the proxy's default VPC.

", "CreateDBSecurityGroup": "

Creates a new DB security group. DB security groups control access to a DB instance.

A DB security group controls access to EC2-Classic DB instances that are not in a VPC.

", "CreateDBSnapshot": "

Creates a snapshot of a DB instance. The source DB instance must be in the available or storage-optimization state.

", "CreateDBSubnetGroup": "

Creates a new DB subnet group. DB subnet groups must contain at least one subnet in at least two AZs in the Amazon Web Services Region.

", - "CreateEventSubscription": "

Creates an RDS event notification subscription. This action requires a topic Amazon Resource Name (ARN) created by either the RDS console, the SNS console, or the SNS API. To obtain an ARN with SNS, you must create a topic in Amazon SNS and subscribe to the topic. The ARN is displayed in the SNS console.

You can specify the type of source (SourceType) that you want to be notified of and provide a list of RDS sources (SourceIds) that triggers the events. You can also provide a list of event categories (EventCategories) for events that you want to be notified of. For example, you can specify SourceType = db-instance, SourceIds = mydbinstance1, mydbinstance2 and EventCategories = Availability, Backup.

If you specify both the SourceType and SourceIds, such as SourceType = db-instance and SourceIds = myDBInstance1, you are notified of all the db-instance events for the specified source. If you specify a SourceType but do not specify SourceIds, you receive notice of the events for that source type for all your RDS sources. If you don't specify either the SourceType or the SourceIds, you are notified of events generated from all RDS sources belonging to your customer account.

RDS event notification is only available for unencrypted SNS topics. If you specify an encrypted SNS topic, event notifications aren't sent for the topic.

", - "CreateGlobalCluster": "

Creates an Aurora global database spread across multiple Amazon Web Services Regions. The global database contains a single primary cluster with read-write capability, and a read-only secondary cluster that receives data from the primary cluster through high-speed replication performed by the Aurora storage subsystem.

You can create a global database that is initially empty, and then add a primary cluster and a secondary cluster to it. Or you can specify an existing Aurora cluster during the create operation, and this cluster becomes the primary cluster of the global database.

This action only applies to Aurora DB clusters.

", + "CreateEventSubscription": "

Creates an RDS event notification subscription. This operation requires a topic Amazon Resource Name (ARN) created by either the RDS console, the SNS console, or the SNS API. To obtain an ARN with SNS, you must create a topic in Amazon SNS and subscribe to the topic. The ARN is displayed in the SNS console.

You can specify the type of source (SourceType) that you want to be notified of and provide a list of RDS sources (SourceIds) that triggers the events. You can also provide a list of event categories (EventCategories) for events that you want to be notified of. For example, you can specify SourceType = db-instance, SourceIds = mydbinstance1, mydbinstance2 and EventCategories = Availability, Backup.

If you specify both the SourceType and SourceIds, such as SourceType = db-instance and SourceIds = myDBInstance1, you are notified of all the db-instance events for the specified source. If you specify a SourceType but do not specify SourceIds, you receive notice of the events for that source type for all your RDS sources. If you don't specify either the SourceType or the SourceIds, you are notified of events generated from all RDS sources belonging to your customer account.

RDS event notification is only available for unencrypted SNS topics. If you specify an encrypted SNS topic, event notifications aren't sent for the topic.

", + "CreateGlobalCluster": "

Creates an Aurora global database spread across multiple Amazon Web Services Regions. The global database contains a single primary cluster with read-write capability, and a read-only secondary cluster that receives data from the primary cluster through high-speed replication performed by the Aurora storage subsystem.

You can create a global database that is initially empty, and then add a primary cluster and a secondary cluster to it. Or you can specify an existing Aurora cluster during the create operation, and this cluster becomes the primary cluster of the global database.

This action applies only to Aurora DB clusters.

", "CreateOptionGroup": "

Creates a new option group. You can create up to 20 option groups.

This command doesn't apply to RDS Custom.

", "DeleteCustomDBEngineVersion": "

Deletes a custom engine version. To run this command, make sure you meet the following prerequisites:

Typically, deletion takes a few minutes.

The MediaImport service that imports files from Amazon S3 to create CEVs isn't integrated with Amazon Web Services CloudTrail. If you turn on data logging for Amazon RDS in CloudTrail, calls to the DeleteCustomDbEngineVersion event aren't logged. However, you might see calls from the API gateway that accesses your Amazon S3 bucket. These calls originate from the MediaImport service for the DeleteCustomDbEngineVersion event.

For more information, see Deleting a CEV in the Amazon RDS User Guide.

", "DeleteDBCluster": "

The DeleteDBCluster action deletes a previously provisioned DB cluster. When you delete a DB cluster, all automated backups for that DB cluster are deleted and can't be recovered. Manual DB cluster snapshots of the specified DB cluster are not deleted.

For more information on Amazon Aurora, see What is Amazon Aurora? in the Amazon Aurora User Guide.

For more information on Multi-AZ DB clusters, see Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.

", @@ -75,7 +75,7 @@ "DescribeEngineDefaultParameters": "

Returns the default engine and system parameter information for the specified database engine.

", "DescribeEventCategories": "

Displays a list of categories for all event source types, or, if specified, for a specified source type. You can also see this list in the \"Amazon RDS event categories and event messages\" section of the Amazon RDS User Guide or the Amazon Aurora User Guide.

", "DescribeEventSubscriptions": "

Lists all the subscription descriptions for a customer account. The description for a subscription includes SubscriptionName, SNSTopicARN, CustomerID, SourceType, SourceID, CreationTime, and Status.

If you specify a SubscriptionName, lists the description for that subscription.

", - "DescribeEvents": "

Returns events related to DB instances, DB clusters, DB parameter groups, DB security groups, DB snapshots, DB cluster snapshots, and RDS Proxies for the past 14 days. Events specific to a particular DB instance, DB cluster, DB parameter group, DB security group, DB snapshot, DB cluster snapshot group, or RDS Proxy can be obtained by providing the name as a parameter.

By default, RDS returns events that were generated in the past hour.

", + "DescribeEvents": "

Returns events related to DB instances, DB clusters, DB parameter groups, DB security groups, DB snapshots, DB cluster snapshots, and RDS Proxies for the past 14 days. Events specific to a particular DB instance, DB cluster, DB parameter group, DB security group, DB snapshot, DB cluster snapshot group, or RDS Proxy can be obtained by providing the name as a parameter.

For more information on working with events, see Monitoring Amazon RDS events in the Amazon RDS User Guide and Monitoring Amazon Aurora events in the Amazon Aurora User Guide.

By default, RDS returns events that were generated in the past hour.

", "DescribeExportTasks": "

Returns information about a snapshot export to Amazon S3. This API operation supports pagination.

", "DescribeGlobalClusters": "

Returns information about Aurora global database clusters. This API supports pagination.

For more information on Amazon Aurora, see What is Amazon Aurora? in the Amazon Aurora User Guide.

This action only applies to Aurora DB clusters.

", "DescribeOptionGroupOptions": "

Describes all available options.

", @@ -87,7 +87,7 @@ "DescribeSourceRegions": "

Returns a list of the source Amazon Web Services Regions where the current Amazon Web Services Region can create a read replica, copy a DB snapshot from, or replicate automated backups from. This API action supports pagination.

", "DescribeValidDBInstanceModifications": "

You can call DescribeValidDBInstanceModifications to learn what modifications you can make to your DB instance. You can use this information when you call ModifyDBInstance.

This command doesn't apply to RDS Custom.

", "DownloadDBLogFilePortion": "

Downloads all or a portion of the specified log file, up to 1 MB in size.

This command doesn't apply to RDS Custom.

", - "FailoverDBCluster": "

Forces a failover for a DB cluster.

For an Aurora DB cluster, failover for a DB cluster promotes one of the Aurora Replicas (read-only instances) in the DB cluster to be the primary DB instance (the cluster writer).

For a Multi-AZ DB cluster, failover for a DB cluster promotes one of the readable standby DB instances (read-only instances) in the DB cluster to be the primary DB instance (the cluster writer).

An Amazon Aurora DB cluster automatically fails over to an Aurora Replica, if one exists, when the primary DB instance fails. A Multi-AZ DB cluster automatically fails over to a readbable standby DB instance when the primary DB instance fails.

To simulate a failure of a primary instance for testing, you can force a failover. Because each instance in a DB cluster has its own endpoint address, make sure to clean up and re-establish any existing connections that use those endpoint addresses when the failover is complete.

For more information on Amazon Aurora DB clusters, see What is Amazon Aurora? in the Amazon Aurora User Guide.

For more information on Multi-AZ DB clusters, see Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.

", + "FailoverDBCluster": "

Forces a failover for a DB cluster.

For an Aurora DB cluster, failover for a DB cluster promotes one of the Aurora Replicas (read-only instances) in the DB cluster to be the primary DB instance (the cluster writer).

For a Multi-AZ DB cluster, failover for a DB cluster promotes one of the readable standby DB instances (read-only instances) in the DB cluster to be the primary DB instance (the cluster writer).

An Amazon Aurora DB cluster automatically fails over to an Aurora Replica, if one exists, when the primary DB instance fails. A Multi-AZ DB cluster automatically fails over to a readable standby DB instance when the primary DB instance fails.

To simulate a failure of a primary instance for testing, you can force a failover. Because each instance in a DB cluster has its own endpoint address, make sure to clean up and re-establish any existing connections that use those endpoint addresses when the failover is complete.

For more information on Amazon Aurora DB clusters, see What is Amazon Aurora? in the Amazon Aurora User Guide.

For more information on Multi-AZ DB clusters, see Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.

", "FailoverGlobalCluster": "

Initiates the failover process for an Aurora global database (GlobalCluster).

A failover for an Aurora global database promotes one of the secondary read-only DB clusters to be the primary DB cluster and demotes the primary DB cluster to being a secondary (read-only) DB cluster. In other words, the role of the current primary DB cluster and the selected (target) DB cluster are switched. The selected secondary DB cluster assumes full read/write capabilities for the Aurora global database.

For more information about failing over an Amazon Aurora global database, see Managed planned failover for Amazon Aurora global databases in the Amazon Aurora User Guide.

This action applies to GlobalCluster (Aurora global databases) only. Use this action only on healthy Aurora global databases with running Aurora DB clusters and no Region-wide outages, to test disaster recovery scenarios or to reconfigure your Aurora global database topology.

", "ListTagsForResource": "

Lists all tags on an Amazon RDS resource.

For an overview on tagging an Amazon RDS resource, see Tagging Amazon RDS Resources in the Amazon RDS User Guide.

", "ModifyCertificates": "

Override the system-default Secure Sockets Layer/Transport Layer Security (SSL/TLS) certificate for Amazon RDS for new DB instances, or remove the override.

By using this operation, you can specify an RDS-approved SSL/TLS certificate for new DB instances that is different from the default certificate provided by RDS. You can also use this operation to remove the override, so that new DB instances use the default certificate provided by RDS.

You might need to override the default certificate in the following situations:

For more information about rotating your SSL/TLS certificate for RDS DB engines, see Rotating Your SSL/TLS Certificate in the Amazon RDS User Guide.

For more information about rotating your SSL/TLS certificate for Aurora DB engines, see Rotating Your SSL/TLS Certificate in the Amazon Aurora User Guide.

", @@ -95,15 +95,15 @@ "ModifyCustomDBEngineVersion": "

Modifies the status of a custom engine version (CEV). You can find CEVs to modify by calling DescribeDBEngineVersions.

The MediaImport service that imports files from Amazon S3 to create CEVs isn't integrated with Amazon Web Services CloudTrail. If you turn on data logging for Amazon RDS in CloudTrail, calls to the ModifyCustomDbEngineVersion event aren't logged. However, you might see calls from the API gateway that accesses your Amazon S3 bucket. These calls originate from the MediaImport service for the ModifyCustomDbEngineVersion event.

For more information, see Modifying CEV status in the Amazon RDS User Guide.

", "ModifyDBCluster": "

Modify the settings for an Amazon Aurora DB cluster or a Multi-AZ DB cluster. You can change one or more settings by specifying these parameters and the new values in the request.

For more information on Amazon Aurora DB clusters, see What is Amazon Aurora? in the Amazon Aurora User Guide.

For more information on Multi-AZ DB clusters, see Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.

", "ModifyDBClusterEndpoint": "

Modifies the properties of an endpoint in an Amazon Aurora DB cluster.

This action only applies to Aurora DB clusters.

", - "ModifyDBClusterParameterGroup": "

Modifies the parameters of a DB cluster parameter group. To modify more than one parameter, submit a list of the following: ParameterName, ParameterValue, and ApplyMethod. A maximum of 20 parameters can be modified in a single request.

After you create a DB cluster parameter group, you should wait at least 5 minutes before creating your first DB cluster that uses that DB cluster parameter group as the default parameter group. This allows Amazon RDS to fully complete the create action before the parameter group is used as the default for a new DB cluster. This is especially important for parameters that are critical when creating the default database for a DB cluster, such as the character set for the default database defined by the character_set_database parameter. You can use the Parameter Groups option of the Amazon RDS console or the DescribeDBClusterParameters action to verify that your DB cluster parameter group has been created or modified.

If the modified DB cluster parameter group is used by an Aurora Serverless v1 cluster, Aurora applies the update immediately. The cluster restart might interrupt your workload. In that case, your application must reopen any connections and retry any transactions that were active when the parameter changes took effect.

For more information on Amazon Aurora DB clusters, see What is Amazon Aurora? in the Amazon Aurora User Guide.

For more information on Multi-AZ DB clusters, see Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.

", - "ModifyDBClusterSnapshotAttribute": "

Adds an attribute and values to, or removes an attribute and values from, a manual DB cluster snapshot.

To share a manual DB cluster snapshot with other Amazon Web Services accounts, specify restore as the AttributeName and use the ValuesToAdd parameter to add a list of IDs of the Amazon Web Services accounts that are authorized to restore the manual DB cluster snapshot. Use the value all to make the manual DB cluster snapshot public, which means that it can be copied or restored by all Amazon Web Services accounts.

Don't add the all value for any manual DB cluster snapshots that contain private information that you don't want available to all Amazon Web Services accounts.

If a manual DB cluster snapshot is encrypted, it can be shared, but only by specifying a list of authorized Amazon Web Services account IDs for the ValuesToAdd parameter. You can't use all as a value for that parameter in this case.

To view which Amazon Web Services accounts have access to copy or restore a manual DB cluster snapshot, or whether a manual DB cluster snapshot is public or private, use the DescribeDBClusterSnapshotAttributes API action. The accounts are returned as values for the restore attribute.

", + "ModifyDBClusterParameterGroup": "

Modifies the parameters of a DB cluster parameter group. To modify more than one parameter, submit a list of the following: ParameterName, ParameterValue, and ApplyMethod. A maximum of 20 parameters can be modified in a single request.

After you create a DB cluster parameter group, you should wait at least 5 minutes before creating your first DB cluster that uses that DB cluster parameter group as the default parameter group. This allows Amazon RDS to fully complete the create action before the parameter group is used as the default for a new DB cluster. This is especially important for parameters that are critical when creating the default database for a DB cluster, such as the character set for the default database defined by the character_set_database parameter. You can use the Parameter Groups option of the Amazon RDS console or the DescribeDBClusterParameters operation to verify that your DB cluster parameter group has been created or modified.

If the modified DB cluster parameter group is used by an Aurora Serverless v1 cluster, Aurora applies the update immediately. The cluster restart might interrupt your workload. In that case, your application must reopen any connections and retry any transactions that were active when the parameter changes took effect.

For more information on Amazon Aurora DB clusters, see What is Amazon Aurora? in the Amazon Aurora User Guide.

For more information on Multi-AZ DB clusters, see Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.

", + "ModifyDBClusterSnapshotAttribute": "

Adds an attribute and values to, or removes an attribute and values from, a manual DB cluster snapshot.

To share a manual DB cluster snapshot with other Amazon Web Services accounts, specify restore as the AttributeName and use the ValuesToAdd parameter to add a list of IDs of the Amazon Web Services accounts that are authorized to restore the manual DB cluster snapshot. Use the value all to make the manual DB cluster snapshot public, which means that it can be copied or restored by all Amazon Web Services accounts.

Don't add the all value for any manual DB cluster snapshots that contain private information that you don't want available to all Amazon Web Services accounts.

If a manual DB cluster snapshot is encrypted, it can be shared, but only by specifying a list of authorized Amazon Web Services account IDs for the ValuesToAdd parameter. You can't use all as a value for that parameter in this case.

To view which Amazon Web Services accounts have access to copy or restore a manual DB cluster snapshot, or whether a manual DB cluster snapshot is public or private, use the DescribeDBClusterSnapshotAttributes API operation. The accounts are returned as values for the restore attribute.

", "ModifyDBInstance": "

Modifies settings for a DB instance. You can change one or more database configuration parameters by specifying these parameters and the new values in the request. To learn what modifications you can make to your DB instance, call DescribeValidDBInstanceModifications before you call ModifyDBInstance.

", "ModifyDBParameterGroup": "

Modifies the parameters of a DB parameter group. To modify more than one parameter, submit a list of the following: ParameterName, ParameterValue, and ApplyMethod. A maximum of 20 parameters can be modified in a single request.

After you modify a DB parameter group, you should wait at least 5 minutes before creating your first DB instance that uses that DB parameter group as the default parameter group. This allows Amazon RDS to fully complete the modify action before the parameter group is used as the default for a new DB instance. This is especially important for parameters that are critical when creating the default database for a DB instance, such as the character set for the default database defined by the character_set_database parameter. You can use the Parameter Groups option of the Amazon RDS console or the DescribeDBParameters command to verify that your DB parameter group has been created or modified.

", "ModifyDBProxy": "

Changes the settings for an existing DB proxy.

", "ModifyDBProxyEndpoint": "

Changes the settings for an existing DB proxy endpoint.

", "ModifyDBProxyTargetGroup": "

Modifies the properties of a DBProxyTargetGroup.

", "ModifyDBSnapshot": "

Updates a manual DB snapshot with a new engine version. The snapshot can be encrypted or unencrypted, but not shared or public.

Amazon RDS supports upgrading DB snapshots for MySQL, PostgreSQL, and Oracle. This command doesn't apply to RDS Custom.

", - "ModifyDBSnapshotAttribute": "

Adds an attribute and values to, or removes an attribute and values from, a manual DB snapshot.

To share a manual DB snapshot with other Amazon Web Services accounts, specify restore as the AttributeName and use the ValuesToAdd parameter to add a list of IDs of the Amazon Web Services accounts that are authorized to restore the manual DB snapshot. Uses the value all to make the manual DB snapshot public, which means it can be copied or restored by all Amazon Web Services accounts.

Don't add the all value for any manual DB snapshots that contain private information that you don't want available to all Amazon Web Services accounts.

If the manual DB snapshot is encrypted, it can be shared, but only by specifying a list of authorized Amazon Web Services account IDs for the ValuesToAdd parameter. You can't use all as a value for that parameter in this case.

To view which Amazon Web Services accounts have access to copy or restore a manual DB snapshot, or whether a manual DB snapshot public or private, use the DescribeDBSnapshotAttributes API action. The accounts are returned as values for the restore attribute.

", + "ModifyDBSnapshotAttribute": "

Adds an attribute and values to, or removes an attribute and values from, a manual DB snapshot.

To share a manual DB snapshot with other Amazon Web Services accounts, specify restore as the AttributeName and use the ValuesToAdd parameter to add a list of IDs of the Amazon Web Services accounts that are authorized to restore the manual DB snapshot. Use the value all to make the manual DB snapshot public, which means it can be copied or restored by all Amazon Web Services accounts.

Don't add the all value for any manual DB snapshots that contain private information that you don't want available to all Amazon Web Services accounts.

If the manual DB snapshot is encrypted, it can be shared, but only by specifying a list of authorized Amazon Web Services account IDs for the ValuesToAdd parameter. You can't use all as a value for that parameter in this case.

To view which Amazon Web Services accounts have access to copy or restore a manual DB snapshot, or whether a manual DB snapshot is public or private, use the DescribeDBSnapshotAttributes API operation. The accounts are returned as values for the restore attribute.

", "ModifyDBSubnetGroup": "

Modifies an existing DB subnet group. DB subnet groups must contain at least one subnet in at least two AZs in the Amazon Web Services Region.

", "ModifyEventSubscription": "

Modifies an existing RDS event notification subscription. You can't modify the source identifiers using this call. To change source identifiers for a subscription, use the AddSourceIdentifierToSubscription and RemoveSourceIdentifierFromSubscription calls.

You can see a list of the event categories for a given source type (SourceType) in Events in the Amazon RDS User Guide or by using the DescribeEventCategories operation.

", "ModifyGlobalCluster": "

Modify a setting for an Amazon Aurora global cluster. You can change one or more database configuration parameters by specifying these parameters and the new values in the request. For more information on Amazon Aurora, see What is Amazon Aurora? in the Amazon Aurora User Guide.

This action only applies to Aurora DB clusters.

", @@ -112,7 +112,7 @@ "PromoteReadReplicaDBCluster": "

Promotes a read replica DB cluster to a standalone DB cluster.

", "PurchaseReservedDBInstancesOffering": "

Purchases a reserved DB instance offering.

", "RebootDBCluster": "

You might need to reboot your DB cluster, usually for maintenance reasons. For example, if you make certain modifications, or if you change the DB cluster parameter group associated with the DB cluster, reboot the DB cluster for the changes to take effect.

Rebooting a DB cluster restarts the database engine service. Rebooting a DB cluster results in a momentary outage, during which the DB cluster status is set to rebooting.

Use this operation only for a non-Aurora Multi-AZ DB cluster.

For more information on Multi-AZ DB clusters, see Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.

", - "RebootDBInstance": "

You might need to reboot your DB instance, usually for maintenance reasons. For example, if you make certain modifications, or if you change the DB parameter group associated with the DB instance, you must reboot the instance for the changes to take effect.

Rebooting a DB instance restarts the database engine service. Rebooting a DB instance results in a momentary outage, during which the DB instance status is set to rebooting.

For more information about rebooting, see Rebooting a DB Instance in the Amazon RDS User Guide.

This command doesn't apply to RDS Custom.

", + "RebootDBInstance": "

You might need to reboot your DB instance, usually for maintenance reasons. For example, if you make certain modifications, or if you change the DB parameter group associated with the DB instance, you must reboot the instance for the changes to take effect.

Rebooting a DB instance restarts the database engine service. Rebooting a DB instance results in a momentary outage, during which the DB instance status is set to rebooting.

For more information about rebooting, see Rebooting a DB Instance in the Amazon RDS User Guide.

This command doesn't apply to RDS Custom.

If your DB instance is part of a Multi-AZ DB cluster, you can reboot the DB cluster with the RebootDBCluster operation.

", "RegisterDBProxyTargets": "

Associate one or more DBProxyTarget data structures with a DBProxyTargetGroup.

", "RemoveFromGlobalCluster": "

Detaches an Aurora secondary cluster from an Aurora global database cluster. The cluster becomes a standalone cluster with read-write capability instead of being read-only and receiving data from a primary cluster in a different Region.

This action only applies to Aurora DB clusters.

", "RemoveRoleFromDBCluster": "

Removes the association of an Amazon Web Services Identity and Access Management (IAM) role from a DB cluster.

For more information on Amazon Aurora DB clusters, see What is Amazon Aurora? in the Amazon Aurora User Guide.

For more information on Multi-AZ DB clusters, see Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.

", @@ -136,7 +136,7 @@ "StopActivityStream": "

Stops a database activity stream that was started using the Amazon Web Services console, the start-activity-stream CLI command, or the StartActivityStream action.

For more information, see Database Activity Streams in the Amazon Aurora User Guide.

", "StopDBCluster": "

Stops an Amazon Aurora DB cluster. When you stop a DB cluster, Aurora retains the DB cluster's metadata, including its endpoints and DB parameter groups. Aurora also retains the transaction logs so you can do a point-in-time restore if necessary.

For more information, see Stopping and Starting an Aurora Cluster in the Amazon Aurora User Guide.

This action only applies to Aurora DB clusters.

", "StopDBInstance": "

Stops an Amazon RDS DB instance. When you stop a DB instance, Amazon RDS retains the DB instance's metadata, including its endpoint, DB parameter group, and option group membership. Amazon RDS also retains the transaction logs so you can do a point-in-time restore if necessary.

For more information, see Stopping an Amazon RDS DB Instance Temporarily in the Amazon RDS User Guide.

This command doesn't apply to RDS Custom, Aurora MySQL, and Aurora PostgreSQL. For Aurora clusters, use StopDBCluster instead.

", - "StopDBInstanceAutomatedBackupsReplication": "

Stops automated backup replication for a DB instance.

This command doesn't apply to RDS Custom.

For more information, see Replicating Automated Backups to Another Amazon Web Services Region in the Amazon RDS User Guide.

" + "StopDBInstanceAutomatedBackupsReplication": "

Stops automated backup replication for a DB instance.

This command doesn't apply to RDS Custom, Aurora MySQL, and Aurora PostgreSQL.

For more information, see Replicating Automated Backups to Another Amazon Web Services Region in the Amazon RDS User Guide.

" }, "shapes": { "AccountAttributesMessage": { @@ -419,7 +419,7 @@ "CopyDBClusterSnapshotMessage$CopyTags": "

A value that indicates whether to copy all tags from the source DB cluster snapshot to the target DB cluster snapshot. By default, tags are not copied.

", "CopyDBSnapshotMessage$CopyTags": "

A value that indicates whether to copy all tags from the source DB snapshot to the target DB snapshot. By default, tags are not copied.

", "CreateDBClusterMessage$StorageEncrypted": "

A value that indicates whether the DB cluster is encrypted.

Valid for: Aurora DB clusters and Multi-AZ DB clusters

", - "CreateDBClusterMessage$EnableIAMDatabaseAuthentication": "

A value that indicates whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled.

For more information, see IAM Database Authentication in the Amazon Aurora User Guide..

Valid for: Aurora DB clusters only

", + "CreateDBClusterMessage$EnableIAMDatabaseAuthentication": "

A value that indicates whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled.

For more information, see IAM Database Authentication in the Amazon Aurora User Guide.

Valid for: Aurora DB clusters only

", "CreateDBClusterMessage$DeletionProtection": "

A value that indicates whether the DB cluster has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection isn't enabled.

Valid for: Aurora DB clusters and Multi-AZ DB clusters

", "CreateDBClusterMessage$EnableHttpEndpoint": "

A value that indicates whether to enable the HTTP endpoint for an Aurora Serverless v1 DB cluster. By default, the HTTP endpoint is disabled.

When enabled, the HTTP endpoint provides a connectionless web service API for running SQL queries on the Aurora Serverless v1 DB cluster. You can also query your database from inside the RDS console with the query editor.

For more information, see Using the Data API for Aurora Serverless v1 in the Amazon Aurora User Guide.

Valid for: Aurora DB clusters only

", "CreateDBClusterMessage$CopyTagsToSnapshot": "

A value that indicates whether to copy all tags from the DB cluster to snapshots of the DB cluster. The default is not to copy them.

Valid for: Aurora DB clusters and Multi-AZ DB clusters

", @@ -427,12 +427,12 @@ "CreateDBClusterMessage$PubliclyAccessible": "

A value that indicates whether the DB cluster is publicly accessible.

When the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB cluster's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB cluster's VPC. Access to the DB cluster is ultimately controlled by the security group it uses. That public access isn't permitted if the security group assigned to the DB cluster doesn't permit it.

When the DB cluster isn't publicly accessible, it is an internal DB cluster with a DNS name that resolves to a private IP address.

Default: The default behavior varies depending on whether DBSubnetGroupName is specified.

If DBSubnetGroupName isn't specified, and PubliclyAccessible isn't specified, the following applies:

If DBSubnetGroupName is specified, and PubliclyAccessible isn't specified, the following applies:

Valid for: Multi-AZ DB clusters only

", "CreateDBClusterMessage$AutoMinorVersionUpgrade": "

A value that indicates whether minor engine upgrades are applied automatically to the DB cluster during the maintenance window. By default, minor engine upgrades are applied automatically.

Valid for: Multi-AZ DB clusters only

", "CreateDBClusterMessage$EnablePerformanceInsights": "

A value that indicates whether to turn on Performance Insights for the DB cluster.

For more information, see Using Amazon Performance Insights in the Amazon RDS User Guide.

Valid for: Multi-AZ DB clusters only

", - "CreateDBInstanceMessage$MultiAZ": "

A value that indicates whether the DB instance is a Multi-AZ deployment. You can't set the AvailabilityZone parameter if the DB instance is a Multi-AZ deployment.

This setting doesn't apply to RDS Custom.

", + "CreateDBInstanceMessage$MultiAZ": "

A value that indicates whether the DB instance is a Multi-AZ deployment. You can't set the AvailabilityZone parameter if the DB instance is a Multi-AZ deployment.

This setting doesn't apply to RDS Custom.

Amazon Aurora

Not applicable. DB instance Availability Zones (AZs) are managed by the DB cluster.

", "CreateDBInstanceMessage$AutoMinorVersionUpgrade": "

A value that indicates whether minor engine upgrades are applied automatically to the DB instance during the maintenance window. By default, minor engine upgrades are applied automatically.

If you create an RDS Custom DB instance, you must set AutoMinorVersionUpgrade to false.

", "CreateDBInstanceMessage$PubliclyAccessible": "

A value that indicates whether the DB instance is publicly accessible.

When the DB instance is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB instance's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB instance's VPC. Access to the DB instance is ultimately controlled by the security group it uses. That public access is not permitted if the security group assigned to the DB instance doesn't permit it.

When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address.

Default: The default behavior varies depending on whether DBSubnetGroupName is specified.

If DBSubnetGroupName isn't specified, and PubliclyAccessible isn't specified, the following applies:

If DBSubnetGroupName is specified, and PubliclyAccessible isn't specified, the following applies:

", "CreateDBInstanceMessage$StorageEncrypted": "

A value that indicates whether the DB instance is encrypted. By default, it isn't encrypted.

For RDS Custom instances, either set this parameter to true or leave it unset. If you set this parameter to false, RDS reports an error.

Amazon Aurora

Not applicable. The encryption for DB instances is managed by the DB cluster.

", "CreateDBInstanceMessage$CopyTagsToSnapshot": "

A value that indicates whether to copy tags from the DB instance to snapshots of the DB instance. By default, tags are not copied.

Amazon Aurora

Not applicable. Copying tags to snapshots is managed by the DB cluster. Setting this value for an Aurora DB instance has no effect on the DB cluster setting.

", - "CreateDBInstanceMessage$EnableIAMDatabaseAuthentication": "

A value that indicates whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled.

This setting doesn't apply to RDS Custom or Amazon Aurora. In Aurora, mapping Amazon Web Services IAM accounts to database accounts is managed by the DB cluster.

For more information, see IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide.

", + "CreateDBInstanceMessage$EnableIAMDatabaseAuthentication": "

A value that indicates whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled.

For more information, see IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide.

This setting doesn't apply to RDS Custom.

Amazon Aurora

Not applicable. Mapping Amazon Web Services IAM accounts to database accounts is managed by the DB cluster.

", "CreateDBInstanceMessage$EnablePerformanceInsights": "

A value that indicates whether to enable Performance Insights for the DB instance. For more information, see Using Amazon Performance Insights in the Amazon RDS User Guide.

This setting doesn't apply to RDS Custom.

", "CreateDBInstanceMessage$DeletionProtection": "

A value that indicates whether the DB instance has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection isn't enabled. For more information, see Deleting a DB Instance.

Amazon Aurora

Not applicable. You can enable or disable deletion protection for the DB cluster. For more information, see CreateDBCluster. DB instances in a DB cluster can be deleted even when deletion protection is enabled for the DB cluster.

", "CreateDBInstanceMessage$EnableCustomerOwnedIp": "

A value that indicates whether to enable a customer-owned IP address (CoIP) for an RDS on Outposts DB instance.

A CoIP provides local or external connectivity to resources in your Outpost subnets through your on-premises network. For some use cases, a CoIP can provide lower latency for connections to the DB instance from outside of its virtual private cloud (VPC) on your local network.

For more information about RDS on Outposts, see Working with Amazon RDS on Amazon Web Services Outposts in the Amazon RDS User Guide.

For more information about CoIPs, see Customer-owned IP addresses in the Amazon Web Services Outposts User Guide.

", @@ -481,7 +481,7 @@ "ModifyDBInstanceMessage$CopyTagsToSnapshot": "

A value that indicates whether to copy all tags from the DB instance to snapshots of the DB instance. By default, tags are not copied.

Amazon Aurora

Not applicable. Copying tags to snapshots is managed by the DB cluster. Setting this value for an Aurora DB instance has no effect on the DB cluster setting. For more information, see ModifyDBCluster.

", "ModifyDBInstanceMessage$PubliclyAccessible": "

A value that indicates whether the DB instance is publicly accessible.

When the DB instance is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB instance's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB instance's VPC. Access to the DB instance is ultimately controlled by the security group it uses. That public access isn't permitted if the security group assigned to the DB instance doesn't permit it.

When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address.

PubliclyAccessible only applies to DB instances in a VPC. The DB instance must be part of a public subnet and PubliclyAccessible must be enabled for it to be publicly accessible.

Changes to the PubliclyAccessible parameter are applied immediately regardless of the value of the ApplyImmediately parameter.

", "ModifyDBInstanceMessage$EnableIAMDatabaseAuthentication": "

A value that indicates whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled.

This setting doesn't apply to Amazon Aurora. Mapping Amazon Web Services IAM accounts to database accounts is managed by the DB cluster.

For more information about IAM database authentication, see IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide.

This setting doesn't apply to RDS Custom.

", - "ModifyDBInstanceMessage$EnablePerformanceInsights": "

A value that indicates whether to enable Performance Insights for the DB instance.

For more information, see Using Amazon Performance Insights in the Amazon RDS User Guide..

This setting doesn't apply to RDS Custom.

", + "ModifyDBInstanceMessage$EnablePerformanceInsights": "

A value that indicates whether to enable Performance Insights for the DB instance.

For more information, see Using Amazon Performance Insights in the Amazon RDS User Guide.

This setting doesn't apply to RDS Custom.

", "ModifyDBInstanceMessage$UseDefaultProcessorFeatures": "

A value that indicates whether the DB instance class of the DB instance uses its default processor features.

This setting doesn't apply to RDS Custom.

", "ModifyDBInstanceMessage$DeletionProtection": "

A value that indicates whether the DB instance has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection isn't enabled. For more information, see Deleting a DB Instance.

", "ModifyDBInstanceMessage$CertificateRotationRestart": "

A value that indicates whether the DB instance is restarted when you rotate your SSL/TLS certificate.

By default, the DB instance is restarted when you rotate your SSL/TLS certificate. The certificate is not updated until the DB instance is restarted.

Set this parameter only if you are not using SSL/TLS to connect to the DB instance.

If you are using SSL/TLS to connect to the DB instance, follow the appropriate instructions for your DB engine to rotate your SSL/TLS certificate:

This setting doesn't apply to RDS Custom.

", @@ -523,7 +523,7 @@ "RestoreDBInstanceFromS3Message$StorageEncrypted": "

A value that indicates whether the new DB instance is encrypted or not.

", "RestoreDBInstanceFromS3Message$CopyTagsToSnapshot": "

A value that indicates whether to copy all tags from the DB instance to snapshots of the DB instance. By default, tags are not copied.

", "RestoreDBInstanceFromS3Message$EnableIAMDatabaseAuthentication": "

A value that indicates whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled.

For more information about IAM database authentication, see IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide.

", - "RestoreDBInstanceFromS3Message$EnablePerformanceInsights": "

A value that indicates whether to enable Performance Insights for the DB instance.

For more information, see Using Amazon Performance Insights in the Amazon RDS User Guide..

", + "RestoreDBInstanceFromS3Message$EnablePerformanceInsights": "

A value that indicates whether to enable Performance Insights for the DB instance.

For more information, see Using Amazon Performance Insights in the Amazon RDS User Guide.

", "RestoreDBInstanceFromS3Message$UseDefaultProcessorFeatures": "

A value that indicates whether the DB instance class of the DB instance uses its default processor features.

", "RestoreDBInstanceFromS3Message$DeletionProtection": "

A value that indicates whether the DB instance has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection isn't enabled. For more information, see Deleting a DB Instance.

", "RestoreDBInstanceToPointInTimeMessage$MultiAZ": "

A value that indicates whether the DB instance is a Multi-AZ deployment.

This setting doesn't apply to RDS Custom.

Constraint: You can't specify the AvailabilityZone parameter if the DB instance is a Multi-AZ deployment.

", @@ -1458,7 +1458,7 @@ "DBSecurityGroupNameList": { "base": null, "refs": { - "CreateDBInstanceMessage$DBSecurityGroups": "

A list of DB security groups to associate with this DB instance.

Default: The default DB security group for the database engine.

", + "CreateDBInstanceMessage$DBSecurityGroups": "

A list of DB security groups to associate with this DB instance.

This setting applies to the legacy EC2-Classic platform, which is no longer used to create new DB instances. Use the VpcSecurityGroupIds setting instead.

", "ModifyDBInstanceMessage$DBSecurityGroups": "

A list of DB security groups to authorize on this DB instance. Changing this setting doesn't result in an outage and the change is asynchronously applied as soon as possible.

This setting doesn't apply to RDS Custom.

Constraints:

", "OptionConfiguration$DBSecurityGroupMemberships": "

A list of DBSecurityGroupMembership name strings used for this option.

", "RestoreDBInstanceFromS3Message$DBSecurityGroups": "

A list of DB security groups to associate with this DB instance.

Default: The default DB security group for the database engine.

" @@ -2076,7 +2076,7 @@ "EngineFamily": { "base": null, "refs": { - "CreateDBProxyRequest$EngineFamily": "

The kinds of databases that the proxy can connect to. This value determines which database network protocol the proxy recognizes when it interprets network traffic to and from the database. The engine family applies to MySQL and PostgreSQL for both RDS and Aurora.

" + "CreateDBProxyRequest$EngineFamily": "

The kinds of databases that the proxy can connect to. This value determines which database network protocol the proxy recognizes when it interprets network traffic to and from the database. For Aurora MySQL, RDS for MariaDB, and RDS for MySQL databases, specify MYSQL. For Aurora PostgreSQL and RDS for PostgreSQL databases, specify POSTGRESQL.

" } }, "EngineModeList": { @@ -2089,7 +2089,7 @@ } }, "Event": { - "base": "

This data type is used as a response element in the DescribeEvents action.

", + "base": "

This data type is used as a response element in the DescribeEvents action.

", "refs": { "EventList$member": null } @@ -2106,7 +2106,7 @@ } }, "EventCategoriesMap": { - "base": "

Contains the results of a successful invocation of the DescribeEventCategories operation.

", + "base": "

Contains the results of a successful invocation of the DescribeEventCategories operation.

", "refs": { "EventCategoriesMapList$member": null } @@ -2427,19 +2427,19 @@ "CreateDBClusterMessage$AllocatedStorage": "

The amount of storage in gibibytes (GiB) to allocate to each DB instance in the Multi-AZ DB cluster.

This setting is required to create a Multi-AZ DB cluster.

Valid for: Multi-AZ DB clusters only

", "CreateDBClusterMessage$Iops": "

The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for each DB instance in the Multi-AZ DB cluster.

For information about valid Iops values, see Amazon RDS Provisioned IOPS storage to improve performance in the Amazon RDS User Guide.

This setting is required to create a Multi-AZ DB cluster.

Constraints: Must be a multiple between .5 and 50 of the storage amount for the DB cluster.

Valid for: Multi-AZ DB clusters only

", "CreateDBClusterMessage$MonitoringInterval": "

The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB cluster. To turn off collecting Enhanced Monitoring metrics, specify 0. The default is 0.

If MonitoringRoleArn is specified, also set MonitoringInterval to a value other than 0.

Valid Values: 0, 1, 5, 10, 15, 30, 60

Valid for: Multi-AZ DB clusters only

", - "CreateDBClusterMessage$PerformanceInsightsRetentionPeriod": "

The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).

Valid for: Multi-AZ DB clusters only

", + "CreateDBClusterMessage$PerformanceInsightsRetentionPeriod": "

The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:

For example, the following values are valid:

If you specify a retention period such as 94, which isn't a valid value, RDS issues an error.

Valid for: Multi-AZ DB clusters only

", "CreateDBInstanceMessage$AllocatedStorage": "

The amount of storage in gibibytes (GiB) to allocate for the DB instance.

Type: Integer

Amazon Aurora

Not applicable. Aurora cluster volumes automatically grow as the amount of data in your database increases, though you are only charged for the space that you use in an Aurora cluster volume.

Amazon RDS Custom

Constraints to the amount of storage for each storage type are the following:

MySQL

Constraints to the amount of storage for each storage type are the following:

MariaDB

Constraints to the amount of storage for each storage type are the following:

PostgreSQL

Constraints to the amount of storage for each storage type are the following:

Oracle

Constraints to the amount of storage for each storage type are the following:

SQL Server

Constraints to the amount of storage for each storage type are the following:

", - "CreateDBInstanceMessage$BackupRetentionPeriod": "

The number of days for which automated backups are retained. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

Amazon Aurora

Not applicable. The retention period for automated backups is managed by the DB cluster.

Default: 1

Constraints:

", + "CreateDBInstanceMessage$BackupRetentionPeriod": "

The number of days for which automated backups are retained. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

Amazon Aurora

Not applicable. The retention period for automated backups is managed by the DB cluster.

Default: 1

Constraints:

", "CreateDBInstanceMessage$Port": "

The port number on which the database accepts connections.

MySQL

Default: 3306

Valid values: 1150-65535

Type: Integer

MariaDB

Default: 3306

Valid values: 1150-65535

Type: Integer

PostgreSQL

Default: 5432

Valid values: 1150-65535

Type: Integer

Oracle

Default: 1521

Valid values: 1150-65535

SQL Server

Default: 1433

Valid values: 1150-65535 except 1234, 1434, 3260, 3343, 3389, 47001, and 49152-49156.

Amazon Aurora

Default: 3306

Valid values: 1150-65535

Type: Integer

", - "CreateDBInstanceMessage$Iops": "

The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance. For information about valid Iops values, see Amazon RDS Provisioned IOPS storage to improve performance in the Amazon RDS User Guide.

Constraints: For MariaDB, MySQL, Oracle, and PostgreSQL DB instances, must be a multiple between .5 and 50 of the storage amount for the DB instance. For SQL Server DB instances, must be a multiple between 1 and 50 of the storage amount for the DB instance.

", + "CreateDBInstanceMessage$Iops": "

The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance. For information about valid Iops values, see Amazon RDS Provisioned IOPS storage to improve performance in the Amazon RDS User Guide.

Constraints: For MariaDB, MySQL, Oracle, and PostgreSQL DB instances, must be a multiple between .5 and 50 of the storage amount for the DB instance. For SQL Server DB instances, must be a multiple between 1 and 50 of the storage amount for the DB instance.

Amazon Aurora

Not applicable. Storage is managed by the DB cluster.

", "CreateDBInstanceMessage$MonitoringInterval": "

The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collection of Enhanced Monitoring metrics, specify 0. The default is 0.

If MonitoringRoleArn is specified, then you must set MonitoringInterval to a value other than 0.

This setting doesn't apply to RDS Custom.

Valid Values: 0, 1, 5, 10, 15, 30, 60

", "CreateDBInstanceMessage$PromotionTier": "

A value that specifies the order in which an Aurora Replica is promoted to the primary instance after a failure of the existing primary instance. For more information, see Fault Tolerance for an Aurora DB Cluster in the Amazon Aurora User Guide.

This setting doesn't apply to RDS Custom.

Default: 1

Valid Values: 0 - 15

", - "CreateDBInstanceMessage$PerformanceInsightsRetentionPeriod": "

The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).

This setting doesn't apply to RDS Custom.

", - "CreateDBInstanceMessage$MaxAllocatedStorage": "

The upper limit in gibibytes (GiB) to which Amazon RDS can automatically scale the storage of the DB instance.

For more information about this setting, including limitations that apply to it, see Managing capacity automatically with Amazon RDS storage autoscaling in the Amazon RDS User Guide.

This setting doesn't apply to RDS Custom.

", + "CreateDBInstanceMessage$PerformanceInsightsRetentionPeriod": "

The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:

For example, the following values are valid:

If you specify a retention period such as 94, which isn't a valid value, RDS issues an error.

This setting doesn't apply to RDS Custom.

", + "CreateDBInstanceMessage$MaxAllocatedStorage": "

The upper limit in gibibytes (GiB) to which Amazon RDS can automatically scale the storage of the DB instance.

For more information about this setting, including limitations that apply to it, see Managing capacity automatically with Amazon RDS storage autoscaling in the Amazon RDS User Guide.

This setting doesn't apply to RDS Custom.

Amazon Aurora

Not applicable. Storage is managed by the DB cluster.

", "CreateDBInstanceReadReplicaMessage$Port": "

The port number that the DB instance uses for connections.

Default: Inherits from the source DB instance

Valid Values: 1150-65535

", "CreateDBInstanceReadReplicaMessage$Iops": "

The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance.

", "CreateDBInstanceReadReplicaMessage$MonitoringInterval": "

The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the read replica. To disable collecting Enhanced Monitoring metrics, specify 0. The default is 0.

If MonitoringRoleArn is specified, then you must also set MonitoringInterval to a value other than 0.

This setting doesn't apply to RDS Custom.

Valid Values: 0, 1, 5, 10, 15, 30, 60

", - "CreateDBInstanceReadReplicaMessage$PerformanceInsightsRetentionPeriod": "

The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).

This setting doesn't apply to RDS Custom.

", + "CreateDBInstanceReadReplicaMessage$PerformanceInsightsRetentionPeriod": "

The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:

For example, the following values are valid:

If you specify a retention period such as 94, which isn't a valid value, RDS issues an error.

This setting doesn't apply to RDS Custom.

", "CreateDBInstanceReadReplicaMessage$MaxAllocatedStorage": "

The upper limit in gibibytes (GiB) to which Amazon RDS can automatically scale the storage of the DB instance.

For more information about this setting, including limitations that apply to it, see Managing capacity automatically with Amazon RDS storage autoscaling in the Amazon RDS User Guide.

", "CreateDBProxyRequest$IdleClientTimeout": "

The number of seconds that a connection to the proxy can be inactive before the proxy disconnects it. You can set this value higher or lower than the connection timeout limit for the associated database.

", "DBCluster$AllocatedStorage": "

For all database engines except Amazon Aurora, AllocatedStorage specifies the allocated storage size in gibibytes (GiB). For Aurora, AllocatedStorage always returns 1, because Aurora DB cluster storage size isn't fixed, but instead automatically adjusts as needed.

", @@ -2448,7 +2448,7 @@ "DBCluster$Capacity": "

The current capacity of an Aurora Serverless v1 DB cluster. The capacity is 0 (zero) when the cluster is paused.

For more information about Aurora Serverless v1, see Using Amazon Aurora Serverless v1 in the Amazon Aurora User Guide.

", "DBCluster$Iops": "

The Provisioned IOPS (I/O operations per second) value.

This setting is only for non-Aurora Multi-AZ DB clusters.

", "DBCluster$MonitoringInterval": "

The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB cluster.

This setting is only for non-Aurora Multi-AZ DB clusters.

", - "DBCluster$PerformanceInsightsRetentionPeriod": "

The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).

This setting is only for non-Aurora Multi-AZ DB clusters.

", + "DBCluster$PerformanceInsightsRetentionPeriod": "

The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:

For example, the following values are valid:

This setting is only for non-Aurora Multi-AZ DB clusters.

", "DBClusterCapacityInfo$PendingCapacity": "

A value that specifies the capacity that the DB cluster scales to next.

", "DBClusterCapacityInfo$CurrentCapacity": "

The current capacity of the DB cluster.

", "DBClusterCapacityInfo$SecondsBeforeTimeout": "

The number of seconds before a call to ModifyCurrentDBClusterCapacity times out.

", @@ -2456,7 +2456,7 @@ "DBInstance$Iops": "

Specifies the Provisioned IOPS (I/O operations per second) value.

", "DBInstance$MonitoringInterval": "

The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance.

", "DBInstance$PromotionTier": "

A value that specifies the order in which an Aurora Replica is promoted to the primary instance after a failure of the existing primary instance. For more information, see Fault Tolerance for an Aurora DB Cluster in the Amazon Aurora User Guide.

", - "DBInstance$PerformanceInsightsRetentionPeriod": "

The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).

", + "DBInstance$PerformanceInsightsRetentionPeriod": "

The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:

For example, the following values are valid:

", "DBInstance$MaxAllocatedStorage": "

The upper limit in gibibytes (GiB) to which Amazon RDS can automatically scale the storage of the DB instance.

", "DBInstanceAutomatedBackup$Iops": "

The IOPS (I/O operations per second) value for the automated backup.

", "DBInstanceAutomatedBackup$BackupRetentionPeriod": "

The retention period for the automated backups.

", @@ -2485,7 +2485,7 @@ "DescribeGlobalClustersMessage$MaxRecords": "

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that you can retrieve the remaining results.

Default: 100

Constraints: Minimum 20, maximum 100.

", "DescribeOptionGroupOptionsMessage$MaxRecords": "

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that you can retrieve the remaining results.

Default: 100

Constraints: Minimum 20, maximum 100.

", "DescribeOptionGroupsMessage$MaxRecords": "

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that you can retrieve the remaining results.

Default: 100

Constraints: Minimum 20, maximum 100.

", - "DescribeOrderableDBInstanceOptionsMessage$MaxRecords": "

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that you can retrieve the remaining results.

Default: 100

Constraints: Minimum 20, maximum 100.

", + "DescribeOrderableDBInstanceOptionsMessage$MaxRecords": "

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that you can retrieve the remaining results.

Default: 100

Constraints: Minimum 20, maximum 10000.

", "DescribePendingMaintenanceActionsMessage$MaxRecords": "

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that you can retrieve the remaining results.

Default: 100

Constraints: Minimum 20, maximum 100.

", "DescribeReservedDBInstancesMessage$MaxRecords": "

The maximum number of records to include in the response. If more than the MaxRecords value is available, a pagination token called a marker is included in the response so you can retrieve the remaining results.

Default: 100

Constraints: Minimum 20, maximum 100.

", "DescribeReservedDBInstancesOfferingsMessage$MaxRecords": "

The maximum number of records to include in the response. If more than the MaxRecords value is available, a pagination token called a marker is included in the response so you can retrieve the remaining results.

Default: 100

Constraints: Minimum 20, maximum 100.

", @@ -2497,14 +2497,14 @@ "ModifyDBClusterMessage$AllocatedStorage": "

The amount of storage in gibibytes (GiB) to allocate to each DB instance in the Multi-AZ DB cluster.

Type: Integer

Valid for: Multi-AZ DB clusters only

", "ModifyDBClusterMessage$Iops": "

The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for each DB instance in the Multi-AZ DB cluster.

For information about valid Iops values, see Amazon RDS Provisioned IOPS Storage to Improve Performance in the Amazon RDS User Guide.

Constraints: Must be a multiple between .5 and 50 of the storage amount for the DB cluster.

Valid for: Multi-AZ DB clusters only

", "ModifyDBClusterMessage$MonitoringInterval": "

The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB cluster. To turn off collecting Enhanced Monitoring metrics, specify 0. The default is 0.

If MonitoringRoleArn is specified, also set MonitoringInterval to a value other than 0.

Valid Values: 0, 1, 5, 10, 15, 30, 60

Valid for: Multi-AZ DB clusters only

", - "ModifyDBClusterMessage$PerformanceInsightsRetentionPeriod": "

The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).

Valid for: Multi-AZ DB clusters only

", + "ModifyDBClusterMessage$PerformanceInsightsRetentionPeriod": "

The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:

For example, the following values are valid:

If you specify a retention period such as 94, which isn't a valid value, RDS issues an error.

Valid for: Multi-AZ DB clusters only

", "ModifyDBInstanceMessage$AllocatedStorage": "

The new amount of storage in gibibytes (GiB) to allocate for the DB instance.

For MariaDB, MySQL, Oracle, and PostgreSQL, the value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value.

For the valid values for allocated storage for each engine, see CreateDBInstance.

", - "ModifyDBInstanceMessage$BackupRetentionPeriod": "

The number of days to retain automated backups. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

Enabling and disabling backups can result in a brief I/O suspension that lasts from a few seconds to a few minutes, depending on the size and class of your DB instance.

These changes are applied during the next maintenance window unless the ApplyImmediately parameter is enabled for this request. If you change the parameter from one non-zero value to another non-zero value, the change is asynchronously applied as soon as possible.

Amazon Aurora

Not applicable. The retention period for automated backups is managed by the DB cluster. For more information, see ModifyDBCluster.

Default: Uses existing setting

Constraints:

", + "ModifyDBInstanceMessage$BackupRetentionPeriod": "

The number of days to retain automated backups. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

Enabling and disabling backups can result in a brief I/O suspension that lasts from a few seconds to a few minutes, depending on the size and class of your DB instance.

These changes are applied during the next maintenance window unless the ApplyImmediately parameter is enabled for this request. If you change the parameter from one non-zero value to another non-zero value, the change is asynchronously applied as soon as possible.

Amazon Aurora

Not applicable. The retention period for automated backups is managed by the DB cluster. For more information, see ModifyDBCluster.

Default: Uses existing setting

Constraints:

", "ModifyDBInstanceMessage$Iops": "

The new Provisioned IOPS (I/O operations per second) value for the RDS instance.

Changing this setting doesn't result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is enabled for this request. If you are migrating from Provisioned IOPS to standard storage, set this value to 0. The DB instance will require a reboot for the change in storage type to take effect.

If you choose to migrate your DB instance from using standard storage to using Provisioned IOPS, or from using Provisioned IOPS to using standard storage, the process can take time. The duration of the migration depends on several factors such as database load, storage size, storage type (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and the number of prior scale storage operations. Typical migration times are under 24 hours, but the process can take up to several days in some cases. During the migration, the DB instance is available for use, but might experience performance degradation. While the migration takes place, nightly backups for the instance are suspended. No other Amazon RDS operations can take place for the instance, including modifying the instance, rebooting the instance, deleting the instance, creating a read replica for the instance, and creating a DB snapshot of the instance.

Constraints: For MariaDB, MySQL, Oracle, and PostgreSQL, the value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value.

Default: Uses existing setting

", "ModifyDBInstanceMessage$MonitoringInterval": "

The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collecting Enhanced Monitoring metrics, specify 0, which is the default.

If MonitoringRoleArn is specified, set MonitoringInterval to a value other than 0.

This setting doesn't apply to RDS Custom.

Valid Values: 0, 1, 5, 10, 15, 30, 60

", "ModifyDBInstanceMessage$DBPortNumber": "

The port number on which the database accepts connections.

The value of the DBPortNumber parameter must not match any of the port values specified for options in the option group for the DB instance.

If you change the DBPortNumber value, your database restarts regardless of the value of the ApplyImmediately parameter.

This setting doesn't apply to RDS Custom.

MySQL

Default: 3306

Valid values: 1150-65535

MariaDB

Default: 3306

Valid values: 1150-65535

PostgreSQL

Default: 5432

Valid values: 1150-65535

Type: Integer

Oracle

Default: 1521

Valid values: 1150-65535

SQL Server

Default: 1433

Valid values: 1150-65535 except 1234, 1434, 3260, 3343, 3389, 47001, and 49152-49156.

Amazon Aurora

Default: 3306

Valid values: 1150-65535

", "ModifyDBInstanceMessage$PromotionTier": "

A value that specifies the order in which an Aurora Replica is promoted to the primary instance after a failure of the existing primary instance. For more information, see Fault Tolerance for an Aurora DB Cluster in the Amazon Aurora User Guide.

This setting doesn't apply to RDS Custom.

Default: 1

Valid Values: 0 - 15

", - "ModifyDBInstanceMessage$PerformanceInsightsRetentionPeriod": "

The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).

This setting doesn't apply to RDS Custom.

", + "ModifyDBInstanceMessage$PerformanceInsightsRetentionPeriod": "

The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:

For example, the following values are valid:

If you specify a retention period such as 94, which isn't a valid value, RDS issues an error.

This setting doesn't apply to RDS Custom.

", "ModifyDBInstanceMessage$MaxAllocatedStorage": "

The upper limit in gibibytes (GiB) to which Amazon RDS can automatically scale the storage of the DB instance.

For more information about this setting, including limitations that apply to it, see Managing capacity automatically with Amazon RDS storage autoscaling in the Amazon RDS User Guide.

This setting doesn't apply to RDS Custom.

", "ModifyDBInstanceMessage$ResumeFullAutomationModeMinutes": "

The number of minutes to pause the automation. When the time period ends, RDS Custom resumes full automation. The minimum value is 60 (default). The maximum value is 1,440.

", "ModifyDBProxyRequest$IdleClientTimeout": "

The number of seconds that a connection to the proxy can be inactive before the proxy disconnects it. You can set this value higher or lower than the connection timeout limit for the associated database.

", @@ -2535,7 +2535,7 @@ "RestoreDBInstanceFromS3Message$Port": "

The port number on which the database accepts connections.

Type: Integer

Valid Values: 1150-65535

Default: 3306

", "RestoreDBInstanceFromS3Message$Iops": "

The amount of Provisioned IOPS (input/output operations per second) to allocate initially for the DB instance. For information about valid Iops values, see Amazon RDS Provisioned IOPS Storage to Improve Performance in the Amazon RDS User Guide.

", "RestoreDBInstanceFromS3Message$MonitoringInterval": "

The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collecting Enhanced Monitoring metrics, specify 0.

If MonitoringRoleArn is specified, then you must also set MonitoringInterval to a value other than 0.

Valid Values: 0, 1, 5, 10, 15, 30, 60

Default: 0

", - "RestoreDBInstanceFromS3Message$PerformanceInsightsRetentionPeriod": "

The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).

", + "RestoreDBInstanceFromS3Message$PerformanceInsightsRetentionPeriod": "

The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:

For example, the following values are valid:

If you specify a retention period such as 94, which isn't a valid value, RDS issues an error.

", "RestoreDBInstanceFromS3Message$MaxAllocatedStorage": "

The upper limit in gibibytes (GiB) to which Amazon RDS can automatically scale the storage of the DB instance.

For more information about this setting, including limitations that apply to it, see Managing capacity automatically with Amazon RDS storage autoscaling in the Amazon RDS User Guide.

", "RestoreDBInstanceToPointInTimeMessage$Port": "

The port number on which the database accepts connections.

Constraints: Value must be 1150-65535

Default: The same port as the original DB instance.

", "RestoreDBInstanceToPointInTimeMessage$Iops": "

The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance.

Constraints: Must be an integer greater than 1000.

SQL Server

Setting the IOPS value for the SQL Server database engine isn't supported.

", @@ -2544,7 +2544,7 @@ "ScalingConfiguration$MaxCapacity": "

The maximum capacity for an Aurora DB cluster in serverless DB engine mode.

For Aurora MySQL, valid capacity values are 1, 2, 4, 8, 16, 32, 64, 128, and 256.

For Aurora PostgreSQL, valid capacity values are 2, 4, 8, 16, 32, 64, 192, and 384.

The maximum capacity must be greater than or equal to the minimum capacity.

", "ScalingConfiguration$SecondsUntilAutoPause": "

The time, in seconds, before an Aurora DB cluster in serverless mode is paused.

Specify a value between 300 and 86,400 seconds.

", "ScalingConfiguration$SecondsBeforeTimeout": "

The amount of time, in seconds, that Aurora Serverless v1 tries to find a scaling point to perform seamless scaling before enforcing the timeout action. The default is 300.

Specify a value between 60 and 600 seconds.

", - "ScalingConfigurationInfo$MinCapacity": "

The maximum capacity for the Aurora DB cluster in serverless DB engine mode.

", + "ScalingConfigurationInfo$MinCapacity": "

The minimum capacity for an Aurora DB cluster in serverless DB engine mode.

", "ScalingConfigurationInfo$MaxCapacity": "

The maximum capacity for an Aurora DB cluster in serverless DB engine mode.

", "ScalingConfigurationInfo$SecondsUntilAutoPause": "

The remaining amount of time, in seconds, before the Aurora DB cluster in serverless mode is paused. A DB cluster can be paused only when it's idle (it has no connections).

", "ScalingConfigurationInfo$SecondsBeforeTimeout": "

The number of seconds before scaling times out. What happens when an attempted scaling action times out is determined by the TimeoutAction setting.

", @@ -2712,8 +2712,8 @@ "PendingCloudwatchLogsExports$LogTypesToEnable": "

Log types that are in the process of being activated. After they are activated, these log types are exported to CloudWatch Logs.

", "PendingCloudwatchLogsExports$LogTypesToDisable": "

Log types that are in the process of being deactivated. After they are deactivated, these log types aren't exported to CloudWatch Logs.

", "RestoreDBClusterFromS3Message$EnableCloudwatchLogsExports": "

The list of logs that the restored DB cluster is to export to CloudWatch Logs. The values in the list depend on the DB engine being used.

Aurora MySQL

Possible values are audit, error, general, and slowquery.

Aurora PostgreSQL

Possible value is postgresql.

For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.

", - "RestoreDBClusterFromSnapshotMessage$EnableCloudwatchLogsExports": "

The list of logs that the restored DB cluster is to export to Amazon CloudWatch Logs. The values in the list depend on the DB engine being used.

RDS for MySQL

Possible values are error, general, and slowquery.

RDS for PostgreSQL

Possible values are postgresql and upgrade.

Aurora MySQL

Possible values are audit, error, general, and slowquery.

Aurora PostgreSQL

Possible value is postgresql.

For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide..

For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.

Valid for: Aurora DB clusters and Multi-AZ DB clusters

", - "RestoreDBClusterToPointInTimeMessage$EnableCloudwatchLogsExports": "

The list of logs that the restored DB cluster is to export to CloudWatch Logs. The values in the list depend on the DB engine being used.

RDS for MySQL

Possible values are error, general, and slowquery.

RDS for PostgreSQL

Possible values are postgresql and upgrade.

Aurora MySQL

Possible values are audit, error, general, and slowquery.

Aurora PostgreSQL

Possible value is postgresql.

For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide..

For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.

Valid for: Aurora DB clusters and Multi-AZ DB clusters

", + "RestoreDBClusterFromSnapshotMessage$EnableCloudwatchLogsExports": "

The list of logs that the restored DB cluster is to export to Amazon CloudWatch Logs. The values in the list depend on the DB engine being used.

RDS for MySQL

Possible values are error, general, and slowquery.

RDS for PostgreSQL

Possible values are postgresql and upgrade.

Aurora MySQL

Possible values are audit, error, general, and slowquery.

Aurora PostgreSQL

Possible value is postgresql.

For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide.

For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.

Valid for: Aurora DB clusters and Multi-AZ DB clusters

", + "RestoreDBClusterToPointInTimeMessage$EnableCloudwatchLogsExports": "

The list of logs that the restored DB cluster is to export to CloudWatch Logs. The values in the list depend on the DB engine being used.

RDS for MySQL

Possible values are error, general, and slowquery.

RDS for PostgreSQL

Possible values are postgresql and upgrade.

Aurora MySQL

Possible values are audit, error, general, and slowquery.

Aurora PostgreSQL

Possible value is postgresql.

For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide.

For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.

Valid for: Aurora DB clusters and Multi-AZ DB clusters

", "RestoreDBInstanceFromDBSnapshotMessage$EnableCloudwatchLogsExports": "

The list of logs that the restored DB instance is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide.

This setting doesn't apply to RDS Custom.

", "RestoreDBInstanceFromS3Message$EnableCloudwatchLogsExports": "

The list of logs that the restored DB instance is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide.

", "RestoreDBInstanceToPointInTimeMessage$EnableCloudwatchLogsExports": "

The list of logs that the restored DB instance is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide.

This setting doesn't apply to RDS Custom.

" @@ -3164,7 +3164,7 @@ "ProcessorFeatureList": { "base": null, "refs": { - "CreateDBInstanceMessage$ProcessorFeatures": "

The number of CPU cores and the number of threads per core for the DB instance class of the DB instance.

This setting doesn't apply to RDS Custom.

", + "CreateDBInstanceMessage$ProcessorFeatures": "

The number of CPU cores and the number of threads per core for the DB instance class of the DB instance.

This setting doesn't apply to RDS Custom.

Amazon Aurora

Not applicable.

", "CreateDBInstanceReadReplicaMessage$ProcessorFeatures": "

The number of CPU cores and the number of threads per core for the DB instance class of the DB instance.

This setting doesn't apply to RDS Custom.

", "DBInstance$ProcessorFeatures": "

The number of CPU cores and the number of threads per core for the DB instance class of the DB instance.

", "DBSnapshot$ProcessorFeatures": "

The number of CPU cores and the number of threads per core for the DB instance class of the DB instance when the DB snapshot was created.

", @@ -3721,14 +3721,14 @@ "CopyDBClusterSnapshotMessage$SourceDBClusterSnapshotIdentifier": "

The identifier of the DB cluster snapshot to copy. This parameter isn't case-sensitive.

You can't copy an encrypted, shared DB cluster snapshot from one Amazon Web Services Region to another.

Constraints:

Example: my-cluster-snapshot1

", "CopyDBClusterSnapshotMessage$TargetDBClusterSnapshotIdentifier": "

The identifier of the new DB cluster snapshot to create from the source DB cluster snapshot. This parameter isn't case-sensitive.

Constraints:

Example: my-cluster-snapshot2

", "CopyDBClusterSnapshotMessage$KmsKeyId": "

The Amazon Web Services KMS key identifier for an encrypted DB cluster snapshot. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the Amazon Web Services KMS key.

If you copy an encrypted DB cluster snapshot from your Amazon Web Services account, you can specify a value for KmsKeyId to encrypt the copy with a new KMS key. If you don't specify a value for KmsKeyId, then the copy of the DB cluster snapshot is encrypted with the same KMS key as the source DB cluster snapshot.

If you copy an encrypted DB cluster snapshot that is shared from another Amazon Web Services account, then you must specify a value for KmsKeyId.

To copy an encrypted DB cluster snapshot to another Amazon Web Services Region, you must set KmsKeyId to the Amazon Web Services KMS key identifier you want to use to encrypt the copy of the DB cluster snapshot in the destination Amazon Web Services Region. KMS keys are specific to the Amazon Web Services Region that they are created in, and you can't use KMS keys from one Amazon Web Services Region in another Amazon Web Services Region.

If you copy an unencrypted DB cluster snapshot and specify a value for the KmsKeyId parameter, an error is returned.

", - "CopyDBClusterSnapshotMessage$PreSignedUrl": "

The URL that contains a Signature Version 4 signed request for the CopyDBClusterSnapshot API action in the Amazon Web Services Region that contains the source DB cluster snapshot to copy. The PreSignedUrl parameter must be used when copying an encrypted DB cluster snapshot from another Amazon Web Services Region. Don't specify PreSignedUrl when you are copying an encrypted DB cluster snapshot in the same Amazon Web Services Region.

The pre-signed URL must be a valid request for the CopyDBClusterSnapshot API action that can be executed in the source Amazon Web Services Region that contains the encrypted DB cluster snapshot to be copied. The pre-signed URL request must contain the following parameter values:

To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and Signature Version 4 Signing Process.

If you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion (or --source-region for the CLI) instead of specifying PreSignedUrl manually. Specifying SourceRegion autogenerates a pre-signed URL that is a valid request for the operation that can be executed in the source Amazon Web Services Region.

", + "CopyDBClusterSnapshotMessage$PreSignedUrl": "

When you are copying a DB cluster snapshot from one Amazon Web Services GovCloud (US) Region to another, the URL that contains a Signature Version 4 signed request for the CopyDBClusterSnapshot API operation in the Amazon Web Services Region that contains the source DB cluster snapshot to copy. Use the PreSignedUrl parameter when copying an encrypted DB cluster snapshot from another Amazon Web Services Region. Don't specify PreSignedUrl when copying an encrypted DB cluster snapshot in the same Amazon Web Services Region.

This setting applies only to Amazon Web Services GovCloud (US) Regions. It's ignored in other Amazon Web Services Regions.

The presigned URL must be a valid request for the CopyDBClusterSnapshot API operation that can run in the source Amazon Web Services Region that contains the encrypted DB cluster snapshot to copy. The presigned URL request must contain the following parameter values:

To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and Signature Version 4 Signing Process.

If you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion (or --source-region for the CLI) instead of specifying PreSignedUrl manually. Specifying SourceRegion autogenerates a presigned URL that is a valid request for the operation that can run in the source Amazon Web Services Region.

", "CopyDBParameterGroupMessage$SourceDBParameterGroupIdentifier": "

The identifier or ARN for the source DB parameter group. For information about creating an ARN, see Constructing an ARN for Amazon RDS in the Amazon RDS User Guide.

Constraints:

", "CopyDBParameterGroupMessage$TargetDBParameterGroupIdentifier": "

The identifier for the copied DB parameter group.

Constraints:

Example: my-db-parameter-group

", "CopyDBParameterGroupMessage$TargetDBParameterGroupDescription": "

A description for the copied DB parameter group.

", - "CopyDBSnapshotMessage$SourceDBSnapshotIdentifier": "

The identifier for the source DB snapshot.

If the source snapshot is in the same Amazon Web Services Region as the copy, specify a valid DB snapshot identifier. For example, you might specify rds:mysql-instance1-snapshot-20130805.

If the source snapshot is in a different Amazon Web Services Region than the copy, specify a valid DB snapshot ARN. For example, you might specify arn:aws:rds:us-west-2:123456789012:snapshot:mysql-instance1-snapshot-20130805.

If you are copying from a shared manual DB snapshot, this parameter must be the Amazon Resource Name (ARN) of the shared DB snapshot.

If you are copying an encrypted snapshot this parameter must be in the ARN format for the source Amazon Web Services Region, and must match the SourceDBSnapshotIdentifier in the PreSignedUrl parameter.

Constraints:

Example: rds:mydb-2012-04-02-00-01

Example: arn:aws:rds:us-west-2:123456789012:snapshot:mysql-instance1-snapshot-20130805

", + "CopyDBSnapshotMessage$SourceDBSnapshotIdentifier": "

The identifier for the source DB snapshot.

If the source snapshot is in the same Amazon Web Services Region as the copy, specify a valid DB snapshot identifier. For example, you might specify rds:mysql-instance1-snapshot-20130805.

If the source snapshot is in a different Amazon Web Services Region than the copy, specify a valid DB snapshot ARN. For example, you might specify arn:aws:rds:us-west-2:123456789012:snapshot:mysql-instance1-snapshot-20130805.

If you are copying from a shared manual DB snapshot, this parameter must be the Amazon Resource Name (ARN) of the shared DB snapshot.

If you are copying an encrypted snapshot this parameter must be in the ARN format for the source Amazon Web Services Region.

Constraints:

Example: rds:mydb-2012-04-02-00-01

Example: arn:aws:rds:us-west-2:123456789012:snapshot:mysql-instance1-snapshot-20130805

", "CopyDBSnapshotMessage$TargetDBSnapshotIdentifier": "

The identifier for the copy of the snapshot.

Constraints:

Example: my-db-snapshot

", "CopyDBSnapshotMessage$KmsKeyId": "

The Amazon Web Services KMS key identifier for an encrypted DB snapshot. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key.

If you copy an encrypted DB snapshot from your Amazon Web Services account, you can specify a value for this parameter to encrypt the copy with a new KMS key. If you don't specify a value for this parameter, then the copy of the DB snapshot is encrypted with the same Amazon Web Services KMS key as the source DB snapshot.

If you copy an encrypted DB snapshot that is shared from another Amazon Web Services account, then you must specify a value for this parameter.

If you specify this parameter when you copy an unencrypted snapshot, the copy is encrypted.

If you copy an encrypted snapshot to a different Amazon Web Services Region, then you must specify an Amazon Web Services KMS key identifier for the destination Amazon Web Services Region. KMS keys are specific to the Amazon Web Services Region that they are created in, and you can't use KMS keys from one Amazon Web Services Region in another Amazon Web Services Region.

", - "CopyDBSnapshotMessage$PreSignedUrl": "

The URL that contains a Signature Version 4 signed request for the CopyDBSnapshot API action in the source Amazon Web Services Region that contains the source DB snapshot to copy.

You must specify this parameter when you copy an encrypted DB snapshot from another Amazon Web Services Region by using the Amazon RDS API. Don't specify PreSignedUrl when you are copying an encrypted DB snapshot in the same Amazon Web Services Region.

The presigned URL must be a valid request for the CopyDBSnapshot API action that can be executed in the source Amazon Web Services Region that contains the encrypted DB snapshot to be copied. The presigned URL request must contain the following parameter values:

To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and Signature Version 4 Signing Process.

If you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion (or --source-region for the CLI) instead of specifying PreSignedUrl manually. Specifying SourceRegion autogenerates a pre-signed URL that is a valid request for the operation that can be executed in the source Amazon Web Services Region.

", + "CopyDBSnapshotMessage$PreSignedUrl": "

When you are copying a snapshot from one Amazon Web Services GovCloud (US) Region to another, the URL that contains a Signature Version 4 signed request for the CopyDBSnapshot API operation in the source Amazon Web Services Region that contains the source DB snapshot to copy.

This setting applies only to Amazon Web Services GovCloud (US) Regions. It's ignored in other Amazon Web Services Regions.

You must specify this parameter when you copy an encrypted DB snapshot from another Amazon Web Services Region by using the Amazon RDS API. Don't specify PreSignedUrl when you are copying an encrypted DB snapshot in the same Amazon Web Services Region.

The presigned URL must be a valid request for the CopyDBSnapshot API operation that can run in the source Amazon Web Services Region that contains the encrypted DB snapshot to copy. The presigned URL request must contain the following parameter values:

To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and Signature Version 4 Signing Process.

If you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion (or --source-region for the CLI) instead of specifying PreSignedUrl manually. Specifying SourceRegion autogenerates a presigned URL that is a valid request for the operation that can run in the source Amazon Web Services Region.

", "CopyDBSnapshotMessage$OptionGroupName": "

The name of an option group to associate with the copy of the snapshot.

Specify this option if you are copying a snapshot from one Amazon Web Services Region to another, and your DB instance uses a nondefault option group. If your source DB instance uses Transparent Data Encryption for Oracle or Microsoft SQL Server, you must specify this option when copying across Amazon Web Services Regions. For more information, see Option group considerations in the Amazon RDS User Guide.

", "CopyDBSnapshotMessage$TargetCustomAvailabilityZone": "

The external custom Availability Zone (CAZ) identifier for the target CAZ.

Example: rds-caz-aiqhTgQv.

", "CopyOptionGroupMessage$SourceOptionGroupIdentifier": "

The identifier for the source option group.

Constraints:

", @@ -3751,8 +3751,8 @@ "CreateDBClusterMessage$PreferredMaintenanceWindow": "

The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).

Format: ddd:hh24:mi-ddd:hh24:mi

The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region, occurring on a random day of the week. To see the time blocks available, see Adjusting the Preferred DB Cluster Maintenance Window in the Amazon Aurora User Guide.

Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun.

Constraints: Minimum 30-minute window.

Valid for: Aurora DB clusters and Multi-AZ DB clusters

", "CreateDBClusterMessage$ReplicationSourceIdentifier": "

The Amazon Resource Name (ARN) of the source DB instance or DB cluster if this DB cluster is created as a read replica.

Valid for: Aurora DB clusters only

", "CreateDBClusterMessage$KmsKeyId": "

The Amazon Web Services KMS key identifier for an encrypted DB cluster.

The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN.

When a KMS key isn't specified in KmsKeyId:

There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.

If you create a read replica of an encrypted DB cluster in another Amazon Web Services Region, you must set KmsKeyId to a KMS key identifier that is valid in the destination Amazon Web Services Region. This KMS key is used to encrypt the read replica in that Amazon Web Services Region.

Valid for: Aurora DB clusters and Multi-AZ DB clusters

", - "CreateDBClusterMessage$PreSignedUrl": "

A URL that contains a Signature Version 4 signed request for the CreateDBCluster action to be called in the source Amazon Web Services Region where the DB cluster is replicated from. Specify PreSignedUrl only when you are performing cross-Region replication from an encrypted DB cluster.

The pre-signed URL must be a valid request for the CreateDBCluster API action that can be executed in the source Amazon Web Services Region that contains the encrypted DB cluster to be copied.

The pre-signed URL request must contain the following parameter values:

To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and Signature Version 4 Signing Process.

If you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion (or --source-region for the CLI) instead of specifying PreSignedUrl manually. Specifying SourceRegion autogenerates a pre-signed URL that is a valid request for the operation that can be executed in the source Amazon Web Services Region.

Valid for: Aurora DB clusters only

", - "CreateDBClusterMessage$EngineMode": "

The DB engine mode of the DB cluster, either provisioned, serverless, parallelquery, global, or multimaster.

The parallelquery engine mode isn't required for Aurora MySQL version 1.23 and higher 1.x versions, and version 2.09 and higher 2.x versions.

The global engine mode isn't required for Aurora MySQL version 1.22 and higher 1.x versions, and global engine mode isn't required for any 2.x versions.

The multimaster engine mode only applies for DB clusters created with Aurora MySQL version 5.6.10a.

For Aurora PostgreSQL, the global engine mode isn't required, and both the parallelquery and the multimaster engine modes currently aren't supported.

Limitations and requirements apply to some DB engine modes. For more information, see the following sections in the Amazon Aurora User Guide:

Valid for: Aurora DB clusters only

", + "CreateDBClusterMessage$PreSignedUrl": "

When you are replicating a DB cluster from one Amazon Web Services GovCloud (US) Region to another, a URL that contains a Signature Version 4 signed request for the CreateDBCluster operation to be called in the source Amazon Web Services Region where the DB cluster is replicated from. Specify PreSignedUrl only when you are performing cross-Region replication from an encrypted DB cluster.

The presigned URL must be a valid request for the CreateDBCluster API operation that can run in the source Amazon Web Services Region that contains the encrypted DB cluster to copy.

The presigned URL request must contain the following parameter values:

To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and Signature Version 4 Signing Process.

If you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion (or --source-region for the CLI) instead of specifying PreSignedUrl manually. Specifying SourceRegion autogenerates a presigned URL that is a valid request for the operation that can run in the source Amazon Web Services Region.

Valid for: Aurora DB clusters only

", + "CreateDBClusterMessage$EngineMode": "

The DB engine mode of the DB cluster, either provisioned, serverless, parallelquery, global, or multimaster.

The parallelquery engine mode isn't required for Aurora MySQL version 1.23 and higher 1.x versions, and version 2.09 and higher 2.x versions.

The global engine mode isn't required for Aurora MySQL version 1.22 and higher 1.x versions, and global engine mode isn't required for any 2.x versions.

The multimaster engine mode only applies for DB clusters created with Aurora MySQL version 5.6.10a.

The serverless engine mode only applies for Aurora Serverless v1 DB clusters.

For Aurora PostgreSQL, the global engine mode isn't required, and both the parallelquery and the multimaster engine modes currently aren't supported.

Limitations and requirements apply to some DB engine modes. For more information, see the following sections in the Amazon Aurora User Guide:

Valid for: Aurora DB clusters only

", "CreateDBClusterMessage$GlobalClusterIdentifier": "

The global cluster ID of an Aurora cluster that becomes the primary cluster in the new global database cluster.

Valid for: Aurora DB clusters only

", "CreateDBClusterMessage$Domain": "

The Active Directory directory ID to create the DB cluster in.

For Amazon Aurora DB clusters, Amazon RDS can use Kerberos authentication to authenticate users that connect to the DB cluster.

For more information, see Kerberos authentication in the Amazon Aurora User Guide.

Valid for: Aurora DB clusters only

", "CreateDBClusterMessage$DomainIAMRoleName": "

Specify the name of the IAM role to be used when making API calls to the Directory Service.

Valid for: Aurora DB clusters only

", @@ -3767,7 +3767,7 @@ "CreateDBClusterSnapshotMessage$DBClusterIdentifier": "

The identifier of the DB cluster to create a snapshot for. This parameter isn't case-sensitive.

Constraints:

Example: my-cluster1

", "CreateDBInstanceMessage$DBName": "

The meaning of this parameter differs according to the database engine you use.

MySQL

The name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance.

Constraints:

MariaDB

The name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance.

Constraints:

PostgreSQL

The name of the database to create when the DB instance is created. If this parameter isn't specified, a database named postgres is created in the DB instance.

Constraints:

Oracle

The Oracle System ID (SID) of the created DB instance. If you specify null, the default value ORCL is used. You can't specify the string NULL, or any other reserved word, for DBName.

Default: ORCL

Constraints:

Amazon RDS Custom for Oracle

The Oracle System ID (SID) of the created RDS Custom DB instance. If you don't specify a value, the default value is ORCL.

Default: ORCL

Constraints:

Amazon RDS Custom for SQL Server

Not applicable. Must be null.

SQL Server

Not applicable. Must be null.

Amazon Aurora MySQL

The name of the database to create when the primary DB instance of the Aurora MySQL DB cluster is created. If this parameter isn't specified for an Aurora MySQL DB cluster, no database is created in the DB cluster.

Constraints:

Amazon Aurora PostgreSQL

The name of the database to create when the primary DB instance of the Aurora PostgreSQL DB cluster is created. If this parameter isn't specified for an Aurora PostgreSQL DB cluster, a database named postgres is created in the DB cluster.

Constraints:

", "CreateDBInstanceMessage$DBInstanceIdentifier": "

The DB instance identifier. This parameter is stored as a lowercase string.

Constraints:

Example: mydbinstance

", - "CreateDBInstanceMessage$DBInstanceClass": "

The compute and memory capacity of the DB instance, for example db.m4.large. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB Instance Class in the Amazon RDS User Guide.

", + "CreateDBInstanceMessage$DBInstanceClass": "

The compute and memory capacity of the DB instance, for example db.m5.large. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB instance classes in the Amazon RDS User Guide or Aurora DB instance classes in the Amazon Aurora User Guide.

", "CreateDBInstanceMessage$Engine": "

The name of the database engine to be used for this instance.

Not every database engine is available for every Amazon Web Services Region.

Valid Values:

", "CreateDBInstanceMessage$MasterUsername": "

The name for the master user.

Amazon Aurora

Not applicable. The name for the master user is managed by the DB cluster.

Amazon RDS

Constraints:

", "CreateDBInstanceMessage$MasterUserPassword": "

The password for the master user. The password can include any printable ASCII character except \"/\", \"\"\", or \"@\".

Amazon Aurora

Not applicable. The password for the master user is managed by the DB cluster.

MariaDB

Constraints: Must contain from 8 to 41 characters.

Microsoft SQL Server

Constraints: Must contain from 8 to 128 characters.

MySQL

Constraints: Must contain from 8 to 41 characters.

Oracle

Constraints: Must contain from 8 to 30 characters.

PostgreSQL

Constraints: Must contain from 8 to 128 characters.

", @@ -3776,19 +3776,19 @@ "CreateDBInstanceMessage$PreferredMaintenanceWindow": "

The time range each week during which system maintenance can occur, in Universal Coordinated Time (UTC). For more information, see Amazon RDS Maintenance Window.

Format: ddd:hh24:mi-ddd:hh24:mi

The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region, occurring on a random day of the week.

Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun.

Constraints: Minimum 30-minute window.

", "CreateDBInstanceMessage$DBParameterGroupName": "

The name of the DB parameter group to associate with this DB instance. If you do not specify a value, then the default DB parameter group for the specified DB engine and version is used.

This setting doesn't apply to RDS Custom.

Constraints:

", "CreateDBInstanceMessage$PreferredBackupWindow": "

The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod parameter. The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region. For more information, see Backup window in the Amazon RDS User Guide.

Amazon Aurora

Not applicable. The daily time range for creating automated backups is managed by the DB cluster.

Constraints:

", - "CreateDBInstanceMessage$EngineVersion": "

The version number of the database engine to use.

For a list of valid engine versions, use the DescribeDBEngineVersions action.

The following are the database engines and links to information about the major and minor versions that are available with Amazon RDS. Not every database engine is available for every Amazon Web Services Region.

Amazon Aurora

Not applicable. The version number of the database engine to be used by the DB instance is managed by the DB cluster.

Amazon RDS Custom for Oracle

A custom engine version (CEV) that you have previously created. This setting is required for RDS Custom for Oracle. The CEV name has the following format: 19.customized_string . An example identifier is 19.my_cev1. For more information, see Creating an RDS Custom for Oracle DB instance in the Amazon RDS User Guide.

Amazon RDS Custom for SQL Server

See RDS Custom for SQL Server general requirements in the Amazon RDS User Guide.

MariaDB

For information, see MariaDB on Amazon RDS Versions in the Amazon RDS User Guide.

Microsoft SQL Server

For information, see Microsoft SQL Server Versions on Amazon RDS in the Amazon RDS User Guide.

MySQL

For information, see MySQL on Amazon RDS Versions in the Amazon RDS User Guide.

Oracle

For information, see Oracle Database Engine Release Notes in the Amazon RDS User Guide.

PostgreSQL

For information, see Amazon RDS for PostgreSQL versions and extensions in the Amazon RDS User Guide.

", - "CreateDBInstanceMessage$LicenseModel": "

License model information for this DB instance.

Valid values: license-included | bring-your-own-license | general-public-license

This setting doesn't apply to RDS Custom.

", - "CreateDBInstanceMessage$OptionGroupName": "

A value that indicates that the DB instance should be associated with the specified option group.

Permanent options, such as the TDE option for Oracle Advanced Security TDE, can't be removed from an option group. Also, that option group can't be removed from a DB instance after it is associated with a DB instance.

This setting doesn't apply to RDS Custom.

", + "CreateDBInstanceMessage$EngineVersion": "

The version number of the database engine to use.

For a list of valid engine versions, use the DescribeDBEngineVersions operation.

The following are the database engines and links to information about the major and minor versions that are available with Amazon RDS. Not every database engine is available for every Amazon Web Services Region.

Amazon Aurora

Not applicable. The version number of the database engine to be used by the DB instance is managed by the DB cluster.

Amazon RDS Custom for Oracle

A custom engine version (CEV) that you have previously created. This setting is required for RDS Custom for Oracle. The CEV name has the following format: 19.customized_string. An example identifier is 19.my_cev1. For more information, see Creating an RDS Custom for Oracle DB instance in the Amazon RDS User Guide.

Amazon RDS Custom for SQL Server

See RDS Custom for SQL Server general requirements in the Amazon RDS User Guide.

MariaDB

For information, see MariaDB on Amazon RDS Versions in the Amazon RDS User Guide.

Microsoft SQL Server

For information, see Microsoft SQL Server Versions on Amazon RDS in the Amazon RDS User Guide.

MySQL

For information, see MySQL on Amazon RDS Versions in the Amazon RDS User Guide.

Oracle

For information, see Oracle Database Engine Release Notes in the Amazon RDS User Guide.

PostgreSQL

For information, see Amazon RDS for PostgreSQL versions and extensions in the Amazon RDS User Guide.

", + "CreateDBInstanceMessage$LicenseModel": "

License model information for this DB instance.

Valid values: license-included | bring-your-own-license | general-public-license

This setting doesn't apply to RDS Custom.

Amazon Aurora

Not applicable.

", + "CreateDBInstanceMessage$OptionGroupName": "

A value that indicates that the DB instance should be associated with the specified option group.

Permanent options, such as the TDE option for Oracle Advanced Security TDE, can't be removed from an option group. Also, that option group can't be removed from a DB instance after it is associated with a DB instance.

This setting doesn't apply to RDS Custom.

Amazon Aurora

Not applicable.

", "CreateDBInstanceMessage$CharacterSetName": "

For supported engines, this value indicates that the DB instance should be associated with the specified CharacterSet.

This setting doesn't apply to RDS Custom. However, if you need to change the character set, you can change it on the database itself.

Amazon Aurora

Not applicable. The character set is managed by the DB cluster. For more information, see CreateDBCluster.

", "CreateDBInstanceMessage$NcharCharacterSetName": "

The name of the NCHAR character set for the Oracle DB instance.

This parameter doesn't apply to RDS Custom.

", "CreateDBInstanceMessage$DBClusterIdentifier": "

The identifier of the DB cluster that the instance will belong to.

This setting doesn't apply to RDS Custom.

", - "CreateDBInstanceMessage$StorageType": "

Specifies the storage type to be associated with the DB instance.

Valid values: standard | gp2 | io1

If you specify io1, you must also include a value for the Iops parameter.

Default: io1 if the Iops parameter is specified, otherwise gp2

", - "CreateDBInstanceMessage$TdeCredentialArn": "

The ARN from the key store with which to associate the instance for TDE encryption.

This setting doesn't apply to RDS Custom.

", + "CreateDBInstanceMessage$StorageType": "

Specifies the storage type to be associated with the DB instance.

Valid values: standard | gp2 | io1

If you specify io1, you must also include a value for the Iops parameter.

Default: io1 if the Iops parameter is specified, otherwise gp2

Amazon Aurora

Not applicable. Storage is managed by the DB cluster.

", + "CreateDBInstanceMessage$TdeCredentialArn": "

The ARN from the key store with which to associate the instance for TDE encryption.

This setting doesn't apply to RDS Custom.

Amazon Aurora

Not applicable.

", "CreateDBInstanceMessage$TdeCredentialPassword": "

The password for the given ARN from the key store in order to access the device.

This setting doesn't apply to RDS Custom.

", "CreateDBInstanceMessage$KmsKeyId": "

The Amazon Web Services KMS key identifier for an encrypted DB instance.

The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN.

Amazon Aurora

Not applicable. The Amazon Web Services KMS key identifier is managed by the DB cluster. For more information, see CreateDBCluster.

If StorageEncrypted is enabled, and you do not specify a value for the KmsKeyId parameter, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.

Amazon RDS Custom

A KMS key is required for RDS Custom instances. For most RDS engines, if you leave this parameter empty while enabling StorageEncrypted, the engine uses the default KMS key. However, RDS Custom doesn't use the default key when this parameter is empty. You must explicitly specify a key.

", - "CreateDBInstanceMessage$Domain": "

The Active Directory directory ID to create the DB instance in. Currently, only MySQL, Microsoft SQL Server, Oracle, and PostgreSQL DB instances can be created in an Active Directory Domain.

For more information, see Kerberos Authentication in the Amazon RDS User Guide.

This setting doesn't apply to RDS Custom.

", + "CreateDBInstanceMessage$Domain": "

The Active Directory directory ID to create the DB instance in. Currently, only MySQL, Microsoft SQL Server, Oracle, and PostgreSQL DB instances can be created in an Active Directory Domain.

For more information, see Kerberos Authentication in the Amazon RDS User Guide.

This setting doesn't apply to RDS Custom.

Amazon Aurora

Not applicable. The domain is managed by the DB cluster.

", "CreateDBInstanceMessage$MonitoringRoleArn": "

The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to Amazon CloudWatch Logs. For example, arn:aws:iam:123456789012:role/emaccess. For information on creating a monitoring role, see Setting Up and Enabling Enhanced Monitoring in the Amazon RDS User Guide.

If MonitoringInterval is set to a value other than 0, then you must supply a MonitoringRoleArn value.

This setting doesn't apply to RDS Custom.

", - "CreateDBInstanceMessage$DomainIAMRoleName": "

Specify the name of the IAM role to be used when making API calls to the Directory Service.

This setting doesn't apply to RDS Custom.

", + "CreateDBInstanceMessage$DomainIAMRoleName": "

Specify the name of the IAM role to be used when making API calls to the Directory Service.

This setting doesn't apply to RDS Custom.

Amazon Aurora

Not applicable. The domain is managed by the DB cluster.

", "CreateDBInstanceMessage$Timezone": "

The time zone of the DB instance. The time zone parameter is currently supported only by Microsoft SQL Server.

", "CreateDBInstanceMessage$PerformanceInsightsKMSKeyId": "

The Amazon Web Services KMS key identifier for encryption of Performance Insights data.

The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key.

If you do not specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.

This setting doesn't apply to RDS Custom.

", "CreateDBInstanceMessage$CustomIamInstanceProfile": "

The instance profile associated with the underlying Amazon EC2 instance of an RDS Custom DB instance. The instance profile must meet the following requirements:

For the list of permissions required for the IAM role, see Configure IAM and your VPC in the Amazon RDS User Guide.

This setting is required for RDS Custom.

", @@ -3799,12 +3799,12 @@ "CreateDBInstanceReadReplicaMessage$DBInstanceClass": "

The compute and memory capacity of the read replica, for example db.m4.large. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB Instance Class in the Amazon RDS User Guide.

Default: Inherits from the source DB instance.

", "CreateDBInstanceReadReplicaMessage$AvailabilityZone": "

The Availability Zone (AZ) where the read replica will be created.

Default: A random, system-chosen Availability Zone in the endpoint's Amazon Web Services Region.

Example: us-east-1d

", "CreateDBInstanceReadReplicaMessage$OptionGroupName": "

The option group the DB instance is associated with. If omitted, the option group associated with the source instance is used.

For SQL Server, you must use the option group associated with the source instance.

This setting doesn't apply to RDS Custom.

", - "CreateDBInstanceReadReplicaMessage$DBParameterGroupName": "

The name of the DB parameter group to associate with this DB instance.

If you do not specify a value for DBParameterGroupName, then Amazon RDS uses the DBParameterGroup of source DB instance for a same Region read replica, or the default DBParameterGroup for the specified DB engine for a cross-Region read replica.

Specifying a parameter group for this operation is only supported for Oracle DB instances. It isn't supported for RDS Custom.

Constraints:

", + "CreateDBInstanceReadReplicaMessage$DBParameterGroupName": "

The name of the DB parameter group to associate with this DB instance.

If you do not specify a value for DBParameterGroupName, then Amazon RDS uses the DBParameterGroup of source DB instance for a same Region read replica, or the default DBParameterGroup for the specified DB engine for a cross-Region read replica.

Specifying a parameter group for this operation is only supported for MySQL and Oracle DB instances. It isn't supported for RDS Custom.

Constraints:

", "CreateDBInstanceReadReplicaMessage$DBSubnetGroupName": "

Specifies a DB subnet group for the DB instance. The new DB instance is created in the VPC associated with the DB subnet group. If no DB subnet group is specified, then the new DB instance isn't created in a VPC.

Constraints:

Example: mydbsubnetgroup

", "CreateDBInstanceReadReplicaMessage$StorageType": "

Specifies the storage type to be associated with the read replica.

Valid values: standard | gp2 | io1

If you specify io1, you must also include a value for the Iops parameter.

Default: io1 if the Iops parameter is specified, otherwise gp2

", "CreateDBInstanceReadReplicaMessage$MonitoringRoleArn": "

The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to Amazon CloudWatch Logs. For example, arn:aws:iam:123456789012:role/emaccess. For information on creating a monitoring role, go to To create an IAM role for Amazon RDS Enhanced Monitoring in the Amazon RDS User Guide.

If MonitoringInterval is set to a value other than 0, then you must supply a MonitoringRoleArn value.

This setting doesn't apply to RDS Custom.

", "CreateDBInstanceReadReplicaMessage$KmsKeyId": "

The Amazon Web Services KMS key identifier for an encrypted read replica.

The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key.

If you create an encrypted read replica in the same Amazon Web Services Region as the source DB instance, then do not specify a value for this parameter. A read replica in the same Amazon Web Services Region is always encrypted with the same KMS key as the source DB instance.

If you create an encrypted read replica in a different Amazon Web Services Region, then you must specify a KMS key identifier for the destination Amazon Web Services Region. KMS keys are specific to the Amazon Web Services Region that they are created in, and you can't use KMS keys from one Amazon Web Services Region in another Amazon Web Services Region.

You can't create an encrypted read replica from an unencrypted DB instance.

This setting doesn't apply to RDS Custom, which uses the same KMS key as the primary replica.

", - "CreateDBInstanceReadReplicaMessage$PreSignedUrl": "

The URL that contains a Signature Version 4 signed request for the CreateDBInstanceReadReplica API action in the source Amazon Web Services Region that contains the source DB instance.

You must specify this parameter when you create an encrypted read replica from another Amazon Web Services Region by using the Amazon RDS API. Don't specify PreSignedUrl when you are creating an encrypted read replica in the same Amazon Web Services Region.

The presigned URL must be a valid request for the CreateDBInstanceReadReplica API action that can be executed in the source Amazon Web Services Region that contains the encrypted source DB instance. The presigned URL request must contain the following parameter values:

To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and Signature Version 4 Signing Process.

If you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion (or --source-region for the CLI) instead of specifying PreSignedUrl manually. Specifying SourceRegion autogenerates a presigned URL that is a valid request for the operation that can be executed in the source Amazon Web Services Region.

SourceRegion isn't supported for SQL Server, because SQL Server on Amazon RDS doesn't support cross-Region read replicas.

This setting doesn't apply to RDS Custom.

", + "CreateDBInstanceReadReplicaMessage$PreSignedUrl": "

When you are creating a read replica from one Amazon Web Services GovCloud (US) Region to another or from one China Amazon Web Services Region to another, the URL that contains a Signature Version 4 signed request for the CreateDBInstanceReadReplica API operation in the source Amazon Web Services Region that contains the source DB instance.

This setting applies only to Amazon Web Services GovCloud (US) Regions and China Amazon Web Services Regions. It's ignored in other Amazon Web Services Regions.

You must specify this parameter when you create an encrypted read replica from another Amazon Web Services Region by using the Amazon RDS API. Don't specify PreSignedUrl when you are creating an encrypted read replica in the same Amazon Web Services Region.

The presigned URL must be a valid request for the CreateDBInstanceReadReplica API operation that can run in the source Amazon Web Services Region that contains the encrypted source DB instance. The presigned URL request must contain the following parameter values:

To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and Signature Version 4 Signing Process.

If you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion (or --source-region for the CLI) instead of specifying PreSignedUrl manually. Specifying SourceRegion autogenerates a presigned URL that is a valid request for the operation that can run in the source Amazon Web Services Region.

SourceRegion isn't supported for SQL Server, because Amazon RDS for SQL Server doesn't support cross-Region read replicas.

This setting doesn't apply to RDS Custom.

", "CreateDBInstanceReadReplicaMessage$PerformanceInsightsKMSKeyId": "

The Amazon Web Services KMS key identifier for encryption of Performance Insights data.

The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key.

If you do not specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.

This setting doesn't apply to RDS Custom.

", "CreateDBInstanceReadReplicaMessage$Domain": "

The Active Directory directory ID to create the DB instance in. Currently, only MySQL, Microsoft SQL Server, Oracle, and PostgreSQL DB instances can be created in an Active Directory Domain.

For more information, see Kerberos Authentication in the Amazon RDS User Guide.

This setting doesn't apply to RDS Custom.

", "CreateDBInstanceReadReplicaMessage$DomainIAMRoleName": "

Specify the name of the IAM role to be used when making API calls to the Directory Service.

This setting doesn't apply to RDS Custom.

", @@ -3828,7 +3828,7 @@ "CreateGlobalClusterMessage$SourceDBClusterIdentifier": "

The Amazon Resource Name (ARN) to use as the primary cluster of the global database. This parameter is optional.

", "CreateGlobalClusterMessage$Engine": "

The name of the database engine to be used for this DB cluster.

", "CreateGlobalClusterMessage$EngineVersion": "

The engine version of the Aurora global database.

", - "CreateGlobalClusterMessage$DatabaseName": "

The name for your database of up to 64 alpha-numeric characters. If you do not provide a name, Amazon Aurora will not create a database in the global database cluster you are creating.

", + "CreateGlobalClusterMessage$DatabaseName": "

The name for your database of up to 64 alphanumeric characters. If you do not provide a name, Amazon Aurora will not create a database in the global database cluster you are creating.

", "CreateOptionGroupMessage$OptionGroupName": "

Specifies the name of the option group to be created.

Constraints:

Example: myoptiongroup

", "CreateOptionGroupMessage$EngineName": "

Specifies the name of the engine that this option group should be associated with.

Valid Values:

", "CreateOptionGroupMessage$MajorEngineVersion": "

Specifies the major version of the engine that this option group should be associated with.

", @@ -3988,7 +3988,7 @@ "DBParameterGroupsMessage$Marker": "

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

", "DBProxy$DBProxyName": "

The identifier for the proxy. This name must be unique for all proxies owned by your Amazon Web Services account in the specified Amazon Web Services Region.

", "DBProxy$DBProxyArn": "

The Amazon Resource Name (ARN) for the proxy.

", - "DBProxy$EngineFamily": "

The engine family applies to MySQL and PostgreSQL for both RDS and Aurora.

", + "DBProxy$EngineFamily": "

The kinds of databases that the proxy can connect to. This value determines which database network protocol the proxy recognizes when it interprets network traffic to and from the database. MYSQL supports Aurora MySQL, RDS for MariaDB, and RDS for MySQL databases. POSTGRESQL supports Aurora PostgreSQL and RDS for PostgreSQL databases.

", "DBProxy$VpcId": "

Provides the VPC ID of the DB proxy.

", "DBProxy$RoleArn": "

The Amazon Resource Name (ARN) for the IAM role that the proxy uses to access Amazon Secrets Manager.

", "DBProxy$Endpoint": "

The endpoint that you can use to connect to the DB proxy. You include the endpoint value in the connection string for a database client application.

", @@ -4118,8 +4118,8 @@ "DescribeDBSecurityGroupsMessage$DBSecurityGroupName": "

The name of the DB security group to return details for.

", "DescribeDBSecurityGroupsMessage$Marker": "

An optional pagination token provided by a previous DescribeDBSecurityGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

", "DescribeDBSnapshotAttributesMessage$DBSnapshotIdentifier": "

The identifier for the DB snapshot to describe the attributes for.

", - "DescribeDBSnapshotsMessage$DBInstanceIdentifier": "

The ID of the DB instance to retrieve the list of DB snapshots for. This parameter can't be used in conjunction with DBSnapshotIdentifier. This parameter isn't case-sensitive.

Constraints:

", - "DescribeDBSnapshotsMessage$DBSnapshotIdentifier": "

A specific DB snapshot identifier to describe. This parameter can't be used in conjunction with DBInstanceIdentifier. This value is stored as a lowercase string.

Constraints:

", + "DescribeDBSnapshotsMessage$DBInstanceIdentifier": "

The ID of the DB instance to retrieve the list of DB snapshots for. This parameter isn't case-sensitive.

Constraints:

", + "DescribeDBSnapshotsMessage$DBSnapshotIdentifier": "

A specific DB snapshot identifier to describe. This value is stored as a lowercase string.

Constraints:

", "DescribeDBSnapshotsMessage$SnapshotType": "

The type of snapshots to be returned. You can specify one of the following values:

If you don't specify a SnapshotType value, then both automated and manual snapshots are returned. Shared and public DB snapshots are not included in the returned results by default. You can include shared snapshots with these results by enabling the IncludeShared parameter. You can include public snapshots with these results by enabling the IncludePublic parameter.

The IncludeShared and IncludePublic parameters don't apply for SnapshotType values of manual or automated. The IncludePublic parameter doesn't apply when SnapshotType is set to shared. The IncludeShared parameter doesn't apply when SnapshotType is set to public.

", "DescribeDBSnapshotsMessage$Marker": "

An optional pagination token provided by a previous DescribeDBSnapshots request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

", "DescribeDBSnapshotsMessage$DbiResourceId": "

A specific DB resource ID to describe.

", @@ -4258,11 +4258,11 @@ "ModifyDBClusterMessage$PerformanceInsightsKMSKeyId": "

The Amazon Web Services KMS key identifier for encryption of Performance Insights data.

The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key.

If you don't specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.

Valid for: Multi-AZ DB clusters only

", "ModifyDBClusterParameterGroupMessage$DBClusterParameterGroupName": "

The name of the DB cluster parameter group to modify.

", "ModifyDBClusterSnapshotAttributeMessage$DBClusterSnapshotIdentifier": "

The identifier for the DB cluster snapshot to modify the attributes for.

", - "ModifyDBClusterSnapshotAttributeMessage$AttributeName": "

The name of the DB cluster snapshot attribute to modify.

To manage authorization for other Amazon Web Services accounts to copy or restore a manual DB cluster snapshot, set this value to restore.

To view the list of attributes available to modify, use the DescribeDBClusterSnapshotAttributes API action.

", + "ModifyDBClusterSnapshotAttributeMessage$AttributeName": "

The name of the DB cluster snapshot attribute to modify.

To manage authorization for other Amazon Web Services accounts to copy or restore a manual DB cluster snapshot, set this value to restore.

To view the list of attributes available to modify, use the DescribeDBClusterSnapshotAttributes API operation.

", "ModifyDBInstanceMessage$DBInstanceIdentifier": "

The DB instance identifier. This value is stored as a lowercase string.

Constraints:

", - "ModifyDBInstanceMessage$DBInstanceClass": "

The new compute and memory capacity of the DB instance, for example db.m4.large. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB Instance Class in the Amazon RDS User Guide.

If you modify the DB instance class, an outage occurs during the change. The change is applied during the next maintenance window, unless ApplyImmediately is enabled for this request.

This setting doesn't apply to RDS Custom for Oracle.

Default: Uses existing setting

", + "ModifyDBInstanceMessage$DBInstanceClass": "

The new compute and memory capacity of the DB instance, for example db.m5.large. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB instance classes in the Amazon RDS User Guide or Aurora DB instance classes in the Amazon Aurora User Guide.

If you modify the DB instance class, an outage occurs during the change. The change is applied during the next maintenance window, unless ApplyImmediately is enabled for this request.

This setting doesn't apply to RDS Custom for Oracle.

Default: Uses existing setting

", "ModifyDBInstanceMessage$DBSubnetGroupName": "

The new DB subnet group for the DB instance. You can use this parameter to move your DB instance to a different VPC. If your DB instance isn't in a VPC, you can also use this parameter to move your DB instance into a VPC. For more information, see Working with a DB instance in a VPC in the Amazon RDS User Guide.

Changing the subnet group causes an outage during the change. The change is applied during the next maintenance window, unless you enable ApplyImmediately.

This parameter doesn't apply to RDS Custom.

Constraints: If supplied, must match the name of an existing DBSubnetGroup.

Example: mydbsubnetgroup

", - "ModifyDBInstanceMessage$MasterUserPassword": "

The new password for the master user. The password can include any printable ASCII character except \"/\", \"\"\", or \"@\".

Changing this parameter doesn't result in an outage and the change is asynchronously applied as soon as possible. Between the time of the request and the completion of the request, the MasterUserPassword element exists in the PendingModifiedValues element of the operation response.

This setting doesn't apply to RDS Custom.

Amazon Aurora

Not applicable. The password for the master user is managed by the DB cluster. For more information, see ModifyDBCluster.

Default: Uses existing setting

MariaDB

Constraints: Must contain from 8 to 41 characters.

Microsoft SQL Server

Constraints: Must contain from 8 to 128 characters.

MySQL

Constraints: Must contain from 8 to 41 characters.

Oracle

Constraints: Must contain from 8 to 30 characters.

PostgreSQL

Constraints: Must contain from 8 to 128 characters.

Amazon RDS API actions never return the password, so this action provides a way to regain access to a primary instance user if the password is lost. This includes restoring privileges that might have been accidentally revoked.

", + "ModifyDBInstanceMessage$MasterUserPassword": "

The new password for the master user. The password can include any printable ASCII character except \"/\", \"\"\", or \"@\".

Changing this parameter doesn't result in an outage and the change is asynchronously applied as soon as possible. Between the time of the request and the completion of the request, the MasterUserPassword element exists in the PendingModifiedValues element of the operation response.

This setting doesn't apply to RDS Custom.

Amazon Aurora

Not applicable. The password for the master user is managed by the DB cluster. For more information, see ModifyDBCluster.

Default: Uses existing setting

MariaDB

Constraints: Must contain from 8 to 41 characters.

Microsoft SQL Server

Constraints: Must contain from 8 to 128 characters.

MySQL

Constraints: Must contain from 8 to 41 characters.

Oracle

Constraints: Must contain from 8 to 30 characters.

PostgreSQL

Constraints: Must contain from 8 to 128 characters.

Amazon RDS API operations never return the password, so this action provides a way to regain access to a primary instance user if the password is lost. This includes restoring privileges that might have been accidentally revoked.

", "ModifyDBInstanceMessage$DBParameterGroupName": "

The name of the DB parameter group to apply to the DB instance.

Changing this setting doesn't result in an outage. The parameter group name itself is changed immediately, but the actual parameter changes are not applied until you reboot the instance without failover. In this case, the DB instance isn't rebooted automatically, and the parameter changes aren't applied during the next maintenance window. However, if you modify dynamic parameters in the newly associated DB parameter group, these changes are applied immediately without a reboot.

This setting doesn't apply to RDS Custom.

Default: Uses existing setting

Constraints: The DB parameter group must be in the same DB parameter group family as the DB instance.

", "ModifyDBInstanceMessage$PreferredBackupWindow": "

The daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod parameter. Changing this parameter doesn't result in an outage and the change is asynchronously applied as soon as possible. The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region. For more information, see Backup window in the Amazon RDS User Guide.

Amazon Aurora

Not applicable. The daily time range for creating automated backups is managed by the DB cluster. For more information, see ModifyDBCluster.

Constraints:

", "ModifyDBInstanceMessage$PreferredMaintenanceWindow": "

The weekly time range (in UTC) during which system maintenance can occur, which might result in an outage. Changing this parameter doesn't result in an outage, except in the following situation, and the change is asynchronously applied as soon as possible. If there are pending actions that cause a reboot, and the maintenance window is changed to include the current time, then changing this parameter will cause a reboot of the DB instance. If moving this window to the current time, there must be at least 30 minutes between the current time and end of the window to ensure pending changes are applied.

For more information, see Amazon RDS Maintenance Window in the Amazon RDS User Guide.

Default: Uses existing setting

Format: ddd:hh24:mi-ddd:hh24:mi

Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun

Constraints: Must be at least 30 minutes

", @@ -4283,11 +4283,11 @@ "ModifyDBProxyRequest$DBProxyName": "

The identifier for the DBProxy to modify.

", "ModifyDBProxyRequest$NewDBProxyName": "

The new identifier for the DBProxy. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens; it can't end with a hyphen or contain two consecutive hyphens.

", "ModifyDBProxyRequest$RoleArn": "

The Amazon Resource Name (ARN) of the IAM role that the proxy uses to access secrets in Amazon Web Services Secrets Manager.

", - "ModifyDBProxyTargetGroupRequest$TargetGroupName": "

The name of the new target group to assign to the proxy.

", - "ModifyDBProxyTargetGroupRequest$DBProxyName": "

The name of the new proxy to which to assign the target group.

", + "ModifyDBProxyTargetGroupRequest$TargetGroupName": "

The name of the target group to modify.

", + "ModifyDBProxyTargetGroupRequest$DBProxyName": "

The name of the proxy.

", "ModifyDBProxyTargetGroupRequest$NewName": "

The new name for the modified DBProxyTarget. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens; it can't end with a hyphen or contain two consecutive hyphens.

", "ModifyDBSnapshotAttributeMessage$DBSnapshotIdentifier": "

The identifier for the DB snapshot to modify the attributes for.

", - "ModifyDBSnapshotAttributeMessage$AttributeName": "

The name of the DB snapshot attribute to modify.

To manage authorization for other Amazon Web Services accounts to copy or restore a manual DB snapshot, set this value to restore.

To view the list of attributes available to modify, use the DescribeDBSnapshotAttributes API action.

", + "ModifyDBSnapshotAttributeMessage$AttributeName": "

The name of the DB snapshot attribute to modify.

To manage authorization for other Amazon Web Services accounts to copy or restore a manual DB snapshot, set this value to restore.

To view the list of attributes available to modify, use the DescribeDBSnapshotAttributes API operation.

", "ModifyDBSnapshotMessage$DBSnapshotIdentifier": "

The identifier of the DB snapshot to modify.

", "ModifyDBSnapshotMessage$EngineVersion": "

The engine version to upgrade the DB snapshot to.

The following are the database engines and engine versions that are available when you upgrade a DB snapshot.

MySQL

Oracle

PostgreSQL

For the list of engine versions that are available for upgrading a DB snapshot, see Upgrading the PostgreSQL DB Engine for Amazon RDS.

", "ModifyDBSnapshotMessage$OptionGroupName": "

The option group to identify with the upgraded DB snapshot.

You can specify this parameter when you upgrade an Oracle DB snapshot. The same option group considerations apply when upgrading a DB snapshot as when upgrading a DB instance. For more information, see Option group considerations in the Amazon RDS User Guide.

", @@ -4415,8 +4415,8 @@ "RestoreDBClusterFromS3Message$DBClusterIdentifier": "

The name of the DB cluster to create from the source data in the Amazon S3 bucket. This parameter isn't case-sensitive.

Constraints:

Example: my-cluster1

", "RestoreDBClusterFromS3Message$DBClusterParameterGroupName": "

The name of the DB cluster parameter group to associate with the restored DB cluster. If this argument is omitted, default.aurora5.6 is used.

Constraints:

", "RestoreDBClusterFromS3Message$DBSubnetGroupName": "

A DB subnet group to associate with the restored DB cluster.

Constraints: If supplied, must match the name of an existing DBSubnetGroup.

Example: mydbsubnetgroup

", - "RestoreDBClusterFromS3Message$Engine": "

The name of the database engine to be used for this DB cluster.

Valid Values: aurora (for MySQL 5.6-compatible Aurora), aurora-mysql (for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora), and aurora-postgresql

", - "RestoreDBClusterFromS3Message$EngineVersion": "

The version number of the database engine to use.

To list all of the available engine versions for aurora (for MySQL 5.6-compatible Aurora), use the following command:

aws rds describe-db-engine-versions --engine aurora --query \"DBEngineVersions[].EngineVersion\"

To list all of the available engine versions for aurora-mysql (for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora), use the following command:

aws rds describe-db-engine-versions --engine aurora-mysql --query \"DBEngineVersions[].EngineVersion\"

To list all of the available engine versions for aurora-postgresql, use the following command:

aws rds describe-db-engine-versions --engine aurora-postgresql --query \"DBEngineVersions[].EngineVersion\"

Aurora MySQL

Example: 5.6.10a, 5.6.mysql_aurora.1.19.2, 5.7.12, 5.7.mysql_aurora.2.04.5, 8.0.mysql_aurora.3.01.0

Aurora PostgreSQL

Example: 9.6.3, 10.7

", + "RestoreDBClusterFromS3Message$Engine": "

The name of the database engine to be used for this DB cluster.

Valid Values: aurora (for MySQL 5.6-compatible Aurora) and aurora-mysql (for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora)

", + "RestoreDBClusterFromS3Message$EngineVersion": "

The version number of the database engine to use.

To list all of the available engine versions for aurora (for MySQL 5.6-compatible Aurora), use the following command:

aws rds describe-db-engine-versions --engine aurora --query \"DBEngineVersions[].EngineVersion\"

To list all of the available engine versions for aurora-mysql (for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora), use the following command:

aws rds describe-db-engine-versions --engine aurora-mysql --query \"DBEngineVersions[].EngineVersion\"

Aurora MySQL

Example: 5.6.10a, 5.6.mysql_aurora.1.19.2, 5.7.mysql_aurora.2.07.1, 8.0.mysql_aurora.3.02.0

", "RestoreDBClusterFromS3Message$MasterUsername": "

The name of the master user for the restored DB cluster.

Constraints:

", "RestoreDBClusterFromS3Message$MasterUserPassword": "

The password for the master database user. This password can contain any printable ASCII character except \"/\", \"\"\", or \"@\".

Constraints: Must contain from 8 to 41 characters.

", "RestoreDBClusterFromS3Message$OptionGroupName": "

A value that indicates that the restored DB cluster should be associated with the specified option group.

Permanent options can't be removed from an option group. An option group can't be removed from a DB cluster once it is associated with a DB cluster.

", @@ -4537,7 +4537,7 @@ "StartDBClusterMessage$DBClusterIdentifier": "

The DB cluster identifier of the Amazon Aurora DB cluster to be started. This parameter is stored as a lowercase string.

", "StartDBInstanceAutomatedBackupsReplicationMessage$SourceDBInstanceArn": "

The Amazon Resource Name (ARN) of the source DB instance for the replicated automated backups, for example, arn:aws:rds:us-west-2:123456789012:db:mydatabase.

", "StartDBInstanceAutomatedBackupsReplicationMessage$KmsKeyId": "

The Amazon Web Services KMS key identifier for encryption of the replicated automated backups. The KMS key ID is the Amazon Resource Name (ARN) for the KMS encryption key in the destination Amazon Web Services Region, for example, arn:aws:kms:us-east-1:123456789012:key/AKIAIOSFODNN7EXAMPLE.

", - "StartDBInstanceAutomatedBackupsReplicationMessage$PreSignedUrl": "

A URL that contains a Signature Version 4 signed request for the StartDBInstanceAutomatedBackupsReplication action to be called in the Amazon Web Services Region of the source DB instance. The presigned URL must be a valid request for the StartDBInstanceAutomatedBackupsReplication API action that can be executed in the Amazon Web Services Region that contains the source DB instance.

", + "StartDBInstanceAutomatedBackupsReplicationMessage$PreSignedUrl": "

In an Amazon Web Services GovCloud (US) Region, an URL that contains a Signature Version 4 signed request for the StartDBInstanceAutomatedBackupsReplication operation to call in the Amazon Web Services Region of the source DB instance. The presigned URL must be a valid request for the StartDBInstanceAutomatedBackupsReplication API operation that can run in the Amazon Web Services Region that contains the source DB instance.

This setting applies only to Amazon Web Services GovCloud (US) Regions. It's ignored in other Amazon Web Services Regions.

To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and Signature Version 4 Signing Process.

If you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion (or --source-region for the CLI) instead of specifying PreSignedUrl manually. Specifying SourceRegion autogenerates a presigned URL that is a valid request for the operation that can run in the source Amazon Web Services Region.

", "StartDBInstanceMessage$DBInstanceIdentifier": "

The user-supplied instance identifier.

", "StartExportTaskMessage$ExportTaskIdentifier": "

A unique identifier for the snapshot export task. This ID isn't an identifier for the Amazon S3 bucket where the snapshot is to be exported to.

", "StartExportTaskMessage$SourceArn": "

The Amazon Resource Name (ARN) of the snapshot to export to Amazon S3.

", @@ -4572,7 +4572,7 @@ "ValidStorageOptions$StorageType": "

The valid storage types for your DB instance. For example, gp2, io1.

", "VpcSecurityGroupIdList$member": null, "VpcSecurityGroupMembership$VpcSecurityGroupId": "

The name of the VPC security group.

", - "VpcSecurityGroupMembership$Status": "

The status of the VPC security group.

" + "VpcSecurityGroupMembership$Status": "

The membership status of the VPC security group.

Currently, the only valid status is active.

" } }, "String255": { diff --git a/service/athena/api.go b/service/athena/api.go index 42c8795eb49..8affa2c8bd1 100644 --- a/service/athena/api.go +++ b/service/athena/api.go @@ -1568,6 +1568,93 @@ func (c *Athena) GetQueryResultsPagesWithContext(ctx aws.Context, input *GetQuer return p.Err() } +const opGetQueryRuntimeStatistics = "GetQueryRuntimeStatistics" + +// GetQueryRuntimeStatisticsRequest generates a "aws/request.Request" representing the +// client's request for the GetQueryRuntimeStatistics operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetQueryRuntimeStatistics for more information on using the GetQueryRuntimeStatistics +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetQueryRuntimeStatisticsRequest method. +// req, resp := client.GetQueryRuntimeStatisticsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetQueryRuntimeStatistics +func (c *Athena) GetQueryRuntimeStatisticsRequest(input *GetQueryRuntimeStatisticsInput) (req *request.Request, output *GetQueryRuntimeStatisticsOutput) { + op := &request.Operation{ + Name: opGetQueryRuntimeStatistics, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetQueryRuntimeStatisticsInput{} + } + + output = &GetQueryRuntimeStatisticsOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetQueryRuntimeStatistics API operation for Amazon Athena. 
+// +// Returns query execution runtime statistics related to a single execution +// of a query if you have access to the workgroup in which the query ran. The +// query execution runtime statistics is returned only when QueryExecutionStatus$State +// is in a SUCCEEDED or FAILED state. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Athena's +// API operation GetQueryRuntimeStatistics for usage and error information. +// +// Returned Error Types: +// * InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. +// +// * InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetQueryRuntimeStatistics +func (c *Athena) GetQueryRuntimeStatistics(input *GetQueryRuntimeStatisticsInput) (*GetQueryRuntimeStatisticsOutput, error) { + req, out := c.GetQueryRuntimeStatisticsRequest(input) + return out, req.Send() +} + +// GetQueryRuntimeStatisticsWithContext is the same as GetQueryRuntimeStatistics with the addition of +// the ability to pass a context and additional request options. +// +// See GetQueryRuntimeStatistics for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *Athena) GetQueryRuntimeStatisticsWithContext(ctx aws.Context, input *GetQueryRuntimeStatisticsInput, opts ...request.Option) (*GetQueryRuntimeStatisticsOutput, error) { + req, out := c.GetQueryRuntimeStatisticsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opGetTableMetadata = "GetTableMetadata" // GetTableMetadataRequest generates a "aws/request.Request" representing the @@ -2065,6 +2152,12 @@ func (c *Athena) ListEngineVersionsRequest(input *ListEngineVersionsInput) (req Name: opListEngineVersions, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -2119,6 +2212,58 @@ func (c *Athena) ListEngineVersionsWithContext(ctx aws.Context, input *ListEngin return out, req.Send() } +// ListEngineVersionsPages iterates over the pages of a ListEngineVersions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListEngineVersions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListEngineVersions operation. +// pageNum := 0 +// err := client.ListEngineVersionsPages(params, +// func(page *athena.ListEngineVersionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Athena) ListEngineVersionsPages(input *ListEngineVersionsInput, fn func(*ListEngineVersionsOutput, bool) bool) error { + return c.ListEngineVersionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListEngineVersionsPagesWithContext same as ListEngineVersionsPages except +// it takes a Context and allows setting request options on the pages. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Athena) ListEngineVersionsPagesWithContext(ctx aws.Context, input *ListEngineVersionsInput, fn func(*ListEngineVersionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListEngineVersionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListEngineVersionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListEngineVersionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListNamedQueries = "ListNamedQueries" // ListNamedQueriesRequest generates a "aws/request.Request" representing the @@ -6025,6 +6170,86 @@ func (s *GetQueryResultsOutput) SetUpdateCount(v int64) *GetQueryResultsOutput { return s } +type GetQueryRuntimeStatisticsInput struct { + _ struct{} `type:"structure"` + + // The unique ID of the query execution. + // + // QueryExecutionId is a required field + QueryExecutionId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetQueryRuntimeStatisticsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s GetQueryRuntimeStatisticsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetQueryRuntimeStatisticsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetQueryRuntimeStatisticsInput"} + if s.QueryExecutionId == nil { + invalidParams.Add(request.NewErrParamRequired("QueryExecutionId")) + } + if s.QueryExecutionId != nil && len(*s.QueryExecutionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("QueryExecutionId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetQueryExecutionId sets the QueryExecutionId field's value. +func (s *GetQueryRuntimeStatisticsInput) SetQueryExecutionId(v string) *GetQueryRuntimeStatisticsInput { + s.QueryExecutionId = &v + return s +} + +type GetQueryRuntimeStatisticsOutput struct { + _ struct{} `type:"structure"` + + // Runtime statistics about the query execution. + QueryRuntimeStatistics *QueryRuntimeStatistics `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetQueryRuntimeStatisticsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetQueryRuntimeStatisticsOutput) GoString() string { + return s.String() +} + +// SetQueryRuntimeStatistics sets the QueryRuntimeStatistics field's value. 
+func (s *GetQueryRuntimeStatisticsOutput) SetQueryRuntimeStatistics(v *QueryRuntimeStatistics) *GetQueryRuntimeStatisticsOutput { + s.QueryRuntimeStatistics = v + return s +} + type GetTableMetadataInput struct { _ struct{} `type:"structure"` @@ -7992,6 +8217,363 @@ func (s *QueryExecutionStatus) SetSubmissionDateTime(v time.Time) *QueryExecutio return s } +// The query execution timeline, statistics on input and output rows and bytes, +// and the different query stages that form the query execution plan. +type QueryRuntimeStatistics struct { + _ struct{} `type:"structure"` + + // Stage statistics such as input and output rows and bytes, execution time, + // and stage state. This information also includes substages and the query stage + // plan. + OutputStage *QueryStage `type:"structure"` + + // Statistics such as input rows and bytes read by the query, rows and bytes + // output by the query, and the number of rows written by the query. + Rows *QueryRuntimeStatisticsRows `type:"structure"` + + // Timeline statistics such as query queue time, planning time, execution time, + // service processing time, and total execution time. + Timeline *QueryRuntimeStatisticsTimeline `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s QueryRuntimeStatistics) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s QueryRuntimeStatistics) GoString() string { + return s.String() +} + +// SetOutputStage sets the OutputStage field's value. 
+func (s *QueryRuntimeStatistics) SetOutputStage(v *QueryStage) *QueryRuntimeStatistics { + s.OutputStage = v + return s +} + +// SetRows sets the Rows field's value. +func (s *QueryRuntimeStatistics) SetRows(v *QueryRuntimeStatisticsRows) *QueryRuntimeStatistics { + s.Rows = v + return s +} + +// SetTimeline sets the Timeline field's value. +func (s *QueryRuntimeStatistics) SetTimeline(v *QueryRuntimeStatisticsTimeline) *QueryRuntimeStatistics { + s.Timeline = v + return s +} + +// Statistics such as input rows and bytes read by the query, rows and bytes +// output by the query, and the number of rows written by the query. +type QueryRuntimeStatisticsRows struct { + _ struct{} `type:"structure"` + + // The number of bytes read to execute the query. + InputBytes *int64 `type:"long"` + + // The number of rows read to execute the query. + InputRows *int64 `type:"long"` + + // The number of bytes returned by the query. + OutputBytes *int64 `type:"long"` + + // The number of rows returned by the query. + OutputRows *int64 `type:"long"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s QueryRuntimeStatisticsRows) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s QueryRuntimeStatisticsRows) GoString() string { + return s.String() +} + +// SetInputBytes sets the InputBytes field's value. +func (s *QueryRuntimeStatisticsRows) SetInputBytes(v int64) *QueryRuntimeStatisticsRows { + s.InputBytes = &v + return s +} + +// SetInputRows sets the InputRows field's value. 
+func (s *QueryRuntimeStatisticsRows) SetInputRows(v int64) *QueryRuntimeStatisticsRows { + s.InputRows = &v + return s +} + +// SetOutputBytes sets the OutputBytes field's value. +func (s *QueryRuntimeStatisticsRows) SetOutputBytes(v int64) *QueryRuntimeStatisticsRows { + s.OutputBytes = &v + return s +} + +// SetOutputRows sets the OutputRows field's value. +func (s *QueryRuntimeStatisticsRows) SetOutputRows(v int64) *QueryRuntimeStatisticsRows { + s.OutputRows = &v + return s +} + +// Timeline statistics such as query queue time, planning time, execution time, +// service processing time, and total execution time. +type QueryRuntimeStatisticsTimeline struct { + _ struct{} `type:"structure"` + + // The number of milliseconds that the query took to execute. + EngineExecutionTimeInMillis *int64 `type:"long"` + + // The number of milliseconds that Athena took to plan the query processing + // flow. This includes the time spent retrieving table partitions from the data + // source. Note that because the query engine performs the query planning, query + // planning time is a subset of engine processing time. + QueryPlanningTimeInMillis *int64 `type:"long"` + + // The number of milliseconds that the query was in your query queue waiting + // for resources. Note that if transient errors occur, Athena might automatically + // add the query back to the queue. + QueryQueueTimeInMillis *int64 `type:"long"` + + // The number of milliseconds that Athena took to finalize and publish the query + // results after the query engine finished running the query. + ServiceProcessingTimeInMillis *int64 `type:"long"` + + // The number of milliseconds that Athena took to run the query. + TotalExecutionTimeInMillis *int64 `type:"long"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s QueryRuntimeStatisticsTimeline) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s QueryRuntimeStatisticsTimeline) GoString() string { + return s.String() +} + +// SetEngineExecutionTimeInMillis sets the EngineExecutionTimeInMillis field's value. +func (s *QueryRuntimeStatisticsTimeline) SetEngineExecutionTimeInMillis(v int64) *QueryRuntimeStatisticsTimeline { + s.EngineExecutionTimeInMillis = &v + return s +} + +// SetQueryPlanningTimeInMillis sets the QueryPlanningTimeInMillis field's value. +func (s *QueryRuntimeStatisticsTimeline) SetQueryPlanningTimeInMillis(v int64) *QueryRuntimeStatisticsTimeline { + s.QueryPlanningTimeInMillis = &v + return s +} + +// SetQueryQueueTimeInMillis sets the QueryQueueTimeInMillis field's value. +func (s *QueryRuntimeStatisticsTimeline) SetQueryQueueTimeInMillis(v int64) *QueryRuntimeStatisticsTimeline { + s.QueryQueueTimeInMillis = &v + return s +} + +// SetServiceProcessingTimeInMillis sets the ServiceProcessingTimeInMillis field's value. +func (s *QueryRuntimeStatisticsTimeline) SetServiceProcessingTimeInMillis(v int64) *QueryRuntimeStatisticsTimeline { + s.ServiceProcessingTimeInMillis = &v + return s +} + +// SetTotalExecutionTimeInMillis sets the TotalExecutionTimeInMillis field's value. +func (s *QueryRuntimeStatisticsTimeline) SetTotalExecutionTimeInMillis(v int64) *QueryRuntimeStatisticsTimeline { + s.TotalExecutionTimeInMillis = &v + return s +} + +// Stage statistics such as input and output rows and bytes, execution time +// and stage state. This information also includes substages and the query stage +// plan. +type QueryStage struct { + _ struct{} `type:"structure"` + + // Time taken to execute this stage. 
+ ExecutionTime *int64 `type:"long"` + + // The number of bytes input into the stage for execution. + InputBytes *int64 `type:"long"` + + // The number of rows input into the stage for execution. + InputRows *int64 `type:"long"` + + // The number of bytes output from the stage after execution. + OutputBytes *int64 `type:"long"` + + // The number of rows output from the stage after execution. + OutputRows *int64 `type:"long"` + + // Stage plan information such as name, identifier, sub plans, and source stages. + QueryStagePlan *QueryStagePlanNode `type:"structure"` + + // The identifier for a stage. + StageId *int64 `type:"long"` + + // State of the stage after query execution. + State *string `type:"string"` + + // List of sub query stages that form this stage execution plan. + SubStages []*QueryStage `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s QueryStage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s QueryStage) GoString() string { + return s.String() +} + +// SetExecutionTime sets the ExecutionTime field's value. +func (s *QueryStage) SetExecutionTime(v int64) *QueryStage { + s.ExecutionTime = &v + return s +} + +// SetInputBytes sets the InputBytes field's value. +func (s *QueryStage) SetInputBytes(v int64) *QueryStage { + s.InputBytes = &v + return s +} + +// SetInputRows sets the InputRows field's value. +func (s *QueryStage) SetInputRows(v int64) *QueryStage { + s.InputRows = &v + return s +} + +// SetOutputBytes sets the OutputBytes field's value. 
+func (s *QueryStage) SetOutputBytes(v int64) *QueryStage { + s.OutputBytes = &v + return s +} + +// SetOutputRows sets the OutputRows field's value. +func (s *QueryStage) SetOutputRows(v int64) *QueryStage { + s.OutputRows = &v + return s +} + +// SetQueryStagePlan sets the QueryStagePlan field's value. +func (s *QueryStage) SetQueryStagePlan(v *QueryStagePlanNode) *QueryStage { + s.QueryStagePlan = v + return s +} + +// SetStageId sets the StageId field's value. +func (s *QueryStage) SetStageId(v int64) *QueryStage { + s.StageId = &v + return s +} + +// SetState sets the State field's value. +func (s *QueryStage) SetState(v string) *QueryStage { + s.State = &v + return s +} + +// SetSubStages sets the SubStages field's value. +func (s *QueryStage) SetSubStages(v []*QueryStage) *QueryStage { + s.SubStages = v + return s +} + +// Stage plan information such as name, identifier, sub plans, and remote sources. +type QueryStagePlanNode struct { + _ struct{} `type:"structure"` + + // Stage plan information such as name, identifier, sub plans, and remote sources + // of child plan nodes. + Children []*QueryStagePlanNode `type:"list"` + + // Information about the operation this query stage plan node is performing. + Identifier *string `type:"string"` + + // Name of the query stage plan that describes the operation this stage is performing + // as part of query execution. + Name *string `type:"string"` + + // Source plan node IDs. + RemoteSources []*string `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s QueryStagePlanNode) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output.
The member name will be present, but the +// value will be replaced with "sensitive". +func (s QueryStagePlanNode) GoString() string { + return s.String() +} + +// SetChildren sets the Children field's value. +func (s *QueryStagePlanNode) SetChildren(v []*QueryStagePlanNode) *QueryStagePlanNode { + s.Children = v + return s +} + +// SetIdentifier sets the Identifier field's value. +func (s *QueryStagePlanNode) SetIdentifier(v string) *QueryStagePlanNode { + s.Identifier = &v + return s +} + +// SetName sets the Name field's value. +func (s *QueryStagePlanNode) SetName(v string) *QueryStagePlanNode { + s.Name = &v + return s +} + +// SetRemoteSources sets the RemoteSources field's value. +func (s *QueryStagePlanNode) SetRemoteSources(v []*string) *QueryStagePlanNode { + s.RemoteSources = v + return s +} + // A resource, such as a workgroup, was not found. type ResourceNotFoundException struct { _ struct{} `type:"structure"` diff --git a/service/athena/athenaiface/interface.go b/service/athena/athenaiface/interface.go index 1b8fa5d66a5..9288205eba3 100644 --- a/service/athena/athenaiface/interface.go +++ b/service/athena/athenaiface/interface.go @@ -131,6 +131,10 @@ type AthenaAPI interface { GetQueryResultsPages(*athena.GetQueryResultsInput, func(*athena.GetQueryResultsOutput, bool) bool) error GetQueryResultsPagesWithContext(aws.Context, *athena.GetQueryResultsInput, func(*athena.GetQueryResultsOutput, bool) bool, ...request.Option) error + GetQueryRuntimeStatistics(*athena.GetQueryRuntimeStatisticsInput) (*athena.GetQueryRuntimeStatisticsOutput, error) + GetQueryRuntimeStatisticsWithContext(aws.Context, *athena.GetQueryRuntimeStatisticsInput, ...request.Option) (*athena.GetQueryRuntimeStatisticsOutput, error) + GetQueryRuntimeStatisticsRequest(*athena.GetQueryRuntimeStatisticsInput) (*request.Request, *athena.GetQueryRuntimeStatisticsOutput) + GetTableMetadata(*athena.GetTableMetadataInput) (*athena.GetTableMetadataOutput, error) 
GetTableMetadataWithContext(aws.Context, *athena.GetTableMetadataInput, ...request.Option) (*athena.GetTableMetadataOutput, error) GetTableMetadataRequest(*athena.GetTableMetadataInput) (*request.Request, *athena.GetTableMetadataOutput) @@ -157,6 +161,9 @@ type AthenaAPI interface { ListEngineVersionsWithContext(aws.Context, *athena.ListEngineVersionsInput, ...request.Option) (*athena.ListEngineVersionsOutput, error) ListEngineVersionsRequest(*athena.ListEngineVersionsInput) (*request.Request, *athena.ListEngineVersionsOutput) + ListEngineVersionsPages(*athena.ListEngineVersionsInput, func(*athena.ListEngineVersionsOutput, bool) bool) error + ListEngineVersionsPagesWithContext(aws.Context, *athena.ListEngineVersionsInput, func(*athena.ListEngineVersionsOutput, bool) bool, ...request.Option) error + ListNamedQueries(*athena.ListNamedQueriesInput) (*athena.ListNamedQueriesOutput, error) ListNamedQueriesWithContext(aws.Context, *athena.ListNamedQueriesInput, ...request.Option) (*athena.ListNamedQueriesOutput, error) ListNamedQueriesRequest(*athena.ListNamedQueriesInput) (*request.Request, *athena.ListNamedQueriesOutput) diff --git a/service/cloudwatch/api.go b/service/cloudwatch/api.go index 8727027e59c..34f52a2fe5b 100644 --- a/service/cloudwatch/api.go +++ b/service/cloudwatch/api.go @@ -4163,6 +4163,40 @@ type CompositeAlarm struct { // state. ActionsEnabled *bool `type:"boolean"` + // When the value is ALARM, it means that the actions are suppressed because + // the suppressor alarm is in ALARM. When the value is WaitPeriod, it means that + // the actions are suppressed because the composite alarm is waiting for the + // suppressor alarm to go into the ALARM state. The maximum waiting time + // is as specified in ActionsSuppressorWaitPeriod. After this time, the composite + // alarm performs its actions.
When the value is ExtensionPeriod, it means that + // the actions are suppressed because the composite alarm is waiting after the + // suppressor alarm went out of the ALARM state. The maximum waiting time is + // as specified in ActionsSuppressorExtensionPeriod. After this time, the composite + // alarm performs its actions. + ActionsSuppressedBy *string `type:"string" enum:"ActionsSuppressedBy"` + + // Captures the reason for action suppression. + ActionsSuppressedReason *string `type:"string"` + + // Actions will be suppressed if the suppressor alarm is in the ALARM state. + // ActionsSuppressor can be an AlarmName or an Amazon Resource Name (ARN) from + // an existing alarm. + ActionsSuppressor *string `min:"1" type:"string"` + + // The maximum time in seconds that the composite alarm waits after suppressor + // alarm goes out of the ALARM state. After this time, the composite alarm performs + // its actions. + // + // ExtensionPeriod is required only when ActionsSuppressor is specified. + ActionsSuppressorExtensionPeriod *int64 `type:"integer"` + + // The maximum time in seconds that the composite alarm waits for the suppressor + // alarm to go into the ALARM state. After this time, the composite alarm performs + // its actions. + // + // WaitPeriod is required only when ActionsSuppressor is specified. + ActionsSuppressorWaitPeriod *int64 `type:"integer"` + // The actions to execute when this alarm transitions to the ALARM state from // any other state. Each action is specified as an Amazon Resource Name (ARN). AlarmActions []*string `type:"list"` @@ -4197,7 +4231,10 @@ type CompositeAlarm struct { // An explanation for the alarm state, in JSON format. StateReasonData *string `type:"string"` - // The time stamp of the last update to the alarm state. + // The timestamp of the last change to the alarm's StateValue. + StateTransitionedTimestamp *time.Time `type:"timestamp"` + + // Tracks the timestamp of any state update, even if StateValue doesn't change. 
StateUpdatedTimestamp *time.Time `type:"timestamp"` // The state value for the alarm. @@ -4228,6 +4265,36 @@ func (s *CompositeAlarm) SetActionsEnabled(v bool) *CompositeAlarm { return s } +// SetActionsSuppressedBy sets the ActionsSuppressedBy field's value. +func (s *CompositeAlarm) SetActionsSuppressedBy(v string) *CompositeAlarm { + s.ActionsSuppressedBy = &v + return s +} + +// SetActionsSuppressedReason sets the ActionsSuppressedReason field's value. +func (s *CompositeAlarm) SetActionsSuppressedReason(v string) *CompositeAlarm { + s.ActionsSuppressedReason = &v + return s +} + +// SetActionsSuppressor sets the ActionsSuppressor field's value. +func (s *CompositeAlarm) SetActionsSuppressor(v string) *CompositeAlarm { + s.ActionsSuppressor = &v + return s +} + +// SetActionsSuppressorExtensionPeriod sets the ActionsSuppressorExtensionPeriod field's value. +func (s *CompositeAlarm) SetActionsSuppressorExtensionPeriod(v int64) *CompositeAlarm { + s.ActionsSuppressorExtensionPeriod = &v + return s +} + +// SetActionsSuppressorWaitPeriod sets the ActionsSuppressorWaitPeriod field's value. +func (s *CompositeAlarm) SetActionsSuppressorWaitPeriod(v int64) *CompositeAlarm { + s.ActionsSuppressorWaitPeriod = &v + return s +} + // SetAlarmActions sets the AlarmActions field's value. func (s *CompositeAlarm) SetAlarmActions(v []*string) *CompositeAlarm { s.AlarmActions = v @@ -4288,6 +4355,12 @@ func (s *CompositeAlarm) SetStateReasonData(v string) *CompositeAlarm { return s } +// SetStateTransitionedTimestamp sets the StateTransitionedTimestamp field's value. +func (s *CompositeAlarm) SetStateTransitionedTimestamp(v time.Time) *CompositeAlarm { + s.StateTransitionedTimestamp = &v + return s +} + // SetStateUpdatedTimestamp sets the StateUpdatedTimestamp field's value. func (s *CompositeAlarm) SetStateUpdatedTimestamp(v time.Time) *CompositeAlarm { s.StateUpdatedTimestamp = &v @@ -9654,6 +9727,25 @@ type PutCompositeAlarmInput struct { // state of the composite alarm. 
The default is TRUE. ActionsEnabled *bool `type:"boolean"` + // Actions will be suppressed if the suppressor alarm is in the ALARM state. + // ActionsSuppressor can be an AlarmName or an Amazon Resource Name (ARN) from + // an existing alarm. + ActionsSuppressor *string `min:"1" type:"string"` + + // The maximum time in seconds that the composite alarm waits after suppressor + // alarm goes out of the ALARM state. After this time, the composite alarm performs + // its actions. + // + // ExtensionPeriod is required only when ActionsSuppressor is specified. + ActionsSuppressorExtensionPeriod *int64 `type:"integer"` + + // The maximum time in seconds that the composite alarm waits for the suppressor + // alarm to go into the ALARM state. After this time, the composite alarm performs + // its actions. + // + // WaitPeriod is required only when ActionsSuppressor is specified. + ActionsSuppressorWaitPeriod *int64 `type:"integer"` + // The actions to execute when this alarm transitions to the ALARM state from // any other state. Each action is specified as an Amazon Resource Name (ARN). // @@ -9766,6 +9858,9 @@ func (s PutCompositeAlarmInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *PutCompositeAlarmInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "PutCompositeAlarmInput"} + if s.ActionsSuppressor != nil && len(*s.ActionsSuppressor) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ActionsSuppressor", 1)) + } if s.AlarmName == nil { invalidParams.Add(request.NewErrParamRequired("AlarmName")) } @@ -9801,6 +9896,24 @@ func (s *PutCompositeAlarmInput) SetActionsEnabled(v bool) *PutCompositeAlarmInp return s } +// SetActionsSuppressor sets the ActionsSuppressor field's value. 
+func (s *PutCompositeAlarmInput) SetActionsSuppressor(v string) *PutCompositeAlarmInput { + s.ActionsSuppressor = &v + return s +} + +// SetActionsSuppressorExtensionPeriod sets the ActionsSuppressorExtensionPeriod field's value. +func (s *PutCompositeAlarmInput) SetActionsSuppressorExtensionPeriod(v int64) *PutCompositeAlarmInput { + s.ActionsSuppressorExtensionPeriod = &v + return s +} + +// SetActionsSuppressorWaitPeriod sets the ActionsSuppressorWaitPeriod field's value. +func (s *PutCompositeAlarmInput) SetActionsSuppressorWaitPeriod(v int64) *PutCompositeAlarmInput { + s.ActionsSuppressorWaitPeriod = &v + return s +} + // SetAlarmActions sets the AlarmActions field's value. func (s *PutCompositeAlarmInput) SetAlarmActions(v []*string) *PutCompositeAlarmInput { s.AlarmActions = v @@ -11677,6 +11790,26 @@ func (s UntagResourceOutput) GoString() string { return s.String() } +const ( + // ActionsSuppressedByWaitPeriod is a ActionsSuppressedBy enum value + ActionsSuppressedByWaitPeriod = "WaitPeriod" + + // ActionsSuppressedByExtensionPeriod is a ActionsSuppressedBy enum value + ActionsSuppressedByExtensionPeriod = "ExtensionPeriod" + + // ActionsSuppressedByAlarm is a ActionsSuppressedBy enum value + ActionsSuppressedByAlarm = "Alarm" +) + +// ActionsSuppressedBy_Values returns all elements of the ActionsSuppressedBy enum +func ActionsSuppressedBy_Values() []string { + return []string{ + ActionsSuppressedByWaitPeriod, + ActionsSuppressedByExtensionPeriod, + ActionsSuppressedByAlarm, + } +} + const ( // AlarmTypeCompositeAlarm is a AlarmType enum value AlarmTypeCompositeAlarm = "CompositeAlarm" diff --git a/service/databasemigrationservice/api.go b/service/databasemigrationservice/api.go index ce8dae7bbbe..cd9b91c290d 100644 --- a/service/databasemigrationservice/api.go +++ b/service/databasemigrationservice/api.go @@ -8102,8 +8102,9 @@ type CreateEndpointInput struct { // The type of engine for the endpoint. 
Valid values, depending on the EndpointType // value, include "mysql", "oracle", "postgres", "mariadb", "aurora", "aurora-postgresql", - // "opensearch", "redshift", "s3", "db2", "azuredb", "sybase", "dynamodb", "mongodb", - // "kinesis", "kafka", "elasticsearch", "docdb", "sqlserver", and "neptune". + // "opensearch", "redshift", "s3", "db2", db2-zos, "azuredb", "sybase", "dynamodb", + // "mongodb", "kinesis", "kafka", "elasticsearch", "docdb", "sqlserver", "neptune", + // and babelfish. // // EngineName is a required field EngineName *string `type:"string" required:"true"` @@ -24317,8 +24318,7 @@ type TableStatistics struct { // The state of the tables described. // // Valid states: Table does not exist | Before load | Full load | Table completed - // | Table cancelled | Table error | Table all | Table updates | Table is being - // reloaded + // | Table cancelled | Table error | Table is being reloaded TableState *string `type:"string"` // The number of update actions performed on a table. diff --git a/service/docdb/api.go b/service/docdb/api.go index 843076c9c5d..f490ee3297c 100644 --- a/service/docdb/api.go +++ b/service/docdb/api.go @@ -7083,6 +7083,10 @@ type CreateDBInstanceInput struct { // Example: us-east-1d AvailabilityZone *string `type:"string"` + // A value that indicates whether to copy tags from the DB instance to snapshots + // of the DB instance. By default, tags are not copied. + CopyTagsToSnapshot *bool `type:"boolean"` + // The identifier of the cluster that the instance will belong to. // // DBClusterIdentifier is a required field @@ -7210,6 +7214,12 @@ func (s *CreateDBInstanceInput) SetAvailabilityZone(v string) *CreateDBInstanceI return s } +// SetCopyTagsToSnapshot sets the CopyTagsToSnapshot field's value. +func (s *CreateDBInstanceInput) SetCopyTagsToSnapshot(v bool) *CreateDBInstanceInput { + s.CopyTagsToSnapshot = &v + return s +} + // SetDBClusterIdentifier sets the DBClusterIdentifier field's value. 
func (s *CreateDBInstanceInput) SetDBClusterIdentifier(v string) *CreateDBInstanceInput { s.DBClusterIdentifier = &v @@ -7734,6 +7744,9 @@ type DBCluster struct { // Specifies the number of days for which automatic snapshots are retained. BackupRetentionPeriod *int64 `type:"integer"` + // Identifies the clone group to which the DB cluster is associated. + CloneGroupId *string `type:"string"` + // Specifies the time when the cluster was created, in Universal Coordinated // Time (UTC). ClusterCreateTime *time.Time `type:"timestamp"` @@ -7881,6 +7894,12 @@ func (s *DBCluster) SetBackupRetentionPeriod(v int64) *DBCluster { return s } +// SetCloneGroupId sets the CloneGroupId field's value. +func (s *DBCluster) SetCloneGroupId(v string) *DBCluster { + s.CloneGroupId = &v + return s +} + // SetClusterCreateTime sets the ClusterCreateTime field's value. func (s *DBCluster) SetClusterCreateTime(v time.Time) *DBCluster { s.ClusterCreateTime = &v @@ -8619,6 +8638,10 @@ type DBInstance struct { // The identifier of the CA certificate for this DB instance. CACertificateIdentifier *string `type:"string"` + // A value that indicates whether to copy tags from the DB instance to snapshots + // of the DB instance. By default, tags are not copied. + CopyTagsToSnapshot *bool `type:"boolean"` + // Contains the name of the cluster that the instance is a member of if the // instance is a member of a cluster. DBClusterIdentifier *string `type:"string"` @@ -8743,6 +8766,12 @@ func (s *DBInstance) SetCACertificateIdentifier(v string) *DBInstance { return s } +// SetCopyTagsToSnapshot sets the CopyTagsToSnapshot field's value. +func (s *DBInstance) SetCopyTagsToSnapshot(v bool) *DBInstance { + s.CopyTagsToSnapshot = &v + return s +} + // SetDBClusterIdentifier sets the DBClusterIdentifier field's value. 
func (s *DBInstance) SetDBClusterIdentifier(v string) *DBInstance { s.DBClusterIdentifier = &v @@ -13204,6 +13233,10 @@ type ModifyDBInstanceInput struct { // Indicates the certificate that needs to be associated with the instance. CACertificateIdentifier *string `type:"string"` + // A value that indicates whether to copy all tags from the DB instance to snapshots + // of the DB instance. By default, tags are not copied. + CopyTagsToSnapshot *bool `type:"boolean"` + // The new compute and memory capacity of the instance; for example, db.r5.large. // Not all instance classes are available in all Amazon Web Services Regions. // @@ -13332,6 +13365,12 @@ func (s *ModifyDBInstanceInput) SetCACertificateIdentifier(v string) *ModifyDBIn return s } +// SetCopyTagsToSnapshot sets the CopyTagsToSnapshot field's value. +func (s *ModifyDBInstanceInput) SetCopyTagsToSnapshot(v bool) *ModifyDBInstanceInput { + s.CopyTagsToSnapshot = &v + return s +} + // SetDBInstanceClass sets the DBInstanceClass field's value. func (s *ModifyDBInstanceInput) SetDBInstanceClass(v string) *ModifyDBInstanceInput { s.DBInstanceClass = &v @@ -15069,6 +15108,22 @@ type RestoreDBClusterToPointInTimeInput struct { // Example: 2015-03-07T23:45:00Z RestoreToTime *time.Time `type:"timestamp"` + // The type of restore to be performed. You can specify one of the following + // values: + // + // * full-copy - The new DB cluster is restored as a full copy of the source + // DB cluster. + // + // * copy-on-write - The new DB cluster is restored as a clone of the source + // DB cluster. + // + // Constraints: You can't specify copy-on-write if the engine version of the + // source DB cluster is earlier than 1.11. + // + // If you don't specify a RestoreType value, then the new DB cluster is restored + // as a full copy of the source DB cluster. + RestoreType *string `type:"string"` + // The identifier of the source cluster from which to restore. 
// // Constraints: @@ -15169,6 +15224,12 @@ func (s *RestoreDBClusterToPointInTimeInput) SetRestoreToTime(v time.Time) *Rest return s } +// SetRestoreType sets the RestoreType field's value. +func (s *RestoreDBClusterToPointInTimeInput) SetRestoreType(v string) *RestoreDBClusterToPointInTimeInput { + s.RestoreType = &v + return s +} + // SetSourceDBClusterIdentifier sets the SourceDBClusterIdentifier field's value. func (s *RestoreDBClusterToPointInTimeInput) SetSourceDBClusterIdentifier(v string) *RestoreDBClusterToPointInTimeInput { s.SourceDBClusterIdentifier = &v diff --git a/service/ec2instanceconnect/api.go b/service/ec2instanceconnect/api.go index 764de612f78..0b729c0524f 100644 --- a/service/ec2instanceconnect/api.go +++ b/service/ec2instanceconnect/api.go @@ -92,6 +92,9 @@ func (c *EC2InstanceConnect) SendSSHPublicKeyRequest(input *SendSSHPublicKeyInpu // to a stopped or terminated instance is not supported. If the instance is // stopped, start your instance, and try to connect again. // +// * EC2InstanceUnavailableException +// The instance is currently unavailable. Wait a few minutes and try again. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-instance-connect-2018-04-02/SendSSHPublicKey func (c *EC2InstanceConnect) SendSSHPublicKey(input *SendSSHPublicKeyInput) (*SendSSHPublicKeyOutput, error) { req, out := c.SendSSHPublicKeyRequest(input) @@ -212,6 +215,9 @@ func (c *EC2InstanceConnect) SendSerialConsoleSSHPublicKeyRequest(input *SendSer // to a stopped or terminated instance is not supported. If the instance is // stopped, start your instance, and try to connect again. // +// * EC2InstanceUnavailableException +// The instance is currently unavailable. Wait a few minutes and try again. 
+// // See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-instance-connect-2018-04-02/SendSerialConsoleSSHPublicKey func (c *EC2InstanceConnect) SendSerialConsoleSSHPublicKey(input *SendSerialConsoleSSHPublicKeyInput) (*SendSerialConsoleSSHPublicKeyOutput, error) { req, out := c.SendSerialConsoleSSHPublicKeyRequest(input) @@ -494,6 +500,70 @@ func (s *EC2InstanceTypeInvalidException) RequestID() string { return s.RespMetadata.RequestID } +// The instance is currently unavailable. Wait a few minutes and try again. +type EC2InstanceUnavailableException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s EC2InstanceUnavailableException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s EC2InstanceUnavailableException) GoString() string { + return s.String() +} + +func newErrorEC2InstanceUnavailableException(v protocol.ResponseMetadata) error { + return &EC2InstanceUnavailableException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *EC2InstanceUnavailableException) Code() string { + return "EC2InstanceUnavailableException" +} + +// Message returns the exception's message. +func (s *EC2InstanceUnavailableException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. 
+func (s *EC2InstanceUnavailableException) OrigErr() error { + return nil +} + +func (s *EC2InstanceUnavailableException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *EC2InstanceUnavailableException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *EC2InstanceUnavailableException) RequestID() string { + return s.RespMetadata.RequestID +} + // One of the parameters is not valid. type InvalidArgsException struct { _ struct{} `type:"structure"` diff --git a/service/ec2instanceconnect/errors.go b/service/ec2instanceconnect/errors.go index 975b63cb08e..b08b35aff89 100644 --- a/service/ec2instanceconnect/errors.go +++ b/service/ec2instanceconnect/errors.go @@ -36,6 +36,12 @@ const ( // Only Nitro instance types are currently supported. ErrCodeEC2InstanceTypeInvalidException = "EC2InstanceTypeInvalidException" + // ErrCodeEC2InstanceUnavailableException for service response error code + // "EC2InstanceUnavailableException". + // + // The instance is currently unavailable. Wait a few minutes and try again. + ErrCodeEC2InstanceUnavailableException = "EC2InstanceUnavailableException" + // ErrCodeInvalidArgsException for service response error code // "InvalidArgsException". 
// @@ -85,6 +91,7 @@ var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ "EC2InstanceNotFoundException": newErrorEC2InstanceNotFoundException, "EC2InstanceStateInvalidException": newErrorEC2InstanceStateInvalidException, "EC2InstanceTypeInvalidException": newErrorEC2InstanceTypeInvalidException, + "EC2InstanceUnavailableException": newErrorEC2InstanceUnavailableException, "InvalidArgsException": newErrorInvalidArgsException, "SerialConsoleAccessDisabledException": newErrorSerialConsoleAccessDisabledException, "SerialConsoleSessionLimitExceededException": newErrorSerialConsoleSessionLimitExceededException, diff --git a/service/ec2instanceconnect/examples_test.go b/service/ec2instanceconnect/examples_test.go index 47bea01c04b..3ff485f7c04 100644 --- a/service/ec2instanceconnect/examples_test.go +++ b/service/ec2instanceconnect/examples_test.go @@ -54,6 +54,8 @@ func ExampleEC2InstanceConnect_SendSSHPublicKey_shared00() { fmt.Println(ec2instanceconnect.ErrCodeEC2InstanceNotFoundException, aerr.Error()) case ec2instanceconnect.ErrCodeEC2InstanceStateInvalidException: fmt.Println(ec2instanceconnect.ErrCodeEC2InstanceStateInvalidException, aerr.Error()) + case ec2instanceconnect.ErrCodeEC2InstanceUnavailableException: + fmt.Println(ec2instanceconnect.ErrCodeEC2InstanceUnavailableException, aerr.Error()) default: fmt.Println(aerr.Error()) } diff --git a/service/frauddetector/api.go b/service/frauddetector/api.go index b6669bd824f..2324907956e 100644 --- a/service/frauddetector/api.go +++ b/service/frauddetector/api.go @@ -7371,6 +7371,155 @@ func (c *FraudDetector) UpdateVariableWithContext(ctx aws.Context, input *Update return out, req.Send() } +// The Account Takeover Insights (ATI) model performance metrics data points. +type ATIMetricDataPoint struct { + _ struct{} `type:"structure"` + + // The anomaly discovery rate. This metric quantifies the percentage of anomalies + // that can be detected by the model at the selected score threshold. 
A lower + // score threshold increases the percentage of anomalies captured by the model, + // but would also require challenging a larger percentage of login events, leading + // to a higher customer friction. + Adr *float64 `locationName:"adr" type:"float"` + + // The account takeover discovery rate. This metric quantifies the percentage + // of account compromise events that can be detected by the model at the selected + // score threshold. This metric is only available if 50 or more entities with + // at-least one labeled account takeover event is present in the ingested dataset. + Atodr *float64 `locationName:"atodr" type:"float"` + + // The challenge rate. This indicates the percentage of login events that the + // model recommends to challenge such as one-time password, multi-factor authentication, + // and investigations. + Cr *float64 `locationName:"cr" type:"float"` + + // The model's threshold that specifies an acceptable fraud capture rate. For + // example, a threshold of 500 means any model score 500 or above is labeled + // as fraud. + Threshold *float64 `locationName:"threshold" type:"float"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ATIMetricDataPoint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ATIMetricDataPoint) GoString() string { + return s.String() +} + +// SetAdr sets the Adr field's value. +func (s *ATIMetricDataPoint) SetAdr(v float64) *ATIMetricDataPoint { + s.Adr = &v + return s +} + +// SetAtodr sets the Atodr field's value. 
+func (s *ATIMetricDataPoint) SetAtodr(v float64) *ATIMetricDataPoint { + s.Atodr = &v + return s +} + +// SetCr sets the Cr field's value. +func (s *ATIMetricDataPoint) SetCr(v float64) *ATIMetricDataPoint { + s.Cr = &v + return s +} + +// SetThreshold sets the Threshold field's value. +func (s *ATIMetricDataPoint) SetThreshold(v float64) *ATIMetricDataPoint { + s.Threshold = &v + return s +} + +// The Account Takeover Insights (ATI) model performance score. +type ATIModelPerformance struct { + _ struct{} `type:"structure"` + + // The anomaly separation index (ASI) score. This metric summarizes the overall + // ability of the model to separate anomalous activities from the normal behavior. + // Depending on the business, a large fraction of these anomalous activities + // can be malicious and correspond to the account takeover attacks. A model + // with no separability power will have the lowest possible ASI score of 0.5, + // whereas the a model with a high separability power will have the highest + // possible ASI score of 1.0 + Asi *float64 `locationName:"asi" type:"float"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ATIModelPerformance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ATIModelPerformance) GoString() string { + return s.String() +} + +// SetAsi sets the Asi field's value. +func (s *ATIModelPerformance) SetAsi(v float64) *ATIModelPerformance { + s.Asi = &v + return s +} + +// The Account Takeover Insights (ATI) model training metric details. 
+type ATITrainingMetricsValue struct { + _ struct{} `type:"structure"` + + // The model's performance metrics data points. + MetricDataPoints []*ATIMetricDataPoint `locationName:"metricDataPoints" type:"list"` + + // The model's overall performance scores. + ModelPerformance *ATIModelPerformance `locationName:"modelPerformance" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ATITrainingMetricsValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ATITrainingMetricsValue) GoString() string { + return s.String() +} + +// SetMetricDataPoints sets the MetricDataPoints field's value. +func (s *ATITrainingMetricsValue) SetMetricDataPoints(v []*ATIMetricDataPoint) *ATITrainingMetricsValue { + s.MetricDataPoints = v + return s +} + +// SetModelPerformance sets the ModelPerformance field's value. +func (s *ATITrainingMetricsValue) SetModelPerformance(v *ATIModelPerformance) *ATITrainingMetricsValue { + s.ModelPerformance = v + return s +} + // An exception indicating Amazon Fraud Detector does not have the needed permissions. // This can occur if you submit a request, such as PutExternalModel, that specifies // a role that is not in your account. @@ -7437,6 +7586,160 @@ func (s *AccessDeniedException) RequestID() string { return s.RespMetadata.RequestID } +// The log odds metric details. 
+// +// Account Takeover Insights (ATI) model uses event variables from the login +// data you provide to continuously calculate a set of variables (aggregated +// variables) based on historical events. For example, your ATI model might +// calculate the number of times an user has logged in using the same IP address. +// In this case, event variables used to derive the aggregated variables are +// IP address and user. +type AggregatedLogOddsMetric struct { + _ struct{} `type:"structure"` + + // The relative importance of the variables in the list to the other event variable. + // + // AggregatedVariablesImportance is a required field + AggregatedVariablesImportance *float64 `locationName:"aggregatedVariablesImportance" type:"float" required:"true"` + + // The names of all the variables. + // + // VariableNames is a required field + VariableNames []*string `locationName:"variableNames" type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AggregatedLogOddsMetric) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AggregatedLogOddsMetric) GoString() string { + return s.String() +} + +// SetAggregatedVariablesImportance sets the AggregatedVariablesImportance field's value. +func (s *AggregatedLogOddsMetric) SetAggregatedVariablesImportance(v float64) *AggregatedLogOddsMetric { + s.AggregatedVariablesImportance = &v + return s +} + +// SetVariableNames sets the VariableNames field's value. 
+func (s *AggregatedLogOddsMetric) SetVariableNames(v []*string) *AggregatedLogOddsMetric { + s.VariableNames = v + return s +} + +// The details of the impact of aggregated variables on the prediction score. +// +// Account Takeover Insights (ATI) model uses the login data you provide to +// continuously calculate a set of variables (aggregated variables) based on +// historical events. For example, the model might calculate the number of times +// an user has logged in using the same IP address. In this case, event variables +// used to derive the aggregated variables are IP address and user. +type AggregatedVariablesImpactExplanation struct { + _ struct{} `type:"structure"` + + // The names of all the event variables that were used to derive the aggregated + // variables. + EventVariableNames []*string `locationName:"eventVariableNames" type:"list"` + + // The raw, uninterpreted value represented as log-odds of the fraud. These + // values are usually between -10 to +10, but range from -infinity to +infinity. + // + // * A positive value indicates that the variables drove the risk score up. + // + // * A negative value indicates that the variables drove the risk score down. + LogOddsImpact *float64 `locationName:"logOddsImpact" type:"float"` + + // The relative impact of the aggregated variables in terms of magnitude on + // the prediction scores. + RelativeImpact *string `locationName:"relativeImpact" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AggregatedVariablesImpactExplanation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s AggregatedVariablesImpactExplanation) GoString() string { + return s.String() +} + +// SetEventVariableNames sets the EventVariableNames field's value. +func (s *AggregatedVariablesImpactExplanation) SetEventVariableNames(v []*string) *AggregatedVariablesImpactExplanation { + s.EventVariableNames = v + return s +} + +// SetLogOddsImpact sets the LogOddsImpact field's value. +func (s *AggregatedVariablesImpactExplanation) SetLogOddsImpact(v float64) *AggregatedVariablesImpactExplanation { + s.LogOddsImpact = &v + return s +} + +// SetRelativeImpact sets the RelativeImpact field's value. +func (s *AggregatedVariablesImpactExplanation) SetRelativeImpact(v string) *AggregatedVariablesImpactExplanation { + s.RelativeImpact = &v + return s +} + +// The details of the relative importance of the aggregated variables. +// +// Account Takeover Insights (ATI) model uses event variables from the login +// data you provide to continuously calculate a set of variables (aggregated +// variables) based on historical events. For example, your ATI model might +// calculate the number of times an user has logged in using the same IP address. +// In this case, event variables used to derive the aggregated variables are +// IP address and user. +type AggregatedVariablesImportanceMetrics struct { + _ struct{} `type:"structure"` + + // List of variables' metrics. + LogOddsMetrics []*AggregatedLogOddsMetric `locationName:"logOddsMetrics" type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AggregatedVariablesImportanceMetrics) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AggregatedVariablesImportanceMetrics) GoString() string { + return s.String() +} + +// SetLogOddsMetrics sets the LogOddsMetrics field's value. +func (s *AggregatedVariablesImportanceMetrics) SetLogOddsMetrics(v []*AggregatedLogOddsMetric) *AggregatedVariablesImportanceMetrics { + s.LogOddsMetrics = v + return s +} + // Provides the error of the batch create variable API. type BatchCreateVariableError struct { _ struct{} `type:"structure"` @@ -9451,14 +9754,14 @@ func (s CreateVariableOutput) GoString() string { return s.String() } -// The model training validation messages. +// The model training data validation metrics. type DataValidationMetrics struct { _ struct{} `type:"structure"` // The field-specific model training validation messages. FieldLevelMessages []*FieldValidationMessage `locationName:"fieldLevelMessages" type:"list"` - // The file-specific model training validation messages. + // The file-specific model training data validation messages. FileLevelMessages []*FileValidationMessage `locationName:"fileLevelMessages" type:"list"` } @@ -14879,9 +15182,7 @@ type LabelSchema struct { // "LEGIT" => ["true"]} or {"FRAUD" => ["fraud", "abuse"], "LEGIT" => ["legit", // "safe"]}. The value part of the mapper is a list, because you may have multiple // label variants from your event type for a single Amazon Fraud Detector label. - // - // LabelMapper is a required field - LabelMapper map[string][]*string `locationName:"labelMapper" type:"map" required:"true"` + LabelMapper map[string][]*string `locationName:"labelMapper" type:"map"` // The action to take for unlabeled events. 
UnlabeledEventsTreatment *string `locationName:"unlabeledEventsTreatment" type:"string" enum:"UnlabeledEventsTreatment"` @@ -14905,19 +15206,6 @@ func (s LabelSchema) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *LabelSchema) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "LabelSchema"} - if s.LabelMapper == nil { - invalidParams.Add(request.NewErrParamRequired("LabelMapper")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - // SetLabelMapper sets the LabelMapper field's value. func (s *LabelSchema) SetLabelMapper(v map[string][]*string) *LabelSchema { s.LabelMapper = v @@ -15810,6 +16098,10 @@ type ModelVersionDetail struct { // The training results. TrainingResult *TrainingResult `locationName:"trainingResult" type:"structure"` + + // The training result details. The details include the relative importance + // of the variables. + TrainingResultV2 *TrainingResultV2 `locationName:"trainingResultV2" type:"structure"` } // String returns the string representation. @@ -15902,6 +16194,12 @@ func (s *ModelVersionDetail) SetTrainingResult(v *TrainingResult) *ModelVersionD return s } +// SetTrainingResultV2 sets the TrainingResultV2 field's value. +func (s *ModelVersionDetail) SetTrainingResultV2(v *TrainingResultV2) *ModelVersionDetail { + s.TrainingResultV2 = v + return s +} + // The model version evalutions. type ModelVersionEvaluation struct { _ struct{} `type:"structure"` @@ -15952,6 +16250,144 @@ func (s *ModelVersionEvaluation) SetPredictionExplanations(v *PredictionExplanat return s } +// The Online Fraud Insights (OFI) model performance metrics data points. +type OFIMetricDataPoint struct { + _ struct{} `type:"structure"` + + // The false positive rate. This is the percentage of total legitimate events + // that are incorrectly predicted as fraud. 
+ Fpr *float64 `locationName:"fpr" type:"float"` + + // The percentage of fraud events correctly predicted as fraudulent as compared + // to all events predicted as fraudulent. + Precision *float64 `locationName:"precision" type:"float"` + + // The model threshold that specifies an acceptable fraud capture rate. For + // example, a threshold of 500 means any model score 500 or above is labeled + // as fraud. + Threshold *float64 `locationName:"threshold" type:"float"` + + // The true positive rate. This is the percentage of total fraud the model detects. + // Also known as capture rate. + Tpr *float64 `locationName:"tpr" type:"float"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OFIMetricDataPoint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OFIMetricDataPoint) GoString() string { + return s.String() +} + +// SetFpr sets the Fpr field's value. +func (s *OFIMetricDataPoint) SetFpr(v float64) *OFIMetricDataPoint { + s.Fpr = &v + return s +} + +// SetPrecision sets the Precision field's value. +func (s *OFIMetricDataPoint) SetPrecision(v float64) *OFIMetricDataPoint { + s.Precision = &v + return s +} + +// SetThreshold sets the Threshold field's value. +func (s *OFIMetricDataPoint) SetThreshold(v float64) *OFIMetricDataPoint { + s.Threshold = &v + return s +} + +// SetTpr sets the Tpr field's value. +func (s *OFIMetricDataPoint) SetTpr(v float64) *OFIMetricDataPoint { + s.Tpr = &v + return s +} + +// The Online Fraud Insights (OFI) model performance score. 
+type OFIModelPerformance struct { + _ struct{} `type:"structure"` + + // The area under the curve (auc). This summarizes the total positive rate (tpr) + // and false positive rate (FPR) across all possible model score thresholds. + Auc *float64 `locationName:"auc" type:"float"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OFIModelPerformance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OFIModelPerformance) GoString() string { + return s.String() +} + +// SetAuc sets the Auc field's value. +func (s *OFIModelPerformance) SetAuc(v float64) *OFIModelPerformance { + s.Auc = &v + return s +} + +// The Online Fraud Insights (OFI) model training metric details. +type OFITrainingMetricsValue struct { + _ struct{} `type:"structure"` + + // The model's performance metrics data points. + MetricDataPoints []*OFIMetricDataPoint `locationName:"metricDataPoints" type:"list"` + + // The model's overall performance score. + ModelPerformance *OFIModelPerformance `locationName:"modelPerformance" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OFITrainingMetricsValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OFITrainingMetricsValue) GoString() string { + return s.String() +} + +// SetMetricDataPoints sets the MetricDataPoints field's value. +func (s *OFITrainingMetricsValue) SetMetricDataPoints(v []*OFIMetricDataPoint) *OFITrainingMetricsValue { + s.MetricDataPoints = v + return s +} + +// SetModelPerformance sets the ModelPerformance field's value. +func (s *OFITrainingMetricsValue) SetModelPerformance(v *OFIModelPerformance) *OFITrainingMetricsValue { + s.ModelPerformance = v + return s +} + // The outcome. type Outcome struct { _ struct{} `type:"structure"` @@ -16025,6 +16461,16 @@ func (s *Outcome) SetName(v string) *Outcome { type PredictionExplanations struct { _ struct{} `type:"structure"` + // The details of the aggregated variables impact on the prediction score. + // + // Account Takeover Insights (ATI) model uses event variables from the login + // data you provide to continuously calculate a set of variables (aggregated + // variables) based on historical events. For example, your ATI model might + // calculate the number of times an user has logged in using the same IP address. + // In this case, event variables used to derive the aggregated variables are + // IP address and user. + AggregatedVariablesImpactExplanations []*AggregatedVariablesImpactExplanation `locationName:"aggregatedVariablesImpactExplanations" type:"list"` + // The details of the event variable's impact on the prediction score. VariableImpactExplanations []*VariableImpactExplanation `locationName:"variableImpactExplanations" type:"list"` } @@ -16047,6 +16493,12 @@ func (s PredictionExplanations) GoString() string { return s.String() } +// SetAggregatedVariablesImpactExplanations sets the AggregatedVariablesImpactExplanations field's value. 
+func (s *PredictionExplanations) SetAggregatedVariablesImpactExplanations(v []*AggregatedVariablesImpactExplanation) *PredictionExplanations { + s.AggregatedVariablesImpactExplanations = v + return s +} + // SetVariableImpactExplanations sets the VariableImpactExplanations field's value. func (s *PredictionExplanations) SetVariableImpactExplanations(v []*VariableImpactExplanation) *PredictionExplanations { s.VariableImpactExplanations = v @@ -17484,6 +17936,145 @@ func (s SendEventOutput) GoString() string { return s.String() } +// The performance metrics data points for Transaction Fraud Insights (TFI) +// model. +type TFIMetricDataPoint struct { + _ struct{} `type:"structure"` + + // The false positive rate. This is the percentage of total legitimate events + // that are incorrectly predicted as fraud. + Fpr *float64 `locationName:"fpr" type:"float"` + + // The percentage of fraud events correctly predicted as fraudulent as compared + // to all events predicted as fraudulent. + Precision *float64 `locationName:"precision" type:"float"` + + // The model threshold that specifies an acceptable fraud capture rate. For + // example, a threshold of 500 means any model score 500 or above is labeled + // as fraud. + Threshold *float64 `locationName:"threshold" type:"float"` + + // The true positive rate. This is the percentage of total fraud the model detects. + // Also known as capture rate. + Tpr *float64 `locationName:"tpr" type:"float"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TFIMetricDataPoint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s TFIMetricDataPoint) GoString() string { + return s.String() +} + +// SetFpr sets the Fpr field's value. +func (s *TFIMetricDataPoint) SetFpr(v float64) *TFIMetricDataPoint { + s.Fpr = &v + return s +} + +// SetPrecision sets the Precision field's value. +func (s *TFIMetricDataPoint) SetPrecision(v float64) *TFIMetricDataPoint { + s.Precision = &v + return s +} + +// SetThreshold sets the Threshold field's value. +func (s *TFIMetricDataPoint) SetThreshold(v float64) *TFIMetricDataPoint { + s.Threshold = &v + return s +} + +// SetTpr sets the Tpr field's value. +func (s *TFIMetricDataPoint) SetTpr(v float64) *TFIMetricDataPoint { + s.Tpr = &v + return s +} + +// The Transaction Fraud Insights (TFI) model performance score. +type TFIModelPerformance struct { + _ struct{} `type:"structure"` + + // The area under the curve (auc). This summarizes the total positive rate (tpr) + // and false positive rate (FPR) across all possible model score thresholds. + Auc *float64 `locationName:"auc" type:"float"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TFIModelPerformance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TFIModelPerformance) GoString() string { + return s.String() +} + +// SetAuc sets the Auc field's value. +func (s *TFIModelPerformance) SetAuc(v float64) *TFIModelPerformance { + s.Auc = &v + return s +} + +// The Transaction Fraud Insights (TFI) model training metric details. 
+type TFITrainingMetricsValue struct { + _ struct{} `type:"structure"` + + // The model's performance metrics data points. + MetricDataPoints []*TFIMetricDataPoint `locationName:"metricDataPoints" type:"list"` + + // The model performance score. + ModelPerformance *TFIModelPerformance `locationName:"modelPerformance" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TFITrainingMetricsValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TFITrainingMetricsValue) GoString() string { + return s.String() +} + +// SetMetricDataPoints sets the MetricDataPoints field's value. +func (s *TFITrainingMetricsValue) SetMetricDataPoints(v []*TFIMetricDataPoint) *TFITrainingMetricsValue { + s.MetricDataPoints = v + return s +} + +// SetModelPerformance sets the ModelPerformance field's value. +func (s *TFITrainingMetricsValue) SetModelPerformance(v *TFIModelPerformance) *TFITrainingMetricsValue { + s.ModelPerformance = v + return s +} + // A key and value pair. type Tag struct { _ struct{} `type:"structure"` @@ -17712,9 +18303,7 @@ type TrainingDataSchema struct { _ struct{} `type:"structure"` // The label schema. - // - // LabelSchema is a required field - LabelSchema *LabelSchema `locationName:"labelSchema" type:"structure" required:"true"` + LabelSchema *LabelSchema `locationName:"labelSchema" type:"structure"` // The training data schema variables. 
// @@ -17743,17 +18332,9 @@ func (s TrainingDataSchema) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *TrainingDataSchema) Validate() error { invalidParams := request.ErrInvalidParams{Context: "TrainingDataSchema"} - if s.LabelSchema == nil { - invalidParams.Add(request.NewErrParamRequired("LabelSchema")) - } if s.ModelVariables == nil { invalidParams.Add(request.NewErrParamRequired("ModelVariables")) } - if s.LabelSchema != nil { - if err := s.LabelSchema.Validate(); err != nil { - invalidParams.AddNested("LabelSchema", err.(request.ErrInvalidParams)) - } - } if invalidParams.Len() > 0 { return invalidParams @@ -17817,6 +18398,56 @@ func (s *TrainingMetrics) SetMetricDataPoints(v []*MetricDataPoint) *TrainingMet return s } +// The training metrics details. +type TrainingMetricsV2 struct { + _ struct{} `type:"structure"` + + // The Account Takeover Insights (ATI) model training metric details. + Ati *ATITrainingMetricsValue `locationName:"ati" type:"structure"` + + // The Online Fraud Insights (OFI) model training metric details. + Ofi *OFITrainingMetricsValue `locationName:"ofi" type:"structure"` + + // The Transaction Fraud Insights (TFI) model training metric details. + Tfi *TFITrainingMetricsValue `locationName:"tfi" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TrainingMetricsV2) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s TrainingMetricsV2) GoString() string { + return s.String() +} + +// SetAti sets the Ati field's value. +func (s *TrainingMetricsV2) SetAti(v *ATITrainingMetricsValue) *TrainingMetricsV2 { + s.Ati = v + return s +} + +// SetOfi sets the Ofi field's value. +func (s *TrainingMetricsV2) SetOfi(v *OFITrainingMetricsValue) *TrainingMetricsV2 { + s.Ofi = v + return s +} + +// SetTfi sets the Tfi field's value. +func (s *TrainingMetricsV2) SetTfi(v *TFITrainingMetricsValue) *TrainingMetricsV2 { + s.Tfi = v + return s +} + // The training result details. type TrainingResult struct { _ struct{} `type:"structure"` @@ -17867,6 +18498,72 @@ func (s *TrainingResult) SetVariableImportanceMetrics(v *VariableImportanceMetri return s } +// The training result details. +type TrainingResultV2 struct { + _ struct{} `type:"structure"` + + // The variable importance metrics of the aggregated variables. + // + // Account Takeover Insights (ATI) model uses event variables from the login + // data you provide to continuously calculate a set of variables (aggregated + // variables) based on historical events. For example, your ATI model might + // calculate the number of times an user has logged in using the same IP address. + // In this case, event variables used to derive the aggregated variables are + // IP address and user. + AggregatedVariablesImportanceMetrics *AggregatedVariablesImportanceMetrics `locationName:"aggregatedVariablesImportanceMetrics" type:"structure"` + + // The model training data validation metrics. + DataValidationMetrics *DataValidationMetrics `locationName:"dataValidationMetrics" type:"structure"` + + // The training metric details. + TrainingMetricsV2 *TrainingMetricsV2 `locationName:"trainingMetricsV2" type:"structure"` + + // The variable importance metrics details. + VariableImportanceMetrics *VariableImportanceMetrics `locationName:"variableImportanceMetrics" type:"structure"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TrainingResultV2) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TrainingResultV2) GoString() string { + return s.String() +} + +// SetAggregatedVariablesImportanceMetrics sets the AggregatedVariablesImportanceMetrics field's value. +func (s *TrainingResultV2) SetAggregatedVariablesImportanceMetrics(v *AggregatedVariablesImportanceMetrics) *TrainingResultV2 { + s.AggregatedVariablesImportanceMetrics = v + return s +} + +// SetDataValidationMetrics sets the DataValidationMetrics field's value. +func (s *TrainingResultV2) SetDataValidationMetrics(v *DataValidationMetrics) *TrainingResultV2 { + s.DataValidationMetrics = v + return s +} + +// SetTrainingMetricsV2 sets the TrainingMetricsV2 field's value. +func (s *TrainingResultV2) SetTrainingMetricsV2(v *TrainingMetricsV2) *TrainingResultV2 { + s.TrainingMetricsV2 = v + return s +} + +// SetVariableImportanceMetrics sets the VariableImportanceMetrics field's value. 
+func (s *TrainingResultV2) SetVariableImportanceMetrics(v *VariableImportanceMetrics) *TrainingResultV2 { + s.VariableImportanceMetrics = v + return s +} + type UntagResourceInput struct { _ struct{} `type:"structure"` @@ -19749,6 +20446,9 @@ const ( // ModelTypeEnumTransactionFraudInsights is a ModelTypeEnum enum value ModelTypeEnumTransactionFraudInsights = "TRANSACTION_FRAUD_INSIGHTS" + + // ModelTypeEnumAccountTakeoverInsights is a ModelTypeEnum enum value + ModelTypeEnumAccountTakeoverInsights = "ACCOUNT_TAKEOVER_INSIGHTS" ) // ModelTypeEnum_Values returns all elements of the ModelTypeEnum enum @@ -19756,6 +20456,7 @@ func ModelTypeEnum_Values() []string { return []string{ ModelTypeEnumOnlineFraudInsights, ModelTypeEnumTransactionFraudInsights, + ModelTypeEnumAccountTakeoverInsights, } } diff --git a/service/iotsitewise/api.go b/service/iotsitewise/api.go index 6741b1e6f8d..668a8cdc5e7 100644 --- a/service/iotsitewise/api.go +++ b/service/iotsitewise/api.go @@ -1363,6 +1363,128 @@ func (c *IoTSiteWise) CreateAssetModelWithContext(ctx aws.Context, input *Create return out, req.Send() } +const opCreateBulkImportJob = "CreateBulkImportJob" + +// CreateBulkImportJobRequest generates a "aws/request.Request" representing the +// client's request for the CreateBulkImportJob operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateBulkImportJob for more information on using the CreateBulkImportJob +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateBulkImportJobRequest method. 
+// req, resp := client.CreateBulkImportJobRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotsitewise-2019-12-02/CreateBulkImportJob +func (c *IoTSiteWise) CreateBulkImportJobRequest(input *CreateBulkImportJobInput) (req *request.Request, output *CreateBulkImportJobOutput) { + op := &request.Operation{ + Name: opCreateBulkImportJob, + HTTPMethod: "POST", + HTTPPath: "/jobs", + } + + if input == nil { + input = &CreateBulkImportJobInput{} + } + + output = &CreateBulkImportJobOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("data.", nil)) + req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) + return +} + +// CreateBulkImportJob API operation for AWS IoT SiteWise. +// +// +// This API operation is in preview release for IoT SiteWise and is subject +// to change. We recommend that you use this operation only with test data, +// and not in production environments. +// +// Defines a job to ingest data to IoT SiteWise from Amazon S3. For more information, +// see Create a bulk import job (CLI) (https://docs.aws.amazon.com/iot-sitewise/latest/userguide/CreateBulkImportJob.html) +// in the Amazon Simple Storage Service User Guide. +// +// You must enable IoT SiteWise to export data to Amazon S3 before you create +// a bulk import job. For more information about how to configure storage settings, +// see PutStorageConfiguration (https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_PutStorageConfiguration.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT SiteWise's +// API operation CreateBulkImportJob for usage and error information. 
+// +// Returned Error Types: +// * InvalidRequestException +// The request isn't valid. This can occur if your request contains malformed +// JSON or unsupported characters. Check your request and try again. +// +// * ResourceAlreadyExistsException +// The resource already exists. +// +// * ResourceNotFoundException +// The requested resource can't be found. +// +// * InternalFailureException +// IoT SiteWise can't process your request right now. Try again later. +// +// * ThrottlingException +// Your request exceeded a rate limit. For example, you might have exceeded +// the number of IoT SiteWise assets that can be created per second, the allowed +// number of messages per second, and so on. +// +// For more information, see Quotas (https://docs.aws.amazon.com/iot-sitewise/latest/userguide/quotas.html) +// in the IoT SiteWise User Guide. +// +// * LimitExceededException +// You've reached the limit for a resource. For example, this can occur if you're +// trying to associate more than the allowed number of child assets or attempting +// to create more than the allowed number of properties for an asset model. +// +// For more information, see Quotas (https://docs.aws.amazon.com/iot-sitewise/latest/userguide/quotas.html) +// in the IoT SiteWise User Guide. +// +// * ConflictingOperationException +// Your request has conflicting operations. This can occur if you're trying +// to perform more than one operation on the same resource at the same time. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotsitewise-2019-12-02/CreateBulkImportJob +func (c *IoTSiteWise) CreateBulkImportJob(input *CreateBulkImportJobInput) (*CreateBulkImportJobOutput, error) { + req, out := c.CreateBulkImportJobRequest(input) + return out, req.Send() +} + +// CreateBulkImportJobWithContext is the same as CreateBulkImportJob with the addition of +// the ability to pass a context and additional request options. 
+// +// See CreateBulkImportJob for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTSiteWise) CreateBulkImportJobWithContext(ctx aws.Context, input *CreateBulkImportJobInput, opts ...request.Option) (*CreateBulkImportJobOutput, error) { + req, out := c.CreateBulkImportJobRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateDashboard = "CreateDashboard" // CreateDashboardRequest generates a "aws/request.Request" representing the @@ -2998,6 +3120,109 @@ func (c *IoTSiteWise) DescribeAssetPropertyWithContext(ctx aws.Context, input *D return out, req.Send() } +const opDescribeBulkImportJob = "DescribeBulkImportJob" + +// DescribeBulkImportJobRequest generates a "aws/request.Request" representing the +// client's request for the DescribeBulkImportJob operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeBulkImportJob for more information on using the DescribeBulkImportJob +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeBulkImportJobRequest method. 
+// req, resp := client.DescribeBulkImportJobRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotsitewise-2019-12-02/DescribeBulkImportJob +func (c *IoTSiteWise) DescribeBulkImportJobRequest(input *DescribeBulkImportJobInput) (req *request.Request, output *DescribeBulkImportJobOutput) { + op := &request.Operation{ + Name: opDescribeBulkImportJob, + HTTPMethod: "GET", + HTTPPath: "/jobs/{jobId}", + } + + if input == nil { + input = &DescribeBulkImportJobInput{} + } + + output = &DescribeBulkImportJobOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("data.", nil)) + req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) + return +} + +// DescribeBulkImportJob API operation for AWS IoT SiteWise. +// +// +// This API operation is in preview release for IoT SiteWise and is subject +// to change. We recommend that you use this operation only with test data, +// and not in production environments. +// +// Retrieves information about a bulk import job request. For more information, +// see Describe a bulk import job (CLI) (https://docs.aws.amazon.com/iot-sitewise/latest/userguide/DescribeBulkImportJob.html) +// in the Amazon Simple Storage Service User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT SiteWise's +// API operation DescribeBulkImportJob for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// The request isn't valid. This can occur if your request contains malformed +// JSON or unsupported characters. Check your request and try again. +// +// * ResourceNotFoundException +// The requested resource can't be found. 
+// +// * InternalFailureException +// IoT SiteWise can't process your request right now. Try again later. +// +// * ThrottlingException +// Your request exceeded a rate limit. For example, you might have exceeded +// the number of IoT SiteWise assets that can be created per second, the allowed +// number of messages per second, and so on. +// +// For more information, see Quotas (https://docs.aws.amazon.com/iot-sitewise/latest/userguide/quotas.html) +// in the IoT SiteWise User Guide. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotsitewise-2019-12-02/DescribeBulkImportJob +func (c *IoTSiteWise) DescribeBulkImportJob(input *DescribeBulkImportJobInput) (*DescribeBulkImportJobOutput, error) { + req, out := c.DescribeBulkImportJobRequest(input) + return out, req.Send() +} + +// DescribeBulkImportJobWithContext is the same as DescribeBulkImportJob with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeBulkImportJob for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTSiteWise) DescribeBulkImportJobWithContext(ctx aws.Context, input *DescribeBulkImportJobInput, opts ...request.Option) (*DescribeBulkImportJobOutput, error) { + req, out := c.DescribeBulkImportJobRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opDescribeDashboard = "DescribeDashboard" // DescribeDashboardRequest generates a "aws/request.Request" representing the @@ -5487,37 +5712,37 @@ func (c *IoTSiteWise) ListAssociatedAssetsPagesWithContext(ctx aws.Context, inpu return p.Err() } -const opListDashboards = "ListDashboards" +const opListBulkImportJobs = "ListBulkImportJobs" -// ListDashboardsRequest generates a "aws/request.Request" representing the -// client's request for the ListDashboards operation. The "output" return +// ListBulkImportJobsRequest generates a "aws/request.Request" representing the +// client's request for the ListBulkImportJobs operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListDashboards for more information on using the ListDashboards +// See ListBulkImportJobs for more information on using the ListBulkImportJobs // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListDashboardsRequest method. -// req, resp := client.ListDashboardsRequest(params) +// // Example sending a request using the ListBulkImportJobsRequest method. 
+// req, resp := client.ListBulkImportJobsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/iotsitewise-2019-12-02/ListDashboards -func (c *IoTSiteWise) ListDashboardsRequest(input *ListDashboardsInput) (req *request.Request, output *ListDashboardsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotsitewise-2019-12-02/ListBulkImportJobs +func (c *IoTSiteWise) ListBulkImportJobsRequest(input *ListBulkImportJobsInput) (req *request.Request, output *ListBulkImportJobsOutput) { op := &request.Operation{ - Name: opListDashboards, + Name: opListBulkImportJobs, HTTPMethod: "GET", - HTTPPath: "/dashboards", + HTTPPath: "/jobs", Paginator: &request.Paginator{ InputTokens: []string{"nextToken"}, OutputTokens: []string{"nextToken"}, @@ -5527,26 +5752,33 @@ func (c *IoTSiteWise) ListDashboardsRequest(input *ListDashboardsInput) (req *re } if input == nil { - input = &ListDashboardsInput{} + input = &ListBulkImportJobsInput{} } - output = &ListDashboardsOutput{} + output = &ListBulkImportJobsOutput{} req = c.newRequest(op, input, output) - req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("monitor.", nil)) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("data.", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return } -// ListDashboards API operation for AWS IoT SiteWise. +// ListBulkImportJobs API operation for AWS IoT SiteWise. // -// Retrieves a paginated list of dashboards for an IoT SiteWise Monitor project. +// +// This API operation is in preview release for IoT SiteWise and is subject +// to change. We recommend that you use this operation only with test data, +// and not in production environments. +// +// Retrieves a paginated list of bulk import job requests. 
For more information, +// see List bulk import jobs (CLI) (https://docs.aws.amazon.com/iot-sitewise/latest/userguide/ListBulkImportJobs.html) +// in the Amazon Simple Storage Service User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS IoT SiteWise's -// API operation ListDashboards for usage and error information. +// API operation ListBulkImportJobs for usage and error information. // // Returned Error Types: // * InvalidRequestException @@ -5556,6 +5788,9 @@ func (c *IoTSiteWise) ListDashboardsRequest(input *ListDashboardsInput) (req *re // * InternalFailureException // IoT SiteWise can't process your request right now. Try again later. // +// * ResourceNotFoundException +// The requested resource can't be found. +// // * ThrottlingException // Your request exceeded a rate limit. For example, you might have exceeded // the number of IoT SiteWise assets that can be created per second, the allowed @@ -5564,65 +5799,65 @@ func (c *IoTSiteWise) ListDashboardsRequest(input *ListDashboardsInput) (req *re // For more information, see Quotas (https://docs.aws.amazon.com/iot-sitewise/latest/userguide/quotas.html) // in the IoT SiteWise User Guide. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/iotsitewise-2019-12-02/ListDashboards -func (c *IoTSiteWise) ListDashboards(input *ListDashboardsInput) (*ListDashboardsOutput, error) { - req, out := c.ListDashboardsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotsitewise-2019-12-02/ListBulkImportJobs +func (c *IoTSiteWise) ListBulkImportJobs(input *ListBulkImportJobsInput) (*ListBulkImportJobsOutput, error) { + req, out := c.ListBulkImportJobsRequest(input) return out, req.Send() } -// ListDashboardsWithContext is the same as ListDashboards with the addition of +// ListBulkImportJobsWithContext is the same as ListBulkImportJobs with the addition of // the ability to pass a context and additional request options. // -// See ListDashboards for details on how to use this API operation. +// See ListBulkImportJobs for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *IoTSiteWise) ListDashboardsWithContext(ctx aws.Context, input *ListDashboardsInput, opts ...request.Option) (*ListDashboardsOutput, error) { - req, out := c.ListDashboardsRequest(input) +func (c *IoTSiteWise) ListBulkImportJobsWithContext(ctx aws.Context, input *ListBulkImportJobsInput, opts ...request.Option) (*ListBulkImportJobsOutput, error) { + req, out := c.ListBulkImportJobsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListDashboardsPages iterates over the pages of a ListDashboards operation, +// ListBulkImportJobsPages iterates over the pages of a ListBulkImportJobs operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. 
// -// See ListDashboards method for more information on how to use this operation. +// See ListBulkImportJobs method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a ListDashboards operation. +// // Example iterating over at most 3 pages of a ListBulkImportJobs operation. // pageNum := 0 -// err := client.ListDashboardsPages(params, -// func(page *iotsitewise.ListDashboardsOutput, lastPage bool) bool { +// err := client.ListBulkImportJobsPages(params, +// func(page *iotsitewise.ListBulkImportJobsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // -func (c *IoTSiteWise) ListDashboardsPages(input *ListDashboardsInput, fn func(*ListDashboardsOutput, bool) bool) error { - return c.ListDashboardsPagesWithContext(aws.BackgroundContext(), input, fn) +func (c *IoTSiteWise) ListBulkImportJobsPages(input *ListBulkImportJobsInput, fn func(*ListBulkImportJobsOutput, bool) bool) error { + return c.ListBulkImportJobsPagesWithContext(aws.BackgroundContext(), input, fn) } -// ListDashboardsPagesWithContext same as ListDashboardsPages except +// ListBulkImportJobsPagesWithContext same as ListBulkImportJobsPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
-func (c *IoTSiteWise) ListDashboardsPagesWithContext(ctx aws.Context, input *ListDashboardsInput, fn func(*ListDashboardsOutput, bool) bool, opts ...request.Option) error { +func (c *IoTSiteWise) ListBulkImportJobsPagesWithContext(ctx aws.Context, input *ListBulkImportJobsInput, fn func(*ListBulkImportJobsOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { - var inCpy *ListDashboardsInput + var inCpy *ListBulkImportJobsInput if input != nil { tmp := *input inCpy = &tmp } - req, _ := c.ListDashboardsRequest(inCpy) + req, _ := c.ListBulkImportJobsRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) return req, nil @@ -5630,7 +5865,7 @@ func (c *IoTSiteWise) ListDashboardsPagesWithContext(ctx aws.Context, input *Lis } for p.Next() { - if !fn(p.Page().(*ListDashboardsOutput), !p.HasNextPage()) { + if !fn(p.Page().(*ListBulkImportJobsOutput), !p.HasNextPage()) { break } } @@ -5638,37 +5873,37 @@ func (c *IoTSiteWise) ListDashboardsPagesWithContext(ctx aws.Context, input *Lis return p.Err() } -const opListGateways = "ListGateways" +const opListDashboards = "ListDashboards" -// ListGatewaysRequest generates a "aws/request.Request" representing the -// client's request for the ListGateways operation. The "output" return +// ListDashboardsRequest generates a "aws/request.Request" representing the +// client's request for the ListDashboards operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListGateways for more information on using the ListGateways +// See ListDashboards for more information on using the ListDashboards // API call, and error handling. 
// // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListGatewaysRequest method. -// req, resp := client.ListGatewaysRequest(params) +// // Example sending a request using the ListDashboardsRequest method. +// req, resp := client.ListDashboardsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/iotsitewise-2019-12-02/ListGateways -func (c *IoTSiteWise) ListGatewaysRequest(input *ListGatewaysInput) (req *request.Request, output *ListGatewaysOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotsitewise-2019-12-02/ListDashboards +func (c *IoTSiteWise) ListDashboardsRequest(input *ListDashboardsInput) (req *request.Request, output *ListDashboardsOutput) { op := &request.Operation{ - Name: opListGateways, + Name: opListDashboards, HTTPMethod: "GET", - HTTPPath: "/20200301/gateways", + HTTPPath: "/dashboards", Paginator: &request.Paginator{ InputTokens: []string{"nextToken"}, OutputTokens: []string{"nextToken"}, @@ -5678,26 +5913,26 @@ func (c *IoTSiteWise) ListGatewaysRequest(input *ListGatewaysInput) (req *reques } if input == nil { - input = &ListGatewaysInput{} + input = &ListDashboardsInput{} } - output = &ListGatewaysOutput{} + output = &ListDashboardsOutput{} req = c.newRequest(op, input, output) - req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("api.", nil)) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("monitor.", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return } -// ListGateways API operation for AWS IoT SiteWise. +// ListDashboards API operation for AWS IoT SiteWise. // -// Retrieves a paginated list of gateways. +// Retrieves a paginated list of dashboards for an IoT SiteWise Monitor project. 
// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS IoT SiteWise's -// API operation ListGateways for usage and error information. +// API operation ListDashboards for usage and error information. // // Returned Error Types: // * InvalidRequestException @@ -5715,29 +5950,180 @@ func (c *IoTSiteWise) ListGatewaysRequest(input *ListGatewaysInput) (req *reques // For more information, see Quotas (https://docs.aws.amazon.com/iot-sitewise/latest/userguide/quotas.html) // in the IoT SiteWise User Guide. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/iotsitewise-2019-12-02/ListGateways -func (c *IoTSiteWise) ListGateways(input *ListGatewaysInput) (*ListGatewaysOutput, error) { - req, out := c.ListGatewaysRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotsitewise-2019-12-02/ListDashboards +func (c *IoTSiteWise) ListDashboards(input *ListDashboardsInput) (*ListDashboardsOutput, error) { + req, out := c.ListDashboardsRequest(input) return out, req.Send() } -// ListGatewaysWithContext is the same as ListGateways with the addition of +// ListDashboardsWithContext is the same as ListDashboards with the addition of // the ability to pass a context and additional request options. // -// See ListGateways for details on how to use this API operation. +// See ListDashboards for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
-func (c *IoTSiteWise) ListGatewaysWithContext(ctx aws.Context, input *ListGatewaysInput, opts ...request.Option) (*ListGatewaysOutput, error) { - req, out := c.ListGatewaysRequest(input) +func (c *IoTSiteWise) ListDashboardsWithContext(ctx aws.Context, input *ListDashboardsInput, opts ...request.Option) (*ListDashboardsOutput, error) { + req, out := c.ListDashboardsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListGatewaysPages iterates over the pages of a ListGateways operation, +// ListDashboardsPages iterates over the pages of a ListDashboards operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListDashboards method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListDashboards operation. +// pageNum := 0 +// err := client.ListDashboardsPages(params, +// func(page *iotsitewise.ListDashboardsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IoTSiteWise) ListDashboardsPages(input *ListDashboardsInput, fn func(*ListDashboardsOutput, bool) bool) error { + return c.ListDashboardsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListDashboardsPagesWithContext same as ListDashboardsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *IoTSiteWise) ListDashboardsPagesWithContext(ctx aws.Context, input *ListDashboardsInput, fn func(*ListDashboardsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListDashboardsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListDashboardsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListDashboardsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListGateways = "ListGateways" + +// ListGatewaysRequest generates a "aws/request.Request" representing the +// client's request for the ListGateways operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListGateways for more information on using the ListGateways +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListGatewaysRequest method. 
+// req, resp := client.ListGatewaysRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotsitewise-2019-12-02/ListGateways +func (c *IoTSiteWise) ListGatewaysRequest(input *ListGatewaysInput) (req *request.Request, output *ListGatewaysOutput) { + op := &request.Operation{ + Name: opListGateways, + HTTPMethod: "GET", + HTTPPath: "/20200301/gateways", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListGatewaysInput{} + } + + output = &ListGatewaysOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("api.", nil)) + req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) + return +} + +// ListGateways API operation for AWS IoT SiteWise. +// +// Retrieves a paginated list of gateways. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT SiteWise's +// API operation ListGateways for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// The request isn't valid. This can occur if your request contains malformed +// JSON or unsupported characters. Check your request and try again. +// +// * InternalFailureException +// IoT SiteWise can't process your request right now. Try again later. +// +// * ThrottlingException +// Your request exceeded a rate limit. For example, you might have exceeded +// the number of IoT SiteWise assets that can be created per second, the allowed +// number of messages per second, and so on. 
+// +// For more information, see Quotas (https://docs.aws.amazon.com/iot-sitewise/latest/userguide/quotas.html) +// in the IoT SiteWise User Guide. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotsitewise-2019-12-02/ListGateways +func (c *IoTSiteWise) ListGateways(input *ListGatewaysInput) (*ListGatewaysOutput, error) { + req, out := c.ListGatewaysRequest(input) + return out, req.Send() +} + +// ListGatewaysWithContext is the same as ListGateways with the addition of +// the ability to pass a context and additional request options. +// +// See ListGateways for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTSiteWise) ListGatewaysWithContext(ctx aws.Context, input *ListGatewaysInput, opts ...request.Option) (*ListGatewaysOutput, error) { + req, out := c.ListGatewaysRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListGatewaysPages iterates over the pages of a ListGateways operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // @@ -12735,6 +13121,211 @@ func (s *CreateAssetOutput) SetAssetStatus(v *AssetStatus) *CreateAssetOutput { return s } +type CreateBulkImportJobInput struct { + _ struct{} `type:"structure"` + + // The Amazon S3 destination where errors associated with the job creation request + // are saved. + // + // ErrorReportLocation is a required field + ErrorReportLocation *ErrorReportLocation `locationName:"errorReportLocation" type:"structure" required:"true"` + + // The files in the specified Amazon S3 bucket that contain your data. 
+ // + // Files is a required field + Files []*File `locationName:"files" type:"list" required:"true"` + + // Contains the configuration information of a job, such as the file format + // used to save data in Amazon S3. + // + // JobConfiguration is a required field + JobConfiguration *JobConfiguration `locationName:"jobConfiguration" type:"structure" required:"true"` + + // The unique name that helps identify the job request. + // + // JobName is a required field + JobName *string `locationName:"jobName" min:"1" type:"string" required:"true"` + + // The ARN (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // of the IAM role that allows IoT SiteWise to read Amazon S3 data. + // + // JobRoleArn is a required field + JobRoleArn *string `locationName:"jobRoleArn" min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateBulkImportJobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateBulkImportJobInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateBulkImportJobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateBulkImportJobInput"} + if s.ErrorReportLocation == nil { + invalidParams.Add(request.NewErrParamRequired("ErrorReportLocation")) + } + if s.Files == nil { + invalidParams.Add(request.NewErrParamRequired("Files")) + } + if s.JobConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("JobConfiguration")) + } + if s.JobName == nil { + invalidParams.Add(request.NewErrParamRequired("JobName")) + } + if s.JobName != nil && len(*s.JobName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("JobName", 1)) + } + if s.JobRoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("JobRoleArn")) + } + if s.JobRoleArn != nil && len(*s.JobRoleArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("JobRoleArn", 1)) + } + if s.ErrorReportLocation != nil { + if err := s.ErrorReportLocation.Validate(); err != nil { + invalidParams.AddNested("ErrorReportLocation", err.(request.ErrInvalidParams)) + } + } + if s.Files != nil { + for i, v := range s.Files { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Files", i), err.(request.ErrInvalidParams)) + } + } + } + if s.JobConfiguration != nil { + if err := s.JobConfiguration.Validate(); err != nil { + invalidParams.AddNested("JobConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetErrorReportLocation sets the ErrorReportLocation field's value. +func (s *CreateBulkImportJobInput) SetErrorReportLocation(v *ErrorReportLocation) *CreateBulkImportJobInput { + s.ErrorReportLocation = v + return s +} + +// SetFiles sets the Files field's value. +func (s *CreateBulkImportJobInput) SetFiles(v []*File) *CreateBulkImportJobInput { + s.Files = v + return s +} + +// SetJobConfiguration sets the JobConfiguration field's value. 
+func (s *CreateBulkImportJobInput) SetJobConfiguration(v *JobConfiguration) *CreateBulkImportJobInput { + s.JobConfiguration = v + return s +} + +// SetJobName sets the JobName field's value. +func (s *CreateBulkImportJobInput) SetJobName(v string) *CreateBulkImportJobInput { + s.JobName = &v + return s +} + +// SetJobRoleArn sets the JobRoleArn field's value. +func (s *CreateBulkImportJobInput) SetJobRoleArn(v string) *CreateBulkImportJobInput { + s.JobRoleArn = &v + return s +} + +type CreateBulkImportJobOutput struct { + _ struct{} `type:"structure"` + + // The ID of the job. + // + // JobId is a required field + JobId *string `locationName:"jobId" min:"36" type:"string" required:"true"` + + // The unique name that helps identify the job request. + // + // JobName is a required field + JobName *string `locationName:"jobName" min:"1" type:"string" required:"true"` + + // The status of the bulk import job can be one of following values. + // + // * PENDING – IoT SiteWise is waiting for the current bulk import job + // to finish. + // + // * CANCELLED – The bulk import job has been canceled. + // + // * RUNNING – IoT SiteWise is processing your request to import your data + // from Amazon S3. + // + // * COMPLETED – IoT SiteWise successfully completed your request to import + // data from Amazon S3. + // + // * FAILED – IoT SiteWise couldn't process your request to import data + // from Amazon S3. You can use logs saved in the specified error report location + // in Amazon S3 to troubleshoot issues. + // + // * COMPLETED_WITH_FAILURES – IoT SiteWise completed your request to import + // data from Amazon S3 with errors. You can use logs saved in the specified + // error report location in Amazon S3 to troubleshoot issues. + // + // JobStatus is a required field + JobStatus *string `locationName:"jobStatus" type:"string" required:"true" enum:"JobStatus"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateBulkImportJobOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateBulkImportJobOutput) GoString() string { + return s.String() +} + +// SetJobId sets the JobId field's value. +func (s *CreateBulkImportJobOutput) SetJobId(v string) *CreateBulkImportJobOutput { + s.JobId = &v + return s +} + +// SetJobName sets the JobName field's value. +func (s *CreateBulkImportJobOutput) SetJobName(v string) *CreateBulkImportJobOutput { + s.JobName = &v + return s +} + +// SetJobStatus sets the JobStatus field's value. +func (s *CreateBulkImportJobOutput) SetJobStatus(v string) *CreateBulkImportJobOutput { + s.JobStatus = &v + return s +} + type CreateDashboardInput struct { _ struct{} `type:"structure"` @@ -13472,6 +14063,38 @@ func (s *CreateProjectOutput) SetProjectId(v string) *CreateProjectOutput { return s } +// A .csv file. +type Csv struct { + _ struct{} `type:"structure"` + + // The column names specified in the .csv file. + ColumnNames []*string `locationName:"columnNames" type:"list" enum:"ColumnName"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Csv) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Csv) GoString() string { + return s.String() +} + +// SetColumnNames sets the ColumnNames field's value. +func (s *Csv) SetColumnNames(v []*string) *Csv { + s.ColumnNames = v + return s +} + // Contains information about a customer managed Amazon S3 bucket. type CustomerManagedS3Storage struct { _ struct{} `type:"structure"` @@ -15019,6 +15642,198 @@ func (s *DescribeAssetPropertyOutput) SetCompositeModel(v *CompositeModelPropert return s } +type DescribeBulkImportJobInput struct { + _ struct{} `type:"structure" nopayload:"true"` + + // The ID of the job. + // + // JobId is a required field + JobId *string `location:"uri" locationName:"jobId" min:"36" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeBulkImportJobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeBulkImportJobInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeBulkImportJobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeBulkImportJobInput"} + if s.JobId == nil { + invalidParams.Add(request.NewErrParamRequired("JobId")) + } + if s.JobId != nil && len(*s.JobId) < 36 { + invalidParams.Add(request.NewErrParamMinLen("JobId", 36)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetJobId sets the JobId field's value. +func (s *DescribeBulkImportJobInput) SetJobId(v string) *DescribeBulkImportJobInput { + s.JobId = &v + return s +} + +type DescribeBulkImportJobOutput struct { + _ struct{} `type:"structure"` + + // The Amazon S3 destination where errors associated with the job creation request + // are saved. + // + // ErrorReportLocation is a required field + ErrorReportLocation *ErrorReportLocation `locationName:"errorReportLocation" type:"structure" required:"true"` + + // The files in the specified Amazon S3 bucket that contain your data. + // + // Files is a required field + Files []*File `locationName:"files" type:"list" required:"true"` + + // Contains the configuration information of a job, such as the file format + // used to save data in Amazon S3. + // + // JobConfiguration is a required field + JobConfiguration *JobConfiguration `locationName:"jobConfiguration" type:"structure" required:"true"` + + // The date the job was created, in Unix epoch TIME. + // + // JobCreationDate is a required field + JobCreationDate *time.Time `locationName:"jobCreationDate" type:"timestamp" required:"true"` + + // The ID of the job. + // + // JobId is a required field + JobId *string `locationName:"jobId" min:"36" type:"string" required:"true"` + + // The date the job was last updated, in Unix epoch time. + // + // JobLastUpdateDate is a required field + JobLastUpdateDate *time.Time `locationName:"jobLastUpdateDate" type:"timestamp" required:"true"` + + // The unique name that helps identify the job request. 
+ // + // JobName is a required field + JobName *string `locationName:"jobName" min:"1" type:"string" required:"true"` + + // The ARN (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // of the IAM role that allows IoT SiteWise to read Amazon S3 data. + // + // JobRoleArn is a required field + JobRoleArn *string `locationName:"jobRoleArn" min:"1" type:"string" required:"true"` + + // The status of the bulk import job can be one of following values. + // + // * PENDING – IoT SiteWise is waiting for the current bulk import job + // to finish. + // + // * CANCELLED – The bulk import job has been canceled. + // + // * RUNNING – IoT SiteWise is processing your request to import your data + // from Amazon S3. + // + // * COMPLETED – IoT SiteWise successfully completed your request to import + // data from Amazon S3. + // + // * FAILED – IoT SiteWise couldn't process your request to import data + // from Amazon S3. You can use logs saved in the specified error report location + // in Amazon S3 to troubleshoot issues. + // + // * COMPLETED_WITH_FAILURES – IoT SiteWise completed your request to import + // data from Amazon S3 with errors. You can use logs saved in the specified + // error report location in Amazon S3 to troubleshoot issues. + // + // JobStatus is a required field + JobStatus *string `locationName:"jobStatus" type:"string" required:"true" enum:"JobStatus"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeBulkImportJobOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeBulkImportJobOutput) GoString() string { + return s.String() +} + +// SetErrorReportLocation sets the ErrorReportLocation field's value. +func (s *DescribeBulkImportJobOutput) SetErrorReportLocation(v *ErrorReportLocation) *DescribeBulkImportJobOutput { + s.ErrorReportLocation = v + return s +} + +// SetFiles sets the Files field's value. +func (s *DescribeBulkImportJobOutput) SetFiles(v []*File) *DescribeBulkImportJobOutput { + s.Files = v + return s +} + +// SetJobConfiguration sets the JobConfiguration field's value. +func (s *DescribeBulkImportJobOutput) SetJobConfiguration(v *JobConfiguration) *DescribeBulkImportJobOutput { + s.JobConfiguration = v + return s +} + +// SetJobCreationDate sets the JobCreationDate field's value. +func (s *DescribeBulkImportJobOutput) SetJobCreationDate(v time.Time) *DescribeBulkImportJobOutput { + s.JobCreationDate = &v + return s +} + +// SetJobId sets the JobId field's value. +func (s *DescribeBulkImportJobOutput) SetJobId(v string) *DescribeBulkImportJobOutput { + s.JobId = &v + return s +} + +// SetJobLastUpdateDate sets the JobLastUpdateDate field's value. +func (s *DescribeBulkImportJobOutput) SetJobLastUpdateDate(v time.Time) *DescribeBulkImportJobOutput { + s.JobLastUpdateDate = &v + return s +} + +// SetJobName sets the JobName field's value. +func (s *DescribeBulkImportJobOutput) SetJobName(v string) *DescribeBulkImportJobOutput { + s.JobName = &v + return s +} + +// SetJobRoleArn sets the JobRoleArn field's value. +func (s *DescribeBulkImportJobOutput) SetJobRoleArn(v string) *DescribeBulkImportJobOutput { + s.JobRoleArn = &v + return s +} + +// SetJobStatus sets the JobStatus field's value. 
+func (s *DescribeBulkImportJobOutput) SetJobStatus(v string) *DescribeBulkImportJobOutput { + s.JobStatus = &v + return s +} + type DescribeDashboardInput struct { _ struct{} `type:"structure" nopayload:"true"` @@ -16636,6 +17451,76 @@ func (s *ErrorDetails) SetMessage(v string) *ErrorDetails { return s } +// The Amazon S3 destination where errors associated with the job creation request +// are saved. +type ErrorReportLocation struct { + _ struct{} `type:"structure"` + + // The name of the Amazon S3 bucket to which errors associated with the bulk + // import job are sent. + // + // Bucket is a required field + Bucket *string `locationName:"bucket" min:"3" type:"string" required:"true"` + + // Amazon S3 uses the prefix as a folder name to organize data in the bucket. + // Each Amazon S3 object has a key that is its unique identifier in the bucket. + // Each object in a bucket has exactly one key. The prefix must end with a forward + // slash (/). For more information, see Organizing objects using prefixes (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-prefixes.html) + // in the Amazon Simple Storage Service User Guide. + // + // Prefix is a required field + Prefix *string `locationName:"prefix" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ErrorReportLocation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s ErrorReportLocation) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ErrorReportLocation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ErrorReportLocation"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 3 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 3)) + } + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ErrorReportLocation) SetBucket(v string) *ErrorReportLocation { + s.Bucket = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ErrorReportLocation) SetPrefix(v string) *ErrorReportLocation { + s.Prefix = &v + return s +} + // Contains expression variable information. type ExpressionVariable struct { _ struct{} `type:"structure"` @@ -16665,26 +17550,95 @@ func (s ExpressionVariable) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s ExpressionVariable) GoString() string { +func (s ExpressionVariable) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ExpressionVariable) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ExpressionVariable"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + if s.Value != nil { + if err := s.Value.Validate(); err != nil { + invalidParams.AddNested("Value", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *ExpressionVariable) SetName(v string) *ExpressionVariable { + s.Name = &v + return s +} + +// SetValue sets the Value field's value. +func (s *ExpressionVariable) SetValue(v *VariableValue) *ExpressionVariable { + s.Value = v + return s +} + +// The file in Amazon S3 where your data is saved. +type File struct { + _ struct{} `type:"structure"` + + // The name of the Amazon S3 bucket from which data is imported. + // + // Bucket is a required field + Bucket *string `locationName:"bucket" min:"3" type:"string" required:"true"` + + // The key of the Amazon S3 object that contains your data. Each object has + // a key that is a unique identifier. Each object has exactly one key. + // + // Key is a required field + Key *string `locationName:"key" type:"string" required:"true"` + + // The version ID to identify a specific version of the Amazon S3 object that + // contains your data. + VersionId *string `locationName:"versionId" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s File) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s File) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ExpressionVariable) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ExpressionVariable"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) +func (s *File) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "File"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Value == nil { - invalidParams.Add(request.NewErrParamRequired("Value")) + if s.Bucket != nil && len(*s.Bucket) < 3 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 3)) } - if s.Value != nil { - if err := s.Value.Validate(); err != nil { - invalidParams.AddNested("Value", err.(request.ErrInvalidParams)) - } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) } if invalidParams.Len() > 0 { @@ -16693,15 +17647,53 @@ func (s *ExpressionVariable) Validate() error { return nil } -// SetName sets the Name field's value. -func (s *ExpressionVariable) SetName(v string) *ExpressionVariable { - s.Name = &v +// SetBucket sets the Bucket field's value. +func (s *File) SetBucket(v string) *File { + s.Bucket = &v return s } -// SetValue sets the Value field's value. -func (s *ExpressionVariable) SetValue(v *VariableValue) *ExpressionVariable { - s.Value = v +// SetKey sets the Key field's value. +func (s *File) SetKey(v string) *File { + s.Key = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *File) SetVersionId(v string) *File { + s.VersionId = &v + return s +} + +// The file format of the data. 
+type FileFormat struct { + _ struct{} `type:"structure"` + + // The .csv file format. + Csv *Csv `locationName:"csv" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s FileFormat) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s FileFormat) GoString() string { + return s.String() +} + +// SetCsv sets the Csv field's value. +func (s *FileFormat) SetCsv(v *Csv) *FileFormat { + s.Csv = v return s } @@ -18479,6 +19471,129 @@ func (s *InvalidRequestException) RequestID() string { return s.RespMetadata.RequestID } +// Contains the configuration information of a job, such as the file format +// used to save data in Amazon S3. +type JobConfiguration struct { + _ struct{} `type:"structure"` + + // The file format of the data in Amazon S3. + // + // FileFormat is a required field + FileFormat *FileFormat `locationName:"fileFormat" type:"structure" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s JobConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s JobConfiguration) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *JobConfiguration) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "JobConfiguration"}
+	if s.FileFormat == nil {
+		invalidParams.Add(request.NewErrParamRequired("FileFormat"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetFileFormat sets the FileFormat field's value.
+func (s *JobConfiguration) SetFileFormat(v *FileFormat) *JobConfiguration {
+	s.FileFormat = v
+	return s
+}
+
+// Contains job summary information.
+type JobSummary struct {
+	_ struct{} `type:"structure"`
+
+	// The ID of the job.
+	//
+	// Id is a required field
+	Id *string `locationName:"id" min:"36" type:"string" required:"true"`
+
+	// The unique name that helps identify the job request.
+	//
+	// Name is a required field
+	Name *string `locationName:"name" min:"1" type:"string" required:"true"`
+
+	// The status of the bulk import job can be one of the following values.
+	//
+	//    * PENDING – IoT SiteWise is waiting for the current bulk import job
+	//    to finish.
+	//
+	//    * CANCELLED – The bulk import job has been canceled.
+	//
+	//    * RUNNING – IoT SiteWise is processing your request to import your data
+	//    from Amazon S3.
+	//
+	//    * COMPLETED – IoT SiteWise successfully completed your request to import
+	//    data from Amazon S3.
+	//
+	//    * FAILED – IoT SiteWise couldn't process your request to import data
+	//    from Amazon S3. You can use logs saved in the specified error report location
+	//    in Amazon S3 to troubleshoot issues.
+	//
+	//    * COMPLETED_WITH_FAILURES – IoT SiteWise completed your request to import
+	//    data from Amazon S3 with errors. You can use logs saved in the specified
+	//    error report location in Amazon S3 to troubleshoot issues.
+ // + // Status is a required field + Status *string `locationName:"status" type:"string" required:"true" enum:"JobStatus"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s JobSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s JobSummary) GoString() string { + return s.String() +} + +// SetId sets the Id field's value. +func (s *JobSummary) SetId(v string) *JobSummary { + s.Id = &v + return s +} + +// SetName sets the Name field's value. +func (s *JobSummary) SetName(v string) *JobSummary { + s.Name = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *JobSummary) SetStatus(v string) *JobSummary { + s.Status = &v + return s +} + // You've reached the limit for a resource. For example, this can occur if you're // trying to associate more than the allowed number of child assets or attempting // to create more than the allowed number of properties for an asset model. @@ -19228,6 +20343,114 @@ func (s *ListAssociatedAssetsOutput) SetNextToken(v string) *ListAssociatedAsset return s } +type ListBulkImportJobsInput struct { + _ struct{} `type:"structure" nopayload:"true"` + + // You can use a filter to select the bulk import jobs that you want to retrieve. + Filter *string `location:"querystring" locationName:"filter" type:"string" enum:"ListBulkImportJobsFilter"` + + // The maximum number of results to return for each paginated request. 
+ MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` + + // The token to be used for the next set of paginated results. + NextToken *string `location:"querystring" locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBulkImportJobsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBulkImportJobsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListBulkImportJobsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListBulkImportJobsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilter sets the Filter field's value. +func (s *ListBulkImportJobsInput) SetFilter(v string) *ListBulkImportJobsInput { + s.Filter = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListBulkImportJobsInput) SetMaxResults(v int64) *ListBulkImportJobsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *ListBulkImportJobsInput) SetNextToken(v string) *ListBulkImportJobsInput { + s.NextToken = &v + return s +} + +type ListBulkImportJobsOutput struct { + _ struct{} `type:"structure"` + + // One or more job summaries to list. + // + // JobSummaries is a required field + JobSummaries []*JobSummary `locationName:"jobSummaries" type:"list" required:"true"` + + // The token for the next set of results, or null if there are no additional + // results. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBulkImportJobsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBulkImportJobsOutput) GoString() string { + return s.String() +} + +// SetJobSummaries sets the JobSummaries field's value. +func (s *ListBulkImportJobsOutput) SetJobSummaries(v []*JobSummary) *ListBulkImportJobsOutput { + s.JobSummaries = v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *ListBulkImportJobsOutput) SetNextToken(v string) *ListBulkImportJobsOutput { + s.NextToken = &v + return s +} + type ListDashboardsInput struct { _ struct{} `type:"structure" nopayload:"true"` @@ -24427,6 +25650,46 @@ func CapabilitySyncStatus_Values() []string { } } +const ( + // ColumnNameAlias is a ColumnName enum value + ColumnNameAlias = "ALIAS" + + // ColumnNameAssetId is a ColumnName enum value + ColumnNameAssetId = "ASSET_ID" + + // ColumnNamePropertyId is a ColumnName enum value + ColumnNamePropertyId = "PROPERTY_ID" + + // ColumnNameDataType is a ColumnName enum value + ColumnNameDataType = "DATA_TYPE" + + // ColumnNameTimestampSeconds is a ColumnName enum value + ColumnNameTimestampSeconds = "TIMESTAMP_SECONDS" + + // ColumnNameTimestampNanoOffset is a ColumnName enum value + ColumnNameTimestampNanoOffset = "TIMESTAMP_NANO_OFFSET" + + // ColumnNameQuality is a ColumnName enum value + ColumnNameQuality = "QUALITY" + + // ColumnNameValue is a ColumnName enum value + ColumnNameValue = "VALUE" +) + +// ColumnName_Values returns all elements of the ColumnName enum +func ColumnName_Values() []string { + return []string{ + ColumnNameAlias, + ColumnNameAssetId, + ColumnNamePropertyId, + ColumnNameDataType, + ColumnNameTimestampSeconds, + ColumnNameTimestampNanoOffset, + ColumnNameQuality, + ColumnNameValue, + } +} + const ( // ComputeLocationEdge is a ComputeLocation enum value ComputeLocationEdge = "EDGE" @@ -24575,6 +25838,38 @@ func ImageFileType_Values() []string { } } +const ( + // JobStatusPending is a JobStatus enum value + JobStatusPending = "PENDING" + + // JobStatusCancelled is a JobStatus enum value + JobStatusCancelled = "CANCELLED" + + // JobStatusRunning is a JobStatus enum value + JobStatusRunning = "RUNNING" + + // JobStatusCompleted is a JobStatus enum value + JobStatusCompleted = "COMPLETED" + + // JobStatusFailed is a JobStatus enum value + JobStatusFailed = "FAILED" + + // JobStatusCompletedWithFailures is a JobStatus enum value + 
JobStatusCompletedWithFailures = "COMPLETED_WITH_FAILURES" +) + +// JobStatus_Values returns all elements of the JobStatus enum +func JobStatus_Values() []string { + return []string{ + JobStatusPending, + JobStatusCancelled, + JobStatusRunning, + JobStatusCompleted, + JobStatusFailed, + JobStatusCompletedWithFailures, + } +} + const ( // ListAssetsFilterAll is a ListAssetsFilter enum value ListAssetsFilterAll = "ALL" @@ -24591,6 +25886,42 @@ func ListAssetsFilter_Values() []string { } } +const ( + // ListBulkImportJobsFilterAll is a ListBulkImportJobsFilter enum value + ListBulkImportJobsFilterAll = "ALL" + + // ListBulkImportJobsFilterPending is a ListBulkImportJobsFilter enum value + ListBulkImportJobsFilterPending = "PENDING" + + // ListBulkImportJobsFilterRunning is a ListBulkImportJobsFilter enum value + ListBulkImportJobsFilterRunning = "RUNNING" + + // ListBulkImportJobsFilterCancelled is a ListBulkImportJobsFilter enum value + ListBulkImportJobsFilterCancelled = "CANCELLED" + + // ListBulkImportJobsFilterFailed is a ListBulkImportJobsFilter enum value + ListBulkImportJobsFilterFailed = "FAILED" + + // ListBulkImportJobsFilterCompletedWithFailures is a ListBulkImportJobsFilter enum value + ListBulkImportJobsFilterCompletedWithFailures = "COMPLETED_WITH_FAILURES" + + // ListBulkImportJobsFilterCompleted is a ListBulkImportJobsFilter enum value + ListBulkImportJobsFilterCompleted = "COMPLETED" +) + +// ListBulkImportJobsFilter_Values returns all elements of the ListBulkImportJobsFilter enum +func ListBulkImportJobsFilter_Values() []string { + return []string{ + ListBulkImportJobsFilterAll, + ListBulkImportJobsFilterPending, + ListBulkImportJobsFilterRunning, + ListBulkImportJobsFilterCancelled, + ListBulkImportJobsFilterFailed, + ListBulkImportJobsFilterCompletedWithFailures, + ListBulkImportJobsFilterCompleted, + } +} + const ( // ListTimeSeriesTypeAssociated is a ListTimeSeriesType enum value ListTimeSeriesTypeAssociated = "ASSOCIATED" diff --git 
a/service/iotsitewise/iotsitewiseiface/interface.go b/service/iotsitewise/iotsitewiseiface/interface.go index dc0eed705f2..c716abd52bc 100644 --- a/service/iotsitewise/iotsitewiseiface/interface.go +++ b/service/iotsitewise/iotsitewiseiface/interface.go @@ -113,6 +113,10 @@ type IoTSiteWiseAPI interface { CreateAssetModelWithContext(aws.Context, *iotsitewise.CreateAssetModelInput, ...request.Option) (*iotsitewise.CreateAssetModelOutput, error) CreateAssetModelRequest(*iotsitewise.CreateAssetModelInput) (*request.Request, *iotsitewise.CreateAssetModelOutput) + CreateBulkImportJob(*iotsitewise.CreateBulkImportJobInput) (*iotsitewise.CreateBulkImportJobOutput, error) + CreateBulkImportJobWithContext(aws.Context, *iotsitewise.CreateBulkImportJobInput, ...request.Option) (*iotsitewise.CreateBulkImportJobOutput, error) + CreateBulkImportJobRequest(*iotsitewise.CreateBulkImportJobInput) (*request.Request, *iotsitewise.CreateBulkImportJobOutput) + CreateDashboard(*iotsitewise.CreateDashboardInput) (*iotsitewise.CreateDashboardOutput, error) CreateDashboardWithContext(aws.Context, *iotsitewise.CreateDashboardInput, ...request.Option) (*iotsitewise.CreateDashboardOutput, error) CreateDashboardRequest(*iotsitewise.CreateDashboardInput) (*request.Request, *iotsitewise.CreateDashboardOutput) @@ -177,6 +181,10 @@ type IoTSiteWiseAPI interface { DescribeAssetPropertyWithContext(aws.Context, *iotsitewise.DescribeAssetPropertyInput, ...request.Option) (*iotsitewise.DescribeAssetPropertyOutput, error) DescribeAssetPropertyRequest(*iotsitewise.DescribeAssetPropertyInput) (*request.Request, *iotsitewise.DescribeAssetPropertyOutput) + DescribeBulkImportJob(*iotsitewise.DescribeBulkImportJobInput) (*iotsitewise.DescribeBulkImportJobOutput, error) + DescribeBulkImportJobWithContext(aws.Context, *iotsitewise.DescribeBulkImportJobInput, ...request.Option) (*iotsitewise.DescribeBulkImportJobOutput, error) + DescribeBulkImportJobRequest(*iotsitewise.DescribeBulkImportJobInput) 
(*request.Request, *iotsitewise.DescribeBulkImportJobOutput) + DescribeDashboard(*iotsitewise.DescribeDashboardInput) (*iotsitewise.DescribeDashboardOutput, error) DescribeDashboardWithContext(aws.Context, *iotsitewise.DescribeDashboardInput, ...request.Option) (*iotsitewise.DescribeDashboardOutput, error) DescribeDashboardRequest(*iotsitewise.DescribeDashboardInput) (*request.Request, *iotsitewise.DescribeDashboardOutput) @@ -281,6 +289,13 @@ type IoTSiteWiseAPI interface { ListAssociatedAssetsPages(*iotsitewise.ListAssociatedAssetsInput, func(*iotsitewise.ListAssociatedAssetsOutput, bool) bool) error ListAssociatedAssetsPagesWithContext(aws.Context, *iotsitewise.ListAssociatedAssetsInput, func(*iotsitewise.ListAssociatedAssetsOutput, bool) bool, ...request.Option) error + ListBulkImportJobs(*iotsitewise.ListBulkImportJobsInput) (*iotsitewise.ListBulkImportJobsOutput, error) + ListBulkImportJobsWithContext(aws.Context, *iotsitewise.ListBulkImportJobsInput, ...request.Option) (*iotsitewise.ListBulkImportJobsOutput, error) + ListBulkImportJobsRequest(*iotsitewise.ListBulkImportJobsInput) (*request.Request, *iotsitewise.ListBulkImportJobsOutput) + + ListBulkImportJobsPages(*iotsitewise.ListBulkImportJobsInput, func(*iotsitewise.ListBulkImportJobsOutput, bool) bool) error + ListBulkImportJobsPagesWithContext(aws.Context, *iotsitewise.ListBulkImportJobsInput, func(*iotsitewise.ListBulkImportJobsOutput, bool) bool, ...request.Option) error + ListDashboards(*iotsitewise.ListDashboardsInput) (*iotsitewise.ListDashboardsOutput, error) ListDashboardsWithContext(aws.Context, *iotsitewise.ListDashboardsInput, ...request.Option) (*iotsitewise.ListDashboardsOutput, error) ListDashboardsRequest(*iotsitewise.ListDashboardsInput) (*request.Request, *iotsitewise.ListDashboardsOutput) diff --git a/service/kendra/api.go b/service/kendra/api.go index 5ce955351a4..6a5b6cc4bb2 100644 --- a/service/kendra/api.go +++ b/service/kendra/api.go @@ -642,13 +642,13 @@ func (c *Kendra) 
CreateAccessControlConfigurationRequest(input *CreateAccessCont // without indexing all of your documents again. For example, your index contains // top-secret company documents that only certain employees or users should // access. One of these users leaves the company or switches to a team that -// should be blocked from access to top-secret documents. Your documents in -// your index still give this user access to top-secret documents due to the -// user having access at the time your documents were indexed. You can create -// a specific access control configuration for this user with deny access. You -// can later update the access control configuration to allow access in the -// case the user returns to the company and re-joins the 'top-secret' team. -// You can re-configure access control for your documents circumstances change. +// should be blocked from accessing top-secret documents. The user still has +// access to top-secret documents because the user had access when your documents +// were previously indexed. You can create a specific access control configuration +// for the user with deny access. You can later update the access control configuration +// to allow access if the user returns to the company and re-joins the 'top-secret' +// team. You can re-configure access control for your documents as circumstances +// change. // // To apply your access control configuration to certain documents, you call // the BatchPutDocument (https://docs.aws.amazon.com/kendra/latest/dg/API_BatchPutDocument.html) @@ -5677,9 +5677,9 @@ func (c *Kendra) UpdateAccessControlConfigurationRequest(input *UpdateAccessCont // API to apply the updated access control configuration, with the AccessControlConfigurationId // included in the Document (https://docs.aws.amazon.com/kendra/latest/dg/API_Document.html) // object. If you use an S3 bucket as a data source, you synchronize your data -// source to apply the the AccessControlConfigurationId in the .metadata.json -// file. 
Amazon Kendra currently only supports access control configuration -// for S3 data sources and documents indexed using the BatchPutDocument API. +// source to apply the AccessControlConfigurationId in the .metadata.json file. +// Amazon Kendra currently only supports access control configuration for S3 +// data sources and documents indexed using the BatchPutDocument API. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -18925,7 +18925,7 @@ type ListAccessControlConfigurationsInput struct { // The maximum number of access control configurations to return. MaxResults *int64 `min:"1" type:"integer"` - // If the previous response was incomplete (because there is more data to retrieve), + // If the previous response was incomplete (because there's more data to retrieve), // Amazon Kendra returns a pagination token in the response. You can use this // pagination token to retrieve the next set of access control configurations. NextToken *string `min:"1" type:"string"` @@ -18997,8 +18997,8 @@ type ListAccessControlConfigurationsOutput struct { // AccessControlConfigurations is a required field AccessControlConfigurations []*AccessControlConfigurationSummary `type:"list" required:"true"` - // If the response is truncated, Amazon Kendra returns this token that you can - // use in the subsequent request to retrieve the next set of access control + // If the response is truncated, Amazon Kendra returns this token, which you + // can use in the subsequent request to retrieve the next set of access control // configurations. 
NextToken *string `min:"1" type:"string"` } @@ -24141,6 +24141,11 @@ func (s *ServiceQuotaExceededException) RequestID() string { type SharePointConfiguration struct { _ struct{} `type:"structure"` + // Whether you want to connect to SharePoint using basic authentication of user + // name and password, or OAuth authentication of user name, password, client + // ID, and client secret. You can use OAuth authentication for SharePoint Online. + AuthenticationType *string `type:"string" enum:"SharePointOnlineAuthenticationType"` + // TRUE to index document attachments. CrawlAttachments *bool `type:"boolean"` @@ -24182,6 +24187,10 @@ type SharePointConfiguration struct { // as part of the credentials. For more information, see Using a Microsoft SharePoint // Data Source (https://docs.aws.amazon.com/kendra/latest/dg/data-source-sharepoint.html). // + // You can also provide OAuth authentication credentials of user name, password, + // client ID, and client secret. For more information, see Authentication for + // a SharePoint data source (https://docs.aws.amazon.com/kendra/latest/dg/data-source-sharepoint.html#sharepoint-authentication). + // // SecretArn is a required field SecretArn *string `min:"1" type:"string" required:"true"` @@ -24280,6 +24289,12 @@ func (s *SharePointConfiguration) Validate() error { return nil } +// SetAuthenticationType sets the AuthenticationType field's value. +func (s *SharePointConfiguration) SetAuthenticationType(v string) *SharePointConfiguration { + s.AuthenticationType = &v + return s +} + // SetCrawlAttachments sets the CrawlAttachments field's value. 
func (s *SharePointConfiguration) SetCrawlAttachments(v bool) *SharePointConfiguration { s.CrawlAttachments = &v @@ -29039,6 +29054,22 @@ func ServiceNowBuildVersionType_Values() []string { } } +const ( + // SharePointOnlineAuthenticationTypeHttpBasic is a SharePointOnlineAuthenticationType enum value + SharePointOnlineAuthenticationTypeHttpBasic = "HTTP_BASIC" + + // SharePointOnlineAuthenticationTypeOauth2 is a SharePointOnlineAuthenticationType enum value + SharePointOnlineAuthenticationTypeOauth2 = "OAUTH2" +) + +// SharePointOnlineAuthenticationType_Values returns all elements of the SharePointOnlineAuthenticationType enum +func SharePointOnlineAuthenticationType_Values() []string { + return []string{ + SharePointOnlineAuthenticationTypeHttpBasic, + SharePointOnlineAuthenticationTypeOauth2, + } +} + const ( // SharePointVersionSharepoint2013 is a SharePointVersion enum value SharePointVersionSharepoint2013 = "SHAREPOINT_2013" diff --git a/service/networkfirewall/api.go b/service/networkfirewall/api.go index 3212195836b..31d9a6a2030 100644 --- a/service/networkfirewall/api.go +++ b/service/networkfirewall/api.go @@ -4026,6 +4026,93 @@ func (s *Attachment) SetSubnetId(v string) *Attachment { return s } +// Summarizes the CIDR blocks used by the IP set references in a firewall. Network +// Firewall calculates the number of CIDRs by taking an aggregated count of +// all CIDRs used by the IP sets you are referencing. +type CIDRSummary struct { + _ struct{} `type:"structure"` + + // The number of CIDR blocks available for use by the IP set references in a + // firewall. + AvailableCIDRCount *int64 `type:"integer"` + + // The list of the IP set references used by a firewall. + IPSetReferences map[string]*IPSetMetadata `type:"map"` + + // The number of CIDR blocks used by the IP set references in a firewall. + UtilizedCIDRCount *int64 `type:"integer"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CIDRSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CIDRSummary) GoString() string { + return s.String() +} + +// SetAvailableCIDRCount sets the AvailableCIDRCount field's value. +func (s *CIDRSummary) SetAvailableCIDRCount(v int64) *CIDRSummary { + s.AvailableCIDRCount = &v + return s +} + +// SetIPSetReferences sets the IPSetReferences field's value. +func (s *CIDRSummary) SetIPSetReferences(v map[string]*IPSetMetadata) *CIDRSummary { + s.IPSetReferences = v + return s +} + +// SetUtilizedCIDRCount sets the UtilizedCIDRCount field's value. +func (s *CIDRSummary) SetUtilizedCIDRCount(v int64) *CIDRSummary { + s.UtilizedCIDRCount = &v + return s +} + +// The capacity usage summary of the resources used by the ReferenceSets in +// a firewall. +type CapacityUsageSummary struct { + _ struct{} `type:"structure"` + + // Describes the capacity usage of the CIDR blocks used by the IP set references + // in a firewall. + CIDRs *CIDRSummary `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CapacityUsageSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CapacityUsageSummary) GoString() string {
+	return s.String()
+}
+
+// SetCIDRs sets the CIDRs field's value.
+func (s *CapacityUsageSummary) SetCIDRs(v *CIDRSummary) *CapacityUsageSummary {
+	s.CIDRs = v
+	return s
+}
+
 type CreateFirewallInput struct {
 	_ struct{} `type:"structure"`
 
@@ -6861,6 +6948,11 @@ func (s *FirewallPolicyResponse) SetTags(v []*Tag) *FirewallPolicyResponse {
 type FirewallStatus struct {
 	_ struct{} `type:"structure"`
 
+	// Describes the capacity usage of the resources contained in a firewall's reference
+	// sets. Network Firewall calculates the capacity usage by taking an aggregated
+	// count of all of the resources used by all of the reference sets in a firewall.
+	CapacityUsageSummary *CapacityUsageSummary `type:"structure"`
+
 	// The configuration sync state for the firewall. This summarizes the sync states
 	// reported in the Config settings for all of the Availability Zones where you
 	// have configured the firewall.
@@ -6911,6 +7003,12 @@ func (s FirewallStatus) GoString() string {
 	return s.String()
 }
 
+// SetCapacityUsageSummary sets the CapacityUsageSummary field's value.
+func (s *FirewallStatus) SetCapacityUsageSummary(v *CapacityUsageSummary) *FirewallStatus {
+	s.CapacityUsageSummary = v
+	return s
+}
+
 // SetConfigurationSyncStateSummary sets the ConfigurationSyncStateSummary field's value.
 func (s *FirewallStatus) SetConfigurationSyncStateSummary(v string) *FirewallStatus {
 	s.ConfigurationSyncStateSummary = &v
@@ -7148,6 +7246,98 @@ func (s *IPSet) SetDefinition(v []*string) *IPSet {
 	return s
 }
 
+// General information about the IP set.
+type IPSetMetadata struct {
+	_ struct{} `type:"structure"`
+
+	// Describes the total number of CIDR blocks currently in use by the IP set
+	// references in a firewall. To determine how many CIDR blocks are available
+	// for you to use in a firewall, you can call AvailableCIDRCount.
+ ResolvedCIDRCount *int64 `type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s IPSetMetadata) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s IPSetMetadata) GoString() string { + return s.String() +} + +// SetResolvedCIDRCount sets the ResolvedCIDRCount field's value. +func (s *IPSetMetadata) SetResolvedCIDRCount(v int64) *IPSetMetadata { + s.ResolvedCIDRCount = &v + return s +} + +// Configures one or more IP set references for a Suricata-compatible rule group. +// This is used in CreateRuleGroup or UpdateRuleGroup. An IP set reference is +// a rule variable that references a resource that you create and manage in +// another Amazon Web Services service, such as an Amazon VPC prefix list. Network +// Firewall IP set references enable you to dynamically update the contents +// of your rules. When you create, update, or delete the IP set you are referencing +// in your rule, Network Firewall automatically updates the rule's content with +// the changes. For more information about IP set references in Network Firewall, +// see Using IP set references (https://docs.aws.amazon.com/network-firewall/latest/developerguide/rule-groups-ip-set-references) +// in the Network Firewall Developer Guide. +// +// Network Firewall currently supports only Amazon VPC prefix lists (https://docs.aws.amazon.com/vpc/latest/userguide/managed-prefix-lists.html) +// as IP set references. 
+type IPSetReference struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource that you are referencing in + // your rule group. + ReferenceArn *string `min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s IPSetReference) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s IPSetReference) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *IPSetReference) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IPSetReference"} + if s.ReferenceArn != nil && len(*s.ReferenceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ReferenceArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetReferenceArn sets the ReferenceArn field's value. +func (s *IPSetReference) SetReferenceArn(v string) *IPSetReference { + s.ReferenceArn = &v + return s +} + // Amazon Web Services doesn't currently have enough available capacity to fulfill // your request. Try your request later. type InsufficientCapacityException struct { @@ -8796,6 +8986,58 @@ func (s PutResourcePolicyOutput) GoString() string { return s.String() } +// Contains a set of IP set references. +type ReferenceSets struct { + _ struct{} `type:"structure"` + + // The list of IP set references. + IPSetReferences map[string]*IPSetReference `type:"map"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReferenceSets) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReferenceSets) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReferenceSets) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReferenceSets"} + if s.IPSetReferences != nil { + for i, v := range s.IPSetReferences { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "IPSetReferences", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIPSetReferences sets the IPSetReferences field's value. +func (s *ReferenceSets) SetIPSetReferences(v map[string]*IPSetReference) *ReferenceSets { + s.IPSetReferences = v + return s +} + // Unable to locate a resource using the parameters that you provided. type ResourceNotFoundException struct { _ struct{} `type:"structure"` @@ -9036,6 +9278,9 @@ func (s *RuleDefinition) SetMatchAttributes(v *MatchAttributes) *RuleDefinition type RuleGroup struct { _ struct{} `type:"structure"` + // The list of a rule group's reference sets. + ReferenceSets *ReferenceSets `type:"structure"` + // Settings that are available for use in the rules in the rule group. You can // only use these for stateful rule groups. 
RuleVariables *RuleVariables `type:"structure"` @@ -9075,6 +9320,11 @@ func (s *RuleGroup) Validate() error { if s.RulesSource == nil { invalidParams.Add(request.NewErrParamRequired("RulesSource")) } + if s.ReferenceSets != nil { + if err := s.ReferenceSets.Validate(); err != nil { + invalidParams.AddNested("ReferenceSets", err.(request.ErrInvalidParams)) + } + } if s.RuleVariables != nil { if err := s.RuleVariables.Validate(); err != nil { invalidParams.AddNested("RuleVariables", err.(request.ErrInvalidParams)) @@ -9092,6 +9342,12 @@ func (s *RuleGroup) Validate() error { return nil } +// SetReferenceSets sets the ReferenceSets field's value. +func (s *RuleGroup) SetReferenceSets(v *ReferenceSets) *RuleGroup { + s.ReferenceSets = v + return s +} + // SetRuleVariables sets the RuleVariables field's value. func (s *RuleGroup) SetRuleVariables(v *RuleVariables) *RuleGroup { s.RuleVariables = v @@ -12300,6 +12556,9 @@ const ( // ConfigurationSyncStateInSync is a ConfigurationSyncState enum value ConfigurationSyncStateInSync = "IN_SYNC" + + // ConfigurationSyncStateCapacityConstrained is a ConfigurationSyncState enum value + ConfigurationSyncStateCapacityConstrained = "CAPACITY_CONSTRAINED" ) // ConfigurationSyncState_Values returns all elements of the ConfigurationSyncState enum @@ -12307,6 +12566,7 @@ func ConfigurationSyncState_Values() []string { return []string{ ConfigurationSyncStatePending, ConfigurationSyncStateInSync, + ConfigurationSyncStateCapacityConstrained, } } @@ -12416,6 +12676,9 @@ const ( // PerObjectSyncStatusInSync is a PerObjectSyncStatus enum value PerObjectSyncStatusInSync = "IN_SYNC" + + // PerObjectSyncStatusCapacityConstrained is a PerObjectSyncStatus enum value + PerObjectSyncStatusCapacityConstrained = "CAPACITY_CONSTRAINED" ) // PerObjectSyncStatus_Values returns all elements of the PerObjectSyncStatus enum @@ -12423,6 +12686,7 @@ func PerObjectSyncStatus_Values() []string { return []string{ PerObjectSyncStatusPending, 
PerObjectSyncStatusInSync, + PerObjectSyncStatusCapacityConstrained, } } diff --git a/service/rds/api.go b/service/rds/api.go index fe32dcc7cea..b038c91a2c0 100644 --- a/service/rds/api.go +++ b/service/rds/api.go @@ -619,7 +619,7 @@ func (c *RDS) BacktrackDBClusterRequest(input *BacktrackDBClusterInput) (req *re // (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraMySQL.Managing.Backtrack.html) // in the Amazon Aurora User Guide. // -// This action only applies to Aurora MySQL DB clusters. +// This action applies only to Aurora MySQL DB clusters. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -877,50 +877,21 @@ func (c *RDS) CopyDBClusterSnapshotRequest(input *CopyDBClusterSnapshotInput) (r // // You can copy an encrypted DB cluster snapshot from another Amazon Web Services // Region. In that case, the Amazon Web Services Region where you call the CopyDBClusterSnapshot -// action is the destination Amazon Web Services Region for the encrypted DB -// cluster snapshot to be copied to. To copy an encrypted DB cluster snapshot +// operation is the destination Amazon Web Services Region for the encrypted +// DB cluster snapshot to be copied to. To copy an encrypted DB cluster snapshot // from another Amazon Web Services Region, you must provide the following values: // // * KmsKeyId - The Amazon Web Services Key Management System (Amazon Web // Services KMS) key identifier for the key to use to encrypt the copy of // the DB cluster snapshot in the destination Amazon Web Services Region. // -// * PreSignedUrl - A URL that contains a Signature Version 4 signed request -// for the CopyDBClusterSnapshot action to be called in the source Amazon -// Web Services Region where the DB cluster snapshot is copied from. 
The -// pre-signed URL must be a valid request for the CopyDBClusterSnapshot API -// action that can be executed in the source Amazon Web Services Region that -// contains the encrypted DB cluster snapshot to be copied. The pre-signed -// URL request must contain the following parameter values: KmsKeyId - The -// Amazon Web Services KMS key identifier for the KMS key to use to encrypt -// the copy of the DB cluster snapshot in the destination Amazon Web Services -// Region. This is the same identifier for both the CopyDBClusterSnapshot -// action that is called in the destination Amazon Web Services Region, and -// the action contained in the pre-signed URL. DestinationRegion - The name -// of the Amazon Web Services Region that the DB cluster snapshot is to be -// created in. SourceDBClusterSnapshotIdentifier - The DB cluster snapshot -// identifier for the encrypted DB cluster snapshot to be copied. This identifier -// must be in the Amazon Resource Name (ARN) format for the source Amazon -// Web Services Region. For example, if you are copying an encrypted DB cluster -// snapshot from the us-west-2 Amazon Web Services Region, then your SourceDBClusterSnapshotIdentifier -// looks like the following example: arn:aws:rds:us-west-2:123456789012:cluster-snapshot:aurora-cluster1-snapshot-20161115. -// To learn how to generate a Signature Version 4 signed request, see Authenticating -// Requests: Using Query Parameters (Amazon Web Services Signature Version -// 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html) -// and Signature Version 4 Signing Process (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). -// If you are using an Amazon Web Services SDK tool or the CLI, you can specify -// SourceRegion (or --source-region for the CLI) instead of specifying PreSignedUrl -// manually. 
Specifying SourceRegion autogenerates a pre-signed URL that -// is a valid request for the operation that can be executed in the source -// Amazon Web Services Region. -// // * TargetDBClusterSnapshotIdentifier - The identifier for the new copy // of the DB cluster snapshot in the destination Amazon Web Services Region. // // * SourceDBClusterSnapshotIdentifier - The DB cluster snapshot identifier // for the encrypted DB cluster snapshot to be copied. This identifier must // be in the ARN format for the source Amazon Web Services Region and is -// the same value as the SourceDBClusterSnapshotIdentifier in the pre-signed +// the same value as the SourceDBClusterSnapshotIdentifier in the presigned // URL. // // To cancel the copy operation once it is in progress, delete the target DB @@ -1122,7 +1093,7 @@ func (c *RDS) CopyDBSnapshotRequest(input *CopyDBSnapshotInput) (req *request.Re // // You can copy a snapshot from one Amazon Web Services Region to another. In // that case, the Amazon Web Services Region where you call the CopyDBSnapshot -// action is the destination Amazon Web Services Region for the DB snapshot +// operation is the destination Amazon Web Services Region for the DB snapshot // copy. // // This command doesn't apply to RDS Custom. @@ -1435,9 +1406,7 @@ func (c *RDS) CreateDBClusterRequest(input *CreateDBClusterInput) (req *request. // // You can use the ReplicationSourceIdentifier parameter to create an Amazon // Aurora DB cluster as a read replica of another DB cluster or Amazon RDS MySQL -// or PostgreSQL DB instance. For cross-Region replication where the DB cluster -// identified by ReplicationSourceIdentifier is encrypted, also specify the -// PreSignedUrl parameter. +// or PostgreSQL DB instance. // // For more information on Amazon Aurora, see What is Amazon Aurora? (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html) // in the Amazon Aurora User Guide. 
@@ -1587,7 +1556,7 @@ func (c *RDS) CreateDBClusterEndpointRequest(input *CreateDBClusterEndpointInput // Creates a new custom endpoint and associates it with an Amazon Aurora DB // cluster. // -// This action only applies to Aurora DB clusters. +// This action applies only to Aurora DB clusters. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1709,8 +1678,8 @@ func (c *RDS) CreateDBClusterParameterGroupRequest(input *CreateDBClusterParamet // character set for the default database defined by the character_set_database // parameter. You can use the Parameter Groups option of the Amazon RDS console // (https://console.aws.amazon.com/rds/) or the DescribeDBClusterParameters -// action to verify that your DB cluster parameter group has been created or -// modified. +// operation to verify that your DB cluster parameter group has been created +// or modified. // // For more information on Amazon Aurora, see What is Amazon Aurora? (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html) // in the Amazon Aurora User Guide. @@ -1900,6 +1869,18 @@ func (c *RDS) CreateDBInstanceRequest(input *CreateDBInstanceInput) (req *reques // // Creates a new DB instance. // +// The new DB instance can be an RDS DB instance, or it can be a DB instance +// in an Aurora DB cluster. For an Aurora DB cluster, you can call this operation +// multiple times to add more than one DB instance to the cluster. +// +// For more information about creating an RDS DB instance, see Creating an Amazon +// RDS DB instance (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_CreateDBInstance.html) +// in the Amazon RDS User Guide. 
+// +// For more information about creating a DB instance in an Aurora DB cluster, +// see Creating an Amazon Aurora DB cluster (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.CreateInstance.html) +// in the Amazon Aurora User Guide. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -2049,7 +2030,7 @@ func (c *RDS) CreateDBInstanceReadReplicaRequest(input *CreateDBInstanceReadRepl // with Read Replicas (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_ReadRepl.html) // in the Amazon RDS User Guide. // -// Amazon Aurora doesn't support this action. Call the CreateDBInstance action +// Amazon Aurora doesn't support this operation. Call the CreateDBInstance operation // to create a DB instance for an Aurora DB cluster. // // All read replica DB instances are created with backups disabled. All other @@ -2767,10 +2748,11 @@ func (c *RDS) CreateEventSubscriptionRequest(input *CreateEventSubscriptionInput // CreateEventSubscription API operation for Amazon Relational Database Service. // -// Creates an RDS event notification subscription. This action requires a topic -// Amazon Resource Name (ARN) created by either the RDS console, the SNS console, -// or the SNS API. To obtain an ARN with SNS, you must create a topic in Amazon -// SNS and subscribe to the topic. The ARN is displayed in the SNS console. +// Creates an RDS event notification subscription. This operation requires a +// topic Amazon Resource Name (ARN) created by either the RDS console, the SNS +// console, or the SNS API. To obtain an ARN with SNS, you must create a topic +// in Amazon SNS and subscribe to the topic. The ARN is displayed in the SNS +// console. // // You can specify the type of source (SourceType) that you want to be notified // of and provide a list of RDS sources (SourceIds) that triggers the events. 
@@ -2896,7 +2878,7 @@ func (c *RDS) CreateGlobalClusterRequest(input *CreateGlobalClusterInput) (req * // Aurora cluster during the create operation, and this cluster becomes the // primary cluster of the global database. // -// This action only applies to Aurora DB clusters. +// This action applies only to Aurora DB clusters. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -8141,6 +8123,11 @@ func (c *RDS) DescribeEventsRequest(input *DescribeEventsInput) (req *request.Re // DB parameter group, DB security group, DB snapshot, DB cluster snapshot group, // or RDS Proxy can be obtained by providing the name as a parameter. // +// For more information on working with events, see Monitoring Amazon RDS events +// (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/working-with-events.html) +// in the Amazon RDS User Guide and Monitoring Amazon Aurora events (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/working-with-events.html) +// in the Amazon Aurora User Guide. +// // By default, RDS returns events that were generated in the past hour. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -9738,7 +9725,7 @@ func (c *RDS) FailoverDBClusterRequest(input *FailoverDBClusterInput) (req *requ // // An Amazon Aurora DB cluster automatically fails over to an Aurora Replica, // if one exists, when the primary DB instance fails. A Multi-AZ DB cluster -// automatically fails over to a readbable standby DB instance when the primary +// automatically fails over to a readable standby DB instance when the primary // DB instance fails. 
// // To simulate a failure of a primary instance for testing, you can force a @@ -10577,7 +10564,7 @@ func (c *RDS) ModifyDBClusterParameterGroupRequest(input *ModifyDBClusterParamet // when creating the default database for a DB cluster, such as the character // set for the default database defined by the character_set_database parameter. // You can use the Parameter Groups option of the Amazon RDS console (https://console.aws.amazon.com/rds/) -// or the DescribeDBClusterParameters action to verify that your DB cluster +// or the DescribeDBClusterParameters operation to verify that your DB cluster // parameter group has been created or modified. // // If the modified DB cluster parameter group is used by an Aurora Serverless @@ -10697,7 +10684,7 @@ func (c *RDS) ModifyDBClusterSnapshotAttributeRequest(input *ModifyDBClusterSnap // // To view which Amazon Web Services accounts have access to copy or restore // a manual DB cluster snapshot, or whether a manual DB cluster snapshot is -// public or private, use the DescribeDBClusterSnapshotAttributes API action. +// public or private, use the DescribeDBClusterSnapshotAttributes API operation. // The accounts are returned as values for the restore attribute. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -11394,7 +11381,7 @@ func (c *RDS) ModifyDBSnapshotAttributeRequest(input *ModifyDBSnapshotAttributeI // // To view which Amazon Web Services accounts have access to copy or restore // a manual DB snapshot, or whether a manual DB snapshot public or private, -// use the DescribeDBSnapshotAttributes API action. The accounts are returned +// use the DescribeDBSnapshotAttributes API operation. The accounts are returned // as values for the restore attribute. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions @@ -12228,6 +12215,9 @@ func (c *RDS) RebootDBInstanceRequest(input *RebootDBInstanceInput) (req *reques // // This command doesn't apply to RDS Custom. // +// If your DB instance is part of a Multi-AZ DB cluster, you can reboot the +// DB cluster with the RebootDBCluster operation. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -14913,7 +14903,7 @@ func (c *RDS) StopDBInstanceAutomatedBackupsReplicationRequest(input *StopDBInst // // Stops automated backup replication for a DB instance. // -// This command doesn't apply to RDS Custom. +// This command doesn't apply to RDS Custom, Aurora MySQL, and Aurora PostgreSQL. // // For more information, see Replicating Automated Backups to Another Amazon // Web Services Region (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_ReplicateBackups.html) @@ -16853,23 +16843,27 @@ type CopyDBClusterSnapshotInput struct { // KmsKeyId parameter, an error is returned. KmsKeyId *string `type:"string"` - // The URL that contains a Signature Version 4 signed request for the CopyDBClusterSnapshot - // API action in the Amazon Web Services Region that contains the source DB - // cluster snapshot to copy. The PreSignedUrl parameter must be used when copying - // an encrypted DB cluster snapshot from another Amazon Web Services Region. - // Don't specify PreSignedUrl when you are copying an encrypted DB cluster snapshot - // in the same Amazon Web Services Region. + // When you are copying a DB cluster snapshot from one Amazon Web Services GovCloud + // (US) Region to another, the URL that contains a Signature Version 4 signed + // request for the CopyDBClusterSnapshot API operation in the Amazon Web Services + // Region that contains the source DB cluster snapshot to copy. 
Use the PreSignedUrl + // parameter when copying an encrypted DB cluster snapshot from another Amazon + // Web Services Region. Don't specify PreSignedUrl when copying an encrypted + // DB cluster snapshot in the same Amazon Web Services Region. + // + // This setting applies only to Amazon Web Services GovCloud (US) Regions. It's + // ignored in other Amazon Web Services Regions. // - // The pre-signed URL must be a valid request for the CopyDBClusterSnapshot - // API action that can be executed in the source Amazon Web Services Region - // that contains the encrypted DB cluster snapshot to be copied. The pre-signed - // URL request must contain the following parameter values: + // The presigned URL must be a valid request for the CopyDBClusterSnapshot API + // operation that can run in the source Amazon Web Services Region that contains + // the encrypted DB cluster snapshot to copy. The presigned URL request must + // contain the following parameter values: // - // * KmsKeyId - The Amazon Web Services KMS key identifier for the KMS key - // to use to encrypt the copy of the DB cluster snapshot in the destination - // Amazon Web Services Region. This is the same identifier for both the CopyDBClusterSnapshot - // action that is called in the destination Amazon Web Services Region, and - // the action contained in the pre-signed URL. + // * KmsKeyId - The KMS key identifier for the KMS key to use to encrypt + // the copy of the DB cluster snapshot in the destination Amazon Web Services + // Region. This is the same identifier for both the CopyDBClusterSnapshot + // operation that is called in the destination Amazon Web Services Region, + // and the operation contained in the presigned URL. // // * DestinationRegion - The name of the Amazon Web Services Region that // the DB cluster snapshot is to be created in. 
@@ -16888,9 +16882,9 @@ type CopyDBClusterSnapshotInput struct { // // If you are using an Amazon Web Services SDK tool or the CLI, you can specify // SourceRegion (or --source-region for the CLI) instead of specifying PreSignedUrl - // manually. Specifying SourceRegion autogenerates a pre-signed URL that is - // a valid request for the operation that can be executed in the source Amazon - // Web Services Region. + // manually. Specifying SourceRegion autogenerates a presigned URL that is a + // valid request for the operation that can run in the source Amazon Web Services + // Region. PreSignedUrl *string `type:"string"` // The identifier of the DB cluster snapshot to copy. This parameter isn't case-sensitive. @@ -17236,36 +17230,40 @@ type CopyDBSnapshotInput struct { // in the Amazon RDS User Guide. OptionGroupName *string `type:"string"` - // The URL that contains a Signature Version 4 signed request for the CopyDBSnapshot - // API action in the source Amazon Web Services Region that contains the source - // DB snapshot to copy. + // When you are copying a snapshot from one Amazon Web Services GovCloud (US) + // Region to another, the URL that contains a Signature Version 4 signed request + // for the CopyDBSnapshot API operation in the source Amazon Web Services Region + // that contains the source DB snapshot to copy. + // + // This setting applies only to Amazon Web Services GovCloud (US) Regions. It's + // ignored in other Amazon Web Services Regions. // // You must specify this parameter when you copy an encrypted DB snapshot from // another Amazon Web Services Region by using the Amazon RDS API. Don't specify // PreSignedUrl when you are copying an encrypted DB snapshot in the same Amazon // Web Services Region. // - // The presigned URL must be a valid request for the CopyDBSnapshot API action - // that can be executed in the source Amazon Web Services Region that contains - // the encrypted DB snapshot to be copied. 
The presigned URL request must contain - // the following parameter values: + // The presigned URL must be a valid request for the CopyDBClusterSnapshot API + // operation that can run in the source Amazon Web Services Region that contains + // the encrypted DB cluster snapshot to copy. The presigned URL request must + // contain the following parameter values: // // * DestinationRegion - The Amazon Web Services Region that the encrypted // DB snapshot is copied to. This Amazon Web Services Region is the same - // one where the CopyDBSnapshot action is called that contains this presigned + // one where the CopyDBSnapshot operation is called that contains this presigned // URL. For example, if you copy an encrypted DB snapshot from the us-west-2 // Amazon Web Services Region to the us-east-1 Amazon Web Services Region, - // then you call the CopyDBSnapshot action in the us-east-1 Amazon Web Services - // Region and provide a presigned URL that contains a call to the CopyDBSnapshot - // action in the us-west-2 Amazon Web Services Region. For this example, - // the DestinationRegion in the presigned URL must be set to the us-east-1 - // Amazon Web Services Region. - // - // * KmsKeyId - The Amazon Web Services KMS key identifier for the KMS key - // to use to encrypt the copy of the DB snapshot in the destination Amazon - // Web Services Region. This is the same identifier for both the CopyDBSnapshot - // action that is called in the destination Amazon Web Services Region, and - // the action contained in the presigned URL. + // then you call the CopyDBSnapshot operation in the us-east-1 Amazon Web + // Services Region and provide a presigned URL that contains a call to the + // CopyDBSnapshot operation in the us-west-2 Amazon Web Services Region. + // For this example, the DestinationRegion in the presigned URL must be set + // to the us-east-1 Amazon Web Services Region. 
+ // + // * KmsKeyId - The KMS key identifier for the KMS key to use to encrypt + // the copy of the DB snapshot in the destination Amazon Web Services Region. + // This is the same identifier for both the CopyDBSnapshot operation that + // is called in the destination Amazon Web Services Region, and the operation + // contained in the presigned URL. // // * SourceDBSnapshotIdentifier - The DB snapshot identifier for the encrypted // snapshot to be copied. This identifier must be in the Amazon Resource @@ -17281,9 +17279,9 @@ type CopyDBSnapshotInput struct { // // If you are using an Amazon Web Services SDK tool or the CLI, you can specify // SourceRegion (or --source-region for the CLI) instead of specifying PreSignedUrl - // manually. Specifying SourceRegion autogenerates a pre-signed URL that is - // a valid request for the operation that can be executed in the source Amazon - // Web Services Region. + // manually. Specifying SourceRegion autogenerates a presigned URL that is a + // valid request for the operation that can run in the source Amazon Web Services + // Region. PreSignedUrl *string `type:"string"` // The identifier for the source DB snapshot. @@ -17299,8 +17297,7 @@ type CopyDBSnapshotInput struct { // be the Amazon Resource Name (ARN) of the shared DB snapshot. // // If you are copying an encrypted snapshot this parameter must be in the ARN - // format for the source Amazon Web Services Region, and must match the SourceDBSnapshotIdentifier - // in the PreSignedUrl parameter. + // format for the source Amazon Web Services Region. // // Constraints: // @@ -18539,7 +18536,7 @@ type CreateDBClusterInput struct { // isn't enabled. // // For more information, see IAM Database Authentication (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.IAMDBAuth.html) - // in the Amazon Aurora User Guide.. + // in the Amazon Aurora User Guide. 
// // Valid for: Aurora DB clusters only EnableIAMDatabaseAuthentication *bool `type:"boolean"` @@ -18584,6 +18581,8 @@ type CreateDBClusterInput struct { // The multimaster engine mode only applies for DB clusters created with Aurora // MySQL version 5.6.10a. // + // The serverless engine mode only applies for Aurora Serverless v1 DB clusters. + // // For Aurora PostgreSQL, the global engine mode isn't required, and both the // parallelquery and the multimaster engine modes currently aren't supported. // @@ -18592,6 +18591,8 @@ type CreateDBClusterInput struct { // // * Limitations of Aurora Serverless v1 (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.html#aurora-serverless.limitations) // + // * Requirements for Aurora Serverless v2 (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless-v2.requirements.html) + // // * Limitations of Parallel Query (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-mysql-parallel-query.html#aurora-mysql-parallel-query-limitations) // // * Limitations of Aurora Global Databases (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-global-database.html#aurora-global-database.limitations) @@ -18765,8 +18766,27 @@ type CreateDBClusterInput struct { // Valid for: Multi-AZ DB clusters only PerformanceInsightsKMSKeyId *string `type:"string"` - // The amount of time, in days, to retain Performance Insights data. Valid values - // are 7 or 731 (2 years). + // The number of days to retain Performance Insights data. The default is 7 + // days. The following values are valid: + // + // * 7 + // + // * month * 31, where month is a number of months from 1-23 + // + // * 731 + // + // For example, the following values are valid: + // + // * 93 (3 months * 31) + // + // * 341 (11 months * 31) + // + // * 589 (19 months * 31) + // + // * 731 + // + // If you specify a retention period such as 94, which isn't a valid value, + // RDS issues an error. 
// // Valid for: Multi-AZ DB clusters only PerformanceInsightsRetentionPeriod *int64 `type:"integer"` @@ -18788,22 +18808,24 @@ type CreateDBClusterInput struct { // Valid for: Aurora DB clusters and Multi-AZ DB clusters Port *int64 `type:"integer"` - // A URL that contains a Signature Version 4 signed request for the CreateDBCluster - // action to be called in the source Amazon Web Services Region where the DB - // cluster is replicated from. Specify PreSignedUrl only when you are performing - // cross-Region replication from an encrypted DB cluster. + // When you are replicating a DB cluster from one Amazon Web Services GovCloud + // (US) Region to another, an URL that contains a Signature Version 4 signed + // request for the CreateDBCluster operation to be called in the source Amazon + // Web Services Region where the DB cluster is replicated from. Specify PreSignedUrl + // only when you are performing cross-Region replication from an encrypted DB + // cluster. // - // The pre-signed URL must be a valid request for the CreateDBCluster API action - // that can be executed in the source Amazon Web Services Region that contains - // the encrypted DB cluster to be copied. + // The presigned URL must be a valid request for the CreateDBCluster API operation + // that can run in the source Amazon Web Services Region that contains the encrypted + // DB cluster to copy. // - // The pre-signed URL request must contain the following parameter values: + // The presigned URL request must contain the following parameter values: // - // * KmsKeyId - The Amazon Web Services KMS key identifier for the KMS key - // to use to encrypt the copy of the DB cluster in the destination Amazon - // Web Services Region. This should refer to the same KMS key for both the - // CreateDBCluster action that is called in the destination Amazon Web Services - // Region, and the action contained in the pre-signed URL. 
+ // * KmsKeyId - The KMS key identifier for the KMS key to use to encrypt + // the copy of the DB cluster in the destination Amazon Web Services Region. + // This should refer to the same KMS key for both the CreateDBCluster operation + // that is called in the destination Amazon Web Services Region, and the + // operation contained in the presigned URL. // // * DestinationRegion - The name of the Amazon Web Services Region that // Aurora read replica will be created in. @@ -18822,9 +18844,9 @@ type CreateDBClusterInput struct { // // If you are using an Amazon Web Services SDK tool or the CLI, you can specify // SourceRegion (or --source-region for the CLI) instead of specifying PreSignedUrl - // manually. Specifying SourceRegion autogenerates a pre-signed URL that is - // a valid request for the operation that can be executed in the source Amazon - // Web Services Region. + // manually. Specifying SourceRegion autogenerates a presigned URL that is a + // valid request for the operation that can run in the source Amazon Web Services + // Region. // // Valid for: Aurora DB clusters only PreSignedUrl *string `type:"string"` @@ -19734,7 +19756,7 @@ type CreateDBInstanceInput struct { // // * Can't be set to 0 if the DB instance is a source to read replicas // - // * Can't be set to 0 or 35 for an RDS Custom for Oracle DB instance + // * Can't be set to 0 for an RDS Custom for Oracle DB instance BackupRetentionPeriod *int64 `type:"integer"` // Specifies where automated backups and manual snapshots are stored. @@ -19791,11 +19813,12 @@ type CreateDBInstanceInput struct { // This setting doesn't apply to RDS Custom. DBClusterIdentifier *string `type:"string"` - // The compute and memory capacity of the DB instance, for example db.m4.large. + // The compute and memory capacity of the DB instance, for example db.m5.large. // Not all DB instance classes are available in all Amazon Web Services Regions, // or for all database engines. 
For the full list of DB instance classes, and - // availability for your engine, see DB Instance Class (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html) - // in the Amazon RDS User Guide. + // availability for your engine, see DB instance classes (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html) + // in the Amazon RDS User Guide or Aurora DB instance classes (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Concepts.DBInstanceClass.html) + // in the Amazon Aurora User Guide. // // DBInstanceClass is a required field DBInstanceClass *string `type:"string" required:"true"` @@ -19942,7 +19965,8 @@ type CreateDBInstanceInput struct { // A list of DB security groups to associate with this DB instance. // - // Default: The default DB security group for the database engine. + // This setting applies to the legacy EC2-Classic platform, which is no longer + // used to create new DB instances. Use the VpcSecurityGroupIds setting instead. DBSecurityGroups []*string `locationNameList:"DBSecurityGroupName" type:"list"` // A DB subnet group to associate with this DB instance. @@ -19974,12 +19998,20 @@ type CreateDBInstanceInput struct { // in the Amazon RDS User Guide. // // This setting doesn't apply to RDS Custom. + // + // Amazon Aurora + // + // Not applicable. The domain is managed by the DB cluster. Domain *string `type:"string"` // Specify the name of the IAM role to be used when making API calls to the // Directory Service. // // This setting doesn't apply to RDS Custom. + // + // Amazon Aurora + // + // Not applicable. The domain is managed by the DB cluster. DomainIAMRoleName *string `type:"string"` // The list of log types that need to be enabled for exporting to CloudWatch @@ -20036,13 +20068,16 @@ type CreateDBInstanceInput struct { // and Access Management (IAM) accounts to database accounts. By default, mapping // isn't enabled. 
// - // This setting doesn't apply to RDS Custom or Amazon Aurora. In Aurora, mapping - // Amazon Web Services IAM accounts to database accounts is managed by the DB - // cluster. - // // For more information, see IAM Database Authentication for MySQL and PostgreSQL // (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAMDBAuth.html) // in the Amazon RDS User Guide. + // + // This setting doesn't apply to RDS Custom. + // + // Amazon Aurora + // + // Not applicable. Mapping Amazon Web Services IAM accounts to database accounts + // is managed by the DB cluster. EnableIAMDatabaseAuthentication *bool `type:"boolean"` // A value that indicates whether to enable Performance Insights for the DB @@ -20099,7 +20134,7 @@ type CreateDBInstanceInput struct { // The version number of the database engine to use. // - // For a list of valid engine versions, use the DescribeDBEngineVersions action. + // For a list of valid engine versions, use the DescribeDBEngineVersions operation. // // The following are the database engines and links to information about the // major and minor versions that are available with Amazon RDS. Not every database @@ -20158,6 +20193,10 @@ type CreateDBInstanceInput struct { // be a multiple between .5 and 50 of the storage amount for the DB instance. // For SQL Server DB instances, must be a multiple between 1 and 50 of the storage // amount for the DB instance. + // + // Amazon Aurora + // + // Not applicable. Storage is managed by the DB cluster. Iops *int64 `type:"integer"` // The Amazon Web Services KMS key identifier for an encrypted DB instance. @@ -20189,6 +20228,10 @@ type CreateDBInstanceInput struct { // Valid values: license-included | bring-your-own-license | general-public-license // // This setting doesn't apply to RDS Custom. + // + // Amazon Aurora + // + // Not applicable. LicenseModel *string `type:"string"` // The password for the master user. 
The password can include any printable @@ -20247,6 +20290,10 @@ type CreateDBInstanceInput struct { // in the Amazon RDS User Guide. // // This setting doesn't apply to RDS Custom. + // + // Amazon Aurora + // + // Not applicable. Storage is managed by the DB cluster. MaxAllocatedStorage *int64 `type:"integer"` // The interval, in seconds, between points when Enhanced Monitoring metrics @@ -20278,6 +20325,11 @@ type CreateDBInstanceInput struct { // deployment. // // This setting doesn't apply to RDS Custom. + // + // Amazon Aurora + // + // Not applicable. DB instance Availability Zones (AZs) are managed by the DB + // cluster. MultiAZ *bool `type:"boolean"` // The name of the NCHAR character set for the Oracle DB instance. @@ -20309,6 +20361,10 @@ type CreateDBInstanceInput struct { // from a DB instance after it is associated with a DB instance. // // This setting doesn't apply to RDS Custom. + // + // Amazon Aurora + // + // Not applicable. OptionGroupName *string `type:"string"` // The Amazon Web Services KMS key identifier for encryption of Performance @@ -20325,8 +20381,27 @@ type CreateDBInstanceInput struct { // This setting doesn't apply to RDS Custom. PerformanceInsightsKMSKeyId *string `type:"string"` - // The amount of time, in days, to retain Performance Insights data. Valid values - // are 7 or 731 (2 years). + // The number of days to retain Performance Insights data. The default is 7 + // days. The following values are valid: + // + // * 7 + // + // * month * 31, where month is a number of months from 1-23 + // + // * 731 + // + // For example, the following values are valid: + // + // * 93 (3 months * 31) + // + // * 341 (11 months * 31) + // + // * 589 (19 months * 31) + // + // * 731 + // + // If you specify a retention period such as 94, which isn't a valid value, + // RDS issues an error. // // This setting doesn't apply to RDS Custom. 
PerformanceInsightsRetentionPeriod *int64 `type:"integer"` @@ -20421,6 +20496,10 @@ type CreateDBInstanceInput struct { // class of the DB instance. // // This setting doesn't apply to RDS Custom. + // + // Amazon Aurora + // + // Not applicable. ProcessorFeatures []*ProcessorFeature `locationNameList:"ProcessorFeature" type:"list"` // A value that specifies the order in which an Aurora Replica is promoted to @@ -20487,6 +20566,10 @@ type CreateDBInstanceInput struct { // If you specify io1, you must also include a value for the Iops parameter. // // Default: io1 if the Iops parameter is specified, otherwise gp2 + // + // Amazon Aurora + // + // Not applicable. Storage is managed by the DB cluster. StorageType *string `type:"string"` // Tags to assign to the DB instance. @@ -20495,6 +20578,10 @@ type CreateDBInstanceInput struct { // The ARN from the key store with which to associate the instance for TDE encryption. // // This setting doesn't apply to RDS Custom. + // + // Amazon Aurora + // + // Not applicable. TdeCredentialArn *string `type:"string"` // The password for the given ARN from the key store in order to access the @@ -20961,8 +21048,8 @@ type CreateDBInstanceReadReplicaInput struct { // or the default DBParameterGroup for the specified DB engine for a cross-Region // read replica. // - // Specifying a parameter group for this operation is only supported for Oracle - // DB instances. It isn't supported for RDS Custom. + // Specifying a parameter group for this operation is only supported for MySQL + // and Oracle DB instances. It isn't supported for RDS Custom. // // Constraints: // @@ -21157,8 +21244,27 @@ type CreateDBInstanceReadReplicaInput struct { // This setting doesn't apply to RDS Custom. PerformanceInsightsKMSKeyId *string `type:"string"` - // The amount of time, in days, to retain Performance Insights data. Valid values - // are 7 or 731 (2 years). + // The number of days to retain Performance Insights data. The default is 7 + // days. 
The following values are valid: + // + // * 7 + // + // * month * 31, where month is a number of months from 1-23 + // + // * 731 + // + // For example, the following values are valid: + // + // * 93 (3 months * 31) + // + // * 341 (11 months * 31) + // + // * 589 (19 months * 31) + // + // * 731 + // + // If you specify a retention period such as 94, which isn't a valid value, + // RDS issues an error. // // This setting doesn't apply to RDS Custom. PerformanceInsightsRetentionPeriod *int64 `type:"integer"` @@ -21170,9 +21276,15 @@ type CreateDBInstanceReadReplicaInput struct { // Valid Values: 1150-65535 Port *int64 `type:"integer"` - // The URL that contains a Signature Version 4 signed request for the CreateDBInstanceReadReplica - // API action in the source Amazon Web Services Region that contains the source - // DB instance. + // When you are creating a read replica from one Amazon Web Services GovCloud + // (US) Region to another or from one China Amazon Web Services Region to another, + // the URL that contains a Signature Version 4 signed request for the CreateDBInstanceReadReplica + // API operation in the source Amazon Web Services Region that contains the + // source DB instance. + // + // This setting applies only to Amazon Web Services GovCloud (US) Regions and + // China Amazon Web Services Regions. It's ignored in other Amazon Web Services + // Regions. // // You must specify this parameter when you create an encrypted read replica // from another Amazon Web Services Region by using the Amazon RDS API. Don't @@ -21180,27 +21292,27 @@ type CreateDBInstanceReadReplicaInput struct { // same Amazon Web Services Region. // // The presigned URL must be a valid request for the CreateDBInstanceReadReplica - // API action that can be executed in the source Amazon Web Services Region - // that contains the encrypted source DB instance. 
The presigned URL request - // must contain the following parameter values: + // API operation that can run in the source Amazon Web Services Region that + // contains the encrypted source DB instance. The presigned URL request must + // contain the following parameter values: // // * DestinationRegion - The Amazon Web Services Region that the encrypted // read replica is created in. This Amazon Web Services Region is the same - // one where the CreateDBInstanceReadReplica action is called that contains + // one where the CreateDBInstanceReadReplica operation is called that contains // this presigned URL. For example, if you create an encrypted DB instance // in the us-west-1 Amazon Web Services Region, from a source DB instance // in the us-east-2 Amazon Web Services Region, then you call the CreateDBInstanceReadReplica - // action in the us-east-1 Amazon Web Services Region and provide a presigned - // URL that contains a call to the CreateDBInstanceReadReplica action in - // the us-west-2 Amazon Web Services Region. For this example, the DestinationRegion + // operation in the us-east-1 Amazon Web Services Region and provide a presigned + // URL that contains a call to the CreateDBInstanceReadReplica operation + // in the us-west-2 Amazon Web Services Region. For this example, the DestinationRegion // in the presigned URL must be set to the us-east-1 Amazon Web Services // Region. // - // * KmsKeyId - The Amazon Web Services KMS key identifier for the key to - // use to encrypt the read replica in the destination Amazon Web Services - // Region. This is the same identifier for both the CreateDBInstanceReadReplica - // action that is called in the destination Amazon Web Services Region, and - // the action contained in the presigned URL. + // * KmsKeyId - The KMS key identifier for the key to use to encrypt the + // read replica in the destination Amazon Web Services Region. 
This is the + // same identifier for both the CreateDBInstanceReadReplica operation that + // is called in the destination Amazon Web Services Region, and the operation + // contained in the presigned URL. // // * SourceDBInstanceIdentifier - The DB instance identifier for the encrypted // DB instance to be replicated. This identifier must be in the Amazon Resource @@ -21217,11 +21329,11 @@ type CreateDBInstanceReadReplicaInput struct { // If you are using an Amazon Web Services SDK tool or the CLI, you can specify // SourceRegion (or --source-region for the CLI) instead of specifying PreSignedUrl // manually. Specifying SourceRegion autogenerates a presigned URL that is a - // valid request for the operation that can be executed in the source Amazon - // Web Services Region. + // valid request for the operation that can run in the source Amazon Web Services + // Region. // - // SourceRegion isn't supported for SQL Server, because SQL Server on Amazon - // RDS doesn't support cross-Region read replicas. + // SourceRegion isn't supported for SQL Server, because Amazon RDS for SQL Server + // doesn't support cross-Region read replicas. // // This setting doesn't apply to RDS Custom. PreSignedUrl *string `type:"string"` @@ -21969,8 +22081,9 @@ type CreateDBProxyInput struct { // The kinds of databases that the proxy can connect to. This value determines // which database network protocol the proxy recognizes when it interprets network - // traffic to and from the database. The engine family applies to MySQL and - // PostgreSQL for both RDS and Aurora. + // traffic to and from the database. For Aurora MySQL, RDS for MariaDB, and + // RDS for MySQL databases, specify MYSQL. For Aurora PostgreSQL and RDS for + // PostgreSQL databases, specify POSTGRESQL. 
// // EngineFamily is a required field EngineFamily *string `type:"string" required:"true" enum:"EngineFamily"` @@ -22685,7 +22798,7 @@ func (s *CreateEventSubscriptionOutput) SetEventSubscription(v *EventSubscriptio type CreateGlobalClusterInput struct { _ struct{} `type:"structure"` - // The name for your database of up to 64 alpha-numeric characters. If you do + // The name for your database of up to 64 alphanumeric characters. If you do // not provide a name, Amazon Aurora will not create a database in the global // database cluster you are creating. DatabaseName *string `type:"string"` @@ -23227,8 +23340,24 @@ type DBCluster struct { // This setting is only for non-Aurora Multi-AZ DB clusters. PerformanceInsightsKMSKeyId *string `type:"string"` - // The amount of time, in days, to retain Performance Insights data. Valid values - // are 7 or 731 (2 years). + // The number of days to retain Performance Insights data. The default is 7 + // days. The following values are valid: + // + // * 7 + // + // * month * 31, where month is a number of months from 1-23 + // + // * 731 + // + // For example, the following values are valid: + // + // * 93 (3 months * 31) + // + // * 341 (11 months * 31) + // + // * 589 (19 months * 31) + // + // * 731 // // This setting is only for non-Aurora Multi-AZ DB clusters. PerformanceInsightsRetentionPeriod *int64 `type:"integer"` @@ -25080,8 +25209,24 @@ type DBInstance struct { // ARN, or alias name for the KMS key. PerformanceInsightsKMSKeyId *string `type:"string"` - // The amount of time, in days, to retain Performance Insights data. Valid values - // are 7 or 731 (2 years). + // The number of days to retain Performance Insights data. The default is 7 + // days. 
The following values are valid: + // + // * 7 + // + // * month * 31, where month is a number of months from 1-23 + // + // * 731 + // + // For example, the following values are valid: + // + // * 93 (3 months * 31) + // + // * 341 (11 months * 31) + // + // * 589 (19 months * 31) + // + // * 731 PerformanceInsightsRetentionPeriod *int64 `type:"integer"` // Specifies the daily time range during which automated backups are created @@ -26285,7 +26430,11 @@ type DBProxy struct { // endpoint value in the connection string for a database client application. Endpoint *string `type:"string"` - // The engine family applies to MySQL and PostgreSQL for both RDS and Aurora. + // The kinds of databases that the proxy can connect to. This value determines + // which database network protocol the proxy recognizes when it interprets network + // traffic to and from the database. MYSQL supports Aurora MySQL, RDS for MariaDB, + // and RDS for MySQL databases. POSTGRESQL supports Aurora PostgreSQL and RDS + // for PostgreSQL databases. EngineFamily *string `type:"string"` // The number of seconds a connection to the proxy can have no activity before @@ -32429,17 +32578,15 @@ type DescribeDBSnapshotsInput struct { _ struct{} `type:"structure"` // The ID of the DB instance to retrieve the list of DB snapshots for. This - // parameter can't be used in conjunction with DBSnapshotIdentifier. This parameter - // isn't case-sensitive. + // parameter isn't case-sensitive. // // Constraints: // // * If supplied, must match the identifier of an existing DBInstance. DBInstanceIdentifier *string `type:"string"` - // A specific DB snapshot identifier to describe. This parameter can't be used - // in conjunction with DBInstanceIdentifier. This value is stored as a lowercase - // string. + // A specific DB snapshot identifier to describe. This value is stored as a + // lowercase string. 
// // Constraints: // @@ -34273,7 +34420,7 @@ type DescribeOrderableDBInstanceOptionsInput struct { // // Default: 100 // - // Constraints: Minimum 20, maximum 100. + // Constraints: Minimum 20, maximum 10000. MaxRecords *int64 `type:"integer"` // A value that indicates whether to show only VPC or non-VPC offerings. RDS @@ -35617,7 +35764,8 @@ func (s *EngineDefaults) SetParameters(v []*Parameter) *EngineDefaults { return s } -// This data type is used as a response element in the DescribeEvents action. +// This data type is used as a response element in the DescribeEvents (https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribeEvents.html) +// action. type Event struct { _ struct{} `type:"structure"` @@ -35695,6 +35843,7 @@ func (s *Event) SetSourceType(v string) *Event { } // Contains the results of a successful invocation of the DescribeEventCategories +// (https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribeEventCategories.html) // operation. type EventCategoriesMap struct { _ struct{} `type:"structure"` @@ -37964,8 +38113,27 @@ type ModifyDBClusterInput struct { // Valid for: Multi-AZ DB clusters only PerformanceInsightsKMSKeyId *string `type:"string"` - // The amount of time, in days, to retain Performance Insights data. Valid values - // are 7 or 731 (2 years). + // The number of days to retain Performance Insights data. The default is 7 + // days. The following values are valid: + // + // * 7 + // + // * month * 31, where month is a number of months from 1-23 + // + // * 731 + // + // For example, the following values are valid: + // + // * 93 (3 months * 31) + // + // * 341 (11 months * 31) + // + // * 589 (19 months * 31) + // + // * 731 + // + // If you specify a retention period such as 94, which isn't a valid value, + // RDS issues an error. 
// // Valid for: Multi-AZ DB clusters only PerformanceInsightsRetentionPeriod *int64 `type:"integer"` @@ -38416,7 +38584,7 @@ type ModifyDBClusterSnapshotAttributeInput struct { // restore a manual DB cluster snapshot, set this value to restore. // // To view the list of attributes available to modify, use the DescribeDBClusterSnapshotAttributes - // API action. + // API operation. // // AttributeName is a required field AttributeName *string `type:"string" required:"true"` @@ -38632,8 +38800,8 @@ type ModifyDBInstanceInput struct { // Constraints: // // * It must be a value from 0 to 35. It can't be set to 0 if the DB instance - // is a source to read replicas. It can't be set to 0 or 35 for an RDS Custom - // for Oracle DB instance. + // is a source to read replicas. It can't be set to 0 for an RDS Custom for + // Oracle DB instance. // // * It can be specified for a MySQL read replica only if the source is running // MySQL 5.6 or later. @@ -38690,11 +38858,12 @@ type ModifyDBInstanceInput struct { // For more information, see ModifyDBCluster. CopyTagsToSnapshot *bool `type:"boolean"` - // The new compute and memory capacity of the DB instance, for example db.m4.large. + // The new compute and memory capacity of the DB instance, for example db.m5.large. // Not all DB instance classes are available in all Amazon Web Services Regions, // or for all database engines. For the full list of DB instance classes, and - // availability for your engine, see DB Instance Class (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html) - // in the Amazon RDS User Guide. + // availability for your engine, see DB instance classes (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html) + // in the Amazon RDS User Guide or Aurora DB instance classes (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Concepts.DBInstanceClass.html) + // in the Amazon Aurora User Guide. 
// // If you modify the DB instance class, an outage occurs during the change. // The change is applied during the next maintenance window, unless ApplyImmediately @@ -38865,7 +39034,7 @@ type ModifyDBInstanceInput struct { // instance. // // For more information, see Using Amazon Performance Insights (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PerfInsights.html) - // in the Amazon RDS User Guide.. + // in the Amazon RDS User Guide. // // This setting doesn't apply to RDS Custom. EnablePerformanceInsights *bool `type:"boolean"` @@ -38960,7 +39129,7 @@ type ModifyDBInstanceInput struct { // // Constraints: Must contain from 8 to 128 characters. // - // Amazon RDS API actions never return the password, so this action provides + // Amazon RDS API operations never return the password, so this action provides // a way to regain access to a primary instance user if the password is lost. // This includes restoring privileges that might have been accidentally revoked. MasterUserPassword *string `type:"string"` @@ -39075,8 +39244,27 @@ type ModifyDBInstanceInput struct { // This setting doesn't apply to RDS Custom. PerformanceInsightsKMSKeyId *string `type:"string"` - // The amount of time, in days, to retain Performance Insights data. Valid values - // are 7 or 731 (2 years). + // The number of days to retain Performance Insights data. The default is 7 + // days. The following values are valid: + // + // * 7 + // + // * month * 31, where month is a number of months from 1-23 + // + // * 731 + // + // For example, the following values are valid: + // + // * 93 (3 months * 31) + // + // * 341 (11 months * 31) + // + // * 589 (19 months * 31) + // + // * 731 + // + // If you specify a retention period such as 94, which isn't a valid value, + // RDS issues an error. // // This setting doesn't apply to RDS Custom. 
PerformanceInsightsRetentionPeriod *int64 `type:"integer"` @@ -39952,7 +40140,7 @@ type ModifyDBProxyTargetGroupInput struct { // for the target group. ConnectionPoolConfig *ConnectionPoolConfiguration `type:"structure"` - // The name of the new proxy to which to assign the target group. + // The name of the proxy. // // DBProxyName is a required field DBProxyName *string `type:"string" required:"true"` @@ -39962,7 +40150,7 @@ type ModifyDBProxyTargetGroupInput struct { // end with a hyphen or contain two consecutive hyphens. NewName *string `type:"string"` - // The name of the new target group to assign to the proxy. + // The name of the target group to modify. // // TargetGroupName is a required field TargetGroupName *string `type:"string" required:"true"` @@ -40066,7 +40254,7 @@ type ModifyDBSnapshotAttributeInput struct { // restore a manual DB snapshot, set this value to restore. // // To view the list of attributes available to modify, use the DescribeDBSnapshotAttributes - // API action. + // API operation. // // AttributeName is a required field AttributeName *string `type:"string" required:"true"` @@ -44263,8 +44451,8 @@ type RestoreDBClusterFromS3Input struct { // The name of the database engine to be used for this DB cluster. 
// - // Valid Values: aurora (for MySQL 5.6-compatible Aurora), aurora-mysql (for - // MySQL 5.7-compatible and MySQL 8.0-compatible Aurora), and aurora-postgresql + // Valid Values: aurora (for MySQL 5.6-compatible Aurora) and aurora-mysql (for + // MySQL 5.7-compatible and MySQL 8.0-compatible Aurora) // // Engine is a required field Engine *string `type:"string" required:"true"` @@ -44281,19 +44469,9 @@ type RestoreDBClusterFromS3Input struct { // // aws rds describe-db-engine-versions --engine aurora-mysql --query "DBEngineVersions[].EngineVersion" // - // To list all of the available engine versions for aurora-postgresql, use the - // following command: - // - // aws rds describe-db-engine-versions --engine aurora-postgresql --query "DBEngineVersions[].EngineVersion" - // // Aurora MySQL // - // Example: 5.6.10a, 5.6.mysql_aurora.1.19.2, 5.7.12, 5.7.mysql_aurora.2.04.5, - // 8.0.mysql_aurora.3.01.0 - // - // Aurora PostgreSQL - // - // Example: 9.6.3, 10.7 + // Example: 5.6.10a, 5.6.mysql_aurora.1.19.2, 5.7.mysql_aurora.2.07.1, 8.0.mysql_aurora.3.02.0 EngineVersion *string `type:"string"` // The Amazon Web Services KMS key identifier for an encrypted DB cluster. @@ -44859,7 +45037,7 @@ type RestoreDBClusterFromSnapshotInput struct { // // For more information about exporting CloudWatch Logs for Amazon RDS, see // Publishing Database Logs to Amazon CloudWatch Logs (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_LogAccess.html#USER_LogAccess.Procedural.UploadtoCloudWatch) - // in the Amazon RDS User Guide.. + // in the Amazon RDS User Guide. 
// // For more information about exporting CloudWatch Logs for Amazon Aurora, see // Publishing Database Logs to Amazon CloudWatch Logs (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_LogAccess.html#USER_LogAccess.Procedural.UploadtoCloudWatch) @@ -45447,7 +45625,7 @@ type RestoreDBClusterToPointInTimeInput struct { // // For more information about exporting CloudWatch Logs for Amazon RDS, see // Publishing Database Logs to Amazon CloudWatch Logs (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_LogAccess.html#USER_LogAccess.Procedural.UploadtoCloudWatch) - // in the Amazon RDS User Guide.. + // in the Amazon RDS User Guide. // // For more information about exporting CloudWatch Logs for Amazon Aurora, see // Publishing Database Logs to Amazon CloudWatch Logs (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_LogAccess.html#USER_LogAccess.Procedural.UploadtoCloudWatch) @@ -46614,7 +46792,7 @@ type RestoreDBInstanceFromS3Input struct { // instance. // // For more information, see Using Amazon Performance Insights (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PerfInsights.html) - // in the Amazon RDS User Guide.. + // in the Amazon RDS User Guide. EnablePerformanceInsights *bool `type:"boolean"` // The name of the database engine to be used for this instance. @@ -46737,8 +46915,27 @@ type RestoreDBInstanceFromS3Input struct { // KMS key for each Amazon Web Services Region. PerformanceInsightsKMSKeyId *string `type:"string"` - // The amount of time, in days, to retain Performance Insights data. Valid values - // are 7 or 731 (2 years). + // The number of days to retain Performance Insights data. The default is 7 + // days. 
The following values are valid: + // + // * 7 + // + // * month * 31, where month is a number of months from 1-23 + // + // * 731 + // + // For example, the following values are valid: + // + // * 93 (3 months * 31) + // + // * 341 (11 months * 31) + // + // * 589 (19 months * 31) + // + // * 731 + // + // If you specify a retention period such as 94, which isn't a valid value, + // RDS issues an error. PerformanceInsightsRetentionPeriod *int64 `type:"integer"` // The port number on which the database accepts connections. @@ -48168,7 +48365,7 @@ type ScalingConfigurationInfo struct { // The maximum capacity for an Aurora DB cluster in serverless DB engine mode. MaxCapacity *int64 `type:"integer"` - // The maximum capacity for the Aurora DB cluster in serverless DB engine mode. + // The minimum capacity for an Aurora DB cluster in serverless DB engine mode. MinCapacity *int64 `type:"integer"` // The number of seconds before scaling times out. What happens when an attempted @@ -48692,11 +48889,26 @@ type StartDBInstanceAutomatedBackupsReplicationInput struct { // arn:aws:kms:us-east-1:123456789012:key/AKIAIOSFODNN7EXAMPLE. KmsKeyId *string `type:"string"` - // A URL that contains a Signature Version 4 signed request for the StartDBInstanceAutomatedBackupsReplication - // action to be called in the Amazon Web Services Region of the source DB instance. + // In an Amazon Web Services GovCloud (US) Region, an URL that contains a Signature + // Version 4 signed request for the StartDBInstanceAutomatedBackupsReplication + // operation to call in the Amazon Web Services Region of the source DB instance. // The presigned URL must be a valid request for the StartDBInstanceAutomatedBackupsReplication - // API action that can be executed in the Amazon Web Services Region that contains + // API operation that can run in the Amazon Web Services Region that contains // the source DB instance. + // + // This setting applies only to Amazon Web Services GovCloud (US) Regions. 
It's + // ignored in other Amazon Web Services Regions. + // + // To learn how to generate a Signature Version 4 signed request, see Authenticating + // Requests: Using Query Parameters (Amazon Web Services Signature Version 4) + // (https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html) + // and Signature Version 4 Signing Process (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). + // + // If you are using an Amazon Web Services SDK tool or the CLI, you can specify + // SourceRegion (or --source-region for the CLI) instead of specifying PreSignedUrl + // manually. Specifying SourceRegion autogenerates a presigned URL that is a + // valid request for the operation that can run in the source Amazon Web Services + // Region. PreSignedUrl *string `type:"string"` // The Amazon Resource Name (ARN) of the source DB instance for the replicated @@ -50186,7 +50398,9 @@ func (s *ValidStorageOptions) SetSupportsStorageAutoscaling(v bool) *ValidStorag type VpcSecurityGroupMembership struct { _ struct{} `type:"structure"` - // The status of the VPC security group. + // The membership status of the VPC security group. + // + // Currently, the only valid status is active. Status *string `type:"string"` // The name of the VPC security group.