From d7b4743f7055573a30da33b6cd03c4c11673f15d Mon Sep 17 00:00:00 2001 From: aws-sdk-go-automation <43143561+aws-sdk-go-automation@users.noreply.github.com> Date: Wed, 14 Dec 2022 11:29:00 -0800 Subject: [PATCH] Release v1.44.160 (2022-12-14) (#4663) Release v1.44.160 (2022-12-14) === ### Service Client Updates * `service/ce`: Updates service API and documentation * `service/monitoring`: Updates service API and documentation * Adding support for Metrics Insights Alarms * `service/networkmanager`: Updates service API and documentation * `service/redshift-data`: Updates service API and documentation * `service/sagemaker-metrics`: Updates service documentation --- CHANGELOG.md | 11 + aws/endpoints/defaults.go | 9 + aws/version.go | 2 +- models/apis/ce/2017-10-25/api-2.json | 31 +- models/apis/ce/2017-10-25/docs-2.json | 25 +- models/apis/monitoring/2010-08-01/api-2.json | 12 +- models/apis/monitoring/2010-08-01/docs-2.json | 11 +- .../2010-08-01/endpoint-rule-set-1.json | 11 +- .../apis/networkmanager/2019-07-05/api-2.json | 3 +- .../networkmanager/2019-07-05/docs-2.json | 5 +- .../2019-07-05/endpoint-rule-set-1.json | 861 ++++++++++++++++++ .../2019-07-05/endpoint-tests-1.json | 87 ++ .../apis/redshift-data/2019-12-20/api-2.json | 13 + .../apis/redshift-data/2019-12-20/docs-2.json | 33 +- .../2019-12-20/endpoint-rule-set-1.json | 309 +++++++ .../2019-12-20/endpoint-tests-1.json | 43 + .../sagemaker-metrics/2022-09-30/docs-2.json | 12 +- .../2022-09-30/endpoint-tests-1.json | 86 +- models/endpoints/endpoints.json | 7 + service/cloudwatch/api.go | 40 +- service/costexplorer/api.go | 616 +++++++++---- service/networkmanager/api.go | 13 +- service/redshiftdataapiservice/api.go | 70 +- service/redshiftdataapiservice/doc.go | 2 +- service/sagemakermetrics/api.go | 20 +- 25 files changed, 2041 insertions(+), 291 deletions(-) create mode 100644 models/apis/networkmanager/2019-07-05/endpoint-rule-set-1.json create mode 100644 models/apis/networkmanager/2019-07-05/endpoint-tests-1.json create mode 100644 models/apis/redshift-data/2019-12-20/endpoint-rule-set-1.json create mode 100644 models/apis/redshift-data/2019-12-20/endpoint-tests-1.json diff --git a/CHANGELOG.md b/CHANGELOG.md index 8ce5e4f8afb..4b58c5ca5e3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,14 @@ +Release v1.44.160 (2022-12-14) +=== + +### Service Client Updates +* `service/ce`: Updates service API and documentation +* `service/monitoring`: Updates service API and documentation + * Adding support for Metrics Insights Alarms +* `service/networkmanager`: Updates service API and documentation +* `service/redshift-data`: Updates service API and documentation +* `service/sagemaker-metrics`: Updates service documentation + Release v1.44.159 (2022-12-13) === diff --git a/aws/endpoints/defaults.go b/aws/endpoints/defaults.go index da37660070f..5d70f6e4707 100644 --- a/aws/endpoints/defaults.go +++ b/aws/endpoints/defaults.go @@ -14375,6 +14375,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "ap-southeast-4-fips", + }: endpoint{ + Hostname: "kms-fips.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "ca-central-1", }: endpoint{}, diff --git a/aws/version.go b/aws/version.go index 8604de29650..c048c41afbc 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.44.159" +const 
SDKVersion = "1.44.160" diff --git a/models/apis/ce/2017-10-25/api-2.json b/models/apis/ce/2017-10-25/api-2.json index a1b97a4e11f..4c4eee367af 100644 --- a/models/apis/ce/2017-10-25/api-2.json +++ b/models/apis/ce/2017-10-25/api-2.json @@ -607,7 +607,6 @@ "required":[ "MonitorArnList", "Subscribers", - "Threshold", "Frequency", "SubscriptionName" ], @@ -616,9 +615,14 @@ "AccountId":{"shape":"GenericString"}, "MonitorArnList":{"shape":"MonitorArnList"}, "Subscribers":{"shape":"Subscribers"}, - "Threshold":{"shape":"NullableNonNegativeDouble"}, + "Threshold":{ + "shape":"NullableNonNegativeDouble", + "deprecated":true, + "deprecatedMessage":"Threshold has been deprecated in favor of ThresholdExpression" + }, "Frequency":{"shape":"AnomalySubscriptionFrequency"}, - "SubscriptionName":{"shape":"GenericString"} + "SubscriptionName":{"shape":"GenericString"}, + "ThresholdExpression":{"shape":"Expression"} } }, "AnomalySubscriptionFrequency":{ @@ -1136,7 +1140,9 @@ "PAYMENT_OPTION", "AGREEMENT_END_DATE_TIME_AFTER", "AGREEMENT_END_DATE_TIME_BEFORE", - "INVOICING_ENTITY" + "INVOICING_ENTITY", + "ANOMALY_TOTAL_IMPACT_ABSOLUTE", + "ANOMALY_TOTAL_IMPACT_PERCENTAGE" ] }, "DimensionValues":{ @@ -1809,7 +1815,10 @@ "required":["MaxImpact"], "members":{ "MaxImpact":{"shape":"GenericDouble"}, - "TotalImpact":{"shape":"GenericDouble"} + "TotalImpact":{"shape":"GenericDouble"}, + "TotalActualSpend":{"shape":"NullableNonNegativeDouble"}, + "TotalExpectedSpend":{"shape":"NullableNonNegativeDouble"}, + "TotalImpactPercentage":{"shape":"NullableNonNegativeDouble"} } }, "InstanceDetails":{ @@ -1925,7 +1934,8 @@ "ENDS_WITH", "CONTAINS", "CASE_SENSITIVE", - "CASE_INSENSITIVE" + "CASE_INSENSITIVE", + "GREATER_THAN_OR_EQUAL" ] }, "MatchOptions":{ @@ -2801,11 +2811,16 @@ "required":["SubscriptionArn"], "members":{ "SubscriptionArn":{"shape":"GenericString"}, - "Threshold":{"shape":"NullableNonNegativeDouble"}, + "Threshold":{ + "shape":"NullableNonNegativeDouble", + "deprecated":true, + "deprecatedMessage":"Threshold has been deprecated in favor of ThresholdExpression" + }, "Frequency":{"shape":"AnomalySubscriptionFrequency"}, "MonitorArnList":{"shape":"MonitorArnList"}, "Subscribers":{"shape":"Subscribers"}, - "SubscriptionName":{"shape":"GenericString"} + "SubscriptionName":{"shape":"GenericString"}, + "ThresholdExpression":{"shape":"Expression"} } }, "UpdateAnomalySubscriptionResponse":{ diff --git a/models/apis/ce/2017-10-25/docs-2.json b/models/apis/ce/2017-10-25/docs-2.json index 0d4d0152eb9..a4795f7f7df 100644 --- a/models/apis/ce/2017-10-25/docs-2.json +++ b/models/apis/ce/2017-10-25/docs-2.json @@ -3,13 +3,13 @@ "service": "

You can use the Cost Explorer API to programmatically query your cost and usage data. You can query for aggregated data such as total monthly costs or total daily usage. You can also query for granular data. This might include the number of daily write operations for Amazon DynamoDB database tables in your production environment.

Service Endpoint

The Cost Explorer API provides the following endpoint:

For information about the costs that are associated with the Cost Explorer API, see Amazon Web Services Cost Management Pricing.

", "operations": { "CreateAnomalyMonitor": "

Creates a new cost anomaly detection monitor with the requested type and monitor specification.

", - "CreateAnomalySubscription": "

Adds a subscription to a cost anomaly detection monitor. You can use each subscription to define subscribers with email or SNS notifications. Email subscribers can set a dollar threshold and a time frequency for receiving notifications.

", + "CreateAnomalySubscription": "

Adds an alert subscription to a cost anomaly detection monitor. You can use each subscription to define subscribers with email or SNS notifications. Email subscribers can set an absolute or percentage threshold and a time frequency for receiving notifications.

", "CreateCostCategoryDefinition": "

Creates a new Cost Category with the requested name and rules.

", "DeleteAnomalyMonitor": "

Deletes a cost anomaly monitor.

", "DeleteAnomalySubscription": "

Deletes a cost anomaly subscription.

", "DeleteCostCategoryDefinition": "

Deletes a Cost Category. Expenses from this month going forward will no longer be categorized with this Cost Category.

", "DescribeCostCategoryDefinition": "

Returns the name, Amazon Resource Name (ARN), rules, definition, and effective dates of a Cost Category that's defined in the account.

You have the option to use EffectiveOn to return a Cost Category that's active on a specific date. If there's no EffectiveOn specified, you see a Cost Category that's effective on the current date. If Cost Category is still effective, EffectiveEnd is omitted in the response.

", - "GetAnomalies": "

Retrieves all of the cost anomalies detected on your account during the time period that's specified by the DateInterval object.

", + "GetAnomalies": "

Retrieves all of the cost anomalies detected on your account during the time period that's specified by the DateInterval object. Anomalies are available for up to 90 days.

", "GetAnomalyMonitors": "

Retrieves the cost anomaly monitor definitions for your account. You can filter using a list of cost anomaly monitor Amazon Resource Names (ARNs).

", "GetAnomalySubscriptions": "

Retrieves the cost anomaly subscription objects for your account. You can filter using a list of cost anomaly monitor Amazon Resource Names (ARNs).

", "GetCostAndUsage": "

Retrieves cost and usage metrics for your account. You can specify which cost and usage-related metrics you want the request to return. For example, you can specify BlendedCosts or UsageQuantity. You can also filter and group your data by various dimensions, such as SERVICE or AZ, in a specific time range. For a complete list of valid dimensions, see the GetDimensionValues operation. Management accounts in an organization in Organizations have access to all member accounts.

For information about filter limitations, see Quotas and restrictions in the Billing and Cost Management User Guide.

", @@ -568,7 +568,7 @@ "Dimension": { "base": null, "refs": { - "DimensionValues$Key": "

The names of the metadata types that you can use to filter and group your results. For example, AZ returns a list of Availability Zones. LINK_ACCOUNT_NAME and SERVICE_CODE can only be used in CostCategoryRule.

", + "DimensionValues$Key": "

The names of the metadata types that you can use to filter and group your results. For example, AZ returns a list of Availability Zones.

Not all dimensions are supported in each API. Refer to the documentation for each specific API to see what is supported.

LINK_ACCOUNT_NAME and SERVICE_CODE can only be used in CostCategoryRule.

ANOMALY_TOTAL_IMPACT_ABSOLUTE and ANOMALY_TOTAL_IMPACT_PERCENTAGE can only be used in AnomalySubscriptions.

", "GetDimensionValuesRequest$Dimension": "

The name of the dimension. Each Dimension is available for a different Context. For more information, see Context. LINK_ACCOUNT_NAME and SERVICE_CODE can only be used in CostCategoryRule.

" } }, @@ -671,9 +671,10 @@ } }, "Expression": { - "base": "

Use Expression to filter by cost or by usage. There are two patterns:

For the GetRightsizingRecommendation action, a combination of OR and NOT isn't supported. OR isn't supported between different dimensions, or dimensions and tags. NOT operators aren't supported. Dimensions are also limited to LINKED_ACCOUNT, REGION, or RIGHTSIZING_TYPE.

For the GetReservationPurchaseRecommendation action, only NOT is supported. AND and OR aren't supported. Dimensions are limited to LINKED_ACCOUNT.

", + "base": "

Use Expression to filter in various Cost Explorer APIs.

Not all Expression types are supported in each API. Refer to the documentation for each specific API to see what is supported.

There are two patterns:

For the GetRightsizingRecommendation action, a combination of OR and NOT isn't supported. OR isn't supported between different dimensions, or dimensions and tags. NOT operators aren't supported. Dimensions are also limited to LINKED_ACCOUNT, REGION, or RIGHTSIZING_TYPE.

For the GetReservationPurchaseRecommendation action, only NOT is supported. AND and OR aren't supported. Dimensions are limited to LINKED_ACCOUNT.

", "refs": { "AnomalyMonitor$MonitorSpecification": null, + "AnomalySubscription$ThresholdExpression": "

An Expression object used to specify the anomalies that you want to generate alerts for. This supports dimensions and nested expressions. The supported dimensions are ANOMALY_TOTAL_IMPACT_ABSOLUTE and ANOMALY_TOTAL_IMPACT_PERCENTAGE. The supported nested expression types are AND and OR. The match option GREATER_THAN_OR_EQUAL is required. Values must be numbers between 0 and 10,000,000,000.

One of Threshold or ThresholdExpression is required for this resource.

The following are examples of valid ThresholdExpressions:
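As an illustration only (this sketch is not part of the patch, and the subscription ARN and dollar value are placeholders), the new field can be set through the generated Go client, for example to alert on anomalies whose absolute impact is at least 100 USD:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/costexplorer"
    )

    func main() {
        svc := costexplorer.New(session.Must(session.NewSession()))

        // Absolute-impact threshold: alert when total impact >= 100 USD.
        expr := &costexplorer.Expression{
            Dimensions: &costexplorer.DimensionValues{
                Key:          aws.String("ANOMALY_TOTAL_IMPACT_ABSOLUTE"),
                MatchOptions: aws.StringSlice([]string{"GREATER_THAN_OR_EQUAL"}),
                Values:       aws.StringSlice([]string{"100"}),
            },
        }

        out, err := svc.UpdateAnomalySubscription(&costexplorer.UpdateAnomalySubscriptionInput{
            SubscriptionArn:     aws.String("arn:aws:ce::123456789012:anomalysubscription/example"), // placeholder ARN
            ThresholdExpression: expr,
        })
        if err != nil {
            fmt.Println("update failed:", err)
            return
        }
        fmt.Println(out)
    }

A percentage-based or combined expression follows the same shape, using the ANOMALY_TOTAL_IMPACT_PERCENTAGE dimension and the AND or OR nesting described above.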

", "CostCategoryRule$Rule": "

An Expression object used to categorize costs. This supports dimensions, tags, and nested expressions. Currently the only dimensions supported are LINKED_ACCOUNT, SERVICE_CODE, RECORD_TYPE, and LINKED_ACCOUNT_NAME.

Root level OR isn't supported. We recommend that you create a separate rule instead.

RECORD_TYPE is a dimension used for Cost Explorer APIs, and is also supported for Cost Category expressions. This dimension uses different terms, depending on whether you're using the console or API/JSON editor. For a detailed comparison, see Term Comparisons in the Billing and Cost Management User Guide.

", "Expression$Not": "

Return results that don't match a Dimension object.

", "Expressions$member": null, @@ -691,7 +692,8 @@ "GetSavingsPlansUtilizationDetailsRequest$Filter": "

Filters Savings Plans utilization coverage data for active Savings Plans dimensions. You can filter data with the following dimensions:

GetSavingsPlansUtilizationDetails uses the same Expression object as the other operations, but only AND is supported among each dimension.

", "GetSavingsPlansUtilizationRequest$Filter": "

Filters Savings Plans utilization coverage data for active Savings Plans dimensions. You can filter data with the following dimensions:

GetSavingsPlansUtilization uses the same Expression object as the other operations, but only AND is supported among each dimension.

", "GetTagsRequest$Filter": null, - "GetUsageForecastRequest$Filter": "

The filters that you want to use to filter your forecast. The GetUsageForecast API supports filtering by the following dimensions:

" + "GetUsageForecastRequest$Filter": "

The filters that you want to use to filter your forecast. The GetUsageForecast API supports filtering by the following dimensions:

", + "UpdateAnomalySubscriptionRequest$ThresholdExpression": "

The update to the Expression object used to specify the anomalies that you want to generate alerts for. This supports dimensions and nested expressions. The supported dimensions are ANOMALY_TOTAL_IMPACT_ABSOLUTE and ANOMALY_TOTAL_IMPACT_PERCENTAGE. The supported nested expression types are AND and OR. The match option GREATER_THAN_OR_EQUAL is required. Values must be numbers between 0 and 10,000,000,000.

The following are examples of valid ThresholdExpressions:

" } }, "Expressions": { @@ -772,8 +774,8 @@ "refs": { "AnomalyScore$MaxScore": "

The maximum score that's observed during the AnomalyDateInterval.

", "AnomalyScore$CurrentScore": "

The last observed score.

", - "Impact$MaxImpact": "

The maximum dollar value that's observed for an anomaly.

", - "Impact$TotalImpact": "

The cumulative dollar value that's observed for an anomaly.

", + "Impact$MaxImpact": "

The maximum dollar value that's observed for an anomaly.

", + "Impact$TotalImpact": "

The cumulative dollar difference between the total actual spend and total expected spend. It is calculated as TotalActualSpend - TotalExpectedSpend.

", "TotalImpactFilter$StartValue": "

The lower bound dollar value that's used in the filter.

", "TotalImpactFilter$EndValue": "

The upper bound dollar value that's used in the filter.

" } @@ -1286,7 +1288,7 @@ "base": null, "refs": { "CostCategoryValues$MatchOptions": "

The match options that you can use to filter your results. MatchOptions is only applicable for actions related to Cost Category. The default values for MatchOptions are EQUALS and CASE_SENSITIVE.

", - "DimensionValues$MatchOptions": "

The match options that you can use to filter your results. MatchOptions is only applicable for actions related to Cost Category. The default values for MatchOptions are EQUALS and CASE_SENSITIVE.

", + "DimensionValues$MatchOptions": "

The match options that you can use to filter your results.

MatchOptions is only applicable for actions related to Cost Category and Anomaly Subscriptions. Refer to the documentation for each specific API to see what is supported.

The default values for MatchOptions are EQUALS and CASE_SENSITIVE.

", "TagValues$MatchOptions": "

The match options that you can use to filter your results. MatchOptions is only applicable for actions related to Cost Category. The default values for MatchOptions are EQUALS and CASE_SENSITIVE.

" } }, @@ -1444,8 +1446,11 @@ "NullableNonNegativeDouble": { "base": null, "refs": { - "AnomalySubscription$Threshold": "

The dollar value that triggers a notification if the threshold is exceeded.

", - "UpdateAnomalySubscriptionRequest$Threshold": "

The update to the threshold value for receiving notifications.

" + "AnomalySubscription$Threshold": "

(deprecated)

The dollar value that triggers a notification if the threshold is exceeded.

This field has been deprecated. To specify a threshold, use ThresholdExpression. Continued use of Threshold will be treated as shorthand syntax for a ThresholdExpression.

One of Threshold or ThresholdExpression is required for this resource.

", + "Impact$TotalActualSpend": "

The cumulative dollar amount that was actually spent during the anomaly.

", + "Impact$TotalExpectedSpend": "

The cumulative dollar amount that was expected to be spent during the anomaly. It is calculated using advanced machine learning models to determine the typical spending pattern based on historical data for a customer.

", + "Impact$TotalImpactPercentage": "

The cumulative percentage difference between the total actual spend and total expected spend. It is calculated as (TotalImpact / TotalExpectedSpend) * 100. When TotalExpectedSpend is zero, this field is omitted. Expected spend can be zero in situations such as when you start to use a service for the first time.
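For example (illustrative numbers only): if TotalActualSpend is 150 and TotalExpectedSpend is 100, then TotalImpact is 150 - 100 = 50 and TotalImpactPercentage is (50 / 100) * 100 = 50.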

", + "UpdateAnomalySubscriptionRequest$Threshold": "

(deprecated)

The update to the threshold value for receiving notifications.

This field has been deprecated. To update a threshold, use ThresholdExpression. Continued use of Threshold will be treated as shorthand syntax for a ThresholdExpression.

" } }, "NumericOperator": { diff --git a/models/apis/monitoring/2010-08-01/api-2.json b/models/apis/monitoring/2010-08-01/api-2.json index ce98212dfdd..3845dda2f55 100644 --- a/models/apis/monitoring/2010-08-01/api-2.json +++ b/models/apis/monitoring/2010-08-01/api-2.json @@ -1214,6 +1214,10 @@ "type":"integer", "min":1 }, + "EvaluationState":{ + "type":"string", + "enum":["PARTIAL_DATA"] + }, "ExceptionType":{"type":"string"}, "ExtendedStatistic":{"type":"string"}, "ExtendedStatistics":{ @@ -1789,7 +1793,9 @@ "TreatMissingData":{"shape":"TreatMissingData"}, "EvaluateLowSampleCountPercentile":{"shape":"EvaluateLowSampleCountPercentile"}, "Metrics":{"shape":"MetricDataQueries"}, - "ThresholdMetricId":{"shape":"MetricId"} + "ThresholdMetricId":{"shape":"MetricId"}, + "EvaluationState":{"shape":"EvaluationState"}, + "StateTransitionedTimestamp":{"shape":"Timestamp"} }, "xmlOrder":[ "AlarmName", @@ -1818,7 +1824,9 @@ "EvaluateLowSampleCountPercentile", "DatapointsToAlarm", "Metrics", - "ThresholdMetricId" + "ThresholdMetricId", + "EvaluationState", + "StateTransitionedTimestamp" ] }, "MetricAlarms":{ diff --git a/models/apis/monitoring/2010-08-01/docs-2.json b/models/apis/monitoring/2010-08-01/docs-2.json index d4736b7516a..c4ad8bb32ac 100644 --- a/models/apis/monitoring/2010-08-01/docs-2.json +++ b/models/apis/monitoring/2010-08-01/docs-2.json @@ -32,7 +32,7 @@ "PutDashboard": "

Creates a dashboard if it does not already exist, or updates an existing dashboard. If you update a dashboard, the entire contents are replaced with what you specify here.

All dashboards in your account are global, not region-specific.

A simple way to create a dashboard using PutDashboard is to copy an existing dashboard. To copy an existing dashboard using the console, you can load the dashboard and then use the View/edit source command in the Actions menu to display the JSON block for that dashboard. Another way to copy a dashboard is to use GetDashboard, and then use the data returned within DashboardBody as the template for the new dashboard when you call PutDashboard.

When you create a dashboard with PutDashboard, a good practice is to add a text widget at the top of the dashboard with a message that the dashboard was created by script and should not be changed in the console. This message could also point console users to the location of the DashboardBody script or the CloudFormation template used to create the dashboard.

", "PutInsightRule": "

Creates a Contributor Insights rule. Rules evaluate log events in a CloudWatch Logs log group, enabling you to find contributor data for the log events in that log group. For more information, see Using Contributor Insights to Analyze High-Cardinality Data.

If you create a rule, delete it, and then re-create it with the same name, historical data from the first time the rule was created might not be available.

", "PutManagedInsightRules": "

Creates a managed Contributor Insights rule for a specified Amazon Web Services resource. When you enable a managed rule, you create a Contributor Insights rule that collects data from Amazon Web Services services. You cannot edit these rules with PutInsightRule. The rules can be enabled, disabled, and deleted using EnableInsightRules, DisableInsightRules, and DeleteInsightRules. If a previously created managed rule is currently disabled, a subsequent call to this API will re-enable it. Use ListManagedInsightRules to describe all available rules.

", - "PutMetricAlarm": "

Creates or updates an alarm and associates it with the specified metric, metric math expression, or anomaly detection model.

Alarms based on anomaly detection models cannot have Auto Scaling actions.

When this operation creates an alarm, the alarm state is immediately set to INSUFFICIENT_DATA. The alarm is then evaluated and its state is set appropriately. Any actions associated with the new state are then executed.

When you update an existing alarm, its state is left unchanged, but the update completely overwrites the previous configuration of the alarm.

If you are an IAM user, you must have Amazon EC2 permissions for some alarm operations:

The first time you create an alarm in the Amazon Web Services Management Console, the CLI, or by using the PutMetricAlarm API, CloudWatch creates the necessary service-linked role for you. The service-linked roles are called AWSServiceRoleForCloudWatchEvents and AWSServiceRoleForCloudWatchAlarms_ActionSSM. For more information, see Amazon Web Services service-linked role.

Cross-account alarms

You can set an alarm on metrics in the current account, or in another account. To create a cross-account alarm that watches a metric in a different account, you must have completed the following pre-requisites:

", + "PutMetricAlarm": "

Creates or updates an alarm and associates it with the specified metric, metric math expression, anomaly detection model, or Metrics Insights query. For more information about using a Metrics Insights query for an alarm, see Create alarms on Metrics Insights queries.
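As a sketch only (not part of this patch; the alarm name, Metrics Insights query, and threshold below are placeholder values), a Metrics Insights alarm can be created with the Go client by supplying the query as a MetricDataQuery expression:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/cloudwatch"
    )

    func main() {
        svc := cloudwatch.New(session.Must(session.NewSession()))

        _, err := svc.PutMetricAlarm(&cloudwatch.PutMetricAlarmInput{
            AlarmName:          aws.String("fleet-avg-cpu-high"), // placeholder name
            ComparisonOperator: aws.String("GreaterThanThreshold"),
            Threshold:          aws.Float64(80),
            EvaluationPeriods:  aws.Int64(1),
            TreatMissingData:   aws.String("missing"),
            Metrics: []*cloudwatch.MetricDataQuery{{
                Id:         aws.String("q1"),
                Period:     aws.Int64(300),
                ReturnData: aws.Bool(true),
                // Metrics Insights query; the alarm evaluates the time series it returns.
                Expression: aws.String(`SELECT AVG(CPUUtilization) FROM SCHEMA("AWS/EC2", InstanceId)`),
            }},
        })
        if err != nil {
            fmt.Println("put alarm failed:", err)
        }
    }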

Alarms based on anomaly detection models cannot have Auto Scaling actions.

When this operation creates an alarm, the alarm state is immediately set to INSUFFICIENT_DATA. The alarm is then evaluated and its state is set appropriately. Any actions associated with the new state are then executed.

When you update an existing alarm, its state is left unchanged, but the update completely overwrites the previous configuration of the alarm.

If you are an IAM user, you must have Amazon EC2 permissions for some alarm operations:

The first time you create an alarm in the Amazon Web Services Management Console, the CLI, or by using the PutMetricAlarm API, CloudWatch creates the necessary service-linked role for you. The service-linked roles are called AWSServiceRoleForCloudWatchEvents and AWSServiceRoleForCloudWatchAlarms_ActionSSM. For more information, see Amazon Web Services service-linked role.

Cross-account alarms

You can set an alarm on metrics in the current account, or in another account. To create a cross-account alarm that watches a metric in a different account, you must have completed the following pre-requisites:

", "PutMetricData": "

Publishes metric data points to Amazon CloudWatch. CloudWatch associates the data points with the specified metric. If the specified metric does not exist, CloudWatch creates the metric. When CloudWatch creates a metric, it can take up to fifteen minutes for the metric to appear in calls to ListMetrics.

You can publish either individual data points in the Value field, or arrays of values and the number of times each value occurred during the period by using the Values and Counts fields in the MetricDatum structure. Using the Values and Counts method enables you to publish up to 150 values per metric with one PutMetricData request, and supports retrieving percentile statistics on this data.

Each PutMetricData request is limited to 1 MB in size for HTTP POST requests. You can send a payload compressed by gzip. Each request is also limited to no more than 1000 different metrics.

Although the Value parameter accepts numbers of type Double, CloudWatch rejects values that are either too small or too large. Values must be in the range of -2^360 to 2^360. In addition, special values (for example, NaN, +Infinity, -Infinity) are not supported.

You can use up to 30 dimensions per metric to further clarify what data the metric collects. Each dimension consists of a Name and Value pair. For more information about specifying dimensions, see Publishing Metrics in the Amazon CloudWatch User Guide.

You specify the time stamp to be associated with each data point. You can specify time stamps that are as much as two weeks before the current date, and as much as 2 hours after the current day and time.

Data points with time stamps from 24 hours ago or longer can take at least 48 hours to become available for GetMetricData or GetMetricStatistics from the time they are submitted. Data points with time stamps between 3 and 24 hours ago can take as much as 2 hours to become available for GetMetricData or GetMetricStatistics.

CloudWatch needs raw data points to calculate percentile statistics. If you publish data using a statistic set instead, you can only retrieve percentile statistics for this data if one of the following conditions is true:

", "PutMetricStream": "

Creates or updates a metric stream. Metric streams can automatically stream CloudWatch metrics to Amazon Web Services destinations, including Amazon S3, and to many third-party solutions.

For more information, see Using Metric Streams.

To create a metric stream, you must be signed in to an account that has the iam:PassRole permission and either the CloudWatchFullAccess policy or the cloudwatch:PutMetricStream permission.

When you create or update a metric stream, you choose one of the following:

By default, a metric stream always sends the MAX, MIN, SUM, and SAMPLECOUNT statistics for each metric that is streamed. You can use the StatisticsConfigurations parameter to have the metric stream send additional statistics in the stream. Streaming additional statistics incurs additional costs. For more information, see Amazon CloudWatch Pricing.

When you use PutMetricStream to create a new metric stream, the stream is created in the running state. If you use it to update an existing stream, the state of the stream is not changed.

", "SetAlarmState": "

Temporarily sets the state of an alarm for testing purposes. When the updated state differs from the previous value, the action configured for the appropriate state is invoked. For example, if your alarm is configured to send an Amazon SNS message when an alarm is triggered, temporarily changing the alarm state to ALARM sends an SNS message.

Metric alarms return to their actual state quickly, often within seconds. Because the metric alarm state change happens quickly, it is typically only visible in the alarm's History tab in the Amazon CloudWatch console or through DescribeAlarmHistory.

If you use SetAlarmState on a composite alarm, the composite alarm is not guaranteed to return to its actual state. It returns to its actual state only once any of its children alarms change state. It is also reevaluated if you update its configuration.

If an alarm triggers EC2 Auto Scaling policies or application Auto Scaling policies, you must include information in the StateReasonData parameter to enable the policy to take the correct action.

", @@ -612,6 +612,12 @@ "PutMetricAlarmInput$EvaluationPeriods": "

The number of periods over which data is compared to the specified threshold. If you are setting an alarm that requires that a number of consecutive data points be breaching to trigger the alarm, this value specifies that number. If you are setting an \"M out of N\" alarm, this value is the N.

An alarm's total current evaluation period can be no longer than one day, so this number multiplied by Period cannot be more than 86,400 seconds.

" } }, + "EvaluationState": { + "base": null, + "refs": { + "MetricAlarm$EvaluationState": "

If the value of this field is PARTIAL_DATA, the alarm is being evaluated based on only partial data. This happens if the query used for the alarm returns more than 10,000 metrics. For more information, see Create alarms on Metrics Insights queries.

" + } + }, "ExceptionType": { "base": null, "refs": { @@ -1754,7 +1760,8 @@ "InsightRuleContributorDatapoint$Timestamp": "

The timestamp of the data point.

", "InsightRuleMetricDatapoint$Timestamp": "

The timestamp of the data point.

", "MetricAlarm$AlarmConfigurationUpdatedTimestamp": "

The time stamp of the last update to the alarm configuration.

", - "MetricAlarm$StateUpdatedTimestamp": "

The time stamp of the last update to the alarm state.

", + "MetricAlarm$StateUpdatedTimestamp": "

The time stamp of the last update to the value of either the StateValue or EvaluationState parameters.

", + "MetricAlarm$StateTransitionedTimestamp": "

The date and time that the alarm's StateValue most recently changed.

", "MetricDatum$Timestamp": "

The time the metric data was received, expressed as the number of milliseconds since Jan 1, 1970 00:00:00 UTC.

", "MetricStreamEntry$CreationDate": "

The date that the metric stream was originally created.

", "MetricStreamEntry$LastUpdateDate": "

The date that the configuration of this metric stream was most recently updated.

", diff --git a/models/apis/monitoring/2010-08-01/endpoint-rule-set-1.json b/models/apis/monitoring/2010-08-01/endpoint-rule-set-1.json index 5bfcb13cd42..b155f7ae141 100644 --- a/models/apis/monitoring/2010-08-01/endpoint-rule-set-1.json +++ b/models/apis/monitoring/2010-08-01/endpoint-rule-set-1.json @@ -3,7 +3,7 @@ "parameters": { "Region": { "builtIn": "AWS::Region", - "required": false, + "required": true, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -52,15 +52,6 @@ "ref": "Endpoint" } ] - }, - { - "fn": "parseURL", - "argv": [ - { - "ref": "Endpoint" - } - ], - "assign": "url" } ], "type": "tree", diff --git a/models/apis/networkmanager/2019-07-05/api-2.json b/models/apis/networkmanager/2019-07-05/api-2.json index be716a00468..6ee60ebe81e 100644 --- a/models/apis/networkmanager/2019-07-05/api-2.json +++ b/models/apis/networkmanager/2019-07-05/api-2.json @@ -5123,7 +5123,8 @@ "VpcOptions":{ "type":"structure", "members":{ - "Ipv6Support":{"shape":"Boolean"} + "Ipv6Support":{"shape":"Boolean"}, + "ApplianceModeSupport":{"shape":"Boolean"} } }, "VpnConnectionArn":{ diff --git a/models/apis/networkmanager/2019-07-05/docs-2.json b/models/apis/networkmanager/2019-07-05/docs-2.json index dc36269d097..2edfd2a9759 100644 --- a/models/apis/networkmanager/2019-07-05/docs-2.json +++ b/models/apis/networkmanager/2019-07-05/docs-2.json @@ -75,7 +75,7 @@ "RegisterTransitGateway": "

Registers a transit gateway in your global network. The transit gateway can be in any Amazon Web Services Region, but it must be owned by the same Amazon Web Services account that owns the global network. You cannot register a transit gateway in more than one global network.

", "RejectAttachment": "

Rejects a core network attachment request.

", "RestoreCoreNetworkPolicyVersion": "

Restores a previous policy version as a new, immutable version of a core network policy. A subsequent change set is created showing the differences between the LIVE policy and restored policy.

", - "StartOrganizationServiceAccessUpdate": "

Enables for the Network Manager service for an Amazon Web Services Organization. This can only be called by a management account within the organization.

", + "StartOrganizationServiceAccessUpdate": "

Enables the Network Manager service for an Amazon Web Services Organization. This can only be called by a management account within the organization.

", "StartRouteAnalysis": "

Starts analyzing the routing path between the specified source and destination. For more information, see Route Analyzer.

", "TagResource": "

Tags a specified resource.

", "UntagResource": "

Removes tags from a specified resource.

", @@ -267,7 +267,8 @@ "RouteAnalysis$UseMiddleboxes": "

Indicates whether to include the location of middlebox appliances in the route analysis.

", "StartRouteAnalysisRequest$IncludeReturnPath": "

Indicates whether to analyze the return path. The default is false.

", "StartRouteAnalysisRequest$UseMiddleboxes": "

Indicates whether to include the location of middlebox appliances in the route analysis. The default is false.

", - "VpcOptions$Ipv6Support": "

Indicates whether IPv6 is supported.

" + "VpcOptions$Ipv6Support": "

Indicates whether IPv6 is supported.

", + "VpcOptions$ApplianceModeSupport": "

Indicates whether appliance mode is supported. If enabled, traffic flow between a source and destination uses the same Availability Zone for the VPC attachment for the lifetime of that flow. The default value is false.
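As a sketch under the assumption that the attachment is created with CreateVpcAttachment (the core network ID, ARNs, and flag values below are placeholders, not from this patch), appliance mode can be enabled through the Go client:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/networkmanager"
    )

    func main() {
        svc := networkmanager.New(session.Must(session.NewSession()))

        _, err := svc.CreateVpcAttachment(&networkmanager.CreateVpcAttachmentInput{
            CoreNetworkId: aws.String("core-network-0example"), // placeholder
            VpcArn:        aws.String("arn:aws:ec2:us-west-2:123456789012:vpc/vpc-0example"),
            SubnetArns:    aws.StringSlice([]string{"arn:aws:ec2:us-west-2:123456789012:subnet/subnet-0example"}),
            Options: &networkmanager.VpcOptions{
                ApplianceModeSupport: aws.Bool(true),
                Ipv6Support:          aws.Bool(false),
            },
        })
        if err != nil {
            fmt.Println("create attachment failed:", err)
        }
    }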

" } }, "ChangeAction": { diff --git a/models/apis/networkmanager/2019-07-05/endpoint-rule-set-1.json b/models/apis/networkmanager/2019-07-05/endpoint-rule-set-1.json new file mode 100644 index 00000000000..2ae881f2c14 --- /dev/null +++ b/models/apis/networkmanager/2019-07-05/endpoint-rule-set-1.json @@ -0,0 +1,861 @@ +{ + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + }, + { + "fn": "parseURL", + "argv": [ + { + "ref": "Endpoint" + } + ], + "assign": "url" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + }, + "aws" + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://networkmanager-fips.{Region}.api.aws", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-west-2", + "signingName": "networkmanager" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": 
"error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://networkmanager-fips.{Region}.amazonaws.com", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-west-2", + "signingName": "networkmanager" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://networkmanager.{Region}.api.aws", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-west-2", + "signingName": "networkmanager" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://networkmanager.us-west-2.amazonaws.com", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-west-2", + "signingName": "networkmanager" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + }, + "aws-us-gov" + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://networkmanager-fips.{Region}.api.aws", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-gov-west-1", + "signingName": "networkmanager" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": 
"https://networkmanager-fips.{Region}.amazonaws.com", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-gov-west-1", + "signingName": "networkmanager" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://networkmanager.{Region}.api.aws", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-gov-west-1", + "signingName": "networkmanager" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://networkmanager.us-gov-west-1.amazonaws.com", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-gov-west-1", + "signingName": "networkmanager" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://networkmanager-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://networkmanager-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": 
{ + "url": "https://networkmanager.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "ref": "Region" + }, + "aws-global" + ] + } + ], + "endpoint": { + "url": "https://networkmanager.us-west-2.amazonaws.com", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-west-2", + "signingName": "networkmanager" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "ref": "Region" + }, + "aws-us-gov-global" + ] + } + ], + "endpoint": { + "url": "https://networkmanager.us-gov-west-1.amazonaws.com", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-gov-west-1", + "signingName": "networkmanager" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://networkmanager.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + } + ] +} \ No newline at end of file diff --git a/models/apis/networkmanager/2019-07-05/endpoint-tests-1.json b/models/apis/networkmanager/2019-07-05/endpoint-tests-1.json new file mode 100644 index 00000000000..69a0a0f7ff6 --- /dev/null +++ b/models/apis/networkmanager/2019-07-05/endpoint-tests-1.json @@ -0,0 +1,87 @@ +{ + "testCases": [ + { + "documentation": "For region aws-global with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "signingRegion": "us-west-2", + "signingName": "networkmanager", + "name": "sigv4" + } + ] + }, + "url": "https://networkmanager.us-west-2.amazonaws.com" + } + }, + "params": { + "UseFIPS": false, + "Region": "aws-global", + "UseDualStack": false + } + }, + { + "documentation": "For region aws-us-gov-global with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "signingRegion": "us-gov-west-1", + "signingName": "networkmanager", + "name": "sigv4" + } + ] + }, + "url": "https://networkmanager.us-gov-west-1.amazonaws.com" + } + }, + "params": { + "UseFIPS": false, + "Region": "aws-us-gov-global", + "UseDualStack": false + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "Region": "us-east-1", + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "UseFIPS": true, + "Region": "us-east-1", + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "UseFIPS": false, + "Region": "us-east-1", + "UseDualStack": true, + "Endpoint": "https://example.com" + } + } + ], + "version": "1.0" +} \ No newline at end of file diff --git a/models/apis/redshift-data/2019-12-20/api-2.json 
b/models/apis/redshift-data/2019-12-20/api-2.json index 0511a07085d..1bb26b02c0b 100644 --- a/models/apis/redshift-data/2019-12-20/api-2.json +++ b/models/apis/redshift-data/2019-12-20/api-2.json @@ -182,6 +182,10 @@ "Sqls" ], "members":{ + "ClientToken":{ + "shape":"ClientToken", + "idempotencyToken":true + }, "ClusterIdentifier":{"shape":"Location"}, "Database":{"shape":"String"}, "DbUser":{"shape":"String"}, @@ -234,6 +238,11 @@ "Status":{"shape":"Boolean"} } }, + "ClientToken":{ + "type":"string", + "max":64, + "min":1 + }, "ColumnList":{ "type":"list", "member":{"shape":"ColumnMetadata"} @@ -349,6 +358,10 @@ "Sql" ], "members":{ + "ClientToken":{ + "shape":"ClientToken", + "idempotencyToken":true + }, "ClusterIdentifier":{"shape":"Location"}, "Database":{"shape":"String"}, "DbUser":{"shape":"String"}, diff --git a/models/apis/redshift-data/2019-12-20/docs-2.json b/models/apis/redshift-data/2019-12-20/docs-2.json index a6bbf91cfbc..138574cae9d 100644 --- a/models/apis/redshift-data/2019-12-20/docs-2.json +++ b/models/apis/redshift-data/2019-12-20/docs-2.json @@ -1,17 +1,17 @@ { "version": "2.0", - "service": "

You can use the Amazon Redshift Data API to run queries on Amazon Redshift tables. You can run SQL statements, which are committed if the statement succeeds.

For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in the Amazon Redshift Cluster Management Guide.

", + "service": "

You can use the Amazon Redshift Data API to run queries on Amazon Redshift tables. You can run SQL statements, which are committed if the statement succeeds.

For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide.

", "operations": { - "BatchExecuteStatement": "

Runs one or more SQL statements, which can be data manipulation language (DML) or data definition language (DDL). Depending on the authorization method, use one of the following combinations of request parameters:

", - "CancelStatement": "

Cancels a running query. To be canceled, a query must be running.

", - "DescribeStatement": "

Describes the details about a specific instance when a query was run by the Amazon Redshift Data API. The information includes when the query started, when it finished, the query status, the number of rows returned, and the SQL statement.

", - "DescribeTable": "

Describes the detailed information about a table from metadata in the cluster. The information includes its columns. A token is returned to page through the column list. Depending on the authorization method, use one of the following combinations of request parameters:

", - "ExecuteStatement": "

Runs an SQL statement, which can be data manipulation language (DML) or data definition language (DDL). This statement must be a single SQL statement. Depending on the authorization method, use one of the following combinations of request parameters:

", - "GetStatementResult": "

Fetches the temporarily cached result of an SQL statement. A token is returned to page through the statement results.

", - "ListDatabases": "

List the databases in a cluster. A token is returned to page through the database list. Depending on the authorization method, use one of the following combinations of request parameters:

", - "ListSchemas": "

Lists the schemas in a database. A token is returned to page through the schema list. Depending on the authorization method, use one of the following combinations of request parameters:

", - "ListStatements": "

List of SQL statements. By default, only finished statements are shown. A token is returned to page through the statement list.

", - "ListTables": "

List the tables in a database. If neither SchemaPattern nor TablePattern are specified, then all tables in the database are returned. A token is returned to page through the table list. Depending on the authorization method, use one of the following combinations of request parameters:

" + "BatchExecuteStatement": "

Runs one or more SQL statements, which can be data manipulation language (DML) or data definition language (DDL). Depending on the authorization method, use one of the following combinations of request parameters:

For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide.

", + "CancelStatement": "

Cancels a running query. To be canceled, a query must be running.

For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide.

", + "DescribeStatement": "

Describes the details about a specific instance when a query was run by the Amazon Redshift Data API. The information includes when the query started, when it finished, the query status, the number of rows returned, and the SQL statement.

For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide.

", + "DescribeTable": "

Describes the detailed information about a table from metadata in the cluster. The information includes its columns. A token is returned to page through the column list. Depending on the authorization method, use one of the following combinations of request parameters:

For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide.

", + "ExecuteStatement": "

Runs an SQL statement, which can be data manipulation language (DML) or data definition language (DDL). This statement must be a single SQL statement. Depending on the authorization method, use one of the following combinations of request parameters:

For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide.

", + "GetStatementResult": "

Fetches the temporarily cached result of an SQL statement. A token is returned to page through the statement results.

For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide.

", + "ListDatabases": "

List the databases in a cluster. A token is returned to page through the database list. Depending on the authorization method, use one of the following combinations of request parameters:

For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide.

", + "ListSchemas": "

Lists the schemas in a database. A token is returned to page through the schema list. Depending on the authorization method, use one of the following combinations of request parameters:

For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide.

", + "ListStatements": "

List of SQL statements. By default, only finished statements are shown. A token is returned to page through the statement list.

For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide.

", + "ListTables": "

List the tables in a database. If neither SchemaPattern nor TablePattern are specified, then all tables in the database are returned. A token is returned to page through the table list. Depending on the authorization method, use one of the following combinations of request parameters:

For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide.

" }, "shapes": { "ActiveStatementsExceededException": { @@ -81,6 +81,13 @@ "refs": { } }, + "ClientToken": { + "base": null, + "refs": { + "BatchExecuteStatementInput$ClientToken": "

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

", + "ExecuteStatementInput$ClientToken": "

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request.
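A minimal sketch (not part of the patch; the cluster, database, user, and token values are placeholders) of supplying the new ClientToken when running a statement with the Go client:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/redshiftdataapiservice"
    )

    func main() {
        svc := redshiftdataapiservice.New(session.Must(session.NewSession()))

        out, err := svc.ExecuteStatement(&redshiftdataapiservice.ExecuteStatementInput{
            ClusterIdentifier: aws.String("example-cluster"), // placeholder
            Database:          aws.String("dev"),
            DbUser:            aws.String("awsuser"),
            Sql:               aws.String("SELECT 1"),
            // Client-generated idempotency token (for example, a UUID); retrying the
            // same request with the same token does not run the statement twice.
            ClientToken: aws.String("3a6f2c1e-4b7d-4d2a-9c1f-example"),
        })
        if err != nil {
            fmt.Println("execute failed:", err)
            return
        }
        fmt.Println(aws.StringValue(out.Id))
    }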

" + } + }, "ColumnList": { "base": null, "refs": { @@ -274,7 +281,7 @@ "ParameterValue": { "base": null, "refs": { - "SqlParameter$value": "

The value of the parameter. Amazon Redshift implicitly converts to the proper data type. For more inforation, see Data types in the Amazon Redshift Database Developer Guide.

" + "SqlParameter$value": "

The value of the parameter. Amazon Redshift implicitly converts to the proper data type. For more information, see Data types in the Amazon Redshift Database Developer Guide.

" } }, "ResourceNotFoundException": { @@ -306,7 +313,7 @@ "SqlList": { "base": null, "refs": { - "BatchExecuteStatementInput$Sqls": "

One or more SQL statements to run.

" + "BatchExecuteStatementInput$Sqls": "

One or more SQL statements to run.

The SQL statements are run as a single transaction. They run serially in the order of the array. Subsequent SQL statements don't start until the previous statement in the array completes. If any SQL statement fails, then because they are run as one transaction, all work is rolled back.
" } }, "SqlParameter": { diff --git a/models/apis/redshift-data/2019-12-20/endpoint-rule-set-1.json b/models/apis/redshift-data/2019-12-20/endpoint-rule-set-1.json new file mode 100644 index 00000000000..c136deb3c2e --- /dev/null +++ b/models/apis/redshift-data/2019-12-20/endpoint-rule-set-1.json @@ -0,0 +1,309 @@ +{ + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + }, + { + "fn": "parseURL", + "argv": [ + { + "ref": "Endpoint" + } + ], + "assign": "url" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://redshift-data-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + 
}, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://redshift-data-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://redshift-data.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://redshift-data.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] +} \ No newline at end of file diff --git a/models/apis/redshift-data/2019-12-20/endpoint-tests-1.json b/models/apis/redshift-data/2019-12-20/endpoint-tests-1.json new file mode 100644 index 00000000000..ff1bae97f53 --- /dev/null +++ b/models/apis/redshift-data/2019-12-20/endpoint-tests-1.json @@ -0,0 +1,43 @@ +{ + "testCases": [ + { + "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "Region": "us-east-1", + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "UseFIPS": true, + "Region": "us-east-1", + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "UseFIPS": false, + "Region": "us-east-1", + "UseDualStack": true, + "Endpoint": "https://example.com" + } + } + ], + "version": "1.0" +} \ No newline at end of file diff --git a/models/apis/sagemaker-metrics/2022-09-30/docs-2.json b/models/apis/sagemaker-metrics/2022-09-30/docs-2.json index 7d108670eac..bf272dfbb5b 100644 --- a/models/apis/sagemaker-metrics/2022-09-30/docs-2.json +++ b/models/apis/sagemaker-metrics/2022-09-30/docs-2.json @@ -2,7 +2,7 @@ "version": "2.0", "service": "

Contains all data plane API operations and data types for Amazon SageMaker Metrics. Use these APIs to put and retrieve (get) features related to your training run.

", "operations": { - "BatchPutMetrics": "

Used to ingest training metrics into SageMaker which can be visualized in SageMaker Studio and retrieved with the GetMetrics API.

" + "BatchPutMetrics": "

Used to ingest training metrics into SageMaker. These metrics can be visualized in SageMaker Studio and retrieved with the GetMetrics API.

" }, "shapes": { "BatchPutMetricsError": { @@ -14,7 +14,7 @@ "BatchPutMetricsErrorList": { "base": null, "refs": { - "BatchPutMetricsResponse$Errors": "

Any errors that occur when inserting metric data will appear in this.

" + "BatchPutMetricsResponse$Errors": "

Lists any errors that occur when inserting metric data.

" } }, "BatchPutMetricsRequest": { @@ -36,7 +36,7 @@ "ExperimentEntityName": { "base": null, "refs": { - "BatchPutMetricsRequest$TrialComponentName": "

The name of Trial Component to associate the metrics with.

" + "BatchPutMetricsRequest$TrialComponentName": "

The name of the Trial Component to associate with the metrics.

" } }, "Integer": { @@ -54,7 +54,7 @@ "PutMetricsErrorCode": { "base": null, "refs": { - "BatchPutMetricsError$Code": "

The error code of an error that occured when attempting to put metrics.

" + "BatchPutMetricsError$Code": "

The error code of an error that occurred when attempting to put metrics.

" } }, "RawMetricData": { @@ -72,13 +72,13 @@ "Step": { "base": null, "refs": { - "RawMetricData$Step": "

Metric step (aka Epoch).

" + "RawMetricData$Step": "

The metric step (epoch).

" } }, "Timestamp": { "base": null, "refs": { - "RawMetricData$Timestamp": "

The time when the metric was recorded.

" + "RawMetricData$Timestamp": "

The time that the metric was recorded.

" } } } diff --git a/models/apis/sagemaker-metrics/2022-09-30/endpoint-tests-1.json b/models/apis/sagemaker-metrics/2022-09-30/endpoint-tests-1.json index 8a32af46e7a..6e0015d2549 100644 --- a/models/apis/sagemaker-metrics/2022-09-30/endpoint-tests-1.json +++ b/models/apis/sagemaker-metrics/2022-09-30/endpoint-tests-1.json @@ -9,8 +9,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": true, - "Region": "us-gov-east-1" + "Region": "us-gov-east-1", + "UseDualStack": true } }, { @@ -22,8 +22,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": false, - "Region": "us-gov-east-1" + "Region": "us-gov-east-1", + "UseDualStack": false } }, { @@ -35,8 +35,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": true, - "Region": "us-gov-east-1" + "Region": "us-gov-east-1", + "UseDualStack": true } }, { @@ -48,8 +48,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "us-gov-east-1" + "Region": "us-gov-east-1", + "UseDualStack": false } }, { @@ -61,8 +61,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": true, - "Region": "cn-north-1" + "Region": "cn-north-1", + "UseDualStack": true } }, { @@ -74,8 +74,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": false, - "Region": "cn-north-1" + "Region": "cn-north-1", + "UseDualStack": false } }, { @@ -87,8 +87,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": true, - "Region": "cn-north-1" + "Region": "cn-north-1", + "UseDualStack": true } }, { @@ -100,8 +100,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "cn-north-1" + "Region": "cn-north-1", + "UseDualStack": false } }, { @@ -111,8 +111,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": true, - "Region": "us-iso-east-1" + "Region": "us-iso-east-1", + "UseDualStack": true } }, { @@ -124,8 +124,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": false, - "Region": "us-iso-east-1" + "Region": "us-iso-east-1", + "UseDualStack": false } }, { @@ -135,8 +135,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": true, - "Region": "us-iso-east-1" + "Region": "us-iso-east-1", + "UseDualStack": true } }, { @@ -148,8 +148,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "us-iso-east-1" + "Region": "us-iso-east-1", + "UseDualStack": false } }, { @@ -161,8 +161,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": true, - "Region": "us-east-1" + "Region": "us-east-1", + "UseDualStack": true } }, { @@ -174,8 +174,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": false, - "Region": "us-east-1" + "Region": "us-east-1", + "UseDualStack": false } }, { @@ -187,8 +187,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": true, - "Region": "us-east-1" + "Region": "us-east-1", + "UseDualStack": true } }, { @@ -200,8 +200,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "us-east-1" + "Region": "us-east-1", + "UseDualStack": false } }, { @@ -211,8 +211,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": true, - "Region": "us-isob-east-1" + "Region": "us-isob-east-1", + "UseDualStack": true } }, { @@ -224,8 +224,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": false, - "Region": "us-isob-east-1" + "Region": "us-isob-east-1", + "UseDualStack": false } }, { @@ -235,8 +235,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": true, - "Region": "us-isob-east-1" + "Region": "us-isob-east-1", + "UseDualStack": true } }, { @@ -248,8 +248,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "us-isob-east-1" + "Region": "us-isob-east-1", + "UseDualStack": false } }, { @@ 
-261,8 +261,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": false, "Region": "us-east-1", + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -273,8 +273,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": false, "Region": "us-east-1", + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -285,8 +285,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": true, "Region": "us-east-1", + "UseDualStack": true, "Endpoint": "https://example.com" } } diff --git a/models/endpoints/endpoints.json b/models/endpoints/endpoints.json index 0dfaeaba0b2..e00cdbeea65 100644 --- a/models/endpoints/endpoints.json +++ b/models/endpoints/endpoints.json @@ -8371,6 +8371,13 @@ "deprecated" : true, "hostname" : "kms-fips.ap-southeast-3.amazonaws.com" }, + "ap-southeast-4-fips" : { + "credentialScope" : { + "region" : "ap-southeast-4" + }, + "deprecated" : true, + "hostname" : "kms-fips.ap-southeast-4.amazonaws.com" + }, "ca-central-1" : { "variants" : [ { "hostname" : "kms-fips.ca-central-1.amazonaws.com", diff --git a/service/cloudwatch/api.go b/service/cloudwatch/api.go index b2216879e60..80ab015c452 100644 --- a/service/cloudwatch/api.go +++ b/service/cloudwatch/api.go @@ -3378,7 +3378,9 @@ func (c *CloudWatch) PutMetricAlarmRequest(input *PutMetricAlarmInput) (req *req // PutMetricAlarm API operation for Amazon CloudWatch. // // Creates or updates an alarm and associates it with the specified metric, -// metric math expression, or anomaly detection model. +// metric math expression, anomaly detection model, or Metrics Insights query. +// For more information about using a Metrics Insights query for an alarm, see +// Create alarms on Metrics Insights queries (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Create_Metrics_Insights_Alarm.html). // // Alarms based on anomaly detection models cannot have Auto Scaling actions. // @@ -8946,6 +8948,12 @@ type MetricAlarm struct { // The number of periods over which data is compared to the specified threshold. EvaluationPeriods *int64 `min:"1" type:"integer"` + // If the value of this field is PARTIAL_DATA, the alarm is being evaluated + // based on only partial data. This happens if the query used for the alarm + // returns more than 10,000 metrics. For more information, see Create alarms + // on Metrics Insights queries (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Create_Metrics_Insights_Alarm.html). + EvaluationState *string `type:"string" enum:"EvaluationState"` + // The percentile statistic for the metric associated with the alarm. Specify // a value between p0.0 and p100. ExtendedStatistic *string `type:"string"` @@ -8982,7 +8990,11 @@ type MetricAlarm struct { // An explanation for the alarm state, in JSON format. StateReasonData *string `type:"string"` - // The time stamp of the last update to the alarm state. + // The date and time that the alarm's StateValue most recently changed. + StateTransitionedTimestamp *time.Time `type:"timestamp"` + + // The time stamp of the last update to the value of either the StateValue or + // EvaluationState parameters. StateUpdatedTimestamp *time.Time `type:"timestamp"` // The state value for the alarm. @@ -9094,6 +9106,12 @@ func (s *MetricAlarm) SetEvaluationPeriods(v int64) *MetricAlarm { return s } +// SetEvaluationState sets the EvaluationState field's value. +func (s *MetricAlarm) SetEvaluationState(v string) *MetricAlarm { + s.EvaluationState = &v + return s +} + // SetExtendedStatistic sets the ExtendedStatistic field's value. 
func (s *MetricAlarm) SetExtendedStatistic(v string) *MetricAlarm { s.ExtendedStatistic = &v @@ -9148,6 +9166,12 @@ func (s *MetricAlarm) SetStateReasonData(v string) *MetricAlarm { return s } +// SetStateTransitionedTimestamp sets the StateTransitionedTimestamp field's value. +func (s *MetricAlarm) SetStateTransitionedTimestamp(v time.Time) *MetricAlarm { + s.StateTransitionedTimestamp = &v + return s +} + // SetStateUpdatedTimestamp sets the StateUpdatedTimestamp field's value. func (s *MetricAlarm) SetStateUpdatedTimestamp(v time.Time) *MetricAlarm { s.StateUpdatedTimestamp = &v @@ -12629,6 +12653,18 @@ func ComparisonOperator_Values() []string { } } +const ( + // EvaluationStatePartialData is a EvaluationState enum value + EvaluationStatePartialData = "PARTIAL_DATA" +) + +// EvaluationState_Values returns all elements of the EvaluationState enum +func EvaluationState_Values() []string { + return []string{ + EvaluationStatePartialData, + } +} + const ( // HistoryItemTypeConfigurationUpdate is a HistoryItemType enum value HistoryItemTypeConfigurationUpdate = "ConfigurationUpdate" diff --git a/service/costexplorer/api.go b/service/costexplorer/api.go index ddfd2609b91..eb7929ff068 100644 --- a/service/costexplorer/api.go +++ b/service/costexplorer/api.go @@ -134,10 +134,10 @@ func (c *CostExplorer) CreateAnomalySubscriptionRequest(input *CreateAnomalySubs // CreateAnomalySubscription API operation for AWS Cost Explorer Service. // -// Adds a subscription to a cost anomaly detection monitor. You can use each -// subscription to define subscribers with email or SNS notifications. Email -// subscribers can set a dollar threshold and a time frequency for receiving -// notifications. +// Adds an alert subscription to a cost anomaly detection monitor. You can use +// each subscription to define subscribers with email or SNS notifications. +// Email subscribers can set an absolute or percentage threshold and a time +// frequency for receiving notifications. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -640,7 +640,8 @@ func (c *CostExplorer) GetAnomaliesRequest(input *GetAnomaliesInput) (req *reque // GetAnomalies API operation for AWS Cost Explorer Service. // // Retrieves all of the cost anomalies detected on your account during the time -// period that's specified by the DateInterval object. +// period that's specified by the DateInterval object. Anomalies are available +// for up to 90 days. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3718,31 +3719,48 @@ type AnomalyMonitor struct { // MonitorName is a required field MonitorName *string `type:"string" required:"true"` - // Use Expression to filter by cost or by usage. There are two patterns: - // - // * Simple dimension values - You can set the dimension name and values - // for the filters that you plan to use. For example, you can filter for - // REGION==us-east-1 OR REGION==us-west-1. For GetRightsizingRecommendation, - // the Region is a full name (for example, REGION==US East (N. Virginia). - // The Expression example is as follows: { "Dimensions": { "Key": "REGION", - // "Values": [ "us-east-1", “us-west-1” ] } } The list of dimension values - // are OR'd together to retrieve cost or usage data. 
You can create Expression - // and DimensionValues objects using either with* methods or set* methods - // in multiple lines. - // - // * Compound dimension values with logical operations - You can use multiple + // Use Expression to filter in various Cost Explorer APIs. + // + // Not all Expression types are supported in each API. Refer to the documentation + // for each specific API to see what is supported. + // + // There are two patterns: + // + // * Simple dimension values. There are three types of simple dimension values: + // CostCategories, Tags, and Dimensions. Specify the CostCategories field + // to define a filter that acts on Cost Categories. Specify the Tags field + // to define a filter that acts on Cost Allocation Tags. Specify the Dimensions + // field to define a filter that acts on the DimensionValues (https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_DimensionValues.html). + // For each filter type, you can set the dimension name and values for the + // filters that you plan to use. For example, you can filter for REGION==us-east-1 + // OR REGION==us-west-1. For GetRightsizingRecommendation, the Region is + // a full name (for example, REGION==US East (N. Virginia). The corresponding + // Expression for this example is as follows: { "Dimensions": { "Key": "REGION", + // "Values": [ "us-east-1", “us-west-1” ] } } As shown in the previous + // example, lists of dimension values are combined with OR when applying + // the filter. You can also set different match options to further control + // how the filter behaves. Not all APIs support match options. Refer to the + // documentation for each specific API to see what is supported. For example, + // you can filter for linked account names that start with “a”. The corresponding + // Expression for this example is as follows: { "Dimensions": { "Key": "LINKED_ACCOUNT_NAME", + // "MatchOptions": [ "STARTS_WITH" ], "Values": [ "a" ] } } + // + // * Compound Expression types with logical operations. You can use multiple // Expression types and the logical operators AND/OR/NOT to create a list - // of one or more Expression objects. By doing this, you can filter on more - // advanced options. For example, you can filter on ((REGION == us-east-1 + // of one or more Expression objects. By doing this, you can filter by more + // advanced options. For example, you can filter by ((REGION == us-east-1 // OR REGION == us-west-1) OR (TAG.Type == Type1)) AND (USAGE_TYPE != DataTransfer). - // The Expression for that is as follows: { "And": [ {"Or": [ {"Dimensions": - // { "Key": "REGION", "Values": [ "us-east-1", "us-west-1" ] }}, {"Tags": - // { "Key": "TagName", "Values": ["Value1"] } } ]}, {"Not": {"Dimensions": - // { "Key": "USAGE_TYPE", "Values": ["DataTransfer"] }}} ] } Because each - // Expression can have only one operator, the service returns an error if - // more than one is specified. The following example shows an Expression - // object that creates an error. { "And": [ ... ], "DimensionValues": { "Dimension": - // "USAGE_TYPE", "Values": [ "DataTransfer" ] } } + // The corresponding Expression for this example is as follows: { "And": + // [ {"Or": [ {"Dimensions": { "Key": "REGION", "Values": [ "us-east-1", + // "us-west-1" ] }}, {"Tags": { "Key": "TagName", "Values": ["Value1"] } + // } ]}, {"Not": {"Dimensions": { "Key": "USAGE_TYPE", "Values": ["DataTransfer"] + // }}} ] } Because each Expression can have only one operator, the service + // returns an error if more than one is specified. 
The following example + // shows an Expression object that creates an error: { "And": [ ... ], "Dimensions": + // { "Key": "USAGE_TYPE", "Values": [ "DataTransfer" ] } } The following + // is an example of the corresponding error message: "Expression has more + // than one roots. Only one root operator is allowed for each expression: + // And, Or, Not, Dimensions, Tags, CostCategories" // // For the GetRightsizingRecommendation action, a combination of OR and NOT // isn't supported. OR isn't supported between different dimensions, or dimensions @@ -3930,10 +3948,45 @@ type AnomalySubscription struct { // SubscriptionName is a required field SubscriptionName *string `type:"string" required:"true"` + // (deprecated) + // // The dollar value that triggers a notification if the threshold is exceeded. // - // Threshold is a required field - Threshold *float64 `type:"double" required:"true"` + // This field has been deprecated. To specify a threshold, use ThresholdExpression. + // Continued use of Threshold will be treated as shorthand syntax for a ThresholdExpression. + // + // One of Threshold or ThresholdExpression is required for this resource. + // + // Deprecated: Threshold has been deprecated in favor of ThresholdExpression + Threshold *float64 `deprecated:"true" type:"double"` + + // An Expression (https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_Expression.html) + // object used to specify the anomalies that you want to generate alerts for. + // This supports dimensions and nested expressions. The supported dimensions + // are ANOMALY_TOTAL_IMPACT_ABSOLUTE and ANOMALY_TOTAL_IMPACT_PERCENTAGE. The + // supported nested expression types are AND and OR. The match option GREATER_THAN_OR_EQUAL + // is required. Values must be numbers between 0 and 10,000,000,000. + // + // One of Threshold or ThresholdExpression is required for this resource. + // + // The following are examples of valid ThresholdExpressions: + // + // * Absolute threshold: { "Dimensions": { "Key": "ANOMALY_TOTAL_IMPACT_ABSOLUTE", + // "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ], "Values": [ "100" ] } } + // + // * Percentage threshold: { "Dimensions": { "Key": "ANOMALY_TOTAL_IMPACT_PERCENTAGE", + // "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ], "Values": [ "100" ] } } + // + // * AND two thresholds together: { "And": [ { "Dimensions": { "Key": "ANOMALY_TOTAL_IMPACT_ABSOLUTE", + // "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ], "Values": [ "100" ] } }, + // { "Dimensions": { "Key": "ANOMALY_TOTAL_IMPACT_PERCENTAGE", "MatchOptions": + // [ "GREATER_THAN_OR_EQUAL" ], "Values": [ "100" ] } } ] } + // + // * OR two thresholds together: { "Or": [ { "Dimensions": { "Key": "ANOMALY_TOTAL_IMPACT_ABSOLUTE", + // "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ], "Values": [ "100" ] } }, + // { "Dimensions": { "Key": "ANOMALY_TOTAL_IMPACT_PERCENTAGE", "MatchOptions": + // [ "GREATER_THAN_OR_EQUAL" ], "Values": [ "100" ] } } ] } + ThresholdExpression *Expression `type:"structure"` } // String returns the string representation. 
@@ -3969,9 +4022,6 @@ func (s *AnomalySubscription) Validate() error { if s.SubscriptionName == nil { invalidParams.Add(request.NewErrParamRequired("SubscriptionName")) } - if s.Threshold == nil { - invalidParams.Add(request.NewErrParamRequired("Threshold")) - } if s.Subscribers != nil { for i, v := range s.Subscribers { if v == nil { @@ -3982,6 +4032,11 @@ func (s *AnomalySubscription) Validate() error { } } } + if s.ThresholdExpression != nil { + if err := s.ThresholdExpression.Validate(); err != nil { + invalidParams.AddNested("ThresholdExpression", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -4031,6 +4086,12 @@ func (s *AnomalySubscription) SetThreshold(v float64) *AnomalySubscription { return s } +// SetThresholdExpression sets the ThresholdExpression field's value. +func (s *AnomalySubscription) SetThresholdExpression(v *Expression) *AnomalySubscription { + s.ThresholdExpression = v + return s +} + // The requested report expired. Update the date interval and try again. type BillExpirationException struct { _ struct{} `type:"structure"` @@ -6199,13 +6260,24 @@ type DimensionValues struct { _ struct{} `type:"structure"` // The names of the metadata types that you can use to filter and group your - // results. For example, AZ returns a list of Availability Zones. LINK_ACCOUNT_NAME - // and SERVICE_CODE can only be used in CostCategoryRule (https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/AAPI_CostCategoryRule.html). + // results. For example, AZ returns a list of Availability Zones. + // + // Not all dimensions are supported in each API. Refer to the documentation + // for each specific API to see what is supported. + // + // LINK_ACCOUNT_NAME and SERVICE_CODE can only be used in CostCategoryRule (https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_CostCategoryRule.html). + // + // ANOMALY_TOTAL_IMPACT_ABSOLUTE and ANOMALY_TOTAL_IMPACT_PERCENTAGE can only + // be used in AnomalySubscriptions (https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_AnomalySubscription.html). Key *string `type:"string" enum:"Dimension"` - // The match options that you can use to filter your results. MatchOptions is - // only applicable for actions related to Cost Category. The default values - // for MatchOptions are EQUALS and CASE_SENSITIVE. + // The match options that you can use to filter your results. + // + // MatchOptions is only applicable for actions related to Cost Category and + // Anomaly Subscriptions. Refer to the documentation for each specific API to + // see what is supported. + // + // The default values for MatchOptions are EQUALS and CASE_SENSITIVE. MatchOptions []*string `type:"list" enum:"MatchOption"` // The metadata values that you can use to filter and group your results. You @@ -6875,31 +6947,48 @@ func (s *ElastiCacheInstanceDetails) SetSizeFlexEligible(v bool) *ElastiCacheIns return s } -// Use Expression to filter by cost or by usage. There are two patterns: +// Use Expression to filter in various Cost Explorer APIs. +// +// Not all Expression types are supported in each API. Refer to the documentation +// for each specific API to see what is supported. // -// - Simple dimension values - You can set the dimension name and values -// for the filters that you plan to use. For example, you can filter for -// REGION==us-east-1 OR REGION==us-west-1. For GetRightsizingRecommendation, -// the Region is a full name (for example, REGION==US East (N. Virginia). 
-// The Expression example is as follows: { "Dimensions": { "Key": "REGION", -// "Values": [ "us-east-1", “us-west-1” ] } } The list of dimension values -// are OR'd together to retrieve cost or usage data. You can create Expression -// and DimensionValues objects using either with* methods or set* methods -// in multiple lines. +// There are two patterns: // -// - Compound dimension values with logical operations - You can use multiple +// - Simple dimension values. There are three types of simple dimension values: +// CostCategories, Tags, and Dimensions. Specify the CostCategories field +// to define a filter that acts on Cost Categories. Specify the Tags field +// to define a filter that acts on Cost Allocation Tags. Specify the Dimensions +// field to define a filter that acts on the DimensionValues (https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_DimensionValues.html). +// For each filter type, you can set the dimension name and values for the +// filters that you plan to use. For example, you can filter for REGION==us-east-1 +// OR REGION==us-west-1. For GetRightsizingRecommendation, the Region is +// a full name (for example, REGION==US East (N. Virginia). The corresponding +// Expression for this example is as follows: { "Dimensions": { "Key": "REGION", +// "Values": [ "us-east-1", “us-west-1” ] } } As shown in the previous +// example, lists of dimension values are combined with OR when applying +// the filter. You can also set different match options to further control +// how the filter behaves. Not all APIs support match options. Refer to the +// documentation for each specific API to see what is supported. For example, +// you can filter for linked account names that start with “a”. The corresponding +// Expression for this example is as follows: { "Dimensions": { "Key": "LINKED_ACCOUNT_NAME", +// "MatchOptions": [ "STARTS_WITH" ], "Values": [ "a" ] } } +// +// - Compound Expression types with logical operations. You can use multiple // Expression types and the logical operators AND/OR/NOT to create a list -// of one or more Expression objects. By doing this, you can filter on more -// advanced options. For example, you can filter on ((REGION == us-east-1 +// of one or more Expression objects. By doing this, you can filter by more +// advanced options. For example, you can filter by ((REGION == us-east-1 // OR REGION == us-west-1) OR (TAG.Type == Type1)) AND (USAGE_TYPE != DataTransfer). -// The Expression for that is as follows: { "And": [ {"Or": [ {"Dimensions": -// { "Key": "REGION", "Values": [ "us-east-1", "us-west-1" ] }}, {"Tags": -// { "Key": "TagName", "Values": ["Value1"] } } ]}, {"Not": {"Dimensions": -// { "Key": "USAGE_TYPE", "Values": ["DataTransfer"] }}} ] } Because each -// Expression can have only one operator, the service returns an error if -// more than one is specified. The following example shows an Expression -// object that creates an error. { "And": [ ... ], "DimensionValues": { "Dimension": -// "USAGE_TYPE", "Values": [ "DataTransfer" ] } } +// The corresponding Expression for this example is as follows: { "And": +// [ {"Or": [ {"Dimensions": { "Key": "REGION", "Values": [ "us-east-1", +// "us-west-1" ] }}, {"Tags": { "Key": "TagName", "Values": ["Value1"] } +// } ]}, {"Not": {"Dimensions": { "Key": "USAGE_TYPE", "Values": ["DataTransfer"] +// }}} ] } Because each Expression can have only one operator, the service +// returns an error if more than one is specified. 
The following example +// shows an Expression object that creates an error: { "And": [ ... ], "Dimensions": +// { "Key": "USAGE_TYPE", "Values": [ "DataTransfer" ] } } The following +// is an example of the corresponding error message: "Expression has more +// than one roots. Only one root operator is allowed for each expression: +// And, Or, Not, Dimensions, Tags, CostCategories" // // For the GetRightsizingRecommendation action, a combination of OR and NOT // isn't supported. OR isn't supported between different dimensions, or dimensions @@ -7987,31 +8076,48 @@ type GetCostCategoriesInput struct { // The unique name of the Cost Category. CostCategoryName *string `min:"1" type:"string"` - // Use Expression to filter by cost or by usage. There are two patterns: - // - // * Simple dimension values - You can set the dimension name and values - // for the filters that you plan to use. For example, you can filter for - // REGION==us-east-1 OR REGION==us-west-1. For GetRightsizingRecommendation, - // the Region is a full name (for example, REGION==US East (N. Virginia). - // The Expression example is as follows: { "Dimensions": { "Key": "REGION", - // "Values": [ "us-east-1", “us-west-1” ] } } The list of dimension values - // are OR'd together to retrieve cost or usage data. You can create Expression - // and DimensionValues objects using either with* methods or set* methods - // in multiple lines. - // - // * Compound dimension values with logical operations - You can use multiple + // Use Expression to filter in various Cost Explorer APIs. + // + // Not all Expression types are supported in each API. Refer to the documentation + // for each specific API to see what is supported. + // + // There are two patterns: + // + // * Simple dimension values. There are three types of simple dimension values: + // CostCategories, Tags, and Dimensions. Specify the CostCategories field + // to define a filter that acts on Cost Categories. Specify the Tags field + // to define a filter that acts on Cost Allocation Tags. Specify the Dimensions + // field to define a filter that acts on the DimensionValues (https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_DimensionValues.html). + // For each filter type, you can set the dimension name and values for the + // filters that you plan to use. For example, you can filter for REGION==us-east-1 + // OR REGION==us-west-1. For GetRightsizingRecommendation, the Region is + // a full name (for example, REGION==US East (N. Virginia). The corresponding + // Expression for this example is as follows: { "Dimensions": { "Key": "REGION", + // "Values": [ "us-east-1", “us-west-1” ] } } As shown in the previous + // example, lists of dimension values are combined with OR when applying + // the filter. You can also set different match options to further control + // how the filter behaves. Not all APIs support match options. Refer to the + // documentation for each specific API to see what is supported. For example, + // you can filter for linked account names that start with “a”. The corresponding + // Expression for this example is as follows: { "Dimensions": { "Key": "LINKED_ACCOUNT_NAME", + // "MatchOptions": [ "STARTS_WITH" ], "Values": [ "a" ] } } + // + // * Compound Expression types with logical operations. You can use multiple // Expression types and the logical operators AND/OR/NOT to create a list - // of one or more Expression objects. By doing this, you can filter on more - // advanced options. 
For example, you can filter on ((REGION == us-east-1 + // of one or more Expression objects. By doing this, you can filter by more + // advanced options. For example, you can filter by ((REGION == us-east-1 // OR REGION == us-west-1) OR (TAG.Type == Type1)) AND (USAGE_TYPE != DataTransfer). - // The Expression for that is as follows: { "And": [ {"Or": [ {"Dimensions": - // { "Key": "REGION", "Values": [ "us-east-1", "us-west-1" ] }}, {"Tags": - // { "Key": "TagName", "Values": ["Value1"] } } ]}, {"Not": {"Dimensions": - // { "Key": "USAGE_TYPE", "Values": ["DataTransfer"] }}} ] } Because each - // Expression can have only one operator, the service returns an error if - // more than one is specified. The following example shows an Expression - // object that creates an error. { "And": [ ... ], "DimensionValues": { "Dimension": - // "USAGE_TYPE", "Values": [ "DataTransfer" ] } } + // The corresponding Expression for this example is as follows: { "And": + // [ {"Or": [ {"Dimensions": { "Key": "REGION", "Values": [ "us-east-1", + // "us-west-1" ] }}, {"Tags": { "Key": "TagName", "Values": ["Value1"] } + // } ]}, {"Not": {"Dimensions": { "Key": "USAGE_TYPE", "Values": ["DataTransfer"] + // }}} ] } Because each Expression can have only one operator, the service + // returns an error if more than one is specified. The following example + // shows an Expression object that creates an error: { "And": [ ... ], "Dimensions": + // { "Key": "USAGE_TYPE", "Values": [ "DataTransfer" ] } } The following + // is an example of the corresponding error message: "Expression has more + // than one roots. Only one root operator is allowed for each expression: + // And, Or, Not, Dimensions, Tags, CostCategories" // // For the GetRightsizingRecommendation action, a combination of OR and NOT // isn't supported. OR isn't supported between different dimensions, or dimensions @@ -8607,31 +8713,48 @@ type GetDimensionValuesInput struct { // Dimension is a required field Dimension *string `type:"string" required:"true" enum:"Dimension"` - // Use Expression to filter by cost or by usage. There are two patterns: - // - // * Simple dimension values - You can set the dimension name and values - // for the filters that you plan to use. For example, you can filter for - // REGION==us-east-1 OR REGION==us-west-1. For GetRightsizingRecommendation, - // the Region is a full name (for example, REGION==US East (N. Virginia). - // The Expression example is as follows: { "Dimensions": { "Key": "REGION", - // "Values": [ "us-east-1", “us-west-1” ] } } The list of dimension values - // are OR'd together to retrieve cost or usage data. You can create Expression - // and DimensionValues objects using either with* methods or set* methods - // in multiple lines. - // - // * Compound dimension values with logical operations - You can use multiple + // Use Expression to filter in various Cost Explorer APIs. + // + // Not all Expression types are supported in each API. Refer to the documentation + // for each specific API to see what is supported. + // + // There are two patterns: + // + // * Simple dimension values. There are three types of simple dimension values: + // CostCategories, Tags, and Dimensions. Specify the CostCategories field + // to define a filter that acts on Cost Categories. Specify the Tags field + // to define a filter that acts on Cost Allocation Tags. 
Specify the Dimensions + // field to define a filter that acts on the DimensionValues (https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_DimensionValues.html). + // For each filter type, you can set the dimension name and values for the + // filters that you plan to use. For example, you can filter for REGION==us-east-1 + // OR REGION==us-west-1. For GetRightsizingRecommendation, the Region is + // a full name (for example, REGION==US East (N. Virginia). The corresponding + // Expression for this example is as follows: { "Dimensions": { "Key": "REGION", + // "Values": [ "us-east-1", “us-west-1” ] } } As shown in the previous + // example, lists of dimension values are combined with OR when applying + // the filter. You can also set different match options to further control + // how the filter behaves. Not all APIs support match options. Refer to the + // documentation for each specific API to see what is supported. For example, + // you can filter for linked account names that start with “a”. The corresponding + // Expression for this example is as follows: { "Dimensions": { "Key": "LINKED_ACCOUNT_NAME", + // "MatchOptions": [ "STARTS_WITH" ], "Values": [ "a" ] } } + // + // * Compound Expression types with logical operations. You can use multiple // Expression types and the logical operators AND/OR/NOT to create a list - // of one or more Expression objects. By doing this, you can filter on more - // advanced options. For example, you can filter on ((REGION == us-east-1 + // of one or more Expression objects. By doing this, you can filter by more + // advanced options. For example, you can filter by ((REGION == us-east-1 // OR REGION == us-west-1) OR (TAG.Type == Type1)) AND (USAGE_TYPE != DataTransfer). - // The Expression for that is as follows: { "And": [ {"Or": [ {"Dimensions": - // { "Key": "REGION", "Values": [ "us-east-1", "us-west-1" ] }}, {"Tags": - // { "Key": "TagName", "Values": ["Value1"] } } ]}, {"Not": {"Dimensions": - // { "Key": "USAGE_TYPE", "Values": ["DataTransfer"] }}} ] } Because each - // Expression can have only one operator, the service returns an error if - // more than one is specified. The following example shows an Expression - // object that creates an error. { "And": [ ... ], "DimensionValues": { "Dimension": - // "USAGE_TYPE", "Values": [ "DataTransfer" ] } } + // The corresponding Expression for this example is as follows: { "And": + // [ {"Or": [ {"Dimensions": { "Key": "REGION", "Values": [ "us-east-1", + // "us-west-1" ] }}, {"Tags": { "Key": "TagName", "Values": ["Value1"] } + // } ]}, {"Not": {"Dimensions": { "Key": "USAGE_TYPE", "Values": ["DataTransfer"] + // }}} ] } Because each Expression can have only one operator, the service + // returns an error if more than one is specified. The following example + // shows an Expression object that creates an error: { "And": [ ... ], "Dimensions": + // { "Key": "USAGE_TYPE", "Values": [ "DataTransfer" ] } } The following + // is an example of the corresponding error message: "Expression has more + // than one roots. Only one root operator is allowed for each expression: + // And, Or, Not, Dimensions, Tags, CostCategories" // // For the GetRightsizingRecommendation action, a combination of OR and NOT // isn't supported. OR isn't supported between different dimensions, or dimensions @@ -9247,31 +9370,48 @@ type GetReservationPurchaseRecommendationInput struct { // calculated for individual member accounts only. 
AccountScope *string `type:"string" enum:"AccountScope"` - // Use Expression to filter by cost or by usage. There are two patterns: - // - // * Simple dimension values - You can set the dimension name and values - // for the filters that you plan to use. For example, you can filter for - // REGION==us-east-1 OR REGION==us-west-1. For GetRightsizingRecommendation, - // the Region is a full name (for example, REGION==US East (N. Virginia). - // The Expression example is as follows: { "Dimensions": { "Key": "REGION", - // "Values": [ "us-east-1", “us-west-1” ] } } The list of dimension values - // are OR'd together to retrieve cost or usage data. You can create Expression - // and DimensionValues objects using either with* methods or set* methods - // in multiple lines. - // - // * Compound dimension values with logical operations - You can use multiple + // Use Expression to filter in various Cost Explorer APIs. + // + // Not all Expression types are supported in each API. Refer to the documentation + // for each specific API to see what is supported. + // + // There are two patterns: + // + // * Simple dimension values. There are three types of simple dimension values: + // CostCategories, Tags, and Dimensions. Specify the CostCategories field + // to define a filter that acts on Cost Categories. Specify the Tags field + // to define a filter that acts on Cost Allocation Tags. Specify the Dimensions + // field to define a filter that acts on the DimensionValues (https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_DimensionValues.html). + // For each filter type, you can set the dimension name and values for the + // filters that you plan to use. For example, you can filter for REGION==us-east-1 + // OR REGION==us-west-1. For GetRightsizingRecommendation, the Region is + // a full name (for example, REGION==US East (N. Virginia). The corresponding + // Expression for this example is as follows: { "Dimensions": { "Key": "REGION", + // "Values": [ "us-east-1", “us-west-1” ] } } As shown in the previous + // example, lists of dimension values are combined with OR when applying + // the filter. You can also set different match options to further control + // how the filter behaves. Not all APIs support match options. Refer to the + // documentation for each specific API to see what is supported. For example, + // you can filter for linked account names that start with “a”. The corresponding + // Expression for this example is as follows: { "Dimensions": { "Key": "LINKED_ACCOUNT_NAME", + // "MatchOptions": [ "STARTS_WITH" ], "Values": [ "a" ] } } + // + // * Compound Expression types with logical operations. You can use multiple // Expression types and the logical operators AND/OR/NOT to create a list - // of one or more Expression objects. By doing this, you can filter on more - // advanced options. For example, you can filter on ((REGION == us-east-1 + // of one or more Expression objects. By doing this, you can filter by more + // advanced options. For example, you can filter by ((REGION == us-east-1 // OR REGION == us-west-1) OR (TAG.Type == Type1)) AND (USAGE_TYPE != DataTransfer). 
- // The Expression for that is as follows: { "And": [ {"Or": [ {"Dimensions": - // { "Key": "REGION", "Values": [ "us-east-1", "us-west-1" ] }}, {"Tags": - // { "Key": "TagName", "Values": ["Value1"] } } ]}, {"Not": {"Dimensions": - // { "Key": "USAGE_TYPE", "Values": ["DataTransfer"] }}} ] } Because each - // Expression can have only one operator, the service returns an error if - // more than one is specified. The following example shows an Expression - // object that creates an error. { "And": [ ... ], "DimensionValues": { "Dimension": - // "USAGE_TYPE", "Values": [ "DataTransfer" ] } } + // The corresponding Expression for this example is as follows: { "And": + // [ {"Or": [ {"Dimensions": { "Key": "REGION", "Values": [ "us-east-1", + // "us-west-1" ] }}, {"Tags": { "Key": "TagName", "Values": ["Value1"] } + // } ]}, {"Not": {"Dimensions": { "Key": "USAGE_TYPE", "Values": ["DataTransfer"] + // }}} ] } Because each Expression can have only one operator, the service + // returns an error if more than one is specified. The following example + // shows an Expression object that creates an error: { "And": [ ... ], "Dimensions": + // { "Key": "USAGE_TYPE", "Values": [ "DataTransfer" ] } } The following + // is an example of the corresponding error message: "Expression has more + // than one roots. Only one root operator is allowed for each expression: + // And, Or, Not, Dimensions, Tags, CostCategories" // // For the GetRightsizingRecommendation action, a combination of OR and NOT // isn't supported. OR isn't supported between different dimensions, or dimensions @@ -9715,31 +9855,48 @@ type GetRightsizingRecommendationInput struct { // of existing Savings Plans or RI benefits, or neither. Configuration *RightsizingRecommendationConfiguration `type:"structure"` - // Use Expression to filter by cost or by usage. There are two patterns: - // - // * Simple dimension values - You can set the dimension name and values - // for the filters that you plan to use. For example, you can filter for - // REGION==us-east-1 OR REGION==us-west-1. For GetRightsizingRecommendation, - // the Region is a full name (for example, REGION==US East (N. Virginia). - // The Expression example is as follows: { "Dimensions": { "Key": "REGION", - // "Values": [ "us-east-1", “us-west-1” ] } } The list of dimension values - // are OR'd together to retrieve cost or usage data. You can create Expression - // and DimensionValues objects using either with* methods or set* methods - // in multiple lines. - // - // * Compound dimension values with logical operations - You can use multiple + // Use Expression to filter in various Cost Explorer APIs. + // + // Not all Expression types are supported in each API. Refer to the documentation + // for each specific API to see what is supported. + // + // There are two patterns: + // + // * Simple dimension values. There are three types of simple dimension values: + // CostCategories, Tags, and Dimensions. Specify the CostCategories field + // to define a filter that acts on Cost Categories. Specify the Tags field + // to define a filter that acts on Cost Allocation Tags. Specify the Dimensions + // field to define a filter that acts on the DimensionValues (https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_DimensionValues.html). + // For each filter type, you can set the dimension name and values for the + // filters that you plan to use. For example, you can filter for REGION==us-east-1 + // OR REGION==us-west-1. 
For GetRightsizingRecommendation, the Region is + // a full name (for example, REGION==US East (N. Virginia). The corresponding + // Expression for this example is as follows: { "Dimensions": { "Key": "REGION", + // "Values": [ "us-east-1", “us-west-1” ] } } As shown in the previous + // example, lists of dimension values are combined with OR when applying + // the filter. You can also set different match options to further control + // how the filter behaves. Not all APIs support match options. Refer to the + // documentation for each specific API to see what is supported. For example, + // you can filter for linked account names that start with “a”. The corresponding + // Expression for this example is as follows: { "Dimensions": { "Key": "LINKED_ACCOUNT_NAME", + // "MatchOptions": [ "STARTS_WITH" ], "Values": [ "a" ] } } + // + // * Compound Expression types with logical operations. You can use multiple // Expression types and the logical operators AND/OR/NOT to create a list - // of one or more Expression objects. By doing this, you can filter on more - // advanced options. For example, you can filter on ((REGION == us-east-1 + // of one or more Expression objects. By doing this, you can filter by more + // advanced options. For example, you can filter by ((REGION == us-east-1 // OR REGION == us-west-1) OR (TAG.Type == Type1)) AND (USAGE_TYPE != DataTransfer). - // The Expression for that is as follows: { "And": [ {"Or": [ {"Dimensions": - // { "Key": "REGION", "Values": [ "us-east-1", "us-west-1" ] }}, {"Tags": - // { "Key": "TagName", "Values": ["Value1"] } } ]}, {"Not": {"Dimensions": - // { "Key": "USAGE_TYPE", "Values": ["DataTransfer"] }}} ] } Because each - // Expression can have only one operator, the service returns an error if - // more than one is specified. The following example shows an Expression - // object that creates an error. { "And": [ ... ], "DimensionValues": { "Dimension": - // "USAGE_TYPE", "Values": [ "DataTransfer" ] } } + // The corresponding Expression for this example is as follows: { "And": + // [ {"Or": [ {"Dimensions": { "Key": "REGION", "Values": [ "us-east-1", + // "us-west-1" ] }}, {"Tags": { "Key": "TagName", "Values": ["Value1"] } + // } ]}, {"Not": {"Dimensions": { "Key": "USAGE_TYPE", "Values": ["DataTransfer"] + // }}} ] } Because each Expression can have only one operator, the service + // returns an error if more than one is specified. The following example + // shows an Expression object that creates an error: { "And": [ ... ], "Dimensions": + // { "Key": "USAGE_TYPE", "Values": [ "DataTransfer" ] } } The following + // is an example of the corresponding error message: "Expression has more + // than one roots. Only one root operator is allowed for each expression: + // And, Or, Not, Dimensions, Tags, CostCategories" // // For the GetRightsizingRecommendation action, a combination of OR and NOT // isn't supported. OR isn't supported between different dimensions, or dimensions @@ -10698,31 +10855,48 @@ func (s *GetSavingsPlansUtilizationOutput) SetTotal(v *SavingsPlansUtilizationAg type GetTagsInput struct { _ struct{} `type:"structure"` - // Use Expression to filter by cost or by usage. There are two patterns: - // - // * Simple dimension values - You can set the dimension name and values - // for the filters that you plan to use. For example, you can filter for - // REGION==us-east-1 OR REGION==us-west-1. For GetRightsizingRecommendation, - // the Region is a full name (for example, REGION==US East (N. Virginia). 
- // The Expression example is as follows: { "Dimensions": { "Key": "REGION", - // "Values": [ "us-east-1", “us-west-1” ] } } The list of dimension values - // are OR'd together to retrieve cost or usage data. You can create Expression - // and DimensionValues objects using either with* methods or set* methods - // in multiple lines. - // - // * Compound dimension values with logical operations - You can use multiple + // Use Expression to filter in various Cost Explorer APIs. + // + // Not all Expression types are supported in each API. Refer to the documentation + // for each specific API to see what is supported. + // + // There are two patterns: + // + // * Simple dimension values. There are three types of simple dimension values: + // CostCategories, Tags, and Dimensions. Specify the CostCategories field + // to define a filter that acts on Cost Categories. Specify the Tags field + // to define a filter that acts on Cost Allocation Tags. Specify the Dimensions + // field to define a filter that acts on the DimensionValues (https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_DimensionValues.html). + // For each filter type, you can set the dimension name and values for the + // filters that you plan to use. For example, you can filter for REGION==us-east-1 + // OR REGION==us-west-1. For GetRightsizingRecommendation, the Region is + // a full name (for example, REGION==US East (N. Virginia). The corresponding + // Expression for this example is as follows: { "Dimensions": { "Key": "REGION", + // "Values": [ "us-east-1", “us-west-1” ] } } As shown in the previous + // example, lists of dimension values are combined with OR when applying + // the filter. You can also set different match options to further control + // how the filter behaves. Not all APIs support match options. Refer to the + // documentation for each specific API to see what is supported. For example, + // you can filter for linked account names that start with “a”. The corresponding + // Expression for this example is as follows: { "Dimensions": { "Key": "LINKED_ACCOUNT_NAME", + // "MatchOptions": [ "STARTS_WITH" ], "Values": [ "a" ] } } + // + // * Compound Expression types with logical operations. You can use multiple // Expression types and the logical operators AND/OR/NOT to create a list - // of one or more Expression objects. By doing this, you can filter on more - // advanced options. For example, you can filter on ((REGION == us-east-1 + // of one or more Expression objects. By doing this, you can filter by more + // advanced options. For example, you can filter by ((REGION == us-east-1 // OR REGION == us-west-1) OR (TAG.Type == Type1)) AND (USAGE_TYPE != DataTransfer). - // The Expression for that is as follows: { "And": [ {"Or": [ {"Dimensions": - // { "Key": "REGION", "Values": [ "us-east-1", "us-west-1" ] }}, {"Tags": - // { "Key": "TagName", "Values": ["Value1"] } } ]}, {"Not": {"Dimensions": - // { "Key": "USAGE_TYPE", "Values": ["DataTransfer"] }}} ] } Because each - // Expression can have only one operator, the service returns an error if - // more than one is specified. The following example shows an Expression - // object that creates an error. { "And": [ ... 
], "DimensionValues": { "Dimension": - // "USAGE_TYPE", "Values": [ "DataTransfer" ] } } + // The corresponding Expression for this example is as follows: { "And": + // [ {"Or": [ {"Dimensions": { "Key": "REGION", "Values": [ "us-east-1", + // "us-west-1" ] }}, {"Tags": { "Key": "TagName", "Values": ["Value1"] } + // } ]}, {"Not": {"Dimensions": { "Key": "USAGE_TYPE", "Values": ["DataTransfer"] + // }}} ] } Because each Expression can have only one operator, the service + // returns an error if more than one is specified. The following example + // shows an Expression object that creates an error: { "And": [ ... ], "Dimensions": + // { "Key": "USAGE_TYPE", "Values": [ "DataTransfer" ] } } The following + // is an example of the corresponding error message: "Expression has more + // than one roots. Only one root operator is allowed for each expression: + // And, Or, Not, Dimensions, Tags, CostCategories" // // For the GetRightsizingRecommendation action, a combination of OR and NOT // isn't supported. OR isn't supported between different dimensions, or dimensions @@ -11249,8 +11423,24 @@ type Impact struct { // MaxImpact is a required field MaxImpact *float64 `type:"double" required:"true"` - // The cumulative dollar value that's observed for an anomaly. + // The cumulative dollar amount that was actually spent during the anomaly. + TotalActualSpend *float64 `type:"double"` + + // The cumulative dollar amount that was expected to be spent during the anomaly. + // It is calculated using advanced machine learning models to determine the + // typical spending pattern based on historical data for a customer. + TotalExpectedSpend *float64 `type:"double"` + + // The cumulative dollar difference between the total actual spend and total + // expected spend. It is calculated as TotalActualSpend - TotalExpectedSpend. TotalImpact *float64 `type:"double"` + + // The cumulative percentage difference between the total actual spend and total + // expected spend. It is calculated as (TotalImpact / TotalExpectedSpend) * + // 100. When TotalExpectedSpend is zero, this field is omitted. Expected spend + // can be zero in situations such as when you start to use a service for the + // first time. + TotalImpactPercentage *float64 `type:"double"` } // String returns the string representation. @@ -11277,12 +11467,30 @@ func (s *Impact) SetMaxImpact(v float64) *Impact { return s } +// SetTotalActualSpend sets the TotalActualSpend field's value. +func (s *Impact) SetTotalActualSpend(v float64) *Impact { + s.TotalActualSpend = &v + return s +} + +// SetTotalExpectedSpend sets the TotalExpectedSpend field's value. +func (s *Impact) SetTotalExpectedSpend(v float64) *Impact { + s.TotalExpectedSpend = &v + return s +} + // SetTotalImpact sets the TotalImpact field's value. func (s *Impact) SetTotalImpact(v float64) *Impact { s.TotalImpact = &v return s } +// SetTotalImpactPercentage sets the TotalImpactPercentage field's value. +func (s *Impact) SetTotalImpactPercentage(v float64) *Impact { + s.TotalImpactPercentage = &v + return s +} + // Details about the instances that Amazon Web Services recommends that you // purchase. type InstanceDetails struct { @@ -15760,8 +15968,41 @@ type UpdateAnomalySubscriptionInput struct { // The new name of the subscription. SubscriptionName *string `type:"string"` + // (deprecated) + // // The update to the threshold value for receiving notifications. - Threshold *float64 `type:"double"` + // + // This field has been deprecated. To update a threshold, use ThresholdExpression. 
+ // Continued use of Threshold will be treated as shorthand syntax for a ThresholdExpression. + // + // Deprecated: Threshold has been deprecated in favor of ThresholdExpression + Threshold *float64 `deprecated:"true" type:"double"` + + // The update to the Expression (https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_Expression.html) + // object used to specify the anomalies that you want to generate alerts for. + // This supports dimensions and nested expressions. The supported dimensions + // are ANOMALY_TOTAL_IMPACT_ABSOLUTE and ANOMALY_TOTAL_IMPACT_PERCENTAGE. The + // supported nested expression types are AND and OR. The match option GREATER_THAN_OR_EQUAL + // is required. Values must be numbers between 0 and 10,000,000,000. + // + // The following are examples of valid ThresholdExpressions: + // + // * Absolute threshold: { "Dimensions": { "Key": "ANOMALY_TOTAL_IMPACT_ABSOLUTE", + // "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ], "Values": [ "100" ] } } + // + // * Percentage threshold: { "Dimensions": { "Key": "ANOMALY_TOTAL_IMPACT_PERCENTAGE", + // "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ], "Values": [ "100" ] } } + // + // * AND two thresholds together: { "And": [ { "Dimensions": { "Key": "ANOMALY_TOTAL_IMPACT_ABSOLUTE", + // "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ], "Values": [ "100" ] } }, + // { "Dimensions": { "Key": "ANOMALY_TOTAL_IMPACT_PERCENTAGE", "MatchOptions": + // [ "GREATER_THAN_OR_EQUAL" ], "Values": [ "100" ] } } ] } + // + // * OR two thresholds together: { "Or": [ { "Dimensions": { "Key": "ANOMALY_TOTAL_IMPACT_ABSOLUTE", + // "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ], "Values": [ "100" ] } }, + // { "Dimensions": { "Key": "ANOMALY_TOTAL_IMPACT_PERCENTAGE", "MatchOptions": + // [ "GREATER_THAN_OR_EQUAL" ], "Values": [ "100" ] } } ] } + ThresholdExpression *Expression `type:"structure"` } // String returns the string representation. @@ -15798,6 +16039,11 @@ func (s *UpdateAnomalySubscriptionInput) Validate() error { } } } + if s.ThresholdExpression != nil { + if err := s.ThresholdExpression.Validate(); err != nil { + invalidParams.AddNested("ThresholdExpression", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -15841,6 +16087,12 @@ func (s *UpdateAnomalySubscriptionInput) SetThreshold(v float64) *UpdateAnomalyS return s } +// SetThresholdExpression sets the ThresholdExpression field's value. 
+func (s *UpdateAnomalySubscriptionInput) SetThresholdExpression(v *Expression) *UpdateAnomalySubscriptionInput { + s.ThresholdExpression = v + return s +} + type UpdateAnomalySubscriptionOutput struct { _ struct{} `type:"structure"` @@ -16559,6 +16811,12 @@ const ( // DimensionInvoicingEntity is a Dimension enum value DimensionInvoicingEntity = "INVOICING_ENTITY" + + // DimensionAnomalyTotalImpactAbsolute is a Dimension enum value + DimensionAnomalyTotalImpactAbsolute = "ANOMALY_TOTAL_IMPACT_ABSOLUTE" + + // DimensionAnomalyTotalImpactPercentage is a Dimension enum value + DimensionAnomalyTotalImpactPercentage = "ANOMALY_TOTAL_IMPACT_PERCENTAGE" ) // Dimension_Values returns all elements of the Dimension enum @@ -16596,6 +16854,8 @@ func Dimension_Values() []string { DimensionAgreementEndDateTimeAfter, DimensionAgreementEndDateTimeBefore, DimensionInvoicingEntity, + DimensionAnomalyTotalImpactAbsolute, + DimensionAnomalyTotalImpactPercentage, } } @@ -16772,6 +17032,9 @@ const ( // MatchOptionCaseInsensitive is a MatchOption enum value MatchOptionCaseInsensitive = "CASE_INSENSITIVE" + + // MatchOptionGreaterThanOrEqual is a MatchOption enum value + MatchOptionGreaterThanOrEqual = "GREATER_THAN_OR_EQUAL" ) // MatchOption_Values returns all elements of the MatchOption enum @@ -16784,6 +17047,7 @@ func MatchOption_Values() []string { MatchOptionContains, MatchOptionCaseSensitive, MatchOptionCaseInsensitive, + MatchOptionGreaterThanOrEqual, } } diff --git a/service/networkmanager/api.go b/service/networkmanager/api.go index 8f3d0db105b..2e8e474d3a3 100644 --- a/service/networkmanager/api.go +++ b/service/networkmanager/api.go @@ -8134,7 +8134,7 @@ func (c *NetworkManager) StartOrganizationServiceAccessUpdateRequest(input *Star // StartOrganizationServiceAccessUpdate API operation for AWS Network Manager. // -// Enables for the Network Manager service for an Amazon Web Services Organization. +// Enables the Network Manager service for an Amazon Web Services Organization. // This can only be called by a management account within the organization. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -23422,6 +23422,11 @@ func (s *VpcAttachment) SetSubnetArns(v []*string) *VpcAttachment { type VpcOptions struct { _ struct{} `type:"structure"` + // Indicates whether appliance mode is supported. If enabled, traffic flow between + // a source and destination use the same Availability Zone for the VPC attachment + // for the lifetime of that flow. The default value is false. + ApplianceModeSupport *bool `type:"boolean"` + // Indicates whether IPv6 is supported. Ipv6Support *bool `type:"boolean"` } @@ -23444,6 +23449,12 @@ func (s VpcOptions) GoString() string { return s.String() } +// SetApplianceModeSupport sets the ApplianceModeSupport field's value. +func (s *VpcOptions) SetApplianceModeSupport(v bool) *VpcOptions { + s.ApplianceModeSupport = &v + return s +} + // SetIpv6Support sets the Ipv6Support field's value. func (s *VpcOptions) SetIpv6Support(v bool) *VpcOptions { s.Ipv6Support = &v diff --git a/service/redshiftdataapiservice/api.go b/service/redshiftdataapiservice/api.go index a5f48478485..890cd48169e 100644 --- a/service/redshiftdataapiservice/api.go +++ b/service/redshiftdataapiservice/api.go @@ -72,6 +72,10 @@ func (c *RedshiftDataAPIService) BatchExecuteStatementRequest(input *BatchExecut // name. Also, permission to call the redshift-serverless:GetCredentials // operation is required. 
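To show the new ThresholdExpression field and the GREATER_THAN_OR_EQUAL match option introduced above for UpdateAnomalySubscription, here is a minimal Go sketch (not part of this patch) that replaces the deprecated Threshold with an absolute-impact expression; the subscription ARN is a placeholder and error handling is reduced to a print.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/costexplorer"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := costexplorer.New(sess)

	// Alert when the absolute impact of an anomaly is at least 100,
	// using ThresholdExpression instead of the deprecated Threshold.
	input := &costexplorer.UpdateAnomalySubscriptionInput{
		// Hypothetical ARN, for illustration only.
		SubscriptionArn: aws.String("arn:aws:ce::111122223333:anomalysubscription/example"),
		ThresholdExpression: &costexplorer.Expression{
			Dimensions: &costexplorer.DimensionValues{
				Key:          aws.String(costexplorer.DimensionAnomalyTotalImpactAbsolute),
				MatchOptions: aws.StringSlice([]string{costexplorer.MatchOptionGreaterThanOrEqual}),
				Values:       aws.StringSlice([]string{"100"}),
			},
		},
	}

	out, err := svc.UpdateAnomalySubscription(input)
	if err != nil {
		fmt.Println("UpdateAnomalySubscription failed:", err)
		return
	}
	fmt.Println("updated subscription:", aws.StringValue(out.SubscriptionArn))
}
```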
// +// For more information about the Amazon Redshift Data API and CLI usage examples, +// see Using the Amazon Redshift Data API (https://docs.aws.amazon.com/redshift/latest/mgmt/data-api.html) +// in the Amazon Redshift Management Guide. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -157,6 +161,10 @@ func (c *RedshiftDataAPIService) CancelStatementRequest(input *CancelStatementIn // // Cancels a running query. To be canceled, a query must be running. // +// For more information about the Amazon Redshift Data API and CLI usage examples, +// see Using the Amazon Redshift Data API (https://docs.aws.amazon.com/redshift/latest/mgmt/data-api.html) +// in the Amazon Redshift Management Guide. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -248,6 +256,10 @@ func (c *RedshiftDataAPIService) DescribeStatementRequest(input *DescribeStateme // when it finished, the query status, the number of rows returned, and the // SQL statement. // +// For more information about the Amazon Redshift Data API and CLI usage examples, +// see Using the Amazon Redshift Data API (https://docs.aws.amazon.com/redshift/latest/mgmt/data-api.html) +// in the Amazon Redshift Management Guide. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -355,6 +367,10 @@ func (c *RedshiftDataAPIService) DescribeTableRequest(input *DescribeTableInput) // name. Also, permission to call the redshift-serverless:GetCredentials // operation is required. // +// For more information about the Amazon Redshift Data API and CLI usage examples, +// see Using the Amazon Redshift Data API (https://docs.aws.amazon.com/redshift/latest/mgmt/data-api.html) +// in the Amazon Redshift Management Guide. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -507,6 +523,10 @@ func (c *RedshiftDataAPIService) ExecuteStatementRequest(input *ExecuteStatement // name. Also, permission to call the redshift-serverless:GetCredentials // operation is required. // +// For more information about the Amazon Redshift Data API and CLI usage examples, +// see Using the Amazon Redshift Data API (https://docs.aws.amazon.com/redshift/latest/mgmt/data-api.html) +// in the Amazon Redshift Management Guide. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -599,6 +619,10 @@ func (c *RedshiftDataAPIService) GetStatementResultRequest(input *GetStatementRe // Fetches the temporarily cached result of an SQL statement. A token is returned // to page through the statement results. // +// For more information about the Amazon Redshift Data API and CLI usage examples, +// see Using the Amazon Redshift Data API (https://docs.aws.amazon.com/redshift/latest/mgmt/data-api.html) +// in the Amazon Redshift Management Guide. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
@@ -756,6 +780,10 @@ func (c *RedshiftDataAPIService) ListDatabasesRequest(input *ListDatabasesInput) // name. Also, permission to call the redshift-serverless:GetCredentials // operation is required. // +// For more information about the Amazon Redshift Data API and CLI usage examples, +// see Using the Amazon Redshift Data API (https://docs.aws.amazon.com/redshift/latest/mgmt/data-api.html) +// in the Amazon Redshift Management Guide. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -913,6 +941,10 @@ func (c *RedshiftDataAPIService) ListSchemasRequest(input *ListSchemasInput) (re // name. Also, permission to call the redshift-serverless:GetCredentials // operation is required. // +// For more information about the Amazon Redshift Data API and CLI usage examples, +// see Using the Amazon Redshift Data API (https://docs.aws.amazon.com/redshift/latest/mgmt/data-api.html) +// in the Amazon Redshift Management Guide. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -1056,6 +1088,10 @@ func (c *RedshiftDataAPIService) ListStatementsRequest(input *ListStatementsInpu // List of SQL statements. By default, only finished statements are shown. A // token is returned to page through the statement list. // +// For more information about the Amazon Redshift Data API and CLI usage examples, +// see Using the Amazon Redshift Data API (https://docs.aws.amazon.com/redshift/latest/mgmt/data-api.html) +// in the Amazon Redshift Management Guide. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -1211,6 +1247,10 @@ func (c *RedshiftDataAPIService) ListTablesRequest(input *ListTablesInput) (req // name. Also, permission to call the redshift-serverless:GetCredentials // operation is required. // +// For more information about the Amazon Redshift Data API and CLI usage examples, +// see Using the Amazon Redshift Data API (https://docs.aws.amazon.com/redshift/latest/mgmt/data-api.html) +// in the Amazon Redshift Management Guide. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -1438,6 +1478,10 @@ func (s *BatchExecuteStatementException) RequestID() string { type BatchExecuteStatementInput struct { _ struct{} `type:"structure"` + // A unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. + ClientToken *string `min:"1" type:"string" idempotencyToken:"true"` + // The cluster identifier. This parameter is required when connecting to a cluster // and authenticating using either Secrets Manager or temporary credentials. ClusterIdentifier *string `type:"string"` @@ -1456,8 +1500,6 @@ type BatchExecuteStatementInput struct { // is required when authenticating using Secrets Manager. SecretArn *string `type:"string"` - // One or more SQL statements to run. - // // Sqls is a required field Sqls []*string `min:"1" type:"list" required:"true"` @@ -1496,6 +1538,9 @@ func (s BatchExecuteStatementInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. 
func (s *BatchExecuteStatementInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "BatchExecuteStatementInput"} + if s.ClientToken != nil && len(*s.ClientToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientToken", 1)) + } if s.Database == nil { invalidParams.Add(request.NewErrParamRequired("Database")) } @@ -1515,6 +1560,12 @@ func (s *BatchExecuteStatementInput) Validate() error { return nil } +// SetClientToken sets the ClientToken field's value. +func (s *BatchExecuteStatementInput) SetClientToken(v string) *BatchExecuteStatementInput { + s.ClientToken = &v + return s +} + // SetClusterIdentifier sets the ClusterIdentifier field's value. func (s *BatchExecuteStatementInput) SetClusterIdentifier(v string) *BatchExecuteStatementInput { s.ClusterIdentifier = &v @@ -2480,6 +2531,10 @@ func (s *ExecuteStatementException) RequestID() string { type ExecuteStatementInput struct { _ struct{} `type:"structure"` + // A unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. + ClientToken *string `min:"1" type:"string" idempotencyToken:"true"` + // The cluster identifier. This parameter is required when connecting to a cluster // and authenticating using either Secrets Manager or temporary credentials. ClusterIdentifier *string `type:"string"` @@ -2541,6 +2596,9 @@ func (s ExecuteStatementInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *ExecuteStatementInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "ExecuteStatementInput"} + if s.ClientToken != nil && len(*s.ClientToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientToken", 1)) + } if s.Database == nil { invalidParams.Add(request.NewErrParamRequired("Database")) } @@ -2570,6 +2628,12 @@ func (s *ExecuteStatementInput) Validate() error { return nil } +// SetClientToken sets the ClientToken field's value. +func (s *ExecuteStatementInput) SetClientToken(v string) *ExecuteStatementInput { + s.ClientToken = &v + return s +} + // SetClusterIdentifier sets the ClusterIdentifier field's value. func (s *ExecuteStatementInput) SetClusterIdentifier(v string) *ExecuteStatementInput { s.ClusterIdentifier = &v @@ -3746,7 +3810,7 @@ type SqlParameter struct { Name *string `locationName:"name" type:"string" required:"true"` // The value of the parameter. Amazon Redshift implicitly converts to the proper - // data type. For more inforation, see Data types (https://docs.aws.amazon.com/redshift/latest/dg/c_Supported_data_types.html) + // data type. For more information, see Data types (https://docs.aws.amazon.com/redshift/latest/dg/c_Supported_data_types.html) // in the Amazon Redshift Database Developer Guide. // // Value is a required field diff --git a/service/redshiftdataapiservice/doc.go b/service/redshiftdataapiservice/doc.go index 36ebb0f77cb..6248e6e1808 100644 --- a/service/redshiftdataapiservice/doc.go +++ b/service/redshiftdataapiservice/doc.go @@ -9,7 +9,7 @@ // // For more information about the Amazon Redshift Data API and CLI usage examples, // see Using the Amazon Redshift Data API (https://docs.aws.amazon.com/redshift/latest/mgmt/data-api.html) -// in the Amazon Redshift Cluster Management Guide. +// in the Amazon Redshift Management Guide. // // See https://docs.aws.amazon.com/goto/WebAPI/redshift-data-2019-12-20 for more information on this service. 
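The new ClientToken idempotency token added above to ExecuteStatementInput and BatchExecuteStatementInput can be exercised as in the following minimal Go sketch (not part of this patch); the cluster, database, secret, and token values are placeholders. If ClientToken is omitted, the SDK fills it in automatically because the field is tagged as an idempotency token.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/redshiftdataapiservice"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := redshiftdataapiservice.New(sess)

	// ClientToken makes retries of the same request idempotent.
	out, err := svc.ExecuteStatement(&redshiftdataapiservice.ExecuteStatementInput{
		ClusterIdentifier: aws.String("example-cluster"), // hypothetical cluster
		Database:          aws.String("dev"),             // hypothetical database
		SecretArn:         aws.String("arn:aws:secretsmanager:us-east-1:111122223333:secret:example"), // hypothetical secret
		Sql:               aws.String("SELECT 1"),
		ClientToken:       aws.String("3f1c2f6a-example-token"), // hypothetical token
	})
	if err != nil {
		fmt.Println("ExecuteStatement failed:", err)
		return
	}

	// The returned statement ID can be passed to DescribeStatement to poll
	// for status, and to GetStatementResult once the query has finished.
	desc, err := svc.DescribeStatement(&redshiftdataapiservice.DescribeStatementInput{
		Id: out.Id,
	})
	if err != nil {
		fmt.Println("DescribeStatement failed:", err)
		return
	}
	fmt.Println("statement", aws.StringValue(out.Id), "status:", aws.StringValue(desc.Status))
}
```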
// diff --git a/service/sagemakermetrics/api.go b/service/sagemakermetrics/api.go index 54b2adb989b..6d690797c4e 100644 --- a/service/sagemakermetrics/api.go +++ b/service/sagemakermetrics/api.go @@ -54,8 +54,8 @@ func (c *SageMakerMetrics) BatchPutMetricsRequest(input *BatchPutMetricsInput) ( // BatchPutMetrics API operation for Amazon SageMaker Metrics Service. // -// Used to ingest training metrics into SageMaker which can be visualized in -// SageMaker Studio and retrieved with the GetMetrics API. +// Used to ingest training metrics into SageMaker. These metrics can be visualized +// in SageMaker Studio and retrieved with the GetMetrics API. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -91,14 +91,14 @@ type BatchPutMetricsError struct { // The error code of an error that occured when attempting to put metrics. // - // * METRIC_LIMIT_EXCEEDED - The max amount of metrics per resource has been + // * METRIC_LIMIT_EXCEEDED: The maximum amount of metrics per resource is // exceeded. // - // * INTERNAL_ERROR - An internal error occured. + // * INTERNAL_ERROR: An internal error occured. // - // * VALIDATION_ERROR - The metric data failed validation. + // * VALIDATION_ERROR: The metric data failed validation. // - // * CONFLICT_ERROR - Multiple requests attempted to modify the same data + // * CONFLICT_ERROR: Multiple requests attempted to modify the same data // simultaneously. Code *string `type:"string" enum:"PutMetricsErrorCode"` @@ -144,7 +144,7 @@ type BatchPutMetricsInput struct { // MetricData is a required field MetricData []*RawMetricData `min:"1" type:"list" required:"true"` - // The name of Trial Component to associate the metrics with. + // The name of the Trial Component to associate with the metrics. // // TrialComponentName is a required field TrialComponentName *string `min:"1" type:"string" required:"true"` @@ -215,7 +215,7 @@ func (s *BatchPutMetricsInput) SetTrialComponentName(v string) *BatchPutMetricsI type BatchPutMetricsOutput struct { _ struct{} `type:"structure"` - // Any errors that occur when inserting metric data will appear in this. + // Lists any errors that occur when inserting metric data. Errors []*BatchPutMetricsError `min:"1" type:"list"` } @@ -252,10 +252,10 @@ type RawMetricData struct { // MetricName is a required field MetricName *string `min:"1" type:"string" required:"true"` - // Metric step (aka Epoch). + // The metric step (epoch). Step *int64 `type:"integer"` - // The time when the metric was recorded. + // The time that the metric was recorded. // // Timestamp is a required field Timestamp *time.Time `type:"timestamp" required:"true"`
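Finally, a minimal Go sketch (not part of this patch) of the BatchPutMetrics call whose documentation is touched above; the trial component name is a placeholder, and the RawMetricData Value field is assumed from the service API since it does not appear in this hunk.

```go
package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sagemakermetrics"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := sagemakermetrics.New(sess)

	out, err := svc.BatchPutMetrics(&sagemakermetrics.BatchPutMetricsInput{
		TrialComponentName: aws.String("example-trial-component"), // hypothetical trial component
		MetricData: []*sagemakermetrics.RawMetricData{
			{
				MetricName: aws.String("train:loss"),
				Step:       aws.Int64(1),
				Timestamp:  aws.Time(time.Now()),
				// Value is assumed from the service API; it is not shown in this hunk.
				Value: aws.Float64(0.42),
			},
		},
	})
	if err != nil {
		fmt.Println("BatchPutMetrics failed:", err)
		return
	}
	// Per-record failures, if any, come back in the Errors list.
	for _, e := range out.Errors {
		fmt.Println("metric error:", aws.StringValue(e.Code))
	}
}
```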