diff --git a/CHANGELOG.md b/CHANGELOG.md index 75e52296bdc..96c2f8cd794 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,19 @@ +Release v1.50.20 (2024-02-16) +=== + +### Service Client Updates +* `service/connectparticipant`: Updates service documentation +* `service/elasticmapreduce`: Updates service API and documentation + * adds fine grained control over Unhealthy Node Replacement to Amazon ElasticMapReduce +* `service/firehose`: Updates service API and documentation + * This release adds support for Data Message Extraction for decompressed CloudWatch logs, and to use a custom file extension or time zone for S3 destinations. +* `service/lambda`: Updates service documentation + * Documentation-only updates for Lambda to clarify a number of existing actions and properties. +* `service/rds`: Updates service API, documentation, waiters, paginators, and examples + * Doc only update for a valid option in DB parameter group +* `service/sns`: Updates service API and documentation + * This release marks phone numbers as sensitive inputs. + Release v1.50.19 (2024-02-15) === diff --git a/aws/endpoints/defaults.go b/aws/endpoints/defaults.go index 141ed3b8312..a18c83304f9 100644 --- a/aws/endpoints/defaults.go +++ b/aws/endpoints/defaults.go @@ -4035,15 +4035,75 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "auditmanager-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "auditmanager-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "auditmanager-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "auditmanager-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "auditmanager-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "auditmanager-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "auditmanager-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "auditmanager-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, }, }, "autoscaling": service{ @@ -5871,6 +5931,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -10281,6 +10344,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "ec2-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ec2-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -10320,6 +10392,15 @@ var awsPartition = partition{ 
}, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "ec2-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -11328,6 +11409,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "elasticfilesystem-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -11508,6 +11598,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-eu-central-1", }: endpoint{ @@ -22086,12 +22185,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -24473,6 +24578,12 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-serverless-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -24488,18 +24599,87 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "redshift-serverless-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "redshift-serverless-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "redshift-serverless-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "redshift-serverless-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "redshift-serverless-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-serverless-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-serverless-fips.us-east-2.amazonaws.com", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-serverless-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: 
"us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-serverless-fips.us-west-2.amazonaws.com", + }, }, }, "rekognition": service{ diff --git a/aws/version.go b/aws/version.go index 8adbffbc646..5b1ea807b1f 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.50.19" +const SDKVersion = "1.50.20" diff --git a/models/apis/connectparticipant/2018-09-07/docs-2.json b/models/apis/connectparticipant/2018-09-07/docs-2.json index a1c616651b1..a36ad04401e 100644 --- a/models/apis/connectparticipant/2018-09-07/docs-2.json +++ b/models/apis/connectparticipant/2018-09-07/docs-2.json @@ -2,13 +2,13 @@ "version": "2.0", "service": "

Amazon Connect is an easy-to-use omnichannel cloud contact center service that enables companies of any size to deliver superior customer service at a lower cost. Amazon Connect communications capabilities make it easy for companies to deliver personalized interactions across communication channels, including chat.

Use the Amazon Connect Participant Service to manage participants (for example, agents, customers, and managers listening in), and to send messages and events within a chat contact. The APIs in the service enable the following: sending chat messages, attachment sharing, managing a participant's connection state and message events, and retrieving chat transcripts.

", "operations": { - "CompleteAttachmentUpload": "

Allows you to confirm that the attachment has been uploaded using the pre-signed URL provided in StartAttachmentUpload API.

ConnectionToken is used for invoking this API instead of ParticipantToken.

The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.

", + "CompleteAttachmentUpload": "

Allows you to confirm that the attachment has been uploaded using the pre-signed URL provided in the StartAttachmentUpload API. A conflict exception is thrown when an attachment with that identifier is already being uploaded.

ConnectionToken is used for invoking this API instead of ParticipantToken.

The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.
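As a rough, non-authoritative illustration of the attachment flow this entry describes, a Go caller would typically obtain the pre-signed URL from StartAttachmentUpload, PUT the file bytes to it, and then confirm with CompleteAttachmentUpload. This is a sketch only: the connection token, file name, and client tokens are placeholders, and the field names assume the generated connectparticipant shapes in this SDK version.

package main

import (
	"bytes"
	"log"
	"net/http"
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/connectparticipant"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	cp := connectparticipant.New(sess)

	data, err := os.ReadFile("receipt.pdf") // placeholder file
	if err != nil {
		log.Fatal(err)
	}

	// 1. Ask the service for a pre-signed S3 URL.
	start, err := cp.StartAttachmentUpload(&connectparticipant.StartAttachmentUploadInput{
		ConnectionToken:       aws.String("CONNECTION_TOKEN"), // placeholder
		AttachmentName:        aws.String("receipt.pdf"),
		AttachmentSizeInBytes: aws.Int64(int64(len(data))),
		ContentType:           aws.String("application/pdf"),
		ClientToken:           aws.String("unique-client-token-1"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// 2. Upload the bytes directly to S3 using the returned URL and headers.
	req, err := http.NewRequest(http.MethodPut, aws.StringValue(start.UploadMetadata.Url), bytes.NewReader(data))
	if err != nil {
		log.Fatal(err)
	}
	for k, v := range start.UploadMetadata.HeadersToInclude {
		req.Header.Set(k, aws.StringValue(v))
	}
	if _, err := http.DefaultClient.Do(req); err != nil {
		log.Fatal(err)
	}

	// 3. Confirm the upload so the attachment becomes available in the chat.
	_, err = cp.CompleteAttachmentUpload(&connectparticipant.CompleteAttachmentUploadInput{
		ConnectionToken: aws.String("CONNECTION_TOKEN"), // placeholder
		AttachmentIds:   []*string{start.AttachmentId},
		ClientToken:     aws.String("unique-client-token-2"),
	})
	if err != nil {
		log.Fatal(err)
	}
}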

", "CreateParticipantConnection": "

Creates the participant's connection.

ParticipantToken is used for invoking this API instead of ConnectionToken.

The participant token is valid for the lifetime of the participant – until they are part of a contact.

The response URL for WEBSOCKET Type has a connect expiry timeout of 100s. Clients must manually connect to the returned websocket URL and subscribe to the desired topic.

For chat, you need to publish the following on the established websocket connection:

{\"topic\":\"aws/subscribe\",\"content\":{\"topics\":[\"aws/chat\"]}}

Upon websocket URL expiry, as specified in the response ConnectionExpiry parameter, clients need to call this API again to obtain a new websocket URL and perform the same steps as before.

Message streaming support: This API can also be used together with the StartContactStreaming API to create a participant connection for chat contacts that are not using a websocket. For more information about message streaming, Enable real-time chat message streaming in the Amazon Connect Administrator Guide.

Feature specifications: For information about feature specifications, such as the allowed number of open websocket connections per participant, see Feature specifications in the Amazon Connect Administrator Guide.

The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.
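A minimal sketch of the call described above, assuming the usual generated aws-sdk-go v1 client for this service. The participant token is a placeholder, and dialing the returned websocket URL and publishing the aws/subscribe payload shown above is left to whatever websocket client the application uses.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/connectparticipant"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	cp := connectparticipant.New(sess)

	out, err := cp.CreateParticipantConnection(&connectparticipant.CreateParticipantConnectionInput{
		ParticipantToken: aws.String("PARTICIPANT_TOKEN"), // placeholder
		Type:             []*string{aws.String("WEBSOCKET"), aws.String("CONNECTION_CREDENTIALS")},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Connect to out.Websocket.Url with a websocket client (not shown) and publish
	// {"topic":"aws/subscribe","content":{"topics":["aws/chat"]}} to start receiving chat messages.
	fmt.Println("websocket URL:", aws.StringValue(out.Websocket.Url))
	fmt.Println("connection token:", aws.StringValue(out.ConnectionCredentials.ConnectionToken))
}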

", "DescribeView": "

Retrieves the view for the specified view token.

", "DisconnectParticipant": "

Disconnects a participant.

ConnectionToken is used for invoking this API instead of ParticipantToken.

The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.

", "GetAttachment": "

Provides a pre-signed URL for download of a completed attachment. This is an asynchronous API for use with active contacts.

ConnectionToken is used for invoking this API instead of ParticipantToken.

The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.

", - "GetTranscript": "

Retrieves a transcript of the session, including details about any attachments. For information about accessing past chat contact transcripts for a persistent chat, see Enable persistent chat.

ConnectionToken is used for invoking this API instead of ParticipantToken.

The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.

", - "SendEvent": "

Sends an event.

ConnectionToken is used for invoking this API instead of ParticipantToken.

The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.

", + "GetTranscript": "

Retrieves a transcript of the session, including details about any attachments. For information about accessing past chat contact transcripts for a persistent chat, see Enable persistent chat.

If you have a process that consumes events in the transcript of a chat that has ended, note that chat transcripts contain the following event content types if the event occurred during the chat session:

ConnectionToken is used for invoking this API instead of ParticipantToken.

The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.
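A non-authoritative sketch of pulling a transcript with the generated Go client; the connection token is a placeholder and the field names assume the connectparticipant shapes in this SDK version.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/connectparticipant"
)

func main() {
	cp := connectparticipant.New(session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1"))))

	// CONNECTION_TOKEN is a placeholder for the token returned by CreateParticipantConnection.
	out, err := cp.GetTranscript(&connectparticipant.GetTranscriptInput{
		ConnectionToken: aws.String("CONNECTION_TOKEN"),
		MaxResults:      aws.Int64(100),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, item := range out.Transcript {
		fmt.Printf("[%s] %s: %s\n", aws.StringValue(item.ContentType), aws.StringValue(item.DisplayName), aws.StringValue(item.Content))
	}
}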

", + "SendEvent": "

The application/vnd.amazonaws.connect.event.connection.acknowledged ContentType will no longer be supported starting December 31, 2024. This event has been migrated to the CreateParticipantConnection API using the ConnectParticipant field.

Sends an event. Message receipts are not supported when there are more than two active participants in the chat. Using the SendEvent API for message receipts when a supervisor has barged in will result in a conflict exception.

ConnectionToken is used for invoking this API instead of ParticipantToken.

The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.

", "SendMessage": "

Sends a message.

ConnectionToken is used for invoking this API instead of ParticipantToken.

The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.

", "StartAttachmentUpload": "

Provides a pre-signed Amazon S3 URL in response for uploading the file directly to S3.

ConnectionToken is used for invoking this API instead of ParticipantToken.

The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.

" }, @@ -88,7 +88,7 @@ "base": null, "refs": { "Item$ContentType": "

The type of content of the item.

", - "SendEventRequest$ContentType": "

The content type of the request. Supported types are:

", + "SendEventRequest$ContentType": "

The content type of the request. Supported types are:

", "SendMessageRequest$ContentType": "

The type of the content. Supported types are text/plain, text/markdown, application/json, and application/vnd.amazonaws.connect.message.interactive.response.

" } }, @@ -127,7 +127,7 @@ } }, "ConflictException": { - "base": "

An attachment with that identifier is already being uploaded.

", + "base": "

The requested operation conflicts with the current state of a service resource associated with the request.

", "refs": { } }, diff --git a/models/apis/elasticmapreduce/2009-03-31/api-2.json b/models/apis/elasticmapreduce/2009-03-31/api-2.json index 23eaa554aaf..47a4b13527b 100644 --- a/models/apis/elasticmapreduce/2009-03-31/api-2.json +++ b/models/apis/elasticmapreduce/2009-03-31/api-2.json @@ -605,6 +605,17 @@ {"shape":"InternalServerError"} ] }, + "SetUnhealthyNodeReplacement":{ + "name":"SetUnhealthyNodeReplacement", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetUnhealthyNodeReplacementInput"}, + "errors":[ + {"shape":"InternalServerError"} + ] + }, "SetVisibleToAllUsers":{ "name":"SetVisibleToAllUsers", "http":{ @@ -968,6 +979,7 @@ "ReleaseLabel":{"shape":"String"}, "AutoTerminate":{"shape":"Boolean"}, "TerminationProtected":{"shape":"Boolean"}, + "UnhealthyNodeReplacement":{"shape":"BooleanObject"}, "VisibleToAllUsers":{"shape":"Boolean"}, "Applications":{"shape":"ApplicationList"}, "Tags":{"shape":"TagList"}, @@ -2073,6 +2085,7 @@ "Placement":{"shape":"PlacementType"}, "KeepJobFlowAliveWhenNoSteps":{"shape":"Boolean"}, "TerminationProtected":{"shape":"Boolean"}, + "UnhealthyNodeReplacement":{"shape":"BooleanObject"}, "HadoopVersion":{"shape":"XmlStringMaxLen256"}, "Ec2SubnetId":{"shape":"XmlStringMaxLen256"}, "Ec2SubnetIds":{"shape":"XmlStringMaxLen256List"}, @@ -2103,6 +2116,7 @@ "Placement":{"shape":"PlacementType"}, "KeepJobFlowAliveWhenNoSteps":{"shape":"Boolean"}, "TerminationProtected":{"shape":"Boolean"}, + "UnhealthyNodeReplacement":{"shape":"BooleanObject"}, "HadoopVersion":{"shape":"XmlStringMaxLen256"} } }, @@ -2884,6 +2898,17 @@ "TerminationProtected":{"shape":"Boolean"} } }, + "SetUnhealthyNodeReplacementInput":{ + "type":"structure", + "required":[ + "JobFlowIds", + "UnhealthyNodeReplacement" + ], + "members":{ + "JobFlowIds":{"shape":"XmlStringList"}, + "UnhealthyNodeReplacement":{"shape":"BooleanObject"} + } + }, "SetVisibleToAllUsersInput":{ "type":"structure", "required":[ diff --git a/models/apis/elasticmapreduce/2009-03-31/docs-2.json b/models/apis/elasticmapreduce/2009-03-31/docs-2.json index fe92a502cc8..ef6cd6a9605 100644 --- a/models/apis/elasticmapreduce/2009-03-31/docs-2.json +++ b/models/apis/elasticmapreduce/2009-03-31/docs-2.json @@ -50,7 +50,8 @@ "RemoveTags": "

Removes tags from an Amazon EMR resource, such as a cluster or Amazon EMR Studio. Tags make it easier to associate resources in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. For more information, see Tag Clusters.

The following example removes the stack tag with value Prod from a cluster:

", "RunJobFlow": "

RunJobFlow creates and starts running a new cluster (job flow). The cluster runs the steps specified. After the steps complete, the cluster stops and the HDFS partition is lost. To prevent loss of data, configure the last step of the job flow to store results in Amazon S3. If the JobFlowInstancesConfig KeepJobFlowAliveWhenNoSteps parameter is set to TRUE, the cluster transitions to the WAITING state rather than shutting down after the steps have completed.

For additional protection, you can set the JobFlowInstancesConfig TerminationProtected parameter to TRUE to lock the cluster and prevent it from being terminated by API call, user intervention, or in the event of a job flow error.

A maximum of 256 steps are allowed in each job flow.

If your cluster is long-running (such as a Hive data warehouse) or complex, you may require more than 256 steps to process your data. You can bypass the 256-step limitation in various ways, including using the SSH shell to connect to the master node and submitting queries directly to the software running on the master node, such as Hive and Hadoop.

For long-running clusters, we recommend that you periodically store your results.

The instance fleets configuration is available only in Amazon EMR releases 4.8.0 and later, excluding 5.0.x versions. The RunJobFlow request can contain InstanceFleets parameters or InstanceGroups parameters, but not both.

", "SetKeepJobFlowAliveWhenNoSteps": "

You can use the SetKeepJobFlowAliveWhenNoSteps to configure a cluster (job flow) to terminate after the step execution, i.e., all your steps are executed. If you want a transient cluster that shuts down after the last of the current executing steps are completed, you can configure SetKeepJobFlowAliveWhenNoSteps to false. If you want a long running cluster, configure SetKeepJobFlowAliveWhenNoSteps to true.

For more information, see Managing Cluster Termination in the Amazon EMR Management Guide.

", - "SetTerminationProtection": "

SetTerminationProtection locks a cluster (job flow) so the Amazon EC2 instances in the cluster cannot be terminated by user intervention, an API call, or in the event of a job-flow error. The cluster still terminates upon successful completion of the job flow. Calling SetTerminationProtection on a cluster is similar to calling the Amazon EC2 DisableAPITermination API on all Amazon EC2 instances in a cluster.

SetTerminationProtection is used to prevent accidental termination of a cluster and to ensure that in the event of an error, the instances persist so that you can recover any data stored in their ephemeral instance storage.

To terminate a cluster that has been locked by setting SetTerminationProtection to true, you must first unlock the job flow by a subsequent call to SetTerminationProtection in which you set the value to false.

For more information, seeManaging Cluster Termination in the Amazon EMR Management Guide.

", + "SetTerminationProtection": "

SetTerminationProtection locks a cluster (job flow) so the Amazon EC2 instances in the cluster cannot be terminated by user intervention, an API call, or in the event of a job-flow error. The cluster still terminates upon successful completion of the job flow. Calling SetTerminationProtection on a cluster is similar to calling the Amazon EC2 DisableAPITermination API on all Amazon EC2 instances in a cluster.

SetTerminationProtection is used to prevent accidental termination of a cluster and to ensure that in the event of an error, the instances persist so that you can recover any data stored in their ephemeral instance storage.

To terminate a cluster that has been locked by setting SetTerminationProtection to true, you must first unlock the job flow by a subsequent call to SetTerminationProtection in which you set the value to false.

For more information, see Managing Cluster Termination in the Amazon EMR Management Guide.

", + "SetUnhealthyNodeReplacement": "

Specify whether to enable unhealthy node replacement, which lets Amazon EMR gracefully replace core nodes on a cluster if any nodes become unhealthy. For example, a node becomes unhealthy if disk usage is above 90%. If unhealthy node replacement is on and TerminationProtected is off, Amazon EMR immediately terminates the unhealthy core nodes. To use unhealthy node replacement and retain unhealthy core nodes, use SetTerminationProtection to turn on termination protection. In such cases, Amazon EMR adds the unhealthy nodes to a denylist, reducing job interruptions and failures.

If unhealthy node replacement is on, Amazon EMR notifies YARN and other applications on the cluster to stop scheduling tasks with these nodes, moves the data, and then terminates the nodes.

For more information, see graceful node replacement in the Amazon EMR Management Guide.
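A hedged sketch of invoking the new operation with the Go client generated from the SetUnhealthyNodeReplacementInput shape added in this release; the cluster ID is a placeholder. Per the note above, pair it with SetTerminationProtection if you want unhealthy nodes retained rather than terminated.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/emr"
)

func main() {
	client := emr.New(session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1"))))

	// Turn on graceful unhealthy node replacement for a running cluster.
	// "j-EXAMPLECLUSTERID" is a placeholder job flow ID.
	_, err := client.SetUnhealthyNodeReplacement(&emr.SetUnhealthyNodeReplacementInput{
		JobFlowIds:               []*string{aws.String("j-EXAMPLECLUSTERID")},
		UnhealthyNodeReplacement: aws.Bool(true),
	})
	if err != nil {
		log.Fatal(err)
	}
}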

", "SetVisibleToAllUsers": "

The SetVisibleToAllUsers parameter is no longer supported. Your cluster may be visible to all users in your account. To restrict cluster access using an IAM policy, see Identity and Access Management for Amazon EMR.

Sets the Cluster$VisibleToAllUsers value for an Amazon EMR cluster. When true, IAM principals in the Amazon Web Services account can perform Amazon EMR cluster actions that their IAM policies allow. When false, only the IAM principal that created the cluster and the Amazon Web Services account root user can perform Amazon EMR actions on the cluster, regardless of IAM permissions policies attached to other IAM principals.

This action works on running clusters. When you create a cluster, use the RunJobFlowInput$VisibleToAllUsers parameter.

For more information, see Understanding the Amazon EMR Cluster VisibleToAllUsers Setting in the Amazon EMR Management Guide.

", "StartNotebookExecution": "

Starts a notebook execution.

", "StopNotebookExecution": "

Stops a notebook execution.

", @@ -234,10 +235,14 @@ "BooleanObject": { "base": null, "refs": { + "Cluster$UnhealthyNodeReplacement": "

Indicates whether Amazon EMR should gracefully replace Amazon EC2 core instances that have degraded within the cluster.

", "CreateStudioInput$TrustedIdentityPropagationEnabled": "

A Boolean indicating whether to enable Trusted identity propagation for the Studio. The default value is false.

", "EbsConfiguration$EbsOptimized": "

Indicates whether an Amazon EBS volume is EBS-optimized.

", "InstanceGroup$EbsOptimized": "

If the instance group is EBS-optimized. An Amazon EBS-optimized instance uses an optimized configuration stack and provides additional, dedicated capacity for Amazon EBS I/O.

", "InstanceTypeSpecification$EbsOptimized": "

Evaluates to TRUE when the specified InstanceType is EBS-optimized.

", + "JobFlowInstancesConfig$UnhealthyNodeReplacement": "

Indicates whether Amazon EMR should gracefully replace core nodes that have degraded within the cluster.

", + "JobFlowInstancesDetail$UnhealthyNodeReplacement": "

Indicates whether Amazon EMR should gracefully replace core nodes that have degraded within the cluster.

", + "SetUnhealthyNodeReplacementInput$UnhealthyNodeReplacement": "

Indicates whether to turn on or turn off graceful unhealthy node replacement.

", "Studio$TrustedIdentityPropagationEnabled": "

Indicates whether the Studio has Trusted identity propagation enabled. The default value is false.

" } }, @@ -1850,6 +1855,11 @@ "refs": { } }, + "SetUnhealthyNodeReplacementInput": { + "base": null, + "refs": { + } + }, "SetVisibleToAllUsersInput": { "base": "

The input to the SetVisibleToAllUsers action.

", "refs": { @@ -2354,6 +2364,7 @@ "ScriptBootstrapActionConfig$Args": "

A list of command line arguments to pass to the bootstrap action script.

", "SetKeepJobFlowAliveWhenNoStepsInput$JobFlowIds": "

A list of strings that uniquely identify the clusters to protect. This identifier is returned by RunJobFlow and can also be obtained from DescribeJobFlows.

", "SetTerminationProtectionInput$JobFlowIds": "

A list of strings that uniquely identify the clusters to protect. This identifier is returned by RunJobFlow and can also be obtained from DescribeJobFlows .

", + "SetUnhealthyNodeReplacementInput$JobFlowIds": "

The list of strings that uniquely identify the clusters for which to turn on unhealthy node replacement. You can get these identifiers by running the RunJobFlow or the DescribeJobFlows operations.

", "SetVisibleToAllUsersInput$JobFlowIds": "

The unique identifier of the job flow (cluster).

", "SupportedProductConfig$Args": "

The list of user-supplied arguments.

", "TerminateJobFlowsInput$JobFlowIds": "

A list of job flows to be shut down.

" diff --git a/models/apis/firehose/2015-08-04/api-2.json b/models/apis/firehose/2015-08-04/api-2.json index b85b1f1c280..fc1b7b613b7 100644 --- a/models/apis/firehose/2015-08-04/api-2.json +++ b/models/apis/firehose/2015-08-04/api-2.json @@ -53,6 +53,22 @@ {"shape":"ResourceNotFoundException"} ] }, + "GetKinesisStream":{ + "name":"GetKinesisStream", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetKinesisStreamInput"}, + "output":{"shape":"GetKinesisStreamOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"InvalidStreamTypeException"}, + {"shape":"InvalidKMSResourceException"} + ], + "internalonly":true + }, "ListDeliveryStreams":{ "name":"ListDeliveryStreams", "http":{ @@ -183,6 +199,24 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ConcurrentModificationException"} ] + }, + "VerifyResourcesExistForTagris":{ + "name":"VerifyResourcesExistForTagris", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TagrisVerifyResourcesExistInput"}, + "output":{"shape":"TagrisVerifyResourcesExistOutput"}, + "errors":[ + {"shape":"TagrisAccessDeniedException"}, + {"shape":"TagrisInternalServiceException"}, + {"shape":"TagrisInvalidArnException"}, + {"shape":"TagrisInvalidParameterException"}, + {"shape":"TagrisPartialResourcesExistResultsException"}, + {"shape":"TagrisThrottledException"} + ], + "internalonly":true } }, "shapes":{ @@ -192,6 +226,10 @@ "min":1, "pattern":"arn:.*" }, + "AccessKeyId":{ + "type":"string", + "sensitive":true + }, "AmazonOpenSearchServerlessBufferingHints":{ "type":"structure", "members":{ @@ -540,6 +578,11 @@ "DeliveryStreamARN":{"shape":"DeliveryStreamARN"} } }, + "CustomTimeZone":{ + "type":"string", + "max":50, + "min":0 + }, "Data":{ "type":"blob", "max":1024000, @@ -932,7 +975,9 @@ "S3BackupMode":{"shape":"S3BackupMode"}, "S3BackupConfiguration":{"shape":"S3DestinationConfiguration"}, "DataFormatConversionConfiguration":{"shape":"DataFormatConversionConfiguration"}, - "DynamicPartitioningConfiguration":{"shape":"DynamicPartitioningConfiguration"} + "DynamicPartitioningConfiguration":{"shape":"DynamicPartitioningConfiguration"}, + "FileExtension":{"shape":"FileExtension"}, + "CustomTimeZone":{"shape":"CustomTimeZone"} } }, "ExtendedS3DestinationDescription":{ @@ -957,7 +1002,9 @@ "S3BackupMode":{"shape":"S3BackupMode"}, "S3BackupDescription":{"shape":"S3DestinationDescription"}, "DataFormatConversionConfiguration":{"shape":"DataFormatConversionConfiguration"}, - "DynamicPartitioningConfiguration":{"shape":"DynamicPartitioningConfiguration"} + "DynamicPartitioningConfiguration":{"shape":"DynamicPartitioningConfiguration"}, + "FileExtension":{"shape":"FileExtension"}, + "CustomTimeZone":{"shape":"CustomTimeZone"} } }, "ExtendedS3DestinationUpdate":{ @@ -975,7 +1022,9 @@ "S3BackupMode":{"shape":"S3BackupMode"}, "S3BackupUpdate":{"shape":"S3DestinationUpdate"}, "DataFormatConversionConfiguration":{"shape":"DataFormatConversionConfiguration"}, - "DynamicPartitioningConfiguration":{"shape":"DynamicPartitioningConfiguration"} + "DynamicPartitioningConfiguration":{"shape":"DynamicPartitioningConfiguration"}, + "FileExtension":{"shape":"FileExtension"}, + "CustomTimeZone":{"shape":"CustomTimeZone"} } }, "FailureDescription":{ @@ -989,6 +1038,27 @@ "Details":{"shape":"NonEmptyString"} } }, + "FileExtension":{ + "type":"string", + "max":128, + "min":0, + "pattern":"^$|\\.[0-9a-z!\\-_.*'()]+" + }, + "FirehoseSource":{"type":"string"}, + "GetKinesisStreamInput":{ + 
"type":"structure", + "required":["DeliveryStreamARN"], + "members":{ + "DeliveryStreamARN":{"shape":"DeliveryStreamARN"} + } + }, + "GetKinesisStreamOutput":{ + "type":"structure", + "members":{ + "KinesisStreamARN":{"shape":"KinesisStreamARN"}, + "CredentialsForReadingKinesisStream":{"shape":"SessionCredentials"} + } + }, "HECAcknowledgmentTimeoutInSeconds":{ "type":"integer", "max":600, @@ -1208,6 +1278,14 @@ }, "exception":true }, + "InvalidStreamTypeException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"}, + "source":{"shape":"FirehoseSource"} + }, + "exception":true + }, "KMSEncryptionConfig":{ "type":"structure", "required":["AWSKMSKeyARN"], @@ -1518,7 +1596,8 @@ "BufferIntervalInSeconds", "SubRecordType", "Delimiter", - "CompressionFormat" + "CompressionFormat", + "DataMessageExtraction" ] }, "ProcessorParameterValue":{ @@ -1532,6 +1611,7 @@ "enum":[ "RecordDeAggregation", "Decompression", + "CloudWatchLogProcessing", "Lambda", "MetadataExtraction", "AppendDelimiterToRecord" @@ -1794,6 +1874,10 @@ "VersionId":{"shape":"NonEmptyStringWithoutWhitespace"} } }, + "SecretAccessKey":{ + "type":"string", + "sensitive":true + }, "SecurityGroupIdList":{ "type":"list", "member":{"shape":"NonEmptyStringWithoutWhitespace"}, @@ -1815,6 +1899,26 @@ "exception":true, "fault":true }, + "SessionCredentials":{ + "type":"structure", + "required":[ + "AccessKeyId", + "SecretAccessKey", + "SessionToken", + "Expiration" + ], + "members":{ + "AccessKeyId":{"shape":"AccessKeyId"}, + "SecretAccessKey":{"shape":"SecretAccessKey"}, + "SessionToken":{"shape":"SessionToken"}, + "Expiration":{"shape":"Timestamp"} + }, + "sensitive":true + }, + "SessionToken":{ + "type":"string", + "sensitive":true + }, "SizeInMBs":{ "type":"integer", "max":128, @@ -2177,6 +2281,111 @@ "min":0, "pattern":"^[\\p{L}\\p{Z}\\p{N}_.:\\/=+\\-@%]*$" }, + "TagrisAccessDeniedException":{ + "type":"structure", + "members":{ + "message":{"shape":"TagrisExceptionMessage"} + }, + "exception":true + }, + "TagrisAccountId":{ + "type":"string", + "max":12, + "min":12 + }, + "TagrisAmazonResourceName":{ + "type":"string", + "max":1011, + "min":1 + }, + "TagrisExceptionMessage":{ + "type":"string", + "max":2048, + "min":0 + }, + "TagrisInternalId":{ + "type":"string", + "max":64, + "min":0 + }, + "TagrisInternalServiceException":{ + "type":"structure", + "members":{ + "message":{"shape":"TagrisExceptionMessage"} + }, + "exception":true, + "fault":true + }, + "TagrisInvalidArnException":{ + "type":"structure", + "members":{ + "message":{"shape":"TagrisExceptionMessage"}, + "sweepListItem":{"shape":"TagrisSweepListItem"} + }, + "exception":true + }, + "TagrisInvalidParameterException":{ + "type":"structure", + "members":{ + "message":{"shape":"TagrisExceptionMessage"} + }, + "exception":true + }, + "TagrisPartialResourcesExistResultsException":{ + "type":"structure", + "members":{ + "message":{"shape":"TagrisExceptionMessage"}, + "resourceExistenceInformation":{"shape":"TagrisSweepListResult"} + }, + "exception":true + }, + "TagrisStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "NOT_ACTIVE" + ] + }, + "TagrisSweepList":{ + "type":"list", + "member":{"shape":"TagrisSweepListItem"} + }, + "TagrisSweepListItem":{ + "type":"structure", + "members":{ + "TagrisAccountId":{"shape":"TagrisAccountId"}, + "TagrisAmazonResourceName":{"shape":"TagrisAmazonResourceName"}, + "TagrisInternalId":{"shape":"TagrisInternalId"}, + "TagrisVersion":{"shape":"TagrisVersion"} + } + }, + "TagrisSweepListResult":{ + "type":"map", + 
"key":{"shape":"TagrisAmazonResourceName"}, + "value":{"shape":"TagrisStatus"} + }, + "TagrisThrottledException":{ + "type":"structure", + "members":{ + "message":{"shape":"TagrisExceptionMessage"} + }, + "exception":true + }, + "TagrisVerifyResourcesExistInput":{ + "type":"structure", + "required":["TagrisSweepList"], + "members":{ + "TagrisSweepList":{"shape":"TagrisSweepList"} + } + }, + "TagrisVerifyResourcesExistOutput":{ + "type":"structure", + "required":["TagrisSweepListResult"], + "members":{ + "TagrisSweepListResult":{"shape":"TagrisSweepListResult"} + } + }, + "TagrisVersion":{"type":"long"}, "Timestamp":{"type":"timestamp"}, "TopicName":{ "type":"string", diff --git a/models/apis/firehose/2015-08-04/docs-2.json b/models/apis/firehose/2015-08-04/docs-2.json index 7dba048cd44..7c23feb1a32 100644 --- a/models/apis/firehose/2015-08-04/docs-2.json +++ b/models/apis/firehose/2015-08-04/docs-2.json @@ -1,29 +1,37 @@ { "version": "2.0", - "service": "Amazon Kinesis Data Firehose API Reference

Amazon Kinesis Data Firehose is a fully managed service that delivers real-time streaming data to destinations such as Amazon Simple Storage Service (Amazon S3), Amazon OpenSearch Service, Amazon Redshift, Splunk, and various other supportd destinations.

", + "service": "Amazon Data Firehose

Amazon Data Firehose is a fully managed service that delivers real-time streaming data to destinations such as Amazon Simple Storage Service (Amazon S3), Amazon OpenSearch Service, Amazon Redshift, Splunk, and various other supported destinations.

", "operations": { - "CreateDeliveryStream": "

Creates a Kinesis Data Firehose delivery stream.

By default, you can create up to 50 delivery streams per Amazon Web Services Region.

This is an asynchronous operation that immediately returns. The initial status of the delivery stream is CREATING. After the delivery stream is created, its status is ACTIVE and it now accepts data. If the delivery stream creation fails, the status transitions to CREATING_FAILED. Attempts to send data to a delivery stream that is not in the ACTIVE state cause an exception. To check the state of a delivery stream, use DescribeDeliveryStream.

If the status of a delivery stream is CREATING_FAILED, this status doesn't change, and you can't invoke CreateDeliveryStream again on it. However, you can invoke the DeleteDeliveryStream operation to delete it.

A Kinesis Data Firehose delivery stream can be configured to receive records directly from providers using PutRecord or PutRecordBatch, or it can be configured to use an existing Kinesis stream as its source. To specify a Kinesis data stream as input, set the DeliveryStreamType parameter to KinesisStreamAsSource, and provide the Kinesis stream Amazon Resource Name (ARN) and role ARN in the KinesisStreamSourceConfiguration parameter.

To create a delivery stream with server-side encryption (SSE) enabled, include DeliveryStreamEncryptionConfigurationInput in your request. This is optional. You can also invoke StartDeliveryStreamEncryption to turn on SSE for an existing delivery stream that doesn't have SSE enabled.

A delivery stream is configured with a single destination, such as Amazon Simple Storage Service (Amazon S3), Amazon Redshift, Amazon OpenSearch Service, Amazon OpenSearch Serverless, Splunk, and any custom HTTP endpoint or HTTP endpoints owned by or supported by third-party service providers, including Datadog, Dynatrace, LogicMonitor, MongoDB, New Relic, and Sumo Logic. You must specify only one of the following destination configuration parameters: ExtendedS3DestinationConfiguration, S3DestinationConfiguration, ElasticsearchDestinationConfiguration, RedshiftDestinationConfiguration, or SplunkDestinationConfiguration.

When you specify S3DestinationConfiguration, you can also provide the following optional values: BufferingHints, EncryptionConfiguration, and CompressionFormat. By default, if no BufferingHints value is provided, Kinesis Data Firehose buffers data up to 5 MB or for 5 minutes, whichever condition is satisfied first. BufferingHints is a hint, so there are some cases where the service cannot adhere to these conditions strictly. For example, record boundaries might be such that the size is a little over or under the configured buffering size. By default, no encryption is performed. We strongly recommend that you enable encryption to ensure secure data storage in Amazon S3.

A few notes about Amazon Redshift as a destination:

Kinesis Data Firehose assumes the IAM role that is configured as part of the destination. The role should allow the Kinesis Data Firehose principal to assume the role, and the role should have permissions that allow the service to deliver the data. For more information, see Grant Kinesis Data Firehose Access to an Amazon S3 Destination in the Amazon Kinesis Data Firehose Developer Guide.

", - "DeleteDeliveryStream": "

Deletes a delivery stream and its data.

To check the state of a delivery stream, use DescribeDeliveryStream. You can delete a delivery stream only if it is in one of the following states: ACTIVE, DELETING, CREATING_FAILED, or DELETING_FAILED. You can't delete a delivery stream that is in the CREATING state. While the deletion request is in process, the delivery stream is in the DELETING state.

While the delivery stream is in the DELETING state, the service might continue to accept records, but it doesn't make any guarantees with respect to delivering the data. Therefore, as a best practice, first stop any applications that are sending records before you delete a delivery stream.

", + "CreateDeliveryStream": "

Creates a Firehose delivery stream.

By default, you can create up to 50 delivery streams per Amazon Web Services Region.

This is an asynchronous operation that immediately returns. The initial status of the delivery stream is CREATING. After the delivery stream is created, its status is ACTIVE and it now accepts data. If the delivery stream creation fails, the status transitions to CREATING_FAILED. Attempts to send data to a delivery stream that is not in the ACTIVE state cause an exception. To check the state of a delivery stream, use DescribeDeliveryStream.

If the status of a delivery stream is CREATING_FAILED, this status doesn't change, and you can't invoke CreateDeliveryStream again on it. However, you can invoke the DeleteDeliveryStream operation to delete it.

A Firehose delivery stream can be configured to receive records directly from providers using PutRecord or PutRecordBatch, or it can be configured to use an existing Kinesis stream as its source. To specify a Kinesis data stream as input, set the DeliveryStreamType parameter to KinesisStreamAsSource, and provide the Kinesis stream Amazon Resource Name (ARN) and role ARN in the KinesisStreamSourceConfiguration parameter.

To create a delivery stream with server-side encryption (SSE) enabled, include DeliveryStreamEncryptionConfigurationInput in your request. This is optional. You can also invoke StartDeliveryStreamEncryption to turn on SSE for an existing delivery stream that doesn't have SSE enabled.

A delivery stream is configured with a single destination, such as Amazon Simple Storage Service (Amazon S3), Amazon Redshift, Amazon OpenSearch Service, Amazon OpenSearch Serverless, Splunk, and any custom HTTP endpoint or HTTP endpoints owned by or supported by third-party service providers, including Datadog, Dynatrace, LogicMonitor, MongoDB, New Relic, and Sumo Logic. You must specify only one of the following destination configuration parameters: ExtendedS3DestinationConfiguration, S3DestinationConfiguration, ElasticsearchDestinationConfiguration, RedshiftDestinationConfiguration, or SplunkDestinationConfiguration.

When you specify S3DestinationConfiguration, you can also provide the following optional values: BufferingHints, EncryptionConfiguration, and CompressionFormat. By default, if no BufferingHints value is provided, Firehose buffers data up to 5 MB or for 5 minutes, whichever condition is satisfied first. BufferingHints is a hint, so there are some cases where the service cannot adhere to these conditions strictly. For example, record boundaries might be such that the size is a little over or under the configured buffering size. By default, no encryption is performed. We strongly recommend that you enable encryption to ensure secure data storage in Amazon S3.

A few notes about Amazon Redshift as a destination:

Firehose assumes the IAM role that is configured as part of the destination. The role should allow the Firehose principal to assume the role, and the role should have permissions that allow the service to deliver the data. For more information, see Grant Firehose Access to an Amazon S3 Destination in the Amazon Firehose Developer Guide.
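The firehose api-2.json change earlier in this diff adds FileExtension and CustomTimeZone to the extended S3 destination shapes. A rough, non-authoritative sketch of wiring them through CreateDeliveryStream with the regenerated Go client; the role ARN, bucket ARN, stream name, and the specific extension and time zone values are placeholders.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/firehose"
)

func main() {
	fh := firehose.New(session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1"))))

	_, err := fh.CreateDeliveryStream(&firehose.CreateDeliveryStreamInput{
		DeliveryStreamName: aws.String("example-stream"),
		DeliveryStreamType: aws.String(firehose.DeliveryStreamTypeDirectPut),
		ExtendedS3DestinationConfiguration: &firehose.ExtendedS3DestinationConfiguration{
			RoleARN:   aws.String("arn:aws:iam::123456789012:role/example-firehose-role"), // placeholder
			BucketARN: aws.String("arn:aws:s3:::example-bucket"),                          // placeholder
			// New in this release: override the delivered object's file extension and
			// the time zone used for the timestamp in the S3 prefix.
			FileExtension:  aws.String(".json.gz"),
			CustomTimeZone: aws.String("America/Los_Angeles"),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}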

", + "DeleteDeliveryStream": "

Deletes a delivery stream and its data.

You can delete a delivery stream only if it is in one of the following states: ACTIVE, DELETING, CREATING_FAILED, or DELETING_FAILED. You can't delete a delivery stream that is in the CREATING state. To check the state of a delivery stream, use DescribeDeliveryStream.

DeleteDeliveryStream is an asynchronous API. When an API request to DeleteDeliveryStream succeeds, the delivery stream is marked for deletion, and it goes into the DELETING state. While the delivery stream is in the DELETING state, the service might continue to accept records, but it doesn't make any guarantees with respect to delivering the data. Therefore, as a best practice, first stop any applications that are sending records before you delete a delivery stream.

Removal of a delivery stream that is in the DELETING state is a low priority operation for the service. A stream may remain in the DELETING state for several minutes. Therefore, as a best practice, applications should not wait for streams in the DELETING state to be removed.

", "DescribeDeliveryStream": "

Describes the specified delivery stream and its status. For example, after your delivery stream is created, call DescribeDeliveryStream to see whether the delivery stream is ACTIVE and therefore ready for data to be sent to it.

If the status of a delivery stream is CREATING_FAILED, this status doesn't change, and you can't invoke CreateDeliveryStream again on it. However, you can invoke the DeleteDeliveryStream operation to delete it. If the status is DELETING_FAILED, you can force deletion by invoking DeleteDeliveryStream again but with DeleteDeliveryStreamInput$AllowForceDelete set to true.

", + "GetKinesisStream": null, "ListDeliveryStreams": "

Lists your delivery streams in alphabetical order of their names.

The number of delivery streams might be too large to return using a single call to ListDeliveryStreams. You can limit the number of delivery streams returned, using the Limit parameter. To determine whether there are more delivery streams to list, check the value of HasMoreDeliveryStreams in the output. If there are more delivery streams to list, you can request them by calling this operation again and setting the ExclusiveStartDeliveryStreamName parameter to the name of the last delivery stream returned in the last call.

", "ListTagsForDeliveryStream": "

Lists the tags for the specified delivery stream. This operation has a limit of five transactions per second per account.

", - "PutRecord": "

Writes a single data record into an Amazon Kinesis Data Firehose delivery stream. To write multiple data records into a delivery stream, use PutRecordBatch. Applications using these operations are referred to as producers.

By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits and how to request an increase, see Amazon Kinesis Data Firehose Limits.

Kinesis Data Firehose accumulates and publishes a particular metric for a customer account in one minute intervals. It is possible that the bursts of incoming bytes/records ingested to a delivery stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1 minute CloudWatch metrics.

You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KiB in size, and any kind of data. For example, it can be a segment from a log file, geographic location data, website clickstream data, and so on.

Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination.

The PutRecord operation returns a RecordId, which is a unique string assigned to each record. Producer applications can use this ID for purposes such as auditability and investigation.

If the PutRecord operation throws a ServiceUnavailableException, the API is automatically reinvoked (retried) 3 times. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream.

Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can result in data duplicates. For larger data assets, allow for a longer time out before retrying Put API operations.

Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it tries to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available.

Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.

", - "PutRecordBatch": "

Writes multiple data records into a delivery stream in a single call, which can achieve higher throughput per producer than when writing single records. To write single data records into a delivery stream, use PutRecord. Applications using these operations are referred to as producers.

Kinesis Data Firehose accumulates and publishes a particular metric for a customer account in one minute intervals. It is possible that the bursts of incoming bytes/records ingested to a delivery stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1 minute CloudWatch metrics.

For information about service quota, see Amazon Kinesis Data Firehose Quota.

Each PutRecordBatch request supports up to 500 records. Each record in the request can be as large as 1,000 KB (before base64 encoding), up to a limit of 4 MB for the entire request. These limits cannot be changed.

You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it could be a segment from a log file, geographic location data, website clickstream data, and so on.

Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination.

The PutRecordBatch response includes a count of failed records, FailedPutCount, and an array of responses, RequestResponses. Even if the PutRecordBatch call succeeds, the value of FailedPutCount may be greater than 0, indicating that there are records for which the operation didn't succeed. Each entry in the RequestResponses array provides additional information about the processed record. It directly correlates with a record in the request array using the same ordering, from the top to the bottom. The response array always includes the same number of records as the request array. RequestResponses includes both successfully and unsuccessfully processed records. Kinesis Data Firehose tries to process all records in each PutRecordBatch request. A single record failure does not stop the processing of subsequent records.

A successfully processed record includes a RecordId value, which is unique for the record. An unsuccessfully processed record includes ErrorCode and ErrorMessage values. ErrorCode reflects the type of error, and is one of the following values: ServiceUnavailableException or InternalFailure. ErrorMessage provides more detailed information about the error.

If there is an internal server error or a timeout, the write might have completed or it might have failed. If FailedPutCount is greater than 0, retry the request, resending only those records that might have failed processing. This minimizes the possible duplicate records and also reduces the total bytes sent (and corresponding charges). We recommend that you handle any duplicates at the destination.

If PutRecordBatch throws ServiceUnavailableException, the API is automatically reinvoked (retried) 3 times. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream.

Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can result in data duplicates. For larger data assets, allow for a longer time out before retrying Put API operations.

Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available.

Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.

", - "StartDeliveryStreamEncryption": "

Enables server-side encryption (SSE) for the delivery stream.

This operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data Firehose first sets the encryption status of the stream to ENABLING, and then to ENABLED. The encryption status of a delivery stream is the Status property in DeliveryStreamEncryptionConfiguration. If the operation fails, the encryption status changes to ENABLING_FAILED. You can continue to read and write data to your delivery stream while the encryption status is ENABLING, but the data is not encrypted. It can take up to 5 seconds after the encryption status changes to ENABLED before all records written to the delivery stream are encrypted. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively.

To check the encryption status of a delivery stream, use DescribeDeliveryStream.

Even if encryption is currently enabled for a delivery stream, you can still invoke this operation on it to change the ARN of the CMK or both its type and ARN. If you invoke this method to change the CMK, and the old CMK is of type CUSTOMER_MANAGED_CMK, Kinesis Data Firehose schedules the grant it had on the old CMK for retirement. If the new CMK is of type CUSTOMER_MANAGED_CMK, Kinesis Data Firehose creates a grant that enables it to use the new CMK to encrypt and decrypt data and to manage the grant.

For the KMS grant creation to be successful, Kinesis Data Firehose APIs StartDeliveryStreamEncryption and CreateDeliveryStream should not be called with session credentials that are more than 6 hours old.

If a delivery stream already has encryption enabled and then you invoke this operation to change the ARN of the CMK or both its type and ARN and you get ENABLING_FAILED, this only means that the attempt to change the CMK failed. In this case, encryption remains enabled with the old CMK.

If the encryption status of your delivery stream is ENABLING_FAILED, you can invoke this operation again with a valid CMK. The CMK must be enabled and the key policy mustn't explicitly deny the permission for Kinesis Data Firehose to invoke KMS encrypt and decrypt operations.

You can enable SSE for a delivery stream only if it's a delivery stream that uses DirectPut as its source.

The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period.

", - "StopDeliveryStreamEncryption": "

Disables server-side encryption (SSE) for the delivery stream.

This operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data Firehose first sets the encryption status of the stream to DISABLING, and then to DISABLED. You can continue to read and write data to your stream while its status is DISABLING. It can take up to 5 seconds after the encryption status changes to DISABLED before all records written to the delivery stream are no longer subject to encryption. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively.

To check the encryption state of a delivery stream, use DescribeDeliveryStream.

If SSE is enabled using a customer managed CMK and then you invoke StopDeliveryStreamEncryption, Kinesis Data Firehose schedules the related KMS grant for retirement and then retires it after it ensures that it is finished delivering records to the destination.

The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period.

", + "PutRecord": "

Writes a single data record into an Amazon Firehose delivery stream. To write multiple data records into a delivery stream, use PutRecordBatch. Applications using these operations are referred to as producers.

By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits and how to request an increase, see Amazon Firehose Limits.

Firehose accumulates and publishes a particular metric for a customer account in one minute intervals. It is possible that the bursts of incoming bytes/records ingested to a delivery stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1 minute CloudWatch metrics.

You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KiB in size, and any kind of data. For example, it can be a segment from a log file, geographic location data, website clickstream data, and so on.

Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination.

The PutRecord operation returns a RecordId, which is a unique string assigned to each record. Producer applications can use this ID for purposes such as auditability and investigation.

If the PutRecord operation throws a ServiceUnavailableException, the API is automatically reinvoked (retried) 3 times. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream.

Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can result in data duplicates. For larger data assets, allow for a longer time out before retrying Put API operations.

Data records sent to Firehose are stored for 24 hours from the time they are added to a delivery stream as it tries to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available.

Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.
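A minimal sketch of the producer flow described above, using the Go SDK's firehose client; the stream name and the JSON payload are placeholder assumptions, and any retry handling beyond the SDK's built-in behavior is left to the caller.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/firehose"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := firehose.New(sess)

	// A trailing newline acts as the record delimiter discussed above.
	out, err := svc.PutRecord(&firehose.PutRecordInput{
		DeliveryStreamName: aws.String("my-delivery-stream"), // placeholder stream name
		Record: &firehose.Record{
			Data: []byte(`{"event":"page_view","path":"/home"}` + "\n"),
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// RecordId supports auditability; Encrypted reflects the SSE status of the stream.
	fmt.Println(aws.StringValue(out.RecordId), aws.BoolValue(out.Encrypted))
}
```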

", + "PutRecordBatch": "

Writes multiple data records into a delivery stream in a single call, which can achieve higher throughput per producer than when writing single records. To write single data records into a delivery stream, use PutRecord. Applications using these operations are referred to as producers.

Firehose accumulates and publishes a particular metric for a customer account in one-minute intervals. It is possible that bursts of incoming bytes/records ingested into a delivery stream last only a few seconds. As a result, the actual spikes in traffic might not be fully visible in the customer's one-minute CloudWatch metrics.

For information about service quota, see Amazon Firehose Quota.

Each PutRecordBatch request supports up to 500 records. Each record in the request can be as large as 1,000 KB (before base64 encoding), up to a limit of 4 MB for the entire request. These limits cannot be changed.

You must specify the name of the delivery stream and the data records when using PutRecordBatch. Each data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it could be a segment from a log file, geographic location data, website clickstream data, and so on.

Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination.

The PutRecordBatch response includes a count of failed records, FailedPutCount, and an array of responses, RequestResponses. Even if the PutRecordBatch call succeeds, the value of FailedPutCount may be greater than 0, indicating that there are records for which the operation didn't succeed. Each entry in the RequestResponses array provides additional information about the processed record. It directly correlates with a record in the request array using the same ordering, from the top to the bottom. The response array always includes the same number of records as the request array. RequestResponses includes both successfully and unsuccessfully processed records. Firehose tries to process all records in each PutRecordBatch request. A single record failure does not stop the processing of subsequent records.

A successfully processed record includes a RecordId value, which is unique for the record. An unsuccessfully processed record includes ErrorCode and ErrorMessage values. ErrorCode reflects the type of error, and is one of the following values: ServiceUnavailableException or InternalFailure. ErrorMessage provides more detailed information about the error.

If there is an internal server error or a timeout, the write might have completed or it might have failed. If FailedPutCount is greater than 0, retry the request, resending only those records that might have failed processing. This minimizes the possible duplicate records and also reduces the total bytes sent (and corresponding charges). We recommend that you handle any duplicates at the destination.

If PutRecordBatch throws ServiceUnavailableException, the API is automatically reinvoked (retried) 3 times. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream.

Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can result in duplicate data. For larger data assets, allow for a longer timeout before retrying Put API operations.

Data records sent to Firehose are stored for 24 hours from the time they are added to a delivery stream as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available.

Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.
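A sketch of the FailedPutCount handling described above, assuming a *firehose.Firehose client configured as in the PutRecord sketch; the stream name is a placeholder, and the backoff before resending is left to the caller.

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/firehose"
)

// putBatch returns the subset of records that failed so the caller can resend
// only those after a backoff; duplicates still need to be handled at the destination.
func putBatch(svc *firehose.Firehose, records []*firehose.Record) ([]*firehose.Record, error) {
	out, err := svc.PutRecordBatch(&firehose.PutRecordBatchInput{
		DeliveryStreamName: aws.String("my-delivery-stream"), // placeholder stream name
		Records:            records,
	})
	if err != nil {
		return records, err
	}
	if aws.Int64Value(out.FailedPutCount) == 0 {
		return nil, nil
	}
	// RequestResponses is ordered exactly like the request array, so index i
	// in the response corresponds to records[i].
	var failed []*firehose.Record
	for i, entry := range out.RequestResponses {
		if entry.ErrorCode != nil {
			failed = append(failed, records[i])
		}
	}
	return failed, nil
}
```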

", + "StartDeliveryStreamEncryption": "

Enables server-side encryption (SSE) for the delivery stream.

This operation is asynchronous. It returns immediately. When you invoke it, Firehose first sets the encryption status of the stream to ENABLING, and then to ENABLED. The encryption status of a delivery stream is the Status property in DeliveryStreamEncryptionConfiguration. If the operation fails, the encryption status changes to ENABLING_FAILED. You can continue to read and write data to your delivery stream while the encryption status is ENABLING, but the data is not encrypted. It can take up to 5 seconds after the encryption status changes to ENABLED before all records written to the delivery stream are encrypted. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively.

To check the encryption status of a delivery stream, use DescribeDeliveryStream.

Even if encryption is currently enabled for a delivery stream, you can still invoke this operation on it to change the ARN of the CMK or both its type and ARN. If you invoke this method to change the CMK, and the old CMK is of type CUSTOMER_MANAGED_CMK, Firehose schedules the grant it had on the old CMK for retirement. If the new CMK is of type CUSTOMER_MANAGED_CMK, Firehose creates a grant that enables it to use the new CMK to encrypt and decrypt data and to manage the grant.

For the KMS grant creation to be successful, the Firehose API operations StartDeliveryStreamEncryption and CreateDeliveryStream should not be called with session credentials that are more than 6 hours old.

If a delivery stream already has encryption enabled and then you invoke this operation to change the ARN of the CMK or both its type and ARN and you get ENABLING_FAILED, this only means that the attempt to change the CMK failed. In this case, encryption remains enabled with the old CMK.

If the encryption status of your delivery stream is ENABLING_FAILED, you can invoke this operation again with a valid CMK. The CMK must be enabled and the key policy mustn't explicitly deny the permission for Firehose to invoke KMS encrypt and decrypt operations.

You can enable SSE for a delivery stream only if it's a delivery stream that uses DirectPut as its source.

The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period.
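A sketch of enabling SSE with a customer managed CMK as described above; the stream name and key ARN are placeholders, and the stream is assumed to use DirectPut as its source.

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/firehose"
)

// enableSSE requests encryption with a customer managed CMK; the status moves
// through ENABLING to ENABLED (or ENABLING_FAILED) asynchronously.
func enableSSE(svc *firehose.Firehose) error {
	_, err := svc.StartDeliveryStreamEncryption(&firehose.StartDeliveryStreamEncryptionInput{
		DeliveryStreamName: aws.String("my-delivery-stream"), // placeholder stream name
		DeliveryStreamEncryptionConfigurationInput: &firehose.DeliveryStreamEncryptionConfigurationInput{
			KeyType: aws.String("CUSTOMER_MANAGED_CMK"),
			KeyARN:  aws.String("arn:aws:kms:us-east-1:111122223333:key/EXAMPLE-KEY-ID"), // placeholder ARN
		},
	})
	return err
}
```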

", + "StopDeliveryStreamEncryption": "

Disables server-side encryption (SSE) for the delivery stream.

This operation is asynchronous. It returns immediately. When you invoke it, Firehose first sets the encryption status of the stream to DISABLING, and then to DISABLED. You can continue to read and write data to your stream while its status is DISABLING. It can take up to 5 seconds after the encryption status changes to DISABLED before all records written to the delivery stream are no longer subject to encryption. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively.

To check the encryption state of a delivery stream, use DescribeDeliveryStream.

If SSE is enabled using a customer managed CMK and then you invoke StopDeliveryStreamEncryption, Firehose schedules the related KMS grant for retirement and then retires it after it ensures that it is finished delivering records to the destination.

The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period.
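A sketch of disabling SSE and then reading back the encryption status with DescribeDeliveryStream, as suggested above; the stream name is a placeholder.

```go
package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/firehose"
)

// disableSSE stops encryption and checks the Status field, which moves from
// DISABLING to DISABLED asynchronously.
func disableSSE(svc *firehose.Firehose) error {
	name := aws.String("my-delivery-stream") // placeholder stream name
	if _, err := svc.StopDeliveryStreamEncryption(&firehose.StopDeliveryStreamEncryptionInput{
		DeliveryStreamName: name,
	}); err != nil {
		return err
	}
	desc, err := svc.DescribeDeliveryStream(&firehose.DescribeDeliveryStreamInput{
		DeliveryStreamName: name,
	})
	if err != nil {
		return err
	}
	if enc := desc.DeliveryStreamDescription.DeliveryStreamEncryptionConfiguration; enc != nil {
		fmt.Println("encryption status:", aws.StringValue(enc.Status))
	}
	return nil
}
```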

", "TagDeliveryStream": "

Adds or updates tags for the specified delivery stream. A tag is a key-value pair that you can define and assign to Amazon Web Services resources. If you specify a tag that already exists, the tag value is replaced with the value that you specify in the request. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the delivery stream. For more information about tags, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide.

Each delivery stream can have up to 50 tags.

This operation has a limit of five transactions per second per account.

", "UntagDeliveryStream": "

Removes tags from the specified delivery stream. Removed tags are deleted, and you can't recover them after this operation successfully completes.

If you specify a tag that doesn't exist, the operation ignores it.

This operation has a limit of five transactions per second per account.
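A sketch of adding a tag and then removing it by key; the stream name, tag key, and tag value are placeholders.

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/firehose"
)

// tagAndUntag assigns a tag to the delivery stream and then removes it again.
func tagAndUntag(svc *firehose.Firehose) error {
	name := aws.String("my-delivery-stream") // placeholder stream name
	if _, err := svc.TagDeliveryStream(&firehose.TagDeliveryStreamInput{
		DeliveryStreamName: name,
		Tags: []*firehose.Tag{
			{Key: aws.String("team"), Value: aws.String("data-platform")},
		},
	}); err != nil {
		return err
	}
	_, err := svc.UntagDeliveryStream(&firehose.UntagDeliveryStreamInput{
		DeliveryStreamName: name,
		TagKeys:            []*string{aws.String("team")},
	})
	return err
}
```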

", - "UpdateDestination": "

Updates the specified destination of the specified delivery stream.

Use this operation to change the destination type (for example, to replace the Amazon S3 destination with Amazon Redshift) or change the parameters associated with a destination (for example, to change the bucket name of the Amazon S3 destination). The update might not occur immediately. The target delivery stream remains active while the configurations are updated, so data writes to the delivery stream can continue during this process. The updated configurations are usually effective within a few minutes.

Switching between Amazon OpenSearch Service and other services is not supported. For an Amazon OpenSearch Service destination, you can only update to another Amazon OpenSearch Service destination.

If the destination type is the same, Kinesis Data Firehose merges the configuration parameters specified with the destination configuration that already exists on the delivery stream. If any of the parameters are not specified in the call, the existing values are retained. For example, in the Amazon S3 destination, if EncryptionConfiguration is not specified, then the existing EncryptionConfiguration is maintained on the destination.

If the destination type is not the same, for example, changing the destination from Amazon S3 to Amazon Redshift, Kinesis Data Firehose does not merge any parameters. In this case, all parameters must be specified.

Kinesis Data Firehose uses CurrentDeliveryStreamVersionId to avoid race conditions and conflicting merges. This is a required field, and the service updates the configuration only if the existing configuration has a version ID that matches. After the update is applied successfully, the version ID is updated, and can be retrieved using DescribeDeliveryStream. Use the new version ID to set CurrentDeliveryStreamVersionId in the next call.

" + "UpdateDestination": "

Updates the specified destination of the specified delivery stream.

Use this operation to change the destination type (for example, to replace the Amazon S3 destination with Amazon Redshift) or change the parameters associated with a destination (for example, to change the bucket name of the Amazon S3 destination). The update might not occur immediately. The target delivery stream remains active while the configurations are updated, so data writes to the delivery stream can continue during this process. The updated configurations are usually effective within a few minutes.

Switching between Amazon OpenSearch Service and other services is not supported. For an Amazon OpenSearch Service destination, you can only update to another Amazon OpenSearch Service destination.

If the destination type is the same, Firehose merges the configuration parameters specified with the destination configuration that already exists on the delivery stream. If any of the parameters are not specified in the call, the existing values are retained. For example, in the Amazon S3 destination, if EncryptionConfiguration is not specified, then the existing EncryptionConfiguration is maintained on the destination.

If the destination type is not the same, for example, changing the destination from Amazon S3 to Amazon Redshift, Firehose does not merge any parameters. In this case, all parameters must be specified.

Firehose uses CurrentDeliveryStreamVersionId to avoid race conditions and conflicting merges. This is a required field, and the service updates the configuration only if the existing configuration has a version ID that matches. After the update is applied successfully, the version ID is updated and can be retrieved using DescribeDeliveryStream. Use the new version ID to set CurrentDeliveryStreamVersionId in the next call.
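A sketch of the version-ID handshake described above: read the current VersionId and DestinationId with DescribeDeliveryStream, then update only the S3 bucket; the stream and bucket names are placeholders.

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/firehose"
)

// updateBucket changes the S3 bucket of the first destination; parameters not
// specified in ExtendedS3DestinationUpdate keep their existing values.
func updateBucket(svc *firehose.Firehose) error {
	name := aws.String("my-delivery-stream") // placeholder stream name
	desc, err := svc.DescribeDeliveryStream(&firehose.DescribeDeliveryStreamInput{
		DeliveryStreamName: name,
	})
	if err != nil {
		return err
	}
	ds := desc.DeliveryStreamDescription
	_, err = svc.UpdateDestination(&firehose.UpdateDestinationInput{
		DeliveryStreamName:             name,
		CurrentDeliveryStreamVersionId: ds.VersionId,
		DestinationId:                  ds.Destinations[0].DestinationId,
		ExtendedS3DestinationUpdate: &firehose.ExtendedS3DestinationUpdate{
			BucketARN: aws.String("arn:aws:s3:::my-new-bucket"), // placeholder bucket ARN
		},
	})
	return err
}
```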

", + "VerifyResourcesExistForTagris": null }, "shapes": { "AWSKMSKeyARN": { "base": null, "refs": { "DeliveryStreamEncryptionConfiguration$KeyARN": "

If KeyType is CUSTOMER_MANAGED_CMK, this field contains the ARN of the customer managed CMK. If KeyType is AWS_OWNED_CMK, DeliveryStreamEncryptionConfiguration doesn't contain a value for KeyARN.

", - "DeliveryStreamEncryptionConfigurationInput$KeyARN": "

If you set KeyType to CUSTOMER_MANAGED_CMK, you must specify the Amazon Resource Name (ARN) of the CMK. If you set KeyType to Amazon Web Services_OWNED_CMK, Kinesis Data Firehose uses a service-account CMK.

", + "DeliveryStreamEncryptionConfigurationInput$KeyARN": "

If you set KeyType to CUSTOMER_MANAGED_CMK, you must specify the Amazon Resource Name (ARN) of the CMK. If you set KeyType to AWS_OWNED_CMK, Firehose uses a service-account CMK.

", "KMSEncryptionConfig$AWSKMSKeyARN": "

The Amazon Resource Name (ARN) of the encryption key. Must belong to the same Amazon Web Services Region as the destination Amazon S3 bucket. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

" } }, + "AccessKeyId": { + "base": null, + "refs": { + "SessionCredentials$AccessKeyId": null + } + }, "AmazonOpenSearchServerlessBufferingHints": { "base": "

Describes the buffering to perform before delivering data to the Serverless offering for Amazon OpenSearch Service destination.

", "refs": { @@ -81,21 +89,21 @@ "AmazonOpenSearchServerlessRetryDurationInSeconds": { "base": null, "refs": { - "AmazonOpenSearchServerlessRetryOptions$DurationInSeconds": "

After an initial failure to deliver to the Serverless offering for Amazon OpenSearch Service, the total amount of time during which Kinesis Data Firehose retries delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) results in no retries.

" + "AmazonOpenSearchServerlessRetryOptions$DurationInSeconds": "

After an initial failure to deliver to the Serverless offering for Amazon OpenSearch Service, the total amount of time during which Firehose retries delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) results in no retries.

" } }, "AmazonOpenSearchServerlessRetryOptions": { - "base": "

Configures retry behavior in case Kinesis Data Firehose is unable to deliver documents to the Serverless offering for Amazon OpenSearch Service.

", + "base": "

Configures retry behavior in case Firehose is unable to deliver documents to the Serverless offering for Amazon OpenSearch Service.

", "refs": { - "AmazonOpenSearchServerlessDestinationConfiguration$RetryOptions": "

The retry behavior in case Kinesis Data Firehose is unable to deliver documents to the Serverless offering for Amazon OpenSearch Service. The default value is 300 (5 minutes).

", + "AmazonOpenSearchServerlessDestinationConfiguration$RetryOptions": "

The retry behavior in case Firehose is unable to deliver documents to the Serverless offering for Amazon OpenSearch Service. The default value is 300 (5 minutes).

", "AmazonOpenSearchServerlessDestinationDescription$RetryOptions": "

The Serverless offering for Amazon OpenSearch Service retry options.

", - "AmazonOpenSearchServerlessDestinationUpdate$RetryOptions": "

The retry behavior in case Kinesis Data Firehose is unable to deliver documents to the Serverless offering for Amazon OpenSearch Service. The default value is 300 (5 minutes).

" + "AmazonOpenSearchServerlessDestinationUpdate$RetryOptions": "

The retry behavior in case Firehose is unable to deliver documents to the Serverless offering for Amazon OpenSearch Service. The default value is 300 (5 minutes).

" } }, "AmazonOpenSearchServerlessS3BackupMode": { "base": null, "refs": { - "AmazonOpenSearchServerlessDestinationConfiguration$S3BackupMode": "

Defines how documents should be delivered to Amazon S3. When it is set to FailedDocumentsOnly, Kinesis Data Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with AmazonOpenSearchService-failed/ appended to the key prefix. When set to AllDocuments, Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents with AmazonOpenSearchService-failed/ appended to the prefix.

", + "AmazonOpenSearchServerlessDestinationConfiguration$S3BackupMode": "

Defines how documents should be delivered to Amazon S3. When it is set to FailedDocumentsOnly, Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with AmazonOpenSearchService-failed/ appended to the key prefix. When set to AllDocuments, Firehose delivers all incoming records to Amazon S3, and also writes failed documents with AmazonOpenSearchService-failed/ appended to the prefix.

", "AmazonOpenSearchServerlessDestinationDescription$S3BackupMode": "

The Amazon S3 backup mode.

" } }, @@ -123,7 +131,7 @@ "base": null, "refs": { "AmazonopensearchserviceDestinationConfiguration$ClusterEndpoint": "

The endpoint to use when communicating with the cluster. Specify either this ClusterEndpoint or the DomainARN field.

", - "AmazonopensearchserviceDestinationDescription$ClusterEndpoint": "

The endpoint to use when communicating with the cluster. Kinesis Data Firehose uses either this ClusterEndpoint or the DomainARN field to send data to Amazon OpenSearch Service.

", + "AmazonopensearchserviceDestinationDescription$ClusterEndpoint": "

The endpoint to use when communicating with the cluster. Firehose uses either this ClusterEndpoint or the DomainARN field to send data to Amazon OpenSearch Service.

", "AmazonopensearchserviceDestinationUpdate$ClusterEndpoint": "

The endpoint to use when communicating with the cluster. Specify either this ClusterEndpoint or the DomainARN field.

" } }, @@ -172,30 +180,30 @@ "AmazonopensearchserviceRetryDurationInSeconds": { "base": null, "refs": { - "AmazonopensearchserviceRetryOptions$DurationInSeconds": "

After an initial failure to deliver to Amazon OpenSearch Service, the total amount of time during which Kinesis Data Firehose retries delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) results in no retries.

" + "AmazonopensearchserviceRetryOptions$DurationInSeconds": "

After an initial failure to deliver to Amazon OpenSearch Service, the total amount of time during which Firehose retries delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) results in no retries.

" } }, "AmazonopensearchserviceRetryOptions": { - "base": "

Configures retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon OpenSearch Service.

", + "base": "

Configures retry behavior in case Firehose is unable to deliver documents to Amazon OpenSearch Service.

", "refs": { - "AmazonopensearchserviceDestinationConfiguration$RetryOptions": "

The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon OpenSearch Service. The default value is 300 (5 minutes).

", + "AmazonopensearchserviceDestinationConfiguration$RetryOptions": "

The retry behavior in case Firehose is unable to deliver documents to Amazon OpenSearch Service. The default value is 300 (5 minutes).

", "AmazonopensearchserviceDestinationDescription$RetryOptions": "

The Amazon OpenSearch Service retry options.

", - "AmazonopensearchserviceDestinationUpdate$RetryOptions": "

The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon OpenSearch Service. The default value is 300 (5 minutes).

" + "AmazonopensearchserviceDestinationUpdate$RetryOptions": "

The retry behavior in case Firehose is unable to deliver documents to Amazon OpenSearch Service. The default value is 300 (5 minutes).

" } }, "AmazonopensearchserviceS3BackupMode": { "base": null, "refs": { - "AmazonopensearchserviceDestinationConfiguration$S3BackupMode": "

Defines how documents should be delivered to Amazon S3. When it is set to FailedDocumentsOnly, Kinesis Data Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with AmazonOpenSearchService-failed/ appended to the key prefix. When set to AllDocuments, Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents with AmazonOpenSearchService-failed/ appended to the prefix.

", + "AmazonopensearchserviceDestinationConfiguration$S3BackupMode": "

Defines how documents should be delivered to Amazon S3. When it is set to FailedDocumentsOnly, Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with AmazonOpenSearchService-failed/ appended to the key prefix. When set to AllDocuments, Firehose delivers all incoming records to Amazon S3, and also writes failed documents with AmazonOpenSearchService-failed/ appended to the prefix.

", "AmazonopensearchserviceDestinationDescription$S3BackupMode": "

The Amazon S3 backup mode.

" } }, "AmazonopensearchserviceTypeName": { "base": null, "refs": { - "AmazonopensearchserviceDestinationConfiguration$TypeName": "

The Amazon OpenSearch Service type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Kinesis Data Firehose returns an error during run time.

", + "AmazonopensearchserviceDestinationConfiguration$TypeName": "

The Amazon OpenSearch Service type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Firehose returns an error during run time.

", "AmazonopensearchserviceDestinationDescription$TypeName": "

The Amazon OpenSearch Service type name. This applies to Elasticsearch 6.x and lower versions. For Elasticsearch 7.x and OpenSearch Service 1.x, there's no value for TypeName.

", - "AmazonopensearchserviceDestinationUpdate$TypeName": "

The Amazon OpenSearch Service type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Kinesis Data Firehose returns an error during runtime.

If you upgrade Elasticsearch from 6.x to 7.x and don’t update your delivery stream, Kinesis Data Firehose still delivers data to Elasticsearch with the old index name and type name. If you want to update your delivery stream with a new index name, provide an empty string for TypeName.

" + "AmazonopensearchserviceDestinationUpdate$TypeName": "

The Amazon OpenSearch Service type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Firehose returns an error during runtime.

If you upgrade Elasticsearch from 6.x to 7.x and don’t update your delivery stream, Firehose still delivers data to Elasticsearch with the old index name and type name. If you want to update your delivery stream with a new index name, provide an empty string for TypeName.

" } }, "AuthenticationConfiguration": { @@ -208,8 +216,8 @@ "BlockSizeBytes": { "base": null, "refs": { - "OrcSerDe$BlockSizeBytes": "

The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.

", - "ParquetSerDe$BlockSizeBytes": "

The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.

" + "OrcSerDe$BlockSizeBytes": "

The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Firehose uses this value for padding calculations.

", + "ParquetSerDe$BlockSizeBytes": "

The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Firehose uses this value for padding calculations.

" } }, "BooleanObject": { @@ -217,13 +225,13 @@ "refs": { "CloudWatchLoggingOptions$Enabled": "

Enables or disables CloudWatch logging.

", "DataFormatConversionConfiguration$Enabled": "

Defaults to true. Set it to false if you want to disable format conversion while preserving the configuration details.

", - "DeleteDeliveryStreamInput$AllowForceDelete": "

Set this to true if you want to delete the delivery stream even if Kinesis Data Firehose is unable to retire the grant for the CMK. Kinesis Data Firehose might be unable to retire the grant due to a customer error, such as when the CMK or the grant are in an invalid state. If you force deletion, you can then use the RevokeGrant operation to revoke the grant you gave to Kinesis Data Firehose. If a failure to retire the grant happens due to an Amazon Web Services KMS issue, Kinesis Data Firehose keeps retrying the delete operation.

The default value is false.

", + "DeleteDeliveryStreamInput$AllowForceDelete": "

Set this to true if you want to delete the delivery stream even if Firehose is unable to retire the grant for the CMK. Firehose might be unable to retire the grant due to a customer error, such as when the CMK or the grant is in an invalid state. If you force deletion, you can then use the RevokeGrant operation to revoke the grant you gave to Firehose. If a failure to retire the grant happens due to an Amazon Web Services KMS issue, Firehose keeps retrying the delete operation.

The default value is false.

", "DeliveryStreamDescription$HasMoreDestinations": "

Indicates whether there are more destinations available to list.

", - "DynamicPartitioningConfiguration$Enabled": "

Specifies that the dynamic partitioning is enabled for this Kinesis Data Firehose delivery stream.

", + "DynamicPartitioningConfiguration$Enabled": "

Specifies whether dynamic partitioning is enabled for this Firehose delivery stream.

", "ListDeliveryStreamsOutput$HasMoreDeliveryStreams": "

Indicates whether there are more delivery streams available to list.

", "ListTagsForDeliveryStreamOutput$HasMoreTags": "

If this is true in the response, more tags are available. To list the remaining tags, set ExclusiveStartTagKey to the key of the last tag returned and call ListTagsForDeliveryStream again.

", - "OpenXJsonSerDe$ConvertDotsInJsonKeysToUnderscores": "

When set to true, specifies that the names of the keys include dots and that you want Kinesis Data Firehose to replace them with underscores. This is useful because Apache Hive does not allow dots in column names. For example, if the JSON contains a key whose name is \"a.b\", you can define the column name to be \"a_b\" when using this option.

The default is false.

", - "OpenXJsonSerDe$CaseInsensitive": "

When set to true, which is the default, Kinesis Data Firehose converts JSON keys to lowercase before deserializing them.

", + "OpenXJsonSerDe$ConvertDotsInJsonKeysToUnderscores": "

When set to true, specifies that the names of the keys include dots and that you want Firehose to replace them with underscores. This is useful because Apache Hive does not allow dots in column names. For example, if the JSON contains a key whose name is \"a.b\", you can define the column name to be \"a_b\" when using this option.

The default is false.

", + "OpenXJsonSerDe$CaseInsensitive": "

When set to true, which is the default, Firehose converts JSON keys to lowercase before deserializing them.

", "OrcSerDe$EnablePadding": "

Set this to true to indicate that you want stripes to be padded to the HDFS block boundaries. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is false.

", "ParquetSerDe$EnableDictionaryCompression": "

Indicates whether to enable dictionary compression.

", "ProcessingConfiguration$Enabled": "

Enables or disables data processing.

", @@ -244,7 +252,7 @@ } }, "BufferingHints": { - "base": "

Describes hints for the buffering to perform before delivering data to the destination. These options are treated as hints, and therefore Kinesis Data Firehose might choose to use different values when it is optimal. The SizeInMBs and IntervalInSeconds parameters are optional. However, if specify a value for one of them, you must also provide a value for the other.

", + "base": "

Describes hints for the buffering to perform before delivering data to the destination. These options are treated as hints, and therefore Firehose might choose to use different values when it is optimal. The SizeInMBs and IntervalInSeconds parameters are optional. However, if you specify a value for one of them, you must also provide a value for the other.
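A sketch of a BufferingHints value that sets both optional fields, since specifying one requires the other; the 5 MiB / 300 second values are placeholders.

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/firehose"
)

// exampleHints asks Firehose to flush at roughly 5 MiB or 300 seconds,
// whichever comes first; Firehose may still choose different values.
func exampleHints() *firehose.BufferingHints {
	return &firehose.BufferingHints{
		SizeInMBs:         aws.Int64(5),
		IntervalInSeconds: aws.Int64(300),
	}
}
```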

", "refs": { "ExtendedS3DestinationConfiguration$BufferingHints": "

The buffering option.

", "ExtendedS3DestinationDescription$BufferingHints": "

The buffering option.

", @@ -325,7 +333,7 @@ "ContentEncoding": { "base": null, "refs": { - "HttpEndpointRequestConfiguration$ContentEncoding": "

Kinesis Data Firehose uses the content encoding to compress the body of a request before sending the request to the destination. For more information, see Content-Encoding in MDN Web Docs, the official Mozilla documentation.

" + "HttpEndpointRequestConfiguration$ContentEncoding": "

Firehose uses the content encoding to compress the body of a request before sending the request to the destination. For more information, see Content-Encoding in MDN Web Docs, the official Mozilla documentation.

" } }, "CopyCommand": { @@ -339,7 +347,7 @@ "CopyOptions": { "base": null, "refs": { - "CopyCommand$CopyOptions": "

Optional parameters to use with the Amazon Redshift COPY command. For more information, see the \"Optional Parameters\" section of Amazon Redshift COPY command. Some possible examples that would apply to Kinesis Data Firehose are as follows:

delimiter '\\t' lzop; - fields are delimited with \"\\t\" (TAB character) and compressed using lzop.

delimiter '|' - fields are delimited with \"|\" (this is the default delimiter).

delimiter '|' escape - the delimiter should be escaped.

fixedwidth 'venueid:3,venuename:25,venuecity:12,venuestate:2,venueseats:6' - fields are fixed width in the source, with each width specified after every column in the table.

JSON 's3://mybucket/jsonpaths.txt' - data is in JSON format, and the path specified is the format of the data.

For more examples, see Amazon Redshift COPY command examples.

" + "CopyCommand$CopyOptions": "

Optional parameters to use with the Amazon Redshift COPY command. For more information, see the \"Optional Parameters\" section of Amazon Redshift COPY command. Some possible examples that would apply to Firehose are as follows:

delimiter '\\t' lzop; - fields are delimited with \"\\t\" (TAB character) and compressed using lzop.

delimiter '|' - fields are delimited with \"|\" (this is the default delimiter).

delimiter '|' escape - the delimiter should be escaped.

fixedwidth 'venueid:3,venuename:25,venuecity:12,venuestate:2,venueseats:6' - fields are fixed width in the source, with each width specified after every column in the table.

JSON 's3://mybucket/jsonpaths.txt' - data is in JSON format, and the path specified is the format of the data.

For more examples, see Amazon Redshift COPY command examples.

" } }, "CreateDeliveryStreamInput": { @@ -352,6 +360,14 @@ "refs": { } }, + "CustomTimeZone": { + "base": null, + "refs": { + "ExtendedS3DestinationConfiguration$CustomTimeZone": "

The time zone you prefer. UTC is the default.

", + "ExtendedS3DestinationDescription$CustomTimeZone": "

The time zone you prefer. UTC is the default.

", + "ExtendedS3DestinationUpdate$CustomTimeZone": "

The time zone you prefer. UTC is the default.

" + } + }, "Data": { "base": null, "refs": { @@ -359,7 +375,7 @@ } }, "DataFormatConversionConfiguration": { - "base": "

Specifies that you want Kinesis Data Firehose to convert data from the JSON format to the Parquet or ORC format before writing it to Amazon S3. Kinesis Data Firehose uses the serializer and deserializer that you specify, in addition to the column information from the Amazon Web Services Glue table, to deserialize your input data from JSON and then serialize it to the Parquet or ORC format. For more information, see Kinesis Data Firehose Record Format Conversion.

", + "base": "

Specifies that you want Firehose to convert data from the JSON format to the Parquet or ORC format before writing it to Amazon S3. Firehose uses the serializer and deserializer that you specify, in addition to the column information from the Amazon Web Services Glue table, to deserialize your input data from JSON and then serialize it to the Parquet or ORC format. For more information, see Firehose Record Format Conversion.
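A sketch of a record format conversion configuration of the kind described above, deserializing JSON with the OpenX SerDe and serializing to Parquet against a Glue table; the database, table, role, and Region values are placeholders.

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/firehose"
)

// jsonToParquet converts incoming JSON to Parquet using a Glue table's schema.
func jsonToParquet() *firehose.DataFormatConversionConfiguration {
	return &firehose.DataFormatConversionConfiguration{
		Enabled: aws.Bool(true),
		InputFormatConfiguration: &firehose.InputFormatConfiguration{
			Deserializer: &firehose.Deserializer{
				OpenXJsonSerDe: &firehose.OpenXJsonSerDe{CaseInsensitive: aws.Bool(true)},
			},
		},
		OutputFormatConfiguration: &firehose.OutputFormatConfiguration{
			Serializer: &firehose.Serializer{ParquetSerDe: &firehose.ParquetSerDe{}},
		},
		SchemaConfiguration: &firehose.SchemaConfiguration{
			DatabaseName: aws.String("my_glue_db"),    // placeholder Glue database
			TableName:    aws.String("my_glue_table"), // placeholder Glue table
			RoleARN:      aws.String("arn:aws:iam::111122223333:role/firehose-glue-role"), // placeholder role
			Region:       aws.String("us-east-1"),
			VersionId:    aws.String("LATEST"),
		},
	}
}
```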

", "refs": { "ExtendedS3DestinationConfiguration$DataFormatConversionConfiguration": "

The serializer, deserializer, and schema for converting data from the JSON format to the Parquet or ORC format before writing it to Amazon S3.

", "ExtendedS3DestinationDescription$DataFormatConversionConfiguration": "

The serializer, deserializer, and schema for converting data from the JSON format to the Parquet or ORC format before writing it to Amazon S3.

", @@ -381,7 +397,7 @@ "DefaultDocumentIdFormat": { "base": null, "refs": { - "DocumentIdOptions$DefaultDocumentIdFormat": "

When the FIREHOSE_DEFAULT option is chosen, Kinesis Data Firehose generates a unique document ID for each record based on a unique internal identifier. The generated document ID is stable across multiple delivery attempts, which helps prevent the same record from being indexed multiple times with different document IDs.

When the NO_DOCUMENT_ID option is chosen, Kinesis Data Firehose does not include any document IDs in the requests it sends to the Amazon OpenSearch Service. This causes the Amazon OpenSearch Service domain to generate document IDs. In case of multiple delivery attempts, this may cause the same record to be indexed more than once with different document IDs. This option enables write-heavy operations, such as the ingestion of logs and observability data, to consume less resources in the Amazon OpenSearch Service domain, resulting in improved performance.

" + "DocumentIdOptions$DefaultDocumentIdFormat": "

When the FIREHOSE_DEFAULT option is chosen, Firehose generates a unique document ID for each record based on a unique internal identifier. The generated document ID is stable across multiple delivery attempts, which helps prevent the same record from being indexed multiple times with different document IDs.

When the NO_DOCUMENT_ID option is chosen, Firehose does not include any document IDs in the requests it sends to the Amazon OpenSearch Service. This causes the Amazon OpenSearch Service domain to generate document IDs. In case of multiple delivery attempts, this may cause the same record to be indexed more than once with different document IDs. This option enables write-heavy operations, such as the ingestion of logs and observability data, to consume fewer resources in the Amazon OpenSearch Service domain, resulting in improved performance.
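A sketch of the NO_DOCUMENT_ID choice described above; the literal string value is taken from the option name in the documentation.

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/firehose"
)

// noDocumentID lets the OpenSearch domain generate document IDs, trading
// possible duplicates on retry for lower indexing overhead.
func noDocumentID() *firehose.DocumentIdOptions {
	return &firehose.DocumentIdOptions{
		DefaultDocumentIdFormat: aws.String("NO_DOCUMENT_ID"),
	}
}
```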

" } }, "DeleteDeliveryStreamInput": { @@ -397,15 +413,16 @@ "DeliveryStartTimestamp": { "base": null, "refs": { - "KinesisStreamSourceDescription$DeliveryStartTimestamp": "

Kinesis Data Firehose starts retrieving records from the Kinesis data stream starting with this timestamp.

", - "MSKSourceDescription$DeliveryStartTimestamp": "

Kinesis Data Firehose starts retrieving records from the topic within the Amazon MSK cluster starting with this timestamp.

" + "KinesisStreamSourceDescription$DeliveryStartTimestamp": "

Firehose starts retrieving records from the Kinesis data stream starting with this timestamp.

", + "MSKSourceDescription$DeliveryStartTimestamp": "

Firehose starts retrieving records from the topic within the Amazon MSK cluster starting with this timestamp.

" } }, "DeliveryStreamARN": { "base": null, "refs": { "CreateDeliveryStreamOutput$DeliveryStreamARN": "

The ARN of the delivery stream.

", - "DeliveryStreamDescription$DeliveryStreamARN": "

The Amazon Resource Name (ARN) of the delivery stream. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

" + "DeliveryStreamDescription$DeliveryStreamARN": "

The Amazon Resource Name (ARN) of the delivery stream. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

", + "GetKinesisStreamInput$DeliveryStreamARN": null } }, "DeliveryStreamDescription": { @@ -502,7 +519,7 @@ } }, "Deserializer": { - "base": "

The deserializer you want Kinesis Data Firehose to use for converting the input data from JSON. Kinesis Data Firehose then serializes the data to its final format using the Serializer. Kinesis Data Firehose supports two types of deserializers: the Apache Hive JSON SerDe and the OpenX JSON SerDe.

", + "base": "

The deserializer you want Firehose to use for converting the input data from JSON. Firehose then serializes the data to its final format using the Serializer. Firehose supports two types of deserializers: the Apache Hive JSON SerDe and the OpenX JSON SerDe.

", "refs": { "InputFormatConfiguration$Deserializer": "

Specifies which deserializer to use. You can choose either the Apache Hive JSON SerDe or the OpenX JSON SerDe. If both are non-null, the server rejects the request.

" } @@ -522,20 +539,20 @@ "DestinationId": { "base": null, "refs": { - "DescribeDeliveryStreamInput$ExclusiveStartDestinationId": "

The ID of the destination to start returning the destination information. Kinesis Data Firehose supports one destination per delivery stream.

", + "DescribeDeliveryStreamInput$ExclusiveStartDestinationId": "

The ID of the destination to start returning the destination information. Firehose supports one destination per delivery stream.

", "DestinationDescription$DestinationId": "

The ID of the destination.

", "UpdateDestinationInput$DestinationId": "

The ID of the destination.

" } }, "DocumentIdOptions": { - "base": "

Indicates the method for setting up document ID. The supported methods are Kinesis Data Firehose generated document ID and OpenSearch Service generated document ID.

", + "base": "

Indicates the method for setting up document ID. The supported methods are Firehose generated document ID and OpenSearch Service generated document ID.

", "refs": { - "AmazonopensearchserviceDestinationConfiguration$DocumentIdOptions": "

Indicates the method for setting up document ID. The supported methods are Kinesis Data Firehose generated document ID and OpenSearch Service generated document ID.

", - "AmazonopensearchserviceDestinationDescription$DocumentIdOptions": "

Indicates the method for setting up document ID. The supported methods are Kinesis Data Firehose generated document ID and OpenSearch Service generated document ID.

", - "AmazonopensearchserviceDestinationUpdate$DocumentIdOptions": "

Indicates the method for setting up document ID. The supported methods are Kinesis Data Firehose generated document ID and OpenSearch Service generated document ID.

", - "ElasticsearchDestinationConfiguration$DocumentIdOptions": "

Indicates the method for setting up document ID. The supported methods are Kinesis Data Firehose generated document ID and OpenSearch Service generated document ID.

", - "ElasticsearchDestinationDescription$DocumentIdOptions": "

Indicates the method for setting up document ID. The supported methods are Kinesis Data Firehose generated document ID and OpenSearch Service generated document ID.

", - "ElasticsearchDestinationUpdate$DocumentIdOptions": "

Indicates the method for setting up document ID. The supported methods are Kinesis Data Firehose generated document ID and OpenSearch Service generated document ID.

" + "AmazonopensearchserviceDestinationConfiguration$DocumentIdOptions": "

Indicates the method for setting up document ID. The supported methods are Firehose generated document ID and OpenSearch Service generated document ID.

", + "AmazonopensearchserviceDestinationDescription$DocumentIdOptions": "

Indicates the method for setting up document ID. The supported methods are Firehose generated document ID and OpenSearch Service generated document ID.

", + "AmazonopensearchserviceDestinationUpdate$DocumentIdOptions": "

Indicates the method for setting up document ID. The supported methods are Firehose generated document ID and OpenSearch Service generated document ID.

", + "ElasticsearchDestinationConfiguration$DocumentIdOptions": "

Indicates the method for setting up document ID. The supported methods are Firehose generated document ID and OpenSearch Service generated document ID.

", + "ElasticsearchDestinationDescription$DocumentIdOptions": "

Indicates the method for setting up document ID. The supported methods are Firehose generated document ID and OpenSearch Service generated document ID.

", + "ElasticsearchDestinationUpdate$DocumentIdOptions": "

Indicates the method for setting up document ID. The supported methods are Firehose generated document ID and OpenSearch Service generated document ID.

" } }, "DynamicPartitioningConfiguration": { @@ -570,7 +587,7 @@ "base": null, "refs": { "ElasticsearchDestinationConfiguration$ClusterEndpoint": "

The endpoint to use when communicating with the cluster. Specify either this ClusterEndpoint or the DomainARN field.

", - "ElasticsearchDestinationDescription$ClusterEndpoint": "

The endpoint to use when communicating with the cluster. Kinesis Data Firehose uses either this ClusterEndpoint or the DomainARN field to send data to Amazon ES.

", + "ElasticsearchDestinationDescription$ClusterEndpoint": "

The endpoint to use when communicating with the cluster. Firehose uses either this ClusterEndpoint or the DomainARN field to send data to Amazon ES.

", "ElasticsearchDestinationUpdate$ClusterEndpoint": "

The endpoint to use when communicating with the cluster. Specify either this ClusterEndpoint or the DomainARN field.

" } }, @@ -596,7 +613,7 @@ "base": null, "refs": { "ElasticsearchDestinationConfiguration$DomainARN": "

The ARN of the Amazon ES domain. The IAM role must have permissions for DescribeDomain, DescribeDomains, and DescribeDomainConfig after assuming the role specified in RoleARN. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

Specify either ClusterEndpoint or DomainARN.

", - "ElasticsearchDestinationDescription$DomainARN": "

The ARN of the Amazon ES domain. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

Kinesis Data Firehose uses either ClusterEndpoint or DomainARN to send data to Amazon ES.

", + "ElasticsearchDestinationDescription$DomainARN": "

The ARN of the Amazon ES domain. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

Firehose uses either ClusterEndpoint or DomainARN to send data to Amazon ES.

", "ElasticsearchDestinationUpdate$DomainARN": "

The ARN of the Amazon ES domain. The IAM role must have permissions for DescribeDomain, DescribeDomains, and DescribeDomainConfig after assuming the IAM role specified in RoleARN. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

Specify either ClusterEndpoint or DomainARN.

" } }, @@ -619,30 +636,30 @@ "ElasticsearchRetryDurationInSeconds": { "base": null, "refs": { - "ElasticsearchRetryOptions$DurationInSeconds": "

After an initial failure to deliver to Amazon ES, the total amount of time during which Kinesis Data Firehose retries delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) results in no retries.

" + "ElasticsearchRetryOptions$DurationInSeconds": "

After an initial failure to deliver to Amazon ES, the total amount of time during which Firehose retries delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) results in no retries.

" } }, "ElasticsearchRetryOptions": { - "base": "

Configures retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon ES.

", + "base": "

Configures retry behavior in case Firehose is unable to deliver documents to Amazon ES.

", "refs": { - "ElasticsearchDestinationConfiguration$RetryOptions": "

The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon ES. The default value is 300 (5 minutes).

", + "ElasticsearchDestinationConfiguration$RetryOptions": "

The retry behavior in case Firehose is unable to deliver documents to Amazon ES. The default value is 300 (5 minutes).

", "ElasticsearchDestinationDescription$RetryOptions": "

The Amazon ES retry options.

", - "ElasticsearchDestinationUpdate$RetryOptions": "

The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon ES. The default value is 300 (5 minutes).

" + "ElasticsearchDestinationUpdate$RetryOptions": "

The retry behavior in case Firehose is unable to deliver documents to Amazon ES. The default value is 300 (5 minutes).

" } }, "ElasticsearchS3BackupMode": { "base": null, "refs": { - "ElasticsearchDestinationConfiguration$S3BackupMode": "

Defines how documents should be delivered to Amazon S3. When it is set to FailedDocumentsOnly, Kinesis Data Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with AmazonOpenSearchService-failed/ appended to the key prefix. When set to AllDocuments, Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents with AmazonOpenSearchService-failed/ appended to the prefix. For more information, see Amazon S3 Backup for the Amazon ES Destination. Default value is FailedDocumentsOnly.

You can't change this backup mode after you create the delivery stream.

", + "ElasticsearchDestinationConfiguration$S3BackupMode": "

Defines how documents should be delivered to Amazon S3. When it is set to FailedDocumentsOnly, Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with AmazonOpenSearchService-failed/ appended to the key prefix. When set to AllDocuments, Firehose delivers all incoming records to Amazon S3, and also writes failed documents with AmazonOpenSearchService-failed/ appended to the prefix. For more information, see Amazon S3 Backup for the Amazon ES Destination. Default value is FailedDocumentsOnly.

You can't change this backup mode after you create the delivery stream.

", "ElasticsearchDestinationDescription$S3BackupMode": "

The Amazon S3 backup mode.

" } }, "ElasticsearchTypeName": { "base": null, "refs": { - "ElasticsearchDestinationConfiguration$TypeName": "

The Elasticsearch type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Kinesis Data Firehose returns an error during run time.

For Elasticsearch 7.x, don't specify a TypeName.

", + "ElasticsearchDestinationConfiguration$TypeName": "

The Elasticsearch type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Firehose returns an error during run time.

For Elasticsearch 7.x, don't specify a TypeName.

", "ElasticsearchDestinationDescription$TypeName": "

The Elasticsearch type name. This applies to Elasticsearch 6.x and lower versions. For Elasticsearch 7.x and OpenSearch Service 1.x, there's no value for TypeName.

", - "ElasticsearchDestinationUpdate$TypeName": "

The Elasticsearch type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Kinesis Data Firehose returns an error during runtime.

If you upgrade Elasticsearch from 6.x to 7.x and don’t update your delivery stream, Kinesis Data Firehose still delivers data to Elasticsearch with the old index name and type name. If you want to update your delivery stream with a new index name, provide an empty string for TypeName.

" + "ElasticsearchDestinationUpdate$TypeName": "

The Elasticsearch type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Firehose returns an error during runtime.

If you upgrade Elasticsearch from 6.x to 7.x and don’t update your delivery stream, Firehose still delivers data to Elasticsearch with the old index name and type name. If you want to update your delivery stream with a new index name, provide an empty string for TypeName.

" } }, "EncryptionConfiguration": { @@ -671,6 +688,7 @@ "InvalidArgumentException$message": "

A message that provides information about the error.

", "InvalidKMSResourceException$message": null, "InvalidSourceException$message": null, + "InvalidStreamTypeException$message": null, "LimitExceededException$message": "

A message that provides information about the error.

", "PutRecordBatchResponseEntry$ErrorMessage": "

The error message for an individual record result.

", "ResourceInUseException$message": "

A message that provides information about the error.

", @@ -681,12 +699,12 @@ "ErrorOutputPrefix": { "base": null, "refs": { - "ExtendedS3DestinationConfiguration$ErrorOutputPrefix": "

A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.

", - "ExtendedS3DestinationDescription$ErrorOutputPrefix": "

A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.

", - "ExtendedS3DestinationUpdate$ErrorOutputPrefix": "

A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.

", - "S3DestinationConfiguration$ErrorOutputPrefix": "

A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.

", - "S3DestinationDescription$ErrorOutputPrefix": "

A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.

", - "S3DestinationUpdate$ErrorOutputPrefix": "

A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.

" + "ExtendedS3DestinationConfiguration$ErrorOutputPrefix": "

A prefix that Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.

", + "ExtendedS3DestinationDescription$ErrorOutputPrefix": "

A prefix that Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.

", + "ExtendedS3DestinationUpdate$ErrorOutputPrefix": "

A prefix that Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.

", + "S3DestinationConfiguration$ErrorOutputPrefix": "

A prefix that Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.

", + "S3DestinationDescription$ErrorOutputPrefix": "

A prefix that Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.

", + "S3DestinationUpdate$ErrorOutputPrefix": "

A prefix that Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.

" } }, "ExtendedS3DestinationConfiguration": { @@ -714,20 +732,44 @@ "DeliveryStreamEncryptionConfiguration$FailureDescription": "

Provides details in case one of the following operations fails due to an error related to KMS: CreateDeliveryStream, DeleteDeliveryStream, StartDeliveryStreamEncryption, StopDeliveryStreamEncryption.

" } }, + "FileExtension": { + "base": null, + "refs": { + "ExtendedS3DestinationConfiguration$FileExtension": "

Specify a file extension. It will override the default file extension.

", + "ExtendedS3DestinationDescription$FileExtension": "

Specify a file extension. It will override the default file extension.

", + "ExtendedS3DestinationUpdate$FileExtension": "

Specify a file extension. It will override the default file extension.
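A sketch combining the new FileExtension field with the CustomTimeZone field added in this release, as part of an ExtendedS3DestinationUpdate passed to UpdateDestination; the time zone and extension values are placeholders.

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/firehose"
)

// s3Update overrides the default UTC time zone and the default file extension
// for an existing S3 destination.
func s3Update() *firehose.ExtendedS3DestinationUpdate {
	return &firehose.ExtendedS3DestinationUpdate{
		CustomTimeZone: aws.String("Asia/Kolkata"), // default is UTC
		FileExtension:  aws.String(".json"),        // overrides the default extension
	}
}
```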

" + } + }, + "FirehoseSource": { + "base": null, + "refs": { + "InvalidStreamTypeException$source": null + } + }, + "GetKinesisStreamInput": { + "base": null, + "refs": { + } + }, + "GetKinesisStreamOutput": { + "base": null, + "refs": { + } + }, "HECAcknowledgmentTimeoutInSeconds": { "base": null, "refs": { - "SplunkDestinationConfiguration$HECAcknowledgmentTimeoutInSeconds": "

The amount of time that Kinesis Data Firehose waits to receive an acknowledgment from Splunk after it sends it data. At the end of the timeout period, Kinesis Data Firehose either tries to send the data again or considers it an error, based on your retry settings.

", - "SplunkDestinationDescription$HECAcknowledgmentTimeoutInSeconds": "

The amount of time that Kinesis Data Firehose waits to receive an acknowledgment from Splunk after it sends it data. At the end of the timeout period, Kinesis Data Firehose either tries to send the data again or considers it an error, based on your retry settings.

", - "SplunkDestinationUpdate$HECAcknowledgmentTimeoutInSeconds": "

The amount of time that Kinesis Data Firehose waits to receive an acknowledgment from Splunk after it sends data. At the end of the timeout period, Kinesis Data Firehose either tries to send the data again or considers it an error, based on your retry settings.

" + "SplunkDestinationConfiguration$HECAcknowledgmentTimeoutInSeconds": "

The amount of time that Firehose waits to receive an acknowledgment from Splunk after it sends data. At the end of the timeout period, Firehose either tries to send the data again or considers it an error, based on your retry settings.

", + "SplunkDestinationDescription$HECAcknowledgmentTimeoutInSeconds": "

The amount of time that Firehose waits to receive an acknowledgment from Splunk after it sends data. At the end of the timeout period, Firehose either tries to send the data again or considers it an error, based on your retry settings.

", + "SplunkDestinationUpdate$HECAcknowledgmentTimeoutInSeconds": "

The amount of time that Firehose waits to receive an acknowledgment from Splunk after it sends data. At the end of the timeout period, Firehose either tries to send the data again or considers it an error, based on your retry settings.

" } }, "HECEndpoint": { "base": null, "refs": { - "SplunkDestinationConfiguration$HECEndpoint": "

The HTTP Event Collector (HEC) endpoint to which Kinesis Data Firehose sends your data.

", - "SplunkDestinationDescription$HECEndpoint": "

The HTTP Event Collector (HEC) endpoint to which Kinesis Data Firehose sends your data.

", - "SplunkDestinationUpdate$HECEndpoint": "

The HTTP Event Collector (HEC) endpoint to which Kinesis Data Firehose sends your data.

" + "SplunkDestinationConfiguration$HECEndpoint": "

The HTTP Event Collector (HEC) endpoint to which Firehose sends your data.

", + "SplunkDestinationDescription$HECEndpoint": "

The HTTP Event Collector (HEC) endpoint to which Firehose sends your data.

", + "SplunkDestinationUpdate$HECEndpoint": "

The HTTP Event Collector (HEC) endpoint to which Firehose sends your data.

" } }, "HECEndpointType": { @@ -747,9 +789,9 @@ } }, "HiveJsonSerDe": { - "base": "

The native Hive / HCatalog JsonSerDe. Used by Kinesis Data Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the OpenX SerDe.

", + "base": "

The native Hive / HCatalog JsonSerDe. Used by Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the OpenX SerDe.

", "refs": { - "Deserializer$HiveJsonSerDe": "

The native Hive / HCatalog JsonSerDe. Used by Kinesis Data Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the OpenX SerDe.

" + "Deserializer$HiveJsonSerDe": "

The native Hive / HCatalog JsonSerDe. Used by Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the OpenX SerDe.

" } }, "HttpEndpointAccessKey": { @@ -771,11 +813,11 @@ } }, "HttpEndpointBufferingHints": { - "base": "

Describes the buffering options that can be applied before data is delivered to the HTTP endpoint destination. Kinesis Data Firehose treats these options as hints, and it might choose to use more optimal values. The SizeInMBs and IntervalInSeconds parameters are optional. However, if specify a value for one of them, you must also provide a value for the other.

", + "base": "

Describes the buffering options that can be applied before data is delivered to the HTTP endpoint destination. Firehose treats these options as hints, and it might choose to use more optimal values. The SizeInMBs and IntervalInSeconds parameters are optional. However, if you specify a value for one of them, you must also provide a value for the other.

", "refs": { - "HttpEndpointDestinationConfiguration$BufferingHints": "

The buffering options that can be used before data is delivered to the specified destination. Kinesis Data Firehose treats these options as hints, and it might choose to use more optimal values. The SizeInMBs and IntervalInSeconds parameters are optional. However, if you specify a value for one of them, you must also provide a value for the other.

", - "HttpEndpointDestinationDescription$BufferingHints": "

Describes buffering options that can be applied to the data before it is delivered to the HTTPS endpoint destination. Kinesis Data Firehose teats these options as hints, and it might choose to use more optimal values. The SizeInMBs and IntervalInSeconds parameters are optional. However, if specify a value for one of them, you must also provide a value for the other.

", - "HttpEndpointDestinationUpdate$BufferingHints": "

Describes buffering options that can be applied to the data before it is delivered to the HTTPS endpoint destination. Kinesis Data Firehose teats these options as hints, and it might choose to use more optimal values. The SizeInMBs and IntervalInSeconds parameters are optional. However, if specify a value for one of them, you must also provide a value for the other.

" + "HttpEndpointDestinationConfiguration$BufferingHints": "

The buffering options that can be used before data is delivered to the specified destination. Firehose treats these options as hints, and it might choose to use more optimal values. The SizeInMBs and IntervalInSeconds parameters are optional. However, if you specify a value for one of them, you must also provide a value for the other.

", + "HttpEndpointDestinationDescription$BufferingHints": "

Describes buffering options that can be applied to the data before it is delivered to the HTTPS endpoint destination. Firehose treats these options as hints, and it might choose to use more optimal values. The SizeInMBs and IntervalInSeconds parameters are optional. However, if you specify a value for one of them, you must also provide a value for the other.

", + "HttpEndpointDestinationUpdate$BufferingHints": "

Describes buffering options that can be applied to the data before it is delivered to the HTTPS endpoint destination. Firehose treats these options as hints, and it might choose to use more optimal values. The SizeInMBs and IntervalInSeconds parameters are optional. However, if you specify a value for one of them, you must also provide a value for the other.

" } }, "HttpEndpointBufferingIntervalInSeconds": { @@ -851,23 +893,23 @@ "HttpEndpointRetryDurationInSeconds": { "base": null, "refs": { - "HttpEndpointRetryOptions$DurationInSeconds": "

The total amount of time that Kinesis Data Firehose spends on retries. This duration starts after the initial attempt to send data to the custom destination via HTTPS endpoint fails. It doesn't include the periods during which Kinesis Data Firehose waits for acknowledgment from the specified destination after each attempt.

" + "HttpEndpointRetryOptions$DurationInSeconds": "

The total amount of time that Firehose spends on retries. This duration starts after the initial attempt to send data to the custom destination via HTTPS endpoint fails. It doesn't include the periods during which Firehose waits for acknowledgment from the specified destination after each attempt.

" } }, "HttpEndpointRetryOptions": { - "base": "

Describes the retry behavior in case Kinesis Data Firehose is unable to deliver data to the specified HTTP endpoint destination, or if it doesn't receive a valid acknowledgment of receipt from the specified HTTP endpoint destination.

", + "base": "

Describes the retry behavior in case Firehose is unable to deliver data to the specified HTTP endpoint destination, or if it doesn't receive a valid acknowledgment of receipt from the specified HTTP endpoint destination.

", "refs": { - "HttpEndpointDestinationConfiguration$RetryOptions": "

Describes the retry behavior in case Kinesis Data Firehose is unable to deliver data to the specified HTTP endpoint destination, or if it doesn't receive a valid acknowledgment of receipt from the specified HTTP endpoint destination.

", - "HttpEndpointDestinationDescription$RetryOptions": "

Describes the retry behavior in case Kinesis Data Firehose is unable to deliver data to the specified HTTP endpoint destination, or if it doesn't receive a valid acknowledgment of receipt from the specified HTTP endpoint destination.

", - "HttpEndpointDestinationUpdate$RetryOptions": "

Describes the retry behavior in case Kinesis Data Firehose is unable to deliver data to the specified HTTP endpoint destination, or if it doesn't receive a valid acknowledgment of receipt from the specified HTTP endpoint destination.

" + "HttpEndpointDestinationConfiguration$RetryOptions": "

Describes the retry behavior in case Firehose is unable to deliver data to the specified HTTP endpoint destination, or if it doesn't receive a valid acknowledgment of receipt from the specified HTTP endpoint destination.

", + "HttpEndpointDestinationDescription$RetryOptions": "

Describes the retry behavior in case Firehose is unable to deliver data to the specified HTTP endpoint destination, or if it doesn't receive a valid acknowledgment of receipt from the specified HTTP endpoint destination.

", + "HttpEndpointDestinationUpdate$RetryOptions": "

Describes the retry behavior in case Firehose is unable to deliver data to the specified HTTP endpoint destination, or if it doesn't receive a valid acknowledgment of receipt from the specified HTTP endpoint destination.

" } }, "HttpEndpointS3BackupMode": { "base": null, "refs": { - "HttpEndpointDestinationConfiguration$S3BackupMode": "

Describes the S3 bucket backup options for the data that Kinesis Data Firehose delivers to the HTTP endpoint destination. You can back up all documents (AllData) or only the documents that Kinesis Data Firehose could not deliver to the specified HTTP endpoint destination (FailedDataOnly).

", - "HttpEndpointDestinationDescription$S3BackupMode": "

Describes the S3 bucket backup options for the data that Kinesis Firehose delivers to the HTTP endpoint destination. You can back up all documents (AllData) or only the documents that Kinesis Data Firehose could not deliver to the specified HTTP endpoint destination (FailedDataOnly).

", - "HttpEndpointDestinationUpdate$S3BackupMode": "

Describes the S3 bucket backup options for the data that Kinesis Firehose delivers to the HTTP endpoint destination. You can back up all documents (AllData) or only the documents that Kinesis Data Firehose could not deliver to the specified HTTP endpoint destination (FailedDataOnly).

" + "HttpEndpointDestinationConfiguration$S3BackupMode": "

Describes the S3 bucket backup options for the data that Firehose delivers to the HTTP endpoint destination. You can back up all documents (AllData) or only the documents that Firehose could not deliver to the specified HTTP endpoint destination (FailedDataOnly).

", + "HttpEndpointDestinationDescription$S3BackupMode": "

Describes the S3 bucket backup options for the data that Firehose delivers to the HTTP endpoint destination. You can back up all documents (AllData) or only the documents that Firehose could not deliver to the specified HTTP endpoint destination (FailedDataOnly).

", + "HttpEndpointDestinationUpdate$S3BackupMode": "

Describes the S3 bucket backup options for the data that Firehose delivers to the HTTP endpoint destination. You can back up all documents (AllData) or only the documents that Firehose could not deliver to the specified HTTP endpoint destination (FailedDataOnly).

" } }, "HttpEndpointUrl": { @@ -880,7 +922,7 @@ "InputFormatConfiguration": { "base": "

Specifies the deserializer you want to use to convert the format of the input data. This parameter is required if Enabled is set to true.

", "refs": { - "DataFormatConversionConfiguration$InputFormatConfiguration": "

Specifies the deserializer that you want Kinesis Data Firehose to use to convert the format of your data from JSON. This parameter is required if Enabled is set to true.

" + "DataFormatConversionConfiguration$InputFormatConfiguration": "

Specifies the deserializer that you want Firehose to use to convert the format of your data from JSON. This parameter is required if Enabled is set to true.

" } }, "IntervalInSeconds": { @@ -895,7 +937,7 @@ } }, "InvalidKMSResourceException": { - "base": "

Kinesis Data Firehose throws this exception when an attempt to put records or to start or stop delivery stream encryption fails. This happens when the KMS service throws one of the following exception types: AccessDeniedException, InvalidStateException, DisabledException, or NotFoundException.

", + "base": "

Firehose throws this exception when an attempt to put records or to start or stop delivery stream encryption fails. This happens when the KMS service throws one of the following exception types: AccessDeniedException, InvalidStateException, DisabledException, or NotFoundException.

", "refs": { } }, @@ -904,6 +946,11 @@ "refs": { } }, + "InvalidStreamTypeException": { + "base": null, + "refs": { + } + }, "KMSEncryptionConfig": { "base": "

Describes an encryption key for a destination in Amazon S3.

", "refs": { @@ -914,12 +961,13 @@ "base": null, "refs": { "DeliveryStreamEncryptionConfiguration$KeyType": "

Indicates the type of customer master key (CMK) that is used for encryption. The default setting is Amazon Web Services_OWNED_CMK. For more information about CMKs, see Customer Master Keys (CMKs).

", - "DeliveryStreamEncryptionConfigurationInput$KeyType": "

Indicates the type of customer master key (CMK) to use for encryption. The default setting is Amazon Web Services_OWNED_CMK. For more information about CMKs, see Customer Master Keys (CMKs). When you invoke CreateDeliveryStream or StartDeliveryStreamEncryption with KeyType set to CUSTOMER_MANAGED_CMK, Kinesis Data Firehose invokes the Amazon KMS operation CreateGrant to create a grant that allows the Kinesis Data Firehose service to use the customer managed CMK to perform encryption and decryption. Kinesis Data Firehose manages that grant.

When you invoke StartDeliveryStreamEncryption to change the CMK for a delivery stream that is encrypted with a customer managed CMK, Kinesis Data Firehose schedules the grant it had on the old CMK for retirement.

You can use a CMK of type CUSTOMER_MANAGED_CMK to encrypt up to 500 delivery streams. If a CreateDeliveryStream or StartDeliveryStreamEncryption operation exceeds this limit, Kinesis Data Firehose throws a LimitExceededException.

To encrypt your delivery stream, use symmetric CMKs. Kinesis Data Firehose doesn't support asymmetric CMKs. For information about symmetric and asymmetric CMKs, see About Symmetric and Asymmetric CMKs in the Amazon Web Services Key Management Service developer guide.

" + "DeliveryStreamEncryptionConfigurationInput$KeyType": "

Indicates the type of customer master key (CMK) to use for encryption. The default setting is Amazon Web Services_OWNED_CMK. For more information about CMKs, see Customer Master Keys (CMKs). When you invoke CreateDeliveryStream or StartDeliveryStreamEncryption with KeyType set to CUSTOMER_MANAGED_CMK, Firehose invokes the Amazon KMS operation CreateGrant to create a grant that allows the Firehose service to use the customer managed CMK to perform encryption and decryption. Firehose manages that grant.

When you invoke StartDeliveryStreamEncryption to change the CMK for a delivery stream that is encrypted with a customer managed CMK, Firehose schedules the grant it had on the old CMK for retirement.

You can use a CMK of type CUSTOMER_MANAGED_CMK to encrypt up to 500 delivery streams. If a CreateDeliveryStream or StartDeliveryStreamEncryption operation exceeds this limit, Firehose throws a LimitExceededException.

To encrypt your delivery stream, use symmetric CMKs. Firehose doesn't support asymmetric CMKs. For information about symmetric and asymmetric CMKs, see About Symmetric and Asymmetric CMKs in the Amazon Web Services Key Management Service developer guide.

" } }, "KinesisStreamARN": { "base": null, "refs": { + "GetKinesisStreamOutput$KinesisStreamARN": null, "KinesisStreamSourceConfiguration$KinesisStreamARN": "

The ARN of the source Kinesis data stream. For more information, see Amazon Kinesis Data Streams ARN Format.

", "KinesisStreamSourceDescription$KinesisStreamARN": "

The Amazon Resource Name (ARN) of the source Kinesis data stream. For more information, see Amazon Kinesis Data Streams ARN Format.

" } @@ -931,7 +979,7 @@ } }, "KinesisStreamSourceDescription": { - "base": "

Details about a Kinesis data stream used as the source for a Kinesis Data Firehose delivery stream.

", + "base": "

Details about a Kinesis data stream used as the source for a Firehose delivery stream.

", "refs": { "SourceDescription$KinesisStreamSourceDescription": "

The KinesisStreamSourceDescription value for the source Kinesis data stream.

" } @@ -960,13 +1008,13 @@ "ListOfNonEmptyStrings": { "base": null, "refs": { - "HiveJsonSerDe$TimestampFormats": "

Indicates how you want Kinesis Data Firehose to parse the date and timestamps that may be present in your input data JSON. To specify these format strings, follow the pattern syntax of JodaTime's DateTimeFormat format strings. For more information, see Class DateTimeFormat. You can also use the special value millis to parse timestamps in epoch milliseconds. If you don't specify a format, Kinesis Data Firehose uses java.sql.Timestamp::valueOf by default.

" + "HiveJsonSerDe$TimestampFormats": "

Indicates how you want Firehose to parse the date and timestamps that may be present in your input data JSON. To specify these format strings, follow the pattern syntax of JodaTime's DateTimeFormat format strings. For more information, see Class DateTimeFormat. You can also use the special value millis to parse timestamps in epoch milliseconds. If you don't specify a format, Firehose uses java.sql.Timestamp::valueOf by default.

" } }, "ListOfNonEmptyStringsWithoutWhitespace": { "base": null, "refs": { - "OrcSerDe$BloomFilterColumns": "

The column names for which you want Kinesis Data Firehose to create bloom filters. The default is null.

" + "OrcSerDe$BloomFilterColumns": "

The column names for which you want Firehose to create bloom filters. The default is null.

" } }, "ListTagsForDeliveryStreamInput": { @@ -1017,7 +1065,7 @@ } }, "MSKSourceDescription": { - "base": "

Details about the Amazon MSK cluster used as the source for a Kinesis Data Firehose delivery stream.

", + "base": "

Details about the Amazon MSK cluster used as the source for a Firehose delivery stream.

", "refs": { "SourceDescription$MSKSourceDescription": "

The configuration description for the Amazon MSK cluster to be used as the source for a delivery stream.

" } @@ -1041,12 +1089,12 @@ "refs": { "ColumnToJsonKeyMappings$key": null, "ListOfNonEmptyStringsWithoutWhitespace$member": null, - "SchemaConfiguration$RoleARN": "

The role that Kinesis Data Firehose can use to access Amazon Web Services Glue. This role must be in the same account you use for Kinesis Data Firehose. Cross-account roles aren't allowed.

If the SchemaConfiguration request parameter is used as part of invoking the CreateDeliveryStream API, then the RoleARN property is required and its value must be specified.

", + "SchemaConfiguration$RoleARN": "

The role that Firehose can use to access Amazon Web Services Glue. This role must be in the same account you use for Firehose. Cross-account roles aren't allowed.

If the SchemaConfiguration request parameter is used as part of invoking the CreateDeliveryStream API, then the RoleARN property is required and its value must be specified.

", "SchemaConfiguration$CatalogId": "

The ID of the Amazon Web Services Glue Data Catalog. If you don't supply this, the Amazon Web Services account ID is used by default.

", "SchemaConfiguration$DatabaseName": "

Specifies the name of the Amazon Web Services Glue database that contains the schema for the output data.

If the SchemaConfiguration request parameter is used as part of invoking the CreateDeliveryStream API, then the DatabaseName property is required and its value must be specified.

", "SchemaConfiguration$TableName": "

Specifies the Amazon Web Services Glue table that contains the column information that constitutes your data schema.

If the SchemaConfiguration request parameter is used as part of invoking the CreateDeliveryStream API, then the TableName property is required and its value must be specified.

", "SchemaConfiguration$Region": "

If you don't specify an Amazon Web Services Region, the default is the current Region.

", - "SchemaConfiguration$VersionId": "

Specifies the table version for the output data schema. If you don't specify this version ID, or if you set it to LATEST, Kinesis Data Firehose uses the most recent version. This means that any updates to the table are automatically picked up.

", + "SchemaConfiguration$VersionId": "

Specifies the table version for the output data schema. If you don't specify this version ID, or if you set it to LATEST, Firehose uses the most recent version. This means that any updates to the table are automatically picked up.

", "SecurityGroupIdList$member": null, "SubnetIdList$member": null, "VpcConfigurationDescription$VpcId": "

The ID of the Amazon ES destination's VPC.

" @@ -1060,9 +1108,9 @@ } }, "OpenXJsonSerDe": { - "base": "

The OpenX SerDe. Used by Kinesis Data Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the native Hive / HCatalog JsonSerDe.

", + "base": "

The OpenX SerDe. Used by Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the native Hive / HCatalog JsonSerDe.

", "refs": { - "Deserializer$OpenXJsonSerDe": "

The OpenX SerDe. Used by Kinesis Data Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the native Hive / HCatalog JsonSerDe.

" + "Deserializer$OpenXJsonSerDe": "

The OpenX SerDe. Used by Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the native Hive / HCatalog JsonSerDe.

" } }, "OrcCompression": { @@ -1096,9 +1144,9 @@ } }, "OutputFormatConfiguration": { - "base": "

Specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data before it writes it to Amazon S3. This parameter is required if Enabled is set to true.

", + "base": "

Specifies the serializer that you want Firehose to use to convert the format of your data before it writes it to Amazon S3. This parameter is required if Enabled is set to true.

", "refs": { - "DataFormatConversionConfiguration$OutputFormatConfiguration": "

Specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data to the Parquet or ORC format. This parameter is required if Enabled is set to true.

" + "DataFormatConversionConfiguration$OutputFormatConfiguration": "

Specifies the serializer that you want Firehose to use to convert the format of your data to the Parquet or ORC format. This parameter is required if Enabled is set to true.

" } }, "ParquetCompression": { @@ -1173,7 +1221,7 @@ } }, "Processor": { - "base": "

Describes a data processor.

", + "base": "

Describes a data processor.

If you want to add a new line delimiter between records in objects that are delivered to Amazon S3, choose AppendDelimiterToRecord as a processor type. You don't need to specify a processor parameter when you select AppendDelimiterToRecord.

", "refs": { "ProcessorList$member": null } @@ -1217,7 +1265,7 @@ "Proportion": { "base": null, "refs": { - "OrcSerDe$PaddingTolerance": "

A number between 0 and 1 that defines the tolerance for block padding as a decimal fraction of stripe size. The default value is 0.05, which means 5 percent of stripe size.

For the default values of 64 MiB ORC stripes and 256 MiB HDFS blocks, the default block padding tolerance of 5 percent reserves a maximum of 3.2 MiB for padding within the 256 MiB block. In such a case, if the available size within the block is more than 3.2 MiB, a new, smaller stripe is inserted to fit within that space. This ensures that no stripe crosses block boundaries and causes remote reads within a node-local task.

Kinesis Data Firehose ignores this parameter when OrcSerDe$EnablePadding is false.

", + "OrcSerDe$PaddingTolerance": "

A number between 0 and 1 that defines the tolerance for block padding as a decimal fraction of stripe size. The default value is 0.05, which means 5 percent of stripe size.

For the default values of 64 MiB ORC stripes and 256 MiB HDFS blocks, the default block padding tolerance of 5 percent reserves a maximum of 3.2 MiB for padding within the 256 MiB block. In such a case, if the available size within the block is more than 3.2 MiB, a new, smaller stripe is inserted to fit within that space. This ensures that no stripe crosses block boundaries and causes remote reads within a node-local task.

Firehose ignores this parameter when OrcSerDe$EnablePadding is false.

", "OrcSerDe$BloomFilterFalsePositiveProbability": "

The Bloom filter false positive probability (FPP). The lower the FPP, the bigger the Bloom filter. The default value is 0.05, the minimum is 0, and the maximum is 1.

", "OrcSerDe$DictionaryKeyThreshold": "

Represents the fraction of the total number of non-null rows. To turn off dictionary encoding, set this fraction to a number that is less than the number of distinct keys in a dictionary. To always use dictionary encoding, set this threshold to 1.

" } @@ -1295,15 +1343,15 @@ "RedshiftRetryDurationInSeconds": { "base": null, "refs": { - "RedshiftRetryOptions$DurationInSeconds": "

The length of time during which Kinesis Data Firehose retries delivery after a failure, starting from the initial request and including the first attempt. The default value is 3600 seconds (60 minutes). Kinesis Data Firehose does not retry if the value of DurationInSeconds is 0 (zero) or if the first delivery attempt takes longer than the current value.

" + "RedshiftRetryOptions$DurationInSeconds": "

The length of time during which Firehose retries delivery after a failure, starting from the initial request and including the first attempt. The default value is 3600 seconds (60 minutes). Firehose does not retry if the value of DurationInSeconds is 0 (zero) or if the first delivery attempt takes longer than the current value.

" } }, "RedshiftRetryOptions": { - "base": "

Configures retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon Redshift.

", + "base": "

Configures retry behavior in case Firehose is unable to deliver documents to Amazon Redshift.

", "refs": { - "RedshiftDestinationConfiguration$RetryOptions": "

The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).

", - "RedshiftDestinationDescription$RetryOptions": "

The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).

", - "RedshiftDestinationUpdate$RetryOptions": "

The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).

" + "RedshiftDestinationConfiguration$RetryOptions": "

The retry behavior in case Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).

", + "RedshiftDestinationDescription$RetryOptions": "

The retry behavior in case Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).

", + "RedshiftDestinationUpdate$RetryOptions": "

The retry behavior in case Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).

" } }, "RedshiftS3BackupMode": { @@ -1327,34 +1375,34 @@ "RetryDurationInSeconds": { "base": null, "refs": { - "RetryOptions$DurationInSeconds": "

The period of time during which Kinesis Data Firehose retries to deliver data to the specified Amazon S3 prefix.

" + "RetryOptions$DurationInSeconds": "

The period of time during which Firehose retries delivering data to the specified Amazon S3 prefix.

" } }, "RetryOptions": { - "base": "

The retry behavior in case Kinesis Data Firehose is unable to deliver data to an Amazon S3 prefix.

", + "base": "

The retry behavior in case Firehose is unable to deliver data to an Amazon S3 prefix.

", "refs": { - "DynamicPartitioningConfiguration$RetryOptions": "

The retry behavior in case Kinesis Data Firehose is unable to deliver data to an Amazon S3 prefix.

" + "DynamicPartitioningConfiguration$RetryOptions": "

The retry behavior in case Firehose is unable to deliver data to an Amazon S3 prefix.

" } }, "RoleARN": { "base": null, "refs": { - "AmazonOpenSearchServerlessDestinationConfiguration$RoleARN": "

The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Serverless offering for Amazon OpenSearch Service Configuration API and for indexing documents.

", + "AmazonOpenSearchServerlessDestinationConfiguration$RoleARN": "

The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for calling the Serverless offering for Amazon OpenSearch Service Configuration API and for indexing documents.

", "AmazonOpenSearchServerlessDestinationDescription$RoleARN": "

The Amazon Resource Name (ARN) of the Amazon Web Services credentials.

", - "AmazonOpenSearchServerlessDestinationUpdate$RoleARN": "

The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Serverless offering for Amazon OpenSearch Service Configuration API and for indexing documents.

", - "AmazonopensearchserviceDestinationConfiguration$RoleARN": "

The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Amazon OpenSearch Service Configuration API and for indexing documents.

", + "AmazonOpenSearchServerlessDestinationUpdate$RoleARN": "

The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for calling the Serverless offering for Amazon OpenSearch Service Configuration API and for indexing documents.

", + "AmazonopensearchserviceDestinationConfiguration$RoleARN": "

The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for calling the Amazon OpenSearch Service Configuration API and for indexing documents.

", "AmazonopensearchserviceDestinationDescription$RoleARN": "

The Amazon Resource Name (ARN) of the Amazon Web Services credentials.

", - "AmazonopensearchserviceDestinationUpdate$RoleARN": "

The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Amazon OpenSearch Service Configuration API and for indexing documents.

", + "AmazonopensearchserviceDestinationUpdate$RoleARN": "

The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for calling the Amazon OpenSearch Service Configuration API and for indexing documents.

", "AuthenticationConfiguration$RoleARN": "

The ARN of the role used to access the Amazon MSK cluster.

", - "ElasticsearchDestinationConfiguration$RoleARN": "

The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Amazon ES Configuration API and for indexing documents. For more information, see Grant Kinesis Data Firehose Access to an Amazon S3 Destination and Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

", + "ElasticsearchDestinationConfiguration$RoleARN": "

The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for calling the Amazon ES Configuration API and for indexing documents. For more information, see Grant Firehose Access to an Amazon S3 Destination and Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

", "ElasticsearchDestinationDescription$RoleARN": "

The Amazon Resource Name (ARN) of the Amazon Web Services credentials. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

", - "ElasticsearchDestinationUpdate$RoleARN": "

The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Amazon ES Configuration API and for indexing documents. For more information, see Grant Kinesis Data Firehose Access to an Amazon S3 Destination and Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

", + "ElasticsearchDestinationUpdate$RoleARN": "

The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for calling the Amazon ES Configuration API and for indexing documents. For more information, see Grant Firehose Access to an Amazon S3 Destination and Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

", "ExtendedS3DestinationConfiguration$RoleARN": "

The Amazon Resource Name (ARN) of the Amazon Web Services credentials. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

", "ExtendedS3DestinationDescription$RoleARN": "

The Amazon Resource Name (ARN) of the Amazon Web Services credentials. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

", "ExtendedS3DestinationUpdate$RoleARN": "

The Amazon Resource Name (ARN) of the Amazon Web Services credentials. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

", - "HttpEndpointDestinationConfiguration$RoleARN": "

Kinesis Data Firehose uses this IAM role for all the permissions that the delivery stream needs.

", - "HttpEndpointDestinationDescription$RoleARN": "

Kinesis Data Firehose uses this IAM role for all the permissions that the delivery stream needs.

", - "HttpEndpointDestinationUpdate$RoleARN": "

Kinesis Data Firehose uses this IAM role for all the permissions that the delivery stream needs.

", + "HttpEndpointDestinationConfiguration$RoleARN": "

Firehose uses this IAM role for all the permissions that the delivery stream needs.

", + "HttpEndpointDestinationDescription$RoleARN": "

Firehose uses this IAM role for all the permissions that the delivery stream needs.

", + "HttpEndpointDestinationUpdate$RoleARN": "

Firehose uses this IAM role for all the permissions that the delivery stream needs.

", "KinesisStreamSourceConfiguration$RoleARN": "

The ARN of the role that provides access to the source Kinesis data stream. For more information, see Amazon Web Services Identity and Access Management (IAM) ARN Format.

", "KinesisStreamSourceDescription$RoleARN": "

The ARN of the role used by the source Kinesis data stream. For more information, see Amazon Web Services Identity and Access Management (IAM) ARN Format.

", "RedshiftDestinationConfiguration$RoleARN": "

The Amazon Resource Name (ARN) of the Amazon Web Services credentials. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

", @@ -1366,8 +1414,8 @@ "SnowflakeDestinationConfiguration$RoleARN": "

The Amazon Resource Name (ARN) of the Snowflake role

", "SnowflakeDestinationDescription$RoleARN": "

The Amazon Resource Name (ARN) of the Snowflake role

", "SnowflakeDestinationUpdate$RoleARN": "

The Amazon Resource Name (ARN) of the Snowflake role

", - "VpcConfiguration$RoleARN": "

The ARN of the IAM role that you want the delivery stream to use to create endpoints in the destination VPC. You can use your existing Kinesis Data Firehose delivery role or you can specify a new role. In either case, make sure that the role trusts the Kinesis Data Firehose service principal and that it grants the following permissions:

If you revoke these permissions after you create the delivery stream, Kinesis Data Firehose can't scale out by creating more ENIs when necessary. You might therefore see a degradation in performance.

", - "VpcConfigurationDescription$RoleARN": "

The ARN of the IAM role that the delivery stream uses to create endpoints in the destination VPC. You can use your existing Kinesis Data Firehose delivery role or you can specify a new role. In either case, make sure that the role trusts the Kinesis Data Firehose service principal and that it grants the following permissions:

If you revoke these permissions after you create the delivery stream, Kinesis Data Firehose can't scale out by creating more ENIs when necessary. You might therefore see a degradation in performance.

" + "VpcConfiguration$RoleARN": "

The ARN of the IAM role that you want the delivery stream to use to create endpoints in the destination VPC. You can use your existing Firehose delivery role or you can specify a new role. In either case, make sure that the role trusts the Firehose service principal and that it grants the following permissions:

When you specify subnets for delivering data to the destination in a private VPC, make sure you have enough free IP addresses in the chosen subnets. If there is no free IP address available in a specified subnet, Firehose cannot create or add ENIs for data delivery in the private VPC, and delivery will be degraded or fail.

", + "VpcConfigurationDescription$RoleARN": "

The ARN of the IAM role that the delivery stream uses to create endpoints in the destination VPC. You can use your existing Firehose delivery role or you can specify a new role. In either case, make sure that the role trusts the Firehose service principal and that it grants the following permissions:

If you revoke these permissions after you create the delivery stream, Firehose can't scale out by creating more ENIs when necessary. You might therefore see a degradation in performance.

" } }, "S3BackupMode": { @@ -1424,27 +1472,45 @@ } }, "SchemaConfiguration": { - "base": "

Specifies the schema to which you want Kinesis Data Firehose to configure your data before it writes it to Amazon S3. This parameter is required if Enabled is set to true.

", + "base": "

Specifies the schema to which you want Firehose to configure your data before it writes it to Amazon S3. This parameter is required if Enabled is set to true.

", "refs": { "DataFormatConversionConfiguration$SchemaConfiguration": "

Specifies the Amazon Web Services Glue Data Catalog table that contains the column information. This parameter is required if Enabled is set to true.

" } }, + "SecretAccessKey": { + "base": null, + "refs": { + "SessionCredentials$SecretAccessKey": null + } + }, "SecurityGroupIdList": { "base": null, "refs": { - "VpcConfiguration$SecurityGroupIds": "

The IDs of the security groups that you want Kinesis Data Firehose to use when it creates ENIs in the VPC of the Amazon ES destination. You can use the same security group that the Amazon ES domain uses or different ones. If you specify different security groups here, ensure that they allow outbound HTTPS traffic to the Amazon ES domain's security group. Also ensure that the Amazon ES domain's security group allows HTTPS traffic from the security groups specified here. If you use the same security group for both your delivery stream and the Amazon ES domain, make sure the security group inbound rule allows HTTPS traffic. For more information about security group rules, see Security group rules in the Amazon VPC documentation.

", - "VpcConfigurationDescription$SecurityGroupIds": "

The IDs of the security groups that Kinesis Data Firehose uses when it creates ENIs in the VPC of the Amazon ES destination. You can use the same security group that the Amazon ES domain uses or different ones. If you specify different security groups, ensure that they allow outbound HTTPS traffic to the Amazon ES domain's security group. Also ensure that the Amazon ES domain's security group allows HTTPS traffic from the security groups specified here. If you use the same security group for both your delivery stream and the Amazon ES domain, make sure the security group inbound rule allows HTTPS traffic. For more information about security group rules, see Security group rules in the Amazon VPC documentation.

" + "VpcConfiguration$SecurityGroupIds": "

The IDs of the security groups that you want Firehose to use when it creates ENIs in the VPC of the Amazon ES destination. You can use the same security group that the Amazon ES domain uses or different ones. If you specify different security groups here, ensure that they allow outbound HTTPS traffic to the Amazon ES domain's security group. Also ensure that the Amazon ES domain's security group allows HTTPS traffic from the security groups specified here. If you use the same security group for both your delivery stream and the Amazon ES domain, make sure the security group inbound rule allows HTTPS traffic. For more information about security group rules, see Security group rules in the Amazon VPC documentation.

", + "VpcConfigurationDescription$SecurityGroupIds": "

The IDs of the security groups that Firehose uses when it creates ENIs in the VPC of the Amazon ES destination. You can use the same security group that the Amazon ES domain uses or different ones. If you specify different security groups, ensure that they allow outbound HTTPS traffic to the Amazon ES domain's security group. Also ensure that the Amazon ES domain's security group allows HTTPS traffic from the security groups specified here. If you use the same security group for both your delivery stream and the Amazon ES domain, make sure the security group inbound rule allows HTTPS traffic. For more information about security group rules, see Security group rules in the Amazon VPC documentation.

" } }, "Serializer": { - "base": "

The serializer that you want Kinesis Data Firehose to use to convert data to the target format before writing it to Amazon S3. Kinesis Data Firehose supports two types of serializers: the ORC SerDe and the Parquet SerDe.

", + "base": "

The serializer that you want Firehose to use to convert data to the target format before writing it to Amazon S3. Firehose supports two types of serializers: the ORC SerDe and the Parquet SerDe.

", "refs": { "OutputFormatConfiguration$Serializer": "

Specifies which serializer to use. You can choose either the ORC SerDe or the Parquet SerDe. If both are non-null, the server rejects the request.

" } }, "ServiceUnavailableException": { - "base": "

The service is unavailable. Back off and retry the operation. If you continue to see the exception, throughput limits for the delivery stream may have been exceeded. For more information about limits and how to request an increase, see Amazon Kinesis Data Firehose Limits.

", + "base": "

The service is unavailable. Back off and retry the operation. If you continue to see the exception, throughput limits for the delivery stream may have been exceeded. For more information about limits and how to request an increase, see Amazon Firehose Limits.

", + "refs": { + } + }, + "SessionCredentials": { + "base": null, "refs": { + "GetKinesisStreamOutput$CredentialsForReadingKinesisStream": null + } + }, + "SessionToken": { + "base": null, + "refs": { + "SessionCredentials$SessionToken": null } }, "SizeInMBs": { @@ -1534,15 +1600,15 @@ "SnowflakeRetryDurationInSeconds": { "base": null, "refs": { - "SnowflakeRetryOptions$DurationInSeconds": "

the time period where Kinesis Data Firehose will retry sending data to the chosen HTTP endpoint.

" + "SnowflakeRetryOptions$DurationInSeconds": "

The time period where Firehose will retry sending data to the chosen HTTP endpoint.

" } }, "SnowflakeRetryOptions": { - "base": "

Specify how long Kinesis Data Firehose retries sending data to the New Relic HTTP endpoint. After sending data, Kinesis Data Firehose first waits for an acknowledgment from the HTTP endpoint. If an error occurs or the acknowledgment doesn’t arrive within the acknowledgment timeout period, Kinesis Data Firehose starts the retry duration counter. It keeps retrying until the retry duration expires. After that, Kinesis Data Firehose considers it a data delivery failure and backs up the data to your Amazon S3 bucket. Every time that Kinesis Data Firehose sends data to the HTTP endpoint (either the initial attempt or a retry), it restarts the acknowledgement timeout counter and waits for an acknowledgement from the HTTP endpoint. Even if the retry duration expires, Kinesis Data Firehose still waits for the acknowledgment until it receives it or the acknowledgement timeout period is reached. If the acknowledgment times out, Kinesis Data Firehose determines whether there's time left in the retry counter. If there is time left, it retries again and repeats the logic until it receives an acknowledgment or determines that the retry time has expired. If you don't want Kinesis Data Firehose to retry sending data, set this value to 0.

", + "base": "

Specify how long Firehose retries sending data to the New Relic HTTP endpoint. After sending data, Firehose first waits for an acknowledgment from the HTTP endpoint. If an error occurs or the acknowledgment doesn’t arrive within the acknowledgment timeout period, Firehose starts the retry duration counter. It keeps retrying until the retry duration expires. After that, Firehose considers it a data delivery failure and backs up the data to your Amazon S3 bucket. Every time that Firehose sends data to the HTTP endpoint (either the initial attempt or a retry), it restarts the acknowledgement timeout counter and waits for an acknowledgement from the HTTP endpoint. Even if the retry duration expires, Firehose still waits for the acknowledgment until it receives it or the acknowledgement timeout period is reached. If the acknowledgment times out, Firehose determines whether there's time left in the retry counter. If there is time left, it retries again and repeats the logic until it receives an acknowledgment or determines that the retry time has expired. If you don't want Firehose to retry sending data, set this value to 0.

", "refs": { - "SnowflakeDestinationConfiguration$RetryOptions": "

The time period where Kinesis Data Firehose will retry sending data to the chosen HTTP endpoint.

", - "SnowflakeDestinationDescription$RetryOptions": "

The time period where Kinesis Data Firehose will retry sending data to the chosen HTTP endpoint.

", - "SnowflakeDestinationUpdate$RetryOptions": "

Specify how long Kinesis Data Firehose retries sending data to the New Relic HTTP endpoint. After sending data, Kinesis Data Firehose first waits for an acknowledgment from the HTTP endpoint. If an error occurs or the acknowledgment doesn’t arrive within the acknowledgment timeout period, Kinesis Data Firehose starts the retry duration counter. It keeps retrying until the retry duration expires. After that, Kinesis Data Firehose considers it a data delivery failure and backs up the data to your Amazon S3 bucket. Every time that Kinesis Data Firehose sends data to the HTTP endpoint (either the initial attempt or a retry), it restarts the acknowledgement timeout counter and waits for an acknowledgement from the HTTP endpoint. Even if the retry duration expires, Kinesis Data Firehose still waits for the acknowledgment until it receives it or the acknowledgement timeout period is reached. If the acknowledgment times out, Kinesis Data Firehose determines whether there's time left in the retry counter. If there is time left, it retries again and repeats the logic until it receives an acknowledgment or determines that the retry time has expired. If you don't want Kinesis Data Firehose to retry sending data, set this value to 0.

" + "SnowflakeDestinationConfiguration$RetryOptions": "

The time period where Firehose will retry sending data to the chosen HTTP endpoint.

", + "SnowflakeDestinationDescription$RetryOptions": "

The time period where Firehose will retry sending data to the chosen HTTP endpoint.

", + "SnowflakeDestinationUpdate$RetryOptions": "

Specify how long Firehose retries sending data to the New Relic HTTP endpoint. After sending data, Firehose first waits for an acknowledgment from the HTTP endpoint. If an error occurs or the acknowledgment doesn’t arrive within the acknowledgment timeout period, Firehose starts the retry duration counter. It keeps retrying until the retry duration expires. After that, Firehose considers it a data delivery failure and backs up the data to your Amazon S3 bucket. Every time that Firehose sends data to the HTTP endpoint (either the initial attempt or a retry), it restarts the acknowledgement timeout counter and waits for an acknowledgement from the HTTP endpoint. Even if the retry duration expires, Firehose still waits for the acknowledgment until it receives it or the acknowledgement timeout period is reached. If the acknowledgment times out, Firehose determines whether there's time left in the retry counter. If there is time left, it retries again and repeats the logic until it receives an acknowledgment or determines that the retry time has expired. If you don't want Firehose to retry sending data, set this value to 0.

" } }, "SnowflakeRole": { @@ -1599,7 +1665,7 @@ } }, "SourceDescription": { - "base": "

Details about a Kinesis data stream used as the source for a Kinesis Data Firehose delivery stream.

", + "base": "

Details about a Kinesis data stream used as the source for a Firehose delivery stream.

", "refs": { "DeliveryStreamDescription$Source": "

If the DeliveryStreamType parameter is KinesisStreamAsSource, a SourceDescription object describing the source Kinesis data stream.

" } @@ -1645,23 +1711,23 @@ "SplunkRetryDurationInSeconds": { "base": null, "refs": { - "SplunkRetryOptions$DurationInSeconds": "

The total amount of time that Kinesis Data Firehose spends on retries. This duration starts after the initial attempt to send data to Splunk fails. It doesn't include the periods during which Kinesis Data Firehose waits for acknowledgment from Splunk after each attempt.

" + "SplunkRetryOptions$DurationInSeconds": "

The total amount of time that Firehose spends on retries. This duration starts after the initial attempt to send data to Splunk fails. It doesn't include the periods during which Firehose waits for acknowledgment from Splunk after each attempt.

" } }, "SplunkRetryOptions": { - "base": "

Configures retry behavior in case Kinesis Data Firehose is unable to deliver documents to Splunk, or if it doesn't receive an acknowledgment from Splunk.

", + "base": "

Configures retry behavior in case Firehose is unable to deliver documents to Splunk, or if it doesn't receive an acknowledgment from Splunk.

", "refs": { - "SplunkDestinationConfiguration$RetryOptions": "

The retry behavior in case Kinesis Data Firehose is unable to deliver data to Splunk, or if it doesn't receive an acknowledgment of receipt from Splunk.

", - "SplunkDestinationDescription$RetryOptions": "

The retry behavior in case Kinesis Data Firehose is unable to deliver data to Splunk or if it doesn't receive an acknowledgment of receipt from Splunk.

", - "SplunkDestinationUpdate$RetryOptions": "

The retry behavior in case Kinesis Data Firehose is unable to deliver data to Splunk or if it doesn't receive an acknowledgment of receipt from Splunk.

" + "SplunkDestinationConfiguration$RetryOptions": "

The retry behavior in case Firehose is unable to deliver data to Splunk, or if it doesn't receive an acknowledgment of receipt from Splunk.

", + "SplunkDestinationDescription$RetryOptions": "

The retry behavior in case Firehose is unable to deliver data to Splunk or if it doesn't receive an acknowledgment of receipt from Splunk.

", + "SplunkDestinationUpdate$RetryOptions": "

The retry behavior in case Firehose is unable to deliver data to Splunk or if it doesn't receive an acknowledgment of receipt from Splunk.

" } }, "SplunkS3BackupMode": { "base": null, "refs": { - "SplunkDestinationConfiguration$S3BackupMode": "

Defines how documents should be delivered to Amazon S3. When set to FailedEventsOnly, Kinesis Data Firehose writes any data that could not be indexed to the configured Amazon S3 destination. When set to AllEvents, Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents to Amazon S3. The default value is FailedEventsOnly.

You can update this backup mode from FailedEventsOnly to AllEvents. You can't update it from AllEvents to FailedEventsOnly.

", - "SplunkDestinationDescription$S3BackupMode": "

Defines how documents should be delivered to Amazon S3. When set to FailedDocumentsOnly, Kinesis Data Firehose writes any data that could not be indexed to the configured Amazon S3 destination. When set to AllDocuments, Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents to Amazon S3. Default value is FailedDocumentsOnly.

", - "SplunkDestinationUpdate$S3BackupMode": "

Specifies how you want Kinesis Data Firehose to back up documents to Amazon S3. When set to FailedDocumentsOnly, Kinesis Data Firehose writes any data that could not be indexed to the configured Amazon S3 destination. When set to AllEvents, Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents to Amazon S3. The default value is FailedEventsOnly.

You can update this backup mode from FailedEventsOnly to AllEvents. You can't update it from AllEvents to FailedEventsOnly.

" + "SplunkDestinationConfiguration$S3BackupMode": "

Defines how documents should be delivered to Amazon S3. When set to FailedEventsOnly, Firehose writes any data that could not be indexed to the configured Amazon S3 destination. When set to AllEvents, Firehose delivers all incoming records to Amazon S3, and also writes failed documents to Amazon S3. The default value is FailedEventsOnly.

You can update this backup mode from FailedEventsOnly to AllEvents. You can't update it from AllEvents to FailedEventsOnly.

", + "SplunkDestinationDescription$S3BackupMode": "

Defines how documents should be delivered to Amazon S3. When set to FailedDocumentsOnly, Firehose writes any data that could not be indexed to the configured Amazon S3 destination. When set to AllDocuments, Firehose delivers all incoming records to Amazon S3, and also writes failed documents to Amazon S3. Default value is FailedDocumentsOnly.

", + "SplunkDestinationUpdate$S3BackupMode": "

Specifies how you want Firehose to back up documents to Amazon S3. When set to FailedDocumentsOnly, Firehose writes any data that could not be indexed to the configured Amazon S3 destination. When set to AllEvents, Firehose delivers all incoming records to Amazon S3, and also writes failed documents to Amazon S3. The default value is FailedEventsOnly.

You can update this backup mode from FailedEventsOnly to AllEvents. You can't update it from AllEvents to FailedEventsOnly.

" } }, "StartDeliveryStreamEncryptionInput": { @@ -1687,8 +1753,8 @@ "SubnetIdList": { "base": null, "refs": { - "VpcConfiguration$SubnetIds": "

The IDs of the subnets that you want Kinesis Data Firehose to use to create ENIs in the VPC of the Amazon ES destination. Make sure that the routing tables and inbound and outbound rules allow traffic to flow from the subnets whose IDs are specified here to the subnets that have the destination Amazon ES endpoints. Kinesis Data Firehose creates at least one ENI in each of the subnets that are specified here. Do not delete or modify these ENIs.

The number of ENIs that Kinesis Data Firehose creates in the subnets specified here scales up and down automatically based on throughput. To enable Kinesis Data Firehose to scale up the number of ENIs to match throughput, ensure that you have sufficient quota. To help you calculate the quota you need, assume that Kinesis Data Firehose can create up to three ENIs for this delivery stream for each of the subnets specified here. For more information about ENI quota, see Network Interfaces in the Amazon VPC Quotas topic.

", - "VpcConfigurationDescription$SubnetIds": "

The IDs of the subnets that Kinesis Data Firehose uses to create ENIs in the VPC of the Amazon ES destination. Make sure that the routing tables and inbound and outbound rules allow traffic to flow from the subnets whose IDs are specified here to the subnets that have the destination Amazon ES endpoints. Kinesis Data Firehose creates at least one ENI in each of the subnets that are specified here. Do not delete or modify these ENIs.

The number of ENIs that Kinesis Data Firehose creates in the subnets specified here scales up and down automatically based on throughput. To enable Kinesis Data Firehose to scale up the number of ENIs to match throughput, ensure that you have sufficient quota. To help you calculate the quota you need, assume that Kinesis Data Firehose can create up to three ENIs for this delivery stream for each of the subnets specified here. For more information about ENI quota, see Network Interfaces in the Amazon VPC Quotas topic.

" + "VpcConfiguration$SubnetIds": "

The IDs of the subnets that you want Firehose to use to create ENIs in the VPC of the Amazon ES destination. Make sure that the routing tables and inbound and outbound rules allow traffic to flow from the subnets whose IDs are specified here to the subnets that have the destination Amazon ES endpoints. Firehose creates at least one ENI in each of the subnets that are specified here. Do not delete or modify these ENIs.

The number of ENIs that Firehose creates in the subnets specified here scales up and down automatically based on throughput. To enable Firehose to scale up the number of ENIs to match throughput, ensure that you have sufficient quota. To help you calculate the quota you need, assume that Firehose can create up to three ENIs for this delivery stream for each of the subnets specified here. For more information about ENI quota, see Network Interfaces in the Amazon VPC Quotas topic.
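As a rough illustration of the quota guidance above, a fragment (reusing the firehose client from the sketch earlier) that builds a VpcConfiguration; the role ARN, subnet IDs, and security group ID are hypothetical.

```go
// Plan ENI quota as (number of subnets) x 3: with the three subnets below,
// assume up to 9 ENIs may be created for this delivery stream.
vpcCfg := &firehose.VpcConfiguration{
	RoleARN: aws.String("arn:aws:iam::123456789012:role/firehose-vpc-role"),
	SubnetIds: aws.StringSlice([]string{
		"subnet-0aaa", "subnet-0bbb", "subnet-0ccc",
	}),
	SecurityGroupIds: aws.StringSlice([]string{"sg-0ddd"}),
}
_ = vpcCfg // would be set on the Amazon ES/OpenSearch destination configuration
```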

", + "VpcConfigurationDescription$SubnetIds": "

The IDs of the subnets that Firehose uses to create ENIs in the VPC of the Amazon ES destination. Make sure that the routing tables and inbound and outbound rules allow traffic to flow from the subnets whose IDs are specified here to the subnets that have the destination Amazon ES endpoints. Firehose creates at least one ENI in each of the subnets that are specified here. Do not delete or modify these ENIs.

The number of ENIs that Firehose creates in the subnets specified here scales up and down automatically based on throughput. To enable Firehose to scale up the number of ENIs to match throughput, ensure that you have sufficient quota. To help you calculate the quota you need, assume that Firehose can create up to three ENIs for this delivery stream for each of the subnets specified here. For more information about ENI quota, see Network Interfaces in the Amazon VPC Quotas topic.

" } }, "Tag": { @@ -1735,11 +1801,114 @@ "Tag$Value": "

An optional string, which you can use to describe or define the tag. Maximum length: 256 characters. Valid characters: Unicode letters, digits, white space, _ . / = + - % @

" } }, + "TagrisAccessDeniedException": { + "base": null, + "refs": { + } + }, + "TagrisAccountId": { + "base": null, + "refs": { + "TagrisSweepListItem$TagrisAccountId": null + } + }, + "TagrisAmazonResourceName": { + "base": null, + "refs": { + "TagrisSweepListItem$TagrisAmazonResourceName": null, + "TagrisSweepListResult$key": null + } + }, + "TagrisExceptionMessage": { + "base": null, + "refs": { + "TagrisAccessDeniedException$message": null, + "TagrisInternalServiceException$message": null, + "TagrisInvalidArnException$message": null, + "TagrisInvalidParameterException$message": null, + "TagrisPartialResourcesExistResultsException$message": null, + "TagrisThrottledException$message": null + } + }, + "TagrisInternalId": { + "base": null, + "refs": { + "TagrisSweepListItem$TagrisInternalId": null + } + }, + "TagrisInternalServiceException": { + "base": null, + "refs": { + } + }, + "TagrisInvalidArnException": { + "base": null, + "refs": { + } + }, + "TagrisInvalidParameterException": { + "base": null, + "refs": { + } + }, + "TagrisPartialResourcesExistResultsException": { + "base": null, + "refs": { + } + }, + "TagrisStatus": { + "base": null, + "refs": { + "TagrisSweepListResult$value": null + } + }, + "TagrisSweepList": { + "base": null, + "refs": { + "TagrisVerifyResourcesExistInput$TagrisSweepList": null + } + }, + "TagrisSweepListItem": { + "base": null, + "refs": { + "TagrisInvalidArnException$sweepListItem": null, + "TagrisSweepList$member": null + } + }, + "TagrisSweepListResult": { + "base": null, + "refs": { + "TagrisPartialResourcesExistResultsException$resourceExistenceInformation": null, + "TagrisVerifyResourcesExistOutput$TagrisSweepListResult": null + } + }, + "TagrisThrottledException": { + "base": null, + "refs": { + } + }, + "TagrisVerifyResourcesExistInput": { + "base": null, + "refs": { + } + }, + "TagrisVerifyResourcesExistOutput": { + "base": null, + "refs": { + } + }, + "TagrisVersion": { + "base": null, + "refs": { + "TagrisSweepListItem$TagrisVersion": null + } + }, "Timestamp": { "base": null, "refs": { "DeliveryStreamDescription$CreateTimestamp": "

The date and time that the delivery stream was created.

", - "DeliveryStreamDescription$LastUpdateTimestamp": "

The date and time that the delivery stream was last updated.

" + "DeliveryStreamDescription$LastUpdateTimestamp": "

The date and time that the delivery stream was last updated.

", + "SessionCredentials$Expiration": null } }, "TopicName": { diff --git a/models/apis/lambda/2015-03-31/docs-2.json b/models/apis/lambda/2015-03-31/docs-2.json index 3a3428fa4ed..169d43ef657 100644 --- a/models/apis/lambda/2015-03-31/docs-2.json +++ b/models/apis/lambda/2015-03-31/docs-2.json @@ -36,7 +36,7 @@ "GetProvisionedConcurrencyConfig": "

Retrieves the provisioned concurrency configuration for a function's alias or version.

", "GetRuntimeManagementConfig": "

Retrieves the runtime management configuration for a function's version. If the runtime update mode is Manual, this includes the ARN of the runtime version and the runtime update mode. If the runtime update mode is Auto or Function update, this includes the runtime update mode and null is returned for the ARN. For more information, see Runtime updates.

", "Invoke": "

Invokes a Lambda function. You can invoke a function synchronously (and wait for the response), or asynchronously. By default, Lambda invokes your function synchronously (i.e. the InvocationType is RequestResponse). To invoke a function asynchronously, set InvocationType to Event. Lambda passes the ClientContext object to your function for synchronous invocations only.

For synchronous invocation, details about the function response, including errors, are included in the response body and headers. For either invocation type, you can find more information in the execution log and trace.

When an error occurs, your function may be invoked multiple times. Retry behavior varies by error type, client, event source, and invocation type. For example, if you invoke a function asynchronously and it returns an error, Lambda executes the function up to two more times. For more information, see Error handling and automatic retries in Lambda.

For asynchronous invocation, Lambda adds events to a queue before sending them to your function. If your function does not have enough capacity to keep up with the queue, events may be lost. Occasionally, your function may receive the same event multiple times, even if no error occurs. To retain events that were not processed, configure your function with a dead-letter queue.

The status code in the API response doesn't reflect function errors. Error codes are reserved for errors that prevent your function from executing, such as permissions errors, quota errors, or issues with your function's code and configuration. For example, Lambda returns TooManyRequestsException if running the function would cause you to exceed a concurrency limit at either the account level (ConcurrentInvocationLimitExceeded) or function level (ReservedFunctionConcurrentInvocationLimitExceeded).

For functions with a long timeout, your client might disconnect during synchronous invocation while it waits for a response. Configure your HTTP client, SDK, firewall, proxy, or operating system to allow for long connections with timeout or keep-alive settings.

This operation requires permission for the lambda:InvokeFunction action. For details on how to set up permissions for cross-account invocations, see Granting function access to other accounts.
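Editor's sketch of the asynchronous path described above, using aws-sdk-go v1; the function name and payload are placeholders.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lambda"
)

func main() {
	svc := lambda.New(session.Must(session.NewSession()))

	// Asynchronous invocation: set InvocationType to Event. Lambda queues the
	// event and the response carries only a 202 status code, not the result.
	out, err := svc.Invoke(&lambda.InvokeInput{
		FunctionName:   aws.String("my-function"), // hypothetical function name
		InvocationType: aws.String(lambda.InvocationTypeEvent),
		Payload:        []byte(`{"orderId": 42}`),
	})
	if err != nil {
		fmt.Println("invoke failed:", err)
		return
	}
	fmt.Println("status code:", aws.Int64Value(out.StatusCode))
}
```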

", - "InvokeAsync": "

For asynchronous function invocation, use Invoke.

Invokes a function asynchronously.

", + "InvokeAsync": "

For asynchronous function invocation, use Invoke.

Invokes a function asynchronously.

If you do use the InvokeAsync action, note that it doesn't support the use of X-Ray active tracing. Trace ID is not propagated to the function, even if X-Ray active tracing is turned on.

", "InvokeWithResponseStream": "

Configure your Lambda functions to stream response payloads back to clients. For more information, see Configuring a Lambda function to stream responses.

This operation requires permission for the lambda:InvokeFunction action. For details on how to set up permissions for cross-account invocations, see Granting function access to other accounts.

", "ListAliases": "

Returns a list of aliases for a Lambda function.

", "ListCodeSigningConfigs": "

Returns a list of code signing configurations. A request returns up to 10,000 configurations per call. You can use the MaxItems parameter to return fewer configurations per call.

", @@ -186,7 +186,7 @@ "ApplicationLogLevel": { "base": null, "refs": { - "LoggingConfig$ApplicationLogLevel": "

Set this property to filter the application logs for your function that Lambda sends to CloudWatch. Lambda only sends application logs at the selected level and lower.

" + "LoggingConfig$ApplicationLogLevel": "

Set this property to filter the application logs for your function that Lambda sends to CloudWatch. Lambda only sends application logs at the selected level of detail and lower, where TRACE is the highest level and FATAL is the lowest.
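A hedged fragment showing one way to set this filter, reusing the Lambda client from the Invoke sketch above; the function name and level strings are illustrative.

```go
// Selecting WARN keeps WARN/ERROR/FATAL application logs and drops
// TRACE/DEBUG/INFO; selecting INFO for system logs drops DEBUG.
_, err := svc.UpdateFunctionConfiguration(&lambda.UpdateFunctionConfigurationInput{
	FunctionName: aws.String("my-function"),
	LoggingConfig: &lambda.LoggingConfig{
		LogFormat:           aws.String("JSON"),
		ApplicationLogLevel: aws.String("WARN"),
		SystemLogLevel:      aws.String("INFO"),
	},
})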

" } }, "Architecture": { @@ -210,13 +210,13 @@ "base": null, "refs": { "AddPermissionRequest$SourceArn": "

For Amazon Web Services, the ARN of the Amazon Web Services resource that invokes the function. For example, an Amazon S3 bucket or Amazon SNS topic.

Note that Lambda configures the comparison using the StringLike operator.
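For illustration only (same Lambda client as above), a fragment granting an S3 bucket permission to invoke the function; the bucket ARN and statement ID are hypothetical.

```go
// Lambda compares SourceArn with the StringLike operator, so wildcard
// characters in the ARN pattern are honored.
_, err := svc.AddPermission(&lambda.AddPermissionInput{
	FunctionName: aws.String("my-function"),
	StatementId:  aws.String("s3-invoke"),
	Action:       aws.String("lambda:InvokeFunction"),
	Principal:    aws.String("s3.amazonaws.com"),
	SourceArn:    aws.String("arn:aws:s3:::my-example-bucket"),
})
```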

", - "CreateEventSourceMappingRequest$EventSourceArn": "

The Amazon Resource Name (ARN) of the event source.

", + "CreateEventSourceMappingRequest$EventSourceArn": "

The Amazon Resource Name (ARN) of the event source.

", "EventSourceMappingConfiguration$EventSourceArn": "

The Amazon Resource Name (ARN) of the event source.

", "FunctionConfiguration$SigningProfileVersionArn": "

The ARN of the signing profile version.

", "FunctionConfiguration$SigningJobArn": "

The ARN of the signing job.

", "Layer$SigningProfileVersionArn": "

The Amazon Resource Name (ARN) for a signing profile version.

", "Layer$SigningJobArn": "

The Amazon Resource Name (ARN) of a signing job.

", - "ListEventSourceMappingsRequest$EventSourceArn": "

The Amazon Resource Name (ARN) of the event source.

", + "ListEventSourceMappingsRequest$EventSourceArn": "

The Amazon Resource Name (ARN) of the event source.

", "SigningProfileVersionArns$member": null } }, @@ -502,18 +502,18 @@ "DestinationArn": { "base": null, "refs": { - "OnFailure$Destination": "

The Amazon Resource Name (ARN) of the destination resource.

", + "OnFailure$Destination": "

The Amazon Resource Name (ARN) of the destination resource.

To retain records of asynchronous invocations, you can configure an Amazon SNS topic, Amazon SQS queue, Lambda function, or Amazon EventBridge event bus as the destination.

To retain records of failed invocations from Kinesis and DynamoDB event sources, you can configure an Amazon SNS topic or Amazon SQS queue as the destination.

To retain records of failed invocations from self-managed Kafka or Amazon MSK, you can configure an Amazon SNS topic or Amazon SQS queue as the destination.
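A small illustrative fragment (same Lambda client as in the earlier sketch) wiring an SQS queue as the on-failure destination for asynchronous invocations; the queue ARN is a placeholder.

```go
// Records of failed asynchronous invocations go to the SQS queue below;
// OnSuccess could likewise point at an SNS topic, queue, function, or event bus.
_, err := svc.PutFunctionEventInvokeConfig(&lambda.PutFunctionEventInvokeConfigInput{
	FunctionName: aws.String("my-function"),
	DestinationConfig: &lambda.DestinationConfig{
		OnFailure: &lambda.OnFailure{
			Destination: aws.String("arn:aws:sqs:us-east-1:123456789012:failed-events"),
		},
	},
})
```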

", "OnSuccess$Destination": "

The Amazon Resource Name (ARN) of the destination resource.

" } }, "DestinationConfig": { "base": "

A configuration object that specifies the destination of an event after Lambda processes it.

", "refs": { - "CreateEventSourceMappingRequest$DestinationConfig": "

(Kinesis and DynamoDB Streams only) A standard Amazon SQS queue or standard Amazon SNS topic destination for discarded records.

", + "CreateEventSourceMappingRequest$DestinationConfig": "

(Kinesis, DynamoDB Streams, Amazon MSK, and self-managed Kafka only) A configuration object that specifies the destination of an event after Lambda processes it.

", "EventSourceMappingConfiguration$DestinationConfig": "

(Kinesis and DynamoDB Streams only) An Amazon SQS queue or Amazon SNS topic destination for discarded records.

", "FunctionEventInvokeConfig$DestinationConfig": "

A destination for events after they have been sent to a function for processing.

Destinations

", "PutFunctionEventInvokeConfigRequest$DestinationConfig": "

A destination for events after they have been sent to a function for processing.

Destinations

", - "UpdateEventSourceMappingRequest$DestinationConfig": "

(Kinesis and DynamoDB Streams only) A standard Amazon SQS queue or standard Amazon SNS topic destination for discarded records.

", + "UpdateEventSourceMappingRequest$DestinationConfig": "

(Kinesis, DynamoDB Streams, Amazon MSK, and self-managed Kafka only) A configuration object that specifies the destination of an event after Lambda processes it.

", "UpdateFunctionEventInvokeConfigRequest$DestinationConfig": "

A destination for events after they have been sent to a function for processing.

Destinations

" } }, @@ -635,11 +635,11 @@ } }, "EphemeralStorage": { - "base": "

The size of the function's /tmp directory in MB. The default value is 512, but it can be any whole number between 512 and 10,240 MB.

", + "base": "

The size of the function's /tmp directory in MB. The default value is 512, but can be any whole number between 512 and 10,240 MB. For more information, see Configuring ephemeral storage (console).

", "refs": { - "CreateFunctionRequest$EphemeralStorage": "

The size of the function's /tmp directory in MB. The default value is 512, but can be any whole number between 512 and 10,240 MB.

", - "FunctionConfiguration$EphemeralStorage": "

The size of the function’s /tmp directory in MB. The default value is 512, but it can be any whole number between 512 and 10,240 MB.

", - "UpdateFunctionConfigurationRequest$EphemeralStorage": "

The size of the function's /tmp directory in MB. The default value is 512, but can be any whole number between 512 and 10,240 MB.

" + "CreateFunctionRequest$EphemeralStorage": "

The size of the function's /tmp directory in MB. The default value is 512, but can be any whole number between 512 and 10,240 MB. For more information, see Configuring ephemeral storage (console).

", + "FunctionConfiguration$EphemeralStorage": "

The size of the function's /tmp directory in MB. The default value is 512, but can be any whole number between 512 and 10,240 MB. For more information, see Configuring ephemeral storage (console).

", + "UpdateFunctionConfigurationRequest$EphemeralStorage": "

The size of the function's /tmp directory in MB. The default value is 512, but can be any whole number between 512 and 10,240 MB. For more information, see Configuring ephemeral storage (console).
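By way of example (reusing the Lambda client from the sketch above), raising /tmp beyond the default:

```go
// Raise /tmp from the 512 MB default to 2,048 MB (any whole number 512-10,240).
_, err := svc.UpdateFunctionConfiguration(&lambda.UpdateFunctionConfigurationInput{
	FunctionName:     aws.String("my-function"),
	EphemeralStorage: &lambda.EphemeralStorage{Size: aws.Int64(2048)},
})
```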

" } }, "EphemeralStorageSize": { @@ -1930,8 +1930,8 @@ "CompatibleRuntimes$member": null, "CreateFunctionRequest$Runtime": "

The identifier of the function's runtime. Runtime is required if the deployment package is a .zip file archive.

The following list includes deprecated runtimes. For more information, see Runtime deprecation policy.

", "FunctionConfiguration$Runtime": "

The identifier of the function's runtime. Runtime is required if the deployment package is a .zip file archive.

The following list includes deprecated runtimes. For more information, see Runtime deprecation policy.

", - "ListLayerVersionsRequest$CompatibleRuntime": "

A runtime identifier. For example, go1.x.

The following list includes deprecated runtimes. For more information, see Runtime deprecation policy.

", - "ListLayersRequest$CompatibleRuntime": "

A runtime identifier. For example, go1.x.

The following list includes deprecated runtimes. For more information, see Runtime deprecation policy.

", + "ListLayerVersionsRequest$CompatibleRuntime": "

A runtime identifier. For example, java21.

The following list includes deprecated runtimes. For more information, see Runtime deprecation policy.

", + "ListLayersRequest$CompatibleRuntime": "

A runtime identifier. For example, java21.

The following list includes deprecated runtimes. For more information, see Runtime deprecation policy.

", "UpdateFunctionConfigurationRequest$Runtime": "

The identifier of the function's runtime. Runtime is required if the deployment package is a .zip file archive.

The following list includes deprecated runtimes. For more information, see Runtime deprecation policy.

" } }, @@ -2193,7 +2193,7 @@ "InvalidSubnetIDException$Message": null, "InvalidZipFileException$Type": null, "InvalidZipFileException$Message": null, - "InvocationRequest$ClientContext": "

Up to 3,583 bytes of base64-encoded data about the invoking client to pass to the function in the context object.

", + "InvocationRequest$ClientContext": "

Up to 3,583 bytes of base64-encoded data about the invoking client to pass to the function in the context object. Lambda passes the ClientContext object to your function for synchronous invocations only.
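A hedged fragment showing the base64 encoding step; it reuses the Lambda client from the earlier sketch and additionally assumes the standard library encoding/base64 import. The context payload is invented for illustration.

```go
// ClientContext must be base64-encoded JSON and is only passed through on
// synchronous (RequestResponse) invocations.
ctxJSON := []byte(`{"custom":{"source":"billing-batch"}}`) // hypothetical content
_, err := svc.Invoke(&lambda.InvokeInput{
	FunctionName:  aws.String("my-function"),
	ClientContext: aws.String(base64.StdEncoding.EncodeToString(ctxJSON)),
	Payload:       []byte(`{}`),
})
```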

", "InvocationResponse$FunctionError": "

If present, indicates that an error occurred during function execution. Details about the error are included in the response payload.

", "InvocationResponse$LogResult": "

The last 4 KB of the execution log, which is base64-encoded.

", "InvokeWithResponseStreamCompleteEvent$ErrorCode": "

An error code.

", @@ -2311,7 +2311,7 @@ "SystemLogLevel": { "base": null, "refs": { - "LoggingConfig$SystemLogLevel": "

Set this property to filter the system logs for your function that Lambda sends to CloudWatch. Lambda only sends system logs at the selected level and lower.

" + "LoggingConfig$SystemLogLevel": "

Set this property to filter the system logs for your function that Lambda sends to CloudWatch. Lambda only sends system logs at the selected level of detail and lower, where DEBUG is the highest level and WARN is the lowest.

" } }, "TagKey": { diff --git a/models/apis/rds/2014-10-31/docs-2.json b/models/apis/rds/2014-10-31/docs-2.json index 04ef5f26334..0b08f91e522 100644 --- a/models/apis/rds/2014-10-31/docs-2.json +++ b/models/apis/rds/2014-10-31/docs-2.json @@ -2754,7 +2754,7 @@ } }, "Filter": { - "base": "

A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as IDs. The filters supported by a describe operation are documented with the describe operation.

Currently, wildcards are not supported in filters.

The following actions can be filtered:

", + "base": "

A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as IDs. The filters supported by a describe operation are documented with the describe operation.

Currently, wildcards are not supported in filters.

The following actions can be filtered:

", "refs": { "FilterList$member": null } @@ -4638,7 +4638,7 @@ "CreateDBClusterEndpointMessage$EndpointType": "

The type of the endpoint, one of: READER, WRITER, ANY.

", "CreateDBClusterMessage$CharacterSetName": "

The name of the character set (CharacterSet) to associate the DB cluster with.

Valid for Cluster Type: Aurora DB clusters only

", "CreateDBClusterMessage$DatabaseName": "

The name for your database of up to 64 alphanumeric characters. If you don't provide a name, Amazon RDS doesn't create a database in the DB cluster you are creating.

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

", - "CreateDBClusterMessage$DBClusterIdentifier": "

The identifier for this DB cluster. This parameter is stored as a lowercase string.

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

Constraints:

Example: my-cluster1

", + "CreateDBClusterMessage$DBClusterIdentifier": "

The identifier for this DB cluster. This parameter is stored as a lowercase string.

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

Constraints:

Example: my-cluster1

", "CreateDBClusterMessage$DBClusterParameterGroupName": "

The name of the DB cluster parameter group to associate with this DB cluster. If you don't specify a value, then the default DB cluster parameter group for the specified DB engine and version is used.

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

Constraints:

", "CreateDBClusterMessage$DBSubnetGroupName": "

A DB subnet group to associate with this DB cluster.

This setting is required to create a Multi-AZ DB cluster.

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

Constraints:

Example: mydbsubnetgroup

", "CreateDBClusterMessage$Engine": "

The database engine to use for this DB cluster.

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

Valid Values: aurora-mysql | aurora-postgresql | mysql | postgres

", @@ -4663,7 +4663,7 @@ "CreateDBClusterMessage$DBSystemId": "

Reserved for future use.

", "CreateDBClusterMessage$MasterUserSecretKmsKeyId": "

The Amazon Web Services KMS key identifier to encrypt a secret that is automatically generated and managed in Amazon Web Services Secrets Manager.

This setting is valid only if the master user password is managed by RDS in Amazon Web Services Secrets Manager for the DB cluster.

The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN.

If you don't specify MasterUserSecretKmsKeyId, then the aws/secretsmanager KMS key is used to encrypt the secret. If the secret is in a different Amazon Web Services account, then you can't use the aws/secretsmanager KMS key to encrypt the secret, and you must use a customer managed KMS key.

There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

", "CreateDBClusterParameterGroupMessage$DBClusterParameterGroupName": "

The name of the DB cluster parameter group.

Constraints:

This value is stored as a lowercase string.

", - "CreateDBClusterParameterGroupMessage$DBParameterGroupFamily": "

The DB cluster parameter group family name. A DB cluster parameter group can be associated with one and only one DB cluster parameter group family, and can be applied only to a DB cluster running a database engine and engine version compatible with that DB cluster parameter group family.

Aurora MySQL

Example: aurora-mysql5.7, aurora-mysql8.0

Aurora PostgreSQL

Example: aurora-postgresql14

RDS for MySQL

Example: mysql8.0

RDS for PostgreSQL

Example: postgres12

To list all of the available parameter group families for a DB engine, use the following command:

aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine <engine>

For example, to list all of the available parameter group families for the Aurora PostgreSQL DB engine, use the following command:

aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine aurora-postgresql

The output contains duplicates.

The following are the valid DB engine values:

", + "CreateDBClusterParameterGroupMessage$DBParameterGroupFamily": "

The DB cluster parameter group family name. A DB cluster parameter group can be associated with one and only one DB cluster parameter group family, and can be applied only to a DB cluster running a database engine and engine version compatible with that DB cluster parameter group family.

Aurora MySQL

Example: aurora-mysql5.7, aurora-mysql8.0

Aurora PostgreSQL

Example: aurora-postgresql14

RDS for MySQL

Example: mysql8.0

RDS for PostgreSQL

Example: postgres13

To list all of the available parameter group families for a DB engine, use the following command:

aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine <engine>

For example, to list all of the available parameter group families for the Aurora PostgreSQL DB engine, use the following command:

aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine aurora-postgresql

The output contains duplicates.
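The equivalent lookup through the Go SDK, sketched by the editor with deduplication since the raw output repeats families; the engine value mirrors the CLI example above.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rds"
)

func main() {
	svc := rds.New(session.Must(session.NewSession()))

	// Collect the distinct parameter group families for an engine.
	families := map[string]bool{}
	err := svc.DescribeDBEngineVersionsPages(&rds.DescribeDBEngineVersionsInput{
		Engine: aws.String("aurora-postgresql"),
	}, func(page *rds.DescribeDBEngineVersionsOutput, lastPage bool) bool {
		for _, v := range page.DBEngineVersions {
			families[aws.StringValue(v.DBParameterGroupFamily)] = true
		}
		return true
	})
	if err != nil {
		fmt.Println("describe failed:", err)
		return
	}
	for f := range families {
		fmt.Println(f)
	}
}
```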

The following are the valid DB engine values:

", "CreateDBClusterParameterGroupMessage$Description": "

The description for the DB cluster parameter group.

", "CreateDBClusterSnapshotMessage$DBClusterSnapshotIdentifier": "

The identifier of the DB cluster snapshot. This parameter is stored as a lowercase string.

Constraints:

Example: my-cluster1-snapshot1

", "CreateDBClusterSnapshotMessage$DBClusterIdentifier": "

The identifier of the DB cluster to create a snapshot for. This parameter isn't case-sensitive.

Constraints:

Example: my-cluster1

", diff --git a/models/apis/sns/2010-03-31/api-2.json b/models/apis/sns/2010-03-31/api-2.json index d5ddd8926e6..d4e2c8b882b 100644 --- a/models/apis/sns/2010-03-31/api-2.json +++ b/models/apis/sns/2010-03-31/api-2.json @@ -1534,12 +1534,15 @@ }, "exception":true }, - "PhoneNumber":{"type":"string"}, + "PhoneNumber":{ + "type":"string", + "sensitive":true + }, "PhoneNumberInformation":{ "type":"structure", "members":{ "CreatedAt":{"shape":"Timestamp"}, - "PhoneNumber":{"shape":"String"}, + "PhoneNumber":{"shape":"PhoneNumber"}, "Status":{"shape":"String"}, "Iso2CountryCode":{"shape":"Iso2CountryCode"}, "RouteType":{"shape":"RouteType"}, @@ -1557,7 +1560,8 @@ "PhoneNumberString":{ "type":"string", "max":20, - "pattern":"^(\\+[0-9]{8,}|[0-9]{0,9})$" + "pattern":"^(\\+[0-9]{8,}|[0-9]{0,9})$", + "sensitive":true }, "PlatformApplication":{ "type":"structure", @@ -1634,7 +1638,7 @@ "members":{ "TopicArn":{"shape":"topicARN"}, "TargetArn":{"shape":"String"}, - "PhoneNumber":{"shape":"String"}, + "PhoneNumber":{"shape":"PhoneNumber"}, "Message":{"shape":"message"}, "Subject":{"shape":"subject"}, "MessageStructure":{"shape":"messageStructure"}, diff --git a/models/apis/sns/2010-03-31/docs-2.json b/models/apis/sns/2010-03-31/docs-2.json index 510129fb746..553f170341a 100644 --- a/models/apis/sns/2010-03-31/docs-2.json +++ b/models/apis/sns/2010-03-31/docs-2.json @@ -5,7 +5,7 @@ "AddPermission": "

Adds a statement to a topic's access control policy, granting access for the specified Amazon Web Services accounts to the specified actions.

To remove the ability to change topic permissions, you must deny permissions to the AddPermission, RemovePermission, and SetTopicAttributes actions in your IAM policy.

", "CheckIfPhoneNumberIsOptedOut": "

Accepts a phone number and indicates whether the phone holder has opted out of receiving SMS messages from your Amazon Web Services account. You cannot send SMS messages to a number that is opted out.

To resume sending messages, you can opt in the number by using the OptInPhoneNumber action.

", "ConfirmSubscription": "

Verifies an endpoint owner's intent to receive messages by validating the token sent to the endpoint by an earlier Subscribe action. If the token is valid, the action creates a new subscription and returns its Amazon Resource Name (ARN). This call requires an AWS signature only when the AuthenticateOnUnsubscribe flag is set to \"true\".

", - "CreatePlatformApplication": "

Creates a platform application object for one of the supported push notification services, such as APNS and GCM (Firebase Cloud Messaging), to which devices and mobile apps may register. You must specify PlatformPrincipal and PlatformCredential attributes when using the CreatePlatformApplication action.

PlatformPrincipal and PlatformCredential are received from the notification service.

You can use the returned PlatformApplicationArn as an attribute for the CreatePlatformEndpoint action.

", + "CreatePlatformApplication": "

Creates a platform application object for one of the supported push notification services, such as APNS and GCM (Firebase Cloud Messaging), to which devices and mobile apps may register. You must specify PlatformPrincipal and PlatformCredential attributes when using the CreatePlatformApplication action.

PlatformPrincipal and PlatformCredential are received from the notification service.

You can use the returned PlatformApplicationArn as an attribute for the CreatePlatformEndpoint action.

", "CreatePlatformEndpoint": "

Creates an endpoint for a device and mobile app on one of the supported push notification services, such as GCM (Firebase Cloud Messaging) and APNS. CreatePlatformEndpoint requires the PlatformApplicationArn that is returned from CreatePlatformApplication. You can use the returned EndpointArn to send a message to a mobile app or by the Subscribe action for subscription to a topic. The CreatePlatformEndpoint action is idempotent, so if the requester already owns an endpoint with the same device token and attributes, that endpoint's ARN is returned without creating a new endpoint. For more information, see Using Amazon SNS Mobile Push Notifications.

When using CreatePlatformEndpoint with Baidu, two attributes must be provided: ChannelId and UserId. The token field must also contain the ChannelId. For more information, see Creating an Amazon SNS Endpoint for Baidu.

", "CreateSMSSandboxPhoneNumber": "

Adds a destination phone number to an Amazon Web Services account in the SMS sandbox and sends a one-time password (OTP) to that phone number.

When you start using Amazon SNS to send SMS messages, your Amazon Web Services account is in the SMS sandbox. The SMS sandbox provides a safe environment for you to try Amazon SNS features without risking your reputation as an SMS sender. While your Amazon Web Services account is in the SMS sandbox, you can use all of the features of Amazon SNS. However, you can send SMS messages only to verified destination phone numbers. For more information, including how to move out of the sandbox to send messages without restrictions, see SMS sandbox in the Amazon SNS Developer Guide.

", "CreateTopic": "

Creates a topic to which notifications can be published. Users can create at most 100,000 standard topics (at most 1,000 FIFO topics). For more information, see Creating an Amazon SNS topic in the Amazon SNS Developer Guide. This action is idempotent, so if the requester already owns a topic with the specified name, that topic's ARN is returned without creating a new topic.

", @@ -170,12 +170,12 @@ } }, "DeleteEndpointInput": { - "base": "

Input for DeleteEndpoint action.

", + "base": "

Input for DeleteEndpoint action.

", "refs": { } }, "DeletePlatformApplicationInput": { - "base": "

Input for DeletePlatformApplication action.

", + "base": "

Input for DeletePlatformApplication action.

", "refs": { } }, @@ -226,22 +226,22 @@ } }, "GetEndpointAttributesInput": { - "base": "

Input for GetEndpointAttributes action.

", + "base": "

Input for GetEndpointAttributes action.

", "refs": { } }, "GetEndpointAttributesResponse": { - "base": "

Response from GetEndpointAttributes of the EndpointArn.

", + "base": "

Response from GetEndpointAttributes of the EndpointArn.

", "refs": { } }, "GetPlatformApplicationAttributesInput": { - "base": "

Input for GetPlatformApplicationAttributes action.

", + "base": "

Input for GetPlatformApplicationAttributes action.

", "refs": { } }, "GetPlatformApplicationAttributesResponse": { - "base": "

Response for GetPlatformApplicationAttributes action.

", + "base": "

Response for GetPlatformApplicationAttributes action.

", "refs": { } }, @@ -358,25 +358,25 @@ } }, "ListEndpointsByPlatformApplicationInput": { - "base": "

Input for ListEndpointsByPlatformApplication action.

", + "base": "

Input for ListEndpointsByPlatformApplication action.

", "refs": { } }, "ListEndpointsByPlatformApplicationResponse": { - "base": "

Response for ListEndpointsByPlatformApplication action.

", + "base": "

Response for ListEndpointsByPlatformApplication action.

", "refs": { } }, "ListOfEndpoints": { "base": null, "refs": { - "ListEndpointsByPlatformApplicationResponse$Endpoints": "

Endpoints returned for ListEndpointsByPlatformApplication action.

" + "ListEndpointsByPlatformApplicationResponse$Endpoints": "

Endpoints returned for ListEndpointsByPlatformApplication action.

" } }, "ListOfPlatformApplications": { "base": null, "refs": { - "ListPlatformApplicationsResponse$PlatformApplications": "

Platform applications returned when calling ListPlatformApplications action.

" + "ListPlatformApplicationsResponse$PlatformApplications": "

Platform applications returned when calling ListPlatformApplications action.

" } }, "ListOriginationNumbersRequest": { @@ -400,12 +400,12 @@ } }, "ListPlatformApplicationsInput": { - "base": "

Input for ListPlatformApplications action.

", + "base": "

Input for ListPlatformApplications action.

", "refs": { } }, "ListPlatformApplicationsResponse": { - "base": "

Response for ListPlatformApplications action.

", + "base": "

Response for ListPlatformApplications action.

", "refs": { } }, @@ -468,15 +468,15 @@ "MapStringToString": { "base": null, "refs": { - "CreatePlatformApplicationInput$Attributes": "

For a list of attributes, see SetPlatformApplicationAttributes.

", - "CreatePlatformEndpointInput$Attributes": "

For a list of attributes, see SetEndpointAttributes.

", + "CreatePlatformApplicationInput$Attributes": "

For a list of attributes, see SetPlatformApplicationAttributes .

", + "CreatePlatformEndpointInput$Attributes": "

For a list of attributes, see SetEndpointAttributes .

", "Endpoint$Attributes": "

Attributes for endpoint.

", "GetEndpointAttributesResponse$Attributes": "

Attributes include the following:

", - "GetPlatformApplicationAttributesResponse$Attributes": "

Attributes include the following:

", + "GetPlatformApplicationAttributesResponse$Attributes": "

Attributes include the following:

", "GetSMSAttributesResponse$attributes": "

The SMS attribute names and their values.

", "PlatformApplication$Attributes": "

Attributes for platform application object.

", "SetEndpointAttributesInput$Attributes": "

A map of the endpoint attributes. Attributes in this map include the following:

", - "SetPlatformApplicationAttributesInput$Attributes": "

A map of the platform application attributes. Attributes in this map include the following:

The following attributes only apply to APNs token-based authentication:

", + "SetPlatformApplicationAttributesInput$Attributes": "

A map of the platform application attributes. Attributes in this map include the following:

The following attributes only apply to APNs token-based authentication:

", "SetSMSAttributesInput$attributes": "

The default settings for sending SMS messages from your Amazon Web Services account. You can set values for the following attribute names:

MonthlySpendLimit – The maximum amount in USD that you are willing to spend each month to send SMS messages. When Amazon SNS determines that sending an SMS message would incur a cost that exceeds this limit, it stops sending SMS messages within minutes.

Amazon SNS stops sending SMS messages within minutes of the limit being crossed. During that interval, if you continue to send SMS messages, you will incur costs that exceed your limit.

By default, the spend limit is set to the maximum allowed by Amazon SNS. If you want to raise the limit, submit an SNS Limit Increase case. For New limit value, enter your desired monthly spend limit. In the Use Case Description field, explain that you are requesting an SMS monthly spend limit increase.

DeliveryStatusIAMRole – The ARN of the IAM role that allows Amazon SNS to write logs about SMS deliveries in CloudWatch Logs. For each SMS message that you send, Amazon SNS writes a log that includes the message price, the success or failure status, the reason for failure (if the message failed), the message dwell time, and other information.

DeliveryStatusSuccessSamplingRate – The percentage of successful SMS deliveries for which Amazon SNS will write logs in CloudWatch Logs. The value can be an integer from 0 - 100. For example, to write logs only for failed deliveries, set this value to 0. To write logs for 10% of your successful deliveries, set it to 10.

DefaultSenderID – A string, such as your business brand, that is displayed as the sender on the receiving device. Support for sender IDs varies by country. The sender ID can be 1 - 11 alphanumeric characters, and it must contain at least one letter.

DefaultSMSType – The type of SMS message that you will send by default. You can assign the following values:

UsageReportS3Bucket – The name of the Amazon S3 bucket to receive daily SMS usage reports from Amazon SNS. Each day, Amazon SNS will deliver a usage report as a CSV file to the bucket. The report includes the following information for each SMS message that was successfully delivered by your Amazon Web Services account:

To receive the report, the bucket must have a policy that allows the Amazon SNS service principal to perform the s3:PutObject and s3:GetBucketLocation actions.

For an example bucket policy and usage report, see Monitoring SMS Activity in the Amazon SNS Developer Guide.
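An editorial sketch of setting a few of these account-level SMS attributes with aws-sdk-go; the spend limit, sender ID, and default type are invented values.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sns"
)

func main() {
	svc := sns.New(session.Must(session.NewSession()))

	// Cap monthly SMS spend at 5 USD and default to transactional delivery.
	_, err := svc.SetSMSAttributes(&sns.SetSMSAttributesInput{
		Attributes: map[string]*string{
			"MonthlySpendLimit": aws.String("5"),
			"DefaultSMSType":    aws.String("Transactional"),
			"DefaultSenderID":   aws.String("MyBrand"),
		},
	})
	if err != nil {
		fmt.Println("SetSMSAttributes failed:", err)
	}
}
```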

" } }, @@ -548,7 +548,9 @@ "refs": { "CheckIfPhoneNumberIsOptedOutInput$phoneNumber": "

The phone number for which you want to check the opt out status.

", "OptInPhoneNumberInput$phoneNumber": "

The phone number to opt in. Use E.164 format.

", - "PhoneNumberList$member": null + "PhoneNumberInformation$PhoneNumber": "

The phone number.

", + "PhoneNumberList$member": null, + "PublishInput$PhoneNumber": "

The phone number to which you want to deliver an SMS message. Use E.164 format.

If you don't specify a value for the PhoneNumber parameter, you must specify a value for the TargetArn or TopicArn parameters.
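Continuing the SNS sketch above, a fragment publishing directly to an E.164 number instead of a topic or platform endpoint; the number and message text are placeholders.

```go
// Deliver a one-off SMS; PhoneNumber replaces TopicArn/TargetArn here.
_, err := svc.Publish(&sns.PublishInput{
	PhoneNumber: aws.String("+15555550100"), // hypothetical number
	Message:     aws.String("Your verification code is 123456"),
})
```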

" } }, "PhoneNumberInformation": { @@ -678,12 +680,12 @@ } }, "SetEndpointAttributesInput": { - "base": "

Input for SetEndpointAttributes action.

", + "base": "

Input for SetEndpointAttributes action.

", "refs": { } }, "SetPlatformApplicationAttributesInput": { - "base": "

Input for SetPlatformApplicationAttributes action.

", + "base": "

Input for SetPlatformApplicationAttributes action.

", "refs": { } }, @@ -721,27 +723,26 @@ "CreateEndpointResponse$EndpointArn": "

EndpointArn returned from CreateEndpoint action.

", "CreatePlatformApplicationInput$Name": "

Application names must be made up of only uppercase and lowercase ASCII letters, numbers, underscores, hyphens, and periods, and must be between 1 and 256 characters long.

", "CreatePlatformApplicationInput$Platform": "

The following platforms are supported: ADM (Amazon Device Messaging), APNS (Apple Push Notification Service), APNS_SANDBOX, and GCM (Firebase Cloud Messaging).

", - "CreatePlatformApplicationResponse$PlatformApplicationArn": "

PlatformApplicationArn is returned.

", - "CreatePlatformEndpointInput$PlatformApplicationArn": "

PlatformApplicationArn returned from CreatePlatformApplication is used to create a an endpoint.

", + "CreatePlatformApplicationResponse$PlatformApplicationArn": "

PlatformApplicationArn is returned.

", + "CreatePlatformEndpointInput$PlatformApplicationArn": "

PlatformApplicationArn returned from CreatePlatformApplication is used to create an endpoint.

", "CreatePlatformEndpointInput$Token": "

Unique identifier created by the notification service for an app on a device. The specific name for Token will vary, depending on which notification service is being used. For example, when using APNS as the notification service, you need the device token. Alternatively, when using GCM (Firebase Cloud Messaging) or ADM, the device token equivalent is called the registration ID.

", "CreatePlatformEndpointInput$CustomUserData": "

Arbitrary user data to associate with the endpoint. Amazon SNS does not use this data. The data must be in UTF-8 format and less than 2KB.

", - "DeleteEndpointInput$EndpointArn": "

EndpointArn of endpoint to delete.

", - "DeletePlatformApplicationInput$PlatformApplicationArn": "

PlatformApplicationArn of platform application object to delete.

", + "DeleteEndpointInput$EndpointArn": "

EndpointArn of endpoint to delete.

", + "DeletePlatformApplicationInput$PlatformApplicationArn": "

PlatformApplicationArn of platform application object to delete.

", "Endpoint$EndpointArn": "

The EndpointArn for mobile app and device.

", - "GetEndpointAttributesInput$EndpointArn": "

EndpointArn for GetEndpointAttributes input.

", - "GetPlatformApplicationAttributesInput$PlatformApplicationArn": "

PlatformApplicationArn for GetPlatformApplicationAttributesInput.

", - "ListEndpointsByPlatformApplicationInput$PlatformApplicationArn": "

PlatformApplicationArn for ListEndpointsByPlatformApplicationInput action.

", - "ListEndpointsByPlatformApplicationInput$NextToken": "

NextToken string is used when calling ListEndpointsByPlatformApplication action to retrieve additional records that are available after the first page results.

", - "ListEndpointsByPlatformApplicationResponse$NextToken": "

NextToken string is returned when calling ListEndpointsByPlatformApplication action if additional records are available after the first page results.

", - "ListPlatformApplicationsInput$NextToken": "

NextToken string is used when calling ListPlatformApplications action to retrieve additional records that are available after the first page results.

", - "ListPlatformApplicationsResponse$NextToken": "

NextToken string is returned when calling ListPlatformApplications action if additional records are available after the first page results.

", + "GetEndpointAttributesInput$EndpointArn": "

EndpointArn for GetEndpointAttributes input.

", + "GetPlatformApplicationAttributesInput$PlatformApplicationArn": "

PlatformApplicationArn for GetPlatformApplicationAttributesInput.

", + "ListEndpointsByPlatformApplicationInput$PlatformApplicationArn": "

PlatformApplicationArn for ListEndpointsByPlatformApplicationInput action.

", + "ListEndpointsByPlatformApplicationInput$NextToken": "

NextToken string is used when calling ListEndpointsByPlatformApplication action to retrieve additional records that are available after the first page results.

", + "ListEndpointsByPlatformApplicationResponse$NextToken": "

NextToken string is returned when calling ListEndpointsByPlatformApplication action if additional records are available after the first page results.

", + "ListPlatformApplicationsInput$NextToken": "

NextToken string is used when calling ListPlatformApplications action to retrieve additional records that are available after the first page results.

", + "ListPlatformApplicationsResponse$NextToken": "

NextToken string is returned when calling ListPlatformApplications action if additional records are available after the first page results.

", "ListString$member": null, "MapStringToString$key": null, "MapStringToString$value": null, "MessageAttributeMap$key": null, "MessageAttributeValue$DataType": "

Amazon SNS supports the following logical data types: String, String.Array, Number, and Binary. For more information, see Message Attribute Data Types.

", "MessageAttributeValue$StringValue": "

Strings are Unicode with UTF8 binary encoding. For a list of code values, see ASCII Printable Characters.

", - "PhoneNumberInformation$PhoneNumber": "

The phone number.

", "PhoneNumberInformation$Status": "

The status of the phone number.

", "PlatformApplication$PlatformApplicationArn": "

PlatformApplicationArn for platform application object.

", "PublishBatchRequestEntry$Id": "

An identifier for the message in this batch.

The Ids of a batch request must be unique within a request.

This identifier can have up to 80 characters. The following characters are accepted: alphanumeric characters, hyphens(-), and underscores (_).

", @@ -750,12 +751,11 @@ "PublishBatchResultEntry$Id": "

The Id of an entry in a batch request.

", "PublishBatchResultEntry$SequenceNumber": "

This parameter applies only to FIFO (first-in-first-out) topics.

The large, non-consecutive number that Amazon SNS assigns to each message.

The length of SequenceNumber is 128 bits. SequenceNumber continues to increase for a particular MessageGroupId.

", "PublishInput$TargetArn": "

If you don't specify a value for the TargetArn parameter, you must specify a value for the PhoneNumber or TopicArn parameters.

", - "PublishInput$PhoneNumber": "

The phone number to which you want to deliver an SMS message. Use E.164 format.

If you don't specify a value for the PhoneNumber parameter, you must specify a value for the TargetArn or TopicArn parameters.

", "PublishInput$MessageDeduplicationId": "

This parameter applies only to FIFO (first-in-first-out) topics. The MessageDeduplicationId can contain up to 128 alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~).

Every message must have a unique MessageDeduplicationId, which is a token used for deduplication of sent messages. If a message with a particular MessageDeduplicationId is sent successfully, any message sent with the same MessageDeduplicationId during the 5-minute deduplication interval is treated as a duplicate.

If the topic has ContentBasedDeduplication set, the system generates a MessageDeduplicationId based on the contents of the message. Your MessageDeduplicationId overrides the generated one.

", "PublishInput$MessageGroupId": "

This parameter applies only to FIFO (first-in-first-out) topics. The MessageGroupId can contain up to 128 alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~).

The MessageGroupId is a tag that specifies that a message belongs to a specific message group. Messages that belong to the same message group are processed in a FIFO manner (however, messages in different message groups might be processed out of order). Every message must include a MessageGroupId.

", "PublishResponse$SequenceNumber": "

This response element applies only to FIFO (first-in-first-out) topics.

The sequence number is a large, non-consecutive number that Amazon SNS assigns to each message. The length of SequenceNumber is 128 bits. SequenceNumber continues to increase for each MessageGroupId.

", - "SetEndpointAttributesInput$EndpointArn": "

EndpointArn used for SetEndpointAttributes action.

", - "SetPlatformApplicationAttributesInput$PlatformApplicationArn": "

PlatformApplicationArn for SetPlatformApplicationAttributes action.

" + "SetEndpointAttributesInput$EndpointArn": "

EndpointArn used for SetEndpointAttributes action.

", + "SetPlatformApplicationAttributesInput$PlatformApplicationArn": "

PlatformApplicationArn for SetPlatformApplicationAttributes action.

" } }, "SubscribeInput": { diff --git a/models/endpoints/endpoints.json b/models/endpoints/endpoints.json index 760a86de543..c931c4beaa8 100644 --- a/models/endpoints/endpoints.json +++ b/models/endpoints/endpoints.json @@ -2259,10 +2259,58 @@ "eu-central-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, - "us-east-1" : { }, - "us-east-2" : { }, - "us-west-1" : { }, - "us-west-2" : { } + "us-east-1" : { + "variants" : [ { + "hostname" : "auditmanager-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-1-fips" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "auditmanager-fips.us-east-1.amazonaws.com" + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "auditmanager-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2-fips" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "auditmanager-fips.us-east-2.amazonaws.com" + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "auditmanager-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1-fips" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "auditmanager-fips.us-west-1.amazonaws.com" + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "auditmanager-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2-fips" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "auditmanager-fips.us-west-2.amazonaws.com" + } } }, "autoscaling" : { @@ -3212,6 +3260,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, @@ -5815,6 +5864,12 @@ "tags" : [ "fips" ] } ] }, + "ca-west-1" : { + "variants" : [ { + "hostname" : "ec2-fips.ca-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, "eu-central-1" : { }, "eu-central-2" : { }, "eu-north-1" : { }, @@ -5835,6 +5890,13 @@ "deprecated" : true, "hostname" : "ec2-fips.ca-central-1.amazonaws.com" }, + "fips-ca-west-1" : { + "credentialScope" : { + "region" : "ca-west-1" + }, + "deprecated" : true, + "hostname" : "ec2-fips.ca-west-1.amazonaws.com" + }, "fips-us-east-1" : { "credentialScope" : { "region" : "us-east-1" @@ -6422,6 +6484,12 @@ "tags" : [ "fips" ] } ] }, + "ca-west-1" : { + "variants" : [ { + "hostname" : "elasticfilesystem-fips.ca-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, "eu-central-1" : { "variants" : [ { "hostname" : "elasticfilesystem-fips.eu-central-1.amazonaws.com", @@ -6554,6 +6622,13 @@ "deprecated" : true, "hostname" : "elasticfilesystem-fips.ca-central-1.amazonaws.com" }, + "fips-ca-west-1" : { + "credentialScope" : { + "region" : "ca-west-1" + }, + "deprecated" : true, + "hostname" : "elasticfilesystem-fips.ca-west-1.amazonaws.com" + }, "fips-eu-central-1" : { "credentialScope" : { "region" : "eu-central-1" @@ -12781,8 +12856,10 @@ "ap-southeast-2" : { }, "ca-central-1" : { }, "eu-central-1" : { }, + "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, + "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, "us-west-1" : { }, @@ -14164,16 +14241,76 @@ "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, - "ca-central-1" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "redshift-serverless-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, "eu-central-1" : { }, "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, - "us-east-1" : { }, - 
"us-east-2" : { }, - "us-west-1" : { }, - "us-west-2" : { } + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "redshift-serverless-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "redshift-serverless-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "redshift-serverless-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "redshift-serverless-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "redshift-serverless-fips.us-west-2.amazonaws.com" + }, + "us-east-1" : { + "variants" : [ { + "hostname" : "redshift-serverless-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "redshift-serverless-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "redshift-serverless-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "redshift-serverless-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } } }, "rekognition" : { diff --git a/service/connectparticipant/api.go b/service/connectparticipant/api.go index 1ea400dc335..8779c50fde9 100644 --- a/service/connectparticipant/api.go +++ b/service/connectparticipant/api.go @@ -57,7 +57,8 @@ func (c *ConnectParticipant) CompleteAttachmentUploadRequest(input *CompleteAtta // CompleteAttachmentUpload API operation for Amazon Connect Participant Service. // // Allows you to confirm that the attachment has been uploaded using the pre-signed -// URL provided in StartAttachmentUpload API. +// URL provided in StartAttachmentUpload API. A conflict exception is thrown +// when an attachment with that identifier is already being uploaded. // // ConnectionToken is used for invoking this API instead of ParticipantToken. // @@ -90,7 +91,8 @@ func (c *ConnectParticipant) CompleteAttachmentUploadRequest(input *CompleteAtta // The number of attachments per contact exceeds the quota. // // - ConflictException -// An attachment with that identifier is already being uploaded. +// The requested operation conflicts with the current state of a service resource +// associated with the request. // // See also, https://docs.aws.amazon.com/goto/WebAPI/connectparticipant-2018-09-07/CompleteAttachmentUpload func (c *ConnectParticipant) CompleteAttachmentUpload(input *CompleteAttachmentUploadInput) (*CompleteAttachmentUploadOutput, error) { @@ -571,6 +573,20 @@ func (c *ConnectParticipant) GetTranscriptRequest(input *GetTranscriptInput) (re // For information about accessing past chat contact transcripts for a persistent // chat, see Enable persistent chat (https://docs.aws.amazon.com/connect/latest/adminguide/chat-persistence.html). 
// +// If you have a process that consumes events in the transcript of an chat that +// has ended, note that chat transcripts contain the following event content +// types if the event has occurred during the chat session: +// +// - application/vnd.amazonaws.connect.event.participant.left +// +// - application/vnd.amazonaws.connect.event.participant.joined +// +// - application/vnd.amazonaws.connect.event.chat.ended +// +// - application/vnd.amazonaws.connect.event.transfer.succeeded +// +// - application/vnd.amazonaws.connect.event.transfer.failed +// // ConnectionToken is used for invoking this API instead of ParticipantToken. // // The Amazon Connect Participant Service APIs do not use Signature Version @@ -714,7 +730,14 @@ func (c *ConnectParticipant) SendEventRequest(input *SendEventInput) (req *reque // SendEvent API operation for Amazon Connect Participant Service. // -// Sends an event. +// The application/vnd.amazonaws.connect.event.connection.acknowledged ContentType +// will no longer be supported starting December 31, 2024. This event has been +// migrated to the CreateParticipantConnection (https://docs.aws.amazon.com/connect-participant/latest/APIReference/API_CreateParticipantConnection.html) +// API using the ConnectParticipant field. +// +// Sends an event. Message receipts are not supported when there are more than +// two active participants in the chat. Using the SendEvent API for message +// receipts when a supervisor is barged-in will result in a conflict exception. // // ConnectionToken is used for invoking this API instead of ParticipantToken. // @@ -744,7 +767,8 @@ func (c *ConnectParticipant) SendEventRequest(input *SendEventInput) (req *reque // The input fails to satisfy the constraints specified by Amazon Connect. // // - ConflictException -// An attachment with that identifier is already being uploaded. +// The requested operation conflicts with the current state of a service resource +// associated with the request. // // See also, https://docs.aws.amazon.com/goto/WebAPI/connectparticipant-2018-09-07/SendEvent func (c *ConnectParticipant) SendEvent(input *SendEventInput) (*SendEventOutput, error) { @@ -1189,7 +1213,8 @@ func (s CompleteAttachmentUploadOutput) GoString() string { return s.String() } -// An attachment with that identifier is already being uploaded. +// The requested operation conflicts with the current state of a service resource +// associated with the request. type ConflictException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` @@ -2259,7 +2284,8 @@ type SendEventInput struct { // // * application/vnd.amazonaws.connect.event.typing // - // * application/vnd.amazonaws.connect.event.connection.acknowledged + // * application/vnd.amazonaws.connect.event.connection.acknowledged (will + // be deprecated on December 31, 2024) // // * application/vnd.amazonaws.connect.event.message.delivered // diff --git a/service/connectparticipant/errors.go b/service/connectparticipant/errors.go index 8880b714425..64e90955b19 100644 --- a/service/connectparticipant/errors.go +++ b/service/connectparticipant/errors.go @@ -17,7 +17,8 @@ const ( // ErrCodeConflictException for service response error code // "ConflictException". // - // An attachment with that identifier is already being uploaded. + // The requested operation conflicts with the current state of a service resource + // associated with the request. 
ErrCodeConflictException = "ConflictException" // ErrCodeInternalServerException for service response error code diff --git a/service/emr/api.go b/service/emr/api.go index 94f21be74bb..98b590d53c0 100644 --- a/service/emr/api.go +++ b/service/emr/api.go @@ -4822,7 +4822,7 @@ func (c *EMR) SetTerminationProtectionRequest(input *SetTerminationProtectionInp // to true, you must first unlock the job flow by a subsequent call to SetTerminationProtection // in which you set the value to false. // -// For more information, seeManaging Cluster Termination (https://docs.aws.amazon.com/emr/latest/ManagementGuide/UsingEMR_TerminationProtection.html) +// For more information, see Managing Cluster Termination (https://docs.aws.amazon.com/emr/latest/ManagementGuide/UsingEMR_TerminationProtection.html) // in the Amazon EMR Management Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -4859,6 +4859,100 @@ func (c *EMR) SetTerminationProtectionWithContext(ctx aws.Context, input *SetTer return out, req.Send() } +const opSetUnhealthyNodeReplacement = "SetUnhealthyNodeReplacement" + +// SetUnhealthyNodeReplacementRequest generates a "aws/request.Request" representing the +// client's request for the SetUnhealthyNodeReplacement operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See SetUnhealthyNodeReplacement for more information on using the SetUnhealthyNodeReplacement +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the SetUnhealthyNodeReplacementRequest method. +// req, resp := client.SetUnhealthyNodeReplacementRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticmapreduce-2009-03-31/SetUnhealthyNodeReplacement +func (c *EMR) SetUnhealthyNodeReplacementRequest(input *SetUnhealthyNodeReplacementInput) (req *request.Request, output *SetUnhealthyNodeReplacementOutput) { + op := &request.Operation{ + Name: opSetUnhealthyNodeReplacement, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetUnhealthyNodeReplacementInput{} + } + + output = &SetUnhealthyNodeReplacementOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// SetUnhealthyNodeReplacement API operation for Amazon EMR. +// +// Specify whether to enable unhealthy node replacement, which lets Amazon EMR +// gracefully replace core nodes on a cluster if any nodes become unhealthy. +// For example, a node becomes unhealthy if disk usage is above 90%. If unhealthy +// node replacement is on and TerminationProtected are off, Amazon EMR immediately +// terminates the unhealthy core nodes. To use unhealthy node replacement and +// retain unhealthy core nodes, use to turn on termination protection. In such +// cases, Amazon EMR adds the unhealthy nodes to a denylist, reducing job interruptions +// and failures. 
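// Illustrative sketch: turning on graceful unhealthy node replacement for an existing
// cluster with the SetUnhealthyNodeReplacement operation introduced above. The cluster
// ID is a placeholder.
package main

import (
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/emr"
)

func main() {
    sess := session.Must(session.NewSession())
    svc := emr.New(sess)

    // Per the documentation above, combine this with SetTerminationProtection
    // if unhealthy core nodes should be retained rather than terminated.
    _, err := svc.SetUnhealthyNodeReplacement(&emr.SetUnhealthyNodeReplacementInput{
        JobFlowIds:               []*string{aws.String("j-EXAMPLECLUSTERID")}, // placeholder cluster ID
        UnhealthyNodeReplacement: aws.Bool(true),
    })
    if err != nil {
        log.Fatal(err)
    }
}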
+// +// If unhealthy node replacement is on, Amazon EMR notifies YARN and other applications +// on the cluster to stop scheduling tasks with these nodes, moves the data, +// and then terminates the nodes. +// +// For more information, see graceful node replacement (https://docs.aws.amazon.com/emr/latest/ManagementGuide/UsingEMR_UnhealthyNodeReplacement.html) +// in the Amazon EMR Management Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EMR's +// API operation SetUnhealthyNodeReplacement for usage and error information. +// +// Returned Error Types: +// - InternalServerError +// Indicates that an error occurred while processing the request and that the +// request was not completed. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticmapreduce-2009-03-31/SetUnhealthyNodeReplacement +func (c *EMR) SetUnhealthyNodeReplacement(input *SetUnhealthyNodeReplacementInput) (*SetUnhealthyNodeReplacementOutput, error) { + req, out := c.SetUnhealthyNodeReplacementRequest(input) + return out, req.Send() +} + +// SetUnhealthyNodeReplacementWithContext is the same as SetUnhealthyNodeReplacement with the addition of +// the ability to pass a context and additional request options. +// +// See SetUnhealthyNodeReplacement for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EMR) SetUnhealthyNodeReplacementWithContext(ctx aws.Context, input *SetUnhealthyNodeReplacementInput, opts ...request.Option) (*SetUnhealthyNodeReplacementOutput, error) { + req, out := c.SetUnhealthyNodeReplacementRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opSetVisibleToAllUsers = "SetVisibleToAllUsers" // SetVisibleToAllUsersRequest generates a "aws/request.Request" representing the @@ -6833,6 +6927,10 @@ type Cluster struct { // or in the event of a cluster error. TerminationProtected *bool `type:"boolean"` + // Indicates whether Amazon EMR should gracefully replace Amazon EC2 core instances + // that have degraded within the cluster. + UnhealthyNodeReplacement *bool `type:"boolean"` + // Indicates whether the cluster is visible to IAM principals in the Amazon // Web Services account associated with the cluster. When true, IAM principals // in the Amazon Web Services account can perform Amazon EMR cluster actions @@ -7057,6 +7155,12 @@ func (s *Cluster) SetTerminationProtected(v bool) *Cluster { return s } +// SetUnhealthyNodeReplacement sets the UnhealthyNodeReplacement field's value. +func (s *Cluster) SetUnhealthyNodeReplacement(v bool) *Cluster { + s.UnhealthyNodeReplacement = &v + return s +} + // SetVisibleToAllUsers sets the VisibleToAllUsers field's value. func (s *Cluster) SetVisibleToAllUsers(v bool) *Cluster { s.VisibleToAllUsers = &v @@ -12725,6 +12829,10 @@ type JobFlowInstancesConfig struct { // from being terminated by API call, user intervention, or in the event of // a job-flow error. TerminationProtected *bool `type:"boolean"` + + // Indicates whether Amazon EMR should gracefully replace core nodes that have + // degraded within the cluster. 
+ UnhealthyNodeReplacement *bool `type:"boolean"` } // String returns the string representation. @@ -12883,6 +12991,12 @@ func (s *JobFlowInstancesConfig) SetTerminationProtected(v bool) *JobFlowInstanc return s } +// SetUnhealthyNodeReplacement sets the UnhealthyNodeReplacement field's value. +func (s *JobFlowInstancesConfig) SetUnhealthyNodeReplacement(v bool) *JobFlowInstancesConfig { + s.UnhealthyNodeReplacement = &v + return s +} + // Specify the type of Amazon EC2 instances that the cluster (job flow) runs // on. type JobFlowInstancesDetail struct { @@ -12946,6 +13060,10 @@ type JobFlowInstancesDetail struct { // termination by API calls, user intervention, or in the event of a job-flow // error. TerminationProtected *bool `type:"boolean"` + + // Indicates whether Amazon EMR should gracefully replace core nodes that have + // degraded within the cluster. + UnhealthyNodeReplacement *bool `type:"boolean"` } // String returns the string representation. @@ -13044,6 +13162,12 @@ func (s *JobFlowInstancesDetail) SetTerminationProtected(v bool) *JobFlowInstanc return s } +// SetUnhealthyNodeReplacement sets the UnhealthyNodeReplacement field's value. +func (s *JobFlowInstancesDetail) SetUnhealthyNodeReplacement(v bool) *JobFlowInstancesDetail { + s.UnhealthyNodeReplacement = &v + return s +} + // Attributes for Kerberos configuration when Kerberos authentication is enabled // using a security configuration. For more information see Use Kerberos Authentication // (https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-kerberos.html) @@ -17703,6 +17827,90 @@ func (s SetTerminationProtectionOutput) GoString() string { return s.String() } +type SetUnhealthyNodeReplacementInput struct { + _ struct{} `type:"structure"` + + // The list of strings that uniquely identify the clusters for which to turn + // on unhealthy node replacement. You can get these identifiers by running the + // RunJobFlow or the DescribeJobFlows operations. + // + // JobFlowIds is a required field + JobFlowIds []*string `type:"list" required:"true"` + + // Indicates whether to turn on or turn off graceful unhealthy node replacement. + // + // UnhealthyNodeReplacement is a required field + UnhealthyNodeReplacement *bool `type:"boolean" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SetUnhealthyNodeReplacementInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SetUnhealthyNodeReplacementInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SetUnhealthyNodeReplacementInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetUnhealthyNodeReplacementInput"} + if s.JobFlowIds == nil { + invalidParams.Add(request.NewErrParamRequired("JobFlowIds")) + } + if s.UnhealthyNodeReplacement == nil { + invalidParams.Add(request.NewErrParamRequired("UnhealthyNodeReplacement")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetJobFlowIds sets the JobFlowIds field's value. 
+func (s *SetUnhealthyNodeReplacementInput) SetJobFlowIds(v []*string) *SetUnhealthyNodeReplacementInput { + s.JobFlowIds = v + return s +} + +// SetUnhealthyNodeReplacement sets the UnhealthyNodeReplacement field's value. +func (s *SetUnhealthyNodeReplacementInput) SetUnhealthyNodeReplacement(v bool) *SetUnhealthyNodeReplacementInput { + s.UnhealthyNodeReplacement = &v + return s +} + +type SetUnhealthyNodeReplacementOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SetUnhealthyNodeReplacementOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SetUnhealthyNodeReplacementOutput) GoString() string { + return s.String() +} + // The input to the SetVisibleToAllUsers action. type SetVisibleToAllUsersInput struct { _ struct{} `type:"structure"` diff --git a/service/emr/emriface/interface.go b/service/emr/emriface/interface.go index 83f837afd10..8f1173ed90f 100644 --- a/service/emr/emriface/interface.go +++ b/service/emr/emriface/interface.go @@ -292,6 +292,10 @@ type EMRAPI interface { SetTerminationProtectionWithContext(aws.Context, *emr.SetTerminationProtectionInput, ...request.Option) (*emr.SetTerminationProtectionOutput, error) SetTerminationProtectionRequest(*emr.SetTerminationProtectionInput) (*request.Request, *emr.SetTerminationProtectionOutput) + SetUnhealthyNodeReplacement(*emr.SetUnhealthyNodeReplacementInput) (*emr.SetUnhealthyNodeReplacementOutput, error) + SetUnhealthyNodeReplacementWithContext(aws.Context, *emr.SetUnhealthyNodeReplacementInput, ...request.Option) (*emr.SetUnhealthyNodeReplacementOutput, error) + SetUnhealthyNodeReplacementRequest(*emr.SetUnhealthyNodeReplacementInput) (*request.Request, *emr.SetUnhealthyNodeReplacementOutput) + SetVisibleToAllUsers(*emr.SetVisibleToAllUsersInput) (*emr.SetVisibleToAllUsersOutput, error) SetVisibleToAllUsersWithContext(aws.Context, *emr.SetVisibleToAllUsersInput, ...request.Option) (*emr.SetVisibleToAllUsersOutput, error) SetVisibleToAllUsersRequest(*emr.SetVisibleToAllUsersInput) (*request.Request, *emr.SetVisibleToAllUsersOutput) diff --git a/service/firehose/api.go b/service/firehose/api.go index cd01fe2020d..2f0283b793c 100644 --- a/service/firehose/api.go +++ b/service/firehose/api.go @@ -56,7 +56,7 @@ func (c *Firehose) CreateDeliveryStreamRequest(input *CreateDeliveryStreamInput) // CreateDeliveryStream API operation for Amazon Kinesis Firehose. // -// Creates a Kinesis Data Firehose delivery stream. +// Creates a Firehose delivery stream. // // By default, you can create up to 50 delivery streams per Amazon Web Services // Region. @@ -72,8 +72,8 @@ func (c *Firehose) CreateDeliveryStreamRequest(input *CreateDeliveryStreamInput) // change, and you can't invoke CreateDeliveryStream again on it. However, you // can invoke the DeleteDeliveryStream operation to delete it. 
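// Illustrative sketch: the UnhealthyNodeReplacement field added to JobFlowInstancesConfig
// above can also be set at cluster creation time through RunJobFlow. Instance types,
// roles, and the release label are placeholder values.
package main

import (
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/emr"
)

func main() {
    sess := session.Must(session.NewSession())
    svc := emr.New(sess)

    out, err := svc.RunJobFlow(&emr.RunJobFlowInput{
        Name:         aws.String("example-cluster"), // placeholder
        ReleaseLabel: aws.String("emr-7.0.0"),       // placeholder
        ServiceRole:  aws.String("EMR_DefaultRole"),
        JobFlowRole:  aws.String("EMR_EC2_DefaultRole"),
        Instances: &emr.JobFlowInstancesConfig{
            MasterInstanceType:          aws.String("m5.xlarge"),
            SlaveInstanceType:           aws.String("m5.xlarge"),
            InstanceCount:               aws.Int64(3),
            KeepJobFlowAliveWhenNoSteps: aws.Bool(true),
            // New in this release: gracefully replace degraded core nodes.
            UnhealthyNodeReplacement: aws.Bool(true),
        },
    })
    if err != nil {
        log.Fatal(err)
    }
    log.Println("cluster:", aws.StringValue(out.JobFlowId))
}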
// -// A Kinesis Data Firehose delivery stream can be configured to receive records -// directly from providers using PutRecord or PutRecordBatch, or it can be configured +// A Firehose delivery stream can be configured to receive records directly +// from providers using PutRecord or PutRecordBatch, or it can be configured // to use an existing Kinesis stream as its source. To specify a Kinesis data // stream as input, set the DeliveryStreamType parameter to KinesisStreamAsSource, // and provide the Kinesis stream Amazon Resource Name (ARN) and role ARN in @@ -95,35 +95,35 @@ func (c *Firehose) CreateDeliveryStreamRequest(input *CreateDeliveryStreamInput) // // When you specify S3DestinationConfiguration, you can also provide the following // optional values: BufferingHints, EncryptionConfiguration, and CompressionFormat. -// By default, if no BufferingHints value is provided, Kinesis Data Firehose -// buffers data up to 5 MB or for 5 minutes, whichever condition is satisfied -// first. BufferingHints is a hint, so there are some cases where the service -// cannot adhere to these conditions strictly. For example, record boundaries -// might be such that the size is a little over or under the configured buffering -// size. By default, no encryption is performed. We strongly recommend that -// you enable encryption to ensure secure data storage in Amazon S3. +// By default, if no BufferingHints value is provided, Firehose buffers data +// up to 5 MB or for 5 minutes, whichever condition is satisfied first. BufferingHints +// is a hint, so there are some cases where the service cannot adhere to these +// conditions strictly. For example, record boundaries might be such that the +// size is a little over or under the configured buffering size. By default, +// no encryption is performed. We strongly recommend that you enable encryption +// to ensure secure data storage in Amazon S3. // // A few notes about Amazon Redshift as a destination: // // - An Amazon Redshift destination requires an S3 bucket as intermediate -// location. Kinesis Data Firehose first delivers data to Amazon S3 and then -// uses COPY syntax to load data into an Amazon Redshift table. This is specified -// in the RedshiftDestinationConfiguration.S3Configuration parameter. +// location. Firehose first delivers data to Amazon S3 and then uses COPY +// syntax to load data into an Amazon Redshift table. This is specified in +// the RedshiftDestinationConfiguration.S3Configuration parameter. // // - The compression formats SNAPPY or ZIP cannot be specified in RedshiftDestinationConfiguration.S3Configuration // because the Amazon Redshift COPY operation that reads from the S3 bucket // doesn't support these compression formats. // // - We strongly recommend that you use the user name and password you provide -// exclusively with Kinesis Data Firehose, and that the permissions for the -// account are restricted for Amazon Redshift INSERT permissions. +// exclusively with Firehose, and that the permissions for the account are +// restricted for Amazon Redshift INSERT permissions. // -// Kinesis Data Firehose assumes the IAM role that is configured as part of -// the destination. The role should allow the Kinesis Data Firehose principal -// to assume the role, and the role should have permissions that allow the service -// to deliver the data. 
For more information, see Grant Kinesis Data Firehose -// Access to an Amazon S3 Destination (https://docs.aws.amazon.com/firehose/latest/dev/controlling-access.html#using-iam-s3) -// in the Amazon Kinesis Data Firehose Developer Guide. +// Firehose assumes the IAM role that is configured as part of the destination. +// The role should allow the Firehose principal to assume the role, and the +// role should have permissions that allow the service to deliver the data. +// For more information, see Grant Firehose Access to an Amazon S3 Destination +// (https://docs.aws.amazon.com/firehose/latest/dev/controlling-access.html#using-iam-s3) +// in the Amazon Firehose Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -144,10 +144,10 @@ func (c *Firehose) CreateDeliveryStreamRequest(input *CreateDeliveryStreamInput) // The resource is already in use and not available for this operation. // // - InvalidKMSResourceException -// Kinesis Data Firehose throws this exception when an attempt to put records -// or to start or stop delivery stream encryption fails. This happens when the -// KMS service throws one of the following exception types: AccessDeniedException, -// InvalidStateException, DisabledException, or NotFoundException. +// Firehose throws this exception when an attempt to put records or to start +// or stop delivery stream encryption fails. This happens when the KMS service +// throws one of the following exception types: AccessDeniedException, InvalidStateException, +// DisabledException, or NotFoundException. // // See also, https://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/CreateDeliveryStream func (c *Firehose) CreateDeliveryStream(input *CreateDeliveryStreamInput) (*CreateDeliveryStreamOutput, error) { @@ -217,16 +217,22 @@ func (c *Firehose) DeleteDeliveryStreamRequest(input *DeleteDeliveryStreamInput) // // Deletes a delivery stream and its data. // -// To check the state of a delivery stream, use DescribeDeliveryStream. You -// can delete a delivery stream only if it is in one of the following states: +// You can delete a delivery stream only if it is in one of the following states: // ACTIVE, DELETING, CREATING_FAILED, or DELETING_FAILED. You can't delete a -// delivery stream that is in the CREATING state. While the deletion request -// is in process, the delivery stream is in the DELETING state. +// delivery stream that is in the CREATING state. To check the state of a delivery +// stream, use DescribeDeliveryStream. // -// While the delivery stream is in the DELETING state, the service might continue -// to accept records, but it doesn't make any guarantees with respect to delivering -// the data. Therefore, as a best practice, first stop any applications that -// are sending records before you delete a delivery stream. +// DeleteDeliveryStream is an asynchronous API. When an API request to DeleteDeliveryStream +// succeeds, the delivery stream is marked for deletion, and it goes into the +// DELETING state.While the delivery stream is in the DELETING state, the service +// might continue to accept records, but it doesn't make any guarantees with +// respect to delivering the data. Therefore, as a best practice, first stop +// any applications that are sending records before you delete a delivery stream. +// +// Removal of a delivery stream that is in the DELETING state is a low priority +// operation for the service. 
A stream may remain in the DELETING state for +// several minutes. Therefore, as a best practice, applications should not wait +// for streams in the DELETING state to be removed. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -352,6 +358,94 @@ func (c *Firehose) DescribeDeliveryStreamWithContext(ctx aws.Context, input *Des return out, req.Send() } +const opGetKinesisStream = "GetKinesisStream" + +// GetKinesisStreamRequest generates a "aws/request.Request" representing the +// client's request for the GetKinesisStream operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetKinesisStream for more information on using the GetKinesisStream +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetKinesisStreamRequest method. +// req, resp := client.GetKinesisStreamRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/GetKinesisStream +func (c *Firehose) GetKinesisStreamRequest(input *GetKinesisStreamInput) (req *request.Request, output *GetKinesisStreamOutput) { + op := &request.Operation{ + Name: opGetKinesisStream, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetKinesisStreamInput{} + } + + output = &GetKinesisStreamOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetKinesisStream API operation for Amazon Kinesis Firehose. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Kinesis Firehose's +// API operation GetKinesisStream for usage and error information. +// +// Returned Error Types: +// +// - ResourceNotFoundException +// The specified resource could not be found. +// +// - InvalidArgumentException +// The specified input parameter has a value that is not valid. +// +// - InvalidStreamTypeException +// +// - InvalidKMSResourceException +// Firehose throws this exception when an attempt to put records or to start +// or stop delivery stream encryption fails. This happens when the KMS service +// throws one of the following exception types: AccessDeniedException, InvalidStateException, +// DisabledException, or NotFoundException. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/GetKinesisStream +func (c *Firehose) GetKinesisStream(input *GetKinesisStreamInput) (*GetKinesisStreamOutput, error) { + req, out := c.GetKinesisStreamRequest(input) + return out, req.Send() +} + +// GetKinesisStreamWithContext is the same as GetKinesisStream with the addition of +// the ability to pass a context and additional request options. +// +// See GetKinesisStream for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
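// Illustrative sketch: DeleteDeliveryStream is asynchronous, so inspect the stream state
// with DescribeDeliveryStream rather than waiting for a stream in the DELETING state to
// disappear, as the documentation above advises. The stream name is a placeholder.
package main

import (
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/firehose"
)

func main() {
    sess := session.Must(session.NewSession())
    svc := firehose.New(sess)
    name := aws.String("example-delivery-stream") // placeholder

    // Marks the stream for deletion; it moves into the DELETING state.
    if _, err := svc.DeleteDeliveryStream(&firehose.DeleteDeliveryStreamInput{
        DeliveryStreamName: name,
    }); err != nil {
        log.Fatal(err)
    }

    // Optional: check the current status once, without polling for removal.
    desc, err := svc.DescribeDeliveryStream(&firehose.DescribeDeliveryStreamInput{
        DeliveryStreamName: name,
    })
    if err == nil {
        log.Println("status:", aws.StringValue(desc.DeliveryStreamDescription.DeliveryStreamStatus))
    }
}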
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Firehose) GetKinesisStreamWithContext(ctx aws.Context, input *GetKinesisStreamInput, opts ...request.Option) (*GetKinesisStreamOutput, error) { + req, out := c.GetKinesisStreamRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opListDeliveryStreams = "ListDeliveryStreams" // ListDeliveryStreamsRequest generates a "aws/request.Request" representing the @@ -562,21 +656,21 @@ func (c *Firehose) PutRecordRequest(input *PutRecordInput) (req *request.Request // PutRecord API operation for Amazon Kinesis Firehose. // -// Writes a single data record into an Amazon Kinesis Data Firehose delivery -// stream. To write multiple data records into a delivery stream, use PutRecordBatch. -// Applications using these operations are referred to as producers. +// Writes a single data record into an Amazon Firehose delivery stream. To write +// multiple data records into a delivery stream, use PutRecordBatch. Applications +// using these operations are referred to as producers. // // By default, each delivery stream can take in up to 2,000 transactions per // second, 5,000 records per second, or 5 MB per second. If you use PutRecord // and PutRecordBatch, the limits are an aggregate across these two operations // for each delivery stream. For more information about limits and how to request -// an increase, see Amazon Kinesis Data Firehose Limits (https://docs.aws.amazon.com/firehose/latest/dev/limits.html). +// an increase, see Amazon Firehose Limits (https://docs.aws.amazon.com/firehose/latest/dev/limits.html). // -// Kinesis Data Firehose accumulates and publishes a particular metric for a -// customer account in one minute intervals. It is possible that the bursts -// of incoming bytes/records ingested to a delivery stream last only for a few -// seconds. Due to this, the actual spikes in the traffic might not be fully -// visible in the customer's 1 minute CloudWatch metrics. +// Firehose accumulates and publishes a particular metric for a customer account +// in one minute intervals. It is possible that the bursts of incoming bytes/records +// ingested to a delivery stream last only for a few seconds. Due to this, the +// actual spikes in the traffic might not be fully visible in the customer's +// 1 minute CloudWatch metrics. // // You must specify the name of the delivery stream and the data record when // using PutRecord. The data record consists of a data blob that can be up to @@ -584,11 +678,11 @@ func (c *Firehose) PutRecordRequest(input *PutRecordInput) (req *request.Request // from a log file, geographic location data, website clickstream data, and // so on. // -// Kinesis Data Firehose buffers records before delivering them to the destination. -// To disambiguate the data blobs at the destination, a common solution is to -// use delimiters in the data, such as a newline (\n) or some other character -// unique within the data. This allows the consumer application to parse individual -// data items when reading the data from the destination. +// Firehose buffers records before delivering them to the destination. To disambiguate +// the data blobs at the destination, a common solution is to use delimiters +// in the data, such as a newline (\n) or some other character unique within +// the data. 
This allows the consumer application to parse individual data items +// when reading the data from the destination. // // The PutRecord operation returns a RecordId, which is a unique string assigned // to each record. Producer applications can use this ID for purposes such as @@ -603,10 +697,10 @@ func (c *Firehose) PutRecordRequest(input *PutRecordInput) (req *request.Request // can result in data duplicates. For larger data assets, allow for a longer // time out before retrying Put API operations. // -// Data records sent to Kinesis Data Firehose are stored for 24 hours from the -// time they are added to a delivery stream as it tries to send the records -// to the destination. If the destination is unreachable for more than 24 hours, -// the data is no longer available. +// Data records sent to Firehose are stored for 24 hours from the time they +// are added to a delivery stream as it tries to send the records to the destination. +// If the destination is unreachable for more than 24 hours, the data is no +// longer available. // // Don't concatenate two or more base64 strings to form the data fields of your // records. Instead, concatenate the raw data, then perform base64 encoding. @@ -627,10 +721,10 @@ func (c *Firehose) PutRecordRequest(input *PutRecordInput) (req *request.Request // The specified input parameter has a value that is not valid. // // - InvalidKMSResourceException -// Kinesis Data Firehose throws this exception when an attempt to put records -// or to start or stop delivery stream encryption fails. This happens when the -// KMS service throws one of the following exception types: AccessDeniedException, -// InvalidStateException, DisabledException, or NotFoundException. +// Firehose throws this exception when an attempt to put records or to start +// or stop delivery stream encryption fails. This happens when the KMS service +// throws one of the following exception types: AccessDeniedException, InvalidStateException, +// DisabledException, or NotFoundException. // // - InvalidSourceException // Only requests from CloudWatch Logs are supported when CloudWatch Logs decompression @@ -640,7 +734,7 @@ func (c *Firehose) PutRecordRequest(input *PutRecordInput) (req *request.Request // The service is unavailable. Back off and retry the operation. If you continue // to see the exception, throughput limits for the delivery stream may have // been exceeded. For more information about limits and how to request an increase, -// see Amazon Kinesis Data Firehose Limits (https://docs.aws.amazon.com/firehose/latest/dev/limits.html). +// see Amazon Firehose Limits (https://docs.aws.amazon.com/firehose/latest/dev/limits.html). // // See also, https://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/PutRecord func (c *Firehose) PutRecord(input *PutRecordInput) (*PutRecordOutput, error) { @@ -712,14 +806,13 @@ func (c *Firehose) PutRecordBatchRequest(input *PutRecordBatchInput) (req *reque // To write single data records into a delivery stream, use PutRecord. Applications // using these operations are referred to as producers. // -// Kinesis Data Firehose accumulates and publishes a particular metric for a -// customer account in one minute intervals. It is possible that the bursts -// of incoming bytes/records ingested to a delivery stream last only for a few -// seconds. Due to this, the actual spikes in the traffic might not be fully -// visible in the customer's 1 minute CloudWatch metrics. 
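// Illustrative sketch: writing a single record with PutRecord and appending a newline
// delimiter so the consumer can split records at the destination, as described above.
// The stream name and payload are placeholders.
package main

import (
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/firehose"
)

func main() {
    sess := session.Must(session.NewSession())
    svc := firehose.New(sess)

    out, err := svc.PutRecord(&firehose.PutRecordInput{
        DeliveryStreamName: aws.String("example-delivery-stream"), // placeholder
        Record: &firehose.Record{
            // The SDK base64-encodes Data for you; pass raw bytes and append
            // the delimiter to the raw data, not to a base64 string.
            Data: []byte(`{"event":"click","page":"/home"}` + "\n"),
        },
    })
    if err != nil {
        log.Fatal(err)
    }
    log.Println("record ID:", aws.StringValue(out.RecordId))
}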
+// Firehose accumulates and publishes a particular metric for a customer account +// in one minute intervals. It is possible that the bursts of incoming bytes/records +// ingested to a delivery stream last only for a few seconds. Due to this, the +// actual spikes in the traffic might not be fully visible in the customer's +// 1 minute CloudWatch metrics. // -// For information about service quota, see Amazon Kinesis Data Firehose Quota -// (https://docs.aws.amazon.com/firehose/latest/dev/limits.html). +// For information about service quota, see Amazon Firehose Quota (https://docs.aws.amazon.com/firehose/latest/dev/limits.html). // // Each PutRecordBatch request supports up to 500 records. Each record in the // request can be as large as 1,000 KB (before base64 encoding), up to a limit @@ -731,11 +824,11 @@ func (c *Firehose) PutRecordBatchRequest(input *PutRecordBatchInput) (req *reque // from a log file, geographic location data, website clickstream data, and // so on. // -// Kinesis Data Firehose buffers records before delivering them to the destination. -// To disambiguate the data blobs at the destination, a common solution is to -// use delimiters in the data, such as a newline (\n) or some other character -// unique within the data. This allows the consumer application to parse individual -// data items when reading the data from the destination. +// Firehose buffers records before delivering them to the destination. To disambiguate +// the data blobs at the destination, a common solution is to use delimiters +// in the data, such as a newline (\n) or some other character unique within +// the data. This allows the consumer application to parse individual data items +// when reading the data from the destination. // // The PutRecordBatch response includes a count of failed records, FailedPutCount, // and an array of responses, RequestResponses. Even if the PutRecordBatch call @@ -745,9 +838,9 @@ func (c *Firehose) PutRecordBatchRequest(input *PutRecordBatchInput) (req *reque // record. It directly correlates with a record in the request array using the // same ordering, from the top to the bottom. The response array always includes // the same number of records as the request array. RequestResponses includes -// both successfully and unsuccessfully processed records. Kinesis Data Firehose -// tries to process all records in each PutRecordBatch request. A single record -// failure does not stop the processing of subsequent records. +// both successfully and unsuccessfully processed records. Firehose tries to +// process all records in each PutRecordBatch request. A single record failure +// does not stop the processing of subsequent records. // // A successfully processed record includes a RecordId value, which is unique // for the record. An unsuccessfully processed record includes ErrorCode and @@ -770,10 +863,10 @@ func (c *Firehose) PutRecordBatchRequest(input *PutRecordBatchInput) (req *reque // can result in data duplicates. For larger data assets, allow for a longer // time out before retrying Put API operations. // -// Data records sent to Kinesis Data Firehose are stored for 24 hours from the -// time they are added to a delivery stream as it attempts to send the records -// to the destination. If the destination is unreachable for more than 24 hours, -// the data is no longer available. +// Data records sent to Firehose are stored for 24 hours from the time they +// are added to a delivery stream as it attempts to send the records to the +// destination. 
If the destination is unreachable for more than 24 hours, the +// data is no longer available. // // Don't concatenate two or more base64 strings to form the data fields of your // records. Instead, concatenate the raw data, then perform base64 encoding. @@ -794,10 +887,10 @@ func (c *Firehose) PutRecordBatchRequest(input *PutRecordBatchInput) (req *reque // The specified input parameter has a value that is not valid. // // - InvalidKMSResourceException -// Kinesis Data Firehose throws this exception when an attempt to put records -// or to start or stop delivery stream encryption fails. This happens when the -// KMS service throws one of the following exception types: AccessDeniedException, -// InvalidStateException, DisabledException, or NotFoundException. +// Firehose throws this exception when an attempt to put records or to start +// or stop delivery stream encryption fails. This happens when the KMS service +// throws one of the following exception types: AccessDeniedException, InvalidStateException, +// DisabledException, or NotFoundException. // // - InvalidSourceException // Only requests from CloudWatch Logs are supported when CloudWatch Logs decompression @@ -807,7 +900,7 @@ func (c *Firehose) PutRecordBatchRequest(input *PutRecordBatchInput) (req *reque // The service is unavailable. Back off and retry the operation. If you continue // to see the exception, throughput limits for the delivery stream may have // been exceeded. For more information about limits and how to request an increase, -// see Amazon Kinesis Data Firehose Limits (https://docs.aws.amazon.com/firehose/latest/dev/limits.html). +// see Amazon Firehose Limits (https://docs.aws.amazon.com/firehose/latest/dev/limits.html). // // See also, https://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/PutRecordBatch func (c *Firehose) PutRecordBatch(input *PutRecordBatchInput) (*PutRecordBatchOutput, error) { @@ -878,8 +971,8 @@ func (c *Firehose) StartDeliveryStreamEncryptionRequest(input *StartDeliveryStre // Enables server-side encryption (SSE) for the delivery stream. // // This operation is asynchronous. It returns immediately. When you invoke it, -// Kinesis Data Firehose first sets the encryption status of the stream to ENABLING, -// and then to ENABLED. The encryption status of a delivery stream is the Status +// Firehose first sets the encryption status of the stream to ENABLING, and +// then to ENABLED. The encryption status of a delivery stream is the Status // property in DeliveryStreamEncryptionConfiguration. If the operation fails, // the encryption status changes to ENABLING_FAILED. You can continue to read // and write data to your delivery stream while the encryption status is ENABLING, @@ -894,12 +987,12 @@ func (c *Firehose) StartDeliveryStreamEncryptionRequest(input *StartDeliveryStre // Even if encryption is currently enabled for a delivery stream, you can still // invoke this operation on it to change the ARN of the CMK or both its type // and ARN. If you invoke this method to change the CMK, and the old CMK is -// of type CUSTOMER_MANAGED_CMK, Kinesis Data Firehose schedules the grant it -// had on the old CMK for retirement. If the new CMK is of type CUSTOMER_MANAGED_CMK, -// Kinesis Data Firehose creates a grant that enables it to use the new CMK -// to encrypt and decrypt data and to manage the grant. +// of type CUSTOMER_MANAGED_CMK, Firehose schedules the grant it had on the +// old CMK for retirement. 
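// Illustrative sketch: batching records with PutRecordBatch and collecting the entries
// that failed, using FailedPutCount and the per-record ErrorCode as described above.
// The stream name and payloads are placeholders.
package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/firehose"
)

func main() {
    sess := session.Must(session.NewSession())
    svc := firehose.New(sess)

    records := []*firehose.Record{
        {Data: []byte(`{"seq":1}` + "\n")},
        {Data: []byte(`{"seq":2}` + "\n")},
    }

    out, err := svc.PutRecordBatch(&firehose.PutRecordBatchInput{
        DeliveryStreamName: aws.String("example-delivery-stream"), // placeholder
        Records:            records,
    })
    if err != nil {
        log.Fatal(err)
    }

    // RequestResponses preserves the request ordering, so failed entries can be
    // matched back to the original records and retried after a backoff.
    if aws.Int64Value(out.FailedPutCount) > 0 {
        var retry []*firehose.Record
        for i, resp := range out.RequestResponses {
            if resp.ErrorCode != nil {
                fmt.Printf("record %d failed: %s\n", i, aws.StringValue(resp.ErrorMessage))
                retry = append(retry, records[i])
            }
        }
        // Resend `retry` with another PutRecordBatch call.
        _ = retry
    }
}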
If the new CMK is of type CUSTOMER_MANAGED_CMK, Firehose +// creates a grant that enables it to use the new CMK to encrypt and decrypt +// data and to manage the grant. // -// For the KMS grant creation to be successful, Kinesis Data Firehose APIs StartDeliveryStreamEncryption +// For the KMS grant creation to be successful, Firehose APIs StartDeliveryStreamEncryption // and CreateDeliveryStream should not be called with session credentials that // are more than 6 hours old. // @@ -910,8 +1003,8 @@ func (c *Firehose) StartDeliveryStreamEncryptionRequest(input *StartDeliveryStre // // If the encryption status of your delivery stream is ENABLING_FAILED, you // can invoke this operation again with a valid CMK. The CMK must be enabled -// and the key policy mustn't explicitly deny the permission for Kinesis Data -// Firehose to invoke KMS encrypt and decrypt operations. +// and the key policy mustn't explicitly deny the permission for Firehose to +// invoke KMS encrypt and decrypt operations. // // You can enable SSE for a delivery stream only if it's a delivery stream that // uses DirectPut as its source. @@ -944,10 +1037,10 @@ func (c *Firehose) StartDeliveryStreamEncryptionRequest(input *StartDeliveryStre // You have already reached the limit for a requested resource. // // - InvalidKMSResourceException -// Kinesis Data Firehose throws this exception when an attempt to put records -// or to start or stop delivery stream encryption fails. This happens when the -// KMS service throws one of the following exception types: AccessDeniedException, -// InvalidStateException, DisabledException, or NotFoundException. +// Firehose throws this exception when an attempt to put records or to start +// or stop delivery stream encryption fails. This happens when the KMS service +// throws one of the following exception types: AccessDeniedException, InvalidStateException, +// DisabledException, or NotFoundException. // // See also, https://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/StartDeliveryStreamEncryption func (c *Firehose) StartDeliveryStreamEncryption(input *StartDeliveryStreamEncryptionInput) (*StartDeliveryStreamEncryptionOutput, error) { @@ -1018,8 +1111,8 @@ func (c *Firehose) StopDeliveryStreamEncryptionRequest(input *StopDeliveryStream // Disables server-side encryption (SSE) for the delivery stream. // // This operation is asynchronous. It returns immediately. When you invoke it, -// Kinesis Data Firehose first sets the encryption status of the stream to DISABLING, -// and then to DISABLED. You can continue to read and write data to your stream +// Firehose first sets the encryption status of the stream to DISABLING, and +// then to DISABLED. You can continue to read and write data to your stream // while its status is DISABLING. It can take up to 5 seconds after the encryption // status changes to DISABLED before all records written to the delivery stream // are no longer subject to encryption. To find out whether a record or a batch @@ -1029,9 +1122,8 @@ func (c *Firehose) StopDeliveryStreamEncryptionRequest(input *StopDeliveryStream // To check the encryption state of a delivery stream, use DescribeDeliveryStream. // // If SSE is enabled using a customer managed CMK and then you invoke StopDeliveryStreamEncryption, -// Kinesis Data Firehose schedules the related KMS grant for retirement and -// then retires it after it ensures that it is finished delivering records to -// the destination. 
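// Illustrative sketch: enabling SSE with an Amazon Web Services owned CMK, then reading
// the encryption Status from DescribeDeliveryStream, since StartDeliveryStreamEncryption
// returns before the stream reaches ENABLED. The stream name is a placeholder.
package main

import (
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/firehose"
)

func main() {
    sess := session.Must(session.NewSession())
    svc := firehose.New(sess)
    name := aws.String("example-delivery-stream") // placeholder

    _, err := svc.StartDeliveryStreamEncryption(&firehose.StartDeliveryStreamEncryptionInput{
        DeliveryStreamName: name,
        DeliveryStreamEncryptionConfigurationInput: &firehose.DeliveryStreamEncryptionConfigurationInput{
            KeyType: aws.String(firehose.KeyTypeAwsOwnedCmk),
            // For CUSTOMER_MANAGED_CMK, also set KeyARN here.
        },
    })
    if err != nil {
        log.Fatal(err)
    }

    desc, err := svc.DescribeDeliveryStream(&firehose.DescribeDeliveryStreamInput{DeliveryStreamName: name})
    if err != nil {
        log.Fatal(err)
    }
    enc := desc.DeliveryStreamDescription.DeliveryStreamEncryptionConfiguration
    log.Println("encryption status:", aws.StringValue(enc.Status))
}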
+// Firehose schedules the related KMS grant for retirement and then retires +// it after it ensures that it is finished delivering records to the destination. // // The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations // have a combined limit of 25 calls per delivery stream per 24 hours. For example, @@ -1334,23 +1426,23 @@ func (c *Firehose) UpdateDestinationRequest(input *UpdateDestinationInput) (req // For an Amazon OpenSearch Service destination, you can only update to another // Amazon OpenSearch Service destination. // -// If the destination type is the same, Kinesis Data Firehose merges the configuration -// parameters specified with the destination configuration that already exists -// on the delivery stream. If any of the parameters are not specified in the -// call, the existing values are retained. For example, in the Amazon S3 destination, -// if EncryptionConfiguration is not specified, then the existing EncryptionConfiguration -// is maintained on the destination. +// If the destination type is the same, Firehose merges the configuration parameters +// specified with the destination configuration that already exists on the delivery +// stream. If any of the parameters are not specified in the call, the existing +// values are retained. For example, in the Amazon S3 destination, if EncryptionConfiguration +// is not specified, then the existing EncryptionConfiguration is maintained +// on the destination. // // If the destination type is not the same, for example, changing the destination -// from Amazon S3 to Amazon Redshift, Kinesis Data Firehose does not merge any -// parameters. In this case, all parameters must be specified. +// from Amazon S3 to Amazon Redshift, Firehose does not merge any parameters. +// In this case, all parameters must be specified. // -// Kinesis Data Firehose uses CurrentDeliveryStreamVersionId to avoid race conditions -// and conflicting merges. This is a required field, and the service updates -// the configuration only if the existing configuration has a version ID that -// matches. After the update is applied successfully, the version ID is updated, -// and can be retrieved using DescribeDeliveryStream. Use the new version ID -// to set CurrentDeliveryStreamVersionId in the next call. +// Firehose uses CurrentDeliveryStreamVersionId to avoid race conditions and +// conflicting merges. This is a required field, and the service updates the +// configuration only if the existing configuration has a version ID that matches. +// After the update is applied successfully, the version ID is updated, and +// can be retrieved using DescribeDeliveryStream. Use the new version ID to +// set CurrentDeliveryStreamVersionId in the next call. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1396,6 +1488,92 @@ func (c *Firehose) UpdateDestinationWithContext(ctx aws.Context, input *UpdateDe return out, req.Send() } +const opVerifyResourcesExistForTagris = "VerifyResourcesExistForTagris" + +// VerifyResourcesExistForTagrisRequest generates a "aws/request.Request" representing the +// client's request for the VerifyResourcesExistForTagris operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
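// Illustrative sketch of the UpdateDestination flow documented above: fetch the current
// VersionId with DescribeDeliveryStream, then update only the parameters that should
// change; unspecified settings on a same-type destination are retained. The stream name
// and buffering values are placeholders, and an extended S3 destination is assumed.
package main

import (
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/firehose"
)

func main() {
    sess := session.Must(session.NewSession())
    svc := firehose.New(sess)
    name := aws.String("example-delivery-stream") // placeholder

    desc, err := svc.DescribeDeliveryStream(&firehose.DescribeDeliveryStreamInput{DeliveryStreamName: name})
    if err != nil {
        log.Fatal(err)
    }
    stream := desc.DeliveryStreamDescription

    _, err = svc.UpdateDestination(&firehose.UpdateDestinationInput{
        DeliveryStreamName:             name,
        CurrentDeliveryStreamVersionId: stream.VersionId,
        DestinationId:                  stream.Destinations[0].DestinationId,
        ExtendedS3DestinationUpdate: &firehose.ExtendedS3DestinationUpdate{
            BufferingHints: &firehose.BufferingHints{
                IntervalInSeconds: aws.Int64(120),
                SizeInMBs:         aws.Int64(64),
            },
        },
    })
    if err != nil {
        log.Fatal(err)
    }
}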
+// the "output" return value is not valid until after Send returns without error. +// +// See VerifyResourcesExistForTagris for more information on using the VerifyResourcesExistForTagris +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the VerifyResourcesExistForTagrisRequest method. +// req, resp := client.VerifyResourcesExistForTagrisRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/VerifyResourcesExistForTagris +func (c *Firehose) VerifyResourcesExistForTagrisRequest(input *VerifyResourcesExistForTagrisInput) (req *request.Request, output *VerifyResourcesExistForTagrisOutput) { + op := &request.Operation{ + Name: opVerifyResourcesExistForTagris, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &VerifyResourcesExistForTagrisInput{} + } + + output = &VerifyResourcesExistForTagrisOutput{} + req = c.newRequest(op, input, output) + return +} + +// VerifyResourcesExistForTagris API operation for Amazon Kinesis Firehose. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Kinesis Firehose's +// API operation VerifyResourcesExistForTagris for usage and error information. +// +// Returned Error Types: +// +// - TagrisAccessDeniedException +// +// - TagrisInternalServiceException +// +// - TagrisInvalidArnException +// +// - TagrisInvalidParameterException +// +// - TagrisPartialResourcesExistResultsException +// +// - TagrisThrottledException +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/VerifyResourcesExistForTagris +func (c *Firehose) VerifyResourcesExistForTagris(input *VerifyResourcesExistForTagrisInput) (*VerifyResourcesExistForTagrisOutput, error) { + req, out := c.VerifyResourcesExistForTagrisRequest(input) + return out, req.Send() +} + +// VerifyResourcesExistForTagrisWithContext is the same as VerifyResourcesExistForTagris with the addition of +// the ability to pass a context and additional request options. +// +// See VerifyResourcesExistForTagris for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Firehose) VerifyResourcesExistForTagrisWithContext(ctx aws.Context, input *VerifyResourcesExistForTagrisInput, opts ...request.Option) (*VerifyResourcesExistForTagrisOutput, error) { + req, out := c.VerifyResourcesExistForTagrisRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + // Describes the buffering to perform before delivering data to the Serverless // offering for Amazon OpenSearch Service destination. type AmazonOpenSearchServerlessBufferingHints struct { @@ -1481,24 +1659,24 @@ type AmazonOpenSearchServerlessDestinationConfiguration struct { // Describes a data processing configuration. 
ProcessingConfiguration *ProcessingConfiguration `type:"structure"` - // The retry behavior in case Kinesis Data Firehose is unable to deliver documents - // to the Serverless offering for Amazon OpenSearch Service. The default value - // is 300 (5 minutes). + // The retry behavior in case Firehose is unable to deliver documents to the + // Serverless offering for Amazon OpenSearch Service. The default value is 300 + // (5 minutes). RetryOptions *AmazonOpenSearchServerlessRetryOptions `type:"structure"` - // The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data - // Firehose for calling the Serverless offering for Amazon OpenSearch Service - // Configuration API and for indexing documents. + // The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose + // for calling the Serverless offering for Amazon OpenSearch Service Configuration + // API and for indexing documents. // // RoleARN is a required field RoleARN *string `min:"1" type:"string" required:"true"` // Defines how documents should be delivered to Amazon S3. When it is set to - // FailedDocumentsOnly, Kinesis Data Firehose writes any documents that could - // not be indexed to the configured Amazon S3 destination, with AmazonOpenSearchService-failed/ - // appended to the key prefix. When set to AllDocuments, Kinesis Data Firehose - // delivers all incoming records to Amazon S3, and also writes failed documents - // with AmazonOpenSearchService-failed/ appended to the prefix. + // FailedDocumentsOnly, Firehose writes any documents that could not be indexed + // to the configured Amazon S3 destination, with AmazonOpenSearchService-failed/ + // appended to the key prefix. When set to AllDocuments, Firehose delivers all + // incoming records to Amazon S3, and also writes failed documents with AmazonOpenSearchService-failed/ + // appended to the prefix. S3BackupMode *string `type:"string" enum:"AmazonOpenSearchServerlessS3BackupMode"` // Describes the configuration of a destination in Amazon S3. @@ -1774,14 +1952,14 @@ type AmazonOpenSearchServerlessDestinationUpdate struct { // Describes a data processing configuration. ProcessingConfiguration *ProcessingConfiguration `type:"structure"` - // The retry behavior in case Kinesis Data Firehose is unable to deliver documents - // to the Serverless offering for Amazon OpenSearch Service. The default value - // is 300 (5 minutes). + // The retry behavior in case Firehose is unable to deliver documents to the + // Serverless offering for Amazon OpenSearch Service. The default value is 300 + // (5 minutes). RetryOptions *AmazonOpenSearchServerlessRetryOptions `type:"structure"` - // The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data - // Firehose for calling the Serverless offering for Amazon OpenSearch Service - // Configuration API and for indexing documents. + // The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose + // for calling the Serverless offering for Amazon OpenSearch Service Configuration + // API and for indexing documents. RoleARN *string `min:"1" type:"string"` // Describes an update for a destination in Amazon S3. @@ -1888,16 +2066,16 @@ func (s *AmazonOpenSearchServerlessDestinationUpdate) SetS3Update(v *S3Destinati return s } -// Configures retry behavior in case Kinesis Data Firehose is unable to deliver -// documents to the Serverless offering for Amazon OpenSearch Service. 
+// Configures retry behavior in case Firehose is unable to deliver documents +// to the Serverless offering for Amazon OpenSearch Service. type AmazonOpenSearchServerlessRetryOptions struct { _ struct{} `type:"structure"` // After an initial failure to deliver to the Serverless offering for Amazon - // OpenSearch Service, the total amount of time during which Kinesis Data Firehose - // retries delivery (including the first attempt). After this time has elapsed, - // the failed documents are written to Amazon S3. Default value is 300 seconds - // (5 minutes). A value of 0 (zero) results in no retries. + // OpenSearch Service, the total amount of time during which Firehose retries + // delivery (including the first attempt). After this time has elapsed, the + // failed documents are written to Amazon S3. Default value is 300 seconds (5 + // minutes). A value of 0 (zero) results in no retries. DurationInSeconds *int64 `type:"integer"` } @@ -2002,8 +2180,8 @@ type AmazonopensearchserviceDestinationConfiguration struct { ClusterEndpoint *string `min:"1" type:"string"` // Indicates the method for setting up document ID. The supported methods are - // Kinesis Data Firehose generated document ID and OpenSearch Service generated - // document ID. + // Firehose generated document ID and OpenSearch Service generated document + // ID. DocumentIdOptions *DocumentIdOptions `type:"structure"` // The ARN of the Amazon OpenSearch Service domain. The IAM role must have permissions @@ -2023,23 +2201,23 @@ type AmazonopensearchserviceDestinationConfiguration struct { // Describes a data processing configuration. ProcessingConfiguration *ProcessingConfiguration `type:"structure"` - // The retry behavior in case Kinesis Data Firehose is unable to deliver documents - // to Amazon OpenSearch Service. The default value is 300 (5 minutes). + // The retry behavior in case Firehose is unable to deliver documents to Amazon + // OpenSearch Service. The default value is 300 (5 minutes). RetryOptions *AmazonopensearchserviceRetryOptions `type:"structure"` - // The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data - // Firehose for calling the Amazon OpenSearch Service Configuration API and - // for indexing documents. + // The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose + // for calling the Amazon OpenSearch Service Configuration API and for indexing + // documents. // // RoleARN is a required field RoleARN *string `min:"1" type:"string" required:"true"` // Defines how documents should be delivered to Amazon S3. When it is set to - // FailedDocumentsOnly, Kinesis Data Firehose writes any documents that could - // not be indexed to the configured Amazon S3 destination, with AmazonOpenSearchService-failed/ - // appended to the key prefix. When set to AllDocuments, Kinesis Data Firehose - // delivers all incoming records to Amazon S3, and also writes failed documents - // with AmazonOpenSearchService-failed/ appended to the prefix. + // FailedDocumentsOnly, Firehose writes any documents that could not be indexed + // to the configured Amazon S3 destination, with AmazonOpenSearchService-failed/ + // appended to the key prefix. When set to AllDocuments, Firehose delivers all + // incoming records to Amazon S3, and also writes failed documents with AmazonOpenSearchService-failed/ + // appended to the prefix. S3BackupMode *string `type:"string" enum:"AmazonopensearchserviceS3BackupMode"` // Describes the configuration of a destination in Amazon S3. 
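// Illustrative sketch: a CreateDeliveryStream call that exercises the retry and S3 backup
// settings documented above for an Amazon OpenSearch Service destination. All ARNs, names,
// and the domain are placeholders.
package main

import (
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/firehose"
)

func main() {
    sess := session.Must(session.NewSession())
    svc := firehose.New(sess)

    _, err := svc.CreateDeliveryStream(&firehose.CreateDeliveryStreamInput{
        DeliveryStreamName: aws.String("example-opensearch-stream"), // placeholder
        DeliveryStreamType: aws.String(firehose.DeliveryStreamTypeDirectPut),
        AmazonopensearchserviceDestinationConfiguration: &firehose.AmazonopensearchserviceDestinationConfiguration{
            DomainARN: aws.String("arn:aws:es:us-east-1:111122223333:domain/example"),        // placeholder
            IndexName: aws.String("example-index"),                                           // placeholder
            RoleARN:   aws.String("arn:aws:iam::111122223333:role/example-firehose-role"),    // placeholder
            // Retry delivery for up to 5 minutes before writing failed documents
            // to the S3 destination below.
            RetryOptions: &firehose.AmazonopensearchserviceRetryOptions{
                DurationInSeconds: aws.Int64(300),
            },
            // FailedDocumentsOnly sends only documents that could not be indexed
            // to S3, under the AmazonOpenSearchService-failed/ prefix.
            S3BackupMode: aws.String("FailedDocumentsOnly"),
            S3Configuration: &firehose.S3DestinationConfiguration{
                BucketARN: aws.String("arn:aws:s3:::example-backup-bucket"),                  // placeholder
                RoleARN:   aws.String("arn:aws:iam::111122223333:role/example-firehose-role"),
            },
        },
    })
    if err != nil {
        log.Fatal(err)
    }
}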
@@ -2049,8 +2227,8 @@ type AmazonopensearchserviceDestinationConfiguration struct { // The Amazon OpenSearch Service type name. For Elasticsearch 6.x, there can // be only one type per index. If you try to specify a new type for an existing - // index that already has another type, Kinesis Data Firehose returns an error - // during run time. + // index that already has another type, Firehose returns an error during run + // time. TypeName *string `type:"string"` // The details of the VPC of the Amazon OpenSearch or Amazon OpenSearch Serverless @@ -2226,14 +2404,14 @@ type AmazonopensearchserviceDestinationDescription struct { // Describes the Amazon CloudWatch logging options for your delivery stream. CloudWatchLoggingOptions *CloudWatchLoggingOptions `type:"structure"` - // The endpoint to use when communicating with the cluster. Kinesis Data Firehose - // uses either this ClusterEndpoint or the DomainARN field to send data to Amazon - // OpenSearch Service. + // The endpoint to use when communicating with the cluster. Firehose uses either + // this ClusterEndpoint or the DomainARN field to send data to Amazon OpenSearch + // Service. ClusterEndpoint *string `min:"1" type:"string"` // Indicates the method for setting up document ID. The supported methods are - // Kinesis Data Firehose generated document ID and OpenSearch Service generated - // document ID. + // Firehose generated document ID and OpenSearch Service generated document + // ID. DocumentIdOptions *DocumentIdOptions `type:"structure"` // The ARN of the Amazon OpenSearch Service domain. @@ -2387,8 +2565,8 @@ type AmazonopensearchserviceDestinationUpdate struct { ClusterEndpoint *string `min:"1" type:"string"` // Indicates the method for setting up document ID. The supported methods are - // Kinesis Data Firehose generated document ID and OpenSearch Service generated - // document ID. + // Firehose generated document ID and OpenSearch Service generated document + // ID. DocumentIdOptions *DocumentIdOptions `type:"structure"` // The ARN of the Amazon OpenSearch Service domain. The IAM role must have permissions @@ -2406,13 +2584,13 @@ type AmazonopensearchserviceDestinationUpdate struct { // Describes a data processing configuration. ProcessingConfiguration *ProcessingConfiguration `type:"structure"` - // The retry behavior in case Kinesis Data Firehose is unable to deliver documents - // to Amazon OpenSearch Service. The default value is 300 (5 minutes). + // The retry behavior in case Firehose is unable to deliver documents to Amazon + // OpenSearch Service. The default value is 300 (5 minutes). RetryOptions *AmazonopensearchserviceRetryOptions `type:"structure"` - // The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data - // Firehose for calling the Amazon OpenSearch Service Configuration API and - // for indexing documents. + // The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose + // for calling the Amazon OpenSearch Service Configuration API and for indexing + // documents. RoleARN *string `min:"1" type:"string"` // Describes an update for a destination in Amazon S3. @@ -2420,13 +2598,12 @@ type AmazonopensearchserviceDestinationUpdate struct { // The Amazon OpenSearch Service type name. For Elasticsearch 6.x, there can // be only one type per index. If you try to specify a new type for an existing - // index that already has another type, Kinesis Data Firehose returns an error - // during runtime. + // index that already has another type, Firehose returns an error during runtime. 
// // If you upgrade Elasticsearch from 6.x to 7.x and don’t update your delivery - // stream, Kinesis Data Firehose still delivers data to Elasticsearch with the - // old index name and type name. If you want to update your delivery stream - // with a new index name, provide an empty string for TypeName. + // stream, Firehose still delivers data to Elasticsearch with the old index + // name and type name. If you want to update your delivery stream with a new + // index name, provide an empty string for TypeName. TypeName *string `type:"string"` } @@ -2562,16 +2739,16 @@ func (s *AmazonopensearchserviceDestinationUpdate) SetTypeName(v string) *Amazon return s } -// Configures retry behavior in case Kinesis Data Firehose is unable to deliver -// documents to Amazon OpenSearch Service. +// Configures retry behavior in case Firehose is unable to deliver documents +// to Amazon OpenSearch Service. type AmazonopensearchserviceRetryOptions struct { _ struct{} `type:"structure"` // After an initial failure to deliver to Amazon OpenSearch Service, the total - // amount of time during which Kinesis Data Firehose retries delivery (including - // the first attempt). After this time has elapsed, the failed documents are - // written to Amazon S3. Default value is 300 seconds (5 minutes). A value of - // 0 (zero) results in no retries. + // amount of time during which Firehose retries delivery (including the first + // attempt). After this time has elapsed, the failed documents are written to + // Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) + // results in no retries. DurationInSeconds *int64 `type:"integer"` } @@ -2664,10 +2841,10 @@ func (s *AuthenticationConfiguration) SetRoleARN(v string) *AuthenticationConfig } // Describes hints for the buffering to perform before delivering data to the -// destination. These options are treated as hints, and therefore Kinesis Data -// Firehose might choose to use different values when it is optimal. The SizeInMBs -// and IntervalInSeconds parameters are optional. However, if specify a value -// for one of them, you must also provide a value for the other. +// destination. These options are treated as hints, and therefore Firehose might +// choose to use different values when it is optimal. The SizeInMBs and IntervalInSeconds +// parameters are optional. However, if you specify a value for one of them, you +// must also provide a value for the other. type BufferingHints struct { _ struct{} `type:"structure"` @@ -2857,7 +3034,7 @@ type CopyCommand struct { // Optional parameters to use with the Amazon Redshift COPY command. For more // information, see the "Optional Parameters" section of Amazon Redshift COPY // command (https://docs.aws.amazon.com/redshift/latest/dg/r_COPY.html). Some - // possible examples that would apply to Kinesis Data Firehose are as follows: + // possible examples that would apply to Firehose are as follows: // // delimiter '\t' lzop; - fields are delimited with "\t" (TAB character) and // compressed using lzop. @@ -3239,13 +3416,12 @@ func (s *CreateDeliveryStreamOutput) SetDeliveryStreamARN(v string) *CreateDeliv return s } -// Specifies that you want Kinesis Data Firehose to convert data from the JSON -// format to the Parquet or ORC format before writing it to Amazon S3.
Kinesis -// Data Firehose uses the serializer and deserializer that you specify, in addition -// to the column information from the Amazon Web Services Glue table, to deserialize -// your input data from JSON and then serialize it to the Parquet or ORC format. -// For more information, see Kinesis Data Firehose Record Format Conversion -// (https://docs.aws.amazon.com/firehose/latest/dev/record-format-conversion.html). +// Specifies that you want Firehose to convert data from the JSON format to +// the Parquet or ORC format before writing it to Amazon S3. Firehose uses the +// serializer and deserializer that you specify, in addition to the column information +// from the Amazon Web Services Glue table, to deserialize your input data from +// JSON and then serialize it to the Parquet or ORC format. For more information, +// see Firehose Record Format Conversion (https://docs.aws.amazon.com/firehose/latest/dev/record-format-conversion.html). type DataFormatConversionConfiguration struct { _ struct{} `type:"structure"` @@ -3253,14 +3429,13 @@ type DataFormatConversionConfiguration struct { // while preserving the configuration details. Enabled *bool `type:"boolean"` - // Specifies the deserializer that you want Kinesis Data Firehose to use to - // convert the format of your data from JSON. This parameter is required if - // Enabled is set to true. + // Specifies the deserializer that you want Firehose to use to convert the format + // of your data from JSON. This parameter is required if Enabled is set to true. InputFormatConfiguration *InputFormatConfiguration `type:"structure"` - // Specifies the serializer that you want Kinesis Data Firehose to use to convert - // the format of your data to the Parquet or ORC format. This parameter is required - // if Enabled is set to true. + // Specifies the serializer that you want Firehose to use to convert the format + // of your data to the Parquet or ORC format. This parameter is required if + // Enabled is set to true. OutputFormatConfiguration *OutputFormatConfiguration `type:"structure"` // Specifies the Amazon Web Services Glue Data Catalog table that contains the @@ -3333,14 +3508,14 @@ func (s *DataFormatConversionConfiguration) SetSchemaConfiguration(v *SchemaConf type DeleteDeliveryStreamInput struct { _ struct{} `type:"structure"` - // Set this to true if you want to delete the delivery stream even if Kinesis - // Data Firehose is unable to retire the grant for the CMK. Kinesis Data Firehose - // might be unable to retire the grant due to a customer error, such as when - // the CMK or the grant are in an invalid state. If you force deletion, you - // can then use the RevokeGrant (https://docs.aws.amazon.com/kms/latest/APIReference/API_RevokeGrant.html) - // operation to revoke the grant you gave to Kinesis Data Firehose. If a failure - // to retire the grant happens due to an Amazon Web Services KMS issue, Kinesis - // Data Firehose keeps retrying the delete operation. + // Set this to true if you want to delete the delivery stream even if Firehose + // is unable to retire the grant for the CMK. Firehose might be unable to retire + // the grant due to a customer error, such as when the CMK or the grant are + // in an invalid state. If you force deletion, you can then use the RevokeGrant + // (https://docs.aws.amazon.com/kms/latest/APIReference/API_RevokeGrant.html) + // operation to revoke the grant you gave to Firehose. 
If a failure to retire + // the grant happens due to an Amazon Web Services KMS issue, Firehose keeps + // retrying the delete operation. // // The default value is false. AllowForceDelete *bool `type:"boolean"` @@ -3658,30 +3833,29 @@ type DeliveryStreamEncryptionConfigurationInput struct { // If you set KeyType to CUSTOMER_MANAGED_CMK, you must specify the Amazon Resource // Name (ARN) of the CMK. If you set KeyType to Amazon Web Services_OWNED_CMK, - // Kinesis Data Firehose uses a service-account CMK. + // Firehose uses a service-account CMK. KeyARN *string `min:"1" type:"string"` // Indicates the type of customer master key (CMK) to use for encryption. The // default setting is Amazon Web Services_OWNED_CMK. For more information about // CMKs, see Customer Master Keys (CMKs) (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#master_keys). // When you invoke CreateDeliveryStream or StartDeliveryStreamEncryption with - // KeyType set to CUSTOMER_MANAGED_CMK, Kinesis Data Firehose invokes the Amazon - // KMS operation CreateGrant (https://docs.aws.amazon.com/kms/latest/APIReference/API_CreateGrant.html) - // to create a grant that allows the Kinesis Data Firehose service to use the - // customer managed CMK to perform encryption and decryption. Kinesis Data Firehose - // manages that grant. + // KeyType set to CUSTOMER_MANAGED_CMK, Firehose invokes the Amazon KMS operation + // CreateGrant (https://docs.aws.amazon.com/kms/latest/APIReference/API_CreateGrant.html) + // to create a grant that allows the Firehose service to use the customer managed + // CMK to perform encryption and decryption. Firehose manages that grant. // // When you invoke StartDeliveryStreamEncryption to change the CMK for a delivery - // stream that is encrypted with a customer managed CMK, Kinesis Data Firehose - // schedules the grant it had on the old CMK for retirement. + // stream that is encrypted with a customer managed CMK, Firehose schedules + // the grant it had on the old CMK for retirement. // // You can use a CMK of type CUSTOMER_MANAGED_CMK to encrypt up to 500 delivery // streams. If a CreateDeliveryStream or StartDeliveryStreamEncryption operation - // exceeds this limit, Kinesis Data Firehose throws a LimitExceededException. + // exceeds this limit, Firehose throws a LimitExceededException. // - // To encrypt your delivery stream, use symmetric CMKs. Kinesis Data Firehose - // doesn't support asymmetric CMKs. For information about symmetric and asymmetric - // CMKs, see About Symmetric and Asymmetric CMKs (https://docs.aws.amazon.com/kms/latest/developerguide/symm-asymm-concepts.html) + // To encrypt your delivery stream, use symmetric CMKs. Firehose doesn't support + // asymmetric CMKs. For information about symmetric and asymmetric CMKs, see + // About Symmetric and Asymmetric CMKs (https://docs.aws.amazon.com/kms/latest/developerguide/symm-asymm-concepts.html) // in the Amazon Web Services Key Management Service developer guide. // // KeyType is a required field @@ -3743,7 +3917,7 @@ type DescribeDeliveryStreamInput struct { DeliveryStreamName *string `min:"1" type:"string" required:"true"` // The ID of the destination to start returning the destination information. - // Kinesis Data Firehose supports one destination per delivery stream. + // Firehose supports one destination per delivery stream. ExclusiveStartDestinationId *string `min:"1" type:"string"` // The limit on the number of destinations to return. 
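As a rough illustration of the AllowForceDelete flag described above, the following is a minimal sketch only; the delivery stream name is a placeholder and the session setup is the usual SDK boilerplate, not part of this change:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/firehose"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := firehose.New(sess)

	// Force deletion even if Firehose cannot retire the grant on the CMK;
	// the grant can be revoked later with the KMS RevokeGrant operation.
	_, err := svc.DeleteDeliveryStream(&firehose.DeleteDeliveryStreamInput{
		DeliveryStreamName: aws.String("example-stream"), // placeholder name
		AllowForceDelete:   aws.Bool(true),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```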
You can have one destination @@ -3842,26 +4016,26 @@ func (s *DescribeDeliveryStreamOutput) SetDeliveryStreamDescription(v *DeliveryS return s } -// The deserializer you want Kinesis Data Firehose to use for converting the -// input data from JSON. Kinesis Data Firehose then serializes the data to its -// final format using the Serializer. Kinesis Data Firehose supports two types -// of deserializers: the Apache Hive JSON SerDe (https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DDL#LanguageManualDDL-JSON) +// The deserializer you want Firehose to use for converting the input data from +// JSON. Firehose then serializes the data to its final format using the Serializer. +// Firehose supports two types of deserializers: the Apache Hive JSON SerDe +// (https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DDL#LanguageManualDDL-JSON) // and the OpenX JSON SerDe (https://github.com/rcongiu/Hive-JSON-Serde). type Deserializer struct { _ struct{} `type:"structure"` - // The native Hive / HCatalog JsonSerDe. Used by Kinesis Data Firehose for deserializing + // The native Hive / HCatalog JsonSerDe. Used by Firehose for deserializing // data, which means converting it from the JSON format in preparation for serializing // it to the Parquet or ORC format. This is one of two deserializers you can // choose, depending on which one offers the functionality you need. The other // option is the OpenX SerDe. HiveJsonSerDe *HiveJsonSerDe `type:"structure"` - // The OpenX SerDe. Used by Kinesis Data Firehose for deserializing data, which - // means converting it from the JSON format in preparation for serializing it - // to the Parquet or ORC format. This is one of two deserializers you can choose, - // depending on which one offers the functionality you need. The other option - // is the native Hive / HCatalog JsonSerDe. + // The OpenX SerDe. Used by Firehose for deserializing data, which means converting + // it from the JSON format in preparation for serializing it to the Parquet + // or ORC format. This is one of two deserializers you can choose, depending + // on which one offers the functionality you need. The other option is the native + // Hive / HCatalog JsonSerDe. OpenXJsonSerDe *OpenXJsonSerDe `type:"structure"` } @@ -4011,25 +4185,24 @@ func (s *DestinationDescription) SetSplunkDestinationDescription(v *SplunkDestin } // Indicates the method for setting up document ID. The supported methods are -// Kinesis Data Firehose generated document ID and OpenSearch Service generated -// document ID. +// Firehose generated document ID and OpenSearch Service generated document +// ID. type DocumentIdOptions struct { _ struct{} `type:"structure"` - // When the FIREHOSE_DEFAULT option is chosen, Kinesis Data Firehose generates - // a unique document ID for each record based on a unique internal identifier. - // The generated document ID is stable across multiple delivery attempts, which - // helps prevent the same record from being indexed multiple times with different - // document IDs. - // - // When the NO_DOCUMENT_ID option is chosen, Kinesis Data Firehose does not - // include any document IDs in the requests it sends to the Amazon OpenSearch - // Service. This causes the Amazon OpenSearch Service domain to generate document - // IDs. In case of multiple delivery attempts, this may cause the same record - // to be indexed more than once with different document IDs. 
This option enables - // write-heavy operations, such as the ingestion of logs and observability data, - // to consume less resources in the Amazon OpenSearch Service domain, resulting - // in improved performance. + // When the FIREHOSE_DEFAULT option is chosen, Firehose generates a unique document + // ID for each record based on a unique internal identifier. The generated document + // ID is stable across multiple delivery attempts, which helps prevent the same + // record from being indexed multiple times with different document IDs. + // + // When the NO_DOCUMENT_ID option is chosen, Firehose does not include any document + // IDs in the requests it sends to the Amazon OpenSearch Service. This causes + // the Amazon OpenSearch Service domain to generate document IDs. In case of + // multiple delivery attempts, this may cause the same record to be indexed + // more than once with different document IDs. This option enables write-heavy + // operations, such as the ingestion of logs and observability data, to consume + // less resources in the Amazon OpenSearch Service domain, resulting in improved + // performance. // // DefaultDocumentIdFormat is a required field DefaultDocumentIdFormat *string `type:"string" required:"true" enum:"DefaultDocumentIdFormat"` @@ -4078,12 +4251,12 @@ func (s *DocumentIdOptions) SetDefaultDocumentIdFormat(v string) *DocumentIdOpti type DynamicPartitioningConfiguration struct { _ struct{} `type:"structure"` - // Specifies that the dynamic partitioning is enabled for this Kinesis Data - // Firehose delivery stream. + // Specifies that the dynamic partitioning is enabled for this Firehose delivery + // stream. Enabled *bool `type:"boolean"` - // The retry behavior in case Kinesis Data Firehose is unable to deliver data - // to an Amazon S3 prefix. + // The retry behavior in case Firehose is unable to deliver data to an Amazon + // S3 prefix. RetryOptions *RetryOptions `type:"structure"` } @@ -4194,8 +4367,8 @@ type ElasticsearchDestinationConfiguration struct { ClusterEndpoint *string `min:"1" type:"string"` // Indicates the method for setting up document ID. The supported methods are - // Kinesis Data Firehose generated document ID and OpenSearch Service generated - // document ID. + // Firehose generated document ID and OpenSearch Service generated document + // ID. DocumentIdOptions *DocumentIdOptions `type:"structure"` // The ARN of the Amazon ES domain. The IAM role must have permissions for DescribeDomain, @@ -4220,14 +4393,13 @@ type ElasticsearchDestinationConfiguration struct { // The data processing configuration. ProcessingConfiguration *ProcessingConfiguration `type:"structure"` - // The retry behavior in case Kinesis Data Firehose is unable to deliver documents - // to Amazon ES. The default value is 300 (5 minutes). + // The retry behavior in case Firehose is unable to deliver documents to Amazon + // ES. The default value is 300 (5 minutes). RetryOptions *ElasticsearchRetryOptions `type:"structure"` - // The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data - // Firehose for calling the Amazon ES Configuration API and for indexing documents. - // For more information, see Grant Kinesis Data Firehose Access to an Amazon - // S3 Destination (https://docs.aws.amazon.com/firehose/latest/dev/controlling-access.html#using-iam-s3) + // The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose + // for calling the Amazon ES Configuration API and for indexing documents. 
For + // more information, see Grant Firehose Access to an Amazon S3 Destination (https://docs.aws.amazon.com/firehose/latest/dev/controlling-access.html#using-iam-s3) // and Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). // @@ -4235,12 +4407,12 @@ type ElasticsearchDestinationConfiguration struct { RoleARN *string `min:"1" type:"string" required:"true"` // Defines how documents should be delivered to Amazon S3. When it is set to - // FailedDocumentsOnly, Kinesis Data Firehose writes any documents that could - // not be indexed to the configured Amazon S3 destination, with AmazonOpenSearchService-failed/ - // appended to the key prefix. When set to AllDocuments, Kinesis Data Firehose - // delivers all incoming records to Amazon S3, and also writes failed documents - // with AmazonOpenSearchService-failed/ appended to the prefix. For more information, - // see Amazon S3 Backup for the Amazon ES Destination (https://docs.aws.amazon.com/firehose/latest/dev/basic-deliver.html#es-s3-backup). + // FailedDocumentsOnly, Firehose writes any documents that could not be indexed + // to the configured Amazon S3 destination, with AmazonOpenSearchService-failed/ + // appended to the key prefix. When set to AllDocuments, Firehose delivers all + // incoming records to Amazon S3, and also writes failed documents with AmazonOpenSearchService-failed/ + // appended to the prefix. For more information, see Amazon S3 Backup for the + // Amazon ES Destination (https://docs.aws.amazon.com/firehose/latest/dev/basic-deliver.html#es-s3-backup). // Default value is FailedDocumentsOnly. // // You can't change this backup mode after you create the delivery stream. @@ -4253,8 +4425,7 @@ type ElasticsearchDestinationConfiguration struct { // The Elasticsearch type name. For Elasticsearch 6.x, there can be only one // type per index. If you try to specify a new type for an existing index that - // already has another type, Kinesis Data Firehose returns an error during run - // time. + // already has another type, Firehose returns an error during run time. // // For Elasticsearch 7.x, don't specify a TypeName. TypeName *string `type:"string"` @@ -4431,21 +4602,20 @@ type ElasticsearchDestinationDescription struct { // The Amazon CloudWatch logging options. CloudWatchLoggingOptions *CloudWatchLoggingOptions `type:"structure"` - // The endpoint to use when communicating with the cluster. Kinesis Data Firehose - // uses either this ClusterEndpoint or the DomainARN field to send data to Amazon - // ES. + // The endpoint to use when communicating with the cluster. Firehose uses either + // this ClusterEndpoint or the DomainARN field to send data to Amazon ES. ClusterEndpoint *string `min:"1" type:"string"` // Indicates the method for setting up document ID. The supported methods are - // Kinesis Data Firehose generated document ID and OpenSearch Service generated - // document ID. + // Firehose generated document ID and OpenSearch Service generated document + // ID. DocumentIdOptions *DocumentIdOptions `type:"structure"` // The ARN of the Amazon ES domain. For more information, see Amazon Resource // Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). // - // Kinesis Data Firehose uses either ClusterEndpoint or DomainARN to send data - // to Amazon ES. + // Firehose uses either ClusterEndpoint or DomainARN to send data to Amazon + // ES. 
DomainARN *string `min:"1" type:"string"` // The Elasticsearch index name. @@ -4599,8 +4769,8 @@ type ElasticsearchDestinationUpdate struct { ClusterEndpoint *string `min:"1" type:"string"` // Indicates the method for setting up document ID. The supported methods are - // Kinesis Data Firehose generated document ID and OpenSearch Service generated - // document ID. + // Firehose generated document ID and OpenSearch Service generated document + // ID. DocumentIdOptions *DocumentIdOptions `type:"structure"` // The ARN of the Amazon ES domain. The IAM role must have permissions for DescribeDomain, @@ -4623,14 +4793,13 @@ type ElasticsearchDestinationUpdate struct { // The data processing configuration. ProcessingConfiguration *ProcessingConfiguration `type:"structure"` - // The retry behavior in case Kinesis Data Firehose is unable to deliver documents - // to Amazon ES. The default value is 300 (5 minutes). + // The retry behavior in case Firehose is unable to deliver documents to Amazon + // ES. The default value is 300 (5 minutes). RetryOptions *ElasticsearchRetryOptions `type:"structure"` - // The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data - // Firehose for calling the Amazon ES Configuration API and for indexing documents. - // For more information, see Grant Kinesis Data Firehose Access to an Amazon - // S3 Destination (https://docs.aws.amazon.com/firehose/latest/dev/controlling-access.html#using-iam-s3) + // The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose + // for calling the Amazon ES Configuration API and for indexing documents. For + // more information, see Grant Firehose Access to an Amazon S3 Destination (https://docs.aws.amazon.com/firehose/latest/dev/controlling-access.html#using-iam-s3) // and Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). RoleARN *string `min:"1" type:"string"` @@ -4640,12 +4809,12 @@ type ElasticsearchDestinationUpdate struct { // The Elasticsearch type name. For Elasticsearch 6.x, there can be only one // type per index. If you try to specify a new type for an existing index that - // already has another type, Kinesis Data Firehose returns an error during runtime. + // already has another type, Firehose returns an error during runtime. // // If you upgrade Elasticsearch from 6.x to 7.x and don’t update your delivery - // stream, Kinesis Data Firehose still delivers data to Elasticsearch with the - // old index name and type name. If you want to update your delivery stream - // with a new index name, provide an empty string for TypeName. + // stream, Firehose still delivers data to Elasticsearch with the old index + // name and type name. If you want to update your delivery stream with a new + // index name, provide an empty string for TypeName. TypeName *string `type:"string"` } @@ -4781,16 +4950,15 @@ func (s *ElasticsearchDestinationUpdate) SetTypeName(v string) *ElasticsearchDes return s } -// Configures retry behavior in case Kinesis Data Firehose is unable to deliver -// documents to Amazon ES. +// Configures retry behavior in case Firehose is unable to deliver documents +// to Amazon ES. type ElasticsearchRetryOptions struct { _ struct{} `type:"structure"` // After an initial failure to deliver to Amazon ES, the total amount of time - // during which Kinesis Data Firehose retries delivery (including the first - // attempt). 
After this time has elapsed, the failed documents are written to - // Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) - // results in no retries. + // during which Firehose retries delivery (including the first attempt). After + // this time has elapsed, the failed documents are written to Amazon S3. Default + // value is 300 seconds (5 minutes). A value of 0 (zero) results in no retries. DurationInSeconds *int64 `type:"integer"` } @@ -4894,6 +5062,9 @@ type ExtendedS3DestinationConfiguration struct { // The compression format. If no value is specified, the default is UNCOMPRESSED. CompressionFormat *string `type:"string" enum:"CompressionFormat"` + // The time zone you prefer. UTC is the default. + CustomTimeZone *string `type:"string"` + // The serializer, deserializer, and schema for converting data from the JSON // format to the Parquet or ORC format before writing it to Amazon S3. DataFormatConversionConfiguration *DataFormatConversionConfiguration `type:"structure"` @@ -4907,12 +5078,15 @@ type ExtendedS3DestinationConfiguration struct { // encryption. EncryptionConfiguration *EncryptionConfiguration `type:"structure"` - // A prefix that Kinesis Data Firehose evaluates and adds to failed records - // before writing them to S3. This prefix appears immediately following the - // bucket name. For information about how to specify this prefix, see Custom - // Prefixes for Amazon S3 Objects (https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html). + // A prefix that Firehose evaluates and adds to failed records before writing + // them to S3. This prefix appears immediately following the bucket name. For + // information about how to specify this prefix, see Custom Prefixes for Amazon + // S3 Objects (https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html). ErrorOutputPrefix *string `type:"string"` + // Specify a file extension. It will override the default file extension + FileExtension *string `type:"string"` + // The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered // Amazon S3 files. You can also specify a custom prefix, as described in Custom // Prefixes for Amazon S3 Objects (https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html). @@ -5026,6 +5200,12 @@ func (s *ExtendedS3DestinationConfiguration) SetCompressionFormat(v string) *Ext return s } +// SetCustomTimeZone sets the CustomTimeZone field's value. +func (s *ExtendedS3DestinationConfiguration) SetCustomTimeZone(v string) *ExtendedS3DestinationConfiguration { + s.CustomTimeZone = &v + return s +} + // SetDataFormatConversionConfiguration sets the DataFormatConversionConfiguration field's value. func (s *ExtendedS3DestinationConfiguration) SetDataFormatConversionConfiguration(v *DataFormatConversionConfiguration) *ExtendedS3DestinationConfiguration { s.DataFormatConversionConfiguration = v @@ -5050,6 +5230,12 @@ func (s *ExtendedS3DestinationConfiguration) SetErrorOutputPrefix(v string) *Ext return s } +// SetFileExtension sets the FileExtension field's value. +func (s *ExtendedS3DestinationConfiguration) SetFileExtension(v string) *ExtendedS3DestinationConfiguration { + s.FileExtension = &v + return s +} + // SetPrefix sets the Prefix field's value. 
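The new CustomTimeZone and FileExtension fields added to ExtendedS3DestinationConfiguration above can be set directly or through the generated setters; a minimal sketch, assuming the usual aws and firehose imports and placeholder bucket, role, and time zone values:

```go
// buildExtendedS3 sketches an S3 destination that writes objects with a .json
// extension and evaluates the timestamp prefix in a non-UTC time zone.
func buildExtendedS3() *firehose.ExtendedS3DestinationConfiguration {
	cfg := &firehose.ExtendedS3DestinationConfiguration{
		BucketARN: aws.String("arn:aws:s3:::example-bucket"), // placeholder
		RoleARN:   aws.String("arn:aws:iam::123456789012:role/firehose-role"),
	}
	cfg.SetCustomTimeZone("Asia/Tokyo") // overrides the default UTC (placeholder zone)
	cfg.SetFileExtension(".json")       // overrides the default file extension
	return cfg
}
```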
func (s *ExtendedS3DestinationConfiguration) SetPrefix(v string) *ExtendedS3DestinationConfiguration { s.Prefix = &v @@ -5103,6 +5289,9 @@ type ExtendedS3DestinationDescription struct { // CompressionFormat is a required field CompressionFormat *string `type:"string" required:"true" enum:"CompressionFormat"` + // The time zone you prefer. UTC is the default. + CustomTimeZone *string `type:"string"` + // The serializer, deserializer, and schema for converting data from the JSON // format to the Parquet or ORC format before writing it to Amazon S3. DataFormatConversionConfiguration *DataFormatConversionConfiguration `type:"structure"` @@ -5118,12 +5307,15 @@ type ExtendedS3DestinationDescription struct { // EncryptionConfiguration is a required field EncryptionConfiguration *EncryptionConfiguration `type:"structure" required:"true"` - // A prefix that Kinesis Data Firehose evaluates and adds to failed records - // before writing them to S3. This prefix appears immediately following the - // bucket name. For information about how to specify this prefix, see Custom - // Prefixes for Amazon S3 Objects (https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html). + // A prefix that Firehose evaluates and adds to failed records before writing + // them to S3. This prefix appears immediately following the bucket name. For + // information about how to specify this prefix, see Custom Prefixes for Amazon + // S3 Objects (https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html). ErrorOutputPrefix *string `type:"string"` + // Specify a file extension. It will override the default file extension + FileExtension *string `type:"string"` + // The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered // Amazon S3 files. You can also specify a custom prefix, as described in Custom // Prefixes for Amazon S3 Objects (https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html). @@ -5188,6 +5380,12 @@ func (s *ExtendedS3DestinationDescription) SetCompressionFormat(v string) *Exten return s } +// SetCustomTimeZone sets the CustomTimeZone field's value. +func (s *ExtendedS3DestinationDescription) SetCustomTimeZone(v string) *ExtendedS3DestinationDescription { + s.CustomTimeZone = &v + return s +} + // SetDataFormatConversionConfiguration sets the DataFormatConversionConfiguration field's value. func (s *ExtendedS3DestinationDescription) SetDataFormatConversionConfiguration(v *DataFormatConversionConfiguration) *ExtendedS3DestinationDescription { s.DataFormatConversionConfiguration = v @@ -5212,6 +5410,12 @@ func (s *ExtendedS3DestinationDescription) SetErrorOutputPrefix(v string) *Exten return s } +// SetFileExtension sets the FileExtension field's value. +func (s *ExtendedS3DestinationDescription) SetFileExtension(v string) *ExtendedS3DestinationDescription { + s.FileExtension = &v + return s +} + // SetPrefix sets the Prefix field's value. func (s *ExtendedS3DestinationDescription) SetPrefix(v string) *ExtendedS3DestinationDescription { s.Prefix = &v @@ -5259,6 +5463,9 @@ type ExtendedS3DestinationUpdate struct { // The compression format. If no value is specified, the default is UNCOMPRESSED. CompressionFormat *string `type:"string" enum:"CompressionFormat"` + // The time zone you prefer. UTC is the default. + CustomTimeZone *string `type:"string"` + // The serializer, deserializer, and schema for converting data from the JSON // format to the Parquet or ORC format before writing it to Amazon S3. 
DataFormatConversionConfiguration *DataFormatConversionConfiguration `type:"structure"` @@ -5272,12 +5479,15 @@ type ExtendedS3DestinationUpdate struct { // encryption. EncryptionConfiguration *EncryptionConfiguration `type:"structure"` - // A prefix that Kinesis Data Firehose evaluates and adds to failed records - // before writing them to S3. This prefix appears immediately following the - // bucket name. For information about how to specify this prefix, see Custom - // Prefixes for Amazon S3 Objects (https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html). + // A prefix that Firehose evaluates and adds to failed records before writing + // them to S3. This prefix appears immediately following the bucket name. For + // information about how to specify this prefix, see Custom Prefixes for Amazon + // S3 Objects (https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html). ErrorOutputPrefix *string `type:"string"` + // Specify a file extension. It will override the default file extension + FileExtension *string `type:"string"` + // The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered // Amazon S3 files. You can also specify a custom prefix, as described in Custom // Prefixes for Amazon S3 Objects (https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html). @@ -5382,6 +5592,12 @@ func (s *ExtendedS3DestinationUpdate) SetCompressionFormat(v string) *ExtendedS3 return s } +// SetCustomTimeZone sets the CustomTimeZone field's value. +func (s *ExtendedS3DestinationUpdate) SetCustomTimeZone(v string) *ExtendedS3DestinationUpdate { + s.CustomTimeZone = &v + return s +} + // SetDataFormatConversionConfiguration sets the DataFormatConversionConfiguration field's value. func (s *ExtendedS3DestinationUpdate) SetDataFormatConversionConfiguration(v *DataFormatConversionConfiguration) *ExtendedS3DestinationUpdate { s.DataFormatConversionConfiguration = v @@ -5406,6 +5622,12 @@ func (s *ExtendedS3DestinationUpdate) SetErrorOutputPrefix(v string) *ExtendedS3 return s } +// SetFileExtension sets the FileExtension field's value. +func (s *ExtendedS3DestinationUpdate) SetFileExtension(v string) *ExtendedS3DestinationUpdate { + s.FileExtension = &v + return s +} + // SetPrefix sets the Prefix field's value. func (s *ExtendedS3DestinationUpdate) SetPrefix(v string) *ExtendedS3DestinationUpdate { s.Prefix = &v @@ -5483,7 +5705,95 @@ func (s *FailureDescription) SetType(v string) *FailureDescription { return s } -// The native Hive / HCatalog JsonSerDe. Used by Kinesis Data Firehose for deserializing +type GetKinesisStreamInput struct { + _ struct{} `type:"structure"` + + // DeliveryStreamARN is a required field + DeliveryStreamARN *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetKinesisStreamInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetKinesisStreamInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
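ExtendedS3DestinationUpdate carries the same two fields, so an existing stream could be updated along these lines; this is a sketch only, assuming the usual aws, firehose, and fmt imports and a stream that already has an Extended S3 destination:

```go
// setFileExtension updates the first destination of an existing delivery stream
// to use a custom file extension.
func setFileExtension(svc *firehose.Firehose, streamName string) error {
	desc, err := svc.DescribeDeliveryStream(&firehose.DescribeDeliveryStreamInput{
		DeliveryStreamName: aws.String(streamName),
	})
	if err != nil {
		return err
	}
	d := desc.DeliveryStreamDescription
	if len(d.Destinations) == 0 {
		return fmt.Errorf("delivery stream %s has no destinations", streamName)
	}
	_, err = svc.UpdateDestination(&firehose.UpdateDestinationInput{
		DeliveryStreamName:             aws.String(streamName),
		CurrentDeliveryStreamVersionId: d.VersionId,
		DestinationId:                  d.Destinations[0].DestinationId,
		ExtendedS3DestinationUpdate: &firehose.ExtendedS3DestinationUpdate{
			FileExtension: aws.String(".json"), // placeholder extension
		},
	})
	return err
}
```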
+func (s *GetKinesisStreamInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetKinesisStreamInput"} + if s.DeliveryStreamARN == nil { + invalidParams.Add(request.NewErrParamRequired("DeliveryStreamARN")) + } + if s.DeliveryStreamARN != nil && len(*s.DeliveryStreamARN) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DeliveryStreamARN", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDeliveryStreamARN sets the DeliveryStreamARN field's value. +func (s *GetKinesisStreamInput) SetDeliveryStreamARN(v string) *GetKinesisStreamInput { + s.DeliveryStreamARN = &v + return s +} + +type GetKinesisStreamOutput struct { + _ struct{} `type:"structure"` + + // CredentialsForReadingKinesisStream is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by GetKinesisStreamOutput's + // String and GoString methods. + CredentialsForReadingKinesisStream *SessionCredentials `type:"structure" sensitive:"true"` + + KinesisStreamARN *string `min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetKinesisStreamOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetKinesisStreamOutput) GoString() string { + return s.String() +} + +// SetCredentialsForReadingKinesisStream sets the CredentialsForReadingKinesisStream field's value. +func (s *GetKinesisStreamOutput) SetCredentialsForReadingKinesisStream(v *SessionCredentials) *GetKinesisStreamOutput { + s.CredentialsForReadingKinesisStream = v + return s +} + +// SetKinesisStreamARN sets the KinesisStreamARN field's value. +func (s *GetKinesisStreamOutput) SetKinesisStreamARN(v string) *GetKinesisStreamOutput { + s.KinesisStreamARN = &v + return s +} + +// The native Hive / HCatalog JsonSerDe. Used by Firehose for deserializing // data, which means converting it from the JSON format in preparation for serializing // it to the Parquet or ORC format. This is one of two deserializers you can // choose, depending on which one offers the functionality you need. The other @@ -5491,12 +5801,12 @@ func (s *FailureDescription) SetType(v string) *FailureDescription { type HiveJsonSerDe struct { _ struct{} `type:"structure"` - // Indicates how you want Kinesis Data Firehose to parse the date and timestamps - // that may be present in your input data JSON. To specify these format strings, - // follow the pattern syntax of JodaTime's DateTimeFormat format strings. For - // more information, see Class DateTimeFormat (https://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html). + // Indicates how you want Firehose to parse the date and timestamps that may + // be present in your input data JSON. To specify these format strings, follow + // the pattern syntax of JodaTime's DateTimeFormat format strings. For more + // information, see Class DateTimeFormat (https://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html). // You can also use the special value millis to parse timestamps in epoch milliseconds. 
- // If you don't specify a format, Kinesis Data Firehose uses java.sql.Timestamp::valueOf + // If you don't specify a format, Firehose uses java.sql.Timestamp::valueOf // by default. TimestampFormats []*string `type:"list"` } @@ -5526,10 +5836,10 @@ func (s *HiveJsonSerDe) SetTimestampFormats(v []*string) *HiveJsonSerDe { } // Describes the buffering options that can be applied before data is delivered -// to the HTTP endpoint destination. Kinesis Data Firehose treats these options -// as hints, and it might choose to use more optimal values. The SizeInMBs and -// IntervalInSeconds parameters are optional. However, if specify a value for -// one of them, you must also provide a value for the other. +// to the HTTP endpoint destination. Firehose treats these options as hints, +// and it might choose to use more optimal values. The SizeInMBs and IntervalInSeconds +// parameters are optional. However, if you specify a value for one of them, you +// must also provide a value for the other. type HttpEndpointBufferingHints struct { _ struct{} `type:"structure"` @@ -5796,10 +6106,10 @@ type HttpEndpointDestinationConfiguration struct { _ struct{} `type:"structure"` // The buffering options that can be used before data is delivered to the specified - // destination. Kinesis Data Firehose treats these options as hints, and it - // might choose to use more optimal values. The SizeInMBs and IntervalInSeconds - // parameters are optional. However, if you specify a value for one of them, - // you must also provide a value for the other. + // destination. Firehose treats these options as hints, and it might choose + // to use more optimal values. The SizeInMBs and IntervalInSeconds parameters + // are optional. However, if you specify a value for one of them, you must also + // provide a value for the other. BufferingHints *HttpEndpointBufferingHints `type:"structure"` // Describes the Amazon CloudWatch logging options for your delivery stream. @@ -5817,19 +6127,19 @@ type HttpEndpointDestinationConfiguration struct { // the destination. RequestConfiguration *HttpEndpointRequestConfiguration `type:"structure"` - // Describes the retry behavior in case Kinesis Data Firehose is unable to deliver - // data to the specified HTTP endpoint destination, or if it doesn't receive - // a valid acknowledgment of receipt from the specified HTTP endpoint destination. + // Describes the retry behavior in case Firehose is unable to deliver data to + // the specified HTTP endpoint destination, or if it doesn't receive a valid + // acknowledgment of receipt from the specified HTTP endpoint destination. RetryOptions *HttpEndpointRetryOptions `type:"structure"` - // Kinesis Data Firehose uses this IAM role for all the permissions that the - // delivery stream needs. + // Firehose uses this IAM role for all the permissions that the delivery stream + // needs. RoleARN *string `min:"1" type:"string"` - // Describes the S3 bucket backup options for the data that Kinesis Data Firehose - // delivers to the HTTP endpoint destination. You can back up all documents - // (AllData) or only the documents that Kinesis Data Firehose could not deliver - // to the specified HTTP endpoint destination (FailedDataOnly). + // Describes the S3 bucket backup options for the data that Firehose delivers + // to the HTTP endpoint destination. You can back up all documents (AllData) + // or only the documents that Firehose could not deliver to the specified HTTP + // endpoint destination (FailedDataOnly).
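Tying the format-conversion pieces above together, a conversion configuration pairs a deserializer with a serializer and a Glue schema; a hedged sketch, assuming the usual aws and firehose imports and placeholder Glue database, table, and role names:

```go
// buildFormatConversion sketches JSON-to-Parquet conversion using the Hive
// JSON SerDe, with "millis" allowed for epoch-millisecond timestamps.
func buildFormatConversion() *firehose.DataFormatConversionConfiguration {
	return &firehose.DataFormatConversionConfiguration{
		Enabled: aws.Bool(true),
		InputFormatConfiguration: &firehose.InputFormatConfiguration{
			Deserializer: &firehose.Deserializer{
				HiveJsonSerDe: &firehose.HiveJsonSerDe{
					TimestampFormats: []*string{
						aws.String("millis"),
						aws.String("yyyy-MM-dd HH:mm:ss"), // JodaTime-style pattern
					},
				},
			},
		},
		OutputFormatConfiguration: &firehose.OutputFormatConfiguration{
			Serializer: &firehose.Serializer{
				ParquetSerDe: &firehose.ParquetSerDe{},
			},
		},
		SchemaConfiguration: &firehose.SchemaConfiguration{
			DatabaseName: aws.String("example_db"),    // placeholder Glue database
			TableName:    aws.String("example_table"), // placeholder Glue table
			RoleARN:      aws.String("arn:aws:iam::123456789012:role/firehose-role"),
		},
	}
}
```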
S3BackupMode *string `type:"string" enum:"HttpEndpointS3BackupMode"` // Describes the configuration of a destination in Amazon S3. @@ -5959,10 +6269,10 @@ type HttpEndpointDestinationDescription struct { _ struct{} `type:"structure"` // Describes buffering options that can be applied to the data before it is - // delivered to the HTTPS endpoint destination. Kinesis Data Firehose teats - // these options as hints, and it might choose to use more optimal values. The - // SizeInMBs and IntervalInSeconds parameters are optional. However, if specify - // a value for one of them, you must also provide a value for the other. + // delivered to the HTTPS endpoint destination. Firehose treats these options + // as hints, and it might choose to use more optimal values. The SizeInMBs and + // IntervalInSeconds parameters are optional. However, if you specify a value for + // one of them, you must also provide a value for the other. BufferingHints *HttpEndpointBufferingHints `type:"structure"` // Describes the Amazon CloudWatch logging options for your delivery stream. @@ -5977,19 +6287,19 @@ type HttpEndpointDestinationDescription struct { // The configuration of request sent to the HTTP endpoint specified as the destination. RequestConfiguration *HttpEndpointRequestConfiguration `type:"structure"` - // Describes the retry behavior in case Kinesis Data Firehose is unable to deliver - // data to the specified HTTP endpoint destination, or if it doesn't receive - // a valid acknowledgment of receipt from the specified HTTP endpoint destination. + // Describes the retry behavior in case Firehose is unable to deliver data to + // the specified HTTP endpoint destination, or if it doesn't receive a valid + // acknowledgment of receipt from the specified HTTP endpoint destination. RetryOptions *HttpEndpointRetryOptions `type:"structure"` - // Kinesis Data Firehose uses this IAM role for all the permissions that the - // delivery stream needs. + // Firehose uses this IAM role for all the permissions that the delivery stream + // needs. RoleARN *string `min:"1" type:"string"` // Describes the S3 bucket backup options for the data that Kinesis Firehose // delivers to the HTTP endpoint destination. You can back up all documents - // (AllData) or only the documents that Kinesis Data Firehose could not deliver - // to the specified HTTP endpoint destination (FailedDataOnly). + // (AllData) or only the documents that Firehose could not deliver to the specified + // HTTP endpoint destination (FailedDataOnly). S3BackupMode *string `type:"string" enum:"HttpEndpointS3BackupMode"` // Describes a destination in Amazon S3. @@ -6073,10 +6383,10 @@ type HttpEndpointDestinationUpdate struct { _ struct{} `type:"structure"` // Describes buffering options that can be applied to the data before it is - // delivered to the HTTPS endpoint destination. Kinesis Data Firehose teats - // these options as hints, and it might choose to use more optimal values. The - // SizeInMBs and IntervalInSeconds parameters are optional. However, if specify - // a value for one of them, you must also provide a value for the other. + // delivered to the HTTPS endpoint destination. Firehose treats these options + // as hints, and it might choose to use more optimal values. The SizeInMBs and + // IntervalInSeconds parameters are optional. However, if you specify a value for + // one of them, you must also provide a value for the other.
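A compact sketch of how the HTTP endpoint buffering hints, retry options, and backup mode described above fit together; endpoint URL, role, and bucket are placeholder assumptions, and the usual aws and firehose imports are assumed:

```go
// buildHTTPDestination sketches an HTTP endpoint destination with explicit
// buffering hints, a retry window, and backup of failed records only.
func buildHTTPDestination() *firehose.HttpEndpointDestinationConfiguration {
	return &firehose.HttpEndpointDestinationConfiguration{
		EndpointConfiguration: &firehose.HttpEndpointConfiguration{
			Url:  aws.String("https://ingest.example.com/v1/records"), // placeholder endpoint
			Name: aws.String("example-endpoint"),
		},
		RoleARN: aws.String("arn:aws:iam::123456789012:role/firehose-role"),
		BufferingHints: &firehose.HttpEndpointBufferingHints{
			IntervalInSeconds: aws.Int64(60), // set both hints, as required when either is set
			SizeInMBs:         aws.Int64(4),
		},
		RetryOptions: &firehose.HttpEndpointRetryOptions{
			DurationInSeconds: aws.Int64(300),
		},
		S3BackupMode: aws.String("FailedDataOnly"),
		S3Configuration: &firehose.S3DestinationConfiguration{
			BucketARN: aws.String("arn:aws:s3:::example-backup-bucket"),
			RoleARN:   aws.String("arn:aws:iam::123456789012:role/firehose-role"),
		},
	}
}
```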
BufferingHints *HttpEndpointBufferingHints `type:"structure"` // Describes the Amazon CloudWatch logging options for your delivery stream. @@ -6092,19 +6402,19 @@ type HttpEndpointDestinationUpdate struct { // destination. RequestConfiguration *HttpEndpointRequestConfiguration `type:"structure"` - // Describes the retry behavior in case Kinesis Data Firehose is unable to deliver - // data to the specified HTTP endpoint destination, or if it doesn't receive - // a valid acknowledgment of receipt from the specified HTTP endpoint destination. + // Describes the retry behavior in case Firehose is unable to deliver data to + // the specified HTTP endpoint destination, or if it doesn't receive a valid + // acknowledgment of receipt from the specified HTTP endpoint destination. RetryOptions *HttpEndpointRetryOptions `type:"structure"` - // Kinesis Data Firehose uses this IAM role for all the permissions that the - // delivery stream needs. + // Firehose uses this IAM role for all the permissions that the delivery stream + // needs. RoleARN *string `min:"1" type:"string"` // Describes the S3 bucket backup options for the data that Kinesis Firehose // delivers to the HTTP endpoint destination. You can back up all documents - // (AllData) or only the documents that Kinesis Data Firehose could not deliver - // to the specified HTTP endpoint destination (FailedDataOnly). + // (AllData) or only the documents that Firehose could not deliver to the specified + // HTTP endpoint destination (FailedDataOnly). S3BackupMode *string `type:"string" enum:"HttpEndpointS3BackupMode"` // Describes an update for a destination in Amazon S3. @@ -6228,9 +6538,9 @@ type HttpEndpointRequestConfiguration struct { // Describes the metadata sent to the HTTP endpoint destination. CommonAttributes []*HttpEndpointCommonAttribute `type:"list"` - // Kinesis Data Firehose uses the content encoding to compress the body of a - // request before sending the request to the destination. For more information, - // see Content-Encoding (https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding) + // Firehose uses the content encoding to compress the body of a request before + // sending the request to the destination. For more information, see Content-Encoding + // (https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding) // in MDN Web Docs, the official Mozilla documentation. ContentEncoding *string `type:"string" enum:"ContentEncoding"` } @@ -6285,17 +6595,16 @@ func (s *HttpEndpointRequestConfiguration) SetContentEncoding(v string) *HttpEnd return s } -// Describes the retry behavior in case Kinesis Data Firehose is unable to deliver -// data to the specified HTTP endpoint destination, or if it doesn't receive -// a valid acknowledgment of receipt from the specified HTTP endpoint destination. +// Describes the retry behavior in case Firehose is unable to deliver data to +// the specified HTTP endpoint destination, or if it doesn't receive a valid +// acknowledgment of receipt from the specified HTTP endpoint destination. type HttpEndpointRetryOptions struct { _ struct{} `type:"structure"` - // The total amount of time that Kinesis Data Firehose spends on retries. This - // duration starts after the initial attempt to send data to the custom destination - // via HTTPS endpoint fails. It doesn't include the periods during which Kinesis - // Data Firehose waits for acknowledgment from the specified destination after - // each attempt. + // The total amount of time that Firehose spends on retries. 
This duration starts + // after the initial attempt to send data to the custom destination via HTTPS + // endpoint fails. It doesn't include the periods during which Firehose waits + // for acknowledgment from the specified destination after each attempt. DurationInSeconds *int64 `type:"integer"` } @@ -6423,10 +6732,10 @@ func (s *InvalidArgumentException) RequestID() string { return s.RespMetadata.RequestID } -// Kinesis Data Firehose throws this exception when an attempt to put records -// or to start or stop delivery stream encryption fails. This happens when the -// KMS service throws one of the following exception types: AccessDeniedException, -// InvalidStateException, DisabledException, or NotFoundException. +// Firehose throws this exception when an attempt to put records or to start +// or stop delivery stream encryption fails. This happens when the KMS service +// throws one of the following exception types: AccessDeniedException, InvalidStateException, +// DisabledException, or NotFoundException. type InvalidKMSResourceException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` @@ -6559,17 +6868,13 @@ func (s *InvalidSourceException) RequestID() string { return s.RespMetadata.RequestID } -// Describes an encryption key for a destination in Amazon S3. -type KMSEncryptionConfig struct { - _ struct{} `type:"structure"` +type InvalidStreamTypeException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - // The Amazon Resource Name (ARN) of the encryption key. Must belong to the - // same Amazon Web Services Region as the destination Amazon S3 bucket. For - // more information, see Amazon Resource Names (ARNs) and Amazon Web Services - // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). - // - // AWSKMSKeyARN is a required field - AWSKMSKeyARN *string `min:"1" type:"string" required:"true"` + Message_ *string `locationName:"message" type:"string"` + + Source *string `locationName:"source" type:"string"` } // String returns the string representation. @@ -6577,7 +6882,7 @@ type KMSEncryptionConfig struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s KMSEncryptionConfig) String() string { +func (s InvalidStreamTypeException) String() string { return awsutil.Prettify(s) } @@ -6586,12 +6891,81 @@ func (s KMSEncryptionConfig) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s KMSEncryptionConfig) GoString() string { +func (s InvalidStreamTypeException) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *KMSEncryptionConfig) Validate() error { +func newErrorInvalidStreamTypeException(v protocol.ResponseMetadata) error { + return &InvalidStreamTypeException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidStreamTypeException) Code() string { + return "InvalidStreamTypeException" +} + +// Message returns the exception's message. 
+func (s *InvalidStreamTypeException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidStreamTypeException) OrigErr() error { + return nil +} + +func (s *InvalidStreamTypeException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidStreamTypeException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidStreamTypeException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Describes an encryption key for a destination in Amazon S3. +type KMSEncryptionConfig struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the encryption key. Must belong to the + // same Amazon Web Services Region as the destination Amazon S3 bucket. For + // more information, see Amazon Resource Names (ARNs) and Amazon Web Services + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). + // + // AWSKMSKeyARN is a required field + AWSKMSKeyARN *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s KMSEncryptionConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s KMSEncryptionConfig) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *KMSEncryptionConfig) Validate() error { invalidParams := request.ErrInvalidParams{Context: "KMSEncryptionConfig"} if s.AWSKMSKeyARN == nil { invalidParams.Add(request.NewErrParamRequired("AWSKMSKeyARN")) @@ -6683,13 +7057,13 @@ func (s *KinesisStreamSourceConfiguration) SetRoleARN(v string) *KinesisStreamSo return s } -// Details about a Kinesis data stream used as the source for a Kinesis Data -// Firehose delivery stream. +// Details about a Kinesis data stream used as the source for a Firehose delivery +// stream. type KinesisStreamSourceDescription struct { _ struct{} `type:"structure"` - // Kinesis Data Firehose starts retrieving records from the Kinesis data stream - // starting with this timestamp. + // Firehose starts retrieving records from the Kinesis data stream starting + // with this timestamp. DeliveryStartTimestamp *time.Time `type:"timestamp"` // The Amazon Resource Name (ARN) of the source Kinesis data stream. For more @@ -7132,16 +7506,16 @@ func (s *MSKSourceConfiguration) SetTopicName(v string) *MSKSourceConfiguration return s } -// Details about the Amazon MSK cluster used as the source for a Kinesis Data -// Firehose delivery stream. +// Details about the Amazon MSK cluster used as the source for a Firehose delivery +// stream. type MSKSourceDescription struct { _ struct{} `type:"structure"` // The authentication configuration of the Amazon MSK cluster. 
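A short sketch of the Kinesis-stream-as-source wiring described above; stream names and ARNs are placeholders, and the usual aws and firehose imports are assumed:

```go
// createFromKinesisStream sketches creating a delivery stream that reads from
// an existing Kinesis data stream and delivers to S3.
func createFromKinesisStream(svc *firehose.Firehose) error {
	_, err := svc.CreateDeliveryStream(&firehose.CreateDeliveryStreamInput{
		DeliveryStreamName: aws.String("example-stream"),
		DeliveryStreamType: aws.String("KinesisStreamAsSource"),
		KinesisStreamSourceConfiguration: &firehose.KinesisStreamSourceConfiguration{
			KinesisStreamARN: aws.String("arn:aws:kinesis:us-east-1:123456789012:stream/example-source"),
			RoleARN:          aws.String("arn:aws:iam::123456789012:role/firehose-role"),
		},
		ExtendedS3DestinationConfiguration: &firehose.ExtendedS3DestinationConfiguration{
			BucketARN: aws.String("arn:aws:s3:::example-bucket"),
			RoleARN:   aws.String("arn:aws:iam::123456789012:role/firehose-role"),
		},
	})
	return err
}
```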
AuthenticationConfiguration *AuthenticationConfiguration `type:"structure"` - // Kinesis Data Firehose starts retrieving records from the topic within the - // Amazon MSK cluster starting with this timestamp. + // Firehose starts retrieving records from the topic within the Amazon MSK cluster + // starting with this timestamp. DeliveryStartTimestamp *time.Time `type:"timestamp"` // The ARN of the Amazon MSK cluster. @@ -7193,16 +7567,16 @@ func (s *MSKSourceDescription) SetTopicName(v string) *MSKSourceDescription { return s } -// The OpenX SerDe. Used by Kinesis Data Firehose for deserializing data, which -// means converting it from the JSON format in preparation for serializing it -// to the Parquet or ORC format. This is one of two deserializers you can choose, -// depending on which one offers the functionality you need. The other option -// is the native Hive / HCatalog JsonSerDe. +// The OpenX SerDe. Used by Firehose for deserializing data, which means converting +// it from the JSON format in preparation for serializing it to the Parquet +// or ORC format. This is one of two deserializers you can choose, depending +// on which one offers the functionality you need. The other option is the native +// Hive / HCatalog JsonSerDe. type OpenXJsonSerDe struct { _ struct{} `type:"structure"` - // When set to true, which is the default, Kinesis Data Firehose converts JSON - // keys to lowercase before deserializing them. + // When set to true, which is the default, Firehose converts JSON keys to lowercase + // before deserializing them. CaseInsensitive *bool `type:"boolean"` // Maps column names to JSON keys that aren't identical to the column names. @@ -7212,10 +7586,10 @@ type OpenXJsonSerDe struct { ColumnToJsonKeyMappings map[string]*string `type:"map"` // When set to true, specifies that the names of the keys include dots and that - // you want Kinesis Data Firehose to replace them with underscores. This is - // useful because Apache Hive does not allow dots in column names. For example, - // if the JSON contains a key whose name is "a.b", you can define the column - // name to be "a_b" when using this option. + // you want Firehose to replace them with underscores. This is useful because + // Apache Hive does not allow dots in column names. For example, if the JSON + // contains a key whose name is "a.b", you can define the column name to be + // "a_b" when using this option. // // The default is false. ConvertDotsInJsonKeysToUnderscores *bool `type:"boolean"` @@ -7264,12 +7638,12 @@ type OrcSerDe struct { // The Hadoop Distributed File System (HDFS) block size. This is useful if you // intend to copy the data from Amazon S3 to HDFS before querying. The default - // is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value - // for padding calculations. + // is 256 MiB and the minimum is 64 MiB. Firehose uses this value for padding + // calculations. BlockSizeBytes *int64 `min:"6.7108864e+07" type:"integer"` - // The column names for which you want Kinesis Data Firehose to create bloom - // filters. The default is null. + // The column names for which you want Firehose to create bloom filters. The + // default is null. BloomFilterColumns []*string `type:"list"` // The Bloom filter false positive probability (FPP). The lower the FPP, the @@ -7306,8 +7680,7 @@ type OrcSerDe struct { // to fit within that space. This ensures that no stripe crosses block boundaries // and causes remote reads within a node-local task. 
// - // Kinesis Data Firehose ignores this parameter when OrcSerDe$EnablePadding - // is false. + // Firehose ignores this parameter when OrcSerDe$EnablePadding is false. PaddingTolerance *float64 `type:"double"` // The number of rows between index entries. The default is 10,000 and the minimum @@ -7416,9 +7789,9 @@ func (s *OrcSerDe) SetStripeSizeBytes(v int64) *OrcSerDe { return s } -// Specifies the serializer that you want Kinesis Data Firehose to use to convert -// the format of your data before it writes it to Amazon S3. This parameter -// is required if Enabled is set to true. +// Specifies the serializer that you want Firehose to use to convert the format +// of your data before it writes it to Amazon S3. This parameter is required +// if Enabled is set to true. type OutputFormatConfiguration struct { _ struct{} `type:"structure"` @@ -7473,8 +7846,8 @@ type ParquetSerDe struct { // The Hadoop Distributed File System (HDFS) block size. This is useful if you // intend to copy the data from Amazon S3 to HDFS before querying. The default - // is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value - // for padding calculations. + // is 256 MiB and the minimum is 64 MiB. Firehose uses this value for padding + // calculations. BlockSizeBytes *int64 `min:"6.7108864e+07" type:"integer"` // The compression code to use over data blocks. The possible values are UNCOMPRESSED, @@ -7631,6 +8004,10 @@ func (s *ProcessingConfiguration) SetProcessors(v []*Processor) *ProcessingConfi } // Describes a data processor. +// +// If you want to add a new line delimiter between records in objects that are +// delivered to Amazon S3, choose AppendDelimiterToRecord as a processor type. +// You don’t have to put a processor parameter when you select AppendDelimiterToRecord. type Processor struct { _ struct{} `type:"structure"` @@ -8137,8 +8514,8 @@ type RedshiftDestinationConfiguration struct { // The data processing configuration. ProcessingConfiguration *ProcessingConfiguration `type:"structure"` - // The retry behavior in case Kinesis Data Firehose is unable to deliver documents - // to Amazon Redshift. Default value is 3600 (60 minutes). + // The retry behavior in case Firehose is unable to deliver documents to Amazon + // Redshift. Default value is 3600 (60 minutes). RetryOptions *RedshiftRetryOptions `type:"structure"` // The Amazon Resource Name (ARN) of the Amazon Web Services credentials. For @@ -8340,8 +8717,8 @@ type RedshiftDestinationDescription struct { // The data processing configuration. ProcessingConfiguration *ProcessingConfiguration `type:"structure"` - // The retry behavior in case Kinesis Data Firehose is unable to deliver documents - // to Amazon Redshift. Default value is 3600 (60 minutes). + // The retry behavior in case Firehose is unable to deliver documents to Amazon + // Redshift. Default value is 3600 (60 minutes). RetryOptions *RedshiftRetryOptions `type:"structure"` // The Amazon Resource Name (ARN) of the Amazon Web Services credentials. For @@ -8473,8 +8850,8 @@ type RedshiftDestinationUpdate struct { // The data processing configuration. ProcessingConfiguration *ProcessingConfiguration `type:"structure"` - // The retry behavior in case Kinesis Data Firehose is unable to deliver documents - // to Amazon Redshift. Default value is 3600 (60 minutes). + // The retry behavior in case Firehose is unable to deliver documents to Amazon + // Redshift. Default value is 3600 (60 minutes). 
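The new Processor doc comment above notes that AppendDelimiterToRecord needs no processor parameters. A hedged sketch of a processing configuration built that way, assuming the Processor shape's `Type` member carries the processor type string spelled out in the comment:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/firehose"
)

func main() {
	// AppendDelimiterToRecord adds a newline between records delivered to S3
	// and takes no parameters, per the doc comment above.
	pc := &firehose.ProcessingConfiguration{
		Enabled: aws.Bool(true),
		Processors: []*firehose.Processor{
			{Type: aws.String("AppendDelimiterToRecord")},
		},
	}
	if err := pc.Validate(); err != nil {
		fmt.Println("invalid processing configuration:", err)
		return
	}
	fmt.Println(pc.String())
}
```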
RetryOptions *RedshiftRetryOptions `type:"structure"` // The Amazon Resource Name (ARN) of the Amazon Web Services credentials. For @@ -8630,16 +9007,16 @@ func (s *RedshiftDestinationUpdate) SetUsername(v string) *RedshiftDestinationUp return s } -// Configures retry behavior in case Kinesis Data Firehose is unable to deliver -// documents to Amazon Redshift. +// Configures retry behavior in case Firehose is unable to deliver documents +// to Amazon Redshift. type RedshiftRetryOptions struct { _ struct{} `type:"structure"` - // The length of time during which Kinesis Data Firehose retries delivery after - // a failure, starting from the initial request and including the first attempt. - // The default value is 3600 seconds (60 minutes). Kinesis Data Firehose does - // not retry if the value of DurationInSeconds is 0 (zero) or if the first delivery - // attempt takes longer than the current value. + // The length of time during which Firehose retries delivery after a failure, + // starting from the initial request and including the first attempt. The default + // value is 3600 seconds (60 minutes). Firehose does not retry if the value + // of DurationInSeconds is 0 (zero) or if the first delivery attempt takes longer + // than the current value. DurationInSeconds *int64 `type:"integer"` } @@ -8797,13 +9174,13 @@ func (s *ResourceNotFoundException) RequestID() string { return s.RespMetadata.RequestID } -// The retry behavior in case Kinesis Data Firehose is unable to deliver data -// to an Amazon S3 prefix. +// The retry behavior in case Firehose is unable to deliver data to an Amazon +// S3 prefix. type RetryOptions struct { _ struct{} `type:"structure"` - // The period of time during which Kinesis Data Firehose retries to deliver - // data to the specified Amazon S3 prefix. + // The period of time during which Firehose retries to deliver data to the specified + // Amazon S3 prefix. DurationInSeconds *int64 `type:"integer"` } @@ -8859,10 +9236,10 @@ type S3DestinationConfiguration struct { // encryption. EncryptionConfiguration *EncryptionConfiguration `type:"structure"` - // A prefix that Kinesis Data Firehose evaluates and adds to failed records - // before writing them to S3. This prefix appears immediately following the - // bucket name. For information about how to specify this prefix, see Custom - // Prefixes for Amazon S3 Objects (https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html). + // A prefix that Firehose evaluates and adds to failed records before writing + // them to S3. This prefix appears immediately following the bucket name. For + // information about how to specify this prefix, see Custom Prefixes for Amazon + // S3 Objects (https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html). ErrorOutputPrefix *string `type:"string"` // The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered @@ -9006,10 +9383,10 @@ type S3DestinationDescription struct { // EncryptionConfiguration is a required field EncryptionConfiguration *EncryptionConfiguration `type:"structure" required:"true"` - // A prefix that Kinesis Data Firehose evaluates and adds to failed records - // before writing them to S3. This prefix appears immediately following the - // bucket name. For information about how to specify this prefix, see Custom - // Prefixes for Amazon S3 Objects (https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html). + // A prefix that Firehose evaluates and adds to failed records before writing + // them to S3. 
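RedshiftRetryOptions above is a single duration: the default is 3600 seconds and a value of 0 disables retries. For illustration:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/firehose"
)

func main() {
	// Double the default 3600-second retry window; 0 would disable retries.
	retry := &firehose.RedshiftRetryOptions{
		DurationInSeconds: aws.Int64(7200),
	}
	fmt.Println(retry.String())
}
```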
This prefix appears immediately following the bucket name. For + // information about how to specify this prefix, see Custom Prefixes for Amazon + // S3 Objects (https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html). ErrorOutputPrefix *string `type:"string"` // The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered @@ -9117,10 +9494,10 @@ type S3DestinationUpdate struct { // encryption. EncryptionConfiguration *EncryptionConfiguration `type:"structure"` - // A prefix that Kinesis Data Firehose evaluates and adds to failed records - // before writing them to S3. This prefix appears immediately following the - // bucket name. For information about how to specify this prefix, see Custom - // Prefixes for Amazon S3 Objects (https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html). + // A prefix that Firehose evaluates and adds to failed records before writing + // them to S3. This prefix appears immediately following the bucket name. For + // information about how to specify this prefix, see Custom Prefixes for Amazon + // S3 Objects (https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html). ErrorOutputPrefix *string `type:"string"` // The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered @@ -9226,9 +9603,9 @@ func (s *S3DestinationUpdate) SetRoleARN(v string) *S3DestinationUpdate { return s } -// Specifies the schema to which you want Kinesis Data Firehose to configure -// your data before it writes it to Amazon S3. This parameter is required if -// Enabled is set to true. +// Specifies the schema to which you want Firehose to configure your data before +// it writes it to Amazon S3. This parameter is required if Enabled is set to +// true. type SchemaConfiguration struct { _ struct{} `type:"structure"` @@ -9248,9 +9625,9 @@ type SchemaConfiguration struct { // Region. Region *string `min:"1" type:"string"` - // The role that Kinesis Data Firehose can use to access Amazon Web Services - // Glue. This role must be in the same account you use for Kinesis Data Firehose. - // Cross-account roles aren't allowed. + // The role that Firehose can use to access Amazon Web Services Glue. This role + // must be in the same account you use for Firehose. Cross-account roles aren't + // allowed. // // If the SchemaConfiguration request parameter is used as part of invoking // the CreateDeliveryStream API, then the RoleARN property is required and its @@ -9266,9 +9643,9 @@ type SchemaConfiguration struct { TableName *string `min:"1" type:"string"` // Specifies the table version for the output data schema. If you don't specify - // this version ID, or if you set it to LATEST, Kinesis Data Firehose uses the - // most recent version. This means that any updates to the table are automatically - // picked up. + // this version ID, or if you set it to LATEST, Firehose uses the most recent + // version. This means that any updates to the table are automatically picked + // up. VersionId *string `min:"1" type:"string"` } @@ -9354,9 +9731,9 @@ func (s *SchemaConfiguration) SetVersionId(v string) *SchemaConfiguration { return s } -// The serializer that you want Kinesis Data Firehose to use to convert data -// to the target format before writing it to Amazon S3. Kinesis Data Firehose -// supports two types of serializers: the ORC SerDe (https://hive.apache.org/javadocs/r1.2.2/api/org/apache/hadoop/hive/ql/io/orc/OrcSerde.html) +// The serializer that you want Firehose to use to convert data to the target +// format before writing it to Amazon S3. 
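SchemaConfiguration above requires a same-account role for Glue access, and a missing or LATEST VersionId means the newest table version is used automatically. A sketch with placeholder names; the DatabaseName member is assumed from the service model and is not shown in this hunk:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/firehose"
)

func main() {
	schema := &firehose.SchemaConfiguration{
		DatabaseName: aws.String("my_glue_db"), // assumed field; not shown in this hunk
		TableName:    aws.String("my_glue_table"),
		Region:       aws.String("us-east-1"),
		// Same-account role that can access Glue; cross-account roles aren't allowed.
		RoleARN:   aws.String("arn:aws:iam::111122223333:role/firehose-glue-access"),
		VersionId: aws.String("LATEST"), // track the most recent table version
	}
	if err := schema.Validate(); err != nil {
		fmt.Println("invalid schema configuration:", err)
		return
	}
	fmt.Println(schema.String())
}
```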
Firehose supports two types of serializers: +// the ORC SerDe (https://hive.apache.org/javadocs/r1.2.2/api/org/apache/hadoop/hive/ql/io/orc/OrcSerde.html) // and the Parquet SerDe (https://hive.apache.org/javadocs/r1.2.2/api/org/apache/hadoop/hive/ql/io/parquet/serde/ParquetHiveSerDe.html). type Serializer struct { _ struct{} `type:"structure"` @@ -9423,7 +9800,7 @@ func (s *Serializer) SetParquetSerDe(v *ParquetSerDe) *Serializer { // The service is unavailable. Back off and retry the operation. If you continue // to see the exception, throughput limits for the delivery stream may have // been exceeded. For more information about limits and how to request an increase, -// see Amazon Kinesis Data Firehose Limits (https://docs.aws.amazon.com/firehose/latest/dev/limits.html). +// see Amazon Firehose Limits (https://docs.aws.amazon.com/firehose/latest/dev/limits.html). type ServiceUnavailableException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` @@ -9488,6 +9865,73 @@ func (s *ServiceUnavailableException) RequestID() string { return s.RespMetadata.RequestID } +type SessionCredentials struct { + _ struct{} `type:"structure" sensitive:"true"` + + // AccessKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by SessionCredentials's + // String and GoString methods. + // AccessKeyId is a required field + AccessKeyId *string `type:"string" required:"true" sensitive:"true"` + + // Expiration is a required field + Expiration *time.Time `type:"timestamp" required:"true"` + + // SecretAccessKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by SessionCredentials's + // String and GoString methods. + // SecretAccessKey is a required field + SecretAccessKey *string `type:"string" required:"true" sensitive:"true"` + + // SessionToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by SessionCredentials's + // String and GoString methods. + // SessionToken is a required field + SessionToken *string `type:"string" required:"true" sensitive:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SessionCredentials) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SessionCredentials) GoString() string { + return s.String() +} + +// SetAccessKeyId sets the AccessKeyId field's value. +func (s *SessionCredentials) SetAccessKeyId(v string) *SessionCredentials { + s.AccessKeyId = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *SessionCredentials) SetExpiration(v time.Time) *SessionCredentials { + s.Expiration = &v + return s +} + +// SetSecretAccessKey sets the SecretAccessKey field's value. +func (s *SessionCredentials) SetSecretAccessKey(v string) *SessionCredentials { + s.SecretAccessKey = &v + return s +} + +// SetSessionToken sets the SessionToken field's value. 
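SessionCredentials is new in this file and every key member is tagged sensitive. A sketch using the setters shown above; the claim that String redacts the values comes from the generated doc comment, not from running the code, and all values are placeholders:

```go
package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/service/firehose"
)

func main() {
	// Placeholder values only; AccessKeyId, SecretAccessKey, and SessionToken
	// are all tagged sensitive in the struct definition above.
	creds := (&firehose.SessionCredentials{}).
		SetAccessKeyId("AKIDEXAMPLE").
		SetSecretAccessKey("secret-example").
		SetSessionToken("token-example").
		SetExpiration(time.Now().Add(time.Hour))

	// Per the generated doc comment, sensitive member values are replaced
	// with "sensitive" in the String/GoString output.
	fmt.Println(creds.String())
}
```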
+func (s *SessionCredentials) SetSessionToken(v string) *SessionCredentials { + s.SessionToken = &v + return s +} + // Configure Snowflake destination type SnowflakeDestinationConfiguration struct { _ struct{} `type:"structure"` @@ -9555,8 +9999,8 @@ type SnowflakeDestinationConfiguration struct { // Describes a data processing configuration. ProcessingConfiguration *ProcessingConfiguration `type:"structure"` - // The time period where Kinesis Data Firehose will retry sending data to the - // chosen HTTP endpoint. + // The time period where Firehose will retry sending data to the chosen HTTP + // endpoint. RetryOptions *SnowflakeRetryOptions `type:"structure"` // The Amazon Resource Name (ARN) of the Snowflake role @@ -9866,8 +10310,8 @@ type SnowflakeDestinationDescription struct { // Describes a data processing configuration. ProcessingConfiguration *ProcessingConfiguration `type:"structure"` - // The time period where Kinesis Data Firehose will retry sending data to the - // chosen HTTP endpoint. + // The time period where Firehose will retry sending data to the chosen HTTP + // endpoint. RetryOptions *SnowflakeRetryOptions `type:"structure"` // The Amazon Resource Name (ARN) of the Snowflake role @@ -10087,23 +10531,21 @@ type SnowflakeDestinationUpdate struct { // Describes a data processing configuration. ProcessingConfiguration *ProcessingConfiguration `type:"structure"` - // Specify how long Kinesis Data Firehose retries sending data to the New Relic - // HTTP endpoint. After sending data, Kinesis Data Firehose first waits for - // an acknowledgment from the HTTP endpoint. If an error occurs or the acknowledgment - // doesn’t arrive within the acknowledgment timeout period, Kinesis Data Firehose - // starts the retry duration counter. It keeps retrying until the retry duration - // expires. After that, Kinesis Data Firehose considers it a data delivery failure - // and backs up the data to your Amazon S3 bucket. Every time that Kinesis Data - // Firehose sends data to the HTTP endpoint (either the initial attempt or a - // retry), it restarts the acknowledgement timeout counter and waits for an - // acknowledgement from the HTTP endpoint. Even if the retry duration expires, - // Kinesis Data Firehose still waits for the acknowledgment until it receives - // it or the acknowledgement timeout period is reached. If the acknowledgment - // times out, Kinesis Data Firehose determines whether there's time left in - // the retry counter. If there is time left, it retries again and repeats the - // logic until it receives an acknowledgment or determines that the retry time - // has expired. If you don't want Kinesis Data Firehose to retry sending data, - // set this value to 0. + // Specify how long Firehose retries sending data to the New Relic HTTP endpoint. + // After sending data, Firehose first waits for an acknowledgment from the HTTP + // endpoint. If an error occurs or the acknowledgment doesn’t arrive within + // the acknowledgment timeout period, Firehose starts the retry duration counter. + // It keeps retrying until the retry duration expires. After that, Firehose + // considers it a data delivery failure and backs up the data to your Amazon + // S3 bucket. Every time that Firehose sends data to the HTTP endpoint (either + // the initial attempt or a retry), it restarts the acknowledgement timeout + // counter and waits for an acknowledgement from the HTTP endpoint. 
Even if + // the retry duration expires, Firehose still waits for the acknowledgment until + // it receives it or the acknowledgement timeout period is reached. If the acknowledgment + // times out, Firehose determines whether there's time left in the retry counter. + // If there is time left, it retries again and repeats the logic until it receives + // an acknowledgment or determines that the retry time has expired. If you don't + // want Firehose to retry sending data, set this value to 0. RetryOptions *SnowflakeRetryOptions `type:"structure"` // The Amazon Resource Name (ARN) of the Snowflake role @@ -10318,28 +10760,26 @@ func (s *SnowflakeDestinationUpdate) SetUser(v string) *SnowflakeDestinationUpda return s } -// Specify how long Kinesis Data Firehose retries sending data to the New Relic -// HTTP endpoint. After sending data, Kinesis Data Firehose first waits for -// an acknowledgment from the HTTP endpoint. If an error occurs or the acknowledgment -// doesn’t arrive within the acknowledgment timeout period, Kinesis Data Firehose -// starts the retry duration counter. It keeps retrying until the retry duration -// expires. After that, Kinesis Data Firehose considers it a data delivery failure -// and backs up the data to your Amazon S3 bucket. Every time that Kinesis Data -// Firehose sends data to the HTTP endpoint (either the initial attempt or a -// retry), it restarts the acknowledgement timeout counter and waits for an -// acknowledgement from the HTTP endpoint. Even if the retry duration expires, -// Kinesis Data Firehose still waits for the acknowledgment until it receives -// it or the acknowledgement timeout period is reached. If the acknowledgment -// times out, Kinesis Data Firehose determines whether there's time left in -// the retry counter. If there is time left, it retries again and repeats the -// logic until it receives an acknowledgment or determines that the retry time -// has expired. If you don't want Kinesis Data Firehose to retry sending data, -// set this value to 0. +// Specify how long Firehose retries sending data to the New Relic HTTP endpoint. +// After sending data, Firehose first waits for an acknowledgment from the HTTP +// endpoint. If an error occurs or the acknowledgment doesn’t arrive within +// the acknowledgment timeout period, Firehose starts the retry duration counter. +// It keeps retrying until the retry duration expires. After that, Firehose +// considers it a data delivery failure and backs up the data to your Amazon +// S3 bucket. Every time that Firehose sends data to the HTTP endpoint (either +// the initial attempt or a retry), it restarts the acknowledgement timeout +// counter and waits for an acknowledgement from the HTTP endpoint. Even if +// the retry duration expires, Firehose still waits for the acknowledgment until +// it receives it or the acknowledgement timeout period is reached. If the acknowledgment +// times out, Firehose determines whether there's time left in the retry counter. +// If there is time left, it retries again and repeats the logic until it receives +// an acknowledgment or determines that the retry time has expired. If you don't +// want Firehose to retry sending data, set this value to 0. type SnowflakeRetryOptions struct { _ struct{} `type:"structure"` - // the time period where Kinesis Data Firehose will retry sending data to the - // chosen HTTP endpoint. + // the time period where Firehose will retry sending data to the chosen HTTP + // endpoint. 
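SnowflakeRetryOptions above reuses the single-duration shape; per the description, setting it to 0 turns retries off. A sketch attaching it to the SnowflakeDestinationUpdate shown in this hunk:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/firehose"
)

func main() {
	// DurationInSeconds of 0 means Firehose does not retry failed deliveries.
	update := &firehose.SnowflakeDestinationUpdate{
		RetryOptions: &firehose.SnowflakeRetryOptions{
			DurationInSeconds: aws.Int64(0),
		},
	}
	fmt.Println(update.String())
}
```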
DurationInSeconds *int64 `type:"integer"` } @@ -10482,8 +10922,8 @@ func (s *SnowflakeVpcConfiguration) SetPrivateLinkVpceId(v string) *SnowflakeVpc return s } -// Details about a Kinesis data stream used as the source for a Kinesis Data -// Firehose delivery stream. +// Details about a Kinesis data stream used as the source for a Firehose delivery +// stream. type SourceDescription struct { _ struct{} `type:"structure"` @@ -10593,14 +11033,13 @@ type SplunkDestinationConfiguration struct { // The Amazon CloudWatch logging options for your delivery stream. CloudWatchLoggingOptions *CloudWatchLoggingOptions `type:"structure"` - // The amount of time that Kinesis Data Firehose waits to receive an acknowledgment - // from Splunk after it sends it data. At the end of the timeout period, Kinesis - // Data Firehose either tries to send the data again or considers it an error, - // based on your retry settings. + // The amount of time that Firehose waits to receive an acknowledgment from + // Splunk after it sends it data. At the end of the timeout period, Firehose + // either tries to send the data again or considers it an error, based on your + // retry settings. HECAcknowledgmentTimeoutInSeconds *int64 `min:"180" type:"integer"` - // The HTTP Event Collector (HEC) endpoint to which Kinesis Data Firehose sends - // your data. + // The HTTP Event Collector (HEC) endpoint to which Firehose sends your data. // // HECEndpoint is a required field HECEndpoint *string `type:"string" required:"true"` @@ -10619,15 +11058,15 @@ type SplunkDestinationConfiguration struct { // The data processing configuration. ProcessingConfiguration *ProcessingConfiguration `type:"structure"` - // The retry behavior in case Kinesis Data Firehose is unable to deliver data - // to Splunk, or if it doesn't receive an acknowledgment of receipt from Splunk. + // The retry behavior in case Firehose is unable to deliver data to Splunk, + // or if it doesn't receive an acknowledgment of receipt from Splunk. RetryOptions *SplunkRetryOptions `type:"structure"` // Defines how documents should be delivered to Amazon S3. When set to FailedEventsOnly, - // Kinesis Data Firehose writes any data that could not be indexed to the configured - // Amazon S3 destination. When set to AllEvents, Kinesis Data Firehose delivers - // all incoming records to Amazon S3, and also writes failed documents to Amazon - // S3. The default value is FailedEventsOnly. + // Firehose writes any data that could not be indexed to the configured Amazon + // S3 destination. When set to AllEvents, Firehose delivers all incoming records + // to Amazon S3, and also writes failed documents to Amazon S3. The default + // value is FailedEventsOnly. // // You can update this backup mode from FailedEventsOnly to AllEvents. You can't // update it from AllEvents to FailedEventsOnly. @@ -10768,14 +11207,13 @@ type SplunkDestinationDescription struct { // The Amazon CloudWatch logging options for your delivery stream. CloudWatchLoggingOptions *CloudWatchLoggingOptions `type:"structure"` - // The amount of time that Kinesis Data Firehose waits to receive an acknowledgment - // from Splunk after it sends it data. At the end of the timeout period, Kinesis - // Data Firehose either tries to send the data again or considers it an error, - // based on your retry settings. + // The amount of time that Firehose waits to receive an acknowledgment from + // Splunk after it sends it data. 
At the end of the timeout period, Firehose + // either tries to send the data again or considers it an error, based on your + // retry settings. HECAcknowledgmentTimeoutInSeconds *int64 `min:"180" type:"integer"` - // The HTTP Event Collector (HEC) endpoint to which Kinesis Data Firehose sends - // your data. + // The HTTP Event Collector (HEC) endpoint to which Firehose sends your data. HECEndpoint *string `type:"string"` // This type can be either "Raw" or "Event." @@ -10787,15 +11225,15 @@ type SplunkDestinationDescription struct { // The data processing configuration. ProcessingConfiguration *ProcessingConfiguration `type:"structure"` - // The retry behavior in case Kinesis Data Firehose is unable to deliver data - // to Splunk or if it doesn't receive an acknowledgment of receipt from Splunk. + // The retry behavior in case Firehose is unable to deliver data to Splunk or + // if it doesn't receive an acknowledgment of receipt from Splunk. RetryOptions *SplunkRetryOptions `type:"structure"` // Defines how documents should be delivered to Amazon S3. When set to FailedDocumentsOnly, - // Kinesis Data Firehose writes any data that could not be indexed to the configured - // Amazon S3 destination. When set to AllDocuments, Kinesis Data Firehose delivers - // all incoming records to Amazon S3, and also writes failed documents to Amazon - // S3. Default value is FailedDocumentsOnly. + // Firehose writes any data that could not be indexed to the configured Amazon + // S3 destination. When set to AllDocuments, Firehose delivers all incoming + // records to Amazon S3, and also writes failed documents to Amazon S3. Default + // value is FailedDocumentsOnly. S3BackupMode *string `type:"string" enum:"SplunkS3BackupMode"` // The Amazon S3 destination.> @@ -10891,14 +11329,13 @@ type SplunkDestinationUpdate struct { // The Amazon CloudWatch logging options for your delivery stream. CloudWatchLoggingOptions *CloudWatchLoggingOptions `type:"structure"` - // The amount of time that Kinesis Data Firehose waits to receive an acknowledgment - // from Splunk after it sends data. At the end of the timeout period, Kinesis - // Data Firehose either tries to send the data again or considers it an error, - // based on your retry settings. + // The amount of time that Firehose waits to receive an acknowledgment from + // Splunk after it sends data. At the end of the timeout period, Firehose either + // tries to send the data again or considers it an error, based on your retry + // settings. HECAcknowledgmentTimeoutInSeconds *int64 `min:"180" type:"integer"` - // The HTTP Event Collector (HEC) endpoint to which Kinesis Data Firehose sends - // your data. + // The HTTP Event Collector (HEC) endpoint to which Firehose sends your data. HECEndpoint *string `type:"string"` // This type can be either "Raw" or "Event." @@ -10911,15 +11348,15 @@ type SplunkDestinationUpdate struct { // The data processing configuration. ProcessingConfiguration *ProcessingConfiguration `type:"structure"` - // The retry behavior in case Kinesis Data Firehose is unable to deliver data - // to Splunk or if it doesn't receive an acknowledgment of receipt from Splunk. + // The retry behavior in case Firehose is unable to deliver data to Splunk or + // if it doesn't receive an acknowledgment of receipt from Splunk. RetryOptions *SplunkRetryOptions `type:"structure"` - // Specifies how you want Kinesis Data Firehose to back up documents to Amazon - // S3. 
When set to FailedDocumentsOnly, Kinesis Data Firehose writes any data - // that could not be indexed to the configured Amazon S3 destination. When set - // to AllEvents, Kinesis Data Firehose delivers all incoming records to Amazon - // S3, and also writes failed documents to Amazon S3. The default value is FailedEventsOnly. + // Specifies how you want Firehose to back up documents to Amazon S3. When set + // to FailedDocumentsOnly, Firehose writes any data that could not be indexed + // to the configured Amazon S3 destination. When set to AllEvents, Firehose + // delivers all incoming records to Amazon S3, and also writes failed documents + // to Amazon S3. The default value is FailedEventsOnly. // // You can update this backup mode from FailedEventsOnly to AllEvents. You can't // update it from AllEvents to FailedEventsOnly. @@ -11035,15 +11472,15 @@ func (s *SplunkDestinationUpdate) SetS3Update(v *S3DestinationUpdate) *SplunkDes return s } -// Configures retry behavior in case Kinesis Data Firehose is unable to deliver -// documents to Splunk, or if it doesn't receive an acknowledgment from Splunk. +// Configures retry behavior in case Firehose is unable to deliver documents +// to Splunk, or if it doesn't receive an acknowledgment from Splunk. type SplunkRetryOptions struct { _ struct{} `type:"structure"` - // The total amount of time that Kinesis Data Firehose spends on retries. This - // duration starts after the initial attempt to send data to Splunk fails. It - // doesn't include the periods during which Kinesis Data Firehose waits for - // acknowledgment from Splunk after each attempt. + // The total amount of time that Firehose spends on retries. This duration starts + // after the initial attempt to send data to Splunk fails. It doesn't include + // the periods during which Firehose waits for acknowledgment from Splunk after + // each attempt. DurationInSeconds *int64 `type:"integer"` } @@ -11391,18 +11828,11 @@ func (s TagDeliveryStreamOutput) GoString() string { return s.String() } -type UntagDeliveryStreamInput struct { - _ struct{} `type:"structure"` - - // The name of the delivery stream. - // - // DeliveryStreamName is a required field - DeliveryStreamName *string `min:"1" type:"string" required:"true"` +type TagrisAccessDeniedException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - // A list of tag keys. Each corresponding tag is removed from the delivery stream. - // - // TagKeys is a required field - TagKeys []*string `min:"1" type:"list" required:"true"` + Message_ *string `locationName:"message" type:"string"` } // String returns the string representation. @@ -11410,7 +11840,7 @@ type UntagDeliveryStreamInput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s UntagDeliveryStreamInput) String() string { +func (s TagrisAccessDeniedException) String() string { return awsutil.Prettify(s) } @@ -11419,46 +11849,53 @@ func (s UntagDeliveryStreamInput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". 
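For the Splunk update members above (HEC endpoint, acknowledgment timeout, retry duration, backup mode), a sketch with placeholder values; note the doc's one-way constraint that the backup mode can move from FailedEventsOnly to AllEvents but not back:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/firehose"
)

func main() {
	// Endpoint URL is a placeholder for a Splunk HTTP Event Collector.
	update := &firehose.SplunkDestinationUpdate{
		HECEndpoint:                       aws.String("https://splunk.example.com:8088"),
		HECAcknowledgmentTimeoutInSeconds: aws.Int64(300),
		RetryOptions: &firehose.SplunkRetryOptions{
			DurationInSeconds: aws.Int64(300),
		},
		// Back up all incoming records, not just failed ones.
		S3BackupMode: aws.String("AllEvents"),
	}
	fmt.Println(update.String())
}
```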
-func (s UntagDeliveryStreamInput) GoString() string { +func (s TagrisAccessDeniedException) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *UntagDeliveryStreamInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UntagDeliveryStreamInput"} - if s.DeliveryStreamName == nil { - invalidParams.Add(request.NewErrParamRequired("DeliveryStreamName")) - } - if s.DeliveryStreamName != nil && len(*s.DeliveryStreamName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DeliveryStreamName", 1)) - } - if s.TagKeys == nil { - invalidParams.Add(request.NewErrParamRequired("TagKeys")) - } - if s.TagKeys != nil && len(s.TagKeys) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TagKeys", 1)) +func newErrorTagrisAccessDeniedException(v protocol.ResponseMetadata) error { + return &TagrisAccessDeniedException{ + RespMetadata: v, } +} - if invalidParams.Len() > 0 { - return invalidParams +// Code returns the exception type name. +func (s *TagrisAccessDeniedException) Code() string { + return "TagrisAccessDeniedException" +} + +// Message returns the exception's message. +func (s *TagrisAccessDeniedException) Message() string { + if s.Message_ != nil { + return *s.Message_ } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *TagrisAccessDeniedException) OrigErr() error { return nil } -// SetDeliveryStreamName sets the DeliveryStreamName field's value. -func (s *UntagDeliveryStreamInput) SetDeliveryStreamName(v string) *UntagDeliveryStreamInput { - s.DeliveryStreamName = &v - return s +func (s *TagrisAccessDeniedException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } -// SetTagKeys sets the TagKeys field's value. -func (s *UntagDeliveryStreamInput) SetTagKeys(v []*string) *UntagDeliveryStreamInput { - s.TagKeys = v - return s +// Status code returns the HTTP status code for the request's response error. +func (s *TagrisAccessDeniedException) StatusCode() int { + return s.RespMetadata.StatusCode } -type UntagDeliveryStreamOutput struct { - _ struct{} `type:"structure"` +// RequestID returns the service's response RequestID for request. +func (s *TagrisAccessDeniedException) RequestID() string { + return s.RespMetadata.RequestID +} + +type TagrisInternalServiceException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` } // String returns the string representation. @@ -11466,7 +11903,7 @@ type UntagDeliveryStreamOutput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s UntagDeliveryStreamOutput) String() string { +func (s TagrisInternalServiceException) String() string { return awsutil.Prettify(s) } @@ -11475,62 +11912,55 @@ func (s UntagDeliveryStreamOutput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". 
-func (s UntagDeliveryStreamOutput) GoString() string { +func (s TagrisInternalServiceException) GoString() string { return s.String() } -type UpdateDestinationInput struct { - _ struct{} `type:"structure"` - - // Describes an update for a destination in the Serverless offering for Amazon - // OpenSearch Service. - AmazonOpenSearchServerlessDestinationUpdate *AmazonOpenSearchServerlessDestinationUpdate `type:"structure"` - - // Describes an update for a destination in Amazon OpenSearch Service. - AmazonopensearchserviceDestinationUpdate *AmazonopensearchserviceDestinationUpdate `type:"structure"` - - // Obtain this value from the VersionId result of DeliveryStreamDescription. - // This value is required, and helps the service perform conditional operations. - // For example, if there is an interleaving update and this value is null, then - // the update destination fails. After the update is successful, the VersionId - // value is updated. The service then performs a merge of the old configuration - // with the new configuration. - // - // CurrentDeliveryStreamVersionId is a required field - CurrentDeliveryStreamVersionId *string `min:"1" type:"string" required:"true"` +func newErrorTagrisInternalServiceException(v protocol.ResponseMetadata) error { + return &TagrisInternalServiceException{ + RespMetadata: v, + } +} - // The name of the delivery stream. - // - // DeliveryStreamName is a required field - DeliveryStreamName *string `min:"1" type:"string" required:"true"` +// Code returns the exception type name. +func (s *TagrisInternalServiceException) Code() string { + return "TagrisInternalServiceException" +} - // The ID of the destination. - // - // DestinationId is a required field - DestinationId *string `min:"1" type:"string" required:"true"` +// Message returns the exception's message. +func (s *TagrisInternalServiceException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} - // Describes an update for a destination in Amazon ES. - ElasticsearchDestinationUpdate *ElasticsearchDestinationUpdate `type:"structure"` +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *TagrisInternalServiceException) OrigErr() error { + return nil +} - // Describes an update for a destination in Amazon S3. - ExtendedS3DestinationUpdate *ExtendedS3DestinationUpdate `type:"structure"` +func (s *TagrisInternalServiceException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} - // Describes an update to the specified HTTP endpoint destination. - HttpEndpointDestinationUpdate *HttpEndpointDestinationUpdate `type:"structure"` +// Status code returns the HTTP status code for the request's response error. +func (s *TagrisInternalServiceException) StatusCode() int { + return s.RespMetadata.StatusCode +} - // Describes an update for a destination in Amazon Redshift. - RedshiftDestinationUpdate *RedshiftDestinationUpdate `type:"structure"` +// RequestID returns the service's response RequestID for request. +func (s *TagrisInternalServiceException) RequestID() string { + return s.RespMetadata.RequestID +} - // [Deprecated] Describes an update for a destination in Amazon S3. 
- // - // Deprecated: S3DestinationUpdate has been deprecated - S3DestinationUpdate *S3DestinationUpdate `deprecated:"true" type:"structure"` +type TagrisInvalidArnException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - // Update to the Snowflake destination condiguration settings - SnowflakeDestinationUpdate *SnowflakeDestinationUpdate `type:"structure"` + Message_ *string `locationName:"message" type:"string"` - // Describes an update for a destination in Splunk. - SplunkDestinationUpdate *SplunkDestinationUpdate `type:"structure"` + SweepListItem *TagrisSweepListItem `locationName:"sweepListItem" type:"structure"` } // String returns the string representation. @@ -11538,7 +11968,7 @@ type UpdateDestinationInput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s UpdateDestinationInput) String() string { +func (s TagrisInvalidArnException) String() string { return awsutil.Prettify(s) } @@ -11547,15 +11977,474 @@ func (s UpdateDestinationInput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s UpdateDestinationInput) GoString() string { +func (s TagrisInvalidArnException) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateDestinationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateDestinationInput"} - if s.CurrentDeliveryStreamVersionId == nil { - invalidParams.Add(request.NewErrParamRequired("CurrentDeliveryStreamVersionId")) +func newErrorTagrisInvalidArnException(v protocol.ResponseMetadata) error { + return &TagrisInvalidArnException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *TagrisInvalidArnException) Code() string { + return "TagrisInvalidArnException" +} + +// Message returns the exception's message. +func (s *TagrisInvalidArnException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *TagrisInvalidArnException) OrigErr() error { + return nil +} + +func (s *TagrisInvalidArnException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *TagrisInvalidArnException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *TagrisInvalidArnException) RequestID() string { + return s.RespMetadata.RequestID +} + +type TagrisInvalidParameterException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TagrisInvalidParameterException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TagrisInvalidParameterException) GoString() string { + return s.String() +} + +func newErrorTagrisInvalidParameterException(v protocol.ResponseMetadata) error { + return &TagrisInvalidParameterException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *TagrisInvalidParameterException) Code() string { + return "TagrisInvalidParameterException" +} + +// Message returns the exception's message. +func (s *TagrisInvalidParameterException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *TagrisInvalidParameterException) OrigErr() error { + return nil +} + +func (s *TagrisInvalidParameterException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *TagrisInvalidParameterException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *TagrisInvalidParameterException) RequestID() string { + return s.RespMetadata.RequestID +} + +type TagrisPartialResourcesExistResultsException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` + + ResourceExistenceInformation map[string]*string `locationName:"resourceExistenceInformation" type:"map"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TagrisPartialResourcesExistResultsException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TagrisPartialResourcesExistResultsException) GoString() string { + return s.String() +} + +func newErrorTagrisPartialResourcesExistResultsException(v protocol.ResponseMetadata) error { + return &TagrisPartialResourcesExistResultsException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *TagrisPartialResourcesExistResultsException) Code() string { + return "TagrisPartialResourcesExistResultsException" +} + +// Message returns the exception's message. +func (s *TagrisPartialResourcesExistResultsException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *TagrisPartialResourcesExistResultsException) OrigErr() error { + return nil +} + +func (s *TagrisPartialResourcesExistResultsException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *TagrisPartialResourcesExistResultsException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. 
+func (s *TagrisPartialResourcesExistResultsException) RequestID() string { + return s.RespMetadata.RequestID +} + +type TagrisSweepListItem struct { + _ struct{} `type:"structure"` + + TagrisAccountId *string `min:"12" type:"string"` + + TagrisAmazonResourceName *string `min:"1" type:"string"` + + TagrisInternalId *string `type:"string"` + + TagrisVersion *int64 `type:"long"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TagrisSweepListItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TagrisSweepListItem) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TagrisSweepListItem) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagrisSweepListItem"} + if s.TagrisAccountId != nil && len(*s.TagrisAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("TagrisAccountId", 12)) + } + if s.TagrisAmazonResourceName != nil && len(*s.TagrisAmazonResourceName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TagrisAmazonResourceName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTagrisAccountId sets the TagrisAccountId field's value. +func (s *TagrisSweepListItem) SetTagrisAccountId(v string) *TagrisSweepListItem { + s.TagrisAccountId = &v + return s +} + +// SetTagrisAmazonResourceName sets the TagrisAmazonResourceName field's value. +func (s *TagrisSweepListItem) SetTagrisAmazonResourceName(v string) *TagrisSweepListItem { + s.TagrisAmazonResourceName = &v + return s +} + +// SetTagrisInternalId sets the TagrisInternalId field's value. +func (s *TagrisSweepListItem) SetTagrisInternalId(v string) *TagrisSweepListItem { + s.TagrisInternalId = &v + return s +} + +// SetTagrisVersion sets the TagrisVersion field's value. +func (s *TagrisSweepListItem) SetTagrisVersion(v int64) *TagrisSweepListItem { + s.TagrisVersion = &v + return s +} + +type TagrisThrottledException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TagrisThrottledException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TagrisThrottledException) GoString() string { + return s.String() +} + +func newErrorTagrisThrottledException(v protocol.ResponseMetadata) error { + return &TagrisThrottledException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. 
+func (s *TagrisThrottledException) Code() string { + return "TagrisThrottledException" +} + +// Message returns the exception's message. +func (s *TagrisThrottledException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *TagrisThrottledException) OrigErr() error { + return nil +} + +func (s *TagrisThrottledException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *TagrisThrottledException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *TagrisThrottledException) RequestID() string { + return s.RespMetadata.RequestID +} + +type UntagDeliveryStreamInput struct { + _ struct{} `type:"structure"` + + // The name of the delivery stream. + // + // DeliveryStreamName is a required field + DeliveryStreamName *string `min:"1" type:"string" required:"true"` + + // A list of tag keys. Each corresponding tag is removed from the delivery stream. + // + // TagKeys is a required field + TagKeys []*string `min:"1" type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UntagDeliveryStreamInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UntagDeliveryStreamInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UntagDeliveryStreamInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UntagDeliveryStreamInput"} + if s.DeliveryStreamName == nil { + invalidParams.Add(request.NewErrParamRequired("DeliveryStreamName")) + } + if s.DeliveryStreamName != nil && len(*s.DeliveryStreamName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DeliveryStreamName", 1)) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + if s.TagKeys != nil && len(s.TagKeys) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TagKeys", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDeliveryStreamName sets the DeliveryStreamName field's value. +func (s *UntagDeliveryStreamInput) SetDeliveryStreamName(v string) *UntagDeliveryStreamInput { + s.DeliveryStreamName = &v + return s +} + +// SetTagKeys sets the TagKeys field's value. +func (s *UntagDeliveryStreamInput) SetTagKeys(v []*string) *UntagDeliveryStreamInput { + s.TagKeys = v + return s +} + +type UntagDeliveryStreamOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UntagDeliveryStreamOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
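UntagDeliveryStreamInput above carries only the stream name and the tag keys to remove. A hedged end-to-end sketch, assuming the usual v1 client constructor and the UntagDeliveryStream operation these shapes belong to; the stream name and tag keys are placeholders:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/firehose"
)

func main() {
	svc := firehose.New(session.Must(session.NewSession()))

	input := &firehose.UntagDeliveryStreamInput{
		DeliveryStreamName: aws.String("my-delivery-stream"),
		TagKeys:            []*string{aws.String("team"), aws.String("env")},
	}
	// Validate mirrors the generated checks above: required fields and minimum lengths.
	if err := input.Validate(); err != nil {
		fmt.Println("bad input:", err)
		return
	}
	if _, err := svc.UntagDeliveryStream(input); err != nil {
		fmt.Println("UntagDeliveryStream failed:", err)
		return
	}
	fmt.Println("tags removed")
}
```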
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UntagDeliveryStreamOutput) GoString() string { + return s.String() +} + +type UpdateDestinationInput struct { + _ struct{} `type:"structure"` + + // Describes an update for a destination in the Serverless offering for Amazon + // OpenSearch Service. + AmazonOpenSearchServerlessDestinationUpdate *AmazonOpenSearchServerlessDestinationUpdate `type:"structure"` + + // Describes an update for a destination in Amazon OpenSearch Service. + AmazonopensearchserviceDestinationUpdate *AmazonopensearchserviceDestinationUpdate `type:"structure"` + + // Obtain this value from the VersionId result of DeliveryStreamDescription. + // This value is required, and helps the service perform conditional operations. + // For example, if there is an interleaving update and this value is null, then + // the update destination fails. After the update is successful, the VersionId + // value is updated. The service then performs a merge of the old configuration + // with the new configuration. + // + // CurrentDeliveryStreamVersionId is a required field + CurrentDeliveryStreamVersionId *string `min:"1" type:"string" required:"true"` + + // The name of the delivery stream. + // + // DeliveryStreamName is a required field + DeliveryStreamName *string `min:"1" type:"string" required:"true"` + + // The ID of the destination. + // + // DestinationId is a required field + DestinationId *string `min:"1" type:"string" required:"true"` + + // Describes an update for a destination in Amazon ES. + ElasticsearchDestinationUpdate *ElasticsearchDestinationUpdate `type:"structure"` + + // Describes an update for a destination in Amazon S3. + ExtendedS3DestinationUpdate *ExtendedS3DestinationUpdate `type:"structure"` + + // Describes an update to the specified HTTP endpoint destination. + HttpEndpointDestinationUpdate *HttpEndpointDestinationUpdate `type:"structure"` + + // Describes an update for a destination in Amazon Redshift. + RedshiftDestinationUpdate *RedshiftDestinationUpdate `type:"structure"` + + // [Deprecated] Describes an update for a destination in Amazon S3. + // + // Deprecated: S3DestinationUpdate has been deprecated + S3DestinationUpdate *S3DestinationUpdate `deprecated:"true" type:"structure"` + + // Update to the Snowflake destination condiguration settings + SnowflakeDestinationUpdate *SnowflakeDestinationUpdate `type:"structure"` + + // Describes an update for a destination in Splunk. + SplunkDestinationUpdate *SplunkDestinationUpdate `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateDestinationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateDestinationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
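The CurrentDeliveryStreamVersionId doc above describes a conditional update keyed on the value returned for the delivery stream description. A sketch of that flow; the DescribeDeliveryStream call and the ErrorOutputPrefix member on ExtendedS3DestinationUpdate are assumptions, the latter mirroring the member shown on S3DestinationUpdate earlier in this diff:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/firehose"
)

func main() {
	svc := firehose.New(session.Must(session.NewSession()))
	name := aws.String("my-delivery-stream") // placeholder stream name

	// Read the current VersionId and DestinationId; the update is rejected
	// if another update changes the version in the meantime.
	desc, err := svc.DescribeDeliveryStream(&firehose.DescribeDeliveryStreamInput{
		DeliveryStreamName: name,
	})
	if err != nil {
		fmt.Println("describe failed:", err)
		return
	}
	stream := desc.DeliveryStreamDescription
	if len(stream.Destinations) == 0 {
		fmt.Println("no destinations configured")
		return
	}

	_, err = svc.UpdateDestination(&firehose.UpdateDestinationInput{
		DeliveryStreamName:             name,
		CurrentDeliveryStreamVersionId: stream.VersionId,
		DestinationId:                  stream.Destinations[0].DestinationId,
		// Assumed field, mirroring ErrorOutputPrefix on S3DestinationUpdate above.
		ExtendedS3DestinationUpdate: &firehose.ExtendedS3DestinationUpdate{
			ErrorOutputPrefix: aws.String("errors/!{firehose:error-output-type}/"),
		},
	})
	if err != nil {
		fmt.Println("update failed:", err)
		return
	}
	fmt.Println("destination updated")
}
```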
+func (s *UpdateDestinationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateDestinationInput"} + if s.CurrentDeliveryStreamVersionId == nil { + invalidParams.Add(request.NewErrParamRequired("CurrentDeliveryStreamVersionId")) } if s.CurrentDeliveryStreamVersionId != nil && len(*s.CurrentDeliveryStreamVersionId) < 1 { invalidParams.Add(request.NewErrParamMinLen("CurrentDeliveryStreamVersionId", 1)) @@ -11718,16 +12607,100 @@ func (s UpdateDestinationOutput) GoString() string { return s.String() } +type VerifyResourcesExistForTagrisInput struct { + _ struct{} `type:"structure"` + + // TagrisSweepList is a required field + TagrisSweepList []*TagrisSweepListItem `type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s VerifyResourcesExistForTagrisInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s VerifyResourcesExistForTagrisInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *VerifyResourcesExistForTagrisInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "VerifyResourcesExistForTagrisInput"} + if s.TagrisSweepList == nil { + invalidParams.Add(request.NewErrParamRequired("TagrisSweepList")) + } + if s.TagrisSweepList != nil { + for i, v := range s.TagrisSweepList { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TagrisSweepList", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTagrisSweepList sets the TagrisSweepList field's value. +func (s *VerifyResourcesExistForTagrisInput) SetTagrisSweepList(v []*TagrisSweepListItem) *VerifyResourcesExistForTagrisInput { + s.TagrisSweepList = v + return s +} + +type VerifyResourcesExistForTagrisOutput struct { + _ struct{} `type:"structure"` + + // TagrisSweepListResult is a required field + TagrisSweepListResult map[string]*string `type:"map" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s VerifyResourcesExistForTagrisOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s VerifyResourcesExistForTagrisOutput) GoString() string { + return s.String() +} + +// SetTagrisSweepListResult sets the TagrisSweepListResult field's value. 
+func (s *VerifyResourcesExistForTagrisOutput) SetTagrisSweepListResult(v map[string]*string) *VerifyResourcesExistForTagrisOutput { + s.TagrisSweepListResult = v + return s +} + // The details of the VPC of the Amazon OpenSearch or Amazon OpenSearch Serverless // destination. type VpcConfiguration struct { _ struct{} `type:"structure"` // The ARN of the IAM role that you want the delivery stream to use to create - // endpoints in the destination VPC. You can use your existing Kinesis Data - // Firehose delivery role or you can specify a new role. In either case, make - // sure that the role trusts the Kinesis Data Firehose service principal and - // that it grants the following permissions: + // endpoints in the destination VPC. You can use your existing Firehose delivery + // role or you can specify a new role. In either case, make sure that the role + // trusts the Firehose service principal and that it grants the following permissions: // // * ec2:DescribeVpcs // @@ -11745,42 +12718,44 @@ type VpcConfiguration struct { // // * ec2:DeleteNetworkInterface // - // If you revoke these permissions after you create the delivery stream, Kinesis - // Data Firehose can't scale out by creating more ENIs when necessary. You might - // therefore see a degradation in performance. + // When you specify subnets for delivering data to the destination in a private + // VPC, make sure you have enough number of free IP addresses in chosen subnets. + // If there is no available free IP address in a specified subnet, Firehose + // cannot create or add ENIs for the data delivery in the private VPC, and the + // delivery will be degraded or fail. // // RoleARN is a required field RoleARN *string `min:"1" type:"string" required:"true"` - // The IDs of the security groups that you want Kinesis Data Firehose to use - // when it creates ENIs in the VPC of the Amazon ES destination. You can use - // the same security group that the Amazon ES domain uses or different ones. - // If you specify different security groups here, ensure that they allow outbound - // HTTPS traffic to the Amazon ES domain's security group. Also ensure that - // the Amazon ES domain's security group allows HTTPS traffic from the security - // groups specified here. If you use the same security group for both your delivery - // stream and the Amazon ES domain, make sure the security group inbound rule - // allows HTTPS traffic. For more information about security group rules, see - // Security group rules (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html#SecurityGroupRules) + // The IDs of the security groups that you want Firehose to use when it creates + // ENIs in the VPC of the Amazon ES destination. You can use the same security + // group that the Amazon ES domain uses or different ones. If you specify different + // security groups here, ensure that they allow outbound HTTPS traffic to the + // Amazon ES domain's security group. Also ensure that the Amazon ES domain's + // security group allows HTTPS traffic from the security groups specified here. + // If you use the same security group for both your delivery stream and the + // Amazon ES domain, make sure the security group inbound rule allows HTTPS + // traffic. For more information about security group rules, see Security group + // rules (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html#SecurityGroupRules) // in the Amazon VPC documentation. 
// // SecurityGroupIds is a required field SecurityGroupIds []*string `min:"1" type:"list" required:"true"` - // The IDs of the subnets that you want Kinesis Data Firehose to use to create - // ENIs in the VPC of the Amazon ES destination. Make sure that the routing - // tables and inbound and outbound rules allow traffic to flow from the subnets - // whose IDs are specified here to the subnets that have the destination Amazon - // ES endpoints. Kinesis Data Firehose creates at least one ENI in each of the - // subnets that are specified here. Do not delete or modify these ENIs. - // - // The number of ENIs that Kinesis Data Firehose creates in the subnets specified - // here scales up and down automatically based on throughput. To enable Kinesis - // Data Firehose to scale up the number of ENIs to match throughput, ensure - // that you have sufficient quota. To help you calculate the quota you need, - // assume that Kinesis Data Firehose can create up to three ENIs for this delivery - // stream for each of the subnets specified here. For more information about - // ENI quota, see Network Interfaces (https://docs.aws.amazon.com/vpc/latest/userguide/amazon-vpc-limits.html#vpc-limits-enis) + // The IDs of the subnets that you want Firehose to use to create ENIs in the + // VPC of the Amazon ES destination. Make sure that the routing tables and inbound + // and outbound rules allow traffic to flow from the subnets whose IDs are specified + // here to the subnets that have the destination Amazon ES endpoints. Firehose + // creates at least one ENI in each of the subnets that are specified here. + // Do not delete or modify these ENIs. + // + // The number of ENIs that Firehose creates in the subnets specified here scales + // up and down automatically based on throughput. To enable Firehose to scale + // up the number of ENIs to match throughput, ensure that you have sufficient + // quota. To help you calculate the quota you need, assume that Firehose can + // create up to three ENIs for this delivery stream for each of the subnets + // specified here. For more information about ENI quota, see Network Interfaces + // (https://docs.aws.amazon.com/vpc/latest/userguide/amazon-vpc-limits.html#vpc-limits-enis) // in the Amazon VPC Quotas topic. // // SubnetIds is a required field @@ -11856,10 +12831,9 @@ type VpcConfigurationDescription struct { _ struct{} `type:"structure"` // The ARN of the IAM role that the delivery stream uses to create endpoints - // in the destination VPC. You can use your existing Kinesis Data Firehose delivery - // role or you can specify a new role. In either case, make sure that the role - // trusts the Kinesis Data Firehose service principal and that it grants the - // following permissions: + // in the destination VPC. You can use your existing Firehose delivery role + // or you can specify a new role. In either case, make sure that the role trusts + // the Firehose service principal and that it grants the following permissions: // // * ec2:DescribeVpcs // @@ -11877,16 +12851,16 @@ type VpcConfigurationDescription struct { // // * ec2:DeleteNetworkInterface // - // If you revoke these permissions after you create the delivery stream, Kinesis - // Data Firehose can't scale out by creating more ENIs when necessary. You might - // therefore see a degradation in performance. + // If you revoke these permissions after you create the delivery stream, Firehose + // can't scale out by creating more ENIs when necessary. You might therefore + // see a degradation in performance. 
// // RoleARN is a required field RoleARN *string `min:"1" type:"string" required:"true"` - // The IDs of the security groups that Kinesis Data Firehose uses when it creates - // ENIs in the VPC of the Amazon ES destination. You can use the same security - // group that the Amazon ES domain uses or different ones. If you specify different + // The IDs of the security groups that Firehose uses when it creates ENIs in + // the VPC of the Amazon ES destination. You can use the same security group + // that the Amazon ES domain uses or different ones. If you specify different // security groups, ensure that they allow outbound HTTPS traffic to the Amazon // ES domain's security group. Also ensure that the Amazon ES domain's security // group allows HTTPS traffic from the security groups specified here. If you @@ -11899,20 +12873,20 @@ type VpcConfigurationDescription struct { // SecurityGroupIds is a required field SecurityGroupIds []*string `min:"1" type:"list" required:"true"` - // The IDs of the subnets that Kinesis Data Firehose uses to create ENIs in - // the VPC of the Amazon ES destination. Make sure that the routing tables and - // inbound and outbound rules allow traffic to flow from the subnets whose IDs - // are specified here to the subnets that have the destination Amazon ES endpoints. - // Kinesis Data Firehose creates at least one ENI in each of the subnets that - // are specified here. Do not delete or modify these ENIs. - // - // The number of ENIs that Kinesis Data Firehose creates in the subnets specified - // here scales up and down automatically based on throughput. To enable Kinesis - // Data Firehose to scale up the number of ENIs to match throughput, ensure - // that you have sufficient quota. To help you calculate the quota you need, - // assume that Kinesis Data Firehose can create up to three ENIs for this delivery - // stream for each of the subnets specified here. For more information about - // ENI quota, see Network Interfaces (https://docs.aws.amazon.com/vpc/latest/userguide/amazon-vpc-limits.html#vpc-limits-enis) + // The IDs of the subnets that Firehose uses to create ENIs in the VPC of the + // Amazon ES destination. Make sure that the routing tables and inbound and + // outbound rules allow traffic to flow from the subnets whose IDs are specified + // here to the subnets that have the destination Amazon ES endpoints. Firehose + // creates at least one ENI in each of the subnets that are specified here. + // Do not delete or modify these ENIs. + // + // The number of ENIs that Firehose creates in the subnets specified here scales + // up and down automatically based on throughput. To enable Firehose to scale + // up the number of ENIs to match throughput, ensure that you have sufficient + // quota. To help you calculate the quota you need, assume that Firehose can + // create up to three ENIs for this delivery stream for each of the subnets + // specified here. For more information about ENI quota, see Network Interfaces + // (https://docs.aws.amazon.com/vpc/latest/userguide/amazon-vpc-limits.html#vpc-limits-enis) // in the Amazon VPC Quotas topic. 
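The requirements described above — a role trusted by the Firehose service principal, security groups that allow HTTPS to the destination domain, and subnets with free IP addresses — all land in the VpcConfiguration shape. A small sketch with placeholder ARNs and IDs:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/firehose"
)

func main() {
	vpcCfg := &firehose.VpcConfiguration{
		// Role trusted by the Firehose service principal, carrying the ec2:Describe*,
		// CreateNetworkInterface, and DeleteNetworkInterface permissions listed above.
		RoleARN: aws.String("arn:aws:iam::123456789012:role/firehose-vpc-delivery"),
		// Must allow outbound HTTPS to the destination domain's security group.
		SecurityGroupIds: aws.StringSlice([]string{"sg-0123456789abcdef0"}),
		// Firehose creates at least one ENI per subnet; keep roughly three free
		// IP addresses per subnet as scaling headroom.
		SubnetIds: aws.StringSlice([]string{"subnet-0123456789abcdef0", "subnet-0fedcba9876543210"}),
	}

	if err := vpcCfg.Validate(); err != nil {
		fmt.Println("invalid VPC configuration:", err)
		return
	}
	fmt.Println(vpcCfg)
}
```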
// // SubnetIds is a required field @@ -12456,6 +13430,9 @@ const ( // ProcessorParameterNameCompressionFormat is a ProcessorParameterName enum value ProcessorParameterNameCompressionFormat = "CompressionFormat" + + // ProcessorParameterNameDataMessageExtraction is a ProcessorParameterName enum value + ProcessorParameterNameDataMessageExtraction = "DataMessageExtraction" ) // ProcessorParameterName_Values returns all elements of the ProcessorParameterName enum @@ -12471,6 +13448,7 @@ func ProcessorParameterName_Values() []string { ProcessorParameterNameSubRecordType, ProcessorParameterNameDelimiter, ProcessorParameterNameCompressionFormat, + ProcessorParameterNameDataMessageExtraction, } } @@ -12481,6 +13459,9 @@ const ( // ProcessorTypeDecompression is a ProcessorType enum value ProcessorTypeDecompression = "Decompression" + // ProcessorTypeCloudWatchLogProcessing is a ProcessorType enum value + ProcessorTypeCloudWatchLogProcessing = "CloudWatchLogProcessing" + // ProcessorTypeLambda is a ProcessorType enum value ProcessorTypeLambda = "Lambda" @@ -12496,6 +13477,7 @@ func ProcessorType_Values() []string { return []string{ ProcessorTypeRecordDeAggregation, ProcessorTypeDecompression, + ProcessorTypeCloudWatchLogProcessing, ProcessorTypeLambda, ProcessorTypeMetadataExtraction, ProcessorTypeAppendDelimiterToRecord, @@ -12585,3 +13567,19 @@ func SplunkS3BackupMode_Values() []string { SplunkS3BackupModeAllEvents, } } + +const ( + // TagrisStatusActive is a TagrisStatus enum value + TagrisStatusActive = "ACTIVE" + + // TagrisStatusNotActive is a TagrisStatus enum value + TagrisStatusNotActive = "NOT_ACTIVE" +) + +// TagrisStatus_Values returns all elements of the TagrisStatus enum +func TagrisStatus_Values() []string { + return []string{ + TagrisStatusActive, + TagrisStatusNotActive, + } +} diff --git a/service/firehose/doc.go b/service/firehose/doc.go index 22cc2e8180a..2c7fd8e85f2 100644 --- a/service/firehose/doc.go +++ b/service/firehose/doc.go @@ -3,10 +3,9 @@ // Package firehose provides the client and types for making API // requests to Amazon Kinesis Firehose. // -// Amazon Kinesis Data Firehose is a fully managed service that delivers real-time -// streaming data to destinations such as Amazon Simple Storage Service (Amazon -// S3), Amazon OpenSearch Service, Amazon Redshift, Splunk, and various other -// supportd destinations. +// Amazon Data Firehose is a fully managed service that delivers real-time streaming +// data to destinations such as Amazon Simple Storage Service (Amazon S3), Amazon +// OpenSearch Service, Amazon Redshift, Splunk, and various other supportd destinations. // // See https://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04 for more information on this service. // diff --git a/service/firehose/errors.go b/service/firehose/errors.go index 3f7e7ab0ff9..f920e8a433b 100644 --- a/service/firehose/errors.go +++ b/service/firehose/errors.go @@ -24,10 +24,10 @@ const ( // ErrCodeInvalidKMSResourceException for service response error code // "InvalidKMSResourceException". // - // Kinesis Data Firehose throws this exception when an attempt to put records - // or to start or stop delivery stream encryption fails. This happens when the - // KMS service throws one of the following exception types: AccessDeniedException, - // InvalidStateException, DisabledException, or NotFoundException. + // Firehose throws this exception when an attempt to put records or to start + // or stop delivery stream encryption fails. 
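The ProcessorType and ProcessorParameterName values added above correspond to the Data Message Extraction feature for decompressed CloudWatch Logs called out in this release. The pairing below is an illustrative guess at how the new constants compose inside a ProcessingConfiguration, not a documented recipe from this diff; the parameter values are assumptions to verify against the Firehose documentation.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/firehose"
)

func main() {
	// Decompress gzipped CloudWatch Logs records, then extract the message payload.
	processing := &firehose.ProcessingConfiguration{
		Enabled: aws.Bool(true),
		Processors: []*firehose.Processor{
			{
				Type: aws.String(firehose.ProcessorTypeDecompression),
				Parameters: []*firehose.ProcessorParameter{{
					ParameterName:  aws.String(firehose.ProcessorParameterNameCompressionFormat),
					ParameterValue: aws.String("GZIP"), // assumed value
				}},
			},
			{
				Type: aws.String(firehose.ProcessorTypeCloudWatchLogProcessing),
				Parameters: []*firehose.ProcessorParameter{{
					ParameterName:  aws.String(firehose.ProcessorParameterNameDataMessageExtraction),
					ParameterValue: aws.String("true"), // assumed value
				}},
			},
		},
	}
	fmt.Println(processing)
}
```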
This happens when the KMS service + // throws one of the following exception types: AccessDeniedException, InvalidStateException, + // DisabledException, or NotFoundException. ErrCodeInvalidKMSResourceException = "InvalidKMSResourceException" // ErrCodeInvalidSourceException for service response error code @@ -37,6 +37,10 @@ const ( // is enabled. ErrCodeInvalidSourceException = "InvalidSourceException" + // ErrCodeInvalidStreamTypeException for service response error code + // "InvalidStreamTypeException". + ErrCodeInvalidStreamTypeException = "InvalidStreamTypeException" + // ErrCodeLimitExceededException for service response error code // "LimitExceededException". // @@ -61,17 +65,48 @@ const ( // The service is unavailable. Back off and retry the operation. If you continue // to see the exception, throughput limits for the delivery stream may have // been exceeded. For more information about limits and how to request an increase, - // see Amazon Kinesis Data Firehose Limits (https://docs.aws.amazon.com/firehose/latest/dev/limits.html). + // see Amazon Firehose Limits (https://docs.aws.amazon.com/firehose/latest/dev/limits.html). ErrCodeServiceUnavailableException = "ServiceUnavailableException" + + // ErrCodeTagrisAccessDeniedException for service response error code + // "TagrisAccessDeniedException". + ErrCodeTagrisAccessDeniedException = "TagrisAccessDeniedException" + + // ErrCodeTagrisInternalServiceException for service response error code + // "TagrisInternalServiceException". + ErrCodeTagrisInternalServiceException = "TagrisInternalServiceException" + + // ErrCodeTagrisInvalidArnException for service response error code + // "TagrisInvalidArnException". + ErrCodeTagrisInvalidArnException = "TagrisInvalidArnException" + + // ErrCodeTagrisInvalidParameterException for service response error code + // "TagrisInvalidParameterException". + ErrCodeTagrisInvalidParameterException = "TagrisInvalidParameterException" + + // ErrCodeTagrisPartialResourcesExistResultsException for service response error code + // "TagrisPartialResourcesExistResultsException". + ErrCodeTagrisPartialResourcesExistResultsException = "TagrisPartialResourcesExistResultsException" + + // ErrCodeTagrisThrottledException for service response error code + // "TagrisThrottledException". 
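The new error codes follow the usual v1 error-handling pattern: type-assert to awserr.Error and switch on Code(). A sketch against an arbitrary Firehose call; the stream name is a placeholder.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/firehose"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := firehose.New(sess)

	_, err := svc.StartDeliveryStreamEncryption(&firehose.StartDeliveryStreamEncryptionInput{
		DeliveryStreamName: aws.String("my-delivery-stream"), // placeholder
	})
	if err != nil {
		if aerr, ok := err.(awserr.Error); ok {
			switch aerr.Code() {
			case firehose.ErrCodeInvalidKMSResourceException:
				fmt.Println("KMS problem:", aerr.Message())
			case firehose.ErrCodeResourceNotFoundException:
				fmt.Println("no such delivery stream:", aerr.Message())
			case firehose.ErrCodeLimitExceededException, firehose.ErrCodeServiceUnavailableException:
				fmt.Println("back off and retry:", aerr.Message())
			default:
				fmt.Println(aerr.Code(), aerr.Message())
			}
			return
		}
		log.Fatal(err)
	}
	fmt.Println("encryption starting")
}
```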
+ ErrCodeTagrisThrottledException = "TagrisThrottledException" ) var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ - "ConcurrentModificationException": newErrorConcurrentModificationException, - "InvalidArgumentException": newErrorInvalidArgumentException, - "InvalidKMSResourceException": newErrorInvalidKMSResourceException, - "InvalidSourceException": newErrorInvalidSourceException, - "LimitExceededException": newErrorLimitExceededException, - "ResourceInUseException": newErrorResourceInUseException, - "ResourceNotFoundException": newErrorResourceNotFoundException, - "ServiceUnavailableException": newErrorServiceUnavailableException, + "ConcurrentModificationException": newErrorConcurrentModificationException, + "InvalidArgumentException": newErrorInvalidArgumentException, + "InvalidKMSResourceException": newErrorInvalidKMSResourceException, + "InvalidSourceException": newErrorInvalidSourceException, + "InvalidStreamTypeException": newErrorInvalidStreamTypeException, + "LimitExceededException": newErrorLimitExceededException, + "ResourceInUseException": newErrorResourceInUseException, + "ResourceNotFoundException": newErrorResourceNotFoundException, + "ServiceUnavailableException": newErrorServiceUnavailableException, + "TagrisAccessDeniedException": newErrorTagrisAccessDeniedException, + "TagrisInternalServiceException": newErrorTagrisInternalServiceException, + "TagrisInvalidArnException": newErrorTagrisInvalidArnException, + "TagrisInvalidParameterException": newErrorTagrisInvalidParameterException, + "TagrisPartialResourcesExistResultsException": newErrorTagrisPartialResourcesExistResultsException, + "TagrisThrottledException": newErrorTagrisThrottledException, } diff --git a/service/firehose/firehoseiface/interface.go b/service/firehose/firehoseiface/interface.go index dd0889202fa..dcfac41ffff 100644 --- a/service/firehose/firehoseiface/interface.go +++ b/service/firehose/firehoseiface/interface.go @@ -72,6 +72,10 @@ type FirehoseAPI interface { DescribeDeliveryStreamWithContext(aws.Context, *firehose.DescribeDeliveryStreamInput, ...request.Option) (*firehose.DescribeDeliveryStreamOutput, error) DescribeDeliveryStreamRequest(*firehose.DescribeDeliveryStreamInput) (*request.Request, *firehose.DescribeDeliveryStreamOutput) + GetKinesisStream(*firehose.GetKinesisStreamInput) (*firehose.GetKinesisStreamOutput, error) + GetKinesisStreamWithContext(aws.Context, *firehose.GetKinesisStreamInput, ...request.Option) (*firehose.GetKinesisStreamOutput, error) + GetKinesisStreamRequest(*firehose.GetKinesisStreamInput) (*request.Request, *firehose.GetKinesisStreamOutput) + ListDeliveryStreams(*firehose.ListDeliveryStreamsInput) (*firehose.ListDeliveryStreamsOutput, error) ListDeliveryStreamsWithContext(aws.Context, *firehose.ListDeliveryStreamsInput, ...request.Option) (*firehose.ListDeliveryStreamsOutput, error) ListDeliveryStreamsRequest(*firehose.ListDeliveryStreamsInput) (*request.Request, *firehose.ListDeliveryStreamsOutput) @@ -107,6 +111,10 @@ type FirehoseAPI interface { UpdateDestination(*firehose.UpdateDestinationInput) (*firehose.UpdateDestinationOutput, error) UpdateDestinationWithContext(aws.Context, *firehose.UpdateDestinationInput, ...request.Option) (*firehose.UpdateDestinationOutput, error) UpdateDestinationRequest(*firehose.UpdateDestinationInput) (*request.Request, *firehose.UpdateDestinationOutput) + + VerifyResourcesExistForTagris(*firehose.VerifyResourcesExistForTagrisInput) (*firehose.VerifyResourcesExistForTagrisOutput, error) + 
VerifyResourcesExistForTagrisWithContext(aws.Context, *firehose.VerifyResourcesExistForTagrisInput, ...request.Option) (*firehose.VerifyResourcesExistForTagrisOutput, error) + VerifyResourcesExistForTagrisRequest(*firehose.VerifyResourcesExistForTagrisInput) (*request.Request, *firehose.VerifyResourcesExistForTagrisOutput) } var _ FirehoseAPI = (*firehose.Firehose)(nil) diff --git a/service/lambda/api.go b/service/lambda/api.go index d032c07df7f..d3ce0bd1374 100644 --- a/service/lambda/api.go +++ b/service/lambda/api.go @@ -3488,6 +3488,10 @@ func (c *Lambda) InvokeAsyncRequest(input *InvokeAsyncInput) (req *request.Reque // // Invokes a function asynchronously. // +// If you do use the InvokeAsync action, note that it doesn't support the use +// of X-Ray active tracing. Trace ID is not propagated to the function, even +// if X-Ray active tracing is turned on. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -8850,8 +8854,9 @@ type CreateEventSourceMappingInput struct { // the batch in two and retry. BisectBatchOnFunctionError *bool `type:"boolean"` - // (Kinesis and DynamoDB Streams only) A standard Amazon SQS queue or standard - // Amazon SNS topic destination for discarded records. + // (Kinesis, DynamoDB Streams, Amazon MSK, and self-managed Kafka only) A configuration + // object that specifies the destination of an event after Lambda processes + // it. DestinationConfig *DestinationConfig `type:"structure"` // Specific configuration settings for a DocumentDB event source. @@ -8871,7 +8876,9 @@ type CreateEventSourceMappingInput struct { // // * Amazon Simple Queue Service – The ARN of the queue. // - // * Amazon Managed Streaming for Apache Kafka – The ARN of the cluster. + // * Amazon Managed Streaming for Apache Kafka – The ARN of the cluster + // or the ARN of the VPC connection (for cross-account event source mappings + // (https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html#msk-multi-vpc)). // // * Amazon MQ – The ARN of the broker. // @@ -9227,7 +9234,8 @@ type CreateFunctionInput struct { Environment *Environment `type:"structure"` // The size of the function's /tmp directory in MB. The default value is 512, - // but can be any whole number between 512 and 10,240 MB. + // but can be any whole number between 512 and 10,240 MB. For more information, + // see Configuring ephemeral storage (console) (https://docs.aws.amazon.com/lambda/latest/dg/configuration-function-common.html#configuration-ephemeral-storage). EphemeralStorage *EphemeralStorage `type:"structure"` // Connection settings for an Amazon EFS file system. @@ -11427,7 +11435,8 @@ func (s *EnvironmentResponse) SetVariables(v map[string]*string) *EnvironmentRes } // The size of the function's /tmp directory in MB. The default value is 512, -// but it can be any whole number between 512 and 10,240 MB. +// but can be any whole number between 512 and 10,240 MB. For more information, +// see Configuring ephemeral storage (console) (https://docs.aws.amazon.com/lambda/latest/dg/configuration-function-common.html#configuration-ephemeral-storage). type EphemeralStorage struct { _ struct{} `type:"structure"` @@ -12106,8 +12115,9 @@ type FunctionConfiguration struct { // Omitted from CloudTrail logs. Environment *EnvironmentResponse `type:"structure"` - // The size of the function’s /tmp directory in MB. 
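For the ephemeral storage setting referenced above, a brief sketch of raising /tmp on an existing function; the function name is a placeholder.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lambda"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := lambda.New(sess)

	// Raise /tmp from the 512 MB default to 2 GB; valid values are whole
	// numbers of MB between 512 and 10,240.
	out, err := svc.UpdateFunctionConfiguration(&lambda.UpdateFunctionConfigurationInput{
		FunctionName:     aws.String("my-function"), // placeholder
		EphemeralStorage: &lambda.EphemeralStorage{Size: aws.Int64(2048)},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("ephemeral storage (MB):", aws.Int64Value(out.EphemeralStorage.Size))
}
```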
The default value is 512, - // but it can be any whole number between 512 and 10,240 MB. + // The size of the function's /tmp directory in MB. The default value is 512, + // but can be any whole number between 512 and 10,240 MB. For more information, + // see Configuring ephemeral storage (console) (https://docs.aws.amazon.com/lambda/latest/dg/configuration-function-common.html#configuration-ephemeral-storage). EphemeralStorage *EphemeralStorage `type:"structure"` // Connection settings for an Amazon EFS file system (https://docs.aws.amazon.com/lambda/latest/dg/configuration-filesystem.html). @@ -15193,7 +15203,8 @@ type InvokeInput struct { _ struct{} `type:"structure" payload:"Payload"` // Up to 3,583 bytes of base64-encoded data about the invoking client to pass - // to the function in the context object. + // to the function in the context object. Lambda passes the ClientContext object + // to your function for synchronous invocations only. ClientContext *string `location:"header" locationName:"X-Amz-Client-Context" type:"string"` // The name of the Lambda function, version, or alias. @@ -16734,7 +16745,9 @@ type ListEventSourceMappingsInput struct { // // * Amazon Simple Queue Service – The ARN of the queue. // - // * Amazon Managed Streaming for Apache Kafka – The ARN of the cluster. + // * Amazon Managed Streaming for Apache Kafka – The ARN of the cluster + // or the ARN of the VPC connection (for cross-account event source mappings + // (https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html#msk-multi-vpc)). // // * Amazon MQ – The ARN of the broker. // @@ -17348,7 +17361,7 @@ type ListLayerVersionsInput struct { // The compatible instruction set architecture (https://docs.aws.amazon.com/lambda/latest/dg/foundation-arch.html). CompatibleArchitecture *string `location:"querystring" locationName:"CompatibleArchitecture" type:"string" enum:"Architecture"` - // A runtime identifier. For example, go1.x. + // A runtime identifier. For example, java21. // // The following list includes deprecated runtimes. For more information, see // Runtime deprecation policy (https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html#runtime-support-policy). @@ -17479,7 +17492,7 @@ type ListLayersInput struct { // The compatible instruction set architecture (https://docs.aws.amazon.com/lambda/latest/dg/foundation-arch.html). CompatibleArchitecture *string `location:"querystring" locationName:"CompatibleArchitecture" type:"string" enum:"Architecture"` - // A runtime identifier. For example, go1.x. + // A runtime identifier. For example, java21. // // The following list includes deprecated runtimes. For more information, see // Runtime deprecation policy (https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html#runtime-support-policy). @@ -17920,7 +17933,7 @@ type LoggingConfig struct { // Set this property to filter the application logs for your function that Lambda // sends to CloudWatch. Lambda only sends application logs at the selected level - // and lower. + // of detail and lower, where TRACE is the highest level and FATAL is the lowest. ApplicationLogLevel *string `type:"string" enum:"ApplicationLogLevel"` // The format in which Lambda sends your function's application and system logs @@ -17935,7 +17948,7 @@ type LoggingConfig struct { // Set this property to filter the system logs for your function that Lambda // sends to CloudWatch. Lambda only sends system logs at the selected level - // and lower. 
+ // of detail and lower, where DEBUG is the highest level and WARN is the lowest. SystemLogLevel *string `type:"string" enum:"SystemLogLevel"` } @@ -17999,6 +18012,18 @@ type OnFailure struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the destination resource. + // + // To retain records of asynchronous invocations (https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html#invocation-async-destinations), + // you can configure an Amazon SNS topic, Amazon SQS queue, Lambda function, + // or Amazon EventBridge event bus as the destination. + // + // To retain records of failed invocations from Kinesis and DynamoDB event sources + // (https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventsourcemapping.html#event-source-mapping-destinations), + // you can configure an Amazon SNS topic or Amazon SQS queue as the destination. + // + // To retain records of failed invocations from self-managed Kafka (https://docs.aws.amazon.com/lambda/latest/dg/with-kafka.html#services-smaa-onfailure-destination) + // or Amazon MSK (https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html#services-msk-onfailure-destination), + // you can configure an Amazon SNS topic or Amazon SQS queue as the destination. Destination *string `type:"string"` } @@ -21454,8 +21479,9 @@ type UpdateEventSourceMappingInput struct { // the batch in two and retry. BisectBatchOnFunctionError *bool `type:"boolean"` - // (Kinesis and DynamoDB Streams only) A standard Amazon SQS queue or standard - // Amazon SNS topic destination for discarded records. + // (Kinesis, DynamoDB Streams, Amazon MSK, and self-managed Kafka only) A configuration + // object that specifies the destination of an event after Lambda processes + // it. DestinationConfig *DestinationConfig `type:"structure"` // Specific configuration settings for a DocumentDB event source. @@ -21893,7 +21919,8 @@ type UpdateFunctionConfigurationInput struct { Environment *Environment `type:"structure"` // The size of the function's /tmp directory in MB. The default value is 512, - // but can be any whole number between 512 and 10,240 MB. + // but can be any whole number between 512 and 10,240 MB. For more information, + // see Configuring ephemeral storage (console) (https://docs.aws.amazon.com/lambda/latest/dg/configuration-function-common.html#configuration-ephemeral-storage). EphemeralStorage *EphemeralStorage `type:"structure"` // Connection settings for an Amazon EFS file system. diff --git a/service/rds/api.go b/service/rds/api.go index 652af2e022f..09d64650a54 100644 --- a/service/rds/api.go +++ b/service/rds/api.go @@ -21923,7 +21923,8 @@ type CreateDBClusterInput struct { // // Constraints: // - // * Must contain from 1 to 63 letters, numbers, or hyphens. + // * Must contain from 1 to 63 (for Aurora DB clusters) or 1 to 52 (for Multi-AZ + // DB clusters) letters, numbers, or hyphens. // // * First character must be a letter. 
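The expanded OnFailure/DestinationConfig documentation above translates into code along these lines: point failed records from an existing event source mapping at an SQS queue. The mapping UUID and queue ARN are placeholders.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lambda"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := lambda.New(sess)

	// Retain records the function cannot process by sending them to an SQS queue.
	out, err := svc.UpdateEventSourceMapping(&lambda.UpdateEventSourceMappingInput{
		UUID: aws.String("14e0db71-0000-0000-0000-000000000000"), // placeholder mapping ID
		DestinationConfig: &lambda.DestinationConfig{
			OnFailure: &lambda.OnFailure{
				Destination: aws.String("arn:aws:sqs:us-east-1:123456789012:failed-records"),
			},
		},
		MaximumRetryAttempts: aws.Int64(2),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("mapping state:", aws.StringValue(out.State))
}
```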
// @@ -23003,7 +23004,7 @@ type CreateDBClusterParameterGroupInput struct { // // RDS for PostgreSQL // - // Example: postgres12 + // Example: postgres13 // // To list all of the available parameter group families for a DB engine, use // the following command: @@ -44625,6 +44626,8 @@ func (s *FailoverState) SetToDbClusterArn(v string) *FailoverState { // // - DescribeDBRecommendations // +// - DescribeDBShardGroups +// // - DescribePendingMaintenanceActions type Filter struct { _ struct{} `type:"structure"` diff --git a/service/sns/api.go b/service/sns/api.go index 2ff147ccf4f..36e14883e62 100644 --- a/service/sns/api.go +++ b/service/sns/api.go @@ -368,8 +368,15 @@ func (c *SNS) CreatePlatformApplicationRequest(input *CreatePlatformApplicationI // - For APNS and APNS_SANDBOX using token credentials, PlatformPrincipal // is signing key ID and PlatformCredential is signing key. // -// - For GCM (Firebase Cloud Messaging), there is no PlatformPrincipal and -// the PlatformCredential is API key. +// - For GCM (Firebase Cloud Messaging) using key credentials, there is no +// PlatformPrincipal. The PlatformCredential is API key. +// +// - For GCM (Firebase Cloud Messaging) using token credentials, there is +// no PlatformPrincipal. The PlatformCredential is a JSON formatted private +// key file. When using the Amazon Web Services CLI, the file must be in +// string format and special characters must be ignored. To format the file +// correctly, Amazon SNS recommends using the following command: SERVICE_JSON=`jq +// @json <<< cat service.json`. // // - For MPNS, PlatformPrincipal is TLS certificate and PlatformCredential // is private key. @@ -4905,8 +4912,12 @@ type CheckIfPhoneNumberIsOptedOutInput struct { // The phone number for which you want to check the opt out status. // + // PhoneNumber is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CheckIfPhoneNumberIsOptedOutInput's + // String and GoString methods. + // // PhoneNumber is a required field - PhoneNumber *string `locationName:"phoneNumber" type:"string" required:"true"` + PhoneNumber *string `locationName:"phoneNumber" type:"string" required:"true" sensitive:"true"` } // String returns the string representation. @@ -5328,8 +5339,12 @@ type CreateSMSSandboxPhoneNumberInput struct { // this phone number to the list of verified phone numbers that you can send // SMS messages to. // + // PhoneNumber is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateSMSSandboxPhoneNumberInput's + // String and GoString methods. + // // PhoneNumber is a required field - PhoneNumber *string `type:"string" required:"true"` + PhoneNumber *string `type:"string" required:"true" sensitive:"true"` } // String returns the string representation. @@ -5724,8 +5739,12 @@ type DeleteSMSSandboxPhoneNumberInput struct { // The destination phone number to delete. // + // PhoneNumber is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by DeleteSMSSandboxPhoneNumberInput's + // String and GoString methods. + // // PhoneNumber is a required field - PhoneNumber *string `type:"string" required:"true"` + PhoneNumber *string `type:"string" required:"true" sensitive:"true"` } // String returns the string representation. @@ -6131,6 +6150,11 @@ type GetPlatformApplicationAttributesOutput struct { // * ApplePlatformBundleID – The app identifier used to configure token-based // authentication. 
// + // * AuthenticationMethod – Returns the credential type used when sending + // push notifications from application to APNS/APNS_Sandbox, or application + // to GCM. APNS – Returns the token or certificate. GCM – Returns the + // token or key. + // // * EventEndpointCreated – Topic ARN to which EndpointCreated event notifications // should be sent. // @@ -7418,8 +7442,12 @@ type OptInPhoneNumberInput struct { // The phone number to opt in. Use E.164 format. // + // PhoneNumber is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by OptInPhoneNumberInput's + // String and GoString methods. + // // PhoneNumber is a required field - PhoneNumber *string `locationName:"phoneNumber" type:"string" required:"true"` + PhoneNumber *string `locationName:"phoneNumber" type:"string" required:"true" sensitive:"true"` } // String returns the string representation. @@ -7496,7 +7524,11 @@ type PhoneNumberInformation struct { NumberCapabilities []*string `type:"list" enum:"NumberCapability"` // The phone number. - PhoneNumber *string `type:"string"` + // + // PhoneNumber is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by PhoneNumberInformation's + // String and GoString methods. + PhoneNumber *string `type:"string" sensitive:"true"` // The list of supported routes. RouteType *string `type:"string" enum:"RouteType"` @@ -8057,7 +8089,11 @@ type PublishInput struct { // // If you don't specify a value for the PhoneNumber parameter, you must specify // a value for the TargetArn or TopicArn parameters. - PhoneNumber *string `type:"string"` + // + // PhoneNumber is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by PublishInput's + // String and GoString methods. + PhoneNumber *string `type:"string" sensitive:"true"` // Optional parameter to be used as the "Subject" line when the message is delivered // to email endpoints. This field will also be included, if present, in the @@ -8408,7 +8444,11 @@ type SMSSandboxPhoneNumber struct { _ struct{} `type:"structure"` // The destination phone number. - PhoneNumber *string `type:"string"` + // + // PhoneNumber is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by SMSSandboxPhoneNumber's + // String and GoString methods. + PhoneNumber *string `type:"string" sensitive:"true"` // The destination phone number's verification status. Status *string `type:"string" enum:"SMSSandboxPhoneNumberVerificationStatus"` @@ -8551,7 +8591,13 @@ type SetPlatformApplicationAttributesInput struct { // service. For ADM, PlatformCredentialis client secret. For Apple Services // using certificate credentials, PlatformCredential is private key. For // Apple Services using token credentials, PlatformCredential is signing - // key. For GCM (Firebase Cloud Messaging), PlatformCredential is API key. + // key. For GCM (Firebase Cloud Messaging) using key credentials, there is + // no PlatformPrincipal. The PlatformCredential is API key. For GCM (Firebase + // Cloud Messaging) using token credentials, there is no PlatformPrincipal. + // The PlatformCredential is a JSON formatted private key file. When using + // the Amazon Web Services CLI, the file must be in string format and special + // characters must be ignored. To format the file correctly, Amazon SNS recommends + // using the following command: SERVICE_JSON=`jq @json <<< cat service.json`. 
// // * PlatformPrincipal – The principal received from the notification service. // For ADM, PlatformPrincipalis client id. For Apple Services using certificate @@ -9785,8 +9831,12 @@ type VerifySMSSandboxPhoneNumberInput struct { // The destination phone number to verify. // + // PhoneNumber is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by VerifySMSSandboxPhoneNumberInput's + // String and GoString methods. + // // PhoneNumber is a required field - PhoneNumber *string `type:"string" required:"true"` + PhoneNumber *string `type:"string" required:"true" sensitive:"true"` } // String returns the string representation.
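Because the PhoneNumber members across these SNS shapes are now tagged as sensitive, the generated String and GoString output masks the value instead of echoing it. A quick sketch; the phone number is a placeholder.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sns"
)

func main() {
	input := &sns.PublishInput{
		PhoneNumber: aws.String("+12065550100"), // placeholder E.164 number
		Message:     aws.String("hello"),
	}

	// PhoneNumber is a sensitive parameter, so its value is masked in the
	// string representation; Message is printed as usual.
	fmt.Println(input.String())
}
```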