diff --git a/CHANGELOG.md b/CHANGELOG.md index e0ebf8a4e21..8dc4f3666d2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,22 @@ +Release v1.44.309 (2023-07-26) +=== + +### Service Client Updates +* `service/cloudcontrol`: Updates service documentation +* `service/entityresolution`: Adds new service +* `service/glue`: Updates service API and documentation + * Release Glue Studio Snowflake Connector Node for SDK/CLI +* `service/healthlake`: Updates service documentation +* `service/managedblockchain-query`: Adds new service +* `service/mediaconvert`: Updates service API and documentation + * This release includes general updates to user documentation. +* `service/omics`: Updates service documentation +* `service/opensearchserverless`: Updates service API and documentation +* `service/polly`: Updates service API + * Amazon Polly adds 1 new voice - Lisa (nl-BE) +* `service/route53`: Updates service documentation + * Update that corrects the documents for received feedback. + Release v1.44.308 (2023-07-25) === diff --git a/aws/endpoints/defaults.go b/aws/endpoints/defaults.go index f0f75411f07..f8ab8850151 100644 --- a/aws/endpoints/defaults.go +++ b/aws/endpoints/defaults.go @@ -25190,7 +25190,7 @@ var awsPartition = partition{ Region: "af-south-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.af-south-1.amazonaws.com", + Hostname: "servicediscovery.af-south-1.api.aws", }, endpointKey{ Region: "ap-east-1", @@ -25199,7 +25199,7 @@ var awsPartition = partition{ Region: "ap-east-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.ap-east-1.amazonaws.com", + Hostname: "servicediscovery.ap-east-1.api.aws", }, endpointKey{ Region: "ap-northeast-1", @@ -25208,7 +25208,7 @@ var awsPartition = partition{ Region: "ap-northeast-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.ap-northeast-1.amazonaws.com", + Hostname: "servicediscovery.ap-northeast-1.api.aws", }, endpointKey{ Region: "ap-northeast-2", @@ -25217,7 +25217,7 @@ var awsPartition = partition{ Region: "ap-northeast-2", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.ap-northeast-2.amazonaws.com", + Hostname: "servicediscovery.ap-northeast-2.api.aws", }, endpointKey{ Region: "ap-northeast-3", @@ -25226,7 +25226,7 @@ var awsPartition = partition{ Region: "ap-northeast-3", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.ap-northeast-3.amazonaws.com", + Hostname: "servicediscovery.ap-northeast-3.api.aws", }, endpointKey{ Region: "ap-south-1", @@ -25235,7 +25235,7 @@ var awsPartition = partition{ Region: "ap-south-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.ap-south-1.amazonaws.com", + Hostname: "servicediscovery.ap-south-1.api.aws", }, endpointKey{ Region: "ap-south-2", @@ -25244,7 +25244,7 @@ var awsPartition = partition{ Region: "ap-south-2", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.ap-south-2.amazonaws.com", + Hostname: "servicediscovery.ap-south-2.api.aws", }, endpointKey{ Region: "ap-southeast-1", @@ -25253,7 +25253,7 @@ var awsPartition = partition{ Region: "ap-southeast-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.ap-southeast-1.amazonaws.com", + Hostname: "servicediscovery.ap-southeast-1.api.aws", }, endpointKey{ Region: "ap-southeast-2", @@ -25262,7 +25262,7 @@ var awsPartition = partition{ Region: "ap-southeast-2", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.ap-southeast-2.amazonaws.com", + Hostname: 
"servicediscovery.ap-southeast-2.api.aws", }, endpointKey{ Region: "ap-southeast-3", @@ -25271,7 +25271,7 @@ var awsPartition = partition{ Region: "ap-southeast-3", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.ap-southeast-3.amazonaws.com", + Hostname: "servicediscovery.ap-southeast-3.api.aws", }, endpointKey{ Region: "ap-southeast-4", @@ -25280,7 +25280,7 @@ var awsPartition = partition{ Region: "ap-southeast-4", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.ap-southeast-4.amazonaws.com", + Hostname: "servicediscovery.ap-southeast-4.api.aws", }, endpointKey{ Region: "ca-central-1", @@ -25289,7 +25289,7 @@ var awsPartition = partition{ Region: "ca-central-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.ca-central-1.amazonaws.com", + Hostname: "servicediscovery.ca-central-1.api.aws", }, endpointKey{ Region: "ca-central-1", @@ -25297,6 +25297,12 @@ var awsPartition = partition{ }: endpoint{ Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.ca-central-1.api.aws", + }, endpointKey{ Region: "ca-central-1-fips", }: endpoint{ @@ -25313,7 +25319,7 @@ var awsPartition = partition{ Region: "eu-central-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.eu-central-1.amazonaws.com", + Hostname: "servicediscovery.eu-central-1.api.aws", }, endpointKey{ Region: "eu-central-2", @@ -25322,7 +25328,7 @@ var awsPartition = partition{ Region: "eu-central-2", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.eu-central-2.amazonaws.com", + Hostname: "servicediscovery.eu-central-2.api.aws", }, endpointKey{ Region: "eu-north-1", @@ -25331,7 +25337,7 @@ var awsPartition = partition{ Region: "eu-north-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.eu-north-1.amazonaws.com", + Hostname: "servicediscovery.eu-north-1.api.aws", }, endpointKey{ Region: "eu-south-1", @@ -25340,7 +25346,7 @@ var awsPartition = partition{ Region: "eu-south-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.eu-south-1.amazonaws.com", + Hostname: "servicediscovery.eu-south-1.api.aws", }, endpointKey{ Region: "eu-south-2", @@ -25349,7 +25355,7 @@ var awsPartition = partition{ Region: "eu-south-2", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.eu-south-2.amazonaws.com", + Hostname: "servicediscovery.eu-south-2.api.aws", }, endpointKey{ Region: "eu-west-1", @@ -25358,7 +25364,7 @@ var awsPartition = partition{ Region: "eu-west-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.eu-west-1.amazonaws.com", + Hostname: "servicediscovery.eu-west-1.api.aws", }, endpointKey{ Region: "eu-west-2", @@ -25367,7 +25373,7 @@ var awsPartition = partition{ Region: "eu-west-2", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.eu-west-2.amazonaws.com", + Hostname: "servicediscovery.eu-west-2.api.aws", }, endpointKey{ Region: "eu-west-3", @@ -25376,7 +25382,7 @@ var awsPartition = partition{ Region: "eu-west-3", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.eu-west-3.amazonaws.com", + Hostname: "servicediscovery.eu-west-3.api.aws", }, endpointKey{ Region: "me-central-1", @@ -25385,7 +25391,7 @@ var awsPartition = partition{ Region: "me-central-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.me-central-1.amazonaws.com", + Hostname: 
"servicediscovery.me-central-1.api.aws", }, endpointKey{ Region: "me-south-1", @@ -25394,7 +25400,7 @@ var awsPartition = partition{ Region: "me-south-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.me-south-1.amazonaws.com", + Hostname: "servicediscovery.me-south-1.api.aws", }, endpointKey{ Region: "sa-east-1", @@ -25403,7 +25409,7 @@ var awsPartition = partition{ Region: "sa-east-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.sa-east-1.amazonaws.com", + Hostname: "servicediscovery.sa-east-1.api.aws", }, endpointKey{ Region: "us-east-1", @@ -25412,7 +25418,7 @@ var awsPartition = partition{ Region: "us-east-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.us-east-1.amazonaws.com", + Hostname: "servicediscovery.us-east-1.api.aws", }, endpointKey{ Region: "us-east-1", @@ -25420,6 +25426,12 @@ var awsPartition = partition{ }: endpoint{ Hostname: "servicediscovery-fips.us-east-1.amazonaws.com", }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-east-1.api.aws", + }, endpointKey{ Region: "us-east-1-fips", }: endpoint{ @@ -25436,7 +25448,7 @@ var awsPartition = partition{ Region: "us-east-2", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.us-east-2.amazonaws.com", + Hostname: "servicediscovery.us-east-2.api.aws", }, endpointKey{ Region: "us-east-2", @@ -25444,6 +25456,12 @@ var awsPartition = partition{ }: endpoint{ Hostname: "servicediscovery-fips.us-east-2.amazonaws.com", }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-east-2.api.aws", + }, endpointKey{ Region: "us-east-2-fips", }: endpoint{ @@ -25460,7 +25478,7 @@ var awsPartition = partition{ Region: "us-west-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.us-west-1.amazonaws.com", + Hostname: "servicediscovery.us-west-1.api.aws", }, endpointKey{ Region: "us-west-1", @@ -25468,6 +25486,12 @@ var awsPartition = partition{ }: endpoint{ Hostname: "servicediscovery-fips.us-west-1.amazonaws.com", }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-west-1.api.aws", + }, endpointKey{ Region: "us-west-1-fips", }: endpoint{ @@ -25484,7 +25508,7 @@ var awsPartition = partition{ Region: "us-west-2", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.us-west-2.amazonaws.com", + Hostname: "servicediscovery.us-west-2.api.aws", }, endpointKey{ Region: "us-west-2", @@ -25492,6 +25516,12 @@ var awsPartition = partition{ }: endpoint{ Hostname: "servicediscovery-fips.us-west-2.amazonaws.com", }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-west-2.api.aws", + }, endpointKey{ Region: "us-west-2-fips", }: endpoint{ @@ -32688,7 +32718,7 @@ var awscnPartition = partition{ Region: "cn-north-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.cn-north-1.amazonaws.com.cn", + Hostname: "servicediscovery.cn-north-1.api.amazonwebservices.com.cn", }, endpointKey{ Region: "cn-northwest-1", @@ -32697,7 +32727,7 @@ var awscnPartition = partition{ Region: "cn-northwest-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.cn-northwest-1.amazonaws.com.cn", + Hostname: "servicediscovery.cn-northwest-1.api.amazonwebservices.com.cn", }, }, }, @@ 
-38100,6 +38130,12 @@ var awsusgovPartition = partition{ }: endpoint{ Hostname: "servicediscovery-fips.us-gov-east-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-gov-east-1.amazonaws.com", + }, endpointKey{ Region: "us-gov-east-1-fips", }: endpoint{ @@ -38124,6 +38160,12 @@ var awsusgovPartition = partition{ }: endpoint{ Hostname: "servicediscovery-fips.us-gov-west-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-gov-west-1.amazonaws.com", + }, endpointKey{ Region: "us-gov-west-1-fips", }: endpoint{ diff --git a/aws/version.go b/aws/version.go index 17983f5b599..ef626eaea2e 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.44.308" +const SDKVersion = "1.44.309" diff --git a/models/apis/cloudcontrol/2021-09-30/docs-2.json b/models/apis/cloudcontrol/2021-09-30/docs-2.json index d39b4967e46..0e665d15857 100644 --- a/models/apis/cloudcontrol/2021-09-30/docs-2.json +++ b/models/apis/cloudcontrol/2021-09-30/docs-2.json @@ -264,7 +264,7 @@ "Properties": { "base": null, "refs": { - "CreateResourceInput$DesiredState": "

Structured data format representing the desired state of the resource, consisting of that resource's properties and their desired values. Cloud Control API currently supports JSON as a structured data format. Specify the desired state as one of the following: a JSON blob, or a local path containing the desired state in JSON data format. For more information, see Composing the desired state of the resource in the Amazon Web Services Cloud Control API User Guide. For more information about the properties of a specific resource, refer to the related topic for the resource in the Resource and property types reference in the CloudFormation User Guide.", + "CreateResourceInput$DesiredState": "Structured data format representing the desired state of the resource, consisting of that resource's properties and their desired values. Cloud Control API currently supports JSON as a structured data format. Specify the desired state as one of the following: a JSON blob, or a local path containing the desired state in JSON data format. For more information, see Composing the desired state of the resource in the Amazon Web Services Cloud Control API User Guide. For more information about the properties of a specific resource, refer to the related topic for the resource in the Resource and property types reference in the CloudFormation User Guide.", "ListResourcesInput$ResourceModel": "The resource model to use to select the resources to return.", "ProgressEvent$ResourceModel": "A JSON string containing the resource model, consisting of each resource property and its current value.", "ResourceDescription$Properties": "A list of the resource properties and their current values.
" diff --git a/models/apis/cloudcontrol/2021-09-30/endpoint-rule-set-1.json b/models/apis/cloudcontrol/2021-09-30/endpoint-rule-set-1.json new file mode 100644 index 00000000000..1812986e28a --- /dev/null +++ b/models/apis/cloudcontrol/2021-09-30/endpoint-rule-set-1.json @@ -0,0 +1,350 @@ +{ + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://cloudcontrolapi-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + 
"conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://cloudcontrolapi-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://cloudcontrolapi.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://cloudcontrolapi.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + } + ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ] + } + ] +} \ No newline at end of file diff --git a/models/apis/cloudcontrol/2021-09-30/endpoint-tests-1.json b/models/apis/cloudcontrol/2021-09-30/endpoint-tests-1.json new file mode 100644 index 00000000000..d21e7072890 --- /dev/null +++ b/models/apis/cloudcontrol/2021-09-30/endpoint-tests-1.json @@ -0,0 +1,678 @@ +{ + "testCases": [ + { + "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cloudcontrolapi.af-south-1.amazonaws.com" + } + }, + "params": { + "Region": "af-south-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cloudcontrolapi.ap-east-1.amazonaws.com" + } + }, + "params": { + "Region": "ap-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cloudcontrolapi.ap-northeast-1.amazonaws.com" + } + }, + "params": { + "Region": "ap-northeast-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cloudcontrolapi.ap-northeast-2.amazonaws.com" + } + }, + "params": { + "Region": "ap-northeast-2", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cloudcontrolapi.ap-northeast-3.amazonaws.com" + } + }, + "params": { + "Region": "ap-northeast-3", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ap-south-1 with FIPS disabled 
and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cloudcontrolapi.ap-south-1.amazonaws.com" + } + }, + "params": { + "Region": "ap-south-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cloudcontrolapi.ap-southeast-1.amazonaws.com" + } + }, + "params": { + "Region": "ap-southeast-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cloudcontrolapi.ap-southeast-2.amazonaws.com" + } + }, + "params": { + "Region": "ap-southeast-2", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cloudcontrolapi.ap-southeast-3.amazonaws.com" + } + }, + "params": { + "Region": "ap-southeast-3", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cloudcontrolapi.ca-central-1.amazonaws.com" + } + }, + "params": { + "Region": "ca-central-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cloudcontrolapi-fips.ca-central-1.amazonaws.com" + } + }, + "params": { + "Region": "ca-central-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cloudcontrolapi.eu-central-1.amazonaws.com" + } + }, + "params": { + "Region": "eu-central-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cloudcontrolapi.eu-north-1.amazonaws.com" + } + }, + "params": { + "Region": "eu-north-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cloudcontrolapi.eu-south-1.amazonaws.com" + } + }, + "params": { + "Region": "eu-south-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cloudcontrolapi.eu-west-1.amazonaws.com" + } + }, + "params": { + "Region": "eu-west-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cloudcontrolapi.eu-west-2.amazonaws.com" + } + }, + "params": { + "Region": "eu-west-2", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cloudcontrolapi.eu-west-3.amazonaws.com" + } + }, + "params": { + "Region": "eu-west-3", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cloudcontrolapi.me-south-1.amazonaws.com" + } + 
}, + "params": { + "Region": "me-south-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cloudcontrolapi.sa-east-1.amazonaws.com" + } + }, + "params": { + "Region": "sa-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cloudcontrolapi.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cloudcontrolapi-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cloudcontrolapi.us-east-2.amazonaws.com" + } + }, + "params": { + "Region": "us-east-2", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cloudcontrolapi-fips.us-east-2.amazonaws.com" + } + }, + "params": { + "Region": "us-east-2", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cloudcontrolapi.us-west-1.amazonaws.com" + } + }, + "params": { + "Region": "us-west-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cloudcontrolapi-fips.us-west-1.amazonaws.com" + } + }, + "params": { + "Region": "us-west-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cloudcontrolapi.us-west-2.amazonaws.com" + } + }, + "params": { + "Region": "us-west-2", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cloudcontrolapi-fips.us-west-2.amazonaws.com" + } + }, + "params": { + "Region": "us-west-2", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://cloudcontrolapi-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://cloudcontrolapi.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cloudcontrolapi.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", + "expect": 
{ + "endpoint": { + "url": "https://cloudcontrolapi.cn-northwest-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-northwest-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://cloudcontrolapi-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cloudcontrolapi-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://cloudcontrolapi.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cloudcontrolapi.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cloudcontrolapi-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cloudcontrolapi.us-gov-west-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-west-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cloudcontrolapi-fips.us-gov-west-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-west-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://cloudcontrolapi-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://cloudcontrolapi.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cloudcontrolapi-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this 
partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cloudcontrolapi.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cloudcontrolapi-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cloudcontrolapi.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } + } + ], + "version": "1.0" +} \ No newline at end of file diff --git a/models/apis/entityresolution/2018-05-10/api-2.json b/models/apis/entityresolution/2018-05-10/api-2.json new file mode 100644 index 00000000000..0e7ef7a3704 --- /dev/null +++ b/models/apis/entityresolution/2018-05-10/api-2.json @@ -0,0 +1,1153 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2018-05-10", + "endpointPrefix":"entityresolution", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceAbbreviation":"AWSEntityResolution", + "serviceFullName":"AWS EntityResolution", + "serviceId":"EntityResolution", + "signatureVersion":"v4", + 
"signingName":"entityresolution", + "uid":"entityresolution-2018-05-10" + }, + "operations":{ + "CreateMatchingWorkflow":{ + "name":"CreateMatchingWorkflow", + "http":{ + "method":"POST", + "requestUri":"/matchingworkflows", + "responseCode":200 + }, + "input":{"shape":"CreateMatchingWorkflowInput"}, + "output":{"shape":"CreateMatchingWorkflowOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ExceedsLimitException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"} + ] + }, + "CreateSchemaMapping":{ + "name":"CreateSchemaMapping", + "http":{ + "method":"POST", + "requestUri":"/schemas", + "responseCode":200 + }, + "input":{"shape":"CreateSchemaMappingInput"}, + "output":{"shape":"CreateSchemaMappingOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ExceedsLimitException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"} + ] + }, + "DeleteMatchingWorkflow":{ + "name":"DeleteMatchingWorkflow", + "http":{ + "method":"DELETE", + "requestUri":"/matchingworkflows/{workflowName}", + "responseCode":200 + }, + "input":{"shape":"DeleteMatchingWorkflowInput"}, + "output":{"shape":"DeleteMatchingWorkflowOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} + ], + "idempotent":true + }, + "DeleteSchemaMapping":{ + "name":"DeleteSchemaMapping", + "http":{ + "method":"DELETE", + "requestUri":"/schemas/{schemaName}", + "responseCode":200 + }, + "input":{"shape":"DeleteSchemaMappingInput"}, + "output":{"shape":"DeleteSchemaMappingOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"} + ], + "idempotent":true + }, + "GetMatchId":{ + "name":"GetMatchId", + "http":{ + "method":"POST", + "requestUri":"/matchingworkflows/{workflowName}/matches", + "responseCode":200 + }, + "input":{"shape":"GetMatchIdInput"}, + "output":{"shape":"GetMatchIdOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} + ] + }, + "GetMatchingJob":{ + "name":"GetMatchingJob", + "http":{ + "method":"GET", + "requestUri":"/matchingworkflows/{workflowName}/jobs/{jobId}", + "responseCode":200 + }, + "input":{"shape":"GetMatchingJobInput"}, + "output":{"shape":"GetMatchingJobOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} + ] + }, + "GetMatchingWorkflow":{ + "name":"GetMatchingWorkflow", + "http":{ + "method":"GET", + "requestUri":"/matchingworkflows/{workflowName}", + "responseCode":200 + }, + "input":{"shape":"GetMatchingWorkflowInput"}, + "output":{"shape":"GetMatchingWorkflowOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} + ] + }, + "GetSchemaMapping":{ + "name":"GetSchemaMapping", + "http":{ + "method":"GET", + "requestUri":"/schemas/{schemaName}", + "responseCode":200 + }, + 
"input":{"shape":"GetSchemaMappingInput"}, + "output":{"shape":"GetSchemaMappingOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} + ] + }, + "ListMatchingJobs":{ + "name":"ListMatchingJobs", + "http":{ + "method":"GET", + "requestUri":"/matchingworkflows/{workflowName}/jobs", + "responseCode":200 + }, + "input":{"shape":"ListMatchingJobsInput"}, + "output":{"shape":"ListMatchingJobsOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} + ] + }, + "ListMatchingWorkflows":{ + "name":"ListMatchingWorkflows", + "http":{ + "method":"GET", + "requestUri":"/matchingworkflows", + "responseCode":200 + }, + "input":{"shape":"ListMatchingWorkflowsInput"}, + "output":{"shape":"ListMatchingWorkflowsOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} + ] + }, + "ListSchemaMappings":{ + "name":"ListSchemaMappings", + "http":{ + "method":"GET", + "requestUri":"/schemas", + "responseCode":200 + }, + "input":{"shape":"ListSchemaMappingsInput"}, + "output":{"shape":"ListSchemaMappingsOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} + ] + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{resourceArn}", + "responseCode":200 + }, + "input":{"shape":"ListTagsForResourceInput"}, + "output":{"shape":"ListTagsForResourceOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ] + }, + "StartMatchingJob":{ + "name":"StartMatchingJob", + "http":{ + "method":"POST", + "requestUri":"/matchingworkflows/{workflowName}/jobs", + "responseCode":200 + }, + "input":{"shape":"StartMatchingJobInput"}, + "output":{"shape":"StartMatchingJobOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ExceedsLimitException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"} + ] + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{resourceArn}", + "responseCode":200 + }, + "input":{"shape":"TagResourceInput"}, + "output":{"shape":"TagResourceOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ] + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{resourceArn}", + "responseCode":200 + }, + "input":{"shape":"UntagResourceInput"}, + "output":{"shape":"UntagResourceOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "idempotent":true + }, + "UpdateMatchingWorkflow":{ + "name":"UpdateMatchingWorkflow", + "http":{ + "method":"PUT", + "requestUri":"/matchingworkflows/{workflowName}", + "responseCode":200 + }, + "input":{"shape":"UpdateMatchingWorkflowInput"}, + "output":{"shape":"UpdateMatchingWorkflowOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + 
{"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} + ], + "idempotent":true + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{ + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "AttributeMatchingModel":{ + "type":"string", + "enum":[ + "ONE_TO_ONE", + "MANY_TO_MANY" + ] + }, + "AttributeName":{ + "type":"string", + "max":255, + "min":0, + "pattern":"^[a-zA-Z_0-9- \\t]*$" + }, + "Boolean":{ + "type":"boolean", + "box":true + }, + "ConflictException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "CreateMatchingWorkflowInput":{ + "type":"structure", + "required":[ + "inputSourceConfig", + "outputSourceConfig", + "resolutionTechniques", + "roleArn", + "workflowName" + ], + "members":{ + "description":{"shape":"Description"}, + "incrementalRunConfig":{"shape":"IncrementalRunConfig"}, + "inputSourceConfig":{"shape":"InputSourceConfig"}, + "outputSourceConfig":{"shape":"OutputSourceConfig"}, + "resolutionTechniques":{"shape":"ResolutionTechniques"}, + "roleArn":{"shape":"String"}, + "tags":{"shape":"TagMap"}, + "workflowName":{"shape":"EntityName"} + } + }, + "CreateMatchingWorkflowOutput":{ + "type":"structure", + "required":[ + "inputSourceConfig", + "outputSourceConfig", + "resolutionTechniques", + "roleArn", + "workflowArn", + "workflowName" + ], + "members":{ + "description":{"shape":"Description"}, + "incrementalRunConfig":{"shape":"IncrementalRunConfig"}, + "inputSourceConfig":{"shape":"InputSourceConfig"}, + "outputSourceConfig":{"shape":"OutputSourceConfig"}, + "resolutionTechniques":{"shape":"ResolutionTechniques"}, + "roleArn":{"shape":"String"}, + "workflowArn":{"shape":"MatchingWorkflowArn"}, + "workflowName":{"shape":"EntityName"} + } + }, + "CreateSchemaMappingInput":{ + "type":"structure", + "required":["schemaName"], + "members":{ + "description":{"shape":"Description"}, + "mappedInputFields":{"shape":"SchemaInputAttributes"}, + "schemaName":{"shape":"EntityName"}, + "tags":{"shape":"TagMap"} + } + }, + "CreateSchemaMappingOutput":{ + "type":"structure", + "required":[ + "description", + "mappedInputFields", + "schemaArn", + "schemaName" + ], + "members":{ + "description":{"shape":"Description"}, + "mappedInputFields":{"shape":"SchemaInputAttributes"}, + "schemaArn":{"shape":"SchemaMappingArn"}, + "schemaName":{"shape":"EntityName"} + } + }, + "DeleteMatchingWorkflowInput":{ + "type":"structure", + "required":["workflowName"], + "members":{ + "workflowName":{ + "shape":"EntityName", + "location":"uri", + "locationName":"workflowName" + } + } + }, + "DeleteMatchingWorkflowOutput":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + } + }, + "DeleteSchemaMappingInput":{ + "type":"structure", + "required":["schemaName"], + "members":{ + "schemaName":{ + "shape":"EntityName", + "location":"uri", + "locationName":"schemaName" + } + } + }, + "DeleteSchemaMappingOutput":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + } + }, + "Description":{ + "type":"string", + "max":255, + "min":0 + }, + "EntityName":{ + "type":"string", + "max":255, + "min":0, + "pattern":"^[a-zA-Z_0-9-]*$" + }, + "ErrorDetails":{ + "type":"structure", + "members":{ + 
"errorMessage":{"shape":"ErrorMessage"} + } + }, + "ErrorMessage":{ + "type":"string", + "max":2048, + "min":1 + }, + "ExceedsLimitException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{ + "httpStatusCode":402, + "senderFault":true + }, + "exception":true + }, + "GetMatchIdInput":{ + "type":"structure", + "required":[ + "record", + "workflowName" + ], + "members":{ + "record":{"shape":"RecordAttributeMap"}, + "workflowName":{ + "shape":"EntityName", + "location":"uri", + "locationName":"workflowName" + } + } + }, + "GetMatchIdOutput":{ + "type":"structure", + "members":{ + "matchId":{"shape":"String"} + } + }, + "GetMatchingJobInput":{ + "type":"structure", + "required":[ + "jobId", + "workflowName" + ], + "members":{ + "jobId":{ + "shape":"JobId", + "location":"uri", + "locationName":"jobId" + }, + "workflowName":{ + "shape":"EntityName", + "location":"uri", + "locationName":"workflowName" + } + } + }, + "GetMatchingJobOutput":{ + "type":"structure", + "required":[ + "jobId", + "startTime", + "status" + ], + "members":{ + "endTime":{"shape":"Timestamp"}, + "errorDetails":{"shape":"ErrorDetails"}, + "jobId":{"shape":"JobId"}, + "metrics":{"shape":"JobMetrics"}, + "startTime":{"shape":"Timestamp"}, + "status":{"shape":"JobStatus"} + } + }, + "GetMatchingWorkflowInput":{ + "type":"structure", + "required":["workflowName"], + "members":{ + "workflowName":{ + "shape":"EntityName", + "location":"uri", + "locationName":"workflowName" + } + } + }, + "GetMatchingWorkflowOutput":{ + "type":"structure", + "required":[ + "createdAt", + "inputSourceConfig", + "outputSourceConfig", + "resolutionTechniques", + "roleArn", + "updatedAt", + "workflowArn", + "workflowName" + ], + "members":{ + "createdAt":{"shape":"Timestamp"}, + "description":{"shape":"Description"}, + "incrementalRunConfig":{"shape":"IncrementalRunConfig"}, + "inputSourceConfig":{"shape":"InputSourceConfig"}, + "outputSourceConfig":{"shape":"OutputSourceConfig"}, + "resolutionTechniques":{"shape":"ResolutionTechniques"}, + "roleArn":{"shape":"String"}, + "tags":{"shape":"TagMap"}, + "updatedAt":{"shape":"Timestamp"}, + "workflowArn":{"shape":"MatchingWorkflowArn"}, + "workflowName":{"shape":"EntityName"} + } + }, + "GetSchemaMappingInput":{ + "type":"structure", + "required":["schemaName"], + "members":{ + "schemaName":{ + "shape":"EntityName", + "location":"uri", + "locationName":"schemaName" + } + } + }, + "GetSchemaMappingOutput":{ + "type":"structure", + "required":[ + "createdAt", + "mappedInputFields", + "schemaArn", + "schemaName", + "updatedAt" + ], + "members":{ + "createdAt":{"shape":"Timestamp"}, + "description":{"shape":"Description"}, + "mappedInputFields":{"shape":"SchemaInputAttributes"}, + "schemaArn":{"shape":"SchemaMappingArn"}, + "schemaName":{"shape":"EntityName"}, + "tags":{"shape":"TagMap"}, + "updatedAt":{"shape":"Timestamp"} + } + }, + "IncrementalRunConfig":{ + "type":"structure", + "members":{ + "incrementalRunType":{"shape":"IncrementalRunType"} + } + }, + "IncrementalRunType":{ + "type":"string", + "enum":["IMMEDIATE"] + }, + "InputSource":{ + "type":"structure", + "required":[ + "inputSourceARN", + "schemaName" + ], + "members":{ + "applyNormalization":{"shape":"Boolean"}, + "inputSourceARN":{"shape":"InputSourceInputSourceARNString"}, + "schemaName":{"shape":"EntityName"} + } + }, + "InputSourceConfig":{ + "type":"list", + "member":{"shape":"InputSource"}, + "max":20, + "min":1 + }, + "InputSourceInputSourceARNString":{ + "type":"string", + 
"pattern":"^arn:aws:.*:.*:[0-9]+:.*$" + }, + "Integer":{ + "type":"integer", + "box":true + }, + "InternalServerException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true, + "retryable":{"throttling":false} + }, + "JobId":{ + "type":"string", + "pattern":"^[a-f0-9]{32}$" + }, + "JobList":{ + "type":"list", + "member":{"shape":"JobSummary"} + }, + "JobMetrics":{ + "type":"structure", + "members":{ + "inputRecords":{"shape":"Integer"}, + "matchIDs":{"shape":"Integer"}, + "recordsNotProcessed":{"shape":"Integer"}, + "totalRecordsProcessed":{"shape":"Integer"} + } + }, + "JobStatus":{ + "type":"string", + "enum":[ + "RUNNING", + "SUCCEEDED", + "FAILED", + "QUEUED" + ] + }, + "JobSummary":{ + "type":"structure", + "required":[ + "jobId", + "startTime", + "status" + ], + "members":{ + "endTime":{"shape":"Timestamp"}, + "jobId":{"shape":"JobId"}, + "startTime":{"shape":"Timestamp"}, + "status":{"shape":"JobStatus"} + } + }, + "KMSArn":{ + "type":"string", + "pattern":"^arn:aws:kms:.*:[0-9]+:.*$" + }, + "ListMatchingJobsInput":{ + "type":"structure", + "required":["workflowName"], + "members":{ + "maxResults":{ + "shape":"ListMatchingJobsInputMaxResultsInteger", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "location":"querystring", + "locationName":"nextToken" + }, + "workflowName":{ + "shape":"EntityName", + "location":"uri", + "locationName":"workflowName" + } + } + }, + "ListMatchingJobsInputMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":25, + "min":1 + }, + "ListMatchingJobsOutput":{ + "type":"structure", + "members":{ + "jobs":{"shape":"JobList"}, + "nextToken":{"shape":"NextToken"} + } + }, + "ListMatchingWorkflowsInput":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"ListMatchingWorkflowsInputMaxResultsInteger", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListMatchingWorkflowsInputMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":25 + }, + "ListMatchingWorkflowsOutput":{ + "type":"structure", + "members":{ + "nextToken":{"shape":"NextToken"}, + "workflowSummaries":{"shape":"MatchingWorkflowList"} + } + }, + "ListSchemaMappingsInput":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"ListSchemaMappingsInputMaxResultsInteger", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListSchemaMappingsInputMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":25 + }, + "ListSchemaMappingsOutput":{ + "type":"structure", + "members":{ + "nextToken":{"shape":"NextToken"}, + "schemaList":{"shape":"SchemaMappingList"} + } + }, + "ListTagsForResourceInput":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"VeniceGlobalArn", + "location":"uri", + "locationName":"resourceArn" + } + } + }, + "ListTagsForResourceOutput":{ + "type":"structure", + "required":["tags"], + "members":{ + "tags":{"shape":"TagMap"} + } + }, + "MatchingWorkflowArn":{ + "type":"string", + "pattern":"^arn:(aws|aws-us-gov|aws-cn):entityresolution:.*:[0-9]+:(matchingworkflow/.*)$" + }, + "MatchingWorkflowList":{ + "type":"list", + "member":{"shape":"MatchingWorkflowSummary"} + }, + "MatchingWorkflowSummary":{ + 
"type":"structure", + "required":[ + "createdAt", + "updatedAt", + "workflowArn", + "workflowName" + ], + "members":{ + "createdAt":{"shape":"Timestamp"}, + "updatedAt":{"shape":"Timestamp"}, + "workflowArn":{"shape":"MatchingWorkflowArn"}, + "workflowName":{"shape":"EntityName"} + } + }, + "NextToken":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"^[a-zA-Z_0-9-=+/]*$" + }, + "OutputAttribute":{ + "type":"structure", + "required":["name"], + "members":{ + "hashed":{"shape":"Boolean"}, + "name":{"shape":"AttributeName"} + } + }, + "OutputSource":{ + "type":"structure", + "required":[ + "output", + "outputS3Path" + ], + "members":{ + "KMSArn":{"shape":"KMSArn"}, + "applyNormalization":{"shape":"Boolean"}, + "output":{"shape":"OutputSourceOutputList"}, + "outputS3Path":{"shape":"OutputSourceOutputS3PathString"} + } + }, + "OutputSourceConfig":{ + "type":"list", + "member":{"shape":"OutputSource"}, + "max":1, + "min":1 + }, + "OutputSourceOutputList":{ + "type":"list", + "member":{"shape":"OutputAttribute"}, + "max":750, + "min":0 + }, + "OutputSourceOutputS3PathString":{ + "type":"string", + "pattern":"^s3://([^/]+)/?(.*?([^/]+)/?)$" + }, + "RecordAttributeMap":{ + "type":"map", + "key":{"shape":"RecordAttributeMapKeyString"}, + "value":{"shape":"RecordAttributeMapValueString"}, + "sensitive":true + }, + "RecordAttributeMapKeyString":{ + "type":"string", + "max":255, + "min":0, + "pattern":"^[a-zA-Z_0-9- \\t]*$" + }, + "RecordAttributeMapValueString":{ + "type":"string", + "max":255, + "min":0, + "pattern":"^[a-zA-Z_0-9-.@ ()+\\t]*$" + }, + "ResolutionTechniques":{ + "type":"structure", + "members":{ + "resolutionType":{"shape":"ResolutionType"}, + "ruleBasedProperties":{"shape":"RuleBasedProperties"} + } + }, + "ResolutionType":{ + "type":"string", + "enum":[ + "RULE_MATCHING", + "ML_MATCHING" + ] + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{ + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "Rule":{ + "type":"structure", + "required":[ + "matchingKeys", + "ruleName" + ], + "members":{ + "matchingKeys":{"shape":"RuleMatchingKeysList"}, + "ruleName":{"shape":"RuleRuleNameString"} + } + }, + "RuleBasedProperties":{ + "type":"structure", + "required":[ + "attributeMatchingModel", + "rules" + ], + "members":{ + "attributeMatchingModel":{"shape":"AttributeMatchingModel"}, + "rules":{"shape":"RuleBasedPropertiesRulesList"} + } + }, + "RuleBasedPropertiesRulesList":{ + "type":"list", + "member":{"shape":"Rule"}, + "max":15, + "min":1 + }, + "RuleMatchingKeysList":{ + "type":"list", + "member":{"shape":"AttributeName"}, + "max":15, + "min":1 + }, + "RuleRuleNameString":{ + "type":"string", + "max":255, + "min":0, + "pattern":"^[a-zA-Z_0-9- \\t]*$" + }, + "SchemaAttributeType":{ + "type":"string", + "enum":[ + "NAME", + "NAME_FIRST", + "NAME_MIDDLE", + "NAME_LAST", + "ADDRESS", + "ADDRESS_STREET1", + "ADDRESS_STREET2", + "ADDRESS_STREET3", + "ADDRESS_CITY", + "ADDRESS_STATE", + "ADDRESS_COUNTRY", + "ADDRESS_POSTALCODE", + "PHONE", + "PHONE_NUMBER", + "PHONE_COUNTRYCODE", + "EMAIL_ADDRESS", + "UNIQUE_ID", + "DATE", + "STRING" + ] + }, + "SchemaInputAttribute":{ + "type":"structure", + "required":[ + "fieldName", + "type" + ], + "members":{ + "fieldName":{"shape":"AttributeName"}, + "groupName":{"shape":"AttributeName"}, + "matchKey":{"shape":"AttributeName"}, + "type":{"shape":"SchemaAttributeType"} + } + }, + "SchemaInputAttributes":{ + "type":"list", + 
"member":{"shape":"SchemaInputAttribute"}, + "max":25, + "min":2 + }, + "SchemaMappingArn":{ + "type":"string", + "pattern":"^arn:(aws|aws-us-gov|aws-cn):entityresolution:.*:[0-9]+:(schemamapping/.*)$" + }, + "SchemaMappingList":{ + "type":"list", + "member":{"shape":"SchemaMappingSummary"} + }, + "SchemaMappingSummary":{ + "type":"structure", + "required":[ + "createdAt", + "schemaArn", + "schemaName", + "updatedAt" + ], + "members":{ + "createdAt":{"shape":"Timestamp"}, + "schemaArn":{"shape":"SchemaMappingArn"}, + "schemaName":{"shape":"EntityName"}, + "updatedAt":{"shape":"Timestamp"} + } + }, + "StartMatchingJobInput":{ + "type":"structure", + "required":["workflowName"], + "members":{ + "workflowName":{ + "shape":"EntityName", + "location":"uri", + "locationName":"workflowName" + } + } + }, + "StartMatchingJobOutput":{ + "type":"structure", + "required":["jobId"], + "members":{ + "jobId":{"shape":"JobId"} + } + }, + "String":{"type":"string"}, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":0 + }, + "TagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":200, + "min":0 + }, + "TagResourceInput":{ + "type":"structure", + "required":[ + "resourceArn", + "tags" + ], + "members":{ + "resourceArn":{ + "shape":"VeniceGlobalArn", + "location":"uri", + "locationName":"resourceArn" + }, + "tags":{"shape":"TagMap"} + } + }, + "TagResourceOutput":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0 + }, + "ThrottlingException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{ + "httpStatusCode":429, + "senderFault":true + }, + "exception":true, + "retryable":{"throttling":true} + }, + "Timestamp":{"type":"timestamp"}, + "UntagResourceInput":{ + "type":"structure", + "required":[ + "resourceArn", + "tagKeys" + ], + "members":{ + "resourceArn":{ + "shape":"VeniceGlobalArn", + "location":"uri", + "locationName":"resourceArn" + }, + "tagKeys":{ + "shape":"TagKeyList", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceOutput":{ + "type":"structure", + "members":{ + } + }, + "UpdateMatchingWorkflowInput":{ + "type":"structure", + "required":[ + "inputSourceConfig", + "outputSourceConfig", + "resolutionTechniques", + "roleArn", + "workflowName" + ], + "members":{ + "description":{"shape":"Description"}, + "incrementalRunConfig":{"shape":"IncrementalRunConfig"}, + "inputSourceConfig":{"shape":"InputSourceConfig"}, + "outputSourceConfig":{"shape":"OutputSourceConfig"}, + "resolutionTechniques":{"shape":"ResolutionTechniques"}, + "roleArn":{"shape":"String"}, + "workflowName":{ + "shape":"EntityName", + "location":"uri", + "locationName":"workflowName" + } + } + }, + "UpdateMatchingWorkflowOutput":{ + "type":"structure", + "required":[ + "inputSourceConfig", + "outputSourceConfig", + "resolutionTechniques", + "roleArn", + "workflowName" + ], + "members":{ + "description":{"shape":"Description"}, + "incrementalRunConfig":{"shape":"IncrementalRunConfig"}, + "inputSourceConfig":{"shape":"InputSourceConfig"}, + "outputSourceConfig":{"shape":"OutputSourceConfig"}, + "resolutionTechniques":{"shape":"ResolutionTechniques"}, + "roleArn":{"shape":"String"}, + "workflowName":{"shape":"EntityName"} + } + }, + "ValidationException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "error":{ + "httpStatusCode":400, + 
"senderFault":true + }, + "exception":true + }, + "VeniceGlobalArn":{ + "type":"string", + "pattern":"^arn:(aws|aws-us-gov|aws-cn):(entityresolution):.*:[0-9]+:((schemamapping|matchingworkflow)/[a-zA-Z0-9_-]+)$" + } + } +} diff --git a/models/apis/entityresolution/2018-05-10/docs-2.json b/models/apis/entityresolution/2018-05-10/docs-2.json new file mode 100644 index 00000000000..4200b328b9b --- /dev/null +++ b/models/apis/entityresolution/2018-05-10/docs-2.json @@ -0,0 +1,641 @@ +{ + "version": "2.0", + "service": "

Welcome to the AWS Entity Resolution API Reference.

AWS Entity Resolution is an AWS service that provides pre-configured entity resolution capabilities that enable developers and analysts at advertising and marketing companies to build an accurate and complete view of their consumers.

With AWS Entity Resolution, you have the ability to match source records containing consumer identifiers, such as name, email address, and phone number. This holds true even when these records have incomplete or conflicting identifiers. For example, AWS Entity Resolution can effectively match a source record from a customer relationship management (CRM) system, which includes account information like first name, last name, postal address, phone number, and email address, with a source record from a marketing system containing campaign information, such as username and email address.

To learn more about AWS Entity Resolution concepts, procedures, and best practices, see the AWS Entity Resolution User Guide.
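Since this release ships a brand-new generated client, a short orientation sketch may be useful. The snippet below is a hedged example, assuming the generated service/entityresolution package follows the usual aws-sdk-go v1 conventions (a New constructor and pointer-valued fields via the aws helpers); the region, schema name, and field names are placeholders, not part of this release.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/entityresolution"
)

func main() {
	// Shared session; the region is illustrative.
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	client := entityresolution.New(sess)

	// Define the schema of an input table. GroupName concatenates the two
	// name columns into one unified column; MatchKey groups columns for
	// matching (see the SchemaInputAttribute docs below).
	out, err := client.CreateSchemaMapping(&entityresolution.CreateSchemaMappingInput{
		SchemaName: aws.String("crm-schema"), // hypothetical name
		MappedInputFields: []*entityresolution.SchemaInputAttribute{
			{FieldName: aws.String("email"), Type: aws.String("EMAIL_ADDRESS"), MatchKey: aws.String("Email")},
			{FieldName: aws.String("first_name"), Type: aws.String("NAME_FIRST"), GroupName: aws.String("FullName")},
			{FieldName: aws.String("last_name"), Type: aws.String("NAME_LAST"), GroupName: aws.String("FullName")},
		},
	})
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(aws.StringValue(out.SchemaArn))
}
```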

", + "operations": { + "CreateMatchingWorkflow": "

Creates a MatchingWorkflow object that stores the configuration of the data processing job to be run. There must not be a pre-existing MatchingWorkflow with the same name. To modify an existing workflow, use the UpdateMatchingWorkflow API.

", + "CreateSchemaMapping": "

Creates a schema mapping, which defines the schema of the input customer records table. The SchemaMapping also provides Entity Resolution with some metadata about the table, such as the attribute types of the columns and which columns to match on.

", + "DeleteMatchingWorkflow": "

Deletes the MatchingWorkflow with a given name. This operation will succeed even if a workflow with the given name does not exist.

", + "DeleteSchemaMapping": "

Deletes the SchemaMapping with a given name. This operation will succeed even if a schema with the given name does not exist. This operation will fail if there is a DataIntegrationWorkflow object that references the SchemaMapping in the workflow's InputSourceConfig.

", + "GetMatchId": "

Returns the corresponding Match ID of a customer record if the record has been processed.
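As a hedged sketch of that call, built from the GetMatchIdInput shape in this diff (the client type name, workflow name, and record fields are assumptions):

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/entityresolution"
)

// lookupMatchID is a hypothetical helper. The keys of the record map must
// be field names defined in the workflow's schema mapping.
func lookupMatchID(client *entityresolution.EntityResolution, workflow string) (string, error) {
	out, err := client.GetMatchId(&entityresolution.GetMatchIdInput{
		WorkflowName: aws.String(workflow),
		Record: map[string]*string{
			"email": aws.String("jdoe@example.com"), // illustrative field and value
		},
	})
	if err != nil {
		return "", err
	}
	// Empty until the record has been processed by a matching job.
	return aws.StringValue(out.MatchId), nil
}
```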

", + "GetMatchingJob": "

Gets the status, metrics, and errors (if there are any) that are associated with a job.

", + "GetMatchingWorkflow": "

Returns the MatchingWorkflow with a given name, if it exists.

", + "GetSchemaMapping": "

Returns the SchemaMapping of a given name.

", + "ListMatchingJobs": "

Lists all jobs for a given workflow.

", + "ListMatchingWorkflows": "

Returns a list of all the MatchingWorkflows that have been created for an AWS account.

", + "ListSchemaMappings": "

Returns a list of all the SchemaMappings that have been created for an AWS account.

", + "ListTagsForResource": "

Displays the tags associated with an AWS Entity Resolution resource. In Entity Resolution, SchemaMapping and MatchingWorkflow resources can be tagged.

", + "StartMatchingJob": "

Starts the MatchingJob of a workflow. The workflow must have previously been created using the CreateMatchingWorkflow endpoint.

", + "TagResource": "

Assigns one or more tags (key-value pairs) to the specified AWS Entity Resolution resource. Tags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values. In Entity Resolution, SchemaMapping and MatchingWorkflow resources can be tagged. Tags don't have any semantic meaning to AWS and are interpreted strictly as strings of characters. You can use the TagResource action with a resource that already has tags. If you specify a new tag key, this tag is appended to the list of tags associated with the resource. If you specify a tag key that is already associated with the resource, the new tag value that you specify replaces the previous value for that tag.
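A minimal sketch of that behavior, with a hypothetical helper and placeholder tag values:

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/entityresolution"
)

// tagResource is a hypothetical helper; the ARN must point at a
// SchemaMapping or MatchingWorkflow resource.
func tagResource(client *entityresolution.EntityResolution, resourceArn string) error {
	// Re-specifying an existing key replaces its value, per the description above.
	_, err := client.TagResource(&entityresolution.TagResourceInput{
		ResourceArn: aws.String(resourceArn),
		Tags: map[string]*string{
			"team": aws.String("marketing"), // placeholder tag
		},
	})
	return err
}
```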

", + "UntagResource": "

Removes one or more tags from the specified AWS Entity Resolution resource. In Entity Resolution, SchemaMapping and MatchingWorkflow resources can be tagged.

", + "UpdateMatchingWorkflow": "

Updates an existing MatchingWorkflow. This method is identical to CreateMatchingWorkflow, except it uses an HTTP PUT request instead of a POST request, and the MatchingWorkflow must already exist for the method to succeed.

" + }, + "shapes": { + "AccessDeniedException": { + "base": "

You do not have sufficient access to perform this action. HTTP Status Code: 403

", + "refs": { + } + }, + "AttributeMatchingModel": { + "base": null, + "refs": { + "RuleBasedProperties$attributeMatchingModel": "

You can either choose ONE_TO_ONE or MANY_TO_MANY as the AttributeMatchingModel. When choosing MANY_TO_MANY, the system can match attributes across the sub-types of an attribute type. For example, if the value of the Email field of Profile A and the value of the BusinessEmail field of Profile B match, the two profiles are matched on the Email type. When choosing ONE_TO_ONE, the system can only match if the sub-types are exact matches. For example, the two profiles are matched on the Email type only when the value of the Email field of Profile A matches the value of the Email field of Profile B.
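For orientation, a rule-based configuration built from the Rule and RuleBasedProperties shapes defined earlier in this diff might look like the following sketch; the rule name and match keys are illustrative, and MatchingKeys must be MatchKey values defined in the schema mapping.

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/entityresolution"
)

// ruleBasedTechniques returns a rule-based configuration: two records match
// under "email-and-phone" only if both the Email and Phone match keys agree
// (ONE_TO_ONE: sub-types must match exactly).
func ruleBasedTechniques() *entityresolution.ResolutionTechniques {
	return &entityresolution.ResolutionTechniques{
		ResolutionType: aws.String("RULE_MATCHING"),
		RuleBasedProperties: &entityresolution.RuleBasedProperties{
			AttributeMatchingModel: aws.String("ONE_TO_ONE"),
			Rules: []*entityresolution.Rule{
				{
					RuleName:     aws.String("email-and-phone"),
					MatchingKeys: []*string{aws.String("Email"), aws.String("Phone")},
				},
			},
		},
	}
}
```

The returned value would then be supplied as the ResolutionTechniques member of a CreateMatchingWorkflowInput or UpdateMatchingWorkflowInput.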

" + } + }, + "AttributeName": { + "base": null, + "refs": { + "OutputAttribute$name": "

A name of a column to be written to the output. This must be an InputField name in the schema mapping.

", + "RuleMatchingKeysList$member": null, + "SchemaInputAttribute$fieldName": "

A string containing the field name.

", + "SchemaInputAttribute$groupName": "

Instructs Entity Resolution to combine several columns into a unified column with the identical attribute type. For example, when working with columns such as first_name, middle_name, and last_name, assigning them a common GroupName will prompt Entity Resolution to concatenate them into a single value.

", + "SchemaInputAttribute$matchKey": "

A key that allows grouping of multiple input attributes into a unified matching group. For example, consider a scenario where the source table contains various addresses, such as business_address and shipping_address. By assigning the MatchKey 'Address' to both attributes, Entity Resolution will match records across these fields to create a consolidated matching group. If no MatchKey is specified for a column, it won't be utilized for matching purposes but will still be included in the output table.

" + } + }, + "Boolean": { + "base": null, + "refs": { + "InputSource$applyNormalization": "

Normalizes the attributes defined in the schema in the input data. For example, if an attribute has an AttributeType of PHONE_NUMBER, and the data in the input table is in a format of 1234567890, Entity Resolution will normalize this field in the output to (123)-456-7890.

", + "OutputAttribute$hashed": "

Enables the ability to hash the column values in the output.

", + "OutputSource$applyNormalization": "

Normalizes the attributes defined in the schema in the input data. For example, if an attribute has an AttributeType of PHONE_NUMBER, and the data in the input table is in a format of 1234567890, Entity Resolution will normalize this field in the output to (123)-456-7890.

" + } + }, + "ConflictException": { + "base": "

The request could not be processed because of a conflict in the current state of the resource. Example: Workflow already exists, Schema already exists, Workflow is currently running, etc. HTTP Status Code: 400

", + "refs": { + } + }, + "CreateMatchingWorkflowInput": { + "base": null, + "refs": { + } + }, + "CreateMatchingWorkflowOutput": { + "base": null, + "refs": { + } + }, + "CreateSchemaMappingInput": { + "base": null, + "refs": { + } + }, + "CreateSchemaMappingOutput": { + "base": null, + "refs": { + } + }, + "DeleteMatchingWorkflowInput": { + "base": null, + "refs": { + } + }, + "DeleteMatchingWorkflowOutput": { + "base": null, + "refs": { + } + }, + "DeleteSchemaMappingInput": { + "base": null, + "refs": { + } + }, + "DeleteSchemaMappingOutput": { + "base": null, + "refs": { + } + }, + "Description": { + "base": null, + "refs": { + "CreateMatchingWorkflowInput$description": "

A description of the workflow.

", + "CreateMatchingWorkflowOutput$description": "

A description of the workflow.

", + "CreateSchemaMappingInput$description": "

A description of the schema.

", + "CreateSchemaMappingOutput$description": "

A description of the schema.

", + "GetMatchingWorkflowOutput$description": "

A description of the workflow.

", + "GetSchemaMappingOutput$description": "

A description of the schema.

", + "UpdateMatchingWorkflowInput$description": "

A description of the workflow.

", + "UpdateMatchingWorkflowOutput$description": "

A description of the workflow.

" + } + }, + "EntityName": { + "base": null, + "refs": { + "CreateMatchingWorkflowInput$workflowName": "

The name of the workflow. There cannot be multiple DataIntegrationWorkflows with the same name.

", + "CreateMatchingWorkflowOutput$workflowName": "

The name of the workflow.

", + "CreateSchemaMappingInput$schemaName": "

The name of the schema. There cannot be multiple SchemaMappings with the same name.

", + "CreateSchemaMappingOutput$schemaName": "

The name of the schema.

", + "DeleteMatchingWorkflowInput$workflowName": "

The name of the workflow to be deleted.

", + "DeleteSchemaMappingInput$schemaName": "

The name of the schema to delete.

", + "GetMatchIdInput$workflowName": "

The name of the workflow.

", + "GetMatchingJobInput$workflowName": "

The name of the workflow.

", + "GetMatchingWorkflowInput$workflowName": "

The name of the workflow.

", + "GetMatchingWorkflowOutput$workflowName": "

The name of the workflow.

", + "GetSchemaMappingInput$schemaName": "

The name of the schema to be retrieved.

", + "GetSchemaMappingOutput$schemaName": "

The name of the schema.

", + "InputSource$schemaName": "

The name of the schema to be used for this input source.

", + "ListMatchingJobsInput$workflowName": "

The name of the workflow whose jobs are listed.

", + "MatchingWorkflowSummary$workflowName": "

The name of the workflow.

", + "SchemaMappingSummary$schemaName": "

The name of the schema.

", + "StartMatchingJobInput$workflowName": "

The name of the workflow for which to start the matching job.

", + "UpdateMatchingWorkflowInput$workflowName": "

The name of the workflow to be updated.

", + "UpdateMatchingWorkflowOutput$workflowName": "

The name of the workflow.

" + } + }, + "ErrorDetails": { + "base": "

An object containing an error message, if there was an error.

", + "refs": { + "GetMatchingJobOutput$errorDetails": "

An object containing an error message, if there was an error.

" + } + }, + "ErrorMessage": { + "base": null, + "refs": { + "AccessDeniedException$message": null, + "ConflictException$message": null, + "ErrorDetails$errorMessage": "

The error message from the job, if there is one.

", + "ExceedsLimitException$message": null, + "InternalServerException$message": null, + "ResourceNotFoundException$message": null, + "ThrottlingException$message": null, + "ValidationException$message": null + } + }, + "ExceedsLimitException": { + "base": "

The request was rejected because it attempted to create resources beyond the current AWS Entity Resolution account limits. The error message describes the limit exceeded. HTTP Status Code: 402

", + "refs": { + } + }, + "GetMatchIdInput": { + "base": null, + "refs": { + } + }, + "GetMatchIdOutput": { + "base": null, + "refs": { + } + }, + "GetMatchingJobInput": { + "base": null, + "refs": { + } + }, + "GetMatchingJobOutput": { + "base": null, + "refs": { + } + }, + "GetMatchingWorkflowInput": { + "base": null, + "refs": { + } + }, + "GetMatchingWorkflowOutput": { + "base": null, + "refs": { + } + }, + "GetSchemaMappingInput": { + "base": null, + "refs": { + } + }, + "GetSchemaMappingOutput": { + "base": null, + "refs": { + } + }, + "IncrementalRunConfig": { + "base": "

An object which defines an incremental run type and has only incrementalRunType as a field.

", + "refs": { + "CreateMatchingWorkflowInput$incrementalRunConfig": "

An object which defines an incremental run type and has only incrementalRunType as a field.

", + "CreateMatchingWorkflowOutput$incrementalRunConfig": "

An object which defines an incremental run type and has only incrementalRunType as a field.

", + "GetMatchingWorkflowOutput$incrementalRunConfig": "

An object which defines an incremental run type and has only incrementalRunType as a field.

", + "UpdateMatchingWorkflowInput$incrementalRunConfig": "

An object which defines an incremental run type and has only incrementalRunType as a field.

", + "UpdateMatchingWorkflowOutput$incrementalRunConfig": "

An object which defines an incremental run type and has only incrementalRunType as a field.

" + } + }, + "IncrementalRunType": { + "base": null, + "refs": { + "IncrementalRunConfig$incrementalRunType": "

The type of incremental run. It takes only one value: IMMEDIATE.

" + } + }, + "InputSource": { + "base": "

An object containing InputSourceARN, SchemaName, and ApplyNormalization.

", + "refs": { + "InputSourceConfig$member": null + } + }, + "InputSourceConfig": { + "base": null, + "refs": { + "CreateMatchingWorkflowInput$inputSourceConfig": "

A list of InputSource objects, which have the fields InputSourceARN and SchemaName.

", + "CreateMatchingWorkflowOutput$inputSourceConfig": "

A list of InputSource objects, which have the fields InputSourceARN and SchemaName.

", + "GetMatchingWorkflowOutput$inputSourceConfig": "

A list of InputSource objects, which have the fields InputSourceARN and SchemaName.

", + "UpdateMatchingWorkflowInput$inputSourceConfig": "

A list of InputSource objects, which have the fields InputSourceARN and SchemaName.

", + "UpdateMatchingWorkflowOutput$inputSourceConfig": "

A list of InputSource objects, which have the fields InputSourceARN and SchemaName.

" + } + }, + "InputSourceInputSourceARNString": { + "base": null, + "refs": { + "InputSource$inputSourceARN": "

A Glue table ARN for the input source table.

" + } + }, + "Integer": { + "base": null, + "refs": { + "JobMetrics$inputRecords": "

The total number of input records.

", + "JobMetrics$matchIDs": "

The total number of matchIDs generated.

", + "JobMetrics$recordsNotProcessed": "

The total number of records that did not get processed.

", + "JobMetrics$totalRecordsProcessed": "

The total number of records processed.

" + } + }, + "InternalServerException": { + "base": "

This exception occurs when there is an internal failure in the AWS Entity Resolution service. HTTP Status Code: 500

", + "refs": { + } + }, + "JobId": { + "base": null, + "refs": { + "GetMatchingJobInput$jobId": "

The ID of the job.

", + "GetMatchingJobOutput$jobId": "

The ID of the job.

", + "JobSummary$jobId": "

The ID of the job.

", + "StartMatchingJobOutput$jobId": "

The ID of the job.

" + } + }, + "JobList": { + "base": null, + "refs": { + "ListMatchingJobsOutput$jobs": "

A list of JobSummary objects, each of which contains the ID, status, start time, and end time of a job.

" + } + }, + "JobMetrics": { + "base": "

An object containing InputRecords, TotalRecordsProcessed, MatchIDs, and RecordsNotProcessed.

", + "refs": { + "GetMatchingJobOutput$metrics": "

Metrics associated with the execution, specifically total records processed, unique IDs generated, and records the execution skipped.

" + } + }, + "JobStatus": { + "base": null, + "refs": { + "GetMatchingJobOutput$status": "

The current status of the job. Either running, succeeded, queued, or failed.

", + "JobSummary$status": "

The current status of the job. Either running, succeeded, queued, or failed.

" + } + }, + "JobSummary": { + "base": "

An object containing the JobId, Status, StartTime, and EndTime of a job.

", + "refs": { + "JobList$member": null + } + }, + "KMSArn": { + "base": null, + "refs": { + "OutputSource$KMSArn": "

The customer KMS ARN for encryption at rest. If not provided, the system will use an Entity Resolution managed KMS key.

" + } + }, + "ListMatchingJobsInput": { + "base": null, + "refs": { + } + }, + "ListMatchingJobsInputMaxResultsInteger": { + "base": null, + "refs": { + "ListMatchingJobsInput$maxResults": "

The maximum number of objects returned per page.

" + } + }, + "ListMatchingJobsOutput": { + "base": null, + "refs": { + } + }, + "ListMatchingWorkflowsInput": { + "base": null, + "refs": { + } + }, + "ListMatchingWorkflowsInputMaxResultsInteger": { + "base": null, + "refs": { + "ListMatchingWorkflowsInput$maxResults": "

The maximum number of objects returned per page.

" + } + }, + "ListMatchingWorkflowsOutput": { + "base": null, + "refs": { + } + }, + "ListSchemaMappingsInput": { + "base": null, + "refs": { + } + }, + "ListSchemaMappingsInputMaxResultsInteger": { + "base": null, + "refs": { + "ListSchemaMappingsInput$maxResults": "

The maximum number of objects returned per page.

" + } + }, + "ListSchemaMappingsOutput": { + "base": null, + "refs": { + } + }, + "ListTagsForResourceInput": { + "base": null, + "refs": { + } + }, + "ListTagsForResourceOutput": { + "base": null, + "refs": { + } + }, + "MatchingWorkflowArn": { + "base": null, + "refs": { + "CreateMatchingWorkflowOutput$workflowArn": "

The ARN (Amazon Resource Name) that Entity Resolution generated for the MatchingWorkflow.

", + "GetMatchingWorkflowOutput$workflowArn": "

The ARN (Amazon Resource Name) that Entity Resolution generated for the MatchingWorkflow.

", + "MatchingWorkflowSummary$workflowArn": "

The ARN (Amazon Resource Name) that Entity Resolution generated for the MatchingWorkflow.

" + } + }, + "MatchingWorkflowList": { + "base": null, + "refs": { + "ListMatchingWorkflowsOutput$workflowSummaries": "

A list of MatchingWorkflowSummary objects, each of which contains the fields WorkflowName, WorkflowArn, CreatedAt, and UpdatedAt.

" + } + }, + "MatchingWorkflowSummary": { + "base": "

An object containing the fields WorkflowName, WorkflowArn, CreatedAt, and UpdatedAt.

", + "refs": { + "MatchingWorkflowList$member": null + } + }, + "NextToken": { + "base": null, + "refs": { + "ListMatchingJobsInput$nextToken": "

The pagination token from the previous ListMatchingJobs API call.

", + "ListMatchingJobsOutput$nextToken": "

The pagination token from the previous ListMatchingJobs API call.

", + "ListMatchingWorkflowsInput$nextToken": "

The pagination token from the previous ListMatchingWorkflows API call.

", + "ListMatchingWorkflowsOutput$nextToken": "

The pagination token from the previous ListMatchingWorkflows API call.

", + "ListSchemaMappingsInput$nextToken": "

The pagination token from the previous ListSchemaMappings API call.

", + "ListSchemaMappingsOutput$nextToken": "

The pagination token from the previous ListSchemaMappings API call.
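The paginators-1.json file added later in this diff wires nextToken and maxResults for the three List operations, so the generated Pages helpers should thread this token automatically; a hedged sketch:

```go
package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/entityresolution"
)

// printSchemaMappings walks every page of results; the paginator passes
// nextToken between calls so callers never handle it directly.
func printSchemaMappings(client *entityresolution.EntityResolution) error {
	return client.ListSchemaMappingsPages(&entityresolution.ListSchemaMappingsInput{},
		func(page *entityresolution.ListSchemaMappingsOutput, lastPage bool) bool {
			for _, s := range page.SchemaList {
				fmt.Println(aws.StringValue(s.SchemaName))
			}
			return true // keep paging
		})
}
```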

" + } + }, + "OutputAttribute": { + "base": "

An object with the fields Name and Hashed, which selects a column to be included in the output table and specifies whether the values of the column should be hashed.

", + "refs": { + "OutputSourceOutputList$member": null + } + }, + "OutputSource": { + "base": "

An object containing the fields KMSArn, ApplyNormalization, Output, and OutputS3Path, which together define an output destination for the matching workflow.

", + "refs": { + "OutputSourceConfig$member": null + } + }, + "OutputSourceConfig": { + "base": null, + "refs": { + "CreateMatchingWorkflowInput$outputSourceConfig": "

A list of OutputSource objects, each of which contains fields OutputS3Path, ApplyNormalization, and Output.

", + "CreateMatchingWorkflowOutput$outputSourceConfig": "

A list of OutputSource objects, each of which contains fields OutputS3Path, ApplyNormalization, and Output.

", + "GetMatchingWorkflowOutput$outputSourceConfig": "

A list of OutputSource objects, each of which contains fields OutputS3Path, ApplyNormalization, and Output.

", + "UpdateMatchingWorkflowInput$outputSourceConfig": "

A list of OutputSource objects, each of which contains fields OutputS3Path, ApplyNormalization, and Output.

", + "UpdateMatchingWorkflowOutput$outputSourceConfig": "

A list of OutputSource objects, each of which contains fields OutputS3Path, ApplyNormalization, and Output.

" + } + }, + "OutputSourceOutputList": { + "base": null, + "refs": { + "OutputSource$output": "

A list of OutputAttribute objects, each of which has the fields Name and Hashed. Each of these objects selects a column to be included in the output table and specifies whether the values of the column should be hashed.

" + } + }, + "OutputSourceOutputS3PathString": { + "base": null, + "refs": { + "OutputSource$outputS3Path": "

The S3 path to which Entity Resolution will write the output table.

" + } + }, + "RecordAttributeMap": { + "base": null, + "refs": { + "GetMatchIdInput$record": "

The record to fetch the Match ID for.

" + } + }, + "RecordAttributeMapKeyString": { + "base": null, + "refs": { + "RecordAttributeMap$key": null + } + }, + "RecordAttributeMapValueString": { + "base": null, + "refs": { + "RecordAttributeMap$value": null + } + }, + "ResolutionTechniques": { + "base": "

An object which defines the resolutionType and the ruleBasedProperties.

", + "refs": { + "CreateMatchingWorkflowInput$resolutionTechniques": "

An object which defines the resolutionType and the ruleBasedProperties.

", + "CreateMatchingWorkflowOutput$resolutionTechniques": "

An object which defines the resolutionType and the ruleBasedProperties.

", + "GetMatchingWorkflowOutput$resolutionTechniques": "

An object which defines the resolutionType and the ruleBasedProperties.

", + "UpdateMatchingWorkflowInput$resolutionTechniques": "

An object which defines the resolutionType and the ruleBasedProperties.

", + "UpdateMatchingWorkflowOutput$resolutionTechniques": "

An object which defines the resolutionType and the ruleBasedProperties.

" + } + }, + "ResolutionType": { + "base": null, + "refs": { + "ResolutionTechniques$resolutionType": "

The type of matching. There are two values: RULE_MATCHING and ML_MATCHING.

" + } + }, + "ResourceNotFoundException": { + "base": "

The resource could not be found. HTTP Status Code: 404

", + "refs": { + } + }, + "Rule": { + "base": "

An object containing RuleName and MatchingKeys.

", + "refs": { + "RuleBasedPropertiesRulesList$member": null + } + }, + "RuleBasedProperties": { + "base": "

An object which defines the list of matching rules to run and has a field Rules, which is a list of rule objects.

", + "refs": { + "ResolutionTechniques$ruleBasedProperties": "

An object which defines the list of matching rules to run and has a field Rules, which is a list of rule objects.

" + } + }, + "RuleBasedPropertiesRulesList": { + "base": null, + "refs": { + "RuleBasedProperties$rules": "

A list of Rule objects, each of which has the fields RuleName and MatchingKeys.

" + } + }, + "RuleMatchingKeysList": { + "base": null, + "refs": { + "Rule$matchingKeys": "

A list of MatchingKeys. The MatchingKeys must have been defined in the SchemaMapping. Two records are considered to match according to this rule if all of the MatchingKeys match.

" + } + }, + "RuleRuleNameString": { + "base": null, + "refs": { + "Rule$ruleName": "

A name for the matching rule.

" + } + }, + "SchemaAttributeType": { + "base": null, + "refs": { + "SchemaInputAttribute$type": "

The type of the attribute, selected from a list of values.

" + } + }, + "SchemaInputAttribute": { + "base": "

An object containing FieldName, Type, GroupName, and MatchKey.

", + "refs": { + "SchemaInputAttributes$member": null + } + }, + "SchemaInputAttributes": { + "base": null, + "refs": { + "CreateSchemaMappingInput$mappedInputFields": "

A list of MappedInputFields. Each MappedInputField corresponds to a column in the source data table, and contains the column name plus additional information that Entity Resolution uses for matching.

", + "CreateSchemaMappingOutput$mappedInputFields": "

A list of MappedInputFields. Each MappedInputField corresponds to a column in the source data table, and contains the column name plus additional information that Entity Resolution uses for matching.

", + "GetSchemaMappingOutput$mappedInputFields": "

A list of MappedInputFields. Each MappedInputField corresponds to a column in the source data table, and contains the column name plus additional information that Entity Resolution uses for matching.

" + } + }, + "SchemaMappingArn": { + "base": null, + "refs": { + "CreateSchemaMappingOutput$schemaArn": "

The ARN (Amazon Resource Name) that Entity Resolution generated for the SchemaMapping.

", + "GetSchemaMappingOutput$schemaArn": "

The ARN (Amazon Resource Name) that Entity Resolution generated for the SchemaMapping.

", + "SchemaMappingSummary$schemaArn": "

The ARN (Amazon Resource Name) that Entity Resolution generated for the SchemaMapping.

" + } + }, + "SchemaMappingList": { + "base": null, + "refs": { + "ListSchemaMappingsOutput$schemaList": "

A list of SchemaMappingSummary objects, each of which contains the fields SchemaName, SchemaArn, CreatedAt, and UpdatedAt.

" + } + }, + "SchemaMappingSummary": { + "base": "

An object containing SchemaName, SchemaArn, CreatedAt, and UpdatedAt.

", + "refs": { + "SchemaMappingList$member": null + } + }, + "StartMatchingJobInput": { + "base": null, + "refs": { + } + }, + "StartMatchingJobOutput": { + "base": null, + "refs": { + } + }, + "String": { + "base": null, + "refs": { + "CreateMatchingWorkflowInput$roleArn": "

The Amazon Resource Name (ARN) of the IAM role. AWS Entity Resolution assumes this role to create resources on your behalf as part of workflow execution.

", + "CreateMatchingWorkflowOutput$roleArn": "

The Amazon Resource Name (ARN) of the IAM role. AWS Entity Resolution assumes this role to create resources on your behalf as part of workflow execution.

", + "DeleteMatchingWorkflowOutput$message": "

A successful operation message.

", + "DeleteSchemaMappingOutput$message": "

A successful operation message.

", + "GetMatchIdOutput$matchId": "

The unique identifier for this group of matched records.

", + "GetMatchingWorkflowOutput$roleArn": "

The Amazon Resource Name (ARN) of the IAM role. AWS Entity Resolution assumes this role to access resources on your behalf.

", + "UpdateMatchingWorkflowInput$roleArn": "

The Amazon Resource Name (ARN) of the IAM role. AWS Entity Resolution assumes this role to create resources on your behalf as part of workflow execution.

", + "UpdateMatchingWorkflowOutput$roleArn": "

The Amazon Resource Name (ARN) of the IAM role. AWS Entity Resolution assumes this role to create resources on your behalf as part of workflow execution.

" + } + }, + "TagKey": { + "base": null, + "refs": { + "TagKeyList$member": null, + "TagMap$key": null + } + }, + "TagKeyList": { + "base": null, + "refs": { + "UntagResourceInput$tagKeys": "

The list of tag keys to remove from the resource.

" + } + }, + "TagMap": { + "base": null, + "refs": { + "CreateMatchingWorkflowInput$tags": "

The tags used to organize, track, or control access for this resource.

", + "CreateSchemaMappingInput$tags": "

The tags used to organize, track, or control access for this resource.

", + "GetMatchingWorkflowOutput$tags": "

The tags used to organize, track, or control access for this resource.

", + "GetSchemaMappingOutput$tags": "

The tags used to organize, track, or control access for this resource.

", + "ListTagsForResourceOutput$tags": "

The tags used to organize, track, or control access for this resource.

", + "TagResourceInput$tags": "

The tags used to organize, track, or control access for this resource.

" + } + }, + "TagResourceInput": { + "base": null, + "refs": { + } + }, + "TagResourceOutput": { + "base": null, + "refs": { + } + }, + "TagValue": { + "base": null, + "refs": { + "TagMap$value": null + } + }, + "ThrottlingException": { + "base": "

The request was denied due to request throttling. HTTP Status Code: 429

", + "refs": { + } + }, + "Timestamp": { + "base": null, + "refs": { + "GetMatchingJobOutput$endTime": "

The time at which the job finished.

", + "GetMatchingJobOutput$startTime": "

The time at which the job was started.

", + "GetMatchingWorkflowOutput$createdAt": "

The timestamp of when the workflow was created.

", + "GetMatchingWorkflowOutput$updatedAt": "

The timestamp of when the workflow was last updated.

", + "GetSchemaMappingOutput$createdAt": "

The timestamp of when the SchemaMapping was created.

", + "GetSchemaMappingOutput$updatedAt": "

The timestamp of when the SchemaMapping was last updated.

", + "JobSummary$endTime": "

The time at which the job finished.

", + "JobSummary$startTime": "

The time at which the job was started.

", + "MatchingWorkflowSummary$createdAt": "

The timestamp of when the workflow was created.

", + "MatchingWorkflowSummary$updatedAt": "

The timestamp of when the workflow was last updated.

", + "SchemaMappingSummary$createdAt": "

The timestamp of when the SchemaMapping was created.

", + "SchemaMappingSummary$updatedAt": "

The timestamp of when the SchemaMapping was last updated.

" + } + }, + "UntagResourceInput": { + "base": null, + "refs": { + } + }, + "UntagResourceOutput": { + "base": null, + "refs": { + } + }, + "UpdateMatchingWorkflowInput": { + "base": null, + "refs": { + } + }, + "UpdateMatchingWorkflowOutput": { + "base": null, + "refs": { + } + }, + "ValidationException": { + "base": "

The input fails to satisfy the constraints specified by AWS Entity Resolution. HTTP Status Code: 400

", + "refs": { + } + }, + "VeniceGlobalArn": { + "base": null, + "refs": { + "ListTagsForResourceInput$resourceArn": "

The ARN of the resource for which you want to view tags.

", + "TagResourceInput$resourceArn": "

The ARN of the resource to which you want to add tags.

", + "UntagResourceInput$resourceArn": "

The ARN of the resource from which you want to remove tags.

" + } + } + } +} diff --git a/models/apis/entityresolution/2018-05-10/endpoint-rule-set-1.json b/models/apis/entityresolution/2018-05-10/endpoint-rule-set-1.json new file mode 100644 index 00000000000..b4f271ba393 --- /dev/null +++ b/models/apis/entityresolution/2018-05-10/endpoint-rule-set-1.json @@ -0,0 +1,350 @@ +{ + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://entityresolution-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": 
"tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://entityresolution-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://entityresolution.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://entityresolution.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + } + ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ] + } + ] +} \ No newline at end of file diff --git a/models/apis/entityresolution/2018-05-10/endpoint-tests-1.json b/models/apis/entityresolution/2018-05-10/endpoint-tests-1.json new file mode 100644 index 00000000000..a836f3e8d6c --- /dev/null +++ b/models/apis/entityresolution/2018-05-10/endpoint-tests-1.json @@ -0,0 +1,295 @@ +{ + "testCases": [ + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://entityresolution-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseDualStack": true, + "UseFIPS": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://entityresolution-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseDualStack": false, + "UseFIPS": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://entityresolution.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseDualStack": true, + "UseFIPS": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://entityresolution.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseDualStack": false, + "UseFIPS": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://entityresolution-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseDualStack": true, + "UseFIPS": true + } + }, + { + 
"documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://entityresolution-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseDualStack": false, + "UseFIPS": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://entityresolution.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseDualStack": true, + "UseFIPS": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://entityresolution.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseDualStack": false, + "UseFIPS": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseDualStack": true, + "UseFIPS": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://entityresolution-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseDualStack": false, + "UseFIPS": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseDualStack": true, + "UseFIPS": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://entityresolution.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseDualStack": false, + "UseFIPS": false + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://entityresolution-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseDualStack": true, + "UseFIPS": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://entityresolution-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseDualStack": false, + "UseFIPS": true + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://entityresolution.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseDualStack": true, + "UseFIPS": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://entityresolution.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseDualStack": false, + "UseFIPS": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseDualStack": true, + "UseFIPS": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + 
"endpoint": { + "url": "https://entityresolution-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseDualStack": false, + "UseFIPS": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseDualStack": true, + "UseFIPS": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://entityresolution.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseDualStack": false, + "UseFIPS": false + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Region": "us-east-1", + "UseDualStack": false, + "UseFIPS": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseDualStack": false, + "UseFIPS": true, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseDualStack": true, + "UseFIPS": false, + "Endpoint": "https://example.com" + } + } + ], + "version": "1.0" +} \ No newline at end of file diff --git a/models/apis/entityresolution/2018-05-10/examples-1.json b/models/apis/entityresolution/2018-05-10/examples-1.json new file mode 100644 index 00000000000..0ea7e3b0bbe --- /dev/null +++ b/models/apis/entityresolution/2018-05-10/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/models/apis/entityresolution/2018-05-10/paginators-1.json b/models/apis/entityresolution/2018-05-10/paginators-1.json new file mode 100644 index 00000000000..5a129d437d5 --- /dev/null +++ b/models/apis/entityresolution/2018-05-10/paginators-1.json @@ -0,0 +1,22 @@ +{ + "pagination": { + "ListMatchingJobs": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "jobs" + }, + "ListMatchingWorkflows": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "workflowSummaries" + }, + "ListSchemaMappings": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "schemaList" + } + } +} diff --git a/models/apis/glue/2017-03-31/api-2.json b/models/apis/glue/2017-03-31/api-2.json index 3b90178c9d3..2e813e34abf 100644 --- a/models/apis/glue/2017-03-31/api-2.json +++ b/models/apis/glue/2017-03-31/api-2.json @@ -4269,7 +4269,9 @@ "AmazonRedshiftSource":{"shape":"AmazonRedshiftSource"}, "AmazonRedshiftTarget":{"shape":"AmazonRedshiftTarget"}, "EvaluateDataQualityMultiFrame":{"shape":"EvaluateDataQualityMultiFrame"}, - "Recipe":{"shape":"Recipe"} + "Recipe":{"shape":"Recipe"}, + "SnowflakeSource":{"shape":"SnowflakeSource"}, + "SnowflakeTarget":{"shape":"SnowflakeTarget"} } }, "CodeGenConfigurationNodes":{ @@ -11014,6 +11016,56 @@ "SkewedColumnValueLocationMaps":{"shape":"LocationMap"} } }, + "SnowflakeNodeData":{ + 
"type":"structure", + "members":{ + "SourceType":{"shape":"GenericLimitedString"}, + "Connection":{"shape":"Option"}, + "Schema":{"shape":"GenericString"}, + "Table":{"shape":"GenericString"}, + "Database":{"shape":"GenericString"}, + "TempDir":{"shape":"EnclosedInStringProperty"}, + "IamRole":{"shape":"Option"}, + "AdditionalOptions":{"shape":"AdditionalOptions"}, + "SampleQuery":{"shape":"GenericString"}, + "PreAction":{"shape":"GenericString"}, + "PostAction":{"shape":"GenericString"}, + "Action":{"shape":"GenericString"}, + "Upsert":{"shape":"BooleanValue"}, + "MergeAction":{"shape":"GenericLimitedString"}, + "MergeWhenMatched":{"shape":"GenericLimitedString"}, + "MergeWhenNotMatched":{"shape":"GenericLimitedString"}, + "MergeClause":{"shape":"GenericString"}, + "StagingTable":{"shape":"GenericString"}, + "SelectedColumns":{"shape":"OptionList"}, + "AutoPushdown":{"shape":"BooleanValue"}, + "TableSchema":{"shape":"OptionList"} + } + }, + "SnowflakeSource":{ + "type":"structure", + "required":[ + "Name", + "Data" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Data":{"shape":"SnowflakeNodeData"}, + "OutputSchemas":{"shape":"GlueSchemas"} + } + }, + "SnowflakeTarget":{ + "type":"structure", + "required":[ + "Name", + "Data" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Data":{"shape":"SnowflakeNodeData"}, + "Inputs":{"shape":"OneInput"} + } + }, "Sort":{ "type":"string", "enum":[ diff --git a/models/apis/glue/2017-03-31/docs-2.json b/models/apis/glue/2017-03-31/docs-2.json index e0eb9b30f2f..115094fc861 100644 --- a/models/apis/glue/2017-03-31/docs-2.json +++ b/models/apis/glue/2017-03-31/docs-2.json @@ -251,6 +251,7 @@ "S3HudiCatalogTarget$AdditionalOptions": "

Specifies additional connection options for the connector.

", "S3HudiDirectTarget$AdditionalOptions": "

Specifies additional connection options for the connector.

", "S3HudiSource$AdditionalHudiOptions": "

Specifies additional connection options.

", + "SnowflakeNodeData$AdditionalOptions": "

Specifies additional options passed to the Snowflake connector. If options are specified elsewhere in this node, this will take precedence.

", "SparkConnectorSource$AdditionalOptions": "

Additional connection options for the connector.

", "SparkConnectorTarget$AdditionalOptions": "

Additional connection options for the connector.

" } @@ -306,7 +307,7 @@ "base": "

Specifies an Amazon Redshift node.

", "refs": { "AmazonRedshiftSource$Data": "

Specifies the data of the Amazon Reshift source node.

", - "AmazonRedshiftTarget$Data": "

Specifies the data of the Amazon Reshift target node.

" + "AmazonRedshiftTarget$Data": "

Specifies the data of the Amazon Redshift target node.

" } }, "AmazonRedshiftSource": { @@ -767,6 +768,8 @@ "CreateTriggerRequest$StartOnCreation": "

Set to true to start SCHEDULED and CONDITIONAL triggers when created. True is not supported for ON_DEMAND triggers.

", "GetJobRunRequest$PredecessorsIncluded": "

True if a list of predecessor runs should be returned.

", "S3CsvSource$OptimizePerformance": "

A Boolean value that specifies whether to use the advanced SIMD CSV reader along with Apache Arrow based columnar memory formats. Only available in Glue version 3.0.

", + "SnowflakeNodeData$Upsert": "

Used when Action is append. Specifies the resolution behavior when a row already exists. If true, preexisting rows will be updated. If false, those rows will be inserted.

", + "SnowflakeNodeData$AutoPushdown": "

Specifies whether automatic query pushdown is enabled. If pushdown is enabled, then when a query is run on Spark, if part of the query can be \"pushed down\" to the Snowflake server, it is pushed down. This improves performance of some queries.

", "UpdateDevEndpointRequest$UpdateEtlLibraries": "

True if the list of custom libraries to be loaded in the development endpoint needs to be updated, or False if otherwise.

" } }, @@ -2904,6 +2907,7 @@ "S3JsonSource$JsonPath": "

A JsonPath string defining the JSON data.

", "S3ParquetSource$GroupSize": "

The target group size in bytes. The default is computed based on the input data size and the size of your cluster. When there are fewer than 50,000 input files, \"groupFiles\" must be set to \"inPartition\" for this to take effect.

", "S3ParquetSource$GroupFiles": "

Grouping files is turned on by default when the input contains more than 50,000 files. To turn on grouping with fewer than 50,000 files, set this parameter to \"inPartition\". To disable grouping when there are more than 50,000 files, set this parameter to \"none\".

", + "SnowflakeNodeData$TempDir": "

Not currently used.

", "SparkConnectorSource$ConnectionName": "

The name of the connection that is associated with the connector.

", "SparkConnectorSource$ConnectorName": "

The name of a connector that assists with accessing the data store in Glue Studio.

", "SparkConnectorSource$ConnectionType": "

The type of connection, such as marketplace.spark or custom.spark, designating a connection to an Apache Spark data store.

", @@ -3266,7 +3270,11 @@ "DQResultsPublishingOptions$EvaluationContext": "

The context of the evaluation.

", "Datatype$Id": "

The datatype of the value.

", "Datatype$Label": "

A label assigned to the datatype.

", - "LimitedStringList$member": null + "LimitedStringList$member": null, + "SnowflakeNodeData$SourceType": "

Specifies whether the data is retrieved from a table or by a query. Valid values: \"table\", \"query\".

", + "SnowflakeNodeData$MergeAction": "

Specifies a merge action. Valid values: simple, custom. If simple, merge behavior is defined by MergeWhenMatched and MergeWhenNotMatched. If custom, it is defined by MergeClause.

", + "SnowflakeNodeData$MergeWhenMatched": "

Specifies how to resolve records that match preexisting data when merging. Valid values: update, delete.

", + "SnowflakeNodeData$MergeWhenNotMatched": "

Specifies how to process records that do not match preexisting data when merging. Valid values: insert, none.
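Putting the merge-related fields together, a Glue Studio Snowflake target configured for a simple merge might be assembled as in this sketch; the connection, database, schema, table, and node names are all placeholders.

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/glue"
)

// snowflakeMergeTarget sketches a Glue Studio Snowflake target node doing a
// simple merge: matched rows are updated, unmatched rows are inserted.
func snowflakeMergeTarget() *glue.SnowflakeTarget {
	return &glue.SnowflakeTarget{
		Name: aws.String("snowflake-sink"), // placeholder node name
		Data: &glue.SnowflakeNodeData{
			Connection:          &glue.Option{Value: aws.String("my-snowflake-connection")}, // placeholder catalog connection
			Database:            aws.String("ANALYTICS"),
			Schema:              aws.String("PUBLIC"),
			Table:               aws.String("CUSTOMERS"),
			Action:              aws.String("merge"),
			MergeAction:         aws.String("simple"),
			MergeWhenMatched:    aws.String("update"),
			MergeWhenNotMatched: aws.String("insert"),
		},
		Inputs: []*string{aws.String("upstream-node")}, // placeholder input node
	}
}
```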

" } }, "GenericMap": { @@ -3377,6 +3385,15 @@ "MapValue$value": null, "OrchestrationStringList$member": null, "PublicKeysList$member": null, + "SnowflakeNodeData$Schema": "

Specifies a Snowflake database schema for your node to use.

", + "SnowflakeNodeData$Table": "

Specifies a Snowflake table for your node to use.

", + "SnowflakeNodeData$Database": "

Specifies a Snowflake database for your node to use.

", + "SnowflakeNodeData$SampleQuery": "

A SQL string used to retrieve data with the \"query\" source type.

", + "SnowflakeNodeData$PreAction": "

A SQL string run before the Snowflake connector performs its standard actions.

", + "SnowflakeNodeData$PostAction": "

A SQL string run after the Snowflake connector performs its standard actions.

", + "SnowflakeNodeData$Action": "

Specifies what action to take when writing to a table with preexisting data. Valid values: append, merge, truncate, drop.

", + "SnowflakeNodeData$MergeClause": "

A SQL statement that specifies a custom merge behavior.

", + "SnowflakeNodeData$StagingTable": "

The name of a staging table used when performing merge or upsert append actions. Data is written to this table, then moved to the target table by a generated postaction.

", "Statement$Code": "

The execution code of the statement.

", "StatementOutput$ErrorName": "

The name of the error in the output.

", "StatementOutput$ErrorValue": "

The error value of the output.

", @@ -4139,6 +4156,7 @@ "S3HudiSource$OutputSchemas": "

Specifies the data schema for the Hudi source.

", "S3JsonSource$OutputSchemas": "

Specifies the data schema for the S3 JSON source.

", "S3ParquetSource$OutputSchemas": "

Specifies the data schema for the S3 Parquet source.

", + "SnowflakeSource$OutputSchemas": "

Specifies user-defined schemas for your output data.

", "SparkConnectorSource$OutputSchemas": "

Specifies data schema for the custom spark source.

", "SparkConnectorTarget$OutputSchemas": "

Specifies the data schema for the custom spark target.

", "SparkSQL$OutputSchemas": "

Specifies the data schema for the SparkSQL transform.

" @@ -5736,6 +5754,8 @@ "S3ParquetSource$Name": "

The name of the data store.

", "SelectFields$Name": "

The name of the transform node.

", "SelectFromCollection$Name": "

The name of the transform node.

", + "SnowflakeSource$Name": "

The name of the Snowflake data source.

", + "SnowflakeTarget$Name": "

The name of the Snowflake target.

", "SparkConnectorSource$Name": "

The name of the data source.

", "SparkConnectorTarget$Name": "

The name of the data target.

", "SparkSQL$Name": "

The name of the transform node.

", @@ -5953,6 +5973,7 @@ "S3HudiDirectTarget$Inputs": "

The nodes that are inputs to the data target.

", "SelectFields$Inputs": "

The data inputs identified by their node names.

", "SelectFromCollection$Inputs": "

The data inputs identified by their node names.

", + "SnowflakeTarget$Inputs": "

The nodes that are inputs to the data target.

", "SparkConnectorTarget$Inputs": "

The nodes that are inputs to the data target.

", "Spigot$Inputs": "

The data inputs identified by their node names.

", "SplitFields$Inputs": "

The data inputs identified by their node names.

" @@ -5978,14 +5999,18 @@ "AmazonRedshiftNodeData$CatalogDatabase": "

The name of the Glue Data Catalog database when working with a data catalog.

", "AmazonRedshiftNodeData$CatalogTable": "

The Glue Data Catalog table name when working with a data catalog.

", "AmazonRedshiftNodeData$IamRole": "

Optional. The role name use when connection to S3. The IAM role ill default to the role on the job when left blank.

", - "OptionList$member": null + "OptionList$member": null, + "SnowflakeNodeData$Connection": "

Specifies a Glue Data Catalog Connection to a Snowflake endpoint.

", + "SnowflakeNodeData$IamRole": "

Not currently used.

" } }, "OptionList": { "base": null, "refs": { "AmazonRedshiftNodeData$TableSchema": "

The array of schema output for a given node.

", - "AmazonRedshiftNodeData$SelectedColumns": "

The list of column names used to determine a matching record when doing a MERGE or APPEND with upsert.

" + "AmazonRedshiftNodeData$SelectedColumns": "

The list of column names used to determine a matching record when doing a MERGE or APPEND with upsert.

", + "SnowflakeNodeData$SelectedColumns": "

Specifies the columns combined to identify a record when detecting matches for merges and upserts. A list of structures with value, label and description keys. Each structure describes a column.

", + "SnowflakeNodeData$TableSchema": "

Manually defines the target schema for the node. A list of structures with value, label, and description keys. Each structure defines a column.

" } }, "OracleSQLCatalogSource": { @@ -7266,6 +7291,25 @@ "StorageDescriptor$SkewedInfo": "

The information about values that appear frequently in a column (skewed values).

" } }, + "SnowflakeNodeData": { + "base": "

Specifies configuration for Snowflake nodes in Glue Studio.

", + "refs": { + "SnowflakeSource$Data": "

Configuration for the Snowflake data source.

", + "SnowflakeTarget$Data": "

Specifies the data of the Snowflake target node.

" + } + }, + "SnowflakeSource": { + "base": "

Specifies a Snowflake data source.

", + "refs": { + "CodeGenConfigurationNode$SnowflakeSource": "

Specifies a Snowflake data source.

" + } + }, + "SnowflakeTarget": { + "base": "

Specifies a Snowflake target.

", + "refs": { + "CodeGenConfigurationNode$SnowflakeTarget": "

Specifies a target that writes to a Snowflake data source.

" + } + }, "Sort": { "base": null, "refs": { diff --git a/models/apis/healthlake/2017-07-01/docs-2.json b/models/apis/healthlake/2017-07-01/docs-2.json index 5d62f91210c..c5235afb508 100644 --- a/models/apis/healthlake/2017-07-01/docs-2.json +++ b/models/apis/healthlake/2017-07-01/docs-2.json @@ -1,20 +1,20 @@ { "version": "2.0", - "service": "

Amazon HealthLake is a HIPAA eligibile service that allows customers to store, transform, query, and analyze their FHIR-formatted data in a consistent fashion in the cloud.

", + "service": "

AWS HealthLake is a HIPAA-eligible service that allows customers to store, transform, query, and analyze their FHIR-formatted data in a consistent fashion in the cloud.

", "operations": { - "CreateFHIRDatastore": "

Creates a Data Store that can ingest and export FHIR formatted data.

", - "DeleteFHIRDatastore": "

Deletes a Data Store.

", - "DescribeFHIRDatastore": "

Gets the properties associated with the FHIR Data Store, including the Data Store ID, Data Store ARN, Data Store name, Data Store status, created at, Data Store type version, and Data Store endpoint.

", + "CreateFHIRDatastore": "

Creates a data store that can ingest and export FHIR formatted data.

", + "DeleteFHIRDatastore": "

Deletes a data store.

", + "DescribeFHIRDatastore": "

Gets the properties associated with the FHIR data store, including the data store ID, data store ARN, data store name, data store status, when the data store was created, data store type version, and the data store's endpoint.

", "DescribeFHIRExportJob": "

Displays the properties of a FHIR export job, including the ID, ARN, name, and the status of the job.

", "DescribeFHIRImportJob": "

Displays the properties of a FHIR import job, including the ID, ARN, name, and the status of the job.

", - "ListFHIRDatastores": "

Lists all FHIR Data Stores that are in the user’s account, regardless of Data Store status.

", + "ListFHIRDatastores": "

Lists all FHIR data stores that are in the user’s account, regardless of data store status.

", "ListFHIRExportJobs": "

Lists all FHIR export jobs associated with an account and their statuses.

", "ListFHIRImportJobs": "

Lists all FHIR import jobs associated with an account and their statuses.

", - "ListTagsForResource": "

Returns a list of all existing tags associated with a Data Store.

", + "ListTagsForResource": "

Returns a list of all existing tags associated with a data store.

", "StartFHIRExportJob": "

Begins a FHIR export job.

", "StartFHIRImportJob": "

Begins a FHIR Import job.

", - "TagResource": "

Adds a user specified key and value tag to a Data Store.

", - "UntagResource": "

Removes tags from a Data Store.

" + "TagResource": "

Adds a user-specified key and value tag to a data store.

", + "UntagResource": "

Removes tags from a data store.

" }, "shapes": { "AccessDeniedException": { @@ -25,28 +25,28 @@ "AmazonResourceName": { "base": null, "refs": { - "ListTagsForResourceRequest$ResourceARN": "

The Amazon Resource Name(ARN) of the Data Store for which tags are being added.

", - "TagResourceRequest$ResourceARN": "

The Amazon Resource Name(ARN)that gives Amazon HealthLake access to the Data Store which tags are being added to.

", - "UntagResourceRequest$ResourceARN": "

\"The Amazon Resource Name(ARN) of the Data Store for which tags are being removed

" + "ListTagsForResourceRequest$ResourceARN": "

The Amazon Resource Name (ARN) of the data store for which tags are being added.

", + "TagResourceRequest$ResourceARN": "

The Amazon Resource Name (ARN) that gives AWS HealthLake access to the data store to which tags are being added.

", + "UntagResourceRequest$ResourceARN": "

The Amazon Resource Name (ARN) of the data store for which tags are being removed.

" } }, "AuthorizationStrategy": { "base": null, "refs": { - "IdentityProviderConfiguration$AuthorizationStrategy": "

The authorization strategy that you selected when you created the Data Store.

" + "IdentityProviderConfiguration$AuthorizationStrategy": "

The authorization strategy that you selected when you created the data store.

" } }, "Boolean": { "base": null, "refs": { - "IdentityProviderConfiguration$FineGrainedAuthorizationEnabled": "

If you enabled fine-grained authorization when you created the Data Store.

" + "IdentityProviderConfiguration$FineGrainedAuthorizationEnabled": "

If you enabled fine-grained authorization when you created the data store.

" } }, "BoundedLengthString": { "base": null, "refs": { - "CreateFHIRDatastoreResponse$DatastoreEndpoint": "

The AWS endpoint for the created Data Store.

", - "DeleteFHIRDatastoreResponse$DatastoreEndpoint": "

The AWS endpoint for the Data Store the user has requested to be deleted.

" + "CreateFHIRDatastoreResponse$DatastoreEndpoint": "

The AWS endpoint for the created data store.

", + "DeleteFHIRDatastoreResponse$DatastoreEndpoint": "

The AWS endpoint for the data store the user has requested to be deleted.

" } }, "ClientTokenString": { @@ -60,7 +60,7 @@ "CmkType": { "base": null, "refs": { - "KmsEncryptionConfig$CmkType": "

The type of customer-managed-key(CMK) used for encyrption. The two types of supported CMKs are customer owned CMKs and AWS owned CMKs.

" + "KmsEncryptionConfig$CmkType": "

The type of customer managed key (CMK) used for encryption. The two types of supported CMKs are customer-owned CMKs and AWS-owned CMKs.

" } }, "ConfigurationMetadata": { @@ -70,7 +70,7 @@ } }, "ConflictException": { - "base": "

The Data Store is in a transition state and the user requested action can not be performed.

", + "base": "

The data store is in a transition state and the user-requested action cannot be performed.

", "refs": { } }, @@ -87,65 +87,65 @@ "DatastoreArn": { "base": null, "refs": { - "CreateFHIRDatastoreResponse$DatastoreArn": "

The Data Store ARN is generated during the creation of the Data Store and can be found in the output from the initial Data Store creation call.

", - "DatastoreProperties$DatastoreArn": "

The Amazon Resource Name used in the creation of the Data Store.

", - "DeleteFHIRDatastoreResponse$DatastoreArn": "

The Amazon Resource Name (ARN) that gives Amazon HealthLake access permission.

" + "CreateFHIRDatastoreResponse$DatastoreArn": "

The data store ARN is generated during the creation of the data store and can be found in the output from the initial data store creation call.

", + "DatastoreProperties$DatastoreArn": "

The Amazon Resource Name used in the creation of the data store.

", + "DeleteFHIRDatastoreResponse$DatastoreArn": "

The Amazon Resource Name (ARN) that gives AWS HealthLake access permission.

" } }, "DatastoreFilter": { - "base": "

The filters applied to Data Store query.

", + "base": "

The filters applied to the data store query.

", "refs": { - "ListFHIRDatastoresRequest$Filter": "

Lists all filters associated with a FHIR Data Store request.

" + "ListFHIRDatastoresRequest$Filter": "

Lists all filters associated with a FHIR data store request.

" } }, "DatastoreId": { "base": null, "refs": { - "CreateFHIRDatastoreResponse$DatastoreId": "

The AWS-generated Data Store id. This id is in the output from the initial Data Store creation call.

", - "DatastoreProperties$DatastoreId": "

The AWS-generated ID number for the Data Store.

", - "DeleteFHIRDatastoreRequest$DatastoreId": "

The AWS-generated ID for the Data Store to be deleted.

", - "DeleteFHIRDatastoreResponse$DatastoreId": "

The AWS-generated ID for the Data Store to be deleted.

", - "DescribeFHIRDatastoreRequest$DatastoreId": "

The AWS-generated Data Store ID.

", - "DescribeFHIRExportJobRequest$DatastoreId": "

The AWS generated ID for the Data Store from which files are being exported from for an export job.

", - "DescribeFHIRImportJobRequest$DatastoreId": "

The AWS-generated ID of the Data Store.

", - "ExportJobProperties$DatastoreId": "

The AWS generated ID for the Data Store from which files are being exported for an export job.

", + "CreateFHIRDatastoreResponse$DatastoreId": "

The AWS-generated data store ID. This ID is in the output from the initial data store creation call.

", + "DatastoreProperties$DatastoreId": "

The AWS-generated ID number for the data store.

", + "DeleteFHIRDatastoreRequest$DatastoreId": "

The AWS-generated ID for the data store to be deleted.

", + "DeleteFHIRDatastoreResponse$DatastoreId": "

The AWS-generated ID for the data store to be deleted.

", + "DescribeFHIRDatastoreRequest$DatastoreId": "

The AWS-generated data store ID.

", + "DescribeFHIRExportJobRequest$DatastoreId": "

The AWS-generated ID for the data store from which files are being exported for an export job.

", + "DescribeFHIRImportJobRequest$DatastoreId": "

The AWS-generated ID of the data store.

", + "ExportJobProperties$DatastoreId": "

The AWS generated ID for the data store from which files are being exported for an export job.

", "ImportJobProperties$DatastoreId": "

The datastore id used when the Import job was created.

", - "ListFHIRExportJobsRequest$DatastoreId": "

This parameter limits the response to the export job with the specified Data Store ID.

", - "ListFHIRImportJobsRequest$DatastoreId": "

This parameter limits the response to the import job with the specified Data Store ID.

", - "StartFHIRExportJobRequest$DatastoreId": "

The AWS generated ID for the Data Store from which files are being exported for an export job.

", - "StartFHIRExportJobResponse$DatastoreId": "

The AWS generated ID for the Data Store from which files are being exported for an export job.

", - "StartFHIRImportJobRequest$DatastoreId": "

The AWS-generated Data Store ID.

", - "StartFHIRImportJobResponse$DatastoreId": "

The AWS-generated Data Store ID.

" + "ListFHIRExportJobsRequest$DatastoreId": "

This parameter limits the response to the export job with the specified data store ID.

", + "ListFHIRImportJobsRequest$DatastoreId": "

This parameter limits the response to the import job with the specified data store ID.

", + "StartFHIRExportJobRequest$DatastoreId": "

The AWS generated ID for the data store from which files are being exported for an export job.

", + "StartFHIRExportJobResponse$DatastoreId": "

The AWS generated ID for the data store from which files are being exported for an export job.

", + "StartFHIRImportJobRequest$DatastoreId": "

The AWS-generated data store ID.

", + "StartFHIRImportJobResponse$DatastoreId": "

The AWS-generated data store ID.

" } }, "DatastoreName": { "base": null, "refs": { - "CreateFHIRDatastoreRequest$DatastoreName": "

The user generated name for the Data Store.

", - "DatastoreFilter$DatastoreName": "

Allows the user to filter Data Store results by name.

", - "DatastoreProperties$DatastoreName": "

The user-generated name for the Data Store.

" + "CreateFHIRDatastoreRequest$DatastoreName": "

The user-generated name for the data store.

", + "DatastoreFilter$DatastoreName": "

Allows the user to filter data store results by name.

", + "DatastoreProperties$DatastoreName": "

The user-generated name for the data store.

" } }, "DatastoreProperties": { - "base": "

Displays the properties of the Data Store, including the ID, ARN, name, and the status of the Data Store.

", + "base": "

Displays the properties of the data store, including the ID, ARN, name, and the status of the data store.

", "refs": { "DatastorePropertiesList$member": null, - "DescribeFHIRDatastoreResponse$DatastoreProperties": "

All properties associated with a Data Store, including the Data Store ID, Data Store ARN, Data Store name, Data Store status, created at, Data Store type version, and Data Store endpoint.

" + "DescribeFHIRDatastoreResponse$DatastoreProperties": "

All properties associated with a data store, including the data store ID, data store ARN, data store name, data store status, when the data store was created, data store type version, and the data store's endpoint.

" } }, "DatastorePropertiesList": { "base": null, "refs": { - "ListFHIRDatastoresResponse$DatastorePropertiesList": "

All properties associated with the listed Data Stores.

" + "ListFHIRDatastoresResponse$DatastorePropertiesList": "

All properties associated with the listed data stores.

" } }, "DatastoreStatus": { "base": null, "refs": { - "CreateFHIRDatastoreResponse$DatastoreStatus": "

The status of the FHIR Data Store. Possible statuses are ‘CREATING’, ‘ACTIVE’, ‘DELETING’, ‘DELETED’.

", - "DatastoreFilter$DatastoreStatus": "

Allows the user to filter Data Store results by status.

", - "DatastoreProperties$DatastoreStatus": "

The status of the Data Store. Possible statuses are 'CREATING', 'ACTIVE', 'DELETING', or 'DELETED'.

", - "DeleteFHIRDatastoreResponse$DatastoreStatus": "

The status of the Data Store that the user has requested to be deleted.

" + "CreateFHIRDatastoreResponse$DatastoreStatus": "

The status of the FHIR data store.

", + "DatastoreFilter$DatastoreStatus": "

Allows the user to filter data store results by status.

", + "DatastoreProperties$DatastoreStatus": "

The status of the data store.

", + "DeleteFHIRDatastoreResponse$DatastoreStatus": "

The status of the data store that the user has requested to be deleted.

" } }, "DeleteFHIRDatastoreRequest": { @@ -191,7 +191,7 @@ "EncryptionKeyID": { "base": null, "refs": { - "KmsEncryptionConfig$KmsKeyId": "

The KMS encryption key id/alias used to encrypt the Data Store contents at rest.

", + "KmsEncryptionConfig$KmsKeyId": "

The KMS encryption key id/alias used to encrypt the data store contents at rest.

", "S3Configuration$KmsKeyId": "

The KMS key ID used to access the S3 bucket.

" } }, @@ -211,7 +211,7 @@ "FHIRVersion": { "base": null, "refs": { - "CreateFHIRDatastoreRequest$DatastoreTypeVersion": "

The FHIR version of the Data Store. The only supported version is R4.

", + "CreateFHIRDatastoreRequest$DatastoreTypeVersion": "

The FHIR version of the data store. The only supported version is R4.
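
For orientation, here is a hedged aws-sdk-go (v1) sketch of a CreateFHIRDatastore call that pulls these request fields together; the data store name, tag, KMS alias, and the CMK-type enum string are assumptions, not values confirmed by this diff.

```go
// Sketch only: create a FHIR R4 data store encrypted with a customer
// managed key. Every identifier below is an assumption.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/healthlake"
)

func main() {
	client := healthlake.New(session.Must(session.NewSession()))

	out, err := client.CreateFHIRDatastore(&healthlake.CreateFHIRDatastoreInput{
		DatastoreName:        aws.String("my-fhir-datastore"),
		DatastoreTypeVersion: aws.String("R4"), // the only supported FHIR version
		SseConfiguration: &healthlake.SseConfiguration{
			KmsEncryptionConfig: &healthlake.KmsEncryptionConfig{
				CmkType:  aws.String("CUSTOMER_MANAGED_KMS_KEY"), // assumed enum value
				KmsKeyId: aws.String("alias/my-healthlake-key"),  // assumed key alias
			},
		},
		Tags: []*healthlake.Tag{
			{Key: aws.String("env"), Value: aws.String("dev")},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("data store endpoint:", aws.StringValue(out.DatastoreEndpoint))
}
```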

", "DatastoreProperties$DatastoreTypeVersion": "

The FHIR version. Only R4 version data is supported.

" } }, @@ -219,20 +219,20 @@ "base": null, "refs": { "ExportJobProperties$DataAccessRoleArn": "

The Amazon Resource Name used during the initiation of the job.

", - "ImportJobProperties$DataAccessRoleArn": "

The Amazon Resource Name (ARN) that gives Amazon HealthLake access to your input data.

", + "ImportJobProperties$DataAccessRoleArn": "

The Amazon Resource Name (ARN) that gives AWS HealthLake access to your input data.

", "StartFHIRExportJobRequest$DataAccessRoleArn": "

The Amazon Resource Name used during the initiation of the job.

", - "StartFHIRImportJobRequest$DataAccessRoleArn": "

The Amazon Resource Name (ARN) that gives Amazon HealthLake access permission.

" + "StartFHIRImportJobRequest$DataAccessRoleArn": "

The Amazon Resource Name (ARN) that gives AWS HealthLake access permission.

" } }, "IdentityProviderConfiguration": { - "base": "

The identity provider configuration that you gave when the Data Store was created.

", + "base": "

The identity provider configuration that you gave when the data store was created.

", "refs": { - "CreateFHIRDatastoreRequest$IdentityProviderConfiguration": "

The configuration of the identity provider that you want to use for your Data Store.

", - "DatastoreProperties$IdentityProviderConfiguration": "

The identity provider that you selected when you created the Data Store.

" + "CreateFHIRDatastoreRequest$IdentityProviderConfiguration": "

The configuration of the identity provider that you want to use for your data store.

", + "DatastoreProperties$IdentityProviderConfiguration": "

The identity provider that you selected when you created the data store.

" } }, "ImportJobProperties": { - "base": "

Displays the properties of the import job, including the ID, Arn, Name, and the status of the Data Store.

", + "base": "

Displays the properties of the import job, including the ID, ARN, name, and the status of the data store.

", "refs": { "DescribeFHIRImportJobResponse$ImportJobProperties": "

The properties of the Import job request, including the ID, ARN, name, and the status of the job.

", "ImportJobPropertiesList$member": null @@ -290,7 +290,7 @@ } }, "KmsEncryptionConfig": { - "base": "

The customer-managed-key(CMK) used when creating a Data Store. If a customer owned key is not specified, an AWS owned key will be used for encryption.

", + "base": "

The customer managed key (CMK) used when creating a data store. If a customer-owned key is not specified, an AWS-owned key will be used for encryption.

", "refs": { "SseConfiguration$KmsEncryptionConfig": "

The KMS encryption configuration used to provide details for data encryption.

" } @@ -344,7 +344,7 @@ "MaxResultsInteger": { "base": null, "refs": { - "ListFHIRDatastoresRequest$MaxResults": "

The maximum number of Data Stores returned in a single page of a ListFHIRDatastoresRequest call.

", + "ListFHIRDatastoresRequest$MaxResults": "

The maximum number of data stores returned in a single page of a ListFHIRDatastoresRequest call.

", "ListFHIRExportJobsRequest$MaxResults": "

This parameter limits the number of results returned for a ListFHIRExportJobs to a maximum quantity specified by the user.

", "ListFHIRImportJobsRequest$MaxResults": "

This parameter limits the number of results returned for a ListFHIRImportJobs to a maximum quantity specified by the user.

" } @@ -359,7 +359,7 @@ "NextToken": { "base": null, "refs": { - "ListFHIRDatastoresRequest$NextToken": "

Fetches the next page of Data Stores when results are paginated.

", + "ListFHIRDatastoresRequest$NextToken": "

Fetches the next page of data stores when results are paginated.

", "ListFHIRDatastoresResponse$NextToken": "

Pagination token that can be used to retrieve the next page of results.

", "ListFHIRExportJobsRequest$NextToken": "

A pagination token used to identify the next page of results to return for a ListFHIRExportJobs query.

", "ListFHIRExportJobsResponse$NextToken": "

A pagination token used to identify the next page of results to return for a ListFHIRExportJobs query.

", @@ -377,10 +377,10 @@ } }, "PreloadDataConfig": { - "base": "

The input properties for the preloaded Data Store. Only data preloaded from Synthea is supported.

", + "base": "

The input properties for the preloaded data store. Only data preloaded from Synthea is supported.

", "refs": { - "CreateFHIRDatastoreRequest$PreloadDataConfig": "

Optional parameter to preload data upon creation of the Data Store. Currently, the only supported preloaded data is synthetic data generated from Synthea.

", - "DatastoreProperties$PreloadDataConfig": "

The preloaded data configuration for the Data Store. Only data preloaded from Synthea is supported.

" + "CreateFHIRDatastoreRequest$PreloadDataConfig": "

Optional parameter to preload data upon creation of the data store. Currently, the only supported preloaded data is synthetic data generated from Synthea.

", + "DatastoreProperties$PreloadDataConfig": "

The preloaded data configuration for the data store. Only data preloaded from Synthea is supported.

" } }, "PreloadDataType": { @@ -390,7 +390,7 @@ } }, "ResourceNotFoundException": { - "base": "

The requested Data Store was not found.

", + "base": "

The requested data store was not found.

", "refs": { } }, @@ -403,14 +403,14 @@ "S3Uri": { "base": null, "refs": { - "InputDataConfig$S3Uri": "

The S3Uri is the user specified S3 location of the FHIR data to be imported into Amazon HealthLake.

", - "S3Configuration$S3Uri": "

The S3Uri is the user specified S3 location of the FHIR data to be imported into Amazon HealthLake.

" + "InputDataConfig$S3Uri": "

The S3Uri is the user-specified S3 location of the FHIR data to be imported into AWS HealthLake.

", + "S3Configuration$S3Uri": "

The S3Uri is the user-specified S3 location of the FHIR data to be imported into AWS HealthLake.
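
A hedged aws-sdk-go (v1) sketch of an import job using these S3 fields; the bucket paths, role ARN, and key alias are placeholders, and the surrounding types follow the model entries in this diff.

```go
// Sketch only: start a FHIR import job from an assumed S3 input location,
// writing job output to an assumed output path.
package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/healthlake"
)

func startImport(client *healthlake.HealthLake, datastoreID string) error {
	out, err := client.StartFHIRImportJob(&healthlake.StartFHIRImportJobInput{
		DatastoreId: aws.String(datastoreID),
		InputDataConfig: &healthlake.InputDataConfig{
			S3Uri: aws.String("s3://my-bucket/fhir-input/"), // assumed input location
		},
		JobOutputDataConfig: &healthlake.OutputDataConfig{
			S3Configuration: &healthlake.S3Configuration{
				S3Uri:    aws.String("s3://my-bucket/fhir-output/"), // assumed output location
				KmsKeyId: aws.String("alias/my-healthlake-key"),     // assumed key alias
			},
		},
		// Role that grants HealthLake access to the input data.
		DataAccessRoleArn: aws.String("arn:aws:iam::123456789012:role/HealthLakeImportRole"),
	})
	if err != nil {
		return err
	}
	fmt.Println("import job started:", aws.StringValue(out.JobId))
	return nil
}
```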

" } }, "SseConfiguration": { "base": "

The server-side encryption key configuration for a customer provided encryption key.

", "refs": { - "CreateFHIRDatastoreRequest$SseConfiguration": "

The server-side encryption key configuration for a customer provided encryption key specified for creating a Data Store.

", + "CreateFHIRDatastoreRequest$SseConfiguration": "

The server-side encryption key configuration for a customer provided encryption key specified for creating a data store.

", "DatastoreProperties$SseConfiguration": "

The server-side encryption key configuration for a customer provided encryption key (CMK).

" } }, @@ -439,7 +439,7 @@ "refs": { "AccessDeniedException$Message": null, "ConflictException$Message": null, - "DatastoreProperties$DatastoreEndpoint": "

The AWS endpoint for the Data Store. Each Data Store will have it's own endpoint with Data Store ID in the endpoint URL.

", + "DatastoreProperties$DatastoreEndpoint": "

The AWS endpoint for the data store. Each data store will have its own endpoint, with the data store ID in the endpoint URL.

", "InternalServerException$Message": null, "ResourceNotFoundException$Message": null, "ThrottlingException$Message": null, @@ -462,15 +462,15 @@ "TagKeyList": { "base": null, "refs": { - "UntagResourceRequest$TagKeys": "

The keys for the tags to be removed from the Healthlake Data Store.

" + "UntagResourceRequest$TagKeys": "

The keys for the tags to be removed from the HealthLake data store.

" } }, "TagList": { "base": null, "refs": { - "CreateFHIRDatastoreRequest$Tags": "

Resource tags that are applied to a Data Store when it is created.

", - "ListTagsForResourceResponse$Tags": "

Returns a list of tags associated with a Data Store.

", - "TagResourceRequest$Tags": "

The user specified key and value pair tags being added to a Data Store.

" + "CreateFHIRDatastoreRequest$Tags": "

Resource tags that are applied to a data store when it is created.

", + "ListTagsForResourceResponse$Tags": "

Returns a list of tags associated with a data store.

", + "TagResourceRequest$Tags": "

The user-specified key and value pair tags being added to a data store.

" } }, "TagResourceRequest": { @@ -497,9 +497,9 @@ "Timestamp": { "base": null, "refs": { - "DatastoreFilter$CreatedBefore": "

A filter that allows the user to set cutoff dates for records. All Data Stores created before the specified date will be included in the results.

", - "DatastoreFilter$CreatedAfter": "

A filter that allows the user to set cutoff dates for records. All Data Stores created after the specified date will be included in the results.

", - "DatastoreProperties$CreatedAt": "

The time that a Data Store was created.

", + "DatastoreFilter$CreatedBefore": "

A filter that allows the user to set cutoff dates for records. All data stores created before the specified date will be included in the results.

", + "DatastoreFilter$CreatedAfter": "

A filter that allows the user to set cutoff dates for records. All data stores created after the specified date will be included in the results.
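
These cutoff filters pair naturally with NextToken pagination. Below is a hedged aws-sdk-go (v1) sketch that lists ACTIVE data stores created after an assumed cutoff date.

```go
// Sketch only: list ACTIVE data stores created after a cutoff date,
// following NextToken until it comes back nil.
package example

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/healthlake"
)

func listRecentDatastores(client *healthlake.HealthLake) error {
	input := &healthlake.ListFHIRDatastoresInput{
		Filter: &healthlake.DatastoreFilter{
			DatastoreStatus: aws.String("ACTIVE"), // one of the documented statuses
			CreatedAfter:    aws.Time(time.Date(2023, 1, 1, 0, 0, 0, 0, time.UTC)),
		},
		MaxResults: aws.Int64(50),
	}
	for {
		page, err := client.ListFHIRDatastores(input)
		if err != nil {
			return err
		}
		for _, ds := range page.DatastorePropertiesList {
			fmt.Println(aws.StringValue(ds.DatastoreName))
		}
		if page.NextToken == nil {
			return nil
		}
		input.NextToken = page.NextToken
	}
}
```

The same loop shape applies to the export and import job listings, which share the MaxResults and NextToken fields.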

", + "DatastoreProperties$CreatedAt": "

The time that a data store was created.

", "ExportJobProperties$SubmitTime": "

The time an export job was initiated.

", "ExportJobProperties$EndTime": "

The time an export job completed.

", "ImportJobProperties$SubmitTime": "

The time that the Import job was submitted for processing.

", diff --git a/models/apis/managedblockchain-query/2023-05-04/api-2.json b/models/apis/managedblockchain-query/2023-05-04/api-2.json new file mode 100644 index 00000000000..98ff96d9853 --- /dev/null +++ b/models/apis/managedblockchain-query/2023-05-04/api-2.json @@ -0,0 +1,692 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2023-05-04", + "endpointPrefix":"managedblockchain-query", + "jsonVersion":"1.1", + "protocol":"rest-json", + "ripServiceName":"chainquery", + "serviceFullName":"Amazon Managed Blockchain Query", + "serviceId":"ManagedBlockchain Query", + "signatureVersion":"v4", + "signingName":"managedblockchain-query", + "uid":"managedblockchain-query-2023-05-04" + }, + "operations":{ + "BatchGetTokenBalance":{ + "name":"BatchGetTokenBalance", + "http":{ + "method":"POST", + "requestUri":"/batch-get-token-balance", + "responseCode":200 + }, + "input":{"shape":"BatchGetTokenBalanceInput"}, + "output":{"shape":"BatchGetTokenBalanceOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"} + ] + }, + "GetTokenBalance":{ + "name":"GetTokenBalance", + "http":{ + "method":"POST", + "requestUri":"/get-token-balance", + "responseCode":200 + }, + "input":{"shape":"GetTokenBalanceInput"}, + "output":{"shape":"GetTokenBalanceOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"} + ] + }, + "GetTransaction":{ + "name":"GetTransaction", + "http":{ + "method":"POST", + "requestUri":"/get-transaction", + "responseCode":200 + }, + "input":{"shape":"GetTransactionInput"}, + "output":{"shape":"GetTransactionOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"} + ] + }, + "ListTokenBalances":{ + "name":"ListTokenBalances", + "http":{ + "method":"POST", + "requestUri":"/list-token-balances", + "responseCode":200 + }, + "input":{"shape":"ListTokenBalancesInput"}, + "output":{"shape":"ListTokenBalancesOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"} + ] + }, + "ListTransactionEvents":{ + "name":"ListTransactionEvents", + "http":{ + "method":"POST", + "requestUri":"/list-transaction-events", + "responseCode":200 + }, + "input":{"shape":"ListTransactionEventsInput"}, + "output":{"shape":"ListTransactionEventsOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"} + ] + }, + "ListTransactions":{ + "name":"ListTransactions", + "http":{ + "method":"POST", + "requestUri":"/list-transactions", + "responseCode":200 + }, + "input":{"shape":"ListTransactionsInput"}, + "output":{"shape":"ListTransactionsOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + 
{"shape":"ServiceQuotaExceededException"} + ] + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "error":{ + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "BatchGetTokenBalanceErrorItem":{ + "type":"structure", + "required":[ + "errorCode", + "errorMessage", + "errorType" + ], + "members":{ + "tokenIdentifier":{"shape":"TokenIdentifier"}, + "ownerIdentifier":{"shape":"OwnerIdentifier"}, + "atBlockchainInstant":{"shape":"BlockchainInstant"}, + "errorCode":{"shape":"String"}, + "errorMessage":{"shape":"String"}, + "errorType":{"shape":"ErrorType"} + } + }, + "BatchGetTokenBalanceErrors":{ + "type":"list", + "member":{"shape":"BatchGetTokenBalanceErrorItem"}, + "max":10, + "min":0 + }, + "BatchGetTokenBalanceInput":{ + "type":"structure", + "members":{ + "getTokenBalanceInputs":{"shape":"GetTokenBalanceInputList"} + } + }, + "BatchGetTokenBalanceInputItem":{ + "type":"structure", + "required":[ + "tokenIdentifier", + "ownerIdentifier" + ], + "members":{ + "tokenIdentifier":{"shape":"TokenIdentifier"}, + "ownerIdentifier":{"shape":"OwnerIdentifier"}, + "atBlockchainInstant":{"shape":"BlockchainInstant"} + } + }, + "BatchGetTokenBalanceOutput":{ + "type":"structure", + "required":[ + "tokenBalances", + "errors" + ], + "members":{ + "tokenBalances":{"shape":"BatchGetTokenBalanceOutputList"}, + "errors":{"shape":"BatchGetTokenBalanceErrors"} + } + }, + "BatchGetTokenBalanceOutputItem":{ + "type":"structure", + "required":[ + "balance", + "atBlockchainInstant" + ], + "members":{ + "ownerIdentifier":{"shape":"OwnerIdentifier"}, + "tokenIdentifier":{"shape":"TokenIdentifier"}, + "balance":{"shape":"String"}, + "atBlockchainInstant":{"shape":"BlockchainInstant"}, + "lastUpdatedTime":{"shape":"BlockchainInstant"} + } + }, + "BatchGetTokenBalanceOutputList":{ + "type":"list", + "member":{"shape":"BatchGetTokenBalanceOutputItem"}, + "max":10, + "min":0 + }, + "BlockHash":{ + "type":"string", + "pattern":"(0x[A-Fa-f0-9]{64}|[A-Fa-f0-9]{64})" + }, + "BlockchainInstant":{ + "type":"structure", + "members":{ + "time":{"shape":"Timestamp"} + } + }, + "ChainAddress":{ + "type":"string", + "pattern":"[-A-Za-z0-9]{13,74}" + }, + "ErrorType":{ + "type":"string", + "enum":[ + "VALIDATION_EXCEPTION", + "RESOURCE_NOT_FOUND_EXCEPTION" + ] + }, + "ExceptionMessage":{ + "type":"string", + "min":1 + }, + "GetTokenBalanceInput":{ + "type":"structure", + "required":[ + "tokenIdentifier", + "ownerIdentifier" + ], + "members":{ + "tokenIdentifier":{"shape":"TokenIdentifier"}, + "ownerIdentifier":{"shape":"OwnerIdentifier"}, + "atBlockchainInstant":{"shape":"BlockchainInstant"} + } + }, + "GetTokenBalanceInputList":{ + "type":"list", + "member":{"shape":"BatchGetTokenBalanceInputItem"}, + "max":10, + "min":1 + }, + "GetTokenBalanceOutput":{ + "type":"structure", + "required":[ + "balance", + "atBlockchainInstant" + ], + "members":{ + "ownerIdentifier":{"shape":"OwnerIdentifier"}, + "tokenIdentifier":{"shape":"TokenIdentifier"}, + "balance":{"shape":"String"}, + "atBlockchainInstant":{"shape":"BlockchainInstant"}, + "lastUpdatedTime":{"shape":"BlockchainInstant"} + } + }, + "GetTransactionInput":{ + "type":"structure", + "required":[ + "transactionHash", + "network" + ], + "members":{ + "transactionHash":{"shape":"QueryTransactionHash"}, + "network":{"shape":"QueryNetwork"} + } + }, + "GetTransactionOutput":{ + "type":"structure", + "required":["transaction"], + "members":{ + 
"transaction":{"shape":"Transaction"} + } + }, + "Integer":{ + "type":"integer", + "box":true + }, + "InternalServerException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"ExceptionMessage"}, + "retryAfterSeconds":{ + "shape":"Integer", + "location":"header", + "locationName":"Retry-After" + } + }, + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true, + "retryable":{"throttling":false} + }, + "ListTokenBalancesInput":{ + "type":"structure", + "required":["tokenFilter"], + "members":{ + "ownerFilter":{"shape":"OwnerFilter"}, + "tokenFilter":{"shape":"TokenFilter"}, + "nextToken":{"shape":"NextToken"}, + "maxResults":{"shape":"ListTokenBalancesInputMaxResultsInteger"} + } + }, + "ListTokenBalancesInputMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":250, + "min":1 + }, + "ListTokenBalancesOutput":{ + "type":"structure", + "required":["tokenBalances"], + "members":{ + "tokenBalances":{"shape":"TokenBalanceList"}, + "nextToken":{"shape":"NextToken"} + } + }, + "ListTransactionEventsInput":{ + "type":"structure", + "required":[ + "transactionHash", + "network" + ], + "members":{ + "transactionHash":{"shape":"QueryTransactionHash"}, + "network":{"shape":"QueryNetwork"}, + "nextToken":{"shape":"NextToken"}, + "maxResults":{"shape":"ListTransactionEventsInputMaxResultsInteger"} + } + }, + "ListTransactionEventsInputMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":250, + "min":1 + }, + "ListTransactionEventsOutput":{ + "type":"structure", + "required":["events"], + "members":{ + "events":{"shape":"TransactionEventList"}, + "nextToken":{"shape":"NextToken"} + } + }, + "ListTransactionsInput":{ + "type":"structure", + "required":[ + "address", + "network" + ], + "members":{ + "address":{"shape":"ChainAddress"}, + "network":{"shape":"QueryNetwork"}, + "fromBlockchainInstant":{"shape":"BlockchainInstant"}, + "toBlockchainInstant":{"shape":"BlockchainInstant"}, + "sort":{"shape":"ListTransactionsSort"}, + "nextToken":{"shape":"NextToken"}, + "maxResults":{"shape":"ListTransactionsInputMaxResultsInteger"} + } + }, + "ListTransactionsInputMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":250, + "min":1 + }, + "ListTransactionsOutput":{ + "type":"structure", + "required":["transactions"], + "members":{ + "transactions":{"shape":"TransactionOutputList"}, + "nextToken":{"shape":"NextToken"} + } + }, + "ListTransactionsSort":{ + "type":"structure", + "members":{ + "sortBy":{"shape":"ListTransactionsSortBy"}, + "sortOrder":{"shape":"SortOrder"} + } + }, + "ListTransactionsSortBy":{ + "type":"string", + "enum":["TRANSACTION_TIMESTAMP"] + }, + "Long":{ + "type":"long", + "box":true + }, + "NextToken":{ + "type":"string", + "max":131070, + "min":0 + }, + "OwnerFilter":{ + "type":"structure", + "required":["address"], + "members":{ + "address":{"shape":"ChainAddress"} + } + }, + "OwnerIdentifier":{ + "type":"structure", + "required":["address"], + "members":{ + "address":{"shape":"ChainAddress"} + } + }, + "QueryNetwork":{ + "type":"string", + "enum":[ + "ETHEREUM_MAINNET", + "BITCOIN_MAINNET" + ] + }, + "QueryTokenId":{ + "type":"string", + "pattern":"[a-zA-Z0-9]{1,66}" + }, + "QueryTransactionEventType":{ + "type":"string", + "enum":[ + "ERC20_TRANSFER", + "ERC20_MINT", + "ERC20_BURN", + "ERC20_DEPOSIT", + "ERC20_WITHDRAWAL", + "ERC721_TRANSFER", + "ERC1155_TRANSFER", + "BITCOIN_VIN", + "BITCOIN_VOUT", + "INTERNAL_ETH_TRANSFER", + "ETH_TRANSFER" + ] + }, + "QueryTransactionHash":{ + "type":"string", + 
"pattern":"(0x[A-Fa-f0-9]{64}|[A-Fa-f0-9]{64})" + }, + "QueryTransactionStatus":{ + "type":"string", + "enum":[ + "FINAL", + "FAILED" + ] + }, + "QuotaCode":{"type":"string"}, + "ResourceId":{"type":"string"}, + "ResourceNotFoundException":{ + "type":"structure", + "required":[ + "message", + "resourceId", + "resourceType" + ], + "members":{ + "message":{"shape":"ExceptionMessage"}, + "resourceId":{"shape":"ResourceId"}, + "resourceType":{"shape":"ResourceType"} + }, + "error":{ + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ResourceType":{ + "type":"string", + "enum":["collection"] + }, + "ServiceCode":{"type":"string"}, + "ServiceQuotaExceededException":{ + "type":"structure", + "required":[ + "message", + "resourceId", + "resourceType", + "serviceCode", + "quotaCode" + ], + "members":{ + "message":{"shape":"ExceptionMessage"}, + "resourceId":{"shape":"ResourceId"}, + "resourceType":{"shape":"ResourceType"}, + "serviceCode":{"shape":"ServiceCode"}, + "quotaCode":{"shape":"QuotaCode"} + }, + "error":{ + "httpStatusCode":402, + "senderFault":true + }, + "exception":true + }, + "SortOrder":{ + "type":"string", + "enum":[ + "ASCENDING", + "DESCENDING" + ] + }, + "String":{"type":"string"}, + "ThrottlingException":{ + "type":"structure", + "required":[ + "message", + "serviceCode", + "quotaCode" + ], + "members":{ + "message":{"shape":"ExceptionMessage"}, + "serviceCode":{"shape":"ServiceCode"}, + "quotaCode":{"shape":"QuotaCode"}, + "retryAfterSeconds":{ + "shape":"Integer", + "location":"header", + "locationName":"Retry-After" + } + }, + "error":{ + "httpStatusCode":429, + "senderFault":true + }, + "exception":true, + "retryable":{"throttling":true} + }, + "Timestamp":{"type":"timestamp"}, + "TokenBalance":{ + "type":"structure", + "required":[ + "balance", + "atBlockchainInstant" + ], + "members":{ + "ownerIdentifier":{"shape":"OwnerIdentifier"}, + "tokenIdentifier":{"shape":"TokenIdentifier"}, + "balance":{"shape":"String"}, + "atBlockchainInstant":{"shape":"BlockchainInstant"}, + "lastUpdatedTime":{"shape":"BlockchainInstant"} + } + }, + "TokenBalanceList":{ + "type":"list", + "member":{"shape":"TokenBalance"}, + "max":250, + "min":0 + }, + "TokenFilter":{ + "type":"structure", + "required":["network"], + "members":{ + "network":{"shape":"QueryNetwork"}, + "contractAddress":{"shape":"ChainAddress"}, + "tokenId":{"shape":"QueryTokenId"} + } + }, + "TokenIdentifier":{ + "type":"structure", + "required":["network"], + "members":{ + "network":{"shape":"QueryNetwork"}, + "contractAddress":{"shape":"ChainAddress"}, + "tokenId":{"shape":"QueryTokenId"} + } + }, + "Transaction":{ + "type":"structure", + "required":[ + "network", + "transactionHash", + "transactionTimestamp", + "transactionIndex", + "numberOfTransactions", + "status", + "to" + ], + "members":{ + "network":{"shape":"QueryNetwork"}, + "blockHash":{"shape":"BlockHash"}, + "transactionHash":{"shape":"QueryTransactionHash"}, + "blockNumber":{"shape":"String"}, + "transactionTimestamp":{"shape":"Timestamp"}, + "transactionIndex":{"shape":"Long"}, + "numberOfTransactions":{"shape":"Long"}, + "status":{"shape":"QueryTransactionStatus"}, + "to":{"shape":"ChainAddress"}, + "from":{"shape":"ChainAddress"}, + "contractAddress":{"shape":"ChainAddress"}, + "gasUsed":{"shape":"String"}, + "cumulativeGasUsed":{"shape":"String"}, + "effectiveGasPrice":{"shape":"String"}, + "signatureV":{"shape":"Integer"}, + "signatureR":{"shape":"String"}, + "signatureS":{"shape":"String"}, + "transactionFee":{"shape":"String"}, + 
"transactionId":{"shape":"String"} + } + }, + "TransactionEvent":{ + "type":"structure", + "required":[ + "network", + "transactionHash", + "eventType" + ], + "members":{ + "network":{"shape":"QueryNetwork"}, + "transactionHash":{"shape":"QueryTransactionHash"}, + "eventType":{"shape":"QueryTransactionEventType"}, + "from":{"shape":"ChainAddress"}, + "to":{"shape":"ChainAddress"}, + "value":{"shape":"String"}, + "contractAddress":{"shape":"ChainAddress"}, + "tokenId":{"shape":"QueryTokenId"}, + "transactionId":{"shape":"String"}, + "voutIndex":{"shape":"Integer"} + } + }, + "TransactionEventList":{ + "type":"list", + "member":{"shape":"TransactionEvent"}, + "max":250, + "min":0 + }, + "TransactionOutputItem":{ + "type":"structure", + "required":[ + "transactionHash", + "network", + "transactionTimestamp" + ], + "members":{ + "transactionHash":{"shape":"QueryTransactionHash"}, + "network":{"shape":"QueryNetwork"}, + "transactionTimestamp":{"shape":"Timestamp"} + } + }, + "TransactionOutputList":{ + "type":"list", + "member":{"shape":"TransactionOutputItem"}, + "max":250, + "min":0 + }, + "ValidationException":{ + "type":"structure", + "required":[ + "message", + "reason" + ], + "members":{ + "message":{"shape":"ExceptionMessage"}, + "reason":{"shape":"ValidationExceptionReason"}, + "fieldList":{"shape":"ValidationExceptionFieldList"} + }, + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ValidationExceptionField":{ + "type":"structure", + "required":[ + "name", + "message" + ], + "members":{ + "name":{"shape":"String"}, + "message":{"shape":"String"} + } + }, + "ValidationExceptionFieldList":{ + "type":"list", + "member":{"shape":"ValidationExceptionField"} + }, + "ValidationExceptionReason":{ + "type":"string", + "enum":[ + "unknownOperation", + "cannotParse", + "fieldValidationFailed", + "other" + ] + } + } +} diff --git a/models/apis/managedblockchain-query/2023-05-04/docs-2.json b/models/apis/managedblockchain-query/2023-05-04/docs-2.json new file mode 100644 index 00000000000..8438860f542 --- /dev/null +++ b/models/apis/managedblockchain-query/2023-05-04/docs-2.json @@ -0,0 +1,453 @@ +{ + "version": "2.0", + "service": "

Amazon Managed Blockchain (AMB) Query provides you with convenient access to multi-blockchain network data, which makes it easier for you to extract contextual data related to blockchain activity. You can use AMB Query to read data from public blockchain networks, such as Bitcoin Mainnet and Ethereum Mainnet. You can also get information such as the current and historical balances of addresses, or you can get a list of blockchain transactions for a given time period. Additionally, you can get details of a given transaction, such as transaction events, which you can further analyze or use in business logic for your applications.

", + "operations": { + "BatchGetTokenBalance": "

Gets the token balance for a batch of tokens by using the GetTokenBalance action for every token in the request.

Only the native tokens BTC and ETH, and the ERC-20, ERC-721, and ERC-1155 token standards are supported.

", + "GetTokenBalance": "

Gets the balance of a specific token, including native tokens, for a given address (wallet or contract) on the blockchain.

Only the native tokens BTC and ETH, and the ERC-20, ERC-721, and ERC-1155 token standards are supported.
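
As a quick orientation to the new service, here is a hedged aws-sdk-go (v1) sketch of a GetTokenBalance call; the Go package name follows the SDK's usual convention for new services, and both addresses are placeholders rather than real deployments.

```go
// Sketch only: fetch the current ERC-20 balance of a wallet on Ethereum
// Mainnet. The package name and both addresses are assumptions.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/managedblockchainquery"
)

func main() {
	client := managedblockchainquery.New(session.Must(session.NewSession()))

	out, err := client.GetTokenBalance(&managedblockchainquery.GetTokenBalanceInput{
		TokenIdentifier: &managedblockchainquery.TokenIdentifier{
			Network:         aws.String("ETHEREUM_MAINNET"),
			ContractAddress: aws.String("0x1111111111111111111111111111111111111111"), // placeholder
		},
		OwnerIdentifier: &managedblockchainquery.OwnerIdentifier{
			Address: aws.String("0x2222222222222222222222222222222222222222"), // placeholder
		},
		// AtBlockchainInstant omitted: the balance defaults to the current time.
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("balance:", aws.StringValue(out.Balance))
}
```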

", + "GetTransaction": "

Gets the details of a transaction.

", + "ListTokenBalances": "

This action returns the following for a given blockchain network:

You must always specify the network property of the tokenFilter when using this operation.

", + "ListTransactionEvents": "

Returns an array of TransactionEvent objects for a given transaction. Each object contains details about the transaction event.

", + "ListTransactions": "

Lists all of the transactions on a given wallet address or to a specific contract.

" + }, + "shapes": { + "AccessDeniedException": { + "base": "

The Amazon Web Services account doesn’t have access to this resource.

", + "refs": { + } + }, + "BatchGetTokenBalanceErrorItem": { + "base": "

Error generated from a failed BatchGetTokenBalance request.

", + "refs": { + "BatchGetTokenBalanceErrors$member": null + } + }, + "BatchGetTokenBalanceErrors": { + "base": null, + "refs": { + "BatchGetTokenBalanceOutput$errors": "

An array of BatchGetTokenBalanceErrorItem objects returned from the request.

" + } + }, + "BatchGetTokenBalanceInput": { + "base": null, + "refs": { + } + }, + "BatchGetTokenBalanceInputItem": { + "base": "

The container for the input for getting a token balance.

", + "refs": { + "GetTokenBalanceInputList$member": null + } + }, + "BatchGetTokenBalanceOutput": { + "base": null, + "refs": { + } + }, + "BatchGetTokenBalanceOutputItem": { + "base": "

The container for the properties of a token balance output.

", + "refs": { + "BatchGetTokenBalanceOutputList$member": null + } + }, + "BatchGetTokenBalanceOutputList": { + "base": null, + "refs": { + "BatchGetTokenBalanceOutput$tokenBalances": "

An array of BatchGetTokenBalanceOutputItem objects returned by the response.

" + } + }, + "BlockHash": { + "base": null, + "refs": { + "Transaction$blockHash": "

The block hash is a unique identifier for a block. It is a fixed-size string that is calculated by using the information in the block. The block hash is used to verify the integrity of the data in the block.

" + } + }, + "BlockchainInstant": { + "base": "

The container for time.

", + "refs": { + "BatchGetTokenBalanceErrorItem$atBlockchainInstant": null, + "BatchGetTokenBalanceInputItem$atBlockchainInstant": null, + "BatchGetTokenBalanceOutputItem$atBlockchainInstant": null, + "BatchGetTokenBalanceOutputItem$lastUpdatedTime": null, + "GetTokenBalanceInput$atBlockchainInstant": "

The time at which the TokenBalance is requested, or the current time if a time is not provided in the request.

This time will only be recorded up to the second.

", + "GetTokenBalanceOutput$atBlockchainInstant": null, + "GetTokenBalanceOutput$lastUpdatedTime": null, + "ListTransactionsInput$fromBlockchainInstant": null, + "ListTransactionsInput$toBlockchainInstant": null, + "TokenBalance$atBlockchainInstant": "

The time at which the TokenBalance is requested, or the current time if a time is not provided in the request.

This time will only be recorded up to the second.

", + "TokenBalance$lastUpdatedTime": "

The timestamp of the last transaction at which the balance for the token in the wallet was updated.

" + } + }, + "ChainAddress": { + "base": null, + "refs": { + "ListTransactionsInput$address": "

The address (either a contract or wallet) whose transactions are being requested.

", + "OwnerFilter$address": "

The contract or wallet address.

", + "OwnerIdentifier$address": "

The contract or wallet address for the owner.

", + "TokenFilter$contractAddress": "

This is the address of the contract.

", + "TokenIdentifier$contractAddress": "

This is the token's contract address.

", + "Transaction$to": "

The address receiving the transaction. It can either be a public key or a contract.

", + "Transaction$from": "

The initiator of the transaction. It is either in the form of a public key or a contract address.

", + "Transaction$contractAddress": "

The blockchain address for the contract.

", + "TransactionEvent$from": "

The wallet address initiating the transaction. It can either be a public key or a contract.

", + "TransactionEvent$to": "

The wallet address receiving the transaction. It can either be a public key or a contract.

", + "TransactionEvent$contractAddress": "

The blockchain address for the contract.

" + } + }, + "ErrorType": { + "base": null, + "refs": { + "BatchGetTokenBalanceErrorItem$errorType": "

The type of error.

" + } + }, + "ExceptionMessage": { + "base": null, + "refs": { + "AccessDeniedException$message": "

The container for the exception message.

", + "InternalServerException$message": "

The container for the exception message.

", + "ResourceNotFoundException$message": "

The container for the exception message.

", + "ServiceQuotaExceededException$message": "

The container for the exception message.

", + "ThrottlingException$message": "

The container for the exception message.

", + "ValidationException$message": "

The container for the exception message.

" + } + }, + "GetTokenBalanceInput": { + "base": null, + "refs": { + } + }, + "GetTokenBalanceInputList": { + "base": null, + "refs": { + "BatchGetTokenBalanceInput$getTokenBalanceInputs": "

An array of GetTokenBalanceInput objects whose balance is being requested.

" + } + }, + "GetTokenBalanceOutput": { + "base": null, + "refs": { + } + }, + "GetTransactionInput": { + "base": null, + "refs": { + } + }, + "GetTransactionOutput": { + "base": null, + "refs": { + } + }, + "Integer": { + "base": null, + "refs": { + "InternalServerException$retryAfterSeconds": "

The container of the retryAfterSeconds value.

", + "ThrottlingException$retryAfterSeconds": "

The container of the retryAfterSeconds value.

", + "Transaction$signatureV": "

The signature of the transaction. The Z coordinate of a point V.

", + "TransactionEvent$voutIndex": "

The position of the vout in the transaction output list.

" + } + }, + "InternalServerException": { + "base": "

The request processing has failed because of an internal error in the service.

", + "refs": { + } + }, + "ListTokenBalancesInput": { + "base": null, + "refs": { + } + }, + "ListTokenBalancesInputMaxResultsInteger": { + "base": null, + "refs": { + "ListTokenBalancesInput$maxResults": "

The maximum number of token balances to return.

" + } + }, + "ListTokenBalancesOutput": { + "base": null, + "refs": { + } + }, + "ListTransactionEventsInput": { + "base": null, + "refs": { + } + }, + "ListTransactionEventsInputMaxResultsInteger": { + "base": null, + "refs": { + "ListTransactionEventsInput$maxResults": "

The maximum number of transaction events to list.

Even if additional results can be retrieved, the request can return fewer results than maxResults or an empty array of results.

To retrieve the next set of results, make another request with the returned nextToken value. The value of nextToken is null when there are no more results to return.

" + } + }, + "ListTransactionEventsOutput": { + "base": null, + "refs": { + } + }, + "ListTransactionsInput": { + "base": null, + "refs": { + } + }, + "ListTransactionsInputMaxResultsInteger": { + "base": null, + "refs": { + "ListTransactionsInput$maxResults": "

The maximum number of transactions to list.

Even if additional results can be retrieved, the request can return fewer results than maxResults or an empty array of results.

To retrieve the next set of results, make another request with the returned nextToken value. The value of nextToken is null when there are no more results to return.
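
The nextToken contract described here implies the usual pagination loop. A hedged aws-sdk-go (v1) sketch follows; the client type name matches the SDK's usual convention and, like the placeholder address, is an assumption.

```go
// Sketch only: page through all transactions for a placeholder address,
// oldest first, stopping when nextToken comes back nil.
package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/managedblockchainquery"
)

func listAllTransactions(client *managedblockchainquery.ManagedBlockchainQuery) error {
	input := &managedblockchainquery.ListTransactionsInput{
		Address:    aws.String("0x2222222222222222222222222222222222222222"), // placeholder
		Network:    aws.String("ETHEREUM_MAINNET"),
		MaxResults: aws.Int64(250), // the documented per-page maximum
		Sort: &managedblockchainquery.ListTransactionsSort{
			SortBy:    aws.String("TRANSACTION_TIMESTAMP"),
			SortOrder: aws.String("ASCENDING"),
		},
	}
	for {
		page, err := client.ListTransactions(input)
		if err != nil {
			return err
		}
		for _, tx := range page.Transactions {
			fmt.Println(aws.StringValue(tx.TransactionHash))
		}
		if page.NextToken == nil {
			return nil
		}
		input.NextToken = page.NextToken
	}
}
```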

" + } + }, + "ListTransactionsOutput": { + "base": null, + "refs": { + } + }, + "ListTransactionsSort": { + "base": "

The container for determining how the results of ListTransactions will be sorted.

", + "refs": { + "ListTransactionsInput$sort": "

Sorts items in ascending order if the first page starts at fromTime, and in descending order if the first page starts at toTime.

" + } + }, + "ListTransactionsSortBy": { + "base": null, + "refs": { + "ListTransactionsSort$sortBy": "

Defaults to the value TRANSACTION_TIMESTAMP.

" + } + }, + "Long": { + "base": null, + "refs": { + "Transaction$transactionIndex": "

The index of the transaction within a blockchain.

", + "Transaction$numberOfTransactions": "

The number of transactions in the block.

" + } + }, + "NextToken": { + "base": null, + "refs": { + "ListTokenBalancesInput$nextToken": "

The pagination token that indicates the next set of results to retrieve.

", + "ListTokenBalancesOutput$nextToken": "

The pagination token that indicates the next set of results to retrieve.

", + "ListTransactionEventsInput$nextToken": "

The pagination token that indicates the next set of results to retrieve.

", + "ListTransactionEventsOutput$nextToken": "

The pagination token that indicates the next set of results to retrieve.

", + "ListTransactionsInput$nextToken": "

The pagination token that indicates the next set of results to retrieve.

", + "ListTransactionsOutput$nextToken": "

The pagination token that indicates the next set of results to retrieve.

" + } + }, + "OwnerFilter": { + "base": "

The container for the owner information to filter by.

", + "refs": { + "ListTokenBalancesInput$ownerFilter": "

The contract or wallet address on the blockchain network by which to filter the request. You must specify the address property of the ownerFilter when listing balances of tokens owned by the address.

" + } + }, + "OwnerIdentifier": { + "base": "

The container for the identifier of the owner.

", + "refs": { + "BatchGetTokenBalanceErrorItem$ownerIdentifier": null, + "BatchGetTokenBalanceInputItem$ownerIdentifier": null, + "BatchGetTokenBalanceOutputItem$ownerIdentifier": null, + "GetTokenBalanceInput$ownerIdentifier": "

The container for the identifier for the owner.

", + "GetTokenBalanceOutput$ownerIdentifier": null, + "TokenBalance$ownerIdentifier": "

The container for the identifier of the owner.

" + } + }, + "QueryNetwork": { + "base": null, + "refs": { + "GetTransactionInput$network": "

The blockchain network where the transaction occurred.

", + "ListTransactionEventsInput$network": "

The blockchain network where the transaction events occurred.

", + "ListTransactionsInput$network": "

The blockchain network where the transactions occurred.

", + "TokenFilter$network": "

The blockchain network of the token.

", + "TokenIdentifier$network": "

The blockchain network of the token.

", + "Transaction$network": "

The blockchain network where the transaction occurred.

", + "TransactionEvent$network": "

The blockchain network where the transaction occurred.

", + "TransactionOutputItem$network": "

The blockchain network where the transaction occurred.

" + } + }, + "QueryTokenId": { + "base": null, + "refs": { + "TokenFilter$tokenId": "

The unique identifier of the token.

", + "TokenIdentifier$tokenId": "

The unique identifier of the token.

", + "TransactionEvent$tokenId": "

The unique identifier for the token involved in the transaction.

" + } + }, + "QueryTransactionEventType": { + "base": null, + "refs": { + "TransactionEvent$eventType": "

The type of transaction event.

" + } + }, + "QueryTransactionHash": { + "base": null, + "refs": { + "GetTransactionInput$transactionHash": "

The hash of the transaction. It is generated whenever a transaction is verified and added to the blockchain.

", + "ListTransactionEventsInput$transactionHash": "

The hash of the transaction. It is generated whenever a transaction is verified and added to the blockchain.

", + "Transaction$transactionHash": "

The hash of the transaction. It is generated whenever a transaction is verified and added to the blockchain.

", + "TransactionEvent$transactionHash": "

The hash of the transaction. It is generated whenever a transaction is verified and added to the blockchain.

", + "TransactionOutputItem$transactionHash": "

The hash of the transaction. It is generated whenever a transaction is verified and added to the blockchain.

" + } + }, + "QueryTransactionStatus": { + "base": null, + "refs": { + "Transaction$status": "

The status of the transaction.

" + } + }, + "QuotaCode": { + "base": null, + "refs": { + "ServiceQuotaExceededException$quotaCode": "

The container for the quotaCode.

", + "ThrottlingException$quotaCode": "

The container for the quotaCode.

" + } + }, + "ResourceId": { + "base": null, + "refs": { + "ResourceNotFoundException$resourceId": "

The resourceId of the resource that caused the exception.

", + "ServiceQuotaExceededException$resourceId": "

The resourceId of the resource that caused the exception.

" + } + }, + "ResourceNotFoundException": { + "base": "

The resource was not found.

", + "refs": { + } + }, + "ResourceType": { + "base": null, + "refs": { + "ResourceNotFoundException$resourceType": "

The resourceType of the resource that caused the exception.

", + "ServiceQuotaExceededException$resourceType": "

The resourceType of the resource that caused the exception.

" + } + }, + "ServiceCode": { + "base": null, + "refs": { + "ServiceQuotaExceededException$serviceCode": "

The container for the serviceCode.

", + "ThrottlingException$serviceCode": "

The container for the serviceCode.

" + } + }, + "ServiceQuotaExceededException": { + "base": "

The service quota has been exceeded for this resource.

", + "refs": { + } + }, + "SortOrder": { + "base": null, + "refs": { + "ListTransactionsSort$sortOrder": "

The container for the sort order for ListTransactions. The SortOrder field accepts only the values ASCENDING and DESCENDING. If SortOrder is not provided, it defaults to ASCENDING.

" + } + }, + "String": { + "base": null, + "refs": { + "BatchGetTokenBalanceErrorItem$errorCode": "

The error code associated with the error.

", + "BatchGetTokenBalanceErrorItem$errorMessage": "

The message associated with the error.

", + "BatchGetTokenBalanceOutputItem$balance": "

The container for the token balance.

", + "GetTokenBalanceOutput$balance": "

The container for the token balance.

", + "TokenBalance$balance": "

The container of the token balance.

", + "Transaction$blockNumber": "

The block number in which the transaction is recorded.

", + "Transaction$gasUsed": "

The amount of gas used for the transaction.

", + "Transaction$cumulativeGasUsed": "

The amount of gas used up to the specified point in the block.

", + "Transaction$effectiveGasPrice": "

The effective gas price.

", + "Transaction$signatureR": "

The signature of the transaction. The X coordinate of a point R.

", + "Transaction$signatureS": "

The signature of the transaction. The Y coordinate of a point S.

", + "Transaction$transactionFee": "

The transaction fee.

", + "Transaction$transactionId": "

The unique identifier of the transaction. It is generated whenever a transaction is verified and added to the blockchain.

", + "TransactionEvent$value": "

The value that was transacted.

", + "TransactionEvent$transactionId": "

The unique identifier of the transaction. It is generated whenever a transaction is verified and added to the blockchain.

", + "ValidationExceptionField$name": "

The name of the field that triggered the ValidationException.

", + "ValidationExceptionField$message": "

The ValidationException message.

" + } + }, + "ThrottlingException": { + "base": "

The request or operation couldn't be performed because a service is throttling requests. The most common source of throttling errors is when you create resources that exceed your service limit for this resource type. Request a limit increase or delete unused resources, if possible.

", + "refs": { + } + }, + "Timestamp": { + "base": null, + "refs": { + "BlockchainInstant$time": "

The container of the Timestamp of the blockchain instant.

This timestamp will only be recorded up to the second.

", + "Transaction$transactionTimestamp": "

The Timestamp of the transaction.

", + "TransactionOutputItem$transactionTimestamp": "

The time when the transaction occurred.

" + } + }, + "TokenBalance": { + "base": "

The balance of the token.

", + "refs": { + "TokenBalanceList$member": null + } + }, + "TokenBalanceList": { + "base": null, + "refs": { + "ListTokenBalancesOutput$tokenBalances": "

An array of TokenBalance objects. Each object contains details about the token balance.

" + } + }, + "TokenFilter": { + "base": "

The container of the token filter, such as the contract address on a given blockchain network or a unique token identifier on a given blockchain network.

You must always specify the network property of this container when using this operation.

", + "refs": { + "ListTokenBalancesInput$tokenFilter": "

The contract address or a token identifier on the blockchain network by which to filter the request. You must specify the contractAddress property of this container when listing tokens minted by a contract.

You must always specify the network property of this container when using this operation.

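A hypothetical sketch combining the tokenFilter rules above with the nextToken pagination declared in paginators-1.json later in this diff. The field names (TokenFilter, Network, ContractAddress, NextToken, Balance) are assumptions drawn from the model, not confirmed SDK surface.

```go
// Assumed sketch: list every token balance for a contract, page by page.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/managedblockchainquery"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := managedblockchainquery.New(sess)

	input := &managedblockchainquery.ListTokenBalancesInput{
		TokenFilter: &managedblockchainquery.TokenFilter{
			Network:         aws.String("ETHEREUM_MAINNET"),  // always required (value assumed)
			ContractAddress: aws.String("0xexamplecontract"), // required for contract-minted tokens
		},
	}
	for {
		out, err := svc.ListTokenBalances(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, b := range out.TokenBalances { // result_key "tokenBalances"
			fmt.Println(aws.StringValue(b.Balance))
		}
		if out.NextToken == nil { // output_token "nextToken"
			break
		}
		input.NextToken = out.NextToken // feed back as input_token "nextToken"
	}
}
```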
" + } + }, + "TokenIdentifier": { + "base": "

The container for the identifier for the token, including the unique token ID and its blockchain network.

Only the native tokens BTC and ETH, and the ERC-20, ERC-721, and ERC-1155 token standards are supported.

", + "refs": { + "BatchGetTokenBalanceErrorItem$tokenIdentifier": null, + "BatchGetTokenBalanceInputItem$tokenIdentifier": null, + "BatchGetTokenBalanceOutputItem$tokenIdentifier": null, + "GetTokenBalanceInput$tokenIdentifier": "

The container for the identifier for the token, including the unique token ID and its blockchain network.

", + "GetTokenBalanceOutput$tokenIdentifier": null, + "TokenBalance$tokenIdentifier": "

The identifier for the token, including the unique token ID and its blockchain network.

" + } + }, + "Transaction": { + "base": "

There are two possible types of transactions used for this data type:

", + "refs": { + "GetTransactionOutput$transaction": "

Contains the details of the transaction.

" + } + }, + "TransactionEvent": { + "base": "

The container for the properties of a transaction event.

", + "refs": { + "TransactionEventList$member": null + } + }, + "TransactionEventList": { + "base": null, + "refs": { + "ListTransactionEventsOutput$events": "

An array of TransactionEvent objects. Each object contains details about one transaction event.

" + } + }, + "TransactionOutputItem": { + "base": "

The container for the transaction output.

", + "refs": { + "TransactionOutputList$member": null + } + }, + "TransactionOutputList": { + "base": null, + "refs": { + "ListTransactionsOutput$transactions": "

The array of transactions returned by the request.

" + } + }, + "ValidationException": { + "base": "

The resource passed is invalid.

", + "refs": { + } + }, + "ValidationExceptionField": { + "base": "

The resource passed is invalid.

", + "refs": { + "ValidationExceptionFieldList$member": null + } + }, + "ValidationExceptionFieldList": { + "base": null, + "refs": { + "ValidationException$fieldList": "

The container for the fieldList of the exception.

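A sketch of surfacing those field-level details in application code: aws-sdk-go v1 generates a concrete error type per modeled exception, so the type and member names below (ValidationException, FieldList, Name, Message) are assumptions taken from the model above.

```go
// Assumed sketch: log each invalid field reported in a ValidationException.
package main

import (
	"errors"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/managedblockchainquery"
)

func logValidation(err error) {
	var ve *managedblockchainquery.ValidationException
	if errors.As(err, &ve) {
		for _, f := range ve.FieldList {
			log.Printf("invalid field %s: %s",
				aws.StringValue(f.Name), aws.StringValue(f.Message))
		}
	}
}

func main() {
	logValidation(nil) // no-op here; wire this into real call sites
}
```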
" + } + }, + "ValidationExceptionReason": { + "base": null, + "refs": { + "ValidationException$reason": "

The container for the reason for the exception.

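The endpoint-rule-set file that the diff introduces next is a small decision tree: a custom Endpoint wins but refuses FIPS and dual-stack; otherwise the hostname is assembled from the service prefix, an optional -fips suffix, the region, and the partition's normal or dual-stack DNS suffix. A plain-Go paraphrase of that tree (a sketch with the partition lookup stubbed, not the SDK's real aws.partition data):

```go
// Plain-Go paraphrase of the managedblockchain-query endpoint rule set below.
// The partition struct is a stub; suffixes shown cover the standard partition.
package main

import (
	"errors"
	"fmt"
)

type partition struct {
	dnsSuffix          string // e.g. "amazonaws.com"
	dualStackDNSSuffix string // e.g. "api.aws"
	supportsFIPS       bool
	supportsDualStack  bool
}

func resolve(region, endpoint string, useFIPS, useDualStack bool, p partition) (string, error) {
	if endpoint != "" {
		if useFIPS {
			return "", errors.New("Invalid Configuration: FIPS and custom endpoint are not supported")
		}
		if useDualStack {
			return "", errors.New("Invalid Configuration: Dualstack and custom endpoint are not supported")
		}
		return endpoint, nil
	}
	if region == "" {
		return "", errors.New("Invalid Configuration: Missing Region")
	}
	switch {
	case useFIPS && useDualStack:
		if !p.supportsFIPS || !p.supportsDualStack {
			return "", errors.New("FIPS and DualStack are enabled, but this partition does not support one or both")
		}
		return "https://managedblockchain-query-fips." + region + "." + p.dualStackDNSSuffix, nil
	case useFIPS:
		if !p.supportsFIPS {
			return "", errors.New("FIPS is enabled but this partition does not support FIPS")
		}
		return "https://managedblockchain-query-fips." + region + "." + p.dnsSuffix, nil
	case useDualStack:
		if !p.supportsDualStack {
			return "", errors.New("DualStack is enabled but this partition does not support DualStack")
		}
		return "https://managedblockchain-query." + region + "." + p.dualStackDNSSuffix, nil
	default:
		return "https://managedblockchain-query." + region + "." + p.dnsSuffix, nil
	}
}

func main() {
	std := partition{dnsSuffix: "amazonaws.com", dualStackDNSSuffix: "api.aws", supportsFIPS: true, supportsDualStack: true}
	u, _ := resolve("us-east-1", "", true, true, std)
	fmt.Println(u) // https://managedblockchain-query-fips.us-east-1.api.aws
}
```

The endpoint-tests file that follows the rule set pins the same expectations, for example that us-east-1 with FIPS and dual-stack enabled resolves to https://managedblockchain-query-fips.us-east-1.api.aws.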
" + } + } + } +} diff --git a/models/apis/managedblockchain-query/2023-05-04/endpoint-rule-set-1.json b/models/apis/managedblockchain-query/2023-05-04/endpoint-rule-set-1.json new file mode 100644 index 00000000000..1f94e43d857 --- /dev/null +++ b/models/apis/managedblockchain-query/2023-05-04/endpoint-rule-set-1.json @@ -0,0 +1,350 @@ +{ + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://managedblockchain-query-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + 
] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://managedblockchain-query-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://managedblockchain-query.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://managedblockchain-query.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + } + ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ] + } + ] +} \ No newline at end of file diff --git a/models/apis/managedblockchain-query/2023-05-04/endpoint-tests-1.json b/models/apis/managedblockchain-query/2023-05-04/endpoint-tests-1.json new file mode 100644 index 00000000000..7287b49a660 --- /dev/null +++ b/models/apis/managedblockchain-query/2023-05-04/endpoint-tests-1.json @@ -0,0 +1,314 @@ +{ + "testCases": [ + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://managedblockchain-query-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://managedblockchain-query-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://managedblockchain-query.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://managedblockchain-query.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://managedblockchain-query-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + 
"UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://managedblockchain-query-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://managedblockchain-query.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://managedblockchain-query.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://managedblockchain-query-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://managedblockchain-query-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://managedblockchain-query.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://managedblockchain-query.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://managedblockchain-query-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://managedblockchain-query.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + 
"UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://managedblockchain-query-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://managedblockchain-query.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } + } + ], + "version": "1.0" +} \ No newline at end of file diff --git a/models/apis/managedblockchain-query/2023-05-04/examples-1.json b/models/apis/managedblockchain-query/2023-05-04/examples-1.json new file mode 100644 index 00000000000..0ea7e3b0bbe --- /dev/null +++ b/models/apis/managedblockchain-query/2023-05-04/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/models/apis/managedblockchain-query/2023-05-04/paginators-1.json b/models/apis/managedblockchain-query/2023-05-04/paginators-1.json new file mode 100644 index 00000000000..c8f2452b78f --- /dev/null +++ b/models/apis/managedblockchain-query/2023-05-04/paginators-1.json @@ -0,0 +1,22 @@ +{ + "pagination": { + "ListTokenBalances": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "tokenBalances" + }, + "ListTransactionEvents": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "events" + }, + "ListTransactions": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "transactions" + } + } +} diff --git a/models/apis/managedblockchain-query/2023-05-04/smoke.json 
b/models/apis/managedblockchain-query/2023-05-04/smoke.json new file mode 100644 index 00000000000..a9756813e4a --- /dev/null +++ b/models/apis/managedblockchain-query/2023-05-04/smoke.json @@ -0,0 +1,6 @@ +{ + "version": 1, + "defaultRegion": "us-west-2", + "testCases": [ + ] +} diff --git a/models/apis/managedblockchain-query/2023-05-04/waiters-2.json b/models/apis/managedblockchain-query/2023-05-04/waiters-2.json new file mode 100644 index 00000000000..13f60ee66be --- /dev/null +++ b/models/apis/managedblockchain-query/2023-05-04/waiters-2.json @@ -0,0 +1,5 @@ +{ + "version": 2, + "waiters": { + } +} diff --git a/models/apis/mediaconvert/2017-08-29/api-2.json b/models/apis/mediaconvert/2017-08-29/api-2.json index 8f3be5c8a24..97725f2474a 100644 --- a/models/apis/mediaconvert/2017-08-29/api-2.json +++ b/models/apis/mediaconvert/2017-08-29/api-2.json @@ -11830,4 +11830,4 @@ "timestampFormat": "unixTimestamp" } } -} \ No newline at end of file +} diff --git a/models/apis/mediaconvert/2017-08-29/docs-2.json b/models/apis/mediaconvert/2017-08-29/docs-2.json index 977035f7fea..03a3ceca4fa 100644 --- a/models/apis/mediaconvert/2017-08-29/docs-2.json +++ b/models/apis/mediaconvert/2017-08-29/docs-2.json @@ -63,9 +63,9 @@ } }, "AacSettings": { - "base": "Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value AAC. The service accepts one of two mutually exclusive groups of AAC settings--VBR and CBR. To select one of these modes, set the value of Bitrate control mode (rateControlMode) to \"VBR\" or \"CBR\". In VBR mode, you control the audio quality with the setting VBR quality (vbrQuality). In CBR mode, you use the setting Bitrate (bitrate). Defaults and valid values depend on the rate control mode.", + "base": "Required when you set Codec to the value AAC. The service accepts one of two mutually exclusive groups of AAC settings--VBR and CBR. To select one of these modes, set the value of Bitrate control mode to \"VBR\" or \"CBR\". In VBR mode, you control the audio quality with the setting VBR quality. In CBR mode, you use the setting Bitrate. Defaults and valid values depend on the rate control mode.", "refs": { - "AudioCodecSettings$AacSettings": "Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value AAC. The service accepts one of two mutually exclusive groups of AAC settings--VBR and CBR. To select one of these modes, set the value of Bitrate control mode (rateControlMode) to \"VBR\" or \"CBR\". In VBR mode, you control the audio quality with the setting VBR quality (vbrQuality). In CBR mode, you use the setting Bitrate (bitrate). Defaults and valid values depend on the rate control mode." + "AudioCodecSettings$AacSettings": "Required when you set Codec to the value AAC. The service accepts one of two mutually exclusive groups of AAC settings--VBR and CBR. To select one of these modes, set the value of Bitrate control mode to \"VBR\" or \"CBR\". In VBR mode, you control the audio quality with the setting VBR quality. In CBR mode, you use the setting Bitrate. Defaults and valid values depend on the rate control mode." } }, "AacSpecification": { @@ -93,21 +93,21 @@ } }, "Ac3DynamicRangeCompressionLine": { - "base": "Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby Digital stream for the line operating mode. 
Related setting: When you use this setting, MediaConvert ignores any value you provide for Dynamic range compression profile (DynamicRangeCompressionProfile). For information about the Dolby Digital DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.", + "base": "Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby Digital stream for the line operating mode. Related setting: When you use this setting, MediaConvert ignores any value you provide for Dynamic range compression profile. For information about the Dolby Digital DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.", "refs": { - "Ac3Settings$DynamicRangeCompressionLine": "Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby Digital stream for the line operating mode. Related setting: When you use this setting, MediaConvert ignores any value you provide for Dynamic range compression profile (DynamicRangeCompressionProfile). For information about the Dolby Digital DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf." + "Ac3Settings$DynamicRangeCompressionLine": "Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby Digital stream for the line operating mode. Related setting: When you use this setting, MediaConvert ignores any value you provide for Dynamic range compression profile. For information about the Dolby Digital DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf." } }, "Ac3DynamicRangeCompressionProfile": { - "base": "When you want to add Dolby dynamic range compression (DRC) signaling to your output stream, we recommend that you use the mode-specific settings instead of Dynamic range compression profile (DynamicRangeCompressionProfile). The mode-specific settings are Dynamic range compression profile, line mode (dynamicRangeCompressionLine) and Dynamic range compression profile, RF mode (dynamicRangeCompressionRf). Note that when you specify values for all three settings, MediaConvert ignores the value of this setting in favor of the mode-specific settings. If you do use this setting instead of the mode-specific settings, choose None (NONE) to leave out DRC signaling. Keep the default Film standard (FILM_STANDARD) to set the profile to Dolby's film standard profile for all operating modes.", + "base": "When you want to add Dolby dynamic range compression (DRC) signaling to your output stream, we recommend that you use the mode-specific settings instead of Dynamic range compression profile. The mode-specific settings are Dynamic range compression profile, line mode and Dynamic range compression profile, RF mode. Note that when you specify values for all three settings, MediaConvert ignores the value of this setting in favor of the mode-specific settings. If you do use this setting instead of the mode-specific settings, choose None to leave out DRC signaling. 
Keep the default Film standard to set the profile to Dolby's film standard profile for all operating modes.", "refs": { - "Ac3Settings$DynamicRangeCompressionProfile": "When you want to add Dolby dynamic range compression (DRC) signaling to your output stream, we recommend that you use the mode-specific settings instead of Dynamic range compression profile (DynamicRangeCompressionProfile). The mode-specific settings are Dynamic range compression profile, line mode (dynamicRangeCompressionLine) and Dynamic range compression profile, RF mode (dynamicRangeCompressionRf). Note that when you specify values for all three settings, MediaConvert ignores the value of this setting in favor of the mode-specific settings. If you do use this setting instead of the mode-specific settings, choose None (NONE) to leave out DRC signaling. Keep the default Film standard (FILM_STANDARD) to set the profile to Dolby's film standard profile for all operating modes." + "Ac3Settings$DynamicRangeCompressionProfile": "When you want to add Dolby dynamic range compression (DRC) signaling to your output stream, we recommend that you use the mode-specific settings instead of Dynamic range compression profile. The mode-specific settings are Dynamic range compression profile, line mode and Dynamic range compression profile, RF mode. Note that when you specify values for all three settings, MediaConvert ignores the value of this setting in favor of the mode-specific settings. If you do use this setting instead of the mode-specific settings, choose None to leave out DRC signaling. Keep the default Film standard to set the profile to Dolby's film standard profile for all operating modes." } }, "Ac3DynamicRangeCompressionRf": { - "base": "Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby Digital stream for the RF operating mode. Related setting: When you use this setting, MediaConvert ignores any value you provide for Dynamic range compression profile (DynamicRangeCompressionProfile). For information about the Dolby Digital DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.", + "base": "Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby Digital stream for the RF operating mode. Related setting: When you use this setting, MediaConvert ignores any value you provide for Dynamic range compression profile. For information about the Dolby Digital DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.", "refs": { - "Ac3Settings$DynamicRangeCompressionRf": "Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby Digital stream for the RF operating mode. Related setting: When you use this setting, MediaConvert ignores any value you provide for Dynamic range compression profile (DynamicRangeCompressionProfile). For information about the Dolby Digital DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf." 
+ "Ac3Settings$DynamicRangeCompressionRf": "Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby Digital stream for the RF operating mode. Related setting: When you use this setting, MediaConvert ignores any value you provide for Dynamic range compression profile. For information about the Dolby Digital DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf." } }, "Ac3LfeFilter": { @@ -123,9 +123,9 @@ } }, "Ac3Settings": { - "base": "Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value AC3.", + "base": "Required when you set Codec to the value AC3.", "refs": { - "AudioCodecSettings$Ac3Settings": "Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value AC3." + "AudioCodecSettings$Ac3Settings": "Required when you set Codec to the value AC3." } }, "AccelerationMode": { @@ -177,15 +177,15 @@ } }, "AfdSignaling": { - "base": "This setting only applies to H.264, H.265, and MPEG2 outputs. Use Insert AFD signaling (AfdSignaling) to specify whether the service includes AFD values in the output video data and what those values are. * Choose None to remove all AFD values from this output. * Choose Fixed to ignore input AFD values and instead encode the value specified in the job. * Choose Auto to calculate output AFD values based on the input AFD scaler data.", + "base": "This setting only applies to H.264, H.265, and MPEG2 outputs. Use Insert AFD signaling to specify whether the service includes AFD values in the output video data and what those values are. * Choose None to remove all AFD values from this output. * Choose Fixed to ignore input AFD values and instead encode the value specified in the job. * Choose Auto to calculate output AFD values based on the input AFD scaler data.", "refs": { - "VideoDescription$AfdSignaling": "This setting only applies to H.264, H.265, and MPEG2 outputs. Use Insert AFD signaling (AfdSignaling) to specify whether the service includes AFD values in the output video data and what those values are. * Choose None to remove all AFD values from this output. * Choose Fixed to ignore input AFD values and instead encode the value specified in the job. * Choose Auto to calculate output AFD values based on the input AFD scaler data." + "VideoDescription$AfdSignaling": "This setting only applies to H.264, H.265, and MPEG2 outputs. Use Insert AFD signaling to specify whether the service includes AFD values in the output video data and what those values are. * Choose None to remove all AFD values from this output. * Choose Fixed to ignore input AFD values and instead encode the value specified in the job. * Choose Auto to calculate output AFD values based on the input AFD scaler data." } }, "AiffSettings": { - "base": "Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value AIFF.", + "base": "Required when you set Codec to the value AIFF.", "refs": { - "AudioCodecSettings$AiffSettings": "Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value AIFF." + "AudioCodecSettings$AiffSettings": "Required when you set Codec to the value AIFF." } }, "AllowedRenditionSize": { @@ -201,9 +201,9 @@ } }, "AncillaryConvert608To708": { - "base": "Specify whether this set of input captions appears in your outputs in both 608 and 708 format. 
If you choose Upconvert (UPCONVERT), MediaConvert includes the captions data in two ways: it passes the 608 data through using the 608 compatibility bytes fields of the 708 wrapper, and it also translates the 608 data into 708.", + "base": "Specify whether this set of input captions appears in your outputs in both 608 and 708 format. If you choose Upconvert, MediaConvert includes the captions data in two ways: it passes the 608 data through using the 608 compatibility bytes fields of the 708 wrapper, and it also translates the 608 data into 708.", "refs": { - "AncillarySourceSettings$Convert608To708": "Specify whether this set of input captions appears in your outputs in both 608 and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes the captions data in two ways: it passes the 608 data through using the 608 compatibility bytes fields of the 708 wrapper, and it also translates the 608 data into 708." + "AncillarySourceSettings$Convert608To708": "Specify whether this set of input captions appears in your outputs in both 608 and 708 format. If you choose Upconvert, MediaConvert includes the captions data in two ways: it passes the 608 data through using the 608 compatibility bytes fields of the 708 wrapper, and it also translates the 608 data into 708." } }, "AncillarySourceSettings": { @@ -247,9 +247,9 @@ } }, "AudioCodec": { - "base": "Choose the audio codec for this output. Note that the option Dolby Digital passthrough (PASSTHROUGH) applies only to Dolby Digital and Dolby Digital Plus audio inputs. Make sure that you choose a codec that's supported with your output container: https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers.html#reference-codecs-containers-output-audio For audio-only outputs, make sure that both your input audio codec and your output audio codec are supported for audio-only workflows. For more information, see: https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers-input.html#reference-codecs-containers-input-audio-only and https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers.html#audio-only-output", + "base": "Choose the audio codec for this output. Note that the option Dolby Digital passthrough applies only to Dolby Digital and Dolby Digital Plus audio inputs. Make sure that you choose a codec that's supported with your output container: https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers.html#reference-codecs-containers-output-audio For audio-only outputs, make sure that both your input audio codec and your output audio codec are supported for audio-only workflows. For more information, see: https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers-input.html#reference-codecs-containers-input-audio-only and https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers.html#audio-only-output", "refs": { - "AudioCodecSettings$Codec": "Choose the audio codec for this output. Note that the option Dolby Digital passthrough (PASSTHROUGH) applies only to Dolby Digital and Dolby Digital Plus audio inputs. Make sure that you choose a codec that's supported with your output container: https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers.html#reference-codecs-containers-output-audio For audio-only outputs, make sure that both your input audio codec and your output audio codec are supported for audio-only workflows. 
For more information, see: https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers-input.html#reference-codecs-containers-input-audio-only and https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers.html#audio-only-output" + "AudioCodecSettings$Codec": "Choose the audio codec for this output. Note that the option Dolby Digital passthrough applies only to Dolby Digital and Dolby Digital Plus audio inputs. Make sure that you choose a codec that's supported with your output container: https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers.html#reference-codecs-containers-output-audio For audio-only outputs, make sure that both your input audio codec and your output audio codec are supported for audio-only workflows. For more information, see: https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers-input.html#reference-codecs-containers-input-audio-only and https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers.html#audio-only-output" } }, "AudioCodecSettings": { @@ -277,9 +277,9 @@ } }, "AudioLanguageCodeControl": { - "base": "Specify which source for language code takes precedence for this audio track. When you choose Follow input (FOLLOW_INPUT), the service uses the language code from the input track if it's present. If there's no languge code on the input track, the service uses the code that you specify in the setting Language code (languageCode or customLanguageCode). When you choose Use configured (USE_CONFIGURED), the service uses the language code that you specify.", + "base": "Specify which source for language code takes precedence for this audio track. When you choose Follow input, the service uses the language code from the input track if it's present. If there's no languge code on the input track, the service uses the code that you specify in the setting Language code. When you choose Use configured, the service uses the language code that you specify.", "refs": { - "AudioDescription$LanguageCodeControl": "Specify which source for language code takes precedence for this audio track. When you choose Follow input (FOLLOW_INPUT), the service uses the language code from the input track if it's present. If there's no languge code on the input track, the service uses the code that you specify in the setting Language code (languageCode or customLanguageCode). When you choose Use configured (USE_CONFIGURED), the service uses the language code that you specify." + "AudioDescription$LanguageCodeControl": "Specify which source for language code takes precedence for this audio track. When you choose Follow input, the service uses the language code from the input track if it's present. If there's no languge code on the input track, the service uses the code that you specify in the setting Language code. When you choose Use configured, the service uses the language code that you specify." } }, "AudioNormalizationAlgorithm": { @@ -313,13 +313,13 @@ } }, "AudioSelector": { - "base": "Use Audio selectors (AudioSelectors) to specify a track or set of tracks from the input that you will use in your outputs. You can use multiple Audio selectors per input.", + "base": "Use Audio selectors to specify a track or set of tracks from the input that you will use in your outputs. 
You can use multiple Audio selectors per input.", "refs": { "__mapOfAudioSelector$member": null } }, "AudioSelectorGroup": { - "base": "Use audio selector groups to combine multiple sidecar audio inputs so that you can assign them to a single output audio tab (AudioDescription). Note that, if you're working with embedded audio, it's simpler to assign multiple input tracks into a single audio selector rather than use an audio selector group.", + "base": "Use audio selector groups to combine multiple sidecar audio inputs so that you can assign them to a single output audio tab. Note that, if you're working with embedded audio, it's simpler to assign multiple input tracks into a single audio selector rather than use an audio selector group.", "refs": { "__mapOfAudioSelectorGroup$member": null } @@ -355,21 +355,21 @@ } }, "Av1AdaptiveQuantization": { - "base": "Specify the strength of any adaptive quantization filters that you enable. The value that you choose here applies to Spatial adaptive quantization (spatialAdaptiveQuantization).", + "base": "Specify the strength of any adaptive quantization filters that you enable. The value that you choose here applies to Spatial adaptive quantization.", "refs": { - "Av1Settings$AdaptiveQuantization": "Specify the strength of any adaptive quantization filters that you enable. The value that you choose here applies to Spatial adaptive quantization (spatialAdaptiveQuantization)." + "Av1Settings$AdaptiveQuantization": "Specify the strength of any adaptive quantization filters that you enable. The value that you choose here applies to Spatial adaptive quantization." } }, "Av1BitDepth": { - "base": "Specify the Bit depth (Av1BitDepth). You can choose 8-bit (BIT_8) or 10-bit (BIT_10).", + "base": "Specify the Bit depth. You can choose 8-bit or 10-bit.", "refs": { - "Av1Settings$BitDepth": "Specify the Bit depth (Av1BitDepth). You can choose 8-bit (BIT_8) or 10-bit (BIT_10)." + "Av1Settings$BitDepth": "Specify the Bit depth. You can choose 8-bit or 10-bit." } }, "Av1FramerateControl": { - "base": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator.", + "base": "Use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction.", "refs": { - "Av1Settings$FramerateControl": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. 
If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator." + "Av1Settings$FramerateControl": "Use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction." } }, "Av1FramerateConversionAlgorithm": { @@ -379,9 +379,9 @@ } }, "Av1QvbrSettings": { - "base": "Settings for quality-defined variable bitrate encoding with the AV1 codec. Use these settings only when you set QVBR for Rate control mode (RateControlMode).", + "base": "Settings for quality-defined variable bitrate encoding with the AV1 codec. Use these settings only when you set QVBR for Rate control mode.", "refs": { - "Av1Settings$QvbrSettings": "Settings for quality-defined variable bitrate encoding with the H.265 codec. Use these settings only when you set QVBR for Rate control mode (RateControlMode)." + "Av1Settings$QvbrSettings": "Settings for quality-defined variable bitrate encoding with the H.265 codec. Use these settings only when you set QVBR for Rate control mode." } }, "Av1RateControlMode": { @@ -397,9 +397,9 @@ } }, "Av1SpatialAdaptiveQuantization": { - "base": "Keep the default value, Enabled (ENABLED), to adjust quantization within each frame based on spatial variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to disable this feature. Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization (adaptiveQuantization) depending on your content. For homogeneous content, such as cartoons and video games, set it to Low. For content with a wider variety of textures, set it to High or Higher.", + "base": "Keep the default value, Enabled, to adjust quantization within each frame based on spatial variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. 
Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to disable this feature. Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization depending on your content. For homogeneous content, such as cartoons and video games, set it to Low. For content with a wider variety of textures, set it to High or Higher.", "refs": { - "Av1Settings$SpatialAdaptiveQuantization": "Keep the default value, Enabled (ENABLED), to adjust quantization within each frame based on spatial variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to disable this feature. Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization (adaptiveQuantization) depending on your content. For homogeneous content, such as cartoons and video games, set it to Low. For content with a wider variety of textures, set it to High or Higher." + "Av1Settings$SpatialAdaptiveQuantization": "Keep the default value, Enabled, to adjust quantization within each frame based on spatial variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to disable this feature. Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization depending on your content. For homogeneous content, such as cartoons and video games, set it to Low. For content with a wider variety of textures, set it to High or Higher." } }, "AvailBlanking": { @@ -416,9 +416,9 @@ } }, "AvcIntraFramerateControl": { - "base": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. 
If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator.", + "base": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction.", "refs": { - "AvcIntraSettings$FramerateControl": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator." + "AvcIntraSettings$FramerateControl": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction." } }, "AvcIntraFramerateConversionAlgorithm": { @@ -428,15 +428,15 @@ } }, "AvcIntraInterlaceMode": { - "base": "Choose the scan line type for the output. Keep the default value, Progressive (PROGRESSIVE) to create a progressive output, regardless of the scan type of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) to create an output that's interlaced with the same field polarity throughout. Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. If the source is progressive, the output will be interlaced with top field bottom field first, depending on which of the Follow options you choose.", + "base": "Choose the scan line type for the output. Keep the default value, Progressive to create a progressive output, regardless of the scan type of your input. Use Top field first or Bottom field first to create an output that's interlaced with the same field polarity throughout. 
Use Follow, default top or Follow, default bottom to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. If the source is progressive, the output will be interlaced with top field bottom field first, depending on which of the Follow options you choose.", "refs": { - "AvcIntraSettings$InterlaceMode": "Choose the scan line type for the output. Keep the default value, Progressive (PROGRESSIVE) to create a progressive output, regardless of the scan type of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) to create an output that's interlaced with the same field polarity throughout. Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. If the source is progressive, the output will be interlaced with top field bottom field first, depending on which of the Follow options you choose." + "AvcIntraSettings$InterlaceMode": "Choose the scan line type for the output. Keep the default value, Progressive to create a progressive output, regardless of the scan type of your input. Use Top field first or Bottom field first to create an output that's interlaced with the same field polarity throughout. Use Follow, default top or Follow, default bottom to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. If the source is progressive, the output will be interlaced with top field bottom field first, depending on which of the Follow options you choose." } }, "AvcIntraScanTypeConversionMode": { - "base": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing (INTERLACED), for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode (interlaceMode) to a value other than Progressive (PROGRESSIVE).", + "base": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing to create a better quality interlaced output. 
In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing, for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine to None or Soft. You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode to a value other than Progressive.", "refs": { - "AvcIntraSettings$ScanTypeConversionMode": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing (INTERLACED), for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode (interlaceMode) to a value other than Progressive (PROGRESSIVE)." + "AvcIntraSettings$ScanTypeConversionMode": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing, for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine to None or Soft. You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode to a value other than Progressive." } }, "AvcIntraSettings": { @@ -446,27 +446,27 @@ } }, "AvcIntraSlowPal": { - "base": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output. When you enable slow PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio to keep it synchronized with the video. Note that enabling this setting will slightly reduce the duration of your video. Required settings: You must also set Framerate to 25. In your JSON job specification, set (framerateControl) to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to 1.", + "base": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output. 
When you enable slow PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio to keep it synchronized with the video. Note that enabling this setting will slightly reduce the duration of your video. Required settings: You must also set Framerate to 25.", "refs": { - "AvcIntraSettings$SlowPal": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output. When you enable slow PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio to keep it synchronized with the video. Note that enabling this setting will slightly reduce the duration of your video. Required settings: You must also set Framerate to 25. In your JSON job specification, set (framerateControl) to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to 1." + "AvcIntraSettings$SlowPal": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output. When you enable slow PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio to keep it synchronized with the video. Note that enabling this setting will slightly reduce the duration of your video. Required settings: You must also set Framerate to 25." } }, "AvcIntraTelecine": { - "base": "When you do frame rate conversion from 23.976 frames per second (fps) to 29.97 fps, and your output scan type is interlaced, you can optionally enable hard telecine (HARD) to create a smoother picture. When you keep the default value, None (NONE), MediaConvert does a standard frame rate conversion to 29.97 without doing anything with the field polarity to create a smoother picture.", + "base": "When you do frame rate conversion from 23.976 frames per second (fps) to 29.97 fps, and your output scan type is interlaced, you can optionally enable hard telecine to create a smoother picture. When you keep the default value, None, MediaConvert does a standard frame rate conversion to 29.97 without doing anything with the field polarity to create a smoother picture.", "refs": { - "AvcIntraSettings$Telecine": "When you do frame rate conversion from 23.976 frames per second (fps) to 29.97 fps, and your output scan type is interlaced, you can optionally enable hard telecine (HARD) to create a smoother picture. When you keep the default value, None (NONE), MediaConvert does a standard frame rate conversion to 29.97 without doing anything with the field polarity to create a smoother picture." + "AvcIntraSettings$Telecine": "When you do frame rate conversion from 23.976 frames per second (fps) to 29.97 fps, and your output scan type is interlaced, you can optionally enable hard telecine to create a smoother picture. When you keep the default value, None, MediaConvert does a standard frame rate conversion to 29.97 without doing anything with the field polarity to create a smoother picture." } }, "AvcIntraUhdQualityTuningLevel": { - "base": "Optional. Use Quality tuning level (qualityTuningLevel) to choose how many transcoding passes MediaConvert does with your video. When you choose Multi-pass (MULTI_PASS), your video quality is better and your output bitrate is more accurate. That is, the actual bitrate of your output is closer to the target bitrate defined in the specification. When you choose Single-pass (SINGLE_PASS), your encoding time is faster. The default behavior is Single-pass (SINGLE_PASS).", + "base": "Optional. 
Use Quality tuning level to choose how many transcoding passes MediaConvert does with your video. When you choose Multi-pass, your video quality is better and your output bitrate is more accurate. That is, the actual bitrate of your output is closer to the target bitrate defined in the specification. When you choose Single-pass, your encoding time is faster. The default behavior is Single-pass.", "refs": { - "AvcIntraUhdSettings$QualityTuningLevel": "Optional. Use Quality tuning level (qualityTuningLevel) to choose how many transcoding passes MediaConvert does with your video. When you choose Multi-pass (MULTI_PASS), your video quality is better and your output bitrate is more accurate. That is, the actual bitrate of your output is closer to the target bitrate defined in the specification. When you choose Single-pass (SINGLE_PASS), your encoding time is faster. The default behavior is Single-pass (SINGLE_PASS)." + "AvcIntraUhdSettings$QualityTuningLevel": "Optional. Use Quality tuning level to choose how many transcoding passes MediaConvert does with your video. When you choose Multi-pass, your video quality is better and your output bitrate is more accurate. That is, the actual bitrate of your output is closer to the target bitrate defined in the specification. When you choose Single-pass, your encoding time is faster. The default behavior is Single-pass." } }, "AvcIntraUhdSettings": { - "base": "Optional when you set AVC-Intra class (avcIntraClass) to Class 4K/2K (CLASS_4K_2K). When you set AVC-Intra class to a different value, this object isn't allowed.", + "base": "Optional when you set AVC-Intra class to Class 4K/2K. When you set AVC-Intra class to a different value, this object isn't allowed.", "refs": { - "AvcIntraSettings$AvcIntraUhdSettings": "Optional when you set AVC-Intra class (avcIntraClass) to Class 4K/2K (CLASS_4K_2K). When you set AVC-Intra class to a different value, this object isn't allowed." + "AvcIntraSettings$AvcIntraUhdSettings": "Optional when you set AVC-Intra class to Class 4K/2K. When you set AVC-Intra class to a different value, this object isn't allowed." } }, "BadRequestException": { @@ -501,15 +501,15 @@ } }, "BurnInSubtitleStylePassthrough": { - "base": "Set Style passthrough (StylePassthrough) to ENABLED to use the available style, color, and position information from your input captions. MediaConvert uses default settings for any missing style and position information in your input captions. Set Style passthrough to DISABLED, or leave blank, to ignore the style and position information from your input captions and use default settings: white text with black outlining, bottom-center positioning, and automatic sizing. Whether you set Style passthrough to enabled or not, you can also choose to manually override any of the individual style and position settings.", + "base": "Set Style passthrough to ENABLED to use the available style, color, and position information from your input captions. MediaConvert uses default settings for any missing style and position information in your input captions. Set Style passthrough to DISABLED, or leave blank, to ignore the style and position information from your input captions and use default settings: white text with black outlining, bottom-center positioning, and automatic sizing. 
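For the AVC-Intra UHD entries above, a sketch of enabling multi-pass encoding, assuming the generated field names and that AvcIntraUhdSettings is only valid with the CLASS_4K_2K class, as the text says:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	// AvcIntraUhdSettings is allowed only with AVC-Intra class CLASS_4K_2K.
	avcIntra := &mediaconvert.AvcIntraSettings{
		AvcIntraClass: aws.String("CLASS_4K_2K"),
		AvcIntraUhdSettings: &mediaconvert.AvcIntraUhdSettings{
			// Multi-pass: better quality and more accurate bitrate, slower encode.
			QualityTuningLevel: aws.String("MULTI_PASS"),
		},
	}
	_ = avcIntra
}
```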
Whether you set Style passthrough to enabled or not, you can also choose to manually override any of the individual style and position settings.", "refs": { - "BurninDestinationSettings$StylePassthrough": "Set Style passthrough (StylePassthrough) to ENABLED to use the available style, color, and position information from your input captions. MediaConvert uses default settings for any missing style and position information in your input captions. Set Style passthrough to DISABLED, or leave blank, to ignore the style and position information from your input captions and use default settings: white text with black outlining, bottom-center positioning, and automatic sizing. Whether you set Style passthrough to enabled or not, you can also choose to manually override any of the individual style and position settings." + "BurninDestinationSettings$StylePassthrough": "Set Style passthrough to ENABLED to use the available style, color, and position information from your input captions. MediaConvert uses default settings for any missing style and position information in your input captions. Set Style passthrough to DISABLED, or leave blank, to ignore the style and position information from your input captions and use default settings: white text with black outlining, bottom-center positioning, and automatic sizing. Whether you set Style passthrough to enabled or not, you can also choose to manually override any of the individual style and position settings." } }, "BurninDestinationSettings": { - "base": "Burn-in is a captions delivery method, rather than a captions format. Burn-in writes the captions directly on your video frames, replacing pixels of video content with the captions. Set up burn-in captions in the same output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/burn-in-output-captions.html. When you work directly in your JSON job specification, include this object and any required children when you set destinationType to BURN_IN.", + "base": "Burn-in is a captions delivery method, rather than a captions format. Burn-in writes the captions directly on your video frames, replacing pixels of video content with the captions. Set up burn-in captions in the same output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/burn-in-output-captions.html.", "refs": { - "CaptionDestinationSettings$BurninDestinationSettings": "Burn-in is a captions delivery method, rather than a captions format. Burn-in writes the captions directly on your video frames, replacing pixels of video content with the captions. Set up burn-in captions in the same output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/burn-in-output-captions.html. When you work directly in your JSON job specification, include this object and any required children when you set destinationType to BURN_IN." + "CaptionDestinationSettings$BurninDestinationSettings": "Burn-in is a captions delivery method, rather than a captions format. Burn-in writes the captions directly on your video frames, replacing pixels of video content with the captions. Set up burn-in captions in the same output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/burn-in-output-captions.html." 
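The sentence removed from the burn-in entry above names the JSON trigger (destinationType set to BURN_IN). A minimal sketch of burn-in captions with style passthrough, assuming the generated CaptionDestinationSettings field names:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	// Burn-in captions that keep the input's style and position data.
	dest := &mediaconvert.CaptionDestinationSettings{
		DestinationType: aws.String("BURN_IN"),
		BurninDestinationSettings: &mediaconvert.BurninDestinationSettings{
			StylePassthrough: aws.String("ENABLED"),
		},
	}
	_ = dest
}
```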
} }, "BurninSubtitleAlignment": { @@ -519,45 +519,45 @@ } }, "BurninSubtitleApplyFontColor": { - "base": "Ignore this setting unless Style passthrough (StylePassthrough) is set to Enabled and Font color (FontColor) set to Black, Yellow, Red, Green, Blue, or Hex. Use Apply font color (ApplyFontColor) for additional font color controls. When you choose White text only (WHITE_TEXT_ONLY), or leave blank, your font color setting only applies to white text in your input captions. For example, if your font color setting is Yellow, and your input captions have red and white text, your output captions will have red and yellow text. When you choose ALL_TEXT, your font color setting applies to all of your output captions text.", + "base": "Ignore this setting unless Style passthrough is set to Enabled and Font color set to Black, Yellow, Red, Green, Blue, or Hex. Use Apply font color for additional font color controls. When you choose White text only, or leave blank, your font color setting only applies to white text in your input captions. For example, if your font color setting is Yellow, and your input captions have red and white text, your output captions will have red and yellow text. When you choose ALL_TEXT, your font color setting applies to all of your output captions text.", "refs": { - "BurninDestinationSettings$ApplyFontColor": "Ignore this setting unless Style passthrough (StylePassthrough) is set to Enabled and Font color (FontColor) set to Black, Yellow, Red, Green, Blue, or Hex. Use Apply font color (ApplyFontColor) for additional font color controls. When you choose White text only (WHITE_TEXT_ONLY), or leave blank, your font color setting only applies to white text in your input captions. For example, if your font color setting is Yellow, and your input captions have red and white text, your output captions will have red and yellow text. When you choose ALL_TEXT, your font color setting applies to all of your output captions text." + "BurninDestinationSettings$ApplyFontColor": "Ignore this setting unless Style passthrough is set to Enabled and Font color set to Black, Yellow, Red, Green, Blue, or Hex. Use Apply font color for additional font color controls. When you choose White text only, or leave blank, your font color setting only applies to white text in your input captions. For example, if your font color setting is Yellow, and your input captions have red and white text, your output captions will have red and yellow text. When you choose ALL_TEXT, your font color setting applies to all of your output captions text." } }, "BurninSubtitleBackgroundColor": { - "base": "Specify the color of the rectangle behind the captions. Leave background color (BackgroundColor) blank and set Style passthrough (StylePassthrough) to enabled to use the background color data from your input captions, if present.", + "base": "Specify the color of the rectangle behind the captions. Leave background color blank and set Style passthrough to enabled to use the background color data from your input captions, if present.", "refs": { - "BurninDestinationSettings$BackgroundColor": "Specify the color of the rectangle behind the captions. Leave background color (BackgroundColor) blank and set Style passthrough (StylePassthrough) to enabled to use the background color data from your input captions, if present." + "BurninDestinationSettings$BackgroundColor": "Specify the color of the rectangle behind the captions. 
Leave background color blank and set Style passthrough to enabled to use the background color data from your input captions, if present." } }, "BurninSubtitleFallbackFont": { - "base": "Specify the font that you want the service to use for your burn in captions when your input captions specify a font that MediaConvert doesn't support. When you set Fallback font (FallbackFont) to best match (BEST_MATCH), or leave blank, MediaConvert uses a supported font that most closely matches the font that your input captions specify. When there are multiple unsupported fonts in your input captions, MediaConvert matches each font with the supported font that matches best. When you explicitly choose a replacement font, MediaConvert uses that font to replace all unsupported fonts from your input.", + "base": "Specify the font that you want the service to use for your burn-in captions when your input captions specify a font that MediaConvert doesn't support. When you set Fallback font to best match, or leave blank, MediaConvert uses a supported font that most closely matches the font that your input captions specify. When there are multiple unsupported fonts in your input captions, MediaConvert matches each font with the supported font that matches best. When you explicitly choose a replacement font, MediaConvert uses that font to replace all unsupported fonts from your input.", "refs": { - "BurninDestinationSettings$FallbackFont": "Specify the font that you want the service to use for your burn in captions when your input captions specify a font that MediaConvert doesn't support. When you set Fallback font (FallbackFont) to best match (BEST_MATCH), or leave blank, MediaConvert uses a supported font that most closely matches the font that your input captions specify. When there are multiple unsupported fonts in your input captions, MediaConvert matches each font with the supported font that matches best. When you explicitly choose a replacement font, MediaConvert uses that font to replace all unsupported fonts from your input." + "BurninDestinationSettings$FallbackFont": "Specify the font that you want the service to use for your burn-in captions when your input captions specify a font that MediaConvert doesn't support. When you set Fallback font to best match, or leave blank, MediaConvert uses a supported font that most closely matches the font that your input captions specify. When there are multiple unsupported fonts in your input captions, MediaConvert matches each font with the supported font that matches best. When you explicitly choose a replacement font, MediaConvert uses that font to replace all unsupported fonts from your input." } }, "BurninSubtitleFontColor": { - "base": "Specify the color of the burned-in captions text. Leave Font color (FontColor) blank and set Style passthrough (StylePassthrough) to enabled to use the font color data from your input captions, if present.", + "base": "Specify the color of the burned-in captions text. Leave Font color blank and set Style passthrough to enabled to use the font color data from your input captions, if present.", "refs": { - "BurninDestinationSettings$FontColor": "Specify the color of the burned-in captions text.
Leave Font color blank and set Style passthrough to enabled to use the font color data from your input captions, if present." } }, "BurninSubtitleOutlineColor": { - "base": "Specify font outline color. Leave Outline color (OutlineColor) blank and set Style passthrough (StylePassthrough) to enabled to use the font outline color data from your input captions, if present.", + "base": "Specify font outline color. Leave Outline color blank and set Style passthrough to enabled to use the font outline color data from your input captions, if present.", "refs": { - "BurninDestinationSettings$OutlineColor": "Specify font outline color. Leave Outline color (OutlineColor) blank and set Style passthrough (StylePassthrough) to enabled to use the font outline color data from your input captions, if present." + "BurninDestinationSettings$OutlineColor": "Specify font outline color. Leave Outline color blank and set Style passthrough to enabled to use the font outline color data from your input captions, if present." } }, "BurninSubtitleShadowColor": { - "base": "Specify the color of the shadow cast by the captions. Leave Shadow color (ShadowColor) blank and set Style passthrough (StylePassthrough) to enabled to use the shadow color data from your input captions, if present.", + "base": "Specify the color of the shadow cast by the captions. Leave Shadow color blank and set Style passthrough to enabled to use the shadow color data from your input captions, if present.", "refs": { - "BurninDestinationSettings$ShadowColor": "Specify the color of the shadow cast by the captions. Leave Shadow color (ShadowColor) blank and set Style passthrough (StylePassthrough) to enabled to use the shadow color data from your input captions, if present." + "BurninDestinationSettings$ShadowColor": "Specify the color of the shadow cast by the captions. Leave Shadow color blank and set Style passthrough to enabled to use the shadow color data from your input captions, if present." } }, "BurninSubtitleTeletextSpacing": { - "base": "Specify whether the text spacing (TeletextSpacing) in your captions is set by the captions grid, or varies depending on letter width. Choose fixed grid (FIXED_GRID) to conform to the spacing specified in the captions file more accurately. Choose proportional (PROPORTIONAL) to make the text easier to read for closed captions.", + "base": "Specify whether the text spacing in your captions is set by the captions grid, or varies depending on letter width. Choose fixed grid to conform to the spacing specified in the captions file more accurately. Choose proportional to make the text easier to read for closed captions.", "refs": { - "BurninDestinationSettings$TeletextSpacing": "Specify whether the text spacing (TeletextSpacing) in your captions is set by the captions grid, or varies depending on letter width. Choose fixed grid (FIXED_GRID) to conform to the spacing specified in the captions file more accurately. Choose proportional (PROPORTIONAL) to make the text easier to read for closed captions." + "BurninDestinationSettings$TeletextSpacing": "Specify whether the text spacing in your captions is set by the captions grid, or varies depending on letter width. Choose fixed grid to conform to the spacing specified in the captions file more accurately. Choose proportional to make the text easier to read for closed captions." } }, "CancelJobRequest": { @@ -583,16 +583,16 @@ } }, "CaptionDestinationSettings": { - "base": "Settings related to one captions tab on the MediaConvert console. 
In your job JSON, an instance of captions DestinationSettings is equivalent to one captions tab in the console. Usually, one captions tab corresponds to one output captions track. Depending on your output captions format, one tab might correspond to a set of output captions tracks. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/including-captions.html.", + "base": "Settings related to one captions tab on the MediaConvert console. Usually, one captions tab corresponds to one output captions track. Depending on your output captions format, one tab might correspond to a set of output captions tracks. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/including-captions.html.", "refs": { - "CaptionDescription$DestinationSettings": "Settings related to one captions tab on the MediaConvert console. In your job JSON, an instance of captions DestinationSettings is equivalent to one captions tab in the console. Usually, one captions tab corresponds to one output captions track. Depending on your output captions format, one tab might correspond to a set of output captions tracks. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/including-captions.html.", - "CaptionDescriptionPreset$DestinationSettings": "Settings related to one captions tab on the MediaConvert console. In your job JSON, an instance of captions DestinationSettings is equivalent to one captions tab in the console. Usually, one captions tab corresponds to one output captions track. Depending on your output captions format, one tab might correspond to a set of output captions tracks. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/including-captions.html." + "CaptionDescription$DestinationSettings": "Settings related to one captions tab on the MediaConvert console. Usually, one captions tab corresponds to one output captions track. Depending on your output captions format, one tab might correspond to a set of output captions tracks. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/including-captions.html.", + "CaptionDescriptionPreset$DestinationSettings": "Settings related to one captions tab on the MediaConvert console. Usually, one captions tab corresponds to one output captions track. Depending on your output captions format, one tab might correspond to a set of output captions tracks. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/including-captions.html." } }, "CaptionDestinationType": { - "base": "Specify the format for this set of captions on this output. The default format is embedded without SCTE-20. Note that your choice of video output container constrains your choice of output captions format. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/captions-support-tables.html. If you are using SCTE-20 and you want to create an output that complies with the SCTE-43 spec, choose SCTE-20 plus embedded (SCTE20_PLUS_EMBEDDED). To create a non-compliant output where the embedded captions come first, choose Embedded plus SCTE-20 (EMBEDDED_PLUS_SCTE20).", + "base": "Specify the format for this set of captions on this output. The default format is embedded without SCTE-20. Note that your choice of video output container constrains your choice of output captions format. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/captions-support-tables.html. 
If you are using SCTE-20 and you want to create an output that complies with the SCTE-43 spec, choose SCTE-20 plus embedded. To create a non-compliant output where the embedded captions come first, choose Embedded plus SCTE-20.", "refs": { - "CaptionDestinationSettings$DestinationType": "Specify the format for this set of captions on this output. The default format is embedded without SCTE-20. Note that your choice of video output container constrains your choice of output captions format. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/captions-support-tables.html. If you are using SCTE-20 and you want to create an output that complies with the SCTE-43 spec, choose SCTE-20 plus embedded (SCTE20_PLUS_EMBEDDED). To create a non-compliant output where the embedded captions come first, choose Embedded plus SCTE-20 (EMBEDDED_PLUS_SCTE20)." + "CaptionDestinationSettings$DestinationType": "Specify the format for this set of captions on this output. The default format is embedded without SCTE-20. Note that your choice of video output container constrains your choice of output captions format. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/captions-support-tables.html. If you are using SCTE-20 and you want to create an output that complies with the SCTE-43 spec, choose SCTE-20 plus embedded. To create a non-compliant output where the embedded captions come first, choose Embedded plus SCTE-20." } }, "CaptionSelector": { @@ -608,9 +608,9 @@ } }, "CaptionSourceFramerate": { - "base": "Ignore this setting unless your input captions format is SCC. To have the service compensate for differing frame rates between your input captions and input video, specify the frame rate of the captions file. Specify this value as a fraction. When you work directly in your JSON job specification, use the settings framerateNumerator and framerateDenominator. For example, you might specify 24 / 1 for 24 fps, 25 / 1 for 25 fps, 24000 / 1001 for 23.976 fps, or 30000 / 1001 for 29.97 fps.", + "base": "Ignore this setting unless your input captions format is SCC. To have the service compensate for differing frame rates between your input captions and input video, specify the frame rate of the captions file. Specify this value as a fraction. For example, you might specify 24 / 1 for 24 fps, 25 / 1 for 25 fps, 24000 / 1001 for 23.976 fps, or 30000 / 1001 for 29.97 fps.", "refs": { - "FileSourceSettings$Framerate": "Ignore this setting unless your input captions format is SCC. To have the service compensate for differing frame rates between your input captions and input video, specify the frame rate of the captions file. Specify this value as a fraction. When you work directly in your JSON job specification, use the settings framerateNumerator and framerateDenominator. For example, you might specify 24 / 1 for 24 fps, 25 / 1 for 25 fps, 24000 / 1001 for 23.976 fps, or 30000 / 1001 for 29.97 fps." + "FileSourceSettings$Framerate": "Ignore this setting unless your input captions format is SCC. To have the service compensate for differing frame rates between your input captions and input video, specify the frame rate of the captions file. Specify this value as a fraction. For example, you might specify 24 / 1 for 24 fps, 25 / 1 for 25 fps, 24000 / 1001 for 23.976 fps, or 30000 / 1001 for 29.97 fps." } }, "CaptionSourceSettings": { @@ -620,15 +620,15 @@ } }, "CaptionSourceType": { - "base": "Use Source (SourceType) to identify the format of your input captions. 
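The CaptionSourceFramerate entry above lists the usual fractions. A sketch of the 23.976 fps case for an SCC input, assuming the generated FileSourceSettings and CaptionSourceFramerate names:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	// 24000/1001 is the 23.976 fps fraction from the entry above.
	fileSource := &mediaconvert.FileSourceSettings{
		Framerate: &mediaconvert.CaptionSourceFramerate{
			FramerateNumerator:   aws.Int64(24000),
			FramerateDenominator: aws.Int64(1001),
		},
	}
	_ = fileSource
}
```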
The service cannot auto-detect caption format.", + "base": "Use Source to identify the format of your input captions. The service cannot auto-detect caption format.", "refs": { - "CaptionSourceSettings$SourceType": "Use Source (SourceType) to identify the format of your input captions. The service cannot auto-detect caption format." + "CaptionSourceSettings$SourceType": "Use Source to identify the format of your input captions. The service cannot auto-detect caption format." } }, "ChannelMapping": { - "base": "Channel mapping (ChannelMapping) contains the group of fields that hold the remixing value for each channel, in dB. Specify remix values to indicate how much of the content from your input audio channel you want in your output audio channels. Each instance of the InputChannels or InputChannelsFineTune array specifies these values for one output channel. Use one instance of this array for each output channel. In the console, each array corresponds to a column in the graphical depiction of the mapping matrix. The rows of the graphical matrix correspond to input channels. Valid values are within the range from -60 (mute) through 6. A setting of 0 passes the input channel unchanged to the output channel (no attenuation or amplification). Use InputChannels or InputChannelsFineTune to specify your remix values. Don't use both.", + "base": "Channel mapping contains the group of fields that hold the remixing value for each channel, in dB. Specify remix values to indicate how much of the content from your input audio channel you want in your output audio channels. Each instance of the InputChannels or InputChannelsFineTune array specifies these values for one output channel. Use one instance of this array for each output channel. In the console, each array corresponds to a column in the graphical depiction of the mapping matrix. The rows of the graphical matrix correspond to input channels. Valid values are within the range from -60 (mute) through 6. A setting of 0 passes the input channel unchanged to the output channel (no attenuation or amplification). Use InputChannels or InputChannelsFineTune to specify your remix values. Don't use both.", "refs": { - "RemixSettings$ChannelMapping": "Channel mapping (ChannelMapping) contains the group of fields that hold the remixing value for each channel, in dB. Specify remix values to indicate how much of the content from your input audio channel you want in your output audio channels. Each instance of the InputChannels or InputChannelsFineTune array specifies these values for one output channel. Use one instance of this array for each output channel. In the console, each array corresponds to a column in the graphical depiction of the mapping matrix. The rows of the graphical matrix correspond to input channels. Valid values are within the range from -60 (mute) through 6. A setting of 0 passes the input channel unchanged to the output channel (no attenuation or amplification). Use InputChannels or InputChannelsFineTune to specify your remix values. Don't use both." + "RemixSettings$ChannelMapping": "Channel mapping contains the group of fields that hold the remixing value for each channel, in dB. Specify remix values to indicate how much of the content from your input audio channel you want in your output audio channels. Each instance of the InputChannels or InputChannelsFineTune array specifies these values for one output channel. Use one instance of this array for each output channel. 
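The ChannelMapping text above describes a matrix of per-output-channel gain arrays. A sketch of a two-channel swap follows; the OutputChannels and OutputChannelMapping names come from the SDK's generated types rather than the text above, so treat them as assumptions:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	// Each OutputChannels entry describes one output channel; each InputChannels
	// value is the gain in dB applied to that input (-60 mutes, 0 passes through).
	remix := &mediaconvert.RemixSettings{
		ChannelsIn:  aws.Int64(2),
		ChannelsOut: aws.Int64(2),
		ChannelMapping: &mediaconvert.ChannelMapping{
			OutputChannels: []*mediaconvert.OutputChannelMapping{
				{InputChannels: []*int64{aws.Int64(-60), aws.Int64(0)}},
				{InputChannels: []*int64{aws.Int64(0), aws.Int64(-60)}},
			},
		},
	}
	_ = remix
}
```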
In the console, each array corresponds to a column in the graphical depiction of the mapping matrix. The rows of the graphical matrix correspond to input channels. Valid values are within the range from -60 (mute) through 6. A setting of 0 passes the input channel unchanged to the output channel (no attenuation or amplification). Use InputChannels or InputChannelsFineTune to specify your remix values. Don't use both." } }, "ClipLimits": { @@ -644,9 +644,9 @@ } }, "CmafClientCache": { - "base": "Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no tag. Otherwise, keep the default value Enabled (ENABLED) and control caching in your video distribution set up. For example, use the Cache-Control http header.", + "base": "Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no tag. Otherwise, keep the default value Enabled and control caching in your video distribution setup. For example, use the Cache-Control HTTP header.", "refs": { - "CmafGroupSettings$ClientCache": "Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no tag. Otherwise, keep the default value Enabled (ENABLED) and control caching in your video distribution set up. For example, use the Cache-Control http header." + "CmafGroupSettings$ClientCache": "Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no tag. Otherwise, keep the default value Enabled and control caching in your video distribution setup. For example, use the Cache-Control HTTP header." } }, "CmafCodecSpecification": { @@ -662,21 +662,21 @@ } }, "CmafEncryptionType": { - "base": "Specify the encryption scheme that you want the service to use when encrypting your CMAF segments. Choose AES-CBC subsample (SAMPLE-AES) or AES_CTR (AES-CTR).", + "base": "Specify the encryption scheme that you want the service to use when encrypting your CMAF segments. Choose AES-CBC subsample or AES_CTR.", "refs": { - "CmafEncryptionSettings$EncryptionMethod": "Specify the encryption scheme that you want the service to use when encrypting your CMAF segments. Choose AES-CBC subsample (SAMPLE-AES) or AES_CTR (AES-CTR)." + "CmafEncryptionSettings$EncryptionMethod": "Specify the encryption scheme that you want the service to use when encrypting your CMAF segments. Choose AES-CBC subsample or AES_CTR." } }, "CmafGroupSettings": { - "base": "Settings related to your CMAF output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html. When you work directly in your JSON job specification, include this object and any required children when you set Type, under OutputGroupSettings, to CMAF_GROUP_SETTINGS.", + "base": "Settings related to your CMAF output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html.", "refs": { - "OutputGroupSettings$CmafGroupSettings": "Settings related to your CMAF output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html. When you work directly in your JSON job specification, include this object and any required children when you set Type, under OutputGroupSettings, to CMAF_GROUP_SETTINGS." + "OutputGroupSettings$CmafGroupSettings": "Settings related to your CMAF output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html." } }, "CmafImageBasedTrickPlay": { - "base": "Specify whether MediaConvert generates images for trick play.
Keep the default value, None (NONE), to not generate any images. Choose Thumbnail (THUMBNAIL) to generate tiled thumbnails. Choose Thumbnail and full frame (THUMBNAIL_AND_FULLFRAME) to generate tiled thumbnails and full-resolution images of single frames. When you enable Write HLS manifest (WriteHlsManifest), MediaConvert creates a child manifest for each set of images that you generate and adds corresponding entries to the parent manifest. When you enable Write DASH manifest (WriteDashManifest), MediaConvert adds an entry in the .mpd manifest for each set of images that you generate. A common application for these images is Roku trick mode. The thumbnails and full-frame images that MediaConvert creates with this feature are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md", + "base": "Specify whether MediaConvert generates images for trick play. Keep the default value, None, to not generate any images. Choose Thumbnail to generate tiled thumbnails. Choose Thumbnail and full frame to generate tiled thumbnails and full-resolution images of single frames. When you enable Write HLS manifest, MediaConvert creates a child manifest for each set of images that you generate and adds corresponding entries to the parent manifest. When you enable Write DASH manifest, MediaConvert adds an entry in the .mpd manifest for each set of images that you generate. A common application for these images is Roku trick mode. The thumbnails and full-frame images that MediaConvert creates with this feature are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md", "refs": { - "CmafGroupSettings$ImageBasedTrickPlay": "Specify whether MediaConvert generates images for trick play. Keep the default value, None (NONE), to not generate any images. Choose Thumbnail (THUMBNAIL) to generate tiled thumbnails. Choose Thumbnail and full frame (THUMBNAIL_AND_FULLFRAME) to generate tiled thumbnails and full-resolution images of single frames. When you enable Write HLS manifest (WriteHlsManifest), MediaConvert creates a child manifest for each set of images that you generate and adds corresponding entries to the parent manifest. When you enable Write DASH manifest (WriteDashManifest), MediaConvert adds an entry in the .mpd manifest for each set of images that you generate. A common application for these images is Roku trick mode. The thumbnails and full-frame images that MediaConvert creates with this feature are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md" + "CmafGroupSettings$ImageBasedTrickPlay": "Specify whether MediaConvert generates images for trick play. Keep the default value, None, to not generate any images. Choose Thumbnail to generate tiled thumbnails. Choose Thumbnail and full frame to generate tiled thumbnails and full-resolution images of single frames. When you enable Write HLS manifest, MediaConvert creates a child manifest for each set of images that you generate and adds corresponding entries to the parent manifest. When you enable Write DASH manifest, MediaConvert adds an entry in the .mpd manifest for each set of images that you generate. A common application for these images is Roku trick mode. 
The thumbnails and full-frame images that MediaConvert creates with this feature are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md" } }, "CmafImageBasedTrickPlaySettings": { @@ -722,15 +722,15 @@ } }, "CmafMpdProfile": { - "base": "Specify whether your DASH profile is on-demand or main. When you choose Main profile (MAIN_PROFILE), the service signals urn:mpeg:dash:profile:isoff-main:2011 in your .mpd DASH manifest. When you choose On-demand (ON_DEMAND_PROFILE), the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd. When you choose On-demand, you must also set the output group setting Segment control (SegmentControl) to Single file (SINGLE_FILE).", + "base": "Specify whether your DASH profile is on-demand or main. When you choose Main profile, the service signals urn:mpeg:dash:profile:isoff-main:2011 in your .mpd DASH manifest. When you choose On-demand, the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd. When you choose On-demand, you must also set the output group setting Segment control to Single file.", "refs": { - "CmafGroupSettings$MpdProfile": "Specify whether your DASH profile is on-demand or main. When you choose Main profile (MAIN_PROFILE), the service signals urn:mpeg:dash:profile:isoff-main:2011 in your .mpd DASH manifest. When you choose On-demand (ON_DEMAND_PROFILE), the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd. When you choose On-demand, you must also set the output group setting Segment control (SegmentControl) to Single file (SINGLE_FILE)." + "CmafGroupSettings$MpdProfile": "Specify whether your DASH profile is on-demand or main. When you choose Main profile, the service signals urn:mpeg:dash:profile:isoff-main:2011 in your .mpd DASH manifest. When you choose On-demand, the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd. When you choose On-demand, you must also set the output group setting Segment control to Single file." } }, "CmafPtsOffsetHandlingForBFrames": { - "base": "Use this setting only when your output video stream has B-frames, which causes the initial presentation time stamp (PTS) to be offset from the initial decode time stamp (DTS). Specify how MediaConvert handles PTS when writing time stamps in output DASH manifests. Choose Match initial PTS (MATCH_INITIAL_PTS) when you want MediaConvert to use the initial PTS as the first time stamp in the manifest. Choose Zero-based (ZERO_BASED) to have MediaConvert ignore the initial PTS in the video stream and instead write the initial time stamp as zero in the manifest. For outputs that don't have B-frames, the time stamps in your DASH manifests start at zero regardless of your choice here.", + "base": "Use this setting only when your output video stream has B-frames, which causes the initial presentation time stamp (PTS) to be offset from the initial decode time stamp (DTS). Specify how MediaConvert handles PTS when writing time stamps in output DASH manifests. Choose Match initial PTS when you want MediaConvert to use the initial PTS as the first time stamp in the manifest. Choose Zero-based to have MediaConvert ignore the initial PTS in the video stream and instead write the initial time stamp as zero in the manifest. 
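Several of the CMAF entries above interlock: trick-play images hang off the HLS and DASH manifests you write, and the On-demand MPD profile requires single-file segment control. A combined sketch, assuming the generated CmafGroupSettings field names:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	cmaf := &mediaconvert.CmafGroupSettings{
		WriteHlsManifest:    aws.String("ENABLED"),
		WriteDashManifest:   aws.String("ENABLED"),
		ImageBasedTrickPlay: aws.String("THUMBNAIL_AND_FULLFRAME"),
		// On-demand profile requires Segment control set to Single file.
		MpdProfile:     aws.String("ON_DEMAND_PROFILE"),
		SegmentControl: aws.String("SINGLE_FILE"),
	}
	_ = cmaf
}
```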
For outputs that don't have B-frames, the time stamps in your DASH manifests start at zero regardless of your choice here.", "refs": { - "CmafGroupSettings$PtsOffsetHandlingForBFrames": "Use this setting only when your output video stream has B-frames, which causes the initial presentation time stamp (PTS) to be offset from the initial decode time stamp (DTS). Specify how MediaConvert handles PTS when writing time stamps in output DASH manifests. Choose Match initial PTS (MATCH_INITIAL_PTS) when you want MediaConvert to use the initial PTS as the first time stamp in the manifest. Choose Zero-based (ZERO_BASED) to have MediaConvert ignore the initial PTS in the video stream and instead write the initial time stamp as zero in the manifest. For outputs that don't have B-frames, the time stamps in your DASH manifests start at zero regardless of your choice here." + "CmafGroupSettings$PtsOffsetHandlingForBFrames": "Use this setting only when your output video stream has B-frames, which causes the initial presentation time stamp (PTS) to be offset from the initial decode time stamp (DTS). Specify how MediaConvert handles PTS when writing time stamps in output DASH manifests. Choose Match initial PTS when you want MediaConvert to use the initial PTS as the first time stamp in the manifest. Choose Zero-based to have MediaConvert ignore the initial PTS in the video stream and instead write the initial time stamp as zero in the manifest. For outputs that don't have B-frames, the time stamps in your DASH manifests start at zero regardless of your choice here." } }, "CmafSegmentControl": { @@ -740,9 +740,9 @@ } }, "CmafSegmentLengthControl": { - "base": "Specify how you want MediaConvert to determine the segment length. Choose Exact (EXACT) to have the encoder use the exact length that you specify with the setting Segment length (SegmentLength). This might result in extra I-frames. Choose Multiple of GOP (GOP_MULTIPLE) to have the encoder round up the segment lengths to match the next GOP boundary.", + "base": "Specify how you want MediaConvert to determine the segment length. Choose Exact to have the encoder use the exact length that you specify with the setting Segment length. This might result in extra I-frames. Choose Multiple of GOP to have the encoder round up the segment lengths to match the next GOP boundary.", "refs": { - "CmafGroupSettings$SegmentLengthControl": "Specify how you want MediaConvert to determine the segment length. Choose Exact (EXACT) to have the encoder use the exact length that you specify with the setting Segment length (SegmentLength). This might result in extra I-frames. Choose Multiple of GOP (GOP_MULTIPLE) to have the encoder round up the segment lengths to match the next GOP boundary." + "CmafGroupSettings$SegmentLengthControl": "Specify how you want MediaConvert to determine the segment length. Choose Exact to have the encoder use the exact length that you specify with the setting Segment length. This might result in extra I-frames. Choose Multiple of GOP to have the encoder round up the segment lengths to match the next GOP boundary." } }, "CmafStreamInfResolution": { @@ -776,34 +776,34 @@ } }, "CmafWriteSegmentTimelineInRepresentation": { - "base": "When you enable Precise segment duration in DASH manifests (writeSegmentTimelineInRepresentation), your DASH manifest shows precise segment durations. The segment duration information appears inside the SegmentTimeline element, inside SegmentTemplate at the Representation level. 
When this feature isn't enabled, the segment durations in your DASH manifest are approximate. The segment duration information appears in the duration attribute of the SegmentTemplate element.", + "base": "When you enable Precise segment duration in DASH manifests, your DASH manifest shows precise segment durations. The segment duration information appears inside the SegmentTimeline element, inside SegmentTemplate at the Representation level. When this feature isn't enabled, the segment durations in your DASH manifest are approximate. The segment duration information appears in the duration attribute of the SegmentTemplate element.", "refs": { - "CmafGroupSettings$WriteSegmentTimelineInRepresentation": "When you enable Precise segment duration in DASH manifests (writeSegmentTimelineInRepresentation), your DASH manifest shows precise segment durations. The segment duration information appears inside the SegmentTimeline element, inside SegmentTemplate at the Representation level. When this feature isn't enabled, the segment durations in your DASH manifest are approximate. The segment duration information appears in the duration attribute of the SegmentTemplate element." + "CmafGroupSettings$WriteSegmentTimelineInRepresentation": "When you enable Precise segment duration in DASH manifests, your DASH manifest shows precise segment durations. The segment duration information appears inside the SegmentTimeline element, inside SegmentTemplate at the Representation level. When this feature isn't enabled, the segment durations in your DASH manifest are approximate. The segment duration information appears in the duration attribute of the SegmentTemplate element." } }, "CmfcAudioDuration": { - "base": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec.", + "base": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration. In all other cases, keep the default value, Default codec duration. When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. 
When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec.", "refs": { - "CmfcSettings$AudioDuration": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec.", - "Mp4Settings$AudioDuration": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec." + "CmfcSettings$AudioDuration": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration. In all other cases, keep the default value, Default codec duration. When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec.", + "Mp4Settings$AudioDuration": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration. In all other cases, keep the default value, Default codec duration. 
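The AudioDuration entries above apply to both the CMFC and MP4 containers. A sketch of the repackaging-sensitive case, assuming the generated CmfcSettings and Mp4Settings names:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	// Pad or trim audio so its duration tracks the video stream.
	cmfc := &mediaconvert.CmfcSettings{
		AudioDuration: aws.String("MATCH_VIDEO_DURATION"),
	}
	mp4 := &mediaconvert.Mp4Settings{
		AudioDuration: aws.String("MATCH_VIDEO_DURATION"),
	}
	_, _ = cmfc, mp4
}
```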
When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec." } }, "CmfcAudioTrackType": { - "base": "Use this setting to control the values that MediaConvert puts in your HLS parent playlist to control how the client player selects which audio track to play. The other options for this setting determine the values that MediaConvert writes for the DEFAULT and AUTOSELECT attributes of the EXT-X-MEDIA entry for the audio variant. For more information about these attributes, see the Apple documentation article https://developer.apple.com/documentation/http_live_streaming/example_playlists_for_http_live_streaming/adding_alternate_media_to_a_playlist. Choose Alternate audio, auto select, default (ALTERNATE_AUDIO_AUTO_SELECT_DEFAULT) to set DEFAULT=YES and AUTOSELECT=YES. Choose this value for only one variant in your output group. Choose Alternate audio, auto select, not default (ALTERNATE_AUDIO_AUTO_SELECT) to set DEFAULT=NO and AUTOSELECT=YES. Choose Alternate Audio, Not Auto Select to set DEFAULT=NO and AUTOSELECT=NO. When you don't specify a value for this setting, MediaConvert defaults to Alternate audio, auto select, default. When there is more than one variant in your output group, you must explicitly choose a value for this setting.", + "base": "Use this setting to control the values that MediaConvert puts in your HLS parent playlist to control how the client player selects which audio track to play. The other options for this setting determine the values that MediaConvert writes for the DEFAULT and AUTOSELECT attributes of the EXT-X-MEDIA entry for the audio variant. For more information about these attributes, see the Apple documentation article https://developer.apple.com/documentation/http_live_streaming/example_playlists_for_http_live_streaming/adding_alternate_media_to_a_playlist. Choose Alternate audio, auto select, default to set DEFAULT=YES and AUTOSELECT=YES. Choose this value for only one variant in your output group. Choose Alternate audio, auto select, not default to set DEFAULT=NO and AUTOSELECT=YES. Choose Alternate Audio, Not Auto Select to set DEFAULT=NO and AUTOSELECT=NO. When you don't specify a value for this setting, MediaConvert defaults to Alternate audio, auto select, default. When there is more than one variant in your output group, you must explicitly choose a value for this setting.", "refs": { - "CmfcSettings$AudioTrackType": "Use this setting to control the values that MediaConvert puts in your HLS parent playlist to control how the client player selects which audio track to play. The other options for this setting determine the values that MediaConvert writes for the DEFAULT and AUTOSELECT attributes of the EXT-X-MEDIA entry for the audio variant. For more information about these attributes, see the Apple documentation article https://developer.apple.com/documentation/http_live_streaming/example_playlists_for_http_live_streaming/adding_alternate_media_to_a_playlist. 
Choose Alternate audio, auto select, default (ALTERNATE_AUDIO_AUTO_SELECT_DEFAULT) to set DEFAULT=YES and AUTOSELECT=YES. Choose this value for only one variant in your output group. Choose Alternate audio, auto select, not default (ALTERNATE_AUDIO_AUTO_SELECT) to set DEFAULT=NO and AUTOSELECT=YES. Choose Alternate Audio, Not Auto Select to set DEFAULT=NO and AUTOSELECT=NO. When you don't specify a value for this setting, MediaConvert defaults to Alternate audio, auto select, default. When there is more than one variant in your output group, you must explicitly choose a value for this setting." + "CmfcSettings$AudioTrackType": "Use this setting to control the values that MediaConvert puts in your HLS parent playlist to control how the client player selects which audio track to play. The other options for this setting determine the values that MediaConvert writes for the DEFAULT and AUTOSELECT attributes of the EXT-X-MEDIA entry for the audio variant. For more information about these attributes, see the Apple documentation article https://developer.apple.com/documentation/http_live_streaming/example_playlists_for_http_live_streaming/adding_alternate_media_to_a_playlist. Choose Alternate audio, auto select, default to set DEFAULT=YES and AUTOSELECT=YES. Choose this value for only one variant in your output group. Choose Alternate audio, auto select, not default to set DEFAULT=NO and AUTOSELECT=YES. Choose Alternate Audio, Not Auto Select to set DEFAULT=NO and AUTOSELECT=NO. When you don't specify a value for this setting, MediaConvert defaults to Alternate audio, auto select, default. When there is more than one variant in your output group, you must explicitly choose a value for this setting." } }, "CmfcDescriptiveVideoServiceFlag": { - "base": "Specify whether to flag this audio track as descriptive video service (DVS) in your HLS parent manifest. When you choose Flag (FLAG), MediaConvert includes the parameter CHARACTERISTICS=\"public.accessibility.describes-video\" in the EXT-X-MEDIA entry for this track. When you keep the default choice, Don't flag (DONT_FLAG), MediaConvert leaves this parameter out. The DVS flag can help with accessibility on Apple devices. For more information, see the Apple documentation.", + "base": "Specify whether to flag this audio track as descriptive video service (DVS) in your HLS parent manifest. When you choose Flag, MediaConvert includes the parameter CHARACTERISTICS=\"public.accessibility.describes-video\" in the EXT-X-MEDIA entry for this track. When you keep the default choice, Don't flag, MediaConvert leaves this parameter out. The DVS flag can help with accessibility on Apple devices. For more information, see the Apple documentation.", "refs": { - "CmfcSettings$DescriptiveVideoServiceFlag": "Specify whether to flag this audio track as descriptive video service (DVS) in your HLS parent manifest. When you choose Flag (FLAG), MediaConvert includes the parameter CHARACTERISTICS=\"public.accessibility.describes-video\" in the EXT-X-MEDIA entry for this track. When you keep the default choice, Don't flag (DONT_FLAG), MediaConvert leaves this parameter out. The DVS flag can help with accessibility on Apple devices. For more information, see the Apple documentation." + "CmfcSettings$DescriptiveVideoServiceFlag": "Specify whether to flag this audio track as descriptive video service (DVS) in your HLS parent manifest. 
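The two CMFC entries above control the EXT-X-MEDIA attributes in the HLS parent manifest. A sketch of a non-default, auto-selectable DVS track, assuming the generated CmfcSettings names:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	// Writes DEFAULT=NO, AUTOSELECT=YES, plus the DVS CHARACTERISTICS parameter.
	cmfc := &mediaconvert.CmfcSettings{
		AudioTrackType:              aws.String("ALTERNATE_AUDIO_AUTO_SELECT"),
		DescriptiveVideoServiceFlag: aws.String("FLAG"),
	}
	_ = cmfc
}
```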
When you choose Flag, MediaConvert includes the parameter CHARACTERISTICS=\"public.accessibility.describes-video\" in the EXT-X-MEDIA entry for this track. When you keep the default choice, Don't flag, MediaConvert leaves this parameter out. The DVS flag can help with accessibility on Apple devices. For more information, see the Apple documentation." } }, "CmfcIFrameOnlyManifest": { - "base": "Choose Include (INCLUDE) to have MediaConvert generate an HLS child manifest that lists only the I-frames for this rendition, in addition to your regular manifest for this rendition. You might use this manifest as part of a workflow that creates preview functions for your video. MediaConvert adds both the I-frame only child manifest and the regular child manifest to the parent manifest. When you don't need the I-frame only child manifest, keep the default value Exclude (EXCLUDE).", + "base": "Choose Include to have MediaConvert generate an HLS child manifest that lists only the I-frames for this rendition, in addition to your regular manifest for this rendition. You might use this manifest as part of a workflow that creates preview functions for your video. MediaConvert adds both the I-frame only child manifest and the regular child manifest to the parent manifest. When you don't need the I-frame only child manifest, keep the default value Exclude.", "refs": { - "CmfcSettings$IFrameOnlyManifest": "Choose Include (INCLUDE) to have MediaConvert generate an HLS child manifest that lists only the I-frames for this rendition, in addition to your regular manifest for this rendition. You might use this manifest as part of a workflow that creates preview functions for your video. MediaConvert adds both the I-frame only child manifest and the regular child manifest to the parent manifest. When you don't need the I-frame only child manifest, keep the default value Exclude (EXCLUDE)." + "CmfcSettings$IFrameOnlyManifest": "Choose Include to have MediaConvert generate an HLS child manifest that lists only the I-frames for this rendition, in addition to your regular manifest for this rendition. You might use this manifest as part of a workflow that creates preview functions for your video. MediaConvert adds both the I-frame only child manifest and the regular child manifest to the parent manifest. When you don't need the I-frame only child manifest, keep the default value Exclude." } }, "CmfcKlvMetadata": { @@ -813,21 +813,21 @@ } }, "CmfcManifestMetadataSignaling": { - "base": "To add an InbandEventStream element in your output MPD manifest for each type of event message, set Manifest metadata signaling to Enabled. For ID3 event messages, the InbandEventStream element schemeIdUri will be same value that you specify for ID3 metadata scheme ID URI. For SCTE35 event messages, the InbandEventStream element schemeIdUri will be \"urn:scte:scte35:2013:bin\". To leave these elements out of your output MPD manifest, set Manifest metadata signaling to Disabled. To enable Manifest metadata signaling, you must also set SCTE-35 source to Passthrough, ESAM SCTE-35 to insert, or ID3 metadata (TimedMetadata) to Passthrough.", + "base": "To add an InbandEventStream element in your output MPD manifest for each type of event message, set Manifest metadata signaling to Enabled. For ID3 event messages, the InbandEventStream element schemeIdUri will be the same value that you specify for ID3 metadata scheme ID URI. For SCTE35 event messages, the InbandEventStream element schemeIdUri will be \"urn:scte:scte35:2013:bin\". 
To leave these elements out of your output MPD manifest, set Manifest metadata signaling to Disabled. To enable Manifest metadata signaling, you must also set SCTE-35 source to Passthrough, ESAM SCTE-35 to insert, or ID3 metadata to Passthrough.", "refs": { - "CmfcSettings$ManifestMetadataSignaling": "To add an InbandEventStream element in your output MPD manifest for each type of event message, set Manifest metadata signaling to Enabled. For ID3 event messages, the InbandEventStream element schemeIdUri will be same value that you specify for ID3 metadata scheme ID URI. For SCTE35 event messages, the InbandEventStream element schemeIdUri will be \"urn:scte:scte35:2013:bin\". To leave these elements out of your output MPD manifest, set Manifest metadata signaling to Disabled. To enable Manifest metadata signaling, you must also set SCTE-35 source to Passthrough, ESAM SCTE-35 to insert, or ID3 metadata (TimedMetadata) to Passthrough." + "CmfcSettings$ManifestMetadataSignaling": "To add an InbandEventStream element in your output MPD manifest for each type of event message, set Manifest metadata signaling to Enabled. For ID3 event messages, the InbandEventStream element schemeIdUri will be the same value that you specify for ID3 metadata scheme ID URI. For SCTE35 event messages, the InbandEventStream element schemeIdUri will be \"urn:scte:scte35:2013:bin\". To leave these elements out of your output MPD manifest, set Manifest metadata signaling to Disabled. To enable Manifest metadata signaling, you must also set SCTE-35 source to Passthrough, ESAM SCTE-35 to insert, or ID3 metadata to Passthrough." } }, "CmfcScte35Esam": { - "base": "Use this setting only when you specify SCTE-35 markers from ESAM. Choose INSERT to put SCTE-35 markers in this output at the insertion points that you specify in an ESAM XML document. Provide the document in the setting SCC XML (sccXml).", + "base": "Use this setting only when you specify SCTE-35 markers from ESAM. Choose INSERT to put SCTE-35 markers in this output at the insertion points that you specify in an ESAM XML document. Provide the document in the setting SCC XML.", "refs": { - "CmfcSettings$Scte35Esam": "Use this setting only when you specify SCTE-35 markers from ESAM. Choose INSERT to put SCTE-35 markers in this output at the insertion points that you specify in an ESAM XML document. Provide the document in the setting SCC XML (sccXml)." + "CmfcSettings$Scte35Esam": "Use this setting only when you specify SCTE-35 markers from ESAM. Choose INSERT to put SCTE-35 markers in this output at the insertion points that you specify in an ESAM XML document. Provide the document in the setting SCC XML." } }, "CmfcScte35Source": { - "base": "Ignore this setting unless you have SCTE-35 markers in your input video file. Choose Passthrough (PASSTHROUGH) if you want SCTE-35 markers that appear in your input to also appear in this output. Choose None (NONE) if you don't want those SCTE-35 markers in this output.", + "base": "Ignore this setting unless you have SCTE-35 markers in your input video file. Choose Passthrough if you want SCTE-35 markers that appear in your input to also appear in this output. Choose None if you don't want those SCTE-35 markers in this output.", "refs": { - "CmfcSettings$Scte35Source": "Ignore this setting unless you have SCTE-35 markers in your input video file. Choose Passthrough (PASSTHROUGH) if you want SCTE-35 markers that appear in your input to also appear in this output. 
Choose None (NONE) if you don't want those SCTE-35 markers in this output." + "CmfcSettings$Scte35Source": "Ignore this setting unless you have SCTE-35 markers in your input video file. Choose Passthrough if you want SCTE-35 markers that appear in your input to also appear in this output. Choose None if you don't want those SCTE-35 markers in this output." } }, "CmfcSettings": { @@ -837,15 +837,15 @@ } }, "CmfcTimedMetadata": { - "base": "To include ID3 metadata in this output: Set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH). Specify this ID3 metadata in Custom ID3 metadata inserter (timedMetadataInsertion). MediaConvert writes each instance of ID3 metadata in a separate Event Message (eMSG) box. To exclude this ID3 metadata: Set ID3 metadata to None (NONE) or leave blank.", + "base": "To include ID3 metadata in this output: Set ID3 metadata to Passthrough. Specify this ID3 metadata in Custom ID3 metadata inserter. MediaConvert writes each instance of ID3 metadata in a separate Event Message (eMSG) box. To exclude this ID3 metadata: Set ID3 metadata to None or leave blank.", "refs": { - "CmfcSettings$TimedMetadata": "To include ID3 metadata in this output: Set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH). Specify this ID3 metadata in Custom ID3 metadata inserter (timedMetadataInsertion). MediaConvert writes each instance of ID3 metadata in a separate Event Message (eMSG) box. To exclude this ID3 metadata: Set ID3 metadata to None (NONE) or leave blank." + "CmfcSettings$TimedMetadata": "To include ID3 metadata in this output: Set ID3 metadata to Passthrough. Specify this ID3 metadata in Custom ID3 metadata inserter. MediaConvert writes each instance of ID3 metadata in a separate Event Message (eMSG) box. To exclude this ID3 metadata: Set ID3 metadata to None or leave blank." } }, "CmfcTimedMetadataBoxVersion": { - "base": "Specify the event message box (eMSG) version for ID3 timed metadata in your output.\nFor more information, see ISO/IEC 23009-1:2022 section 5.10.3.3.3 Syntax.\nLeave blank to use the default value Version 0.\nWhen you specify Version 1, you must also set ID3 metadata (timedMetadata) to Passthrough.", + "base": "Specify the event message box (eMSG) version for ID3 timed metadata in your output.\nFor more information, see ISO/IEC 23009-1:2022 section 5.10.3.3.3 Syntax.\nLeave blank to use the default value Version 0.\nWhen you specify Version 1, you must also set ID3 metadata to Passthrough.", "refs": { - "CmfcSettings$TimedMetadataBoxVersion": "Specify the event message box (eMSG) version for ID3 timed metadata in your output.\nFor more information, see ISO/IEC 23009-1:2022 section 5.10.3.3.3 Syntax.\nLeave blank to use the default value Version 0.\nWhen you specify Version 1, you must also set ID3 metadata (timedMetadata) to Passthrough." + "CmfcSettings$TimedMetadataBoxVersion": "Specify the event message box (eMSG) version for ID3 timed metadata in your output.\nFor more information, see ISO/IEC 23009-1:2022 section 5.10.3.3.3 Syntax.\nLeave blank to use the default value Version 0.\nWhen you specify Version 1, you must also set ID3 metadata to Passthrough." } }, "ColorCorrector": { @@ -855,9 +855,9 @@ } }, "ColorMetadata": { - "base": "Choose Insert (INSERT) for this setting to include color metadata in this output. Choose Ignore (IGNORE) to exclude color metadata from this output. If you don't specify a value, the service sets this to Insert by default.", + "base": "Choose Insert for this setting to include color metadata in this output. 
Choose Ignore to exclude color metadata from this output. If you don't specify a value, the service sets this to Insert by default.", "refs": { - "VideoDescription$ColorMetadata": "Choose Insert (INSERT) for this setting to include color metadata in this output. Choose Ignore (IGNORE) to exclude color metadata from this output. If you don't specify a value, the service sets this to Insert by default." + "VideoDescription$ColorMetadata": "Choose Insert for this setting to include color metadata in this output. Choose Ignore to exclude color metadata from this output. If you don't specify a value, the service sets this to Insert by default." } }, "ColorSpace": { @@ -873,9 +873,9 @@ } }, "ColorSpaceUsage": { - "base": "There are two sources for color metadata, the input file and the job input settings Color space (ColorSpace) and HDR master display information settings(Hdr10Metadata). The Color space usage setting determines which takes precedence. Choose Force (FORCE) to use color metadata from the input job settings. If you don't specify values for those settings, the service defaults to using metadata from your input. FALLBACK - Choose Fallback (FALLBACK) to use color metadata from the source when it is present. If there's no color metadata in your input file, the service defaults to using values you specify in the input settings.", + "base": "There are two sources for color metadata, the input file and the job input settings Color space and HDR master display information settings. The Color space usage setting determines which takes precedence. Choose Force to use color metadata from the input job settings. If you don't specify values for those settings, the service defaults to using metadata from your input. Choose Fallback to use color metadata from the source when it is present. If there's no color metadata in your input file, the service defaults to using values you specify in the input settings.", "refs": { - "VideoSelector$ColorSpaceUsage": "There are two sources for color metadata, the input file and the job input settings Color space (ColorSpace) and HDR master display information settings(Hdr10Metadata). The Color space usage setting determines which takes precedence. Choose Force (FORCE) to use color metadata from the input job settings. If you don't specify values for those settings, the service defaults to using metadata from your input. FALLBACK - Choose Fallback (FALLBACK) to use color metadata from the source when it is present. If there's no color metadata in your input file, the service defaults to using values you specify in the input settings." + "VideoSelector$ColorSpaceUsage": "There are two sources for color metadata, the input file and the job input settings Color space and HDR master display information settings. The Color space usage setting determines which takes precedence. Choose Force to use color metadata from the input job settings. If you don't specify values for those settings, the service defaults to using metadata from your input. Choose Fallback to use color metadata from the source when it is present. If there's no color metadata in your input file, the service defaults to using values you specify in the input settings." } }, "Commitment": { @@ -962,15 +962,15 @@ } }, "DashIsoGroupAudioChannelConfigSchemeIdUri": { - "base": "Use this setting only when your audio codec is a Dolby one (AC3, EAC3, or Atmos) and your downstream workflow requires that your DASH manifest use the Dolby channel configuration tag, rather than the MPEG one. 
For example, you might need to use this to make dynamic ad insertion work. Specify which audio channel configuration scheme ID URI MediaConvert writes in your DASH manifest. Keep the default value, MPEG channel configuration (MPEG_CHANNEL_CONFIGURATION), to have MediaConvert write this: urn:mpeg:mpegB:cicp:ChannelConfiguration. Choose Dolby channel configuration (DOLBY_CHANNEL_CONFIGURATION) to have MediaConvert write this instead: tag:dolby.com,2014:dash:audio_channel_configuration:2011.", + "base": "Use this setting only when your audio codec is a Dolby one (AC3, EAC3, or Atmos) and your downstream workflow requires that your DASH manifest use the Dolby channel configuration tag, rather than the MPEG one. For example, you might need to use this to make dynamic ad insertion work. Specify which audio channel configuration scheme ID URI MediaConvert writes in your DASH manifest. Keep the default value, MPEG channel configuration, to have MediaConvert write this: urn:mpeg:mpegB:cicp:ChannelConfiguration. Choose Dolby channel configuration to have MediaConvert write this instead: tag:dolby.com,2014:dash:audio_channel_configuration:2011.", "refs": { - "DashIsoGroupSettings$AudioChannelConfigSchemeIdUri": "Use this setting only when your audio codec is a Dolby one (AC3, EAC3, or Atmos) and your downstream workflow requires that your DASH manifest use the Dolby channel configuration tag, rather than the MPEG one. For example, you might need to use this to make dynamic ad insertion work. Specify which audio channel configuration scheme ID URI MediaConvert writes in your DASH manifest. Keep the default value, MPEG channel configuration (MPEG_CHANNEL_CONFIGURATION), to have MediaConvert write this: urn:mpeg:mpegB:cicp:ChannelConfiguration. Choose Dolby channel configuration (DOLBY_CHANNEL_CONFIGURATION) to have MediaConvert write this instead: tag:dolby.com,2014:dash:audio_channel_configuration:2011." + "DashIsoGroupSettings$AudioChannelConfigSchemeIdUri": "Use this setting only when your audio codec is a Dolby one (AC3, EAC3, or Atmos) and your downstream workflow requires that your DASH manifest use the Dolby channel configuration tag, rather than the MPEG one. For example, you might need to use this to make dynamic ad insertion work. Specify which audio channel configuration scheme ID URI MediaConvert writes in your DASH manifest. Keep the default value, MPEG channel configuration, to have MediaConvert write this: urn:mpeg:mpegB:cicp:ChannelConfiguration. Choose Dolby channel configuration to have MediaConvert write this instead: tag:dolby.com,2014:dash:audio_channel_configuration:2011." } }, "DashIsoGroupSettings": { - "base": "Settings related to your DASH output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html. When you work directly in your JSON job specification, include this object and any required children when you set Type, under OutputGroupSettings, to DASH_ISO_GROUP_SETTINGS.", + "base": "Settings related to your DASH output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html.", "refs": { - "OutputGroupSettings$DashIsoGroupSettings": "Settings related to your DASH output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html. When you work directly in your JSON job specification, include this object and any required children when you set Type, under OutputGroupSettings, to DASH_ISO_GROUP_SETTINGS." 
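The DashIsoGroupSettings entries above map directly onto this SDK's generated Go types. As a rough, illustrative sketch only (not a complete or validated job: the bucket path and segment length are hypothetical placeholders, and a real job also needs a role, inputs, and at least one output), the following shows where these fields sit in Go:

    package example

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/mediaconvert"
    )

    // exampleDashOutputGroup sketches a DASH ISO output group. The values
    // here are illustrative, not recommendations.
    func exampleDashOutputGroup() *mediaconvert.OutputGroup {
        return &mediaconvert.OutputGroup{
            Name: aws.String("DASH ISO"),
            OutputGroupSettings: &mediaconvert.OutputGroupSettings{
                Type: aws.String(mediaconvert.OutputGroupTypeDashIsoGroupSettings),
                DashIsoGroupSettings: &mediaconvert.DashIsoGroupSettings{
                    Destination:   aws.String("s3://example-bucket/dash/"), // hypothetical bucket
                    SegmentLength: aws.Int64(30),                           // placeholder duration
                    // As the DashIsoMpdProfile documentation below notes, the
                    // on-demand profile also requires single-file segment control.
                    MpdProfile:     aws.String(mediaconvert.DashIsoMpdProfileOnDemandProfile),
                    SegmentControl: aws.String(mediaconvert.DashIsoSegmentControlSingleFile),
                },
            },
        }
    }
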
+ "OutputGroupSettings$DashIsoGroupSettings": "Settings related to your DASH output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html." } }, "DashIsoHbbtvCompliance": { @@ -980,9 +980,9 @@ } }, "DashIsoImageBasedTrickPlay": { - "base": "Specify whether MediaConvert generates images for trick play. Keep the default value, None (NONE), to not generate any images. Choose Thumbnail (THUMBNAIL) to generate tiled thumbnails. Choose Thumbnail and full frame (THUMBNAIL_AND_FULLFRAME) to generate tiled thumbnails and full-resolution images of single frames. MediaConvert adds an entry in the .mpd manifest for each set of images that you generate. A common application for these images is Roku trick mode. The thumbnails and full-frame images that MediaConvert creates with this feature are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md", + "base": "Specify whether MediaConvert generates images for trick play. Keep the default value, None, to not generate any images. Choose Thumbnail to generate tiled thumbnails. Choose Thumbnail and full frame to generate tiled thumbnails and full-resolution images of single frames. MediaConvert adds an entry in the .mpd manifest for each set of images that you generate. A common application for these images is Roku trick mode. The thumbnails and full-frame images that MediaConvert creates with this feature are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md", "refs": { - "DashIsoGroupSettings$ImageBasedTrickPlay": "Specify whether MediaConvert generates images for trick play. Keep the default value, None (NONE), to not generate any images. Choose Thumbnail (THUMBNAIL) to generate tiled thumbnails. Choose Thumbnail and full frame (THUMBNAIL_AND_FULLFRAME) to generate tiled thumbnails and full-resolution images of single frames. MediaConvert adds an entry in the .mpd manifest for each set of images that you generate. A common application for these images is Roku trick mode. The thumbnails and full-frame images that MediaConvert creates with this feature are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md" + "DashIsoGroupSettings$ImageBasedTrickPlay": "Specify whether MediaConvert generates images for trick play. Keep the default value, None, to not generate any images. Choose Thumbnail to generate tiled thumbnails. Choose Thumbnail and full frame to generate tiled thumbnails and full-resolution images of single frames. MediaConvert adds an entry in the .mpd manifest for each set of images that you generate. A common application for these images is Roku trick mode. The thumbnails and full-frame images that MediaConvert creates with this feature are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md" } }, "DashIsoImageBasedTrickPlaySettings": { @@ -1004,21 +1004,21 @@ } }, "DashIsoMpdProfile": { - "base": "Specify whether your DASH profile is on-demand or main. When you choose Main profile (MAIN_PROFILE), the service signals urn:mpeg:dash:profile:isoff-main:2011 in your .mpd DASH manifest. When you choose On-demand (ON_DEMAND_PROFILE), the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd. 
When you choose On-demand, you must also set the output group setting Segment control (SegmentControl) to Single file (SINGLE_FILE).", + "base": "Specify whether your DASH profile is on-demand or main. When you choose Main profile, the service signals urn:mpeg:dash:profile:isoff-main:2011 in your .mpd DASH manifest. When you choose On-demand, the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd. When you choose On-demand, you must also set the output group setting Segment control to Single file.", "refs": { - "DashIsoGroupSettings$MpdProfile": "Specify whether your DASH profile is on-demand or main. When you choose Main profile (MAIN_PROFILE), the service signals urn:mpeg:dash:profile:isoff-main:2011 in your .mpd DASH manifest. When you choose On-demand (ON_DEMAND_PROFILE), the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd. When you choose On-demand, you must also set the output group setting Segment control (SegmentControl) to Single file (SINGLE_FILE)." + "DashIsoGroupSettings$MpdProfile": "Specify whether your DASH profile is on-demand or main. When you choose Main profile, the service signals urn:mpeg:dash:profile:isoff-main:2011 in your .mpd DASH manifest. When you choose On-demand, the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd. When you choose On-demand, you must also set the output group setting Segment control to Single file." } }, "DashIsoPlaybackDeviceCompatibility": { - "base": "This setting can improve the compatibility of your output with video players on obsolete devices. It applies only to DASH H.264 outputs with DRM encryption. Choose Unencrypted SEI (UNENCRYPTED_SEI) only to correct problems with playback on older devices. Otherwise, keep the default setting CENC v1 (CENC_V1). If you choose Unencrypted SEI, for that output, the service will exclude the access unit delimiter and will leave the SEI NAL units unencrypted.", + "base": "This setting can improve the compatibility of your output with video players on obsolete devices. It applies only to DASH H.264 outputs with DRM encryption. Choose Unencrypted SEI only to correct problems with playback on older devices. Otherwise, keep the default setting CENC v1. If you choose Unencrypted SEI, for that output, the service will exclude the access unit delimiter and will leave the SEI NAL units unencrypted.", "refs": { - "DashIsoEncryptionSettings$PlaybackDeviceCompatibility": "This setting can improve the compatibility of your output with video players on obsolete devices. It applies only to DASH H.264 outputs with DRM encryption. Choose Unencrypted SEI (UNENCRYPTED_SEI) only to correct problems with playback on older devices. Otherwise, keep the default setting CENC v1 (CENC_V1). If you choose Unencrypted SEI, for that output, the service will exclude the access unit delimiter and will leave the SEI NAL units unencrypted." + "DashIsoEncryptionSettings$PlaybackDeviceCompatibility": "This setting can improve the compatibility of your output with video players on obsolete devices. It applies only to DASH H.264 outputs with DRM encryption. Choose Unencrypted SEI only to correct problems with playback on older devices. Otherwise, keep the default setting CENC v1. If you choose Unencrypted SEI, for that output, the service will exclude the access unit delimiter and will leave the SEI NAL units unencrypted." 
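Continuing the sketch above (again illustrative: the SPEKE URL and resource ID are hypothetical placeholders, not a validated DRM configuration), the playback-device workaround described in DashIsoPlaybackDeviceCompatibility is a single field on the output group's encryption settings:

    // exampleDashEncryption sketches DRM settings with the legacy-player
    // workaround enabled. Assign the result to the Encryption field of the
    // DashIsoGroupSettings shown in the previous sketch.
    func exampleDashEncryption() *mediaconvert.DashIsoEncryptionSettings {
        return &mediaconvert.DashIsoEncryptionSettings{
            // Keep the default CENC_V1 unless older devices have playback problems.
            PlaybackDeviceCompatibility: aws.String(mediaconvert.DashIsoPlaybackDeviceCompatibilityUnencryptedSei),
            SpekeKeyProvider: &mediaconvert.SpekeKeyProvider{
                ResourceId: aws.String("example-resource"),                                // hypothetical
                SystemIds:  []*string{aws.String("edef8ba9-79d6-4ace-a3c8-27dcd51d21ed")}, // Widevine system ID
                Url:        aws.String("https://speke.example.com/v1.0"),                  // hypothetical endpoint
            },
        }
    }
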
} }, "DashIsoPtsOffsetHandlingForBFrames": { - "base": "Use this setting only when your output video stream has B-frames, which causes the initial presentation time stamp (PTS) to be offset from the initial decode time stamp (DTS). Specify how MediaConvert handles PTS when writing time stamps in output DASH manifests. Choose Match initial PTS (MATCH_INITIAL_PTS) when you want MediaConvert to use the initial PTS as the first time stamp in the manifest. Choose Zero-based (ZERO_BASED) to have MediaConvert ignore the initial PTS in the video stream and instead write the initial time stamp as zero in the manifest. For outputs that don't have B-frames, the time stamps in your DASH manifests start at zero regardless of your choice here.", + "base": "Use this setting only when your output video stream has B-frames, which causes the initial presentation time stamp (PTS) to be offset from the initial decode time stamp (DTS). Specify how MediaConvert handles PTS when writing time stamps in output DASH manifests. Choose Match initial PTS when you want MediaConvert to use the initial PTS as the first time stamp in the manifest. Choose Zero-based to have MediaConvert ignore the initial PTS in the video stream and instead write the initial time stamp as zero in the manifest. For outputs that don't have B-frames, the time stamps in your DASH manifests start at zero regardless of your choice here.", "refs": { - "DashIsoGroupSettings$PtsOffsetHandlingForBFrames": "Use this setting only when your output video stream has B-frames, which causes the initial presentation time stamp (PTS) to be offset from the initial decode time stamp (DTS). Specify how MediaConvert handles PTS when writing time stamps in output DASH manifests. Choose Match initial PTS (MATCH_INITIAL_PTS) when you want MediaConvert to use the initial PTS as the first time stamp in the manifest. Choose Zero-based (ZERO_BASED) to have MediaConvert ignore the initial PTS in the video stream and instead write the initial time stamp as zero in the manifest. For outputs that don't have B-frames, the time stamps in your DASH manifests start at zero regardless of your choice here." + "DashIsoGroupSettings$PtsOffsetHandlingForBFrames": "Use this setting only when your output video stream has B-frames, which causes the initial presentation time stamp (PTS) to be offset from the initial decode time stamp (DTS). Specify how MediaConvert handles PTS when writing time stamps in output DASH manifests. Choose Match initial PTS when you want MediaConvert to use the initial PTS as the first time stamp in the manifest. Choose Zero-based to have MediaConvert ignore the initial PTS in the video stream and instead write the initial time stamp as zero in the manifest. For outputs that don't have B-frames, the time stamps in your DASH manifests start at zero regardless of your choice here." } }, "DashIsoSegmentControl": { @@ -1028,9 +1028,9 @@ } }, "DashIsoSegmentLengthControl": { - "base": "Specify how you want MediaConvert to determine the segment length. Choose Exact (EXACT) to have the encoder use the exact length that you specify with the setting Segment length (SegmentLength). This might result in extra I-frames. Choose Multiple of GOP (GOP_MULTIPLE) to have the encoder round up the segment lengths to match the next GOP boundary.", + "base": "Specify how you want MediaConvert to determine the segment length. Choose Exact to have the encoder use the exact length that you specify with the setting Segment length. This might result in extra I-frames. 
Choose Multiple of GOP to have the encoder round up the segment lengths to match the next GOP boundary.", "refs": { - "DashIsoGroupSettings$SegmentLengthControl": "Specify how you want MediaConvert to determine the segment length. Choose Exact (EXACT) to have the encoder use the exact length that you specify with the setting Segment length (SegmentLength). This might result in extra I-frames. Choose Multiple of GOP (GOP_MULTIPLE) to have the encoder round up the segment lengths to match the next GOP boundary." + "DashIsoGroupSettings$SegmentLengthControl": "Specify how you want MediaConvert to determine the segment length. Choose Exact to have the encoder use the exact length that you specify with the setting Segment length. This might result in extra I-frames. Choose Multiple of GOP to have the encoder round up the segment lengths to match the next GOP boundary." } }, "DashIsoVideoCompositionOffsets": { @@ -1040,7 +1040,7 @@ } }, "DashIsoWriteSegmentTimelineInRepresentation": { - "base": "When you enable Precise segment duration in manifests (writeSegmentTimelineInRepresentation), your DASH manifest shows precise segment durations. The segment duration information appears inside the SegmentTimeline element, inside SegmentTemplate at the Representation level. When this feature isn't enabled, the segment durations in your DASH manifest are approximate. The segment duration information appears in the duration attribute of the SegmentTemplate element.", + "base": "When you enable Precise segment duration in manifests, your DASH manifest shows precise segment durations. The segment duration information appears inside the SegmentTimeline element, inside SegmentTemplate at the Representation level. When this feature isn't enabled, the segment durations in your DASH manifest are approximate. The segment duration information appears in the duration attribute of the SegmentTemplate element.", "refs": { "DashIsoGroupSettings$WriteSegmentTimelineInRepresentation": "If you get an HTTP error in the 400 range when you play back your DASH output, enable this setting and run your transcoding job again. When you enable this setting, the service writes precise segment durations in the DASH manifest. The segment duration information appears inside the SegmentTimeline element, inside SegmentTemplate at the Representation level. When you don't enable this setting, the service writes approximate segment durations in your DASH manifest." } @@ -1077,9 +1077,9 @@ } }, "DeinterlacerMode": { - "base": "Use Deinterlacer (DeinterlaceMode) to choose how the service will do deinterlacing. Default is Deinterlace. - Deinterlace converts interlaced to progressive. - Inverse telecine converts Hard Telecine 29.97i to progressive 23.976p. - Adaptive auto-detects and converts to progressive.", + "base": "Use Deinterlacer to choose how the service will do deinterlacing. Default is Deinterlace.\n- Deinterlace converts interlaced to progressive.\n- Inverse telecine converts Hard Telecine 29.97i to progressive 23.976p.\n- Adaptive auto-detects and converts to progressive.", "refs": { - "Deinterlacer$Mode": "Use Deinterlacer (DeinterlaceMode) to choose how the service will do deinterlacing. Default is Deinterlace. - Deinterlace converts interlaced to progressive. - Inverse telecine converts Hard Telecine 29.97i to progressive 23.976p. - Adaptive auto-detects and converts to progressive." + "Deinterlacer$Mode": "Use Deinterlacer to choose how the service will do deinterlacing. 
Default is Deinterlace.\n- Deinterlace converts interlaced to progressive.\n- Inverse telecine converts Hard Telecine 29.97i to progressive 23.976p.\n- Adaptive auto-detects and converts to progressive." } }, "DeleteJobTemplateRequest": { @@ -1189,27 +1189,27 @@ } }, "DropFrameTimecode": { - "base": "Applies only to 29.97 fps outputs. When this feature is enabled, the service will use drop-frame timecode on outputs. If it is not possible to use drop-frame timecode, the system will fall back to non-drop-frame. This setting is enabled by default when Timecode insertion (TimecodeInsertion) is enabled.", + "base": "Applies only to 29.97 fps outputs. When this feature is enabled, the service will use drop-frame timecode on outputs. If it is not possible to use drop-frame timecode, the system will fall back to non-drop-frame. This setting is enabled by default when Timecode insertion is enabled.", "refs": { - "VideoDescription$DropFrameTimecode": "Applies only to 29.97 fps outputs. When this feature is enabled, the service will use drop-frame timecode on outputs. If it is not possible to use drop-frame timecode, the system will fall back to non-drop-frame. This setting is enabled by default when Timecode insertion (TimecodeInsertion) is enabled." + "VideoDescription$DropFrameTimecode": "Applies only to 29.97 fps outputs. When this feature is enabled, the service will use drop-frame timecode on outputs. If it is not possible to use drop-frame timecode, the system will fall back to non-drop-frame. This setting is enabled by default when Timecode insertion is enabled." } }, "DvbNitSettings": { - "base": "Use these settings to insert a DVB Network Information Table (NIT) in the transport stream of this output. When you work directly in your JSON job specification, include this object only when your job has a transport stream output and the container settings contain the object M2tsSettings.", + "base": "Use these settings to insert a DVB Network Information Table (NIT) in the transport stream of this output.", "refs": { - "M2tsSettings$DvbNitSettings": "Use these settings to insert a DVB Network Information Table (NIT) in the transport stream of this output. When you work directly in your JSON job specification, include this object only when your job has a transport stream output and the container settings contain the object M2tsSettings." + "M2tsSettings$DvbNitSettings": "Use these settings to insert a DVB Network Information Table (NIT) in the transport stream of this output." } }, "DvbSdtSettings": { - "base": "Use these settings to insert a DVB Service Description Table (SDT) in the transport stream of this output. When you work directly in your JSON job specification, include this object only when your job has a transport stream output and the container settings contain the object M2tsSettings.", + "base": "Use these settings to insert a DVB Service Description Table (SDT) in the transport stream of this output.", "refs": { - "M2tsSettings$DvbSdtSettings": "Use these settings to insert a DVB Service Description Table (SDT) in the transport stream of this output. When you work directly in your JSON job specification, include this object only when your job has a transport stream output and the container settings contain the object M2tsSettings." + "M2tsSettings$DvbSdtSettings": "Use these settings to insert a DVB Service Description Table (SDT) in the transport stream of this output." } }, "DvbSubDestinationSettings": { - "base": "Settings related to DVB-Sub captions. 
Set up DVB-Sub captions in the same output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/dvb-sub-output-captions.html. When you work directly in your JSON job specification, include this object and any required children when you set destinationType to DVB_SUB.", + "base": "Settings related to DVB-Sub captions. Set up DVB-Sub captions in the same output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/dvb-sub-output-captions.html.", "refs": { - "CaptionDestinationSettings$DvbSubDestinationSettings": "Settings related to DVB-Sub captions. Set up DVB-Sub captions in the same output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/dvb-sub-output-captions.html. When you work directly in your JSON job specification, include this object and any required children when you set destinationType to DVB_SUB." + "CaptionDestinationSettings$DvbSubDestinationSettings": "Settings related to DVB-Sub captions. Set up DVB-Sub captions in the same output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/dvb-sub-output-captions.html." } }, "DvbSubSourceSettings": { @@ -1219,9 +1219,9 @@ } }, "DvbSubSubtitleFallbackFont": { - "base": "Specify the font that you want the service to use for your burn in captions when your input captions specify a font that MediaConvert doesn't support. When you set Fallback font (FallbackFont) to best match (BEST_MATCH), or leave blank, MediaConvert uses a supported font that most closely matches the font that your input captions specify. When there are multiple unsupported fonts in your input captions, MediaConvert matches each font with the supported font that matches best. When you explicitly choose a replacement font, MediaConvert uses that font to replace all unsupported fonts from your input.", + "base": "Specify the font that you want the service to use for your burn in captions when your input captions specify a font that MediaConvert doesn't support. When you set Fallback font to best match, or leave blank, MediaConvert uses a supported font that most closely matches the font that your input captions specify. When there are multiple unsupported fonts in your input captions, MediaConvert matches each font with the supported font that matches best. When you explicitly choose a replacement font, MediaConvert uses that font to replace all unsupported fonts from your input.", "refs": { - "DvbSubDestinationSettings$FallbackFont": "Specify the font that you want the service to use for your burn in captions when your input captions specify a font that MediaConvert doesn't support. When you set Fallback font (FallbackFont) to best match (BEST_MATCH), or leave blank, MediaConvert uses a supported font that most closely matches the font that your input captions specify. When there are multiple unsupported fonts in your input captions, MediaConvert matches each font with the supported font that matches best. When you explicitly choose a replacement font, MediaConvert uses that font to replace all unsupported fonts from your input." + "DvbSubDestinationSettings$FallbackFont": "Specify the font that you want the service to use for your burn in captions when your input captions specify a font that MediaConvert doesn't support. When you set Fallback font to best match, or leave blank, MediaConvert uses a supported font that most closely matches the font that your input captions specify. 
When there are multiple unsupported fonts in your input captions, MediaConvert matches each font with the supported font that matches best. When you explicitly choose a replacement font, MediaConvert uses that font to replace all unsupported fonts from your input." } }, "DvbSubtitleAlignment": { @@ -1231,45 +1231,45 @@ } }, "DvbSubtitleApplyFontColor": { - "base": "Ignore this setting unless Style Passthrough (StylePassthrough) is set to Enabled and Font color (FontColor) set to Black, Yellow, Red, Green, Blue, or Hex. Use Apply font color (ApplyFontColor) for additional font color controls. When you choose White text only (WHITE_TEXT_ONLY), or leave blank, your font color setting only applies to white text in your input captions. For example, if your font color setting is Yellow, and your input captions have red and white text, your output captions will have red and yellow text. When you choose ALL_TEXT, your font color setting applies to all of your output captions text.", + "base": "Ignore this setting unless Style Passthrough is set to Enabled and Font color is set to Black, Yellow, Red, Green, Blue, or Hex. Use Apply font color for additional font color controls. When you choose White text only, or leave blank, your font color setting only applies to white text in your input captions. For example, if your font color setting is Yellow, and your input captions have red and white text, your output captions will have red and yellow text. When you choose ALL_TEXT, your font color setting applies to all of your output captions text.", "refs": { - "DvbSubDestinationSettings$ApplyFontColor": "Ignore this setting unless Style Passthrough (StylePassthrough) is set to Enabled and Font color (FontColor) set to Black, Yellow, Red, Green, Blue, or Hex. Use Apply font color (ApplyFontColor) for additional font color controls. When you choose White text only (WHITE_TEXT_ONLY), or leave blank, your font color setting only applies to white text in your input captions. For example, if your font color setting is Yellow, and your input captions have red and white text, your output captions will have red and yellow text. When you choose ALL_TEXT, your font color setting applies to all of your output captions text." + "DvbSubDestinationSettings$ApplyFontColor": "Ignore this setting unless Style Passthrough is set to Enabled and Font color is set to Black, Yellow, Red, Green, Blue, or Hex. Use Apply font color for additional font color controls. When you choose White text only, or leave blank, your font color setting only applies to white text in your input captions. For example, if your font color setting is Yellow, and your input captions have red and white text, your output captions will have red and yellow text. When you choose ALL_TEXT, your font color setting applies to all of your output captions text." } }, "DvbSubtitleBackgroundColor": { - "base": "Specify the color of the rectangle behind the captions. Leave background color (BackgroundColor) blank and set Style passthrough (StylePassthrough) to enabled to use the background color data from your input captions, if present.", + "base": "Specify the color of the rectangle behind the captions. Leave background color blank and set Style passthrough to enabled to use the background color data from your input captions, if present.", "refs": { - "DvbSubDestinationSettings$BackgroundColor": "Specify the color of the rectangle behind the captions. 
Leave background color (BackgroundColor) blank and set Style passthrough (StylePassthrough) to enabled to use the background color data from your input captions, if present." + "DvbSubDestinationSettings$BackgroundColor": "Specify the color of the rectangle behind the captions. Leave background color blank and set Style passthrough to enabled to use the background color data from your input captions, if present." } }, "DvbSubtitleFontColor": { - "base": "Specify the color of the captions text. Leave Font color (FontColor) blank and set Style passthrough (StylePassthrough) to enabled to use the font color data from your input captions, if present. Within your job settings, all of your DVB-Sub settings must be identical.", + "base": "Specify the color of the captions text. Leave Font color blank and set Style passthrough to enabled to use the font color data from your input captions, if present. Within your job settings, all of your DVB-Sub settings must be identical.", "refs": { - "DvbSubDestinationSettings$FontColor": "Specify the color of the captions text. Leave Font color (FontColor) blank and set Style passthrough (StylePassthrough) to enabled to use the font color data from your input captions, if present. Within your job settings, all of your DVB-Sub settings must be identical." + "DvbSubDestinationSettings$FontColor": "Specify the color of the captions text. Leave Font color blank and set Style passthrough to enabled to use the font color data from your input captions, if present. Within your job settings, all of your DVB-Sub settings must be identical." } }, "DvbSubtitleOutlineColor": { - "base": "Specify font outline color. Leave Outline color (OutlineColor) blank and set Style passthrough (StylePassthrough) to enabled to use the font outline color data from your input captions, if present. Within your job settings, all of your DVB-Sub settings must be identical.", + "base": "Specify font outline color. Leave Outline color blank and set Style passthrough to enabled to use the font outline color data from your input captions, if present. Within your job settings, all of your DVB-Sub settings must be identical.", "refs": { - "DvbSubDestinationSettings$OutlineColor": "Specify font outline color. Leave Outline color (OutlineColor) blank and set Style passthrough (StylePassthrough) to enabled to use the font outline color data from your input captions, if present. Within your job settings, all of your DVB-Sub settings must be identical." + "DvbSubDestinationSettings$OutlineColor": "Specify font outline color. Leave Outline color blank and set Style passthrough to enabled to use the font outline color data from your input captions, if present. Within your job settings, all of your DVB-Sub settings must be identical." } }, "DvbSubtitleShadowColor": { - "base": "Specify the color of the shadow cast by the captions. Leave Shadow color (ShadowColor) blank and set Style passthrough (StylePassthrough) to enabled to use the shadow color data from your input captions, if present. Within your job settings, all of your DVB-Sub settings must be identical.", + "base": "Specify the color of the shadow cast by the captions. Leave Shadow color blank and set Style passthrough to enabled to use the shadow color data from your input captions, if present. Within your job settings, all of your DVB-Sub settings must be identical.", "refs": { - "DvbSubDestinationSettings$ShadowColor": "Specify the color of the shadow cast by the captions. 
Leave Shadow color (ShadowColor) blank and set Style passthrough (StylePassthrough) to enabled to use the shadow color data from your input captions, if present. Within your job settings, all of your DVB-Sub settings must be identical." + "DvbSubDestinationSettings$ShadowColor": "Specify the color of the shadow cast by the captions. Leave Shadow color blank and set Style passthrough to enabled to use the shadow color data from your input captions, if present. Within your job settings, all of your DVB-Sub settings must be identical." } }, "DvbSubtitleStylePassthrough": { - "base": "Set Style passthrough (StylePassthrough) to ENABLED to use the available style, color, and position information from your input captions. MediaConvert uses default settings for any missing style and position information in your input captions. Set Style passthrough to DISABLED, or leave blank, to ignore the style and position information from your input captions and use default settings: white text with black outlining, bottom-center positioning, and automatic sizing. Whether you set Style passthrough to enabled or not, you can also choose to manually override any of the individual style and position settings.", + "base": "Set Style passthrough to ENABLED to use the available style, color, and position information from your input captions. MediaConvert uses default settings for any missing style and position information in your input captions. Set Style passthrough to DISABLED, or leave blank, to ignore the style and position information from your input captions and use default settings: white text with black outlining, bottom-center positioning, and automatic sizing. Whether you set Style passthrough to enabled or not, you can also choose to manually override any of the individual style and position settings.", "refs": { - "DvbSubDestinationSettings$StylePassthrough": "Set Style passthrough (StylePassthrough) to ENABLED to use the available style, color, and position information from your input captions. MediaConvert uses default settings for any missing style and position information in your input captions. Set Style passthrough to DISABLED, or leave blank, to ignore the style and position information from your input captions and use default settings: white text with black outlining, bottom-center positioning, and automatic sizing. Whether you set Style passthrough to enabled or not, you can also choose to manually override any of the individual style and position settings." + "DvbSubDestinationSettings$StylePassthrough": "Set Style passthrough to ENABLED to use the available style, color, and position information from your input captions. MediaConvert uses default settings for any missing style and position information in your input captions. Set Style passthrough to DISABLED, or leave blank, to ignore the style and position information from your input captions and use default settings: white text with black outlining, bottom-center positioning, and automatic sizing. Whether you set Style passthrough to enabled or not, you can also choose to manually override any of the individual style and position settings." } }, "DvbSubtitleTeletextSpacing": { - "base": "Specify whether the Text spacing (TeletextSpacing) in your captions is set by the captions grid, or varies depending on letter width. Choose fixed grid (FIXED_GRID) to conform to the spacing specified in the captions file more accurately. Choose proportional (PROPORTIONAL) to make the text easier to read for closed captions. 
Within your job settings, all of your DVB-Sub settings must be identical.", + "base": "Specify whether the Text spacing in your captions is set by the captions grid, or varies depending on letter width. Choose fixed grid to conform to the spacing specified in the captions file more accurately. Choose proportional to make the text easier to read for closed captions. Within your job settings, all of your DVB-Sub settings must be identical.", "refs": { - "DvbSubDestinationSettings$TeletextSpacing": "Specify whether the Text spacing (TeletextSpacing) in your captions is set by the captions grid, or varies depending on letter width. Choose fixed grid (FIXED_GRID) to conform to the spacing specified in the captions file more accurately. Choose proportional (PROPORTIONAL) to make the text easier to read for closed captions. Within your job settings, all of your DVB-Sub settings must be identical." + "DvbSubDestinationSettings$TeletextSpacing": "Specify whether the Text spacing in your captions is set by the captions grid, or varies depending on letter width. Choose fixed grid to conform to the spacing specified in the captions file more accurately. Choose proportional to make the text easier to read for closed captions. Within your job settings, all of your DVB-Sub settings must be identical." } }, "DvbSubtitlingType": { @@ -1279,9 +1279,9 @@ } }, "DvbTdtSettings": { - "base": "Use these settings to insert a DVB Time and Date Table (TDT) in the transport stream of this output. When you work directly in your JSON job specification, include this object only when your job has a transport stream output and the container settings contain the object M2tsSettings.", + "base": "Use these settings to insert a DVB Time and Date Table (TDT) in the transport stream of this output.", "refs": { - "M2tsSettings$DvbTdtSettings": "Use these settings to insert a DVB Time and Date Table (TDT) in the transport stream of this output. When you work directly in your JSON job specification, include this object only when your job has a transport stream output and the container settings contain the object M2tsSettings." + "M2tsSettings$DvbTdtSettings": "Use these settings to insert a DVB Time and Date Table (TDT) in the transport stream of this output." } }, "DvbddsHandling": { @@ -1309,27 +1309,27 @@ } }, "Eac3AtmosDownmixControl": { - "base": "Specify whether MediaConvert should use any downmix metadata from your input file. Keep the default value, Custom (SPECIFIED) to provide downmix values in your job settings. Choose Follow source (INITIALIZE_FROM_SOURCE) to use the metadata from your input. Related settings--Use these settings to specify your downmix values: Left only/Right only surround (LoRoSurroundMixLevel), Left total/Right total surround (LtRtSurroundMixLevel), Left total/Right total center (LtRtCenterMixLevel), Left only/Right only center (LoRoCenterMixLevel), and Stereo downmix (StereoDownmix). When you keep Custom (SPECIFIED) for Downmix control (DownmixControl) and you don't specify values for the related settings, MediaConvert uses default values for those settings.", + "base": "Specify whether MediaConvert should use any downmix metadata from your input file. Keep the default value, Custom, to provide downmix values in your job settings. Choose Follow source to use the metadata from your input. Related settings--Use these settings to specify your downmix values: Left only/Right only surround, Left total/Right total surround, Left total/Right total center, Left only/Right only center, and Stereo downmix. 
When you keep Custom for Downmix control and you don't specify values for the related settings, MediaConvert uses default values for those settings.", "refs": { - "Eac3AtmosSettings$DownmixControl": "Specify whether MediaConvert should use any downmix metadata from your input file. Keep the default value, Custom (SPECIFIED) to provide downmix values in your job settings. Choose Follow source (INITIALIZE_FROM_SOURCE) to use the metadata from your input. Related settings--Use these settings to specify your downmix values: Left only/Right only surround (LoRoSurroundMixLevel), Left total/Right total surround (LtRtSurroundMixLevel), Left total/Right total center (LtRtCenterMixLevel), Left only/Right only center (LoRoCenterMixLevel), and Stereo downmix (StereoDownmix). When you keep Custom (SPECIFIED) for Downmix control (DownmixControl) and you don't specify values for the related settings, MediaConvert uses default values for those settings." + "Eac3AtmosSettings$DownmixControl": "Specify whether MediaConvert should use any downmix metadata from your input file. Keep the default value, Custom, to provide downmix values in your job settings. Choose Follow source to use the metadata from your input. Related settings--Use these settings to specify your downmix values: Left only/Right only surround, Left total/Right total surround, Left total/Right total center, Left only/Right only center, and Stereo downmix. When you keep Custom for Downmix control and you don't specify values for the related settings, MediaConvert uses default values for those settings." } }, "Eac3AtmosDynamicRangeCompressionLine": { - "base": "Choose the Dolby dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby stream for the line operating mode. Default value: Film light (ATMOS_STORAGE_DDP_COMPR_FILM_LIGHT) Related setting: To have MediaConvert use the value you specify here, keep the default value, Custom (SPECIFIED) for the setting Dynamic range control (DynamicRangeControl). Otherwise, MediaConvert ignores Dynamic range compression line (DynamicRangeCompressionLine). For information about the Dolby DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.", + "base": "Choose the Dolby dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby stream for the line operating mode. Default value: Film light. Related setting: To have MediaConvert use the value you specify here, keep the default value, Custom, for the setting Dynamic range control. Otherwise, MediaConvert ignores Dynamic range compression line. For information about the Dolby DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.", "refs": { - "Eac3AtmosSettings$DynamicRangeCompressionLine": "Choose the Dolby dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby stream for the line operating mode. Default value: Film light (ATMOS_STORAGE_DDP_COMPR_FILM_LIGHT) Related setting: To have MediaConvert use the value you specify here, keep the default value, Custom (SPECIFIED) for the setting Dynamic range control (DynamicRangeControl). Otherwise, MediaConvert ignores Dynamic range compression line (DynamicRangeCompressionLine). 
For information about the Dolby DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf." + "Eac3AtmosSettings$DynamicRangeCompressionLine": "Choose the Dolby dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby stream for the line operating mode. Default value: Film light. Related setting: To have MediaConvert use the value you specify here, keep the default value, Custom, for the setting Dynamic range control. Otherwise, MediaConvert ignores Dynamic range compression line. For information about the Dolby DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf." } }, "Eac3AtmosDynamicRangeCompressionRf": { - "base": "Choose the Dolby dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby stream for the RF operating mode. Default value: Film light (ATMOS_STORAGE_DDP_COMPR_FILM_LIGHT) Related setting: To have MediaConvert use the value you specify here, keep the default value, Custom (SPECIFIED) for the setting Dynamic range control (DynamicRangeControl). Otherwise, MediaConvert ignores Dynamic range compression RF (DynamicRangeCompressionRf). For information about the Dolby DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.", + "base": "Choose the Dolby dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby stream for the RF operating mode. Default value: Film light. Related setting: To have MediaConvert use the value you specify here, keep the default value, Custom, for the setting Dynamic range control. Otherwise, MediaConvert ignores Dynamic range compression RF. For information about the Dolby DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.", "refs": { - "Eac3AtmosSettings$DynamicRangeCompressionRf": "Choose the Dolby dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby stream for the RF operating mode. Default value: Film light (ATMOS_STORAGE_DDP_COMPR_FILM_LIGHT) Related setting: To have MediaConvert use the value you specify here, keep the default value, Custom (SPECIFIED) for the setting Dynamic range control (DynamicRangeControl). Otherwise, MediaConvert ignores Dynamic range compression RF (DynamicRangeCompressionRf). For information about the Dolby DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf." + "Eac3AtmosSettings$DynamicRangeCompressionRf": "Choose the Dolby dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby stream for the RF operating mode. Default value: Film light. Related setting: To have MediaConvert use the value you specify here, keep the default value, Custom, for the setting Dynamic range control. Otherwise, MediaConvert ignores Dynamic range compression RF. 
For information about the Dolby DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf." } }, "Eac3AtmosDynamicRangeControl": { - "base": "Specify whether MediaConvert should use any dynamic range control metadata from your input file. Keep the default value, Custom (SPECIFIED), to provide dynamic range control values in your job settings. Choose Follow source (INITIALIZE_FROM_SOURCE) to use the metadata from your input. Related settings--Use these settings to specify your dynamic range control values: Dynamic range compression line (DynamicRangeCompressionLine) and Dynamic range compression RF (DynamicRangeCompressionRf). When you keep the value Custom (SPECIFIED) for Dynamic range control (DynamicRangeControl) and you don't specify values for the related settings, MediaConvert uses default values for those settings.", + "base": "Specify whether MediaConvert should use any dynamic range control metadata from your input file. Keep the default value, Custom, to provide dynamic range control values in your job settings. Choose Follow source to use the metadata from your input. Related settings--Use these settings to specify your dynamic range control values: Dynamic range compression line and Dynamic range compression RF. When you keep the value Custom for Dynamic range control and you don't specify values for the related settings, MediaConvert uses default values for those settings.", "refs": { - "Eac3AtmosSettings$DynamicRangeControl": "Specify whether MediaConvert should use any dynamic range control metadata from your input file. Keep the default value, Custom (SPECIFIED), to provide dynamic range control values in your job settings. Choose Follow source (INITIALIZE_FROM_SOURCE) to use the metadata from your input. Related settings--Use these settings to specify your dynamic range control values: Dynamic range compression line (DynamicRangeCompressionLine) and Dynamic range compression RF (DynamicRangeCompressionRf). When you keep the value Custom (SPECIFIED) for Dynamic range control (DynamicRangeControl) and you don't specify values for the related settings, MediaConvert uses default values for those settings." + "Eac3AtmosSettings$DynamicRangeControl": "Specify whether MediaConvert should use any dynamic range control metadata from your input file. Keep the default value, Custom, to provide dynamic range control values in your job settings. Choose Follow source to use the metadata from your input. Related settings--Use these settings to specify your dynamic range control values: Dynamic range compression line and Dynamic range compression RF. When you keep the value Custom for Dynamic range control and you don't specify values for the related settings, MediaConvert uses default values for those settings." } }, "Eac3AtmosMeteringMode": { @@ -1339,15 +1339,15 @@ } }, "Eac3AtmosSettings": { - "base": "Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value EAC3_ATMOS.", + "base": "Required when you set Codec to the value EAC3_ATMOS.", "refs": { - "AudioCodecSettings$Eac3AtmosSettings": "Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value EAC3_ATMOS." + "AudioCodecSettings$Eac3AtmosSettings": "Required when you set Codec to the value EAC3_ATMOS." } }, "Eac3AtmosStereoDownmix": { - "base": "Choose how the service does stereo downmixing. 
Default value: Not indicated (ATMOS_STORAGE_DDP_DMIXMOD_NOT_INDICATED) Related setting: To have MediaConvert use this value, keep the default value, Custom (SPECIFIED) for the setting Downmix control (DownmixControl). Otherwise, MediaConvert ignores Stereo downmix (StereoDownmix).", + "base": "Choose how the service does stereo downmixing. Default value: Not indicated. Related setting: To have MediaConvert use this value, keep the default value, Custom for the setting Downmix control. Otherwise, MediaConvert ignores Stereo downmix.", "refs": { - "Eac3AtmosSettings$StereoDownmix": "Choose how the service does stereo downmixing. Default value: Not indicated (ATMOS_STORAGE_DDP_DMIXMOD_NOT_INDICATED) Related setting: To have MediaConvert use this value, keep the default value, Custom (SPECIFIED) for the setting Downmix control (DownmixControl). Otherwise, MediaConvert ignores Stereo downmix (StereoDownmix)." + "Eac3AtmosSettings$StereoDownmix": "Choose how the service does stereo downmixing. Default value: Not indicated. Related setting: To have MediaConvert use this value, keep the default value, Custom for the setting Downmix control. Otherwise, MediaConvert ignores Stereo downmix." } }, "Eac3AtmosSurroundExMode": { @@ -1381,15 +1381,15 @@ } }, "Eac3DynamicRangeCompressionLine": { - "base": "Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby Digital stream for the line operating mode. Related setting: When you use this setting, MediaConvert ignores any value you provide for Dynamic range compression profile (DynamicRangeCompressionProfile). For information about the Dolby Digital DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.", + "base": "Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby Digital stream for the line operating mode. Related setting: When you use this setting, MediaConvert ignores any value you provide for Dynamic range compression profile. For information about the Dolby Digital DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.", "refs": { - "Eac3Settings$DynamicRangeCompressionLine": "Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby Digital stream for the line operating mode. Related setting: When you use this setting, MediaConvert ignores any value you provide for Dynamic range compression profile (DynamicRangeCompressionProfile). 
For information about the Dolby Digital DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf." } }, "Eac3DynamicRangeCompressionRf": { - "base": "Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby Digital stream for the RF operating mode. Related setting: When you use this setting, MediaConvert ignores any value you provide for Dynamic range compression profile (DynamicRangeCompressionProfile). For information about the Dolby Digital DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.", + "base": "Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby Digital stream for the RF operating mode. Related setting: When you use this setting, MediaConvert ignores any value you provide for Dynamic range compression profile. For information about the Dolby Digital DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.", "refs": { - "Eac3Settings$DynamicRangeCompressionRf": "Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby Digital stream for the RF operating mode. Related setting: When you use this setting, MediaConvert ignores any value you provide for Dynamic range compression profile (DynamicRangeCompressionProfile). For information about the Dolby Digital DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf." + "Eac3Settings$DynamicRangeCompressionRf": "Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby Digital stream for the RF operating mode. Related setting: When you use this setting, MediaConvert ignores any value you provide for Dynamic range compression profile. For information about the Dolby Digital DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf." } }, "Eac3LfeControl": { @@ -1423,15 +1423,15 @@ } }, "Eac3Settings": { - "base": "Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value EAC3.", + "base": "Required when you set Codec to the value EAC3.", "refs": { - "AudioCodecSettings$Eac3Settings": "Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value EAC3." + "AudioCodecSettings$Eac3Settings": "Required when you set Codec to the value EAC3." } }, "Eac3StereoDownmix": { - "base": "Choose how the service does stereo downmixing. This setting only applies if you keep the default value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2) for the setting Coding mode (Eac3CodingMode). If you choose a different value for Coding mode, the service ignores Stereo downmix (Eac3StereoDownmix).", + "base": "Choose how the service does stereo downmixing. This setting only applies if you keep the default value of 3/2 - L, R, C, Ls, Rs for the setting Coding mode. 
If you choose a different value for Coding mode, the service ignores Stereo downmix.", "refs": { - "Eac3Settings$StereoDownmix": "Choose how the service does stereo downmixing. This setting only applies if you keep the default value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2) for the setting Coding mode (Eac3CodingMode). If you choose a different value for Coding mode, the service ignores Stereo downmix (Eac3StereoDownmix)." + "Eac3Settings$StereoDownmix": "Choose how the service does stereo downmixing. This setting only applies if you keep the default value of 3/2 - L, R, C, Ls, Rs for the setting Coding mode. If you choose a different value for Coding mode, the service ignores Stereo downmix." } }, "Eac3SurroundExMode": { @@ -1447,15 +1447,15 @@ } }, "EmbeddedConvert608To708": { - "base": "Specify whether this set of input captions appears in your outputs in both 608 and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes the captions data in two ways: it passes the 608 data through using the 608 compatibility bytes fields of the 708 wrapper, and it also translates the 608 data into 708.", + "base": "Specify whether this set of input captions appears in your outputs in both 608 and 708 format. If you choose Upconvert, MediaConvert includes the captions data in two ways: it passes the 608 data through using the 608 compatibility bytes fields of the 708 wrapper, and it also translates the 608 data into 708.", "refs": { - "EmbeddedSourceSettings$Convert608To708": "Specify whether this set of input captions appears in your outputs in both 608 and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes the captions data in two ways: it passes the 608 data through using the 608 compatibility bytes fields of the 708 wrapper, and it also translates the 608 data into 708." + "EmbeddedSourceSettings$Convert608To708": "Specify whether this set of input captions appears in your outputs in both 608 and 708 format. If you choose Upconvert, MediaConvert includes the captions data in two ways: it passes the 608 data through using the 608 compatibility bytes fields of the 708 wrapper, and it also translates the 608 data into 708." } }, "EmbeddedDestinationSettings": { - "base": "Settings related to CEA/EIA-608 and CEA/EIA-708 (also called embedded or ancillary) captions. Set up embedded captions in the same output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/embedded-output-captions.html. When you work directly in your JSON job specification, include this object and any required children when you set destinationType to EMBEDDED, EMBEDDED_PLUS_SCTE20, or SCTE20_PLUS_EMBEDDED.", + "base": "Settings related to CEA/EIA-608 and CEA/EIA-708 (also called embedded or ancillary) captions. Set up embedded captions in the same output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/embedded-output-captions.html.", "refs": { - "CaptionDestinationSettings$EmbeddedDestinationSettings": "Settings related to CEA/EIA-608 and CEA/EIA-708 (also called embedded or ancillary) captions. Set up embedded captions in the same output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/embedded-output-captions.html. When you work directly in your JSON job specification, include this object and any required children when you set destinationType to EMBEDDED, EMBEDDED_PLUS_SCTE20, or SCTE20_PLUS_EMBEDDED." 
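As a rough illustration of how the Eac3AtmosSettings fields documented above fit together, here is a minimal sketch using the aws-sdk-go mediaconvert types. The field names come from the doc keys in this diff and the enum strings from the removed parentheticals; the mix-level numbers are illustrative values only, not recommendations.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	// Keep DownmixControl at "SPECIFIED" (Custom in the console) so MediaConvert
	// honors the four mix-level values below; "INITIALIZE_FROM_SOURCE"
	// (Follow source) would take the downmix metadata from the input instead.
	atmos := &mediaconvert.Eac3AtmosSettings{
		DownmixControl:       aws.String("SPECIFIED"),
		LoRoCenterMixLevel:   aws.Float64(-3.0), // illustrative levels, in dB
		LoRoSurroundMixLevel: aws.Float64(-3.0),
		LtRtCenterMixLevel:   aws.Float64(-3.0),
		LtRtSurroundMixLevel: aws.Float64(-3.0),
		// DynamicRangeControl gates the two DRC profile settings the same way:
		// keep "SPECIFIED" for your values, or follow the input metadata.
		DynamicRangeControl: aws.String("SPECIFIED"),
	}
	fmt.Println(atmos)
}
```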
+ "CaptionDestinationSettings$EmbeddedDestinationSettings": "Settings related to CEA/EIA-608 and CEA/EIA-708 (also called embedded or ancillary) captions. Set up embedded captions in the same output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/embedded-output-captions.html." } }, "EmbeddedSourceSettings": { @@ -1471,9 +1471,9 @@ } }, "EmbeddedTimecodeOverride": { - "base": "Set Embedded timecode override (embeddedTimecodeOverride) to Use MDPM (USE_MDPM) when your AVCHD input contains timecode tag data in the Modified Digital Video Pack Metadata (MDPM). When you do, we recommend you also set Timecode source (inputTimecodeSource) to Embedded (EMBEDDED). Leave Embedded timecode override blank, or set to None (NONE), when your input does not contain MDPM timecode.", + "base": "Set Embedded timecode override to Use MDPM when your AVCHD input contains timecode tag data in the Modified Digital Video Pack Metadata. When you do, we recommend you also set Timecode source to Embedded. Leave Embedded timecode override blank, or set to None, when your input does not contain MDPM timecode.", "refs": { - "VideoSelector$EmbeddedTimecodeOverride": "Set Embedded timecode override (embeddedTimecodeOverride) to Use MDPM (USE_MDPM) when your AVCHD input contains timecode tag data in the Modified Digital Video Pack Metadata (MDPM). When you do, we recommend you also set Timecode source (inputTimecodeSource) to Embedded (EMBEDDED). Leave Embedded timecode override blank, or set to None (NONE), when your input does not contain MDPM timecode." + "VideoSelector$EmbeddedTimecodeOverride": "Set Embedded timecode override to Use MDPM when your AVCHD input contains timecode tag data in the Modified Digital Video Pack Metadata. When you do, we recommend you also set Timecode source to Embedded. Leave Embedded timecode override blank, or set to None, when your input does not contain MDPM timecode." } }, "Endpoint": { @@ -1485,7 +1485,7 @@ "EsamManifestConfirmConditionNotification": { "base": "ESAM ManifestConfirmConditionNotification defined by OC-SP-ESAM-API-I03-131025.", "refs": { - "EsamSettings$ManifestConfirmConditionNotification": "Specifies an ESAM ManifestConfirmConditionNotification XML as per OC-SP-ESAM-API-I03-131025. The transcoder uses the manifest conditioning instructions that you provide in the setting MCC XML (mccXml)." + "EsamSettings$ManifestConfirmConditionNotification": "Specifies an ESAM ManifestConfirmConditionNotification XML as per OC-SP-ESAM-API-I03-131025. The transcoder uses the manifest conditioning instructions that you provide in the setting MCC XML." } }, "EsamSettings": { @@ -1498,7 +1498,7 @@ "EsamSignalProcessingNotification": { "base": "ESAM SignalProcessingNotification data defined by OC-SP-ESAM-API-I03-131025.", "refs": { - "EsamSettings$SignalProcessingNotification": "Specifies an ESAM SignalProcessingNotification XML as per OC-SP-ESAM-API-I03-131025. The transcoder uses the signal processing instructions that you provide in the setting SCC XML (sccXml)." + "EsamSettings$SignalProcessingNotification": "Specifies an ESAM SignalProcessingNotification XML as per OC-SP-ESAM-API-I03-131025. The transcoder uses the signal processing instructions that you provide in the setting SCC XML." } }, "ExceptionBody": { @@ -1526,15 +1526,15 @@ } }, "FileGroupSettings": { - "base": "Settings related to your File output group. MediaConvert uses this group of settings to generate a single standalone file, rather than a streaming package. 
When you work directly in your JSON job specification, include this object and any required children when you set Type, under OutputGroupSettings, to FILE_GROUP_SETTINGS.", + "base": "Settings related to your File output group. MediaConvert uses this group of settings to generate a single standalone file, rather than a streaming package.", "refs": { - "OutputGroupSettings$FileGroupSettings": "Settings related to your File output group. MediaConvert uses this group of settings to generate a single standalone file, rather than a streaming package. When you work directly in your JSON job specification, include this object and any required children when you set Type, under OutputGroupSettings, to FILE_GROUP_SETTINGS." + "OutputGroupSettings$FileGroupSettings": "Settings related to your File output group. MediaConvert uses this group of settings to generate a single standalone file, rather than a streaming package." } }, "FileSourceConvert608To708": { - "base": "Specify whether this set of input captions appears in your outputs in both 608 and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes the captions data in two ways: it passes the 608 data through using the 608 compatibility bytes fields of the 708 wrapper, and it also translates the 608 data into 708.", + "base": "Specify whether this set of input captions appears in your outputs in both 608 and 708 format. If you choose Upconvert, MediaConvert includes the captions data in two ways: it passes the 608 data through using the 608 compatibility bytes fields of the 708 wrapper, and it also translates the 608 data into 708.", "refs": { - "FileSourceSettings$Convert608To708": "Specify whether this set of input captions appears in your outputs in both 608 and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes the captions data in two ways: it passes the 608 data through using the 608 compatibility bytes fields of the 708 wrapper, and it also translates the 608 data into 708." + "FileSourceSettings$Convert608To708": "Specify whether this set of input captions appears in your outputs in both 608 and 708 format. If you choose Upconvert, MediaConvert includes the captions data in two ways: it passes the 608 data through using the 608 compatibility bytes fields of the 708 wrapper, and it also translates the 608 data into 708." } }, "FileSourceSettings": { @@ -1544,16 +1544,16 @@ } }, "FileSourceTimeDeltaUnits": { - "base": "When you use the setting Time delta (TimeDelta) to adjust the sync between your sidecar captions and your video, use this setting to specify the units for the delta that you specify. When you don't specify a value for Time delta units (TimeDeltaUnits), MediaConvert uses seconds by default.", + "base": "When you use the setting Time delta to adjust the sync between your sidecar captions and your video, use this setting to specify the units for the delta that you specify. When you don't specify a value for Time delta units, MediaConvert uses seconds by default.", "refs": { - "FileSourceSettings$TimeDeltaUnits": "When you use the setting Time delta (TimeDelta) to adjust the sync between your sidecar captions and your video, use this setting to specify the units for the delta that you specify. When you don't specify a value for Time delta units (TimeDeltaUnits), MediaConvert uses seconds by default." 
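The Time delta units entry above pairs with the Time delta setting on the same FileSourceSettings object. A minimal sketch of a sidecar caption source that shifts the captions earlier by 5 milliseconds, assuming aws-sdk-go v1 and a hypothetical S3 location:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	// Pull the sidecar captions 5 units earlier. With TimeDeltaUnits set to
	// "MILLISECONDS" the delta is read as milliseconds; when you omit it,
	// MediaConvert treats the delta as seconds, per the docs above.
	src := &mediaconvert.FileSourceSettings{
		SourceFile:      aws.String("s3://my-caption-bucket/captions.scc"), // hypothetical path
		Convert608To708: aws.String("UPCONVERT"),
		TimeDelta:       aws.Int64(-5),
		TimeDeltaUnits:  aws.String("MILLISECONDS"),
	}
	fmt.Println(src)
}
```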
+ "FileSourceSettings$TimeDeltaUnits": "When you use the setting Time delta to adjust the sync between your sidecar captions and your video, use this setting to specify the units for the delta that you specify. When you don't specify a value for Time delta units, MediaConvert uses seconds by default." } }, "FontScript": { "base": "Provide the font script, using an ISO 15924 script code, if the LanguageCode is not sufficient for determining the script type. Where LanguageCode or CustomLanguageCode is sufficient, use \"AUTOMATIC\" or leave unset.", "refs": { - "BurninDestinationSettings$FontScript": "Set Font script (FontScript) to Automatically determined (AUTOMATIC), or leave blank, to automatically determine the font script in your input captions. Otherwise, set to Simplified Chinese (HANS) or Traditional Chinese (HANT) if your input font script uses Simplified or Traditional Chinese.", - "DvbSubDestinationSettings$FontScript": "Set Font script (FontScript) to Automatically determined (AUTOMATIC), or leave blank, to automatically determine the font script in your input captions. Otherwise, set to Simplified Chinese (HANS) or Traditional Chinese (HANT) if your input font script uses Simplified or Traditional Chinese. Within your job settings, all of your DVB-Sub settings must be identical." + "BurninDestinationSettings$FontScript": "Set Font script to Automatically determined, or leave blank, to automatically determine the font script in your input captions. Otherwise, set to Simplified Chinese (HANS) or Traditional Chinese (HANT) if your input font script uses Simplified or Traditional Chinese.", + "DvbSubDestinationSettings$FontScript": "Set Font script to Automatically determined, or leave blank, to automatically determine the font script in your input captions. Otherwise, set to Simplified Chinese (HANS) or Traditional Chinese (HANT) if your input font script uses Simplified or Traditional Chinese. Within your job settings, all of your DVB-Sub settings must be identical." } }, "ForbiddenException": { @@ -1568,9 +1568,9 @@ } }, "FrameCaptureSettings": { - "base": "Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the value FRAME_CAPTURE.", + "base": "Required when you set Codec to the value FRAME_CAPTURE.", "refs": { - "VideoCodecSettings$FrameCaptureSettings": "Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the value FRAME_CAPTURE." + "VideoCodecSettings$FrameCaptureSettings": "Required when you set Codec to the value FRAME_CAPTURE." } }, "GetJobRequest": { @@ -1624,15 +1624,15 @@ } }, "H264AdaptiveQuantization": { - "base": "Keep the default value, Auto (AUTO), for this setting to have MediaConvert automatically apply the best types of quantization for your video content. When you want to apply your quantization settings manually, you must set H264AdaptiveQuantization to a value other than Auto (AUTO). Use this setting to specify the strength of any adaptive quantization filters that you enable. If you don't want MediaConvert to do any adaptive quantization in this transcode, set Adaptive quantization (H264AdaptiveQuantization) to Off (OFF). Related settings: The value that you choose here applies to the following settings: H264FlickerAdaptiveQuantization, H264SpatialAdaptiveQuantization, and H264TemporalAdaptiveQuantization.", + "base": "Keep the default value, Auto, for this setting to have MediaConvert automatically apply the best types of quantization for your video content. 
When you want to apply your quantization settings manually, you must set H264AdaptiveQuantization to a value other than Auto. Use this setting to specify the strength of any adaptive quantization filters that you enable. If you don't want MediaConvert to do any adaptive quantization in this transcode, set Adaptive quantization to Off. Related settings: The value that you choose here applies to the following settings: H264FlickerAdaptiveQuantization, H264SpatialAdaptiveQuantization, and H264TemporalAdaptiveQuantization.", "refs": { - "H264Settings$AdaptiveQuantization": "Keep the default value, Auto (AUTO), for this setting to have MediaConvert automatically apply the best types of quantization for your video content. When you want to apply your quantization settings manually, you must set H264AdaptiveQuantization to a value other than Auto (AUTO). Use this setting to specify the strength of any adaptive quantization filters that you enable. If you don't want MediaConvert to do any adaptive quantization in this transcode, set Adaptive quantization (H264AdaptiveQuantization) to Off (OFF). Related settings: The value that you choose here applies to the following settings: H264FlickerAdaptiveQuantization, H264SpatialAdaptiveQuantization, and H264TemporalAdaptiveQuantization." + "H264Settings$AdaptiveQuantization": "Keep the default value, Auto, for this setting to have MediaConvert automatically apply the best types of quantization for your video content. When you want to apply your quantization settings manually, you must set H264AdaptiveQuantization to a value other than Auto. Use this setting to specify the strength of any adaptive quantization filters that you enable. If you don't want MediaConvert to do any adaptive quantization in this transcode, set Adaptive quantization to Off. Related settings: The value that you choose here applies to the following settings: H264FlickerAdaptiveQuantization, H264SpatialAdaptiveQuantization, and H264TemporalAdaptiveQuantization." } }, "H264CodecLevel": { - "base": "Specify an H.264 level that is consistent with your output video settings. If you aren't sure what level to specify, choose Auto (AUTO).", + "base": "Specify an H.264 level that is consistent with your output video settings. If you aren't sure what level to specify, choose Auto.", "refs": { - "H264Settings$CodecLevel": "Specify an H.264 level that is consistent with your output video settings. If you aren't sure what level to specify, choose Auto (AUTO)." + "H264Settings$CodecLevel": "Specify an H.264 level that is consistent with your output video settings. If you aren't sure what level to specify, choose Auto." } }, "H264CodecProfile": { @@ -1642,7 +1642,7 @@ } }, "H264DynamicSubGop": { - "base": "Choose Adaptive to improve subjective video quality for high-motion content. This will cause the service to use fewer B-frames (which infer information based on other frames) for high-motion portions of the video and more B-frames for low-motion portions. The maximum number of B-frames is limited by the value you provide for the setting B frames between reference frames (numberBFramesBetweenReferenceFrames).", + "base": "Choose Adaptive to improve subjective video quality for high-motion content. This will cause the service to use fewer B-frames (which infer information based on other frames) for high-motion portions of the video and more B-frames for low-motion portions. 
The maximum number of B-frames is limited by the value you provide for the setting B frames between reference frames.", "refs": { "H264Settings$DynamicSubGop": "Specify whether to allow the number of B-frames in your output GOP structure to vary or not depending on your input video content. To improve the subjective video quality of your output that has high-motion content: Leave blank or keep the default value Adaptive. MediaConvert will use fewer B-frames for high-motion video content than low-motion content. The maximum number of B-frames is limited by the value that you choose for B-frames between reference frames. To use the same number of B-frames for all types of content: Choose Static." } @@ -1654,21 +1654,21 @@ } }, "H264FieldEncoding": { - "base": "The video encoding method for your MPEG-4 AVC output. Keep the default value, PAFF, to have MediaConvert use PAFF encoding for interlaced outputs. Choose Force field (FORCE_FIELD) to disable PAFF encoding and create separate interlaced fields. Choose MBAFF to disable PAFF and have MediaConvert use MBAFF encoding for interlaced outputs.", + "base": "The video encoding method for your MPEG-4 AVC output. Keep the default value, PAFF, to have MediaConvert use PAFF encoding for interlaced outputs. Choose Force field to disable PAFF encoding and create separate interlaced fields. Choose MBAFF to disable PAFF and have MediaConvert use MBAFF encoding for interlaced outputs.", "refs": { - "H264Settings$FieldEncoding": "The video encoding method for your MPEG-4 AVC output. Keep the default value, PAFF, to have MediaConvert use PAFF encoding for interlaced outputs. Choose Force field (FORCE_FIELD) to disable PAFF encoding and create separate interlaced fields. Choose MBAFF to disable PAFF and have MediaConvert use MBAFF encoding for interlaced outputs." + "H264Settings$FieldEncoding": "The video encoding method for your MPEG-4 AVC output. Keep the default value, PAFF, to have MediaConvert use PAFF encoding for interlaced outputs. Choose Force field to disable PAFF encoding and create separate interlaced fields. Choose MBAFF to disable PAFF and have MediaConvert use MBAFF encoding for interlaced outputs." } }, "H264FlickerAdaptiveQuantization": { - "base": "Only use this setting when you change the default value, AUTO, for the setting H264AdaptiveQuantization. 
When you keep all defaults, excluding H264AdaptiveQuantization and all other adaptive quantization from your JSON job specification, MediaConvert automatically applies the best types of quantization for your video content. When you set H264AdaptiveQuantization to a value other than AUTO, the default value for H264FlickerAdaptiveQuantization is Disabled. Change this value to Enabled to reduce I-frame pop. I-frame pop appears as a visual flicker that can arise when the encoder saves bits by copying some macroblocks many times from frame to frame, and then refreshes them at the I-frame. When you enable this setting, the encoder updates these macroblocks slightly more often to smooth out the flicker. To manually enable or disable H264FlickerAdaptiveQuantization, you must set Adaptive quantization to a value other than AUTO.", "refs": { - "H264Settings$FlickerAdaptiveQuantization": "Only use this setting when you change the default value, AUTO, for the setting H264AdaptiveQuantization. When you keep all defaults, excluding H264AdaptiveQuantization and all other adaptive quantization from your JSON job specification, MediaConvert automatically applies the best types of quantization for your video content. When you set H264AdaptiveQuantization to a value other than AUTO, the default value for H264FlickerAdaptiveQuantization is Disabled (DISABLED). Change this value to Enabled (ENABLED) to reduce I-frame pop. I-frame pop appears as a visual flicker that can arise when the encoder saves bits by copying some macroblocks many times from frame to frame, and then refreshes them at the I-frame. When you enable this setting, the encoder updates these macroblocks slightly more often to smooth out the flicker. To manually enable or disable H264FlickerAdaptiveQuantization, you must set Adaptive quantization (H264AdaptiveQuantization) to a value other than AUTO." + "H264Settings$FlickerAdaptiveQuantization": "Only use this setting when you change the default value, AUTO, for the setting H264AdaptiveQuantization. When you keep all defaults, excluding H264AdaptiveQuantization and all other adaptive quantization from your JSON job specification, MediaConvert automatically applies the best types of quantization for your video content. When you set H264AdaptiveQuantization to a value other than AUTO, the default value for H264FlickerAdaptiveQuantization is Disabled. Change this value to Enabled to reduce I-frame pop. I-frame pop appears as a visual flicker that can arise when the encoder saves bits by copying some macroblocks many times from frame to frame, and then refreshes them at the I-frame. When you enable this setting, the encoder updates these macroblocks slightly more often to smooth out the flicker. To manually enable or disable H264FlickerAdaptiveQuantization, you must set Adaptive quantization to a value other than AUTO." } }, "H264FramerateControl": { - "base": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. 
Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator.", + "base": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction.", "refs": { - "H264Settings$FramerateControl": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator." + "H264Settings$FramerateControl": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction." } }, "H264FramerateConversionAlgorithm": { @@ -1684,21 +1684,21 @@ } }, "H264GopSizeUnits": { - "base": "Specify how the transcoder determines GOP size for this output. We recommend that you have the transcoder automatically choose this value for you based on characteristics of your input video. To enable this automatic behavior, choose Auto (AUTO) and and leave GOP size (GopSize) blank. By default, if you don't specify GOP mode control (GopSizeUnits), MediaConvert will use automatic behavior. If your output group specifies HLS, DASH, or CMAF, set GOP mode control to Auto and leave GOP size blank in each output in your output group. To explicitly specify the GOP length, choose Specified, frames (FRAMES) or Specified, seconds (SECONDS) and then provide the GOP length in the related setting GOP size (GopSize).", + "base": "Specify how the transcoder determines GOP size for this output. We recommend that you have the transcoder automatically choose this value for you based on characteristics of your input video. To enable this automatic behavior, choose Auto and leave GOP size blank. By default, if you don't specify GOP mode control, MediaConvert will use automatic behavior. If your output group specifies HLS, DASH, or CMAF, set GOP mode control to Auto and leave GOP size blank in each output in your output group. 
To explicitly specify the GOP length, choose Specified, frames or Specified, seconds and then provide the GOP length in the related setting GOP size.", "refs": { - "H264Settings$GopSizeUnits": "Specify how the transcoder determines GOP size for this output. We recommend that you have the transcoder automatically choose this value for you based on characteristics of your input video. To enable this automatic behavior, choose Auto (AUTO) and and leave GOP size (GopSize) blank. By default, if you don't specify GOP mode control (GopSizeUnits), MediaConvert will use automatic behavior. If your output group specifies HLS, DASH, or CMAF, set GOP mode control to Auto and leave GOP size blank in each output in your output group. To explicitly specify the GOP length, choose Specified, frames (FRAMES) or Specified, seconds (SECONDS) and then provide the GOP length in the related setting GOP size (GopSize)." + "H264Settings$GopSizeUnits": "Specify how the transcoder determines GOP size for this output. We recommend that you have the transcoder automatically choose this value for you based on characteristics of your input video. To enable this automatic behavior, choose Auto and leave GOP size blank. By default, if you don't specify GOP mode control, MediaConvert will use automatic behavior. If your output group specifies HLS, DASH, or CMAF, set GOP mode control to Auto and leave GOP size blank in each output in your output group. To explicitly specify the GOP length, choose Specified, frames or Specified, seconds and then provide the GOP length in the related setting GOP size." } }, "H264InterlaceMode": { - "base": "Choose the scan line type for the output. Keep the default value, Progressive (PROGRESSIVE) to create a progressive output, regardless of the scan type of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) to create an output that's interlaced with the same field polarity throughout. Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. If the source is progressive, the output will be interlaced with top field bottom field first, depending on which of the Follow options you choose.", + "base": "Choose the scan line type for the output. Keep the default value, Progressive, to create a progressive output, regardless of the scan type of your input. Use Top field first or Bottom field first to create an output that's interlaced with the same field polarity throughout. Use Follow, default top or Follow, default bottom to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. If the source is progressive, the output will be interlaced with top field first or bottom field first, depending on which of the Follow options you choose.", "refs": { - "H264Settings$InterlaceMode": "Choose the scan line type for the output. Keep the default value, Progressive (PROGRESSIVE) to create a progressive output, regardless of the scan type of your input. 
Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) to create an output that's interlaced with the same field polarity throughout. Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. If the source is progressive, the output will be interlaced with top field bottom field first, depending on which of the Follow options you choose." + "H264Settings$InterlaceMode": "Choose the scan line type for the output. Keep the default value, Progressive, to create a progressive output, regardless of the scan type of your input. Use Top field first or Bottom field first to create an output that's interlaced with the same field polarity throughout. Use Follow, default top or Follow, default bottom to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. If the source is progressive, the output will be interlaced with top field first or bottom field first, depending on which of the Follow options you choose." } }, "H264ParControl": { - "base": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses the PAR from your input video for your output. To specify a different PAR in the console, choose any value other than Follow source. To specify a different PAR by editing the JSON job specification, choose SPECIFIED. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings.", + "base": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source, uses the PAR from your input video for your output. To specify a different PAR in the console, choose any value other than Follow source. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings.", "refs": { - "H264Settings$ParControl": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses the PAR from your input video for your output. To specify a different PAR in the console, choose any value other than Follow source. To specify a different PAR by editing the JSON job specification, choose SPECIFIED. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings." + "H264Settings$ParControl": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source, uses the PAR from your input video for your output. To specify a different PAR in the console, choose any value other than Follow source. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings."
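The FramerateControl and ParControl entries above, together with the H264SlowPal entry further below, all drop their old JSON guidance in this release. As a hedged reconstruction of what that removed guidance described (slow PAL requires framerateControl SPECIFIED with a 25/1 fraction), here is a minimal H264Settings sketch in Go; treat it as an illustration of how the fields relate, not a recommended encode.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	// Slow PAL from a 23.976/24 fps source: per the removed JSON guidance,
	// framerateControl must be "SPECIFIED" with a 25/1 fraction. PAR follows
	// the input ("INITIALIZE_FROM_SOURCE"); choose "SPECIFIED" instead when
	// you also set ParNumerator and ParDenominator.
	h264 := &mediaconvert.H264Settings{
		SlowPal:              aws.String("ENABLED"),
		FramerateControl:     aws.String("SPECIFIED"),
		FramerateNumerator:   aws.Int64(25),
		FramerateDenominator: aws.Int64(1),
		ParControl:           aws.String("INITIALIZE_FROM_SOURCE"),
	}
	fmt.Println(h264)
}
```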
} }, "H264QualityTuningLevel": { @@ -1708,9 +1708,9 @@ } }, "H264QvbrSettings": { - "base": "Settings for quality-defined variable bitrate encoding with the H.264 codec. Use these settings only when you set QVBR for Rate control mode (RateControlMode).", + "base": "Settings for quality-defined variable bitrate encoding with the H.264 codec. Use these settings only when you set QVBR for Rate control mode.", "refs": { - "H264Settings$QvbrSettings": "Settings for quality-defined variable bitrate encoding with the H.265 codec. Use these settings only when you set QVBR for Rate control mode (RateControlMode)." + "H264Settings$QvbrSettings": "Settings for quality-defined variable bitrate encoding with the H.265 codec. Use these settings only when you set QVBR for Rate control mode." } }, "H264RateControlMode": { @@ -1726,33 +1726,33 @@ } }, "H264ScanTypeConversionMode": { - "base": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing (INTERLACED), for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode (interlaceMode) to a value other than Progressive (PROGRESSIVE).", + "base": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing, for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine to None or Soft. You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode to a value other than Progressive.", "refs": { - "H264Settings$ScanTypeConversionMode": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing (INTERLACED), for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. 
Required settings: To use optimized interlacing, you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode (interlaceMode) to a value other than Progressive (PROGRESSIVE)." + "H264Settings$ScanTypeConversionMode": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing, for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine to None or Soft. You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode to a value other than Progressive." } }, "H264SceneChangeDetect": { - "base": "Enable this setting to insert I-frames at scene changes that the service automatically detects. This improves video quality and is enabled by default. If this output uses QVBR, choose Transition detection (TRANSITION_DETECTION) for further video quality improvement. For more information about QVBR, see https://docs.aws.amazon.com/console/mediaconvert/cbr-vbr-qvbr.", + "base": "Enable this setting to insert I-frames at scene changes that the service automatically detects. This improves video quality and is enabled by default. If this output uses QVBR, choose Transition detection for further video quality improvement. For more information about QVBR, see https://docs.aws.amazon.com/console/mediaconvert/cbr-vbr-qvbr.", "refs": { - "H264Settings$SceneChangeDetect": "Enable this setting to insert I-frames at scene changes that the service automatically detects. This improves video quality and is enabled by default. If this output uses QVBR, choose Transition detection (TRANSITION_DETECTION) for further video quality improvement. For more information about QVBR, see https://docs.aws.amazon.com/console/mediaconvert/cbr-vbr-qvbr." + "H264Settings$SceneChangeDetect": "Enable this setting to insert I-frames at scene changes that the service automatically detects. This improves video quality and is enabled by default. If this output uses QVBR, choose Transition detection for further video quality improvement. For more information about QVBR, see https://docs.aws.amazon.com/console/mediaconvert/cbr-vbr-qvbr." } }, "H264Settings": { - "base": "Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the value H_264.", + "base": "Required when you set Codec to the value H_264.", "refs": { - "VideoCodecSettings$H264Settings": "Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the value H_264." + "VideoCodecSettings$H264Settings": "Required when you set Codec to the value H_264." } }, "H264SlowPal": { - "base": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output. When you enable slow PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio to keep it synchronized with the video. 
Note that enabling this setting will slightly reduce the duration of your video. Required settings: You must also set Framerate to 25. In your JSON job specification, set (framerateControl) to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to 1.", + "base": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output. When you enable slow PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio to keep it synchronized with the video. Note that enabling this setting will slightly reduce the duration of your video. Required settings: You must also set Framerate to 25.", "refs": { - "H264Settings$SlowPal": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output. When you enable slow PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio to keep it synchronized with the video. Note that enabling this setting will slightly reduce the duration of your video. Required settings: You must also set Framerate to 25. In your JSON job specification, set (framerateControl) to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to 1." + "H264Settings$SlowPal": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output. When you enable slow PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio to keep it synchronized with the video. Note that enabling this setting will slightly reduce the duration of your video. Required settings: You must also set Framerate to 25." } }, "H264SpatialAdaptiveQuantization": { - "base": "Only use this setting when you change the default value, Auto (AUTO), for the setting H264AdaptiveQuantization. When you keep all defaults, excluding H264AdaptiveQuantization and all other adaptive quantization from your JSON job specification, MediaConvert automatically applies the best types of quantization for your video content. When you set H264AdaptiveQuantization to a value other than AUTO, the default value for H264SpatialAdaptiveQuantization is Enabled (ENABLED). Keep this default value to adjust quantization within each frame based on spatial variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to set H264SpatialAdaptiveQuantization to Disabled (DISABLED). Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization (H264AdaptiveQuantization) depending on your content. For homogeneous content, such as cartoons and video games, set it to Low. For content with a wider variety of textures, set it to High or Higher. 
To manually enable or disable H264SpatialAdaptiveQuantization, you must set Adaptive quantization (H264AdaptiveQuantization) to a value other than AUTO.", + "base": "Only use this setting when you change the default value, Auto, for the setting H264AdaptiveQuantization. When you keep all defaults, excluding H264AdaptiveQuantization and all other adaptive quantization from your JSON job specification, MediaConvert automatically applies the best types of quantization for your video content. When you set H264AdaptiveQuantization to a value other than AUTO, the default value for H264SpatialAdaptiveQuantization is Enabled. Keep this default value to adjust quantization within each frame based on spatial variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to set H264SpatialAdaptiveQuantization to Disabled. Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization depending on your content. For homogeneous content, such as cartoons and video games, set it to Low. For content with a wider variety of textures, set it to High or Higher. To manually enable or disable H264SpatialAdaptiveQuantization, you must set Adaptive quantization to a value other than AUTO.", "refs": { - "H264Settings$SpatialAdaptiveQuantization": "Only use this setting when you change the default value, Auto (AUTO), for the setting H264AdaptiveQuantization. When you keep all defaults, excluding H264AdaptiveQuantization and all other adaptive quantization from your JSON job specification, MediaConvert automatically applies the best types of quantization for your video content. When you set H264AdaptiveQuantization to a value other than AUTO, the default value for H264SpatialAdaptiveQuantization is Enabled (ENABLED). Keep this default value to adjust quantization within each frame based on spatial variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to set H264SpatialAdaptiveQuantization to Disabled (DISABLED). Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization (H264AdaptiveQuantization) depending on your content. For homogeneous content, such as cartoons and video games, set it to Low. For content with a wider variety of textures, set it to High or Higher. 
To manually enable or disable H264SpatialAdaptiveQuantization, you must set Adaptive quantization (H264AdaptiveQuantization) to a value other than AUTO." + "H264Settings$SpatialAdaptiveQuantization": "Only use this setting when you change the default value, Auto, for the setting H264AdaptiveQuantization. When you keep all defaults, excluding H264AdaptiveQuantization and all other adaptive quantization from your JSON job specification, MediaConvert automatically applies the best types of quantization for your video content. When you set H264AdaptiveQuantization to a value other than AUTO, the default value for H264SpatialAdaptiveQuantization is Enabled. Keep this default value to adjust quantization within each frame based on spatial variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to set H264SpatialAdaptiveQuantization to Disabled. Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization depending on your content. For homogeneous content, such as cartoons and video games, set it to Low. For content with a wider variety of textures, set it to High or Higher. To manually enable or disable H264SpatialAdaptiveQuantization, you must set Adaptive quantization to a value other than AUTO." } }, "H264Syntax": { @@ -1762,15 +1762,15 @@ } }, "H264Telecine": { - "base": "When you do frame rate conversion from 23.976 frames per second (fps) to 29.97 fps, and your output scan type is interlaced, you can optionally enable hard or soft telecine to create a smoother picture. Hard telecine (HARD) produces a 29.97i output. Soft telecine (SOFT) produces an output with a 23.976 output that signals to the video player device to do the conversion during play back. When you keep the default value, None (NONE), MediaConvert does a standard frame rate conversion to 29.97 without doing anything with the field polarity to create a smoother picture.", + "base": "When you do frame rate conversion from 23.976 frames per second (fps) to 29.97 fps, and your output scan type is interlaced, you can optionally enable hard or soft telecine to create a smoother picture. Hard telecine produces a 29.97i output. Soft telecine produces a 23.976 output that signals to the video player device to do the conversion during playback. When you keep the default value, None, MediaConvert does a standard frame rate conversion to 29.97 without doing anything with the field polarity to create a smoother picture.", "refs": { - "H264Settings$Telecine": "When you do frame rate conversion from 23.976 frames per second (fps) to 29.97 fps, and your output scan type is interlaced, you can optionally enable hard or soft telecine to create a smoother picture. Hard telecine (HARD) produces a 29.97i output. Soft telecine (SOFT) produces an output with a 23.976 output that signals to the video player device to do the conversion during play back. 
When you keep the default value, None (NONE), MediaConvert does a standard frame rate conversion to 29.97 without doing anything with the field polarity to create a smoother picture." + "H264Settings$Telecine": "When you do frame rate conversion from 23.976 frames per second (fps) to 29.97 fps, and your output scan type is interlaced, you can optionally enable hard or soft telecine to create a smoother picture. Hard telecine produces a 29.97i output. Soft telecine produces a 23.976 output that signals to the video player device to do the conversion during playback. When you keep the default value, None, MediaConvert does a standard frame rate conversion to 29.97 without doing anything with the field polarity to create a smoother picture." } }, "H264TemporalAdaptiveQuantization": { - "base": "Only use this setting when you change the default value, AUTO, for the setting H264AdaptiveQuantization. When you keep all defaults, excluding H264AdaptiveQuantization and all other adaptive quantization from your JSON job specification, MediaConvert automatically applies the best types of quantization for your video content. When you set H264AdaptiveQuantization to a value other than AUTO, the default value for H264TemporalAdaptiveQuantization is Enabled (ENABLED). Keep this default value to adjust quantization within each frame based on temporal variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas of the frame that aren't moving and uses more bits on complex objects with sharp edges that move a lot. For example, this feature improves the readability of text tickers on newscasts and scoreboards on sports matches. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen that doesn't have moving objects with sharp edges, such as sports athletes' faces, you might choose to set H264TemporalAdaptiveQuantization to Disabled (DISABLED). Related setting: When you enable temporal quantization, adjust the strength of the filter with the setting Adaptive quantization (adaptiveQuantization). To manually enable or disable H264TemporalAdaptiveQuantization, you must set Adaptive quantization (H264AdaptiveQuantization) to a value other than AUTO.", + "base": "Only use this setting when you change the default value, AUTO, for the setting H264AdaptiveQuantization. When you keep all defaults, excluding H264AdaptiveQuantization and all other adaptive quantization from your JSON job specification, MediaConvert automatically applies the best types of quantization for your video content. When you set H264AdaptiveQuantization to a value other than AUTO, the default value for H264TemporalAdaptiveQuantization is Enabled. Keep this default value to adjust quantization within each frame based on temporal variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas of the frame that aren't moving and uses more bits on complex objects with sharp edges that move a lot. For example, this feature improves the readability of text tickers on newscasts and scoreboards on sports matches. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. 
If viewers are likely to be focusing their attention on a part of the screen that doesn't have moving objects with sharp edges, such as sports athletes' faces, you might choose to set H264TemporalAdaptiveQuantization to Disabled. Related setting: When you enable temporal quantization, adjust the strength of the filter with the setting Adaptive quantization. To manually enable or disable H264TemporalAdaptiveQuantization, you must set Adaptive quantization to a value other than AUTO.", "refs": { - "H264Settings$TemporalAdaptiveQuantization": "Only use this setting when you change the default value, AUTO, for the setting H264AdaptiveQuantization. When you keep all defaults, excluding H264AdaptiveQuantization and all other adaptive quantization from your JSON job specification, MediaConvert automatically applies the best types of quantization for your video content. When you set H264AdaptiveQuantization to a value other than AUTO, the default value for H264TemporalAdaptiveQuantization is Enabled (ENABLED). Keep this default value to adjust quantization within each frame based on temporal variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas of the frame that aren't moving and uses more bits on complex objects with sharp edges that move a lot. For example, this feature improves the readability of text tickers on newscasts and scoreboards on sports matches. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen that doesn't have moving objects with sharp edges, such as sports athletes' faces, you might choose to set H264TemporalAdaptiveQuantization to Disabled (DISABLED). Related setting: When you enable temporal quantization, adjust the strength of the filter with the setting Adaptive quantization (adaptiveQuantization). To manually enable or disable H264TemporalAdaptiveQuantization, you must set Adaptive quantization (H264AdaptiveQuantization) to a value other than AUTO." + "H264Settings$TemporalAdaptiveQuantization": "Only use this setting when you change the default value, AUTO, for the setting H264AdaptiveQuantization. When you keep all defaults, excluding H264AdaptiveQuantization and all other adaptive quantization from your JSON job specification, MediaConvert automatically applies the best types of quantization for your video content. When you set H264AdaptiveQuantization to a value other than AUTO, the default value for H264TemporalAdaptiveQuantization is Enabled. Keep this default value to adjust quantization within each frame based on temporal variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas of the frame that aren't moving and uses more bits on complex objects with sharp edges that move a lot. For example, this feature improves the readability of text tickers on newscasts and scoreboards on sports matches. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen that doesn't have moving objects with sharp edges, such as sports athletes' faces, you might choose to set H264TemporalAdaptiveQuantization to Disabled. 
Related setting: When you enable temporal quantization, adjust the strength of the filter with the setting Adaptive quantization. To manually enable or disable H264TemporalAdaptiveQuantization, you must set Adaptive quantization to a value other than AUTO." } }, "H264UnregisteredSeiTimecode": { @@ -1780,9 +1780,9 @@ } }, "H265AdaptiveQuantization": { - "base": "When you set Adaptive Quantization (H265AdaptiveQuantization) to Auto (AUTO), or leave blank, MediaConvert automatically applies quantization to improve the video quality of your output. Set Adaptive Quantization to Low (LOW), Medium (MEDIUM), High (HIGH), Higher (HIGHER), or Max (MAX) to manually control the strength of the quantization filter. When you do, you can specify a value for Spatial Adaptive Quantization (H265SpatialAdaptiveQuantization), Temporal Adaptive Quantization (H265TemporalAdaptiveQuantization), and Flicker Adaptive Quantization (H265FlickerAdaptiveQuantization), to further control the quantization filter. Set Adaptive Quantization to Off (OFF) to apply no quantization to your output.", + "base": "When you set Adaptive Quantization to Auto, or leave blank, MediaConvert automatically applies quantization to improve the video quality of your output. Set Adaptive Quantization to Low, Medium, High, Higher, or Max to manually control the strength of the quantization filter. When you do, you can specify a value for Spatial Adaptive Quantization, Temporal Adaptive Quantization, and Flicker Adaptive Quantization, to further control the quantization filter. Set Adaptive Quantization to Off to apply no quantization to your output.", "refs": { - "H265Settings$AdaptiveQuantization": "When you set Adaptive Quantization (H265AdaptiveQuantization) to Auto (AUTO), or leave blank, MediaConvert automatically applies quantization to improve the video quality of your output. Set Adaptive Quantization to Low (LOW), Medium (MEDIUM), High (HIGH), Higher (HIGHER), or Max (MAX) to manually control the strength of the quantization filter. When you do, you can specify a value for Spatial Adaptive Quantization (H265SpatialAdaptiveQuantization), Temporal Adaptive Quantization (H265TemporalAdaptiveQuantization), and Flicker Adaptive Quantization (H265FlickerAdaptiveQuantization), to further control the quantization filter. Set Adaptive Quantization to Off (OFF) to apply no quantization to your output." + "H265Settings$AdaptiveQuantization": "When you set Adaptive Quantization to Auto, or leave blank, MediaConvert automatically applies quantization to improve the video quality of your output. Set Adaptive Quantization to Low, Medium, High, Higher, or Max to manually control the strength of the quantization filter. When you do, you can specify a value for Spatial Adaptive Quantization, Temporal Adaptive Quantization, and Flicker Adaptive Quantization, to further control the quantization filter. Set Adaptive Quantization to Off to apply no quantization to your output." } }, "H265AlternateTransferFunctionSei": { @@ -1804,21 +1804,21 @@ } }, "H265DynamicSubGop": { - "base": "Choose Adaptive to improve subjective video quality for high-motion content. This will cause the service to use fewer B-frames (which infer information based on other frames) for high-motion portions of the video and more B-frames for low-motion portions. 
The maximum number of B-frames is limited by the value you provide for the setting B frames between reference frames (numberBFramesBetweenReferenceFrames).", + "base": "Choose Adaptive to improve subjective video quality for high-motion content. This will cause the service to use fewer B-frames (which infer information based on other frames) for high-motion portions of the video and more B-frames for low-motion portions. The maximum number of B-frames is limited by the value you provide for the setting B frames between reference frames.", "refs": { "H265Settings$DynamicSubGop": "Specify whether to allow the number of B-frames in your output GOP structure to vary or not depending on your input video content. To improve the subjective video quality of your output that has high-motion content: Leave blank or keep the default value Adaptive. MediaConvert will use fewer B-frames for high-motion video content than low-motion content. The maximum number of B- frames is limited by the value that you choose for B-frames between reference frames. To use the same number B-frames for all types of content: Choose Static." } }, "H265FlickerAdaptiveQuantization": { - "base": "Enable this setting to have the encoder reduce I-frame pop. I-frame pop appears as a visual flicker that can arise when the encoder saves bits by copying some macroblocks many times from frame to frame, and then refreshes them at the I-frame. When you enable this setting, the encoder updates these macroblocks slightly more often to smooth out the flicker. This setting is disabled by default. Related setting: In addition to enabling this setting, you must also set adaptiveQuantization to a value other than Off (OFF).", + "base": "Enable this setting to have the encoder reduce I-frame pop. I-frame pop appears as a visual flicker that can arise when the encoder saves bits by copying some macroblocks many times from frame to frame, and then refreshes them at the I-frame. When you enable this setting, the encoder updates these macroblocks slightly more often to smooth out the flicker. This setting is disabled by default. Related setting: In addition to enabling this setting, you must also set adaptiveQuantization to a value other than Off.", "refs": { - "H265Settings$FlickerAdaptiveQuantization": "Enable this setting to have the encoder reduce I-frame pop. I-frame pop appears as a visual flicker that can arise when the encoder saves bits by copying some macroblocks many times from frame to frame, and then refreshes them at the I-frame. When you enable this setting, the encoder updates these macroblocks slightly more often to smooth out the flicker. This setting is disabled by default. Related setting: In addition to enabling this setting, you must also set adaptiveQuantization to a value other than Off (OFF)." + "H265Settings$FlickerAdaptiveQuantization": "Enable this setting to have the encoder reduce I-frame pop. I-frame pop appears as a visual flicker that can arise when the encoder saves bits by copying some macroblocks many times from frame to frame, and then refreshes them at the I-frame. When you enable this setting, the encoder updates these macroblocks slightly more often to smooth out the flicker. This setting is disabled by default. Related setting: In addition to enabling this setting, you must also set adaptiveQuantization to a value other than Off." } }, "H265FramerateControl": { - "base": "If you are using the console, use the Framerate setting to specify the frame rate for this output. 
If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator.", + "base": "Use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction.", "refs": { - "H265Settings$FramerateControl": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator." + "H265Settings$FramerateControl": "Use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction." } }, "H265FramerateConversionAlgorithm": { @@ -1834,33 +1834,33 @@ } }, "H265GopSizeUnits": { - "base": "Specify how the transcoder determines GOP size for this output. We recommend that you have the transcoder automatically choose this value for you based on characteristics of your input video. To enable this automatic behavior, choose Auto (AUTO) and and leave GOP size (GopSize) blank. By default, if you don't specify GOP mode control (GopSizeUnits), MediaConvert will use automatic behavior. If your output group specifies HLS, DASH, or CMAF, set GOP mode control to Auto and leave GOP size blank in each output in your output group. To explicitly specify the GOP length, choose Specified, frames (FRAMES) or Specified, seconds (SECONDS) and then provide the GOP length in the related setting GOP size (GopSize).", + "base": "Specify how the transcoder determines GOP size for this output. We recommend that you have the transcoder automatically choose this value for you based on characteristics of your input video. 
To enable this automatic behavior, choose Auto and leave GOP size blank. By default, if you don't specify GOP mode control, MediaConvert will use automatic behavior. If your output group specifies HLS, DASH, or CMAF, set GOP mode control to Auto and leave GOP size blank in each output in your output group. To explicitly specify the GOP length, choose Specified, frames or Specified, seconds and then provide the GOP length in the related setting GOP size.", "refs": { - "H265Settings$GopSizeUnits": "Specify how the transcoder determines GOP size for this output. We recommend that you have the transcoder automatically choose this value for you based on characteristics of your input video. To enable this automatic behavior, choose Auto (AUTO) and and leave GOP size (GopSize) blank. By default, if you don't specify GOP mode control (GopSizeUnits), MediaConvert will use automatic behavior. If your output group specifies HLS, DASH, or CMAF, set GOP mode control to Auto and leave GOP size blank in each output in your output group. To explicitly specify the GOP length, choose Specified, frames (FRAMES) or Specified, seconds (SECONDS) and then provide the GOP length in the related setting GOP size (GopSize)." + "H265Settings$GopSizeUnits": "Specify how the transcoder determines GOP size for this output. We recommend that you have the transcoder automatically choose this value for you based on characteristics of your input video. To enable this automatic behavior, choose Auto and leave GOP size blank. By default, if you don't specify GOP mode control, MediaConvert will use automatic behavior. If your output group specifies HLS, DASH, or CMAF, set GOP mode control to Auto and leave GOP size blank in each output in your output group. To explicitly specify the GOP length, choose Specified, frames or Specified, seconds and then provide the GOP length in the related setting GOP size." } }, "H265InterlaceMode": { - "base": "Choose the scan line type for the output. Keep the default value, Progressive (PROGRESSIVE) to create a progressive output, regardless of the scan type of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) to create an output that's interlaced with the same field polarity throughout. Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. If the source is progressive, the output will be interlaced with top field bottom field first, depending on which of the Follow options you choose.", + "base": "Choose the scan line type for the output. Keep the default value, Progressive, to create a progressive output, regardless of the scan type of your input. Use Top field first or Bottom field first to create an output that's interlaced with the same field polarity throughout. Use Follow, default top or Follow, default bottom to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. 
If the source is progressive, the output will be interlaced with top field or bottom field first, depending on which of the Follow options you choose.", "refs": { - "H265Settings$InterlaceMode": "Choose the scan line type for the output. Keep the default value, Progressive (PROGRESSIVE) to create a progressive output, regardless of the scan type of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) to create an output that's interlaced with the same field polarity throughout. Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. If the source is progressive, the output will be interlaced with top field bottom field first, depending on which of the Follow options you choose." + "H265Settings$InterlaceMode": "Choose the scan line type for the output. Keep the default value, Progressive, to create a progressive output, regardless of the scan type of your input. Use Top field first or Bottom field first to create an output that's interlaced with the same field polarity throughout. Use Follow, default top or Follow, default bottom to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. If the source is progressive, the output will be interlaced with top field or bottom field first, depending on which of the Follow options you choose." } }, "H265ParControl": { - "base": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses the PAR from your input video for your output. To specify a different PAR in the console, choose any value other than Follow source. To specify a different PAR by editing the JSON job specification, choose SPECIFIED. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings.", + "base": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source, uses the PAR from your input video for your output. To specify a different PAR, choose any value other than Follow source. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings.", "refs": { - "H265Settings$ParControl": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses the PAR from your input video for your output. 
To specify a different PAR, choose any value other than Follow source. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings." } }, "H265QualityTuningLevel": { - "base": "Optional. Use Quality tuning level (qualityTuningLevel) to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, single-pass encoding.", + "base": "Optional. Use Quality tuning level to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, single-pass encoding.", "refs": { - "H265Settings$QualityTuningLevel": "Optional. Use Quality tuning level (qualityTuningLevel) to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, single-pass encoding." + "H265Settings$QualityTuningLevel": "Optional. Use Quality tuning level to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, single-pass encoding." } }, "H265QvbrSettings": { - "base": "Settings for quality-defined variable bitrate encoding with the H.265 codec. Use these settings only when you set QVBR for Rate control mode (RateControlMode).", + "base": "Settings for quality-defined variable bitrate encoding with the H.265 codec. Use these settings only when you set QVBR for Rate control mode.", "refs": { - "H265Settings$QvbrSettings": "Settings for quality-defined variable bitrate encoding with the H.265 codec. Use these settings only when you set QVBR for Rate control mode (RateControlMode)." + "H265Settings$QvbrSettings": "Settings for quality-defined variable bitrate encoding with the H.265 codec. Use these settings only when you set QVBR for Rate control mode." } }, "H265RateControlMode": { @@ -1876,15 +1876,15 @@ } }, "H265ScanTypeConversionMode": { - "base": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing (INTERLACED), for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode (interlaceMode) to a value other than Progressive (PROGRESSIVE).", + "base": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing, for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. 
When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine to None or Soft. You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode to a value other than Progressive.", "refs": { - "H265Settings$ScanTypeConversionMode": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing (INTERLACED), for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode (interlaceMode) to a value other than Progressive (PROGRESSIVE)." + "H265Settings$ScanTypeConversionMode": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing, for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine to None or Soft. You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode to a value other than Progressive." } }, "H265SceneChangeDetect": { - "base": "Enable this setting to insert I-frames at scene changes that the service automatically detects. This improves video quality and is enabled by default. If this output uses QVBR, choose Transition detection (TRANSITION_DETECTION) for further video quality improvement. For more information about QVBR, see https://docs.aws.amazon.com/console/mediaconvert/cbr-vbr-qvbr.", + "base": "Enable this setting to insert I-frames at scene changes that the service automatically detects. This improves video quality and is enabled by default. If this output uses QVBR, choose Transition detection for further video quality improvement. For more information about QVBR, see https://docs.aws.amazon.com/console/mediaconvert/cbr-vbr-qvbr.", "refs": { - "H265Settings$SceneChangeDetect": "Enable this setting to insert I-frames at scene changes that the service automatically detects. This improves video quality and is enabled by default. If this output uses QVBR, choose Transition detection (TRANSITION_DETECTION) for further video quality improvement. 
For more information about QVBR, see https://docs.aws.amazon.com/console/mediaconvert/cbr-vbr-qvbr." + "H265Settings$SceneChangeDetect": "Enable this setting to insert I-frames at scene changes that the service automatically detects. This improves video quality and is enabled by default. If this output uses QVBR, choose Transition detection for further video quality improvement. For more information about QVBR, see https://docs.aws.amazon.com/console/mediaconvert/cbr-vbr-qvbr." } }, "H265Settings": { @@ -1894,27 +1894,27 @@ } }, "H265SlowPal": { - "base": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output. When you enable slow PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio to keep it synchronized with the video. Note that enabling this setting will slightly reduce the duration of your video. Required settings: You must also set Framerate to 25. In your JSON job specification, set (framerateControl) to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to 1.", + "base": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output. When you enable slow PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio to keep it synchronized with the video. Note that enabling this setting will slightly reduce the duration of your video. Required settings: You must also set Framerate to 25.", "refs": { - "H265Settings$SlowPal": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output. When you enable slow PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio to keep it synchronized with the video. Note that enabling this setting will slightly reduce the duration of your video. Required settings: You must also set Framerate to 25. In your JSON job specification, set (framerateControl) to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to 1." + "H265Settings$SlowPal": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output. When you enable slow PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio to keep it synchronized with the video. Note that enabling this setting will slightly reduce the duration of your video. Required settings: You must also set Framerate to 25." } }, "H265SpatialAdaptiveQuantization": { - "base": "Keep the default value, Enabled (ENABLED), to adjust quantization within each frame based on spatial variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to disable this feature. Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization (adaptiveQuantization) depending on your content. 
For homogeneous content, such as cartoons and video games, set it to Low. For content with a wider variety of textures, set it to High or Higher.", + "base": "Keep the default value, Enabled, to adjust quantization within each frame based on spatial variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to disable this feature. Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization depending on your content. For homogeneous content, such as cartoons and video games, set it to Low. For content with a wider variety of textures, set it to High or Higher.", "refs": { - "H265Settings$SpatialAdaptiveQuantization": "Keep the default value, Enabled (ENABLED), to adjust quantization within each frame based on spatial variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to disable this feature. Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization (adaptiveQuantization) depending on your content. For homogeneous content, such as cartoons and video games, set it to Low. For content with a wider variety of textures, set it to High or Higher." + "H265Settings$SpatialAdaptiveQuantization": "Keep the default value, Enabled, to adjust quantization within each frame based on spatial variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to disable this feature. Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization depending on your content. For homogeneous content, such as cartoons and video games, set it to Low. For content with a wider variety of textures, set it to High or Higher." 
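For SDK users, each of these adaptive-quantization options surfaces as a plain string-enum field on the generated H265Settings struct. A minimal Go sketch of manually controlling the H.265 quantization family; the constant names (for example, H265AdaptiveQuantizationHigh) are assumed from the v1 SDK's usual enum-generation pattern rather than taken from this diff:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	// Per the documentation strings above, AdaptiveQuantization must be set
	// to a value other than AUTO before the spatial and temporal
	// sub-settings take effect.
	h265 := &mediaconvert.H265Settings{
		AdaptiveQuantization:         aws.String(mediaconvert.H265AdaptiveQuantizationHigh),
		SpatialAdaptiveQuantization:  aws.String(mediaconvert.H265SpatialAdaptiveQuantizationEnabled),
		TemporalAdaptiveQuantization: aws.String(mediaconvert.H265TemporalAdaptiveQuantizationEnabled),
	}
	fmt.Println(h265)
}
```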
} }, "H265Telecine": { - "base": "This field applies only if the Streams > Advanced > Framerate (framerate) field is set to 29.970. This field works with the Streams > Advanced > Preprocessors > Deinterlacer field (deinterlace_mode) and the Streams > Advanced > Interlaced Mode field (interlace_mode) to identify the scan type for the output: Progressive, Interlaced, Hard Telecine or Soft Telecine. - Hard: produces 29.97i output from 23.976 input. - Soft: produces 23.976; the player converts this output to 29.97i.", + "base": "This field applies only if the Streams > Advanced > Framerate field is set to 29.970. This field works with the Streams > Advanced > Preprocessors > Deinterlacer field and the Streams > Advanced > Interlaced Mode field to identify the scan type for the output: Progressive, Interlaced, Hard Telecine or Soft Telecine. - Hard: produces 29.97i output from 23.976 input. - Soft: produces 23.976; the player converts this output to 29.97i.", "refs": { - "H265Settings$Telecine": "This field applies only if the Streams > Advanced > Framerate (framerate) field is set to 29.970. This field works with the Streams > Advanced > Preprocessors > Deinterlacer field (deinterlace_mode) and the Streams > Advanced > Interlaced Mode field (interlace_mode) to identify the scan type for the output: Progressive, Interlaced, Hard Telecine or Soft Telecine. - Hard: produces 29.97i output from 23.976 input. - Soft: produces 23.976; the player converts this output to 29.97i." + "H265Settings$Telecine": "This field applies only if the Streams > Advanced > Framerate field is set to 29.970. This field works with the Streams > Advanced > Preprocessors > Deinterlacer field and the Streams > Advanced > Interlaced Mode field to identify the scan type for the output: Progressive, Interlaced, Hard Telecine or Soft Telecine. - Hard: produces 29.97i output from 23.976 input. - Soft: produces 23.976; the player converts this output to 29.97i." } }, "H265TemporalAdaptiveQuantization": { - "base": "Keep the default value, Enabled (ENABLED), to adjust quantization within each frame based on temporal variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas of the frame that aren't moving and uses more bits on complex objects with sharp edges that move a lot. For example, this feature improves the readability of text tickers on newscasts and scoreboards on sports matches. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen that doesn't have moving objects with sharp edges, such as sports athletes' faces, you might choose to disable this feature. Related setting: When you enable temporal quantization, adjust the strength of the filter with the setting Adaptive quantization (adaptiveQuantization).", + "base": "Keep the default value, Enabled, to adjust quantization within each frame based on temporal variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas of the frame that aren't moving and uses more bits on complex objects with sharp edges that move a lot. For example, this feature improves the readability of text tickers on newscasts and scoreboards on sports matches. Enabling this feature will almost always improve your video quality. 
Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen that doesn't have moving objects with sharp edges, such as sports athletes' faces, you might choose to disable this feature. Related setting: When you enable temporal quantization, adjust the strength of the filter with the setting Adaptive quantization.", "refs": { - "H265Settings$TemporalAdaptiveQuantization": "Keep the default value, Enabled (ENABLED), to adjust quantization within each frame based on temporal variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas of the frame that aren't moving and uses more bits on complex objects with sharp edges that move a lot. For example, this feature improves the readability of text tickers on newscasts and scoreboards on sports matches. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen that doesn't have moving objects with sharp edges, such as sports athletes' faces, you might choose to disable this feature. Related setting: When you enable temporal quantization, adjust the strength of the filter with the setting Adaptive quantization (adaptiveQuantization)." + "H265Settings$TemporalAdaptiveQuantization": "Keep the default value, Enabled, to adjust quantization within each frame based on temporal variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas of the frame that aren't moving and uses more bits on complex objects with sharp edges that move a lot. For example, this feature improves the readability of text tickers on newscasts and scoreboards on sports matches. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen that doesn't have moving objects with sharp edges, such as sports athletes' faces, you might choose to disable this feature. Related setting: When you enable temporal quantization, adjust the strength of the filter with the setting Adaptive quantization." } }, "H265TemporalIds": { @@ -1950,8 +1950,8 @@ "Hdr10Metadata": { "base": "Use these settings to specify static color calibration metadata, as defined by SMPTE ST 2086. These values don't affect the pixel values that are encoded in the video stream. They are intended to help the downstream video player display content in a way that reflects the intentions of the content creator.", "refs": { - "ColorCorrector$Hdr10Metadata": "Use these settings when you convert to the HDR 10 color space. Specify the SMPTE ST 2086 Mastering Display Color Volume static metadata that you want signaled in the output. These values don't affect the pixel values that are encoded in the video stream. They are intended to help the downstream video player display content in a way that reflects the intentions of the the content creator. When you set Color space conversion (ColorSpaceConversion) to HDR 10 (FORCE_HDR10), these settings are required. You must set values for Max frame average light level (maxFrameAverageLightLevel) and Max content light level (maxContentLightLevel); these settings don't have a default value. 
The default values for the other HDR 10 metadata settings are defined by the P3D65 color space. For more information about MediaConvert HDR jobs, see https://docs.aws.amazon.com/console/mediaconvert/hdr.", - "VideoSelector$Hdr10Metadata": "Use these settings to provide HDR 10 metadata that is missing or inaccurate in your input video. Appropriate values vary depending on the input video and must be provided by a color grader. The color grader generates these values during the HDR 10 mastering process. The valid range for each of these settings is 0 to 50,000. Each increment represents 0.00002 in CIE1931 color coordinate. Related settings - When you specify these values, you must also set Color space (ColorSpace) to HDR 10 (HDR10). To specify whether the the values you specify here take precedence over the values in the metadata of your input file, set Color space usage (ColorSpaceUsage). To specify whether color metadata is included in an output, set Color metadata (ColorMetadata). For more information about MediaConvert HDR jobs, see https://docs.aws.amazon.com/console/mediaconvert/hdr." + "ColorCorrector$Hdr10Metadata": "Use these settings when you convert to the HDR 10 color space. Specify the SMPTE ST 2086 Mastering Display Color Volume static metadata that you want signaled in the output. These values don't affect the pixel values that are encoded in the video stream. They are intended to help the downstream video player display content in a way that reflects the intentions of the content creator. When you set Color space conversion to HDR 10, these settings are required. You must set values for Max frame average light level and Max content light level; these settings don't have a default value. The default values for the other HDR 10 metadata settings are defined by the P3D65 color space. For more information about MediaConvert HDR jobs, see https://docs.aws.amazon.com/console/mediaconvert/hdr.", + "VideoSelector$Hdr10Metadata": "Use these settings to provide HDR 10 metadata that is missing or inaccurate in your input video. Appropriate values vary depending on the input video and must be provided by a color grader. The color grader generates these values during the HDR 10 mastering process. The valid range for each of these settings is 0 to 50,000. Each increment represents 0.00002 in CIE1931 color coordinate. Related settings - When you specify these values, you must also set Color space to HDR 10. To specify whether the values you specify here take precedence over the values in the metadata of your input file, set Color space usage. To specify whether color metadata is included in an output, set Color metadata. For more information about MediaConvert HDR jobs, see https://docs.aws.amazon.com/console/mediaconvert/hdr." } }, "Hdr10Plus": { @@ -1973,15 +1973,15 @@ } }, "HlsAudioOnlyContainer": { - "base": "Use this setting only in audio-only outputs. Choose MPEG-2 Transport Stream (M2TS) to create a file in an MPEG2-TS container. Keep the default value Automatic (AUTOMATIC) to create a raw audio-only file with no container. Regardless of the value that you specify here, if this output has video, the service will place outputs into an MPEG2-TS container.", + "base": "Use this setting only in audio-only outputs. Choose MPEG-2 Transport Stream (M2TS) to create a file in an MPEG2-TS container. Keep the default value Automatic to create a raw audio-only file with no container. 
Regardless of the value that you specify here, if this output has video, the service will place outputs into an MPEG2-TS container.", "refs": { - "HlsSettings$AudioOnlyContainer": "Use this setting only in audio-only outputs. Choose MPEG-2 Transport Stream (M2TS) to create a file in an MPEG2-TS container. Keep the default value Automatic (AUTOMATIC) to create an audio-only file in a raw container. Regardless of the value that you specify here, if this output has video, the service will place the output into an MPEG2-TS container." + "HlsSettings$AudioOnlyContainer": "Use this setting only in audio-only outputs. Choose MPEG-2 Transport Stream (M2TS) to create a file in an MPEG2-TS container. Keep the default value Automatic to create an audio-only file in a raw container. Regardless of the value that you specify here, if this output has video, the service will place the output into an MPEG2-TS container." } }, "HlsAudioOnlyHeader": { - "base": "Ignore this setting unless you are using FairPlay DRM with Verimatrix and you encounter playback issues. Keep the default value, Include (INCLUDE), to output audio-only headers. Choose Exclude (EXCLUDE) to remove the audio-only headers from your audio segments.", + "base": "Ignore this setting unless you are using FairPlay DRM with Verimatrix and you encounter playback issues. Keep the default value, Include, to output audio-only headers. Choose Exclude to remove the audio-only headers from your audio segments.", "refs": { - "HlsGroupSettings$AudioOnlyHeader": "Ignore this setting unless you are using FairPlay DRM with Verimatrix and you encounter playback issues. Keep the default value, Include (INCLUDE), to output audio-only headers. Choose Exclude (EXCLUDE) to remove the audio-only headers from your audio segments." + "HlsGroupSettings$AudioOnlyHeader": "Ignore this setting unless you are using FairPlay DRM with Verimatrix and you encounter playback issues. Keep the default value, Include, to output audio-only headers. Choose Exclude to remove the audio-only headers from your audio segments." } }, "HlsAudioTrackType": { @@ -2003,15 +2003,15 @@ } }, "HlsCaptionSegmentLengthControl": { - "base": "Set Caption segment length control (CaptionSegmentLengthControl) to Match video (MATCH_VIDEO) to create caption segments that align with the video segments from the first video output in this output group. For example, if the video segments are 2 seconds long, your WebVTT segments will also be 2 seconds long. Keep the default setting, Large segments (LARGE_SEGMENTS) to create caption segments that are 300 seconds long.", + "base": "Set Caption segment length control to Match video to create caption segments that align with the video segments from the first video output in this output group. For example, if the video segments are 2 seconds long, your WebVTT segments will also be 2 seconds long. Keep the default setting, Large segments, to create caption segments that are 300 seconds long.", "refs": { - "HlsGroupSettings$CaptionSegmentLengthControl": "Set Caption segment length control (CaptionSegmentLengthControl) to Match video (MATCH_VIDEO) to create caption segments that align with the video segments from the first video output in this output group. For example, if the video segments are 2 seconds long, your WebVTT segments will also be 2 seconds long. Keep the default setting, Large segments (LARGE_SEGMENTS) to create caption segments that are 300 seconds long." 
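The HLS options described here map onto the generated HlsGroupSettings struct in the same way. A short sketch of caption segments aligned with 2-second video segments; the constant name HlsCaptionSegmentLengthControlMatchVideo is assumed from the v1 SDK's enum-generation pattern rather than taken from this diff:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	// MATCH_VIDEO makes each WebVTT caption segment the same length as the
	// video segments (2 seconds here); the default LARGE_SEGMENTS would
	// produce 300-second caption segments instead.
	hls := &mediaconvert.HlsGroupSettings{
		SegmentLength:               aws.Int64(2),
		CaptionSegmentLengthControl: aws.String(mediaconvert.HlsCaptionSegmentLengthControlMatchVideo),
	}
	fmt.Println(hls)
}
```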
+ "HlsGroupSettings$CaptionSegmentLengthControl": "Set Caption segment length control to Match video to create caption segments that align with the video segments from the first video output in this output group. For example, if the video segments are 2 seconds long, your WebVTT segments will also be 2 seconds long. Keep the default setting, Large segments to create caption segments that are 300 seconds long." } }, "HlsClientCache": { - "base": "Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no tag. Otherwise, keep the default value Enabled (ENABLED) and control caching in your video distribution set up. For example, use the Cache-Control http header.", + "base": "Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no tag. Otherwise, keep the default value Enabled and control caching in your video distribution set up. For example, use the Cache-Control http header.", "refs": { - "HlsGroupSettings$ClientCache": "Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no tag. Otherwise, keep the default value Enabled (ENABLED) and control caching in your video distribution set up. For example, use the Cache-Control http header." + "HlsGroupSettings$ClientCache": "Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no tag. Otherwise, keep the default value Enabled and control caching in your video distribution set up. For example, use the Cache-Control http header." } }, "HlsCodecSpecification": { @@ -2021,9 +2021,9 @@ } }, "HlsDescriptiveVideoServiceFlag": { - "base": "Specify whether to flag this audio track as descriptive video service (DVS) in your HLS parent manifest. When you choose Flag (FLAG), MediaConvert includes the parameter CHARACTERISTICS=\"public.accessibility.describes-video\" in the EXT-X-MEDIA entry for this track. When you keep the default choice, Don't flag (DONT_FLAG), MediaConvert leaves this parameter out. The DVS flag can help with accessibility on Apple devices. For more information, see the Apple documentation.", + "base": "Specify whether to flag this audio track as descriptive video service (DVS) in your HLS parent manifest. When you choose Flag, MediaConvert includes the parameter CHARACTERISTICS=\"public.accessibility.describes-video\" in the EXT-X-MEDIA entry for this track. When you keep the default choice, Don't flag, MediaConvert leaves this parameter out. The DVS flag can help with accessibility on Apple devices. For more information, see the Apple documentation.", "refs": { - "HlsSettings$DescriptiveVideoServiceFlag": "Specify whether to flag this audio track as descriptive video service (DVS) in your HLS parent manifest. When you choose Flag (FLAG), MediaConvert includes the parameter CHARACTERISTICS=\"public.accessibility.describes-video\" in the EXT-X-MEDIA entry for this track. When you keep the default choice, Don't flag (DONT_FLAG), MediaConvert leaves this parameter out. The DVS flag can help with accessibility on Apple devices. For more information, see the Apple documentation." + "HlsSettings$DescriptiveVideoServiceFlag": "Specify whether to flag this audio track as descriptive video service (DVS) in your HLS parent manifest. When you choose Flag, MediaConvert includes the parameter CHARACTERISTICS=\"public.accessibility.describes-video\" in the EXT-X-MEDIA entry for this track. When you keep the default choice, Don't flag, MediaConvert leaves this parameter out. The DVS flag can help with accessibility on Apple devices. 
For more information, see the Apple documentation." } }, "HlsDirectoryStructure": { @@ -2045,21 +2045,21 @@ } }, "HlsGroupSettings": { - "base": "Settings related to your HLS output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html. When you work directly in your JSON job specification, include this object and any required children when you set Type, under OutputGroupSettings, to HLS_GROUP_SETTINGS.", + "base": "Settings related to your HLS output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html.", "refs": { - "OutputGroupSettings$HlsGroupSettings": "Settings related to your HLS output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html. When you work directly in your JSON job specification, include this object and any required children when you set Type, under OutputGroupSettings, to HLS_GROUP_SETTINGS." + "OutputGroupSettings$HlsGroupSettings": "Settings related to your HLS output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html." } }, "HlsIFrameOnlyManifest": { - "base": "Choose Include (INCLUDE) to have MediaConvert generate a child manifest that lists only the I-frames for this rendition, in addition to your regular manifest for this rendition. You might use this manifest as part of a workflow that creates preview functions for your video. MediaConvert adds both the I-frame only child manifest and the regular child manifest to the parent manifest. When you don't need the I-frame only child manifest, keep the default value Exclude (EXCLUDE).", + "base": "Choose Include to have MediaConvert generate a child manifest that lists only the I-frames for this rendition, in addition to your regular manifest for this rendition. You might use this manifest as part of a workflow that creates preview functions for your video. MediaConvert adds both the I-frame only child manifest and the regular child manifest to the parent manifest. When you don't need the I-frame only child manifest, keep the default value Exclude.", "refs": { - "HlsSettings$IFrameOnlyManifest": "Choose Include (INCLUDE) to have MediaConvert generate a child manifest that lists only the I-frames for this rendition, in addition to your regular manifest for this rendition. You might use this manifest as part of a workflow that creates preview functions for your video. MediaConvert adds both the I-frame only child manifest and the regular child manifest to the parent manifest. When you don't need the I-frame only child manifest, keep the default value Exclude (EXCLUDE)." + "HlsSettings$IFrameOnlyManifest": "Choose Include to have MediaConvert generate a child manifest that lists only the I-frames for this rendition, in addition to your regular manifest for this rendition. You might use this manifest as part of a workflow that creates preview functions for your video. MediaConvert adds both the I-frame only child manifest and the regular child manifest to the parent manifest. When you don't need the I-frame only child manifest, keep the default value Exclude." } }, "HlsImageBasedTrickPlay": { - "base": "Specify whether MediaConvert generates images for trick play. Keep the default value, None (NONE), to not generate any images. Choose Thumbnail (THUMBNAIL) to generate tiled thumbnails. 
Choose Thumbnail and full frame (THUMBNAIL_AND_FULLFRAME) to generate tiled thumbnails and full-resolution images of single frames. MediaConvert creates a child manifest for each set of images that you generate and adds corresponding entries to the parent manifest. A common application for these images is Roku trick mode. The thumbnails and full-frame images that MediaConvert creates with this feature are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md", + "base": "Specify whether MediaConvert generates images for trick play. Keep the default value, None, to not generate any images. Choose Thumbnail to generate tiled thumbnails. Choose Thumbnail and full frame to generate tiled thumbnails and full-resolution images of single frames. MediaConvert creates a child manifest for each set of images that you generate and adds corresponding entries to the parent manifest. A common application for these images is Roku trick mode. The thumbnails and full-frame images that MediaConvert creates with this feature are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md", "refs": { - "HlsGroupSettings$ImageBasedTrickPlay": "Specify whether MediaConvert generates images for trick play. Keep the default value, None (NONE), to not generate any images. Choose Thumbnail (THUMBNAIL) to generate tiled thumbnails. Choose Thumbnail and full frame (THUMBNAIL_AND_FULLFRAME) to generate tiled thumbnails and full-resolution images of single frames. MediaConvert creates a child manifest for each set of images that you generate and adds corresponding entries to the parent manifest. A common application for these images is Roku trick mode. The thumbnails and full-frame images that MediaConvert creates with this feature are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md" + "HlsGroupSettings$ImageBasedTrickPlay": "Specify whether MediaConvert generates images for trick play. Keep the default value, None, to not generate any images. Choose Thumbnail to generate tiled thumbnails. Choose Thumbnail and full frame to generate tiled thumbnails and full-resolution images of single frames. MediaConvert creates a child manifest for each set of images that you generate and adds corresponding entries to the parent manifest. A common application for these images is Roku trick mode. The thumbnails and full-frame images that MediaConvert creates with this feature are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md" } }, "HlsImageBasedTrickPlaySettings": { @@ -2135,9 +2135,9 @@ } }, "HlsSegmentLengthControl": { - "base": "Specify how you want MediaConvert to determine the segment length. Choose Exact (EXACT) to have the encoder use the exact length that you specify with the setting Segment length (SegmentLength). This might result in extra I-frames. Choose Multiple of GOP (GOP_MULTIPLE) to have the encoder round up the segment lengths to match the next GOP boundary.", + "base": "Specify how you want MediaConvert to determine the segment length. Choose Exact to have the encoder use the exact length that you specify with the setting Segment length. This might result in extra I-frames. 
Choose Multiple of GOP to have the encoder round up the segment lengths to match the next GOP boundary.", "refs": { - "HlsGroupSettings$SegmentLengthControl": "Specify how you want MediaConvert to determine the segment length. Choose Exact (EXACT) to have the encoder use the exact length that you specify with the setting Segment length (SegmentLength). This might result in extra I-frames. Choose Multiple of GOP (GOP_MULTIPLE) to have the encoder round up the segment lengths to match the next GOP boundary." + "HlsGroupSettings$SegmentLengthControl": "Specify how you want MediaConvert to determine the segment length. Choose Exact to have the encoder use the exact length that you specify with the setting Segment length. This might result in extra I-frames. Choose Multiple of GOP to have the encoder round up the segment lengths to match the next GOP boundary." } }, "HlsSettings": { @@ -2159,9 +2159,9 @@ } }, "HlsTimedMetadataId3Frame": { - "base": "Specify the type of the ID3 frame (timedMetadataId3Frame) to use for ID3 timestamps (timedMetadataId3Period) in your output. To include ID3 timestamps: Specify PRIV (PRIV) or TDRL (TDRL) and set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH). To exclude ID3 timestamps: Set ID3 timestamp frame type to None (NONE).", + "base": "Specify the type of the ID3 frame to use for ID3 timestamps in your output. To include ID3 timestamps: Specify PRIV or TDRL and set ID3 metadata to Passthrough. To exclude ID3 timestamps: Set ID3 timestamp frame type to None.", "refs": { - "HlsGroupSettings$TimedMetadataId3Frame": "Specify the type of the ID3 frame (timedMetadataId3Frame) to use for ID3 timestamps (timedMetadataId3Period) in your output. To include ID3 timestamps: Specify PRIV (PRIV) or TDRL (TDRL) and set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH). To exclude ID3 timestamps: Set ID3 timestamp frame type to None (NONE)." + "HlsGroupSettings$TimedMetadataId3Frame": "Specify the type of the ID3 frame to use for ID3 timestamps in your output. To include ID3 timestamps: Specify PRIV or TDRL and set ID3 metadata to Passthrough. To exclude ID3 timestamps: Set ID3 timestamp frame type to None." } }, "HopDestination": { @@ -2171,7 +2171,7 @@ } }, "Id3Insertion": { - "base": "To insert ID3 tags in your output, specify two values. Use ID3 tag (Id3) to specify the base 64 encoded string and use Timecode (TimeCode) to specify the time when the tag should be inserted. To insert multiple ID3 tags in your output, create multiple instances of ID3 insertion (Id3Insertion).", + "base": "To insert ID3 tags in your output, specify two values. Use ID3 tag to specify the base 64 encoded string and use Timecode to specify the time when the tag should be inserted. To insert multiple ID3 tags in your output, create multiple instances of ID3 insertion.", "refs": { "__listOfId3Insertion$member": null } @@ -2181,7 +2181,7 @@ "refs": { "Input$ImageInserter": "Enable the image inserter feature to include a graphic overlay on your video. Enable or disable this feature for each input individually. This setting is disabled by default.", "InputTemplate$ImageInserter": "Enable the image inserter feature to include a graphic overlay on your video. Enable or disable this feature for each input individually. This setting is disabled by default.", - "VideoPreprocessor$ImageInserter": "Enable the Image inserter (ImageInserter) feature to include a graphic overlay on your video. Enable or disable this feature for each output individually. This setting is disabled by default." 
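The Id3Insertion entry above pairs two values on one object. A hedged sketch of the same thing in the v1 Go SDK; the Id3 and Timecode field spellings follow the setting names in the doc string, and the payload and timecode values are made up for illustration:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	// One ID3 tag: a base64-encoded payload plus the timecode at which
	// MediaConvert inserts it. Add more Id3Insertion instances for more tags.
	tag := &mediaconvert.Id3Insertion{
		Id3:      aws.String("SUQzBAAAAAAAAA=="), // illustrative base64 string
		Timecode: aws.String("00:01:30:00"),      // illustrative HH:MM:SS:FF timecode
	}
	fmt.Println(tag)
}
```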
+ "VideoPreprocessor$ImageInserter": "Enable the Image inserter feature to include a graphic overlay on your video. Enable or disable this feature for each output individually. This setting is disabled by default." } }, "ImscAccessibilitySubs": { @@ -2191,9 +2191,9 @@ } }, "ImscDestinationSettings": { - "base": "Settings related to IMSC captions. IMSC is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html. When you work directly in your JSON job specification, include this object and any required children when you set destinationType to IMSC.", + "base": "Settings related to IMSC captions. IMSC is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html.", "refs": { - "CaptionDestinationSettings$ImscDestinationSettings": "Settings related to IMSC captions. IMSC is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html. When you work directly in your JSON job specification, include this object and any required children when you set destinationType to IMSC." + "CaptionDestinationSettings$ImscDestinationSettings": "Settings related to IMSC captions. IMSC is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html." } }, "ImscStylePassthrough": { @@ -2215,10 +2215,10 @@ } }, "InputDeblockFilter": { - "base": "Enable Deblock (InputDeblockFilter) to produce smoother motion in the output. Default is disabled. Only manually controllable for MPEG2 and uncompressed video inputs.", + "base": "Enable Deblock to produce smoother motion in the output. Default is disabled. Only manually controllable for MPEG2 and uncompressed video inputs.", "refs": { - "Input$DeblockFilter": "Enable Deblock (InputDeblockFilter) to produce smoother motion in the output. Default is disabled. Only manually controllable for MPEG2 and uncompressed video inputs.", - "InputTemplate$DeblockFilter": "Enable Deblock (InputDeblockFilter) to produce smoother motion in the output. Default is disabled. Only manually controllable for MPEG2 and uncompressed video inputs." + "Input$DeblockFilter": "Enable Deblock to produce smoother motion in the output. Default is disabled. Only manually controllable for MPEG2 and uncompressed video inputs.", + "InputTemplate$DeblockFilter": "Enable Deblock to produce smoother motion in the output. Default is disabled. Only manually controllable for MPEG2 and uncompressed video inputs." } }, "InputDecryptionSettings": { @@ -2228,10 +2228,10 @@ } }, "InputDenoiseFilter": { - "base": "Enable Denoise (InputDenoiseFilter) to filter noise from the input. Default is disabled. 
Only applicable to MPEG2, H.264, H.265, and uncompressed video inputs.", + "base": "Enable Denoise to filter noise from the input. Default is disabled. Only applicable to MPEG2, H.264, H.265, and uncompressed video inputs.", "refs": { - "Input$DenoiseFilter": "Enable Denoise (InputDenoiseFilter) to filter noise from the input. Default is disabled. Only applicable to MPEG2, H.264, H.265, and uncompressed video inputs.", - "InputTemplate$DenoiseFilter": "Enable Denoise (InputDenoiseFilter) to filter noise from the input. Default is disabled. Only applicable to MPEG2, H.264, H.265, and uncompressed video inputs." + "Input$DenoiseFilter": "Enable Denoise to filter noise from the input. Default is disabled. Only applicable to MPEG2, H.264, H.265, and uncompressed video inputs.", + "InputTemplate$DenoiseFilter": "Enable Denoise to filter noise from the input. Default is disabled. Only applicable to MPEG2, H.264, H.265, and uncompressed video inputs." } }, "InputFilterEnable": { @@ -2250,29 +2250,29 @@ } }, "InputPsiControl": { - "base": "Set PSI control (InputPsiControl) for transport stream inputs to specify which data the demux process to scans. * Ignore PSI - Scan all PIDs for audio and video. * Use PSI - Scan only PSI data.", + "base": "Set PSI control for transport stream inputs to specify which data the demux process scans.\n* Ignore PSI - Scan all PIDs for audio and video.\n* Use PSI - Scan only PSI data.", "refs": { - "Input$PsiControl": "Set PSI control (InputPsiControl) for transport stream inputs to specify which data the demux process to scans. * Ignore PSI - Scan all PIDs for audio and video. * Use PSI - Scan only PSI data.", - "InputTemplate$PsiControl": "Set PSI control (InputPsiControl) for transport stream inputs to specify which data the demux process to scans. * Ignore PSI - Scan all PIDs for audio and video. * Use PSI - Scan only PSI data." + "Input$PsiControl": "Set PSI control for transport stream inputs to specify which data the demux process scans.\n* Ignore PSI - Scan all PIDs for audio and video.\n* Use PSI - Scan only PSI data.", + "InputTemplate$PsiControl": "Set PSI control for transport stream inputs to specify which data the demux process scans.\n* Ignore PSI - Scan all PIDs for audio and video.\n* Use PSI - Scan only PSI data." } }, "InputRotate": { - "base": "Use Rotate (InputRotate) to specify how the service rotates your video. You can choose automatic rotation or specify a rotation. You can specify a clockwise rotation of 0, 90, 180, or 270 degrees. If your input video container is .mov or .mp4 and your input has rotation metadata, you can choose Automatic to have the service rotate your video according to the rotation specified in the metadata. The rotation must be within one degree of 90, 180, or 270 degrees. If the rotation metadata specifies any other rotation, the service will default to no rotation. By default, the service does no rotation, even if your input video has rotation metadata. The service doesn't pass through rotation metadata.", + "base": "Use Rotate to specify how the service rotates your video. You can choose automatic rotation or specify a rotation. You can specify a clockwise rotation of 0, 90, 180, or 270 degrees. If your input video container is .mov or .mp4 and your input has rotation metadata, you can choose Automatic to have the service rotate your video according to the rotation specified in the metadata. The rotation must be within one degree of 90, 180, or 270 degrees.
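The input-side entries above (Denoise, PSI control, Rotate) land on mediaconvert.Input and its nested VideoSelector. A sketch; the Ignore PSI/Use PSI choices and the Automatic rotation option appear in the original text, but the exact ENABLED, USE_PSI, and AUTO enum spellings are assumptions to verify against the generated constants:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	in := &mediaconvert.Input{
		// "Enable Denoise to filter noise from the input"; spelling assumed.
		DenoiseFilter: aws.String("ENABLED"),
		// Scan only PSI data on transport stream inputs ("Use PSI"); spelling assumed.
		PsiControl: aws.String("USE_PSI"),
		VideoSelector: &mediaconvert.VideoSelector{
			// Follow rotation metadata in .mov/.mp4 inputs ("Automatic"); spelling assumed.
			Rotate: aws.String("AUTO"),
		},
	}
	fmt.Println(in)
}
```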
If the rotation metadata specifies any other rotation, the service will default to no rotation. By default, the service does no rotation, even if your input video has rotation metadata. The service doesn't pass through rotation metadata.", "refs": { - "VideoSelector$Rotate": "Use Rotate (InputRotate) to specify how the service rotates your video. You can choose automatic rotation or specify a rotation. You can specify a clockwise rotation of 0, 90, 180, or 270 degrees. If your input video container is .mov or .mp4 and your input has rotation metadata, you can choose Automatic to have the service rotate your video according to the rotation specified in the metadata. The rotation must be within one degree of 90, 180, or 270 degrees. If the rotation metadata specifies any other rotation, the service will default to no rotation. By default, the service does no rotation, even if your input video has rotation metadata. The service doesn't pass through rotation metadata." + "VideoSelector$Rotate": "Use Rotate to specify how the service rotates your video. You can choose automatic rotation or specify a rotation. You can specify a clockwise rotation of 0, 90, 180, or 270 degrees. If your input video container is .mov or .mp4 and your input has rotation metadata, you can choose Automatic to have the service rotate your video according to the rotation specified in the metadata. The rotation must be within one degree of 90, 180, or 270 degrees. If the rotation metadata specifies any other rotation, the service will default to no rotation. By default, the service does no rotation, even if your input video has rotation metadata. The service doesn't pass through rotation metadata." } }, "InputSampleRange": { - "base": "If the sample range metadata in your input video is accurate, or if you don't know about sample range, keep the default value, Follow (FOLLOW), for this setting. When you do, the service automatically detects your input sample range. If your input video has metadata indicating the wrong sample range, specify the accurate sample range here. When you do, MediaConvert ignores any sample range information in the input metadata. Regardless of whether MediaConvert uses the input sample range or the sample range that you specify, MediaConvert uses the sample range for transcoding and also writes it to the output metadata.", + "base": "If the sample range metadata in your input video is accurate, or if you don't know about sample range, keep the default value, Follow, for this setting. When you do, the service automatically detects your input sample range. If your input video has metadata indicating the wrong sample range, specify the accurate sample range here. When you do, MediaConvert ignores any sample range information in the input metadata. Regardless of whether MediaConvert uses the input sample range or the sample range that you specify, MediaConvert uses the sample range for transcoding and also writes it to the output metadata.", "refs": { - "VideoSelector$SampleRange": "If the sample range metadata in your input video is accurate, or if you don't know about sample range, keep the default value, Follow (FOLLOW), for this setting. When you do, the service automatically detects your input sample range. If your input video has metadata indicating the wrong sample range, specify the accurate sample range here. When you do, MediaConvert ignores any sample range information in the input metadata. 
Regardless of whether MediaConvert uses the input sample range or the sample range that you specify, MediaConvert uses the sample range for transcoding and also writes it to the output metadata." + "VideoSelector$SampleRange": "If the sample range metadata in your input video is accurate, or if you don't know about sample range, keep the default value, Follow, for this setting. When you do, the service automatically detects your input sample range. If your input video has metadata indicating the wrong sample range, specify the accurate sample range here. When you do, MediaConvert ignores any sample range information in the input metadata. Regardless of whether MediaConvert uses the input sample range or the sample range that you specify, MediaConvert uses the sample range for transcoding and also writes it to the output metadata." } }, "InputScanType": { - "base": "When you have a progressive segmented frame (PsF) input, use this setting to flag the input as PsF. MediaConvert doesn't automatically detect PsF. Therefore, flagging your input as PsF results in better preservation of video quality when you do deinterlacing and frame rate conversion. If you don't specify, the default value is Auto (AUTO). Auto is the correct setting for all inputs that are not PsF. Don't set this value to PsF when your input is interlaced. Doing so creates horizontal interlacing artifacts.", + "base": "When you have a progressive segmented frame (PsF) input, use this setting to flag the input as PsF. MediaConvert doesn't automatically detect PsF. Therefore, flagging your input as PsF results in better preservation of video quality when you do deinterlacing and frame rate conversion. If you don't specify, the default value is Auto. Auto is the correct setting for all inputs that are not PsF. Don't set this value to PsF when your input is interlaced. Doing so creates horizontal interlacing artifacts.", "refs": { - "Input$InputScanType": "When you have a progressive segmented frame (PsF) input, use this setting to flag the input as PsF. MediaConvert doesn't automatically detect PsF. Therefore, flagging your input as PsF results in better preservation of video quality when you do deinterlacing and frame rate conversion. If you don't specify, the default value is Auto (AUTO). Auto is the correct setting for all inputs that are not PsF. Don't set this value to PsF when your input is interlaced. Doing so creates horizontal interlacing artifacts.", - "InputTemplate$InputScanType": "When you have a progressive segmented frame (PsF) input, use this setting to flag the input as PsF. MediaConvert doesn't automatically detect PsF. Therefore, flagging your input as PsF results in better preservation of video quality when you do deinterlacing and frame rate conversion. If you don't specify, the default value is Auto (AUTO). Auto is the correct setting for all inputs that are not PsF. Don't set this value to PsF when your input is interlaced. Doing so creates horizontal interlacing artifacts." + "Input$InputScanType": "When you have a progressive segmented frame (PsF) input, use this setting to flag the input as PsF. MediaConvert doesn't automatically detect PsF. Therefore, flagging your input as PsF results in better preservation of video quality when you do deinterlacing and frame rate conversion. If you don't specify, the default value is Auto. Auto is the correct setting for all inputs that are not PsF. Don't set this value to PsF when your input is interlaced. 
Doing so creates horizontal interlacing artifacts.", + "InputTemplate$InputScanType": "When you have a progressive segmented frame (PsF) input, use this setting to flag the input as PsF. MediaConvert doesn't automatically detect PsF. Therefore, flagging your input as PsF results in better preservation of video quality when you do deinterlacing and frame rate conversion. If you don't specify, the default value is Auto. Auto is the correct setting for all inputs that are not PsF. Don't set this value to PsF when your input is interlaced. Doing so creates horizontal interlacing artifacts." } }, "InputTemplate": { @@ -2282,10 +2282,10 @@ } }, "InputTimecodeSource": { - "base": "Use this Timecode source setting, located under the input settings (InputTimecodeSource), to specify how the service counts input video frames. This input frame count affects only the behavior of features that apply to a single input at a time, such as input clipping and synchronizing some captions formats. Choose Embedded (EMBEDDED) to use the timecodes in your input video. Choose Start at zero (ZEROBASED) to start the first frame at zero. Choose Specified start (SPECIFIEDSTART) to start the first frame at the timecode that you specify in the setting Start timecode (timecodeStart). If you don't specify a value for Timecode source, the service will use Embedded by default. For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode.", + "base": "Use this Timecode source setting, located under the input settings, to specify how the service counts input video frames. This input frame count affects only the behavior of features that apply to a single input at a time, such as input clipping and synchronizing some captions formats. Choose Embedded to use the timecodes in your input video. Choose Start at zero to start the first frame at zero. Choose Specified start to start the first frame at the timecode that you specify in the setting Start timecode. If you don't specify a value for Timecode source, the service will use Embedded by default. For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode.", "refs": { - "Input$TimecodeSource": "Use this Timecode source setting, located under the input settings (InputTimecodeSource), to specify how the service counts input video frames. This input frame count affects only the behavior of features that apply to a single input at a time, such as input clipping and synchronizing some captions formats. Choose Embedded (EMBEDDED) to use the timecodes in your input video. Choose Start at zero (ZEROBASED) to start the first frame at zero. Choose Specified start (SPECIFIEDSTART) to start the first frame at the timecode that you specify in the setting Start timecode (timecodeStart). If you don't specify a value for Timecode source, the service will use Embedded by default. For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode.", - "InputTemplate$TimecodeSource": "Use this Timecode source setting, located under the input settings (InputTimecodeSource), to specify how the service counts input video frames. This input frame count affects only the behavior of features that apply to a single input at a time, such as input clipping and synchronizing some captions formats. Choose Embedded (EMBEDDED) to use the timecodes in your input video. Choose Start at zero (ZEROBASED) to start the first frame at zero. 
Choose Specified start (SPECIFIEDSTART) to start the first frame at the timecode that you specify in the setting Start timecode (timecodeStart). If you don't specify a value for Timecode source, the service will use Embedded by default. For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode." + "Input$TimecodeSource": "Use this Timecode source setting, located under the input settings, to specify how the service counts input video frames. This input frame count affects only the behavior of features that apply to a single input at a time, such as input clipping and synchronizing some captions formats. Choose Embedded to use the timecodes in your input video. Choose Start at zero to start the first frame at zero. Choose Specified start to start the first frame at the timecode that you specify in the setting Start timecode. If you don't specify a value for Timecode source, the service will use Embedded by default. For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode.", + "InputTemplate$TimecodeSource": "Use this Timecode source setting, located under the input settings, to specify how the service counts input video frames. This input frame count affects only the behavior of features that apply to a single input at a time, such as input clipping and synchronizing some captions formats. Choose Embedded to use the timecodes in your input video. Choose Start at zero to start the first frame at zero. Choose Specified start to start the first frame at the timecode that you specify in the setting Start timecode. If you don't specify a value for Timecode source, the service will use Embedded by default. For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode." } }, "InputVideoGenerator": { @@ -2439,9 +2439,9 @@ } }, "M2tsAudioDuration": { - "base": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec.", + "base": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration. In all other cases, keep the default value, Default codec duration. When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. 
MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec.", "refs": { - "M2tsSettings$AudioDuration": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec." + "M2tsSettings$AudioDuration": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration. In all other cases, keep the default value, Default codec duration. When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec." } }, "M2tsBufferModel": { @@ -2451,9 +2451,9 @@ } }, "M2tsDataPtsControl": { - "base": "If you select ALIGN_TO_VIDEO, MediaConvert writes captions and data packets with Presentation Timestamp (PTS) values greater than or equal to the first video packet PTS (MediaConvert drops captions and data packets with lesser PTS values). Keep the default value (AUTO) to allow all PTS values.", + "base": "If you select ALIGN_TO_VIDEO, MediaConvert writes captions and data packets with Presentation Timestamp (PTS) values greater than or equal to the first video packet PTS (MediaConvert drops captions and data packets with lesser PTS values). Keep the default value to allow all PTS values.", "refs": { - "M2tsSettings$DataPTSControl": "If you select ALIGN_TO_VIDEO, MediaConvert writes captions and data packets with Presentation Timestamp (PTS) values greater than or equal to the first video packet PTS (MediaConvert drops captions and data packets with lesser PTS values). Keep the default value (AUTO) to allow all PTS values." 
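Both duration/PTS entries above take values that the removed parentheticals spell out verbatim (MATCH_VIDEO_DURATION, ALIGN_TO_VIDEO). A minimal sketch of an M2TS configuration for a duration-sensitive downstream repackaging workflow, with field names taken from the Shape$Member keys:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	m2ts := &mediaconvert.M2tsSettings{
		// Pad or trim audio so its duration tracks the video duration;
		// leave unset for the DEFAULT_CODEC_DURATION behavior.
		AudioDuration: aws.String("MATCH_VIDEO_DURATION"),
		// Drop caption/data packets whose PTS precedes the first video PTS.
		DataPTSControl: aws.String("ALIGN_TO_VIDEO"),
	}
	fmt.Println(m2ts)
}
```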
+ "M2tsSettings$DataPTSControl": "If you select ALIGN_TO_VIDEO, MediaConvert writes captions and data packets with Presentation Timestamp (PTS) values greater than or equal to the first video packet PTS (MediaConvert drops captions and data packets with lesser PTS values). Keep the default value to allow all PTS values." } }, "M2tsEbpAudioInterval": { @@ -2475,9 +2475,9 @@ } }, "M2tsForceTsVideoEbpOrder": { - "base": "Keep the default value (DEFAULT) unless you know that your audio EBP markers are incorrectly appearing before your video EBP markers. To correct this problem, set this value to Force (FORCE).", + "base": "Keep the default value unless you know that your audio EBP markers are incorrectly appearing before your video EBP markers. To correct this problem, set this value to Force.", "refs": { - "M2tsSettings$ForceTsVideoEbpOrder": "Keep the default value (DEFAULT) unless you know that your audio EBP markers are incorrectly appearing before your video EBP markers. To correct this problem, set this value to Force (FORCE)." + "M2tsSettings$ForceTsVideoEbpOrder": "Keep the default value unless you know that your audio EBP markers are incorrectly appearing before your video EBP markers. To correct this problem, set this value to Force." } }, "M2tsKlvMetadata": { @@ -2505,15 +2505,15 @@ } }, "M2tsScte35Esam": { - "base": "Settings for SCTE-35 signals from ESAM. Include this in your job settings to put SCTE-35 markers in your HLS and transport stream outputs at the insertion points that you specify in an ESAM XML document. Provide the document in the setting SCC XML (sccXml).", + "base": "Settings for SCTE-35 signals from ESAM. Include this in your job settings to put SCTE-35 markers in your HLS and transport stream outputs at the insertion points that you specify in an ESAM XML document. Provide the document in the setting SCC XML.", "refs": { - "M2tsSettings$Scte35Esam": "Include this in your job settings to put SCTE-35 markers in your HLS and transport stream outputs at the insertion points that you specify in an ESAM XML document. Provide the document in the setting SCC XML (sccXml)." + "M2tsSettings$Scte35Esam": "Include this in your job settings to put SCTE-35 markers in your HLS and transport stream outputs at the insertion points that you specify in an ESAM XML document. Provide the document in the setting SCC XML." } }, "M2tsScte35Source": { - "base": "For SCTE-35 markers from your input-- Choose Passthrough (PASSTHROUGH) if you want SCTE-35 markers that appear in your input to also appear in this output. Choose None (NONE) if you don't want SCTE-35 markers in this output. For SCTE-35 markers from an ESAM XML document-- Choose None (NONE). Also provide the ESAM XML as a string in the setting Signal processing notification XML (sccXml). Also enable ESAM SCTE-35 (include the property scte35Esam).", + "base": "For SCTE-35 markers from your input-- Choose Passthrough if you want SCTE-35 markers that appear in your input to also appear in this output. Choose None if you don't want SCTE-35 markers in this output. For SCTE-35 markers from an ESAM XML document-- Choose None. Also provide the ESAM XML as a string in the setting Signal processing notification XML. Also enable ESAM SCTE-35 (include the property scte35Esam).", "refs": { - "M2tsSettings$Scte35Source": "For SCTE-35 markers from your input-- Choose Passthrough (PASSTHROUGH) if you want SCTE-35 markers that appear in your input to also appear in this output. Choose None (NONE) if you don't want SCTE-35 markers in this output. 
For SCTE-35 markers from an ESAM XML document-- Choose None (NONE). Also provide the ESAM XML as a string in the setting Signal processing notification XML (sccXml). Also enable ESAM SCTE-35 (include the property scte35Esam)." + "M2tsSettings$Scte35Source": "For SCTE-35 markers from your input-- Choose Passthrough if you want SCTE-35 markers that appear in your input to also appear in this output. Choose None if you don't want SCTE-35 markers in this output. For SCTE-35 markers from an ESAM XML document-- Choose None. Also provide the ESAM XML as a string in the setting Signal processing notification XML. Also enable ESAM SCTE-35 (include the property scte35Esam)." } }, "M2tsSegmentationMarkers": { @@ -2529,21 +2529,21 @@ } }, "M2tsSettings": { - "base": "MPEG-2 TS container settings. These apply to outputs in a File output group when the output's container (ContainerType) is MPEG-2 Transport Stream (M2TS). In these assets, data is organized by the program map table (PMT). Each transport stream program contains subsets of data, including audio, video, and metadata. Each of these subsets of data has a numerical label called a packet identifier (PID). Each transport stream program corresponds to one MediaConvert output. The PMT lists the types of data in a program along with their PID. Downstream systems and players use the program map table to look up the PID for each type of data it accesses and then uses the PIDs to locate specific data within the asset.", + "base": "MPEG-2 TS container settings. These apply to outputs in a File output group when the output's container is MPEG-2 Transport Stream (M2TS). In these assets, data is organized by the program map table (PMT). Each transport stream program contains subsets of data, including audio, video, and metadata. Each of these subsets of data has a numerical label called a packet identifier (PID). Each transport stream program corresponds to one MediaConvert output. The PMT lists the types of data in a program along with their PID. Downstream systems and players use the program map table to look up the PID for each type of data they access and then use the PIDs to locate specific data within the asset.", "refs": { - "ContainerSettings$M2tsSettings": "MPEG-2 TS container settings. These apply to outputs in a File output group when the output's container (ContainerType) is MPEG-2 Transport Stream (M2TS). In these assets, data is organized by the program map table (PMT). Each transport stream program contains subsets of data, including audio, video, and metadata. Each of these subsets of data has a numerical label called a packet identifier (PID). Each transport stream program corresponds to one MediaConvert output. The PMT lists the types of data in a program along with their PID. Downstream systems and players use the program map table to look up the PID for each type of data it accesses and then uses the PIDs to locate specific data within the asset." + "ContainerSettings$M2tsSettings": "MPEG-2 TS container settings. These apply to outputs in a File output group when the output's container is MPEG-2 Transport Stream (M2TS). In these assets, data is organized by the program map table (PMT). Each transport stream program contains subsets of data, including audio, video, and metadata. Each of these subsets of data has a numerical label called a packet identifier (PID). Each transport stream program corresponds to one MediaConvert output. The PMT lists the types of data in a program along with their PID.
Downstream systems and players use the program map table to look up the PID for each type of data they access and then use the PIDs to locate specific data within the asset." } }, "M3u8AudioDuration": { - "base": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec.", + "base": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration. In all other cases, keep the default value, Default codec duration.
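To connect the M2tsSettings entry to its parent object: the container settings select M2TS and nest the transport stream options, including the SCTE-35 passthrough described above. A sketch; the Container member name is an assumption, while the M2TS and PASSTHROUGH values come straight from these doc strings:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	cs := &mediaconvert.ContainerSettings{
		Container: aws.String("M2TS"), // MPEG-2 Transport Stream output container
		M2tsSettings: &mediaconvert.M2tsSettings{
			// Carry SCTE-35 markers from the input through to this output.
			Scte35Source: aws.String("PASSTHROUGH"),
		},
	}
	fmt.Println(cs)
}
```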
When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec." } }, "M3u8DataPtsControl": { - "base": "If you select ALIGN_TO_VIDEO, MediaConvert writes captions and data packets with Presentation Timestamp (PTS) values greater than or equal to the first video packet PTS (MediaConvert drops captions and data packets with lesser PTS values). Keep the default value (AUTO) to allow all PTS values.", + "base": "If you select ALIGN_TO_VIDEO, MediaConvert writes captions and data packets with Presentation Timestamp (PTS) values greater than or equal to the first video packet PTS (MediaConvert drops captions and data packets with lesser PTS values). Keep the default value AUTO to allow all PTS values.", "refs": { - "M3u8Settings$DataPTSControl": "If you select ALIGN_TO_VIDEO, MediaConvert writes captions and data packets with Presentation Timestamp (PTS) values greater than or equal to the first video packet PTS (MediaConvert drops captions and data packets with lesser PTS values). Keep the default value (AUTO) to allow all PTS values." + "M3u8Settings$DataPTSControl": "If you select ALIGN_TO_VIDEO, MediaConvert writes captions and data packets with Presentation Timestamp (PTS) values greater than or equal to the first video packet PTS (MediaConvert drops captions and data packets with lesser PTS values). Keep the default value AUTO to allow all PTS values." } }, "M3u8NielsenId3": { @@ -2559,9 +2559,9 @@ } }, "M3u8Scte35Source": { - "base": "For SCTE-35 markers from your input-- Choose Passthrough (PASSTHROUGH) if you want SCTE-35 markers that appear in your input to also appear in this output. Choose None (NONE) if you don't want SCTE-35 markers in this output. For SCTE-35 markers from an ESAM XML document-- Choose None (NONE) if you don't want manifest conditioning. Choose Passthrough (PASSTHROUGH) and choose Ad markers (adMarkers) if you do want manifest conditioning. In both cases, also provide the ESAM XML as a string in the setting Signal processing notification XML (sccXml).", + "base": "For SCTE-35 markers from your input-- Choose Passthrough if you want SCTE-35 markers that appear in your input to also appear in this output. Choose None if you don't want SCTE-35 markers in this output. For SCTE-35 markers from an ESAM XML document-- Choose None if you don't want manifest conditioning. Choose Passthrough and choose Ad markers if you do want manifest conditioning. In both cases, also provide the ESAM XML as a string in the setting Signal processing notification XML.", "refs": { - "M3u8Settings$Scte35Source": "For SCTE-35 markers from your input-- Choose Passthrough (PASSTHROUGH) if you want SCTE-35 markers that appear in your input to also appear in this output. Choose None (NONE) if you don't want SCTE-35 markers in this output. For SCTE-35 markers from an ESAM XML document-- Choose None (NONE) if you don't want manifest conditioning. 
Choose Passthrough (PASSTHROUGH) and choose Ad markers (adMarkers) if you do want manifest conditioning. In both cases, also provide the ESAM XML as a string in the setting Signal processing notification XML (sccXml)." + "M3u8Settings$Scte35Source": "For SCTE-35 markers from your input-- Choose Passthrough if you want SCTE-35 markers that appear in your input to also appear in this output. Choose None if you don't want SCTE-35 markers in this output. For SCTE-35 markers from an ESAM XML document-- Choose None if you don't want manifest conditioning. Choose Passthrough and choose Ad markers if you do want manifest conditioning. In both cases, also provide the ESAM XML as a string in the setting Signal processing notification XML." } }, "M3u8Settings": { @@ -2650,9 +2650,9 @@ } }, "Mp2Settings": { - "base": "Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value MP2.", + "base": "Required when you set Codec to the value MP2.", "refs": { - "AudioCodecSettings$Mp2Settings": "Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value MP2." + "AudioCodecSettings$Mp2Settings": "Required when you set Codec to the value MP2." } }, "Mp3RateControlMode": { @@ -2692,21 +2692,21 @@ } }, "MpdAccessibilityCaptionHints": { - "base": "Optional. Choose Include (INCLUDE) to have MediaConvert mark up your DASH manifest with elements for embedded 608 captions. This markup isn't generally required, but some video players require it to discover and play embedded 608 captions. Keep the default value, Exclude (EXCLUDE), to leave these elements out. When you enable this setting, this is the markup that MediaConvert includes in your manifest: ", + "base": "Optional. Choose Include to have MediaConvert mark up your DASH manifest with elements for embedded 608 captions. This markup isn't generally required, but some video players require it to discover and play embedded 608 captions. Keep the default value, Exclude, to leave these elements out. When you enable this setting, this is the markup that MediaConvert includes in your manifest: ", "refs": { - "MpdSettings$AccessibilityCaptionHints": "Optional. Choose Include (INCLUDE) to have MediaConvert mark up your DASH manifest with elements for embedded 608 captions. This markup isn't generally required, but some video players require it to discover and play embedded 608 captions. Keep the default value, Exclude (EXCLUDE), to leave these elements out. When you enable this setting, this is the markup that MediaConvert includes in your manifest: " + "MpdSettings$AccessibilityCaptionHints": "Optional. Choose Include to have MediaConvert mark up your DASH manifest with elements for embedded 608 captions. This markup isn't generally required, but some video players require it to discover and play embedded 608 captions. Keep the default value, Exclude, to leave these elements out. When you enable this setting, this is the markup that MediaConvert includes in your manifest: " } }, "MpdAudioDuration": { - "base": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default codec duration (DEFAULT_CODEC_DURATION). 
When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec.", + "base": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration. In all other cases, keep the default value, Default codec duration. When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec.", "refs": { - "MpdSettings$AudioDuration": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec." + "MpdSettings$AudioDuration": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration. In all other cases, keep the default value, Default codec duration. When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. 
When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec." } }, "MpdCaptionContainerType": { - "base": "Use this setting only in DASH output groups that include sidecar TTML or IMSC captions. You specify sidecar captions in a separate output from your audio and video. Choose Raw (RAW) for captions in a single XML file in a raw container. Choose Fragmented MPEG-4 (FRAGMENTED_MP4) for captions in XML format contained within fragmented MP4 files. This set of fragmented MP4 files is separate from your video and audio fragmented MP4 files.", + "base": "Use this setting only in DASH output groups that include sidecar TTML or IMSC captions. You specify sidecar captions in a separate output from your audio and video. Choose Raw for captions in a single XML file in a raw container. Choose Fragmented MPEG-4 for captions in XML format contained within fragmented MP4 files. This set of fragmented MP4 files is separate from your video and audio fragmented MP4 files.", "refs": { - "MpdSettings$CaptionContainerType": "Use this setting only in DASH output groups that include sidecar TTML or IMSC captions. You specify sidecar captions in a separate output from your audio and video. Choose Raw (RAW) for captions in a single XML file in a raw container. Choose Fragmented MPEG-4 (FRAGMENTED_MP4) for captions in XML format contained within fragmented MP4 files. This set of fragmented MP4 files is separate from your video and audio fragmented MP4 files." + "MpdSettings$CaptionContainerType": "Use this setting only in DASH output groups that include sidecar TTML or IMSC captions. You specify sidecar captions in a separate output from your audio and video. Choose Raw for captions in a single XML file in a raw container. Choose Fragmented MPEG-4 for captions in XML format contained within fragmented MP4 files. This set of fragmented MP4 files is separate from your video and audio fragmented MP4 files." } }, "MpdKlvMetadata": { @@ -2716,21 +2716,21 @@ } }, "MpdManifestMetadataSignaling": { - "base": "To add an InbandEventStream element in your output MPD manifest for each type of event message, set Manifest metadata signaling to Enabled. For ID3 event messages, the InbandEventStream element schemeIdUri will be same value that you specify for ID3 metadata scheme ID URI. For SCTE35 event messages, the InbandEventStream element schemeIdUri will be \"urn:scte:scte35:2013:bin\". To leave these elements out of your output MPD manifest, set Manifest metadata signaling to Disabled. To enable Manifest metadata signaling, you must also set SCTE-35 source to Passthrough, ESAM SCTE-35 to insert, or ID3 metadata (TimedMetadata) to Passthrough.", + "base": "To add an InbandEventStream element in your output MPD manifest for each type of event message, set Manifest metadata signaling to Enabled. For ID3 event messages, the InbandEventStream element schemeIdUri will be the same value that you specify for ID3 metadata scheme ID URI. For SCTE35 event messages, the InbandEventStream element schemeIdUri will be \"urn:scte:scte35:2013:bin\". To leave these elements out of your output MPD manifest, set Manifest metadata signaling to Disabled.
To enable Manifest metadata signaling, you must also set SCTE-35 source to Passthrough, ESAM SCTE-35 to insert, or ID3 metadata to Passthrough.", "refs": { - "MpdSettings$ManifestMetadataSignaling": "To add an InbandEventStream element in your output MPD manifest for each type of event message, set Manifest metadata signaling to Enabled. For ID3 event messages, the InbandEventStream element schemeIdUri will be same value that you specify for ID3 metadata scheme ID URI. For SCTE35 event messages, the InbandEventStream element schemeIdUri will be \"urn:scte:scte35:2013:bin\". To leave these elements out of your output MPD manifest, set Manifest metadata signaling to Disabled. To enable Manifest metadata signaling, you must also set SCTE-35 source to Passthrough, ESAM SCTE-35 to insert, or ID3 metadata (TimedMetadata) to Passthrough." + "MpdSettings$ManifestMetadataSignaling": "To add an InbandEventStream element in your output MPD manifest for each type of event message, set Manifest metadata signaling to Enabled. For ID3 event messages, the InbandEventStream element schemeIdUri will be the same value that you specify for ID3 metadata scheme ID URI. For SCTE35 event messages, the InbandEventStream element schemeIdUri will be \"urn:scte:scte35:2013:bin\". To leave these elements out of your output MPD manifest, set Manifest metadata signaling to Disabled. To enable Manifest metadata signaling, you must also set SCTE-35 source to Passthrough, ESAM SCTE-35 to insert, or ID3 metadata to Passthrough." } }, "MpdScte35Esam": { - "base": "Use this setting only when you specify SCTE-35 markers from ESAM. Choose INSERT to put SCTE-35 markers in this output at the insertion points that you specify in an ESAM XML document. Provide the document in the setting SCC XML (sccXml).", + "base": "Use this setting only when you specify SCTE-35 markers from ESAM. Choose INSERT to put SCTE-35 markers in this output at the insertion points that you specify in an ESAM XML document. Provide the document in the setting SCC XML.", "refs": { - "MpdSettings$Scte35Esam": "Use this setting only when you specify SCTE-35 markers from ESAM. Choose INSERT to put SCTE-35 markers in this output at the insertion points that you specify in an ESAM XML document. Provide the document in the setting SCC XML (sccXml)." + "MpdSettings$Scte35Esam": "Use this setting only when you specify SCTE-35 markers from ESAM. Choose INSERT to put SCTE-35 markers in this output at the insertion points that you specify in an ESAM XML document. Provide the document in the setting SCC XML." } }, "MpdScte35Source": { - "base": "Ignore this setting unless you have SCTE-35 markers in your input video file. Choose Passthrough (PASSTHROUGH) if you want SCTE-35 markers that appear in your input to also appear in this output. Choose None (NONE) if you don't want those SCTE-35 markers in this output.", + "base": "Ignore this setting unless you have SCTE-35 markers in your input video file. Choose Passthrough if you want SCTE-35 markers that appear in your input to also appear in this output. Choose None if you don't want those SCTE-35 markers in this output.", "refs": { - "MpdSettings$Scte35Source": "Ignore this setting unless you have SCTE-35 markers in your input video file. Choose Passthrough (PASSTHROUGH) if you want SCTE-35 markers that appear in your input to also appear in this output. Choose None (NONE) if you don't want those SCTE-35 markers in this output."
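Editor's note: for readers mapping these MpdSettings doc strings to the SDK, here is a minimal, hypothetical sketch against this repository's mediaconvert package (aws-sdk-go v1). The raw enum strings MATCH_VIDEO_DURATION, FRAGMENTED_MP4, and PASSTHROUGH are the API values that the parentheticals removed in this change used to spell out; the combination shown is illustrative, not part of the diff.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	// DASH container settings for a duration-sensitive repackaging
	// workflow: match audio duration to video, carry sidecar captions
	// in fragmented MP4, and pass input SCTE-35 markers through.
	mpd := &mediaconvert.MpdSettings{
		AudioDuration:        aws.String("MATCH_VIDEO_DURATION"),
		CaptionContainerType: aws.String("FRAGMENTED_MP4"),
		Scte35Source:         aws.String("PASSTHROUGH"),
	}
	fmt.Println(mpd)
}
```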
+ "MpdSettings$Scte35Source": "Ignore this setting unless you have SCTE-35 markers in your input video file. Choose Passthrough if you want SCTE-35 markers that appear in your input to also appear in this output. Choose None if you don't want those SCTE-35 markers in this output." } }, "MpdSettings": { @@ -2740,45 +2740,45 @@ } }, "MpdTimedMetadata": { - "base": "To include ID3 metadata in this output: Set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH). Specify this ID3 metadata in Custom ID3 metadata inserter (timedMetadataInsertion). MediaConvert writes each instance of ID3 metadata in a separate Event Message (eMSG) box. To exclude this ID3 metadata: Set ID3 metadata to None (NONE) or leave blank.", + "base": "To include ID3 metadata in this output: Set ID3 metadata to Passthrough. Specify this ID3 metadata in Custom ID3 metadata inserter. MediaConvert writes each instance of ID3 metadata in a separate Event Message (eMSG) box. To exclude this ID3 metadata: Set ID3 metadata to None or leave blank.", "refs": { - "MpdSettings$TimedMetadata": "To include ID3 metadata in this output: Set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH). Specify this ID3 metadata in Custom ID3 metadata inserter (timedMetadataInsertion). MediaConvert writes each instance of ID3 metadata in a separate Event Message (eMSG) box. To exclude this ID3 metadata: Set ID3 metadata to None (NONE) or leave blank." + "MpdSettings$TimedMetadata": "To include ID3 metadata in this output: Set ID3 metadata to Passthrough. Specify this ID3 metadata in Custom ID3 metadata inserter. MediaConvert writes each instance of ID3 metadata in a separate Event Message (eMSG) box. To exclude this ID3 metadata: Set ID3 metadata to None or leave blank." } }, "MpdTimedMetadataBoxVersion": { - "base": "Specify the event message box (eMSG) version for ID3 timed metadata in your output.\nFor more information, see ISO/IEC 23009-1:2022 section 5.10.3.3.3 Syntax.\nLeave blank to use the default value Version 0.\nWhen you specify Version 1, you must also set ID3 metadata (timedMetadata) to Passthrough.", + "base": "Specify the event message box (eMSG) version for ID3 timed metadata in your output.\nFor more information, see ISO/IEC 23009-1:2022 section 5.10.3.3.3 Syntax.\nLeave blank to use the default value Version 0.\nWhen you specify Version 1, you must also set ID3 metadata to Passthrough.", "refs": { - "MpdSettings$TimedMetadataBoxVersion": "Specify the event message box (eMSG) version for ID3 timed metadata in your output.\nFor more information, see ISO/IEC 23009-1:2022 section 5.10.3.3.3 Syntax.\nLeave blank to use the default value Version 0.\nWhen you specify Version 1, you must also set ID3 metadata (timedMetadata) to Passthrough." + "MpdSettings$TimedMetadataBoxVersion": "Specify the event message box (eMSG) version for ID3 timed metadata in your output.\nFor more information, see ISO/IEC 23009-1:2022 section 5.10.3.3.3 Syntax.\nLeave blank to use the default value Version 0.\nWhen you specify Version 1, you must also set ID3 metadata to Passthrough." } }, "Mpeg2AdaptiveQuantization": { - "base": "Specify the strength of any adaptive quantization filters that you enable. The value that you choose here applies to the following settings: Spatial adaptive quantization (spatialAdaptiveQuantization), and Temporal adaptive quantization (temporalAdaptiveQuantization).", + "base": "Specify the strength of any adaptive quantization filters that you enable. 
The value that you choose here applies to the following settings: Spatial adaptive quantization, and Temporal adaptive quantization.", "refs": { - "Mpeg2Settings$AdaptiveQuantization": "Specify the strength of any adaptive quantization filters that you enable. The value that you choose here applies to the following settings: Spatial adaptive quantization (spatialAdaptiveQuantization), and Temporal adaptive quantization (temporalAdaptiveQuantization)." + "Mpeg2Settings$AdaptiveQuantization": "Specify the strength of any adaptive quantization filters that you enable. The value that you choose here applies to the following settings: Spatial adaptive quantization, and Temporal adaptive quantization." } }, "Mpeg2CodecLevel": { - "base": "Use Level (Mpeg2CodecLevel) to set the MPEG-2 level for the video output.", + "base": "Use Level to set the MPEG-2 level for the video output.", "refs": { - "Mpeg2Settings$CodecLevel": "Use Level (Mpeg2CodecLevel) to set the MPEG-2 level for the video output." + "Mpeg2Settings$CodecLevel": "Use Level to set the MPEG-2 level for the video output." } }, "Mpeg2CodecProfile": { - "base": "Use Profile (Mpeg2CodecProfile) to set the MPEG-2 profile for the video output.", + "base": "Use Profile to set the MPEG-2 profile for the video output.", "refs": { - "Mpeg2Settings$CodecProfile": "Use Profile (Mpeg2CodecProfile) to set the MPEG-2 profile for the video output." + "Mpeg2Settings$CodecProfile": "Use Profile to set the MPEG-2 profile for the video output." } }, "Mpeg2DynamicSubGop": { - "base": "Choose Adaptive to improve subjective video quality for high-motion content. This will cause the service to use fewer B-frames (which infer information based on other frames) for high-motion portions of the video and more B-frames for low-motion portions. The maximum number of B-frames is limited by the value you provide for the setting B frames between reference frames (numberBFramesBetweenReferenceFrames).", + "base": "Choose Adaptive to improve subjective video quality for high-motion content. This will cause the service to use fewer B-frames (which infer information based on other frames) for high-motion portions of the video and more B-frames for low-motion portions. The maximum number of B-frames is limited by the value you provide for the setting B frames between reference frames.", "refs": { - "Mpeg2Settings$DynamicSubGop": "Choose Adaptive to improve subjective video quality for high-motion content. This will cause the service to use fewer B-frames (which infer information based on other frames) for high-motion portions of the video and more B-frames for low-motion portions. The maximum number of B-frames is limited by the value you provide for the setting B frames between reference frames (numberBFramesBetweenReferenceFrames)." + "Mpeg2Settings$DynamicSubGop": "Choose Adaptive to improve subjective video quality for high-motion content. This will cause the service to use fewer B-frames (which infer information based on other frames) for high-motion portions of the video and more B-frames for low-motion portions. The maximum number of B-frames is limited by the value you provide for the setting B frames between reference frames." } }, "Mpeg2FramerateControl": { - "base": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. 
The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator.", + "base": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction.", "refs": { - "Mpeg2Settings$FramerateControl": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator." + "Mpeg2Settings$FramerateControl": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction." } }, "Mpeg2FramerateConversionAlgorithm": { @@ -2788,45 +2788,45 @@ } }, "Mpeg2GopSizeUnits": { - "base": "Specify the units for GOP size (GopSize). If you don't specify a value here, by default the encoder measures GOP size in frames.", + "base": "Specify the units for GOP size. If you don't specify a value here, by default the encoder measures GOP size in frames.", "refs": { - "Mpeg2Settings$GopSizeUnits": "Specify the units for GOP size (GopSize). If you don't specify a value here, by default the encoder measures GOP size in frames." + "Mpeg2Settings$GopSizeUnits": "Specify the units for GOP size. If you don't specify a value here, by default the encoder measures GOP size in frames." } }, "Mpeg2InterlaceMode": { - "base": "Choose the scan line type for the output. Keep the default value, Progressive (PROGRESSIVE) to create a progressive output, regardless of the scan type of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) to create an output that's interlaced with the same field polarity throughout. 
Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. If the source is progressive, the output will be interlaced with top field bottom field first, depending on which of the Follow options you choose.", + "base": "Choose the scan line type for the output. Keep the default value, Progressive, to create a progressive output, regardless of the scan type of your input. Use Top field first or Bottom field first to create an output that's interlaced with the same field polarity throughout. Use Follow, default top or Follow, default bottom to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. If the source is progressive, the output will be interlaced with top field first or bottom field first, depending on which of the Follow options you choose.", "refs": { - "Mpeg2Settings$InterlaceMode": "Choose the scan line type for the output. Keep the default value, Progressive (PROGRESSIVE) to create a progressive output, regardless of the scan type of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) to create an output that's interlaced with the same field polarity throughout. Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. If the source is progressive, the output will be interlaced with top field bottom field first, depending on which of the Follow options you choose." + "Mpeg2Settings$InterlaceMode": "Choose the scan line type for the output. Keep the default value, Progressive, to create a progressive output, regardless of the scan type of your input. Use Top field first or Bottom field first to create an output that's interlaced with the same field polarity throughout. Use Follow, default top or Follow, default bottom to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. If the source is progressive, the output will be interlaced with top field first or bottom field first, depending on which of the Follow options you choose." } }, "Mpeg2IntraDcPrecision": { - "base": "Use Intra DC precision (Mpeg2IntraDcPrecision) to set quantization precision for intra-block DC coefficients.
If you choose the value auto, the service will automatically select the precision based on the per-frame compression ratio.", "refs": { - "Mpeg2Settings$IntraDcPrecision": "Use Intra DC precision (Mpeg2IntraDcPrecision) to set quantization precision for intra-block DC coefficients. If you choose the value auto, the service will automatically select the precision based on the per-frame compression ratio." + "Mpeg2Settings$IntraDcPrecision": "Use Intra DC precision to set quantization precision for intra-block DC coefficients. If you choose the value auto, the service will automatically select the precision based on the per-frame compression ratio." } }, "Mpeg2ParControl": { - "base": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses the PAR from your input video for your output. To specify a different PAR in the console, choose any value other than Follow source. To specify a different PAR by editing the JSON job specification, choose SPECIFIED. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings.", + "base": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source, uses the PAR from your input video for your output. To specify a different PAR in the console, choose any value other than Follow source. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings.", "refs": { - "Mpeg2Settings$ParControl": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses the PAR from your input video for your output. To specify a different PAR in the console, choose any value other than Follow source. To specify a different PAR by editing the JSON job specification, choose SPECIFIED. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings." + "Mpeg2Settings$ParControl": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source, uses the PAR from your input video for your output. To specify a different PAR in the console, choose any value other than Follow source. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings." } }, "Mpeg2QualityTuningLevel": { - "base": "Optional. Use Quality tuning level (qualityTuningLevel) to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, single-pass encoding.", + "base": "Optional. Use Quality tuning level to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, single-pass encoding.", "refs": { - "Mpeg2Settings$QualityTuningLevel": "Optional. Use Quality tuning level (qualityTuningLevel) to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, single-pass encoding." + "Mpeg2Settings$QualityTuningLevel": "Optional. Use Quality tuning level to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, single-pass encoding." 
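Editor's note: the Mpeg2ParControl entry above says SPECIFIED requires explicit parNumerator and parDenominator values. A minimal sketch against this repository's mediaconvert package; the 4:3 ratio is an arbitrary example, not from the diff.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	// Pin a 4:3 pixel aspect ratio rather than following the source.
	// With ParControl set to SPECIFIED, both ParNumerator and
	// ParDenominator must also be provided.
	mpeg2 := &mediaconvert.Mpeg2Settings{
		ParControl:     aws.String("SPECIFIED"),
		ParNumerator:   aws.Int64(4),
		ParDenominator: aws.Int64(3),
	}
	fmt.Println(mpeg2)
}
```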
} }, "Mpeg2RateControlMode": { - "base": "Use Rate control mode (Mpeg2RateControlMode) to specify whether the bitrate is variable (vbr) or constant (cbr).", + "base": "Use Rate control mode to specify whether the bitrate is variable (vbr) or constant (cbr).", "refs": { - "Mpeg2Settings$RateControlMode": "Use Rate control mode (Mpeg2RateControlMode) to specify whether the bitrate is variable (vbr) or constant (cbr)." + "Mpeg2Settings$RateControlMode": "Use Rate control mode to specify whether the bitrate is variable (vbr) or constant (cbr)." } }, "Mpeg2ScanTypeConversionMode": { - "base": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing (INTERLACED), for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode (interlaceMode) to a value other than Progressive (PROGRESSIVE).", + "base": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing, for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine to None or Soft. You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode to a value other than Progressive.", "refs": { - "Mpeg2Settings$ScanTypeConversionMode": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing (INTERLACED), for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode (interlaceMode) to a value other than Progressive (PROGRESSIVE)." 
+ "Mpeg2Settings$ScanTypeConversionMode": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing, for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine to None or Soft. You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode to a value other than Progressive." } }, "Mpeg2SceneChangeDetect": { @@ -2836,39 +2836,39 @@ } }, "Mpeg2Settings": { - "base": "Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the value MPEG2.", + "base": "Required when you set Codec to the value MPEG2.", "refs": { - "VideoCodecSettings$Mpeg2Settings": "Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the value MPEG2." + "VideoCodecSettings$Mpeg2Settings": "Required when you set Codec to the value MPEG2." } }, "Mpeg2SlowPal": { - "base": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output. When you enable slow PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio to keep it synchronized with the video. Note that enabling this setting will slightly reduce the duration of your video. Required settings: You must also set Framerate to 25. In your JSON job specification, set (framerateControl) to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to 1.", + "base": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output. When you enable slow PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio to keep it synchronized with the video. Note that enabling this setting will slightly reduce the duration of your video. Required settings: You must also set Framerate to 25.", "refs": { - "Mpeg2Settings$SlowPal": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output. When you enable slow PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio to keep it synchronized with the video. Note that enabling this setting will slightly reduce the duration of your video. Required settings: You must also set Framerate to 25. In your JSON job specification, set (framerateControl) to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to 1." + "Mpeg2Settings$SlowPal": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output. When you enable slow PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio to keep it synchronized with the video. Note that enabling this setting will slightly reduce the duration of your video. Required settings: You must also set Framerate to 25." 
} }, "Mpeg2SpatialAdaptiveQuantization": { - "base": "Keep the default value, Enabled (ENABLED), to adjust quantization within each frame based on spatial variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to disable this feature. Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization (adaptiveQuantization) depending on your content. For homogeneous content, such as cartoons and video games, set it to Low. For content with a wider variety of textures, set it to High or Higher.", + "base": "Keep the default value, Enabled, to adjust quantization within each frame based on spatial variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to disable this feature. Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization depending on your content. For homogeneous content, such as cartoons and video games, set it to Low. For content with a wider variety of textures, set it to High or Higher.", "refs": { - "Mpeg2Settings$SpatialAdaptiveQuantization": "Keep the default value, Enabled (ENABLED), to adjust quantization within each frame based on spatial variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to disable this feature. Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization (adaptiveQuantization) depending on your content. For homogeneous content, such as cartoons and video games, set it to Low. For content with a wider variety of textures, set it to High or Higher." + "Mpeg2Settings$SpatialAdaptiveQuantization": "Keep the default value, Enabled, to adjust quantization within each frame based on spatial variation of content complexity. 
When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to disable this feature. Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization depending on your content. For homogeneous content, such as cartoons and video games, set it to Low. For content with a wider variety of textures, set it to High or Higher." } }, "Mpeg2Syntax": { - "base": "Specify whether this output's video uses the D10 syntax. Keep the default value to not use the syntax. Related settings: When you choose D10 (D_10) for your MXF profile (profile), you must also set this value to D10 (D_10).", + "base": "Specify whether this output's video uses the D10 syntax. Keep the default value to not use the syntax. Related settings: When you choose D10 for your MXF profile, you must also set this value to D10.", "refs": { - "Mpeg2Settings$Syntax": "Specify whether this output's video uses the D10 syntax. Keep the default value to not use the syntax. Related settings: When you choose D10 (D_10) for your MXF profile (profile), you must also set this value to D10 (D_10)." + "Mpeg2Settings$Syntax": "Specify whether this output's video uses the D10 syntax. Keep the default value to not use the syntax. Related settings: When you choose D10 for your MXF profile, you must also set this value to D10." } }, "Mpeg2Telecine": { - "base": "When you do frame rate conversion from 23.976 frames per second (fps) to 29.97 fps, and your output scan type is interlaced, you can optionally enable hard or soft telecine to create a smoother picture. Hard telecine (HARD) produces a 29.97i output. Soft telecine (SOFT) produces an output with a 23.976 output that signals to the video player device to do the conversion during play back. When you keep the default value, None (NONE), MediaConvert does a standard frame rate conversion to 29.97 without doing anything with the field polarity to create a smoother picture.", + "base": "When you do frame rate conversion from 23.976 frames per second (fps) to 29.97 fps, and your output scan type is interlaced, you can optionally enable hard or soft telecine to create a smoother picture. Hard telecine produces a 29.97i output. Soft telecine produces a 23.976 output that signals to the video player device to do the conversion during playback. When you keep the default value, None, MediaConvert does a standard frame rate conversion to 29.97 without doing anything with the field polarity to create a smoother picture.", "refs": { - "Mpeg2Settings$Telecine": "When you do frame rate conversion from 23.976 frames per second (fps) to 29.97 fps, and your output scan type is interlaced, you can optionally enable hard or soft telecine to create a smoother picture. Hard telecine (HARD) produces a 29.97i output. Soft telecine (SOFT) produces an output with a 23.976 output that signals to the video player device to do the conversion during play back.
When you keep the default value, None (NONE), MediaConvert does a standard frame rate conversion to 29.97 without doing anything with the field polarity to create a smoother picture." + "Mpeg2Settings$Telecine": "When you do frame rate conversion from 23.976 frames per second (fps) to 29.97 fps, and your output scan type is interlaced, you can optionally enable hard or soft telecine to create a smoother picture. Hard telecine produces a 29.97i output. Soft telecine produces a 23.976 output that signals to the video player device to do the conversion during playback. When you keep the default value, None, MediaConvert does a standard frame rate conversion to 29.97 without doing anything with the field polarity to create a smoother picture." } }, "Mpeg2TemporalAdaptiveQuantization": { - "base": "Keep the default value, Enabled (ENABLED), to adjust quantization within each frame based on temporal variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas of the frame that aren't moving and uses more bits on complex objects with sharp edges that move a lot. For example, this feature improves the readability of text tickers on newscasts and scoreboards on sports matches. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen that doesn't have moving objects with sharp edges, such as sports athletes' faces, you might choose to disable this feature. Related setting: When you enable temporal quantization, adjust the strength of the filter with the setting Adaptive quantization (adaptiveQuantization).", + "base": "Keep the default value, Enabled, to adjust quantization within each frame based on temporal variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas of the frame that aren't moving and uses more bits on complex objects with sharp edges that move a lot. For example, this feature improves the readability of text tickers on newscasts and scoreboards on sports matches. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen that doesn't have moving objects with sharp edges, such as sports athletes' faces, you might choose to disable this feature. Related setting: When you enable temporal quantization, adjust the strength of the filter with the setting Adaptive quantization.", "refs": { - "Mpeg2Settings$TemporalAdaptiveQuantization": "Keep the default value, Enabled (ENABLED), to adjust quantization within each frame based on temporal variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas of the frame that aren't moving and uses more bits on complex objects with sharp edges that move a lot. For example, this feature improves the readability of text tickers on newscasts and scoreboards on sports matches. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be.
If viewers are likely to be focusing their attention on a part of the screen that doesn't have moving objects with sharp edges, such as sports athletes' faces, you might choose to disable this feature. Related setting: When you enable temporal quantization, adjust the strength of the filter with the setting Adaptive quantization (adaptiveQuantization)." + "Mpeg2Settings$TemporalAdaptiveQuantization": "Keep the default value, Enabled, to adjust quantization within each frame based on temporal variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas of the frame that aren't moving and uses more bits on complex objects with sharp edges that move a lot. For example, this feature improves the readability of text tickers on newscasts and scoreboards on sports matches. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen that doesn't have moving objects with sharp edges, such as sports athletes' faces, you might choose to disable this feature. Related setting: When you enable temporal quantization, adjust the strength of the filter with the setting Adaptive quantization." } }, "MsSmoothAdditionalManifest": { @@ -2884,33 +2884,33 @@ } }, "MsSmoothEncryptionSettings": { - "base": "If you are using DRM, set DRM System (MsSmoothEncryptionSettings) to specify the value SpekeKeyProvider.", + "base": "If you are using DRM, set DRM System to specify the value SpekeKeyProvider.", "refs": { - "MsSmoothGroupSettings$Encryption": "If you are using DRM, set DRM System (MsSmoothEncryptionSettings) to specify the value SpekeKeyProvider." + "MsSmoothGroupSettings$Encryption": "If you are using DRM, set DRM System to specify the value SpekeKeyProvider." } }, "MsSmoothFragmentLengthControl": { - "base": "Specify how you want MediaConvert to determine the fragment length. Choose Exact (EXACT) to have the encoder use the exact length that you specify with the setting Fragment length (FragmentLength). This might result in extra I-frames. Choose Multiple of GOP (GOP_MULTIPLE) to have the encoder round up the segment lengths to match the next GOP boundary.", + "base": "Specify how you want MediaConvert to determine the fragment length. Choose Exact to have the encoder use the exact length that you specify with the setting Fragment length. This might result in extra I-frames. Choose Multiple of GOP to have the encoder round up the segment lengths to match the next GOP boundary.", "refs": { - "MsSmoothGroupSettings$FragmentLengthControl": "Specify how you want MediaConvert to determine the fragment length. Choose Exact (EXACT) to have the encoder use the exact length that you specify with the setting Fragment length (FragmentLength). This might result in extra I-frames. Choose Multiple of GOP (GOP_MULTIPLE) to have the encoder round up the segment lengths to match the next GOP boundary." + "MsSmoothGroupSettings$FragmentLengthControl": "Specify how you want MediaConvert to determine the fragment length. Choose Exact to have the encoder use the exact length that you specify with the setting Fragment length. This might result in extra I-frames. Choose Multiple of GOP to have the encoder round up the segment lengths to match the next GOP boundary." } }, "MsSmoothGroupSettings": { - "base": "Settings related to your Microsoft Smooth Streaming output package. 
For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html. When you work directly in your JSON job specification, include this object and any required children when you set Type, under OutputGroupSettings, to MS_SMOOTH_GROUP_SETTINGS.", + "base": "Settings related to your Microsoft Smooth Streaming output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html.", "refs": { - "OutputGroupSettings$MsSmoothGroupSettings": "Settings related to your Microsoft Smooth Streaming output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html. When you work directly in your JSON job specification, include this object and any required children when you set Type, under OutputGroupSettings, to MS_SMOOTH_GROUP_SETTINGS." + "OutputGroupSettings$MsSmoothGroupSettings": "Settings related to your Microsoft Smooth Streaming output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html." } }, "MsSmoothManifestEncoding": { - "base": "Use Manifest encoding (MsSmoothManifestEncoding) to specify the encoding format for the server and client manifest. Valid options are utf8 and utf16.", + "base": "Use Manifest encoding to specify the encoding format for the server and client manifest. Valid options are utf8 and utf16.", "refs": { - "MsSmoothGroupSettings$ManifestEncoding": "Use Manifest encoding (MsSmoothManifestEncoding) to specify the encoding format for the server and client manifest. Valid options are utf8 and utf16." + "MsSmoothGroupSettings$ManifestEncoding": "Use Manifest encoding to specify the encoding format for the server and client manifest. Valid options are utf8 and utf16." } }, "MxfAfdSignaling": { - "base": "Optional. When you have AFD signaling set up in your output video stream, use this setting to choose whether to also include it in the MXF wrapper. Choose Don't copy (NO_COPY) to exclude AFD signaling from the MXF wrapper. Choose Copy from video stream (COPY_FROM_VIDEO) to copy the AFD values from the video stream for this output to the MXF wrapper. Regardless of which option you choose, the AFD values remain in the video stream. Related settings: To set up your output to include or exclude AFD values, see AfdSignaling, under VideoDescription. On the console, find AFD signaling under the output's video encoding settings.", + "base": "Optional. When you have AFD signaling set up in your output video stream, use this setting to choose whether to also include it in the MXF wrapper. Choose Don't copy to exclude AFD signaling from the MXF wrapper. Choose Copy from video stream to copy the AFD values from the video stream for this output to the MXF wrapper. Regardless of which option you choose, the AFD values remain in the video stream. Related settings: To set up your output to include or exclude AFD values, see AfdSignaling, under VideoDescription. On the console, find AFD signaling under the output's video encoding settings.", "refs": { - "MxfSettings$AfdSignaling": "Optional. When you have AFD signaling set up in your output video stream, use this setting to choose whether to also include it in the MXF wrapper. Choose Don't copy (NO_COPY) to exclude AFD signaling from the MXF wrapper. Choose Copy from video stream (COPY_FROM_VIDEO) to copy the AFD values from the video stream for this output to the MXF wrapper. Regardless of which option you choose, the AFD values remain in the video stream. 
Related settings: To set up your output to include or exclude AFD values, see AfdSignaling, under VideoDescription. On the console, find AFD signaling under the output's video encoding settings." + "MxfSettings$AfdSignaling": "Optional. When you have AFD signaling set up in your output video stream, use this setting to choose whether to also include it in the MXF wrapper. Choose Don't copy to exclude AFD signaling from the MXF wrapper. Choose Copy from video stream to copy the AFD values from the video stream for this output to the MXF wrapper. Regardless of which option you choose, the AFD values remain in the video stream. Related settings: To set up your output to include or exclude AFD values, see AfdSignaling, under VideoDescription. On the console, find AFD signaling under the output's video encoding settings." } }, "MxfProfile": { @@ -2926,9 +2926,9 @@ } }, "MxfXavcDurationMode": { - "base": "To create an output that complies with the XAVC file format guidelines for interoperability, keep the default value, Drop frames for compliance (DROP_FRAMES_FOR_COMPLIANCE). To include all frames from your input in this output, keep the default setting, Allow any duration (ALLOW_ANY_DURATION). The number of frames that MediaConvert excludes when you set this to Drop frames for compliance depends on the output frame rate and duration.", + "base": "To create an output that complies with the XAVC file format guidelines for interoperability, keep the default value, Drop frames for compliance. To include all frames from your input in this output, choose Allow any duration. The number of frames that MediaConvert excludes when you set this to Drop frames for compliance depends on the output frame rate and duration.", "refs": { - "MxfXavcProfileSettings$DurationMode": "To create an output that complies with the XAVC file format guidelines for interoperability, keep the default value, Drop frames for compliance (DROP_FRAMES_FOR_COMPLIANCE). To include all frames from your input in this output, keep the default setting, Allow any duration (ALLOW_ANY_DURATION). The number of frames that MediaConvert excludes when you set this to Drop frames for compliance depends on the output frame rate and duration." + "MxfXavcProfileSettings$DurationMode": "To create an output that complies with the XAVC file format guidelines for interoperability, keep the default value, Drop frames for compliance. To include all frames from your input in this output, choose Allow any duration. The number of frames that MediaConvert excludes when you set this to Drop frames for compliance depends on the output frame rate and duration." } }, "MxfXavcProfileSettings": { @@ -2944,16 +2944,16 @@ } }, "NielsenActiveWatermarkProcessType": { - "base": "Choose the type of Nielsen watermarks that you want in your outputs. When you choose NAES 2 and NW (NAES2_AND_NW), you must provide a value for the setting SID (sourceId). When you choose CBET (CBET), you must provide a value for the setting CSID (cbetSourceId). When you choose NAES 2, NW, and CBET (NAES2_AND_NW_AND_CBET), you must provide values for both of these settings.", + "base": "Choose the type of Nielsen watermarks that you want in your outputs. When you choose NAES 2 and NW, you must provide a value for the setting SID. When you choose CBET, you must provide a value for the setting CSID.
When you choose NAES 2, NW, and CBET, you must provide values for both of these settings.", "refs": { - "NielsenNonLinearWatermarkSettings$ActiveWatermarkProcess": "Choose the type of Nielsen watermarks that you want in your outputs. When you choose NAES 2 and NW (NAES2_AND_NW), you must provide a value for the setting SID (sourceId). When you choose CBET (CBET), you must provide a value for the setting CSID (cbetSourceId). When you choose NAES 2, NW, and CBET (NAES2_AND_NW_AND_CBET), you must provide values for both of these settings." + "NielsenNonLinearWatermarkSettings$ActiveWatermarkProcess": "Choose the type of Nielsen watermarks that you want in your outputs. When you choose NAES 2 and NW, you must provide a value for the setting SID. When you choose CBET, you must provide a value for the setting CSID. When you choose NAES 2, NW, and CBET, you must provide values for both of these settings." } }, "NielsenConfiguration": { - "base": "Settings for your Nielsen configuration. If you don't do Nielsen measurement and analytics, ignore these settings. When you enable Nielsen configuration (nielsenConfiguration), MediaConvert enables PCM to ID3 tagging for all outputs in the job. To enable Nielsen configuration programmatically, include an instance of nielsenConfiguration in your JSON job specification. Even if you don't include any children of nielsenConfiguration, you still enable the setting.", + "base": "Settings for your Nielsen configuration. If you don't do Nielsen measurement and analytics, ignore these settings. When you enable Nielsen configuration, MediaConvert enables PCM to ID3 tagging for all outputs in the job.", "refs": { - "JobSettings$NielsenConfiguration": "Settings for your Nielsen configuration. If you don't do Nielsen measurement and analytics, ignore these settings. When you enable Nielsen configuration (nielsenConfiguration), MediaConvert enables PCM to ID3 tagging for all outputs in the job. To enable Nielsen configuration programmatically, include an instance of nielsenConfiguration in your JSON job specification. Even if you don't include any children of nielsenConfiguration, you still enable the setting.", - "JobTemplateSettings$NielsenConfiguration": "Settings for your Nielsen configuration. If you don't do Nielsen measurement and analytics, ignore these settings. When you enable Nielsen configuration (nielsenConfiguration), MediaConvert enables PCM to ID3 tagging for all outputs in the job. To enable Nielsen configuration programmatically, include an instance of nielsenConfiguration in your JSON job specification. Even if you don't include any children of nielsenConfiguration, you still enable the setting." + "JobSettings$NielsenConfiguration": "Settings for your Nielsen configuration. If you don't do Nielsen measurement and analytics, ignore these settings. When you enable Nielsen configuration, MediaConvert enables PCM to ID3 tagging for all outputs in the job.", + "JobTemplateSettings$NielsenConfiguration": "Settings for your Nielsen configuration. If you don't do Nielsen measurement and analytics, ignore these settings. When you enable Nielsen configuration, MediaConvert enables PCM to ID3 tagging for all outputs in the job." } }, "NielsenNonLinearWatermarkSettings": { @@ -2964,27 +2964,27 @@ } }, "NielsenSourceWatermarkStatusType": { - "base": "Required. Specify whether your source content already contains Nielsen non-linear watermarks. When you set this value to Watermarked (WATERMARKED), the service fails the job. 
Nielsen requires that you add non-linear watermarking to only clean content that doesn't already have non-linear Nielsen watermarks.", + "base": "Required. Specify whether your source content already contains Nielsen non-linear watermarks. When you set this value to Watermarked, the service fails the job. Nielsen requires that you add non-linear watermarking to only clean content that doesn't already have non-linear Nielsen watermarks.", "refs": { - "NielsenNonLinearWatermarkSettings$SourceWatermarkStatus": "Required. Specify whether your source content already contains Nielsen non-linear watermarks. When you set this value to Watermarked (WATERMARKED), the service fails the job. Nielsen requires that you add non-linear watermarking to only clean content that doesn't already have non-linear Nielsen watermarks." + "NielsenNonLinearWatermarkSettings$SourceWatermarkStatus": "Required. Specify whether your source content already contains Nielsen non-linear watermarks. When you set this value to Watermarked, the service fails the job. Nielsen requires that you add non-linear watermarking to only clean content that doesn't already have non-linear Nielsen watermarks." } }, "NielsenUniqueTicPerAudioTrackType": { - "base": "To create assets that have the same TIC values in each audio track, keep the default value Share TICs (SAME_TICS_PER_TRACK). To create assets that have unique TIC values for each audio track, choose Use unique TICs (RESERVE_UNIQUE_TICS_PER_TRACK).", + "base": "To create assets that have the same TIC values in each audio track, keep the default value Share TICs. To create assets that have unique TIC values for each audio track, choose Use unique TICs.", "refs": { - "NielsenNonLinearWatermarkSettings$UniqueTicPerAudioTrack": "To create assets that have the same TIC values in each audio track, keep the default value Share TICs (SAME_TICS_PER_TRACK). To create assets that have unique TIC values for each audio track, choose Use unique TICs (RESERVE_UNIQUE_TICS_PER_TRACK)." + "NielsenNonLinearWatermarkSettings$UniqueTicPerAudioTrack": "To create assets that have the same TIC values in each audio track, keep the default value Share TICs. To create assets that have unique TIC values for each audio track, choose Use unique TICs." } }, "NoiseFilterPostTemporalSharpening": { - "base": "When you set Noise reducer (noiseReducer) to Temporal (TEMPORAL), the bandwidth and sharpness of your output is reduced. You can optionally use Post temporal sharpening (postTemporalSharpening) to apply sharpening to the edges of your output. Note that Post temporal sharpening will also make the bandwidth reduction from the Noise reducer smaller. The default behavior, Auto (AUTO), allows the transcoder to determine whether to apply sharpening, depending on your input type and quality. When you set Post temporal sharpening to Enabled (ENABLED), specify how much sharpening is applied using Post temporal sharpening strength (postTemporalSharpeningStrength). Set Post temporal sharpening to Disabled (DISABLED) to not apply sharpening.", + "base": "When you set Noise reducer to Temporal, the bandwidth and sharpness of your output is reduced. You can optionally use Post temporal sharpening to apply sharpening to the edges of your output. Note that Post temporal sharpening will also make the bandwidth reduction from the Noise reducer smaller. The default behavior, Auto, allows the transcoder to determine whether to apply sharpening, depending on your input type and quality. 
When you set Post temporal sharpening to Enabled, specify how much sharpening is applied using Post temporal sharpening strength. Set Post temporal sharpening to Disabled to not apply sharpening.", "refs": { - "NoiseReducerTemporalFilterSettings$PostTemporalSharpening": "When you set Noise reducer (noiseReducer) to Temporal (TEMPORAL), the bandwidth and sharpness of your output is reduced. You can optionally use Post temporal sharpening (postTemporalSharpening) to apply sharpening to the edges of your output. Note that Post temporal sharpening will also make the bandwidth reduction from the Noise reducer smaller. The default behavior, Auto (AUTO), allows the transcoder to determine whether to apply sharpening, depending on your input type and quality. When you set Post temporal sharpening to Enabled (ENABLED), specify how much sharpening is applied using Post temporal sharpening strength (postTemporalSharpeningStrength). Set Post temporal sharpening to Disabled (DISABLED) to not apply sharpening." + "NoiseReducerTemporalFilterSettings$PostTemporalSharpening": "When you set Noise reducer to Temporal, the bandwidth and sharpness of your output is reduced. You can optionally use Post temporal sharpening to apply sharpening to the edges of your output. Note that Post temporal sharpening will also make the bandwidth reduction from the Noise reducer smaller. The default behavior, Auto, allows the transcoder to determine whether to apply sharpening, depending on your input type and quality. When you set Post temporal sharpening to Enabled, specify how much sharpening is applied using Post temporal sharpening strength. Set Post temporal sharpening to Disabled to not apply sharpening." } }, "NoiseFilterPostTemporalSharpeningStrength": { - "base": "Use Post temporal sharpening strength (postTemporalSharpeningStrength) to define the amount of sharpening the transcoder applies to your output. Set Post temporal sharpening strength to Low (LOW), Medium (MEDIUM), or High (HIGH) to indicate the amount of sharpening.", + "base": "Use Post temporal sharpening strength to define the amount of sharpening the transcoder applies to your output. Set Post temporal sharpening strength to Low, Medium, or High to indicate the amount of sharpening.", "refs": { - "NoiseReducerTemporalFilterSettings$PostTemporalSharpeningStrength": "Use Post temporal sharpening strength (postTemporalSharpeningStrength) to define the amount of sharpening the transcoder applies to your output. Set Post temporal sharpening strength to Low (LOW), Medium (MEDIUM), or High (HIGH) to indicate the amount of sharpening." + "NoiseReducerTemporalFilterSettings$PostTemporalSharpeningStrength": "Use Post temporal sharpening strength to define the amount of sharpening the transcoder applies to your output. Set Post temporal sharpening strength to Low, Medium, or High to indicate the amount of sharpening." } }, "NoiseReducer": { @@ -2994,9 +2994,9 @@ } }, "NoiseReducerFilter": { - "base": "Use Noise reducer filter (NoiseReducerFilter) to select one of the following spatial image filtering functions. To use this setting, you must also enable Noise reducer (NoiseReducer). * Bilateral preserves edges while reducing noise. * Mean (softest), Gaussian, Lanczos, and Sharpen (sharpest) do convolution filtering. * Conserve does min/max noise reduction. * Spatial does frequency-domain filtering based on JND principles. 
* Temporal optimizes video quality for complex motion.", + "base": "Use Noise reducer filter to select one of the following spatial image filtering functions. To use this setting, you must also enable Noise reducer. * Bilateral preserves edges while reducing noise. * Mean (softest), Gaussian, Lanczos, and Sharpen (sharpest) do convolution filtering. * Conserve does min/max noise reduction. * Spatial does frequency-domain filtering based on JND principles. * Temporal optimizes video quality for complex motion.", "refs": { - "NoiseReducer$Filter": "Use Noise reducer filter (NoiseReducerFilter) to select one of the following spatial image filtering functions. To use this setting, you must also enable Noise reducer (NoiseReducer). * Bilateral preserves edges while reducing noise. * Mean (softest), Gaussian, Lanczos, and Sharpen (sharpest) do convolution filtering. * Conserve does min/max noise reduction. * Spatial does frequency-domain filtering based on JND principles. * Temporal optimizes video quality for complex motion." + "NoiseReducer$Filter": "Use Noise reducer filter to select one of the following spatial image filtering functions. To use this setting, you must also enable Noise reducer. * Bilateral preserves edges while reducing noise. * Mean (softest), Gaussian, Lanczos, and Sharpen (sharpest) do convolution filtering. * Conserve does min/max noise reduction. * Spatial does frequency-domain filtering based on JND principles. * Temporal optimizes video quality for complex motion." } }, "NoiseReducerFilterSettings": { @@ -3092,9 +3092,9 @@ } }, "PadVideo": { - "base": "Use this setting if your input has video and audio durations that don't align, and your output or player has strict alignment requirements. Examples: Input audio track has a delayed start. Input video track ends before audio ends. When you set Pad video (padVideo) to Black (BLACK), MediaConvert generates black video frames so that output video and audio durations match. Black video frames are added at the beginning or end, depending on your input. To keep the default behavior and not generate black video, set Pad video to Disabled (DISABLED) or leave blank.", + "base": "Use this setting if your input has video and audio durations that don't align, and your output or player has strict alignment requirements. Examples: Input audio track has a delayed start. Input video track ends before audio ends. When you set Pad video to Black, MediaConvert generates black video frames so that output video and audio durations match. Black video frames are added at the beginning or end, depending on your input. To keep the default behavior and not generate black video, set Pad video to Disabled or leave blank.", "refs": { - "VideoSelector$PadVideo": "Use this setting if your input has video and audio durations that don't align, and your output or player has strict alignment requirements. Examples: Input audio track has a delayed start. Input video track ends before audio ends. When you set Pad video (padVideo) to Black (BLACK), MediaConvert generates black video frames so that output video and audio durations match. Black video frames are added at the beginning or end, depending on your input. To keep the default behavior and not generate black video, set Pad video to Disabled (DISABLED) or leave blank." + "VideoSelector$PadVideo": "Use this setting if your input has video and audio durations that don't align, and your output or player has strict alignment requirements. Examples: Input audio track has a delayed start. 
Input video track ends before audio ends. When you set Pad video to Black, MediaConvert generates black video frames so that output video and audio durations match. Black video frames are added at the beginning or end, depending on your input. To keep the default behavior and not generate black video, set Pad video to Disabled or leave blank." } }, "PartnerWatermarking": { @@ -3142,21 +3142,21 @@ } }, "ProresChromaSampling": { - "base": "This setting applies only to ProRes 4444 and ProRes 4444 XQ outputs that you create from inputs that use 4:4:4 chroma sampling. Set Preserve 4:4:4 sampling (PRESERVE_444_SAMPLING) to allow outputs to also use 4:4:4 chroma sampling. You must specify a value for this setting when your output codec profile supports 4:4:4 chroma sampling. Related Settings: For Apple ProRes outputs with 4:4:4 chroma sampling: Choose Preserve 4:4:4 sampling. Use when your input has 4:4:4 chroma sampling and your output codec Profile is Apple ProRes 4444 or 4444 XQ. Note that when you choose Preserve 4:4:4 sampling, you cannot include any of the following Preprocessors: Dolby Vision, HDR10+, or Noise reducer.", + "base": "This setting applies only to ProRes 4444 and ProRes 4444 XQ outputs that you create from inputs that use 4:4:4 chroma sampling. Set Preserve 4:4:4 sampling to allow outputs to also use 4:4:4 chroma sampling. You must specify a value for this setting when your output codec profile supports 4:4:4 chroma sampling. Related Settings: For Apple ProRes outputs with 4:4:4 chroma sampling: Choose Preserve 4:4:4 sampling. Use when your input has 4:4:4 chroma sampling and your output codec Profile is Apple ProRes 4444 or 4444 XQ. Note that when you choose Preserve 4:4:4 sampling, you cannot include any of the following Preprocessors: Dolby Vision, HDR10+, or Noise reducer.", "refs": { - "ProresSettings$ChromaSampling": "This setting applies only to ProRes 4444 and ProRes 4444 XQ outputs that you create from inputs that use 4:4:4 chroma sampling. Set Preserve 4:4:4 sampling (PRESERVE_444_SAMPLING) to allow outputs to also use 4:4:4 chroma sampling. You must specify a value for this setting when your output codec profile supports 4:4:4 chroma sampling. Related Settings: For Apple ProRes outputs with 4:4:4 chroma sampling: Choose Preserve 4:4:4 sampling. Use when your input has 4:4:4 chroma sampling and your output codec Profile is Apple ProRes 4444 or 4444 XQ. Note that when you choose Preserve 4:4:4 sampling, you cannot include any of the following Preprocessors: Dolby Vision, HDR10+, or Noise reducer." + "ProresSettings$ChromaSampling": "This setting applies only to ProRes 4444 and ProRes 4444 XQ outputs that you create from inputs that use 4:4:4 chroma sampling. Set Preserve 4:4:4 sampling to allow outputs to also use 4:4:4 chroma sampling. You must specify a value for this setting when your output codec profile supports 4:4:4 chroma sampling. Related Settings: For Apple ProRes outputs with 4:4:4 chroma sampling: Choose Preserve 4:4:4 sampling. Use when your input has 4:4:4 chroma sampling and your output codec Profile is Apple ProRes 4444 or 4444 XQ. Note that when you choose Preserve 4:4:4 sampling, you cannot include any of the following Preprocessors: Dolby Vision, HDR10+, or Noise reducer." 
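The ProRes entries above are easy to get wrong when building jobs programmatically rather than in the console, so a concrete fragment may help. Below is a minimal aws-sdk-go sketch of the video settings that the ProresChromaSampling and ProresCodecProfile entries describe; the field names come from the shape keys in this file, the APPLE_PRORES_4444 enum string is an assumption, and the rest of the job (inputs, output groups, role) is omitted.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

// proresVideoDescription sketches a ProRes output that preserves 4:4:4
// chroma sampling. Per the ProresChromaSampling note above, this is only
// valid with the 4444 / 4444 XQ profiles, and it excludes the Dolby
// Vision, HDR10+, and Noise reducer preprocessors.
func proresVideoDescription() *mediaconvert.VideoDescription {
	return &mediaconvert.VideoDescription{
		CodecSettings: &mediaconvert.VideoCodecSettings{
			// ProresSettings is required when Codec is PRORES.
			Codec: aws.String("PRORES"),
			ProresSettings: &mediaconvert.ProresSettings{
				CodecProfile:   aws.String("APPLE_PRORES_4444"), // assumed enum string
				ChromaSampling: aws.String("PRESERVE_444_SAMPLING"),
			},
		},
	}
}

func main() {
	fmt.Println(proresVideoDescription())
}
```

A job builder that sets PRESERVE_444_SAMPLING should also guard against adding the preprocessors named above, since the documentation says MediaConvert does not accept that combination.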
} }, "ProresCodecProfile": { - "base": "Use Profile (ProResCodecProfile) to specify the type of Apple ProRes codec to use for this output.", + "base": "Use Profile to specify the type of Apple ProRes codec to use for this output.", "refs": { - "ProresSettings$CodecProfile": "Use Profile (ProResCodecProfile) to specify the type of Apple ProRes codec to use for this output." + "ProresSettings$CodecProfile": "Use Profile to specify the type of Apple ProRes codec to use for this output." } }, "ProresFramerateControl": { - "base": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator.", + "base": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction.", "refs": { - "ProresSettings$FramerateControl": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator." + "ProresSettings$FramerateControl": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction." } }, "ProresFramerateConversionAlgorithm": { @@ -3166,39 +3166,39 @@ } }, "ProresInterlaceMode": { - "base": "Choose the scan line type for the output. Keep the default value, Progressive (PROGRESSIVE) to create a progressive output, regardless of the scan type of your input. 
Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) to create an output that's interlaced with the same field polarity throughout. Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. If the source is progressive, the output will be interlaced with top field bottom field first, depending on which of the Follow options you choose.", + "base": "Choose the scan line type for the output. Keep the default value, Progressive, to create a progressive output, regardless of the scan type of your input. Use Top field first or Bottom field first to create an output that's interlaced with the same field polarity throughout. Use Follow, default top or Follow, default bottom to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. If the source is progressive, the output will be interlaced with top field first or bottom field first, depending on which of the Follow options you choose.", "refs": { - "ProresSettings$InterlaceMode": "Choose the scan line type for the output. Keep the default value, Progressive (PROGRESSIVE) to create a progressive output, regardless of the scan type of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) to create an output that's interlaced with the same field polarity throughout. Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. If the source is progressive, the output will be interlaced with top field bottom field first, depending on which of the Follow options you choose." + "ProresSettings$InterlaceMode": "Choose the scan line type for the output. Keep the default value, Progressive, to create a progressive output, regardless of the scan type of your input. Use Top field first or Bottom field first to create an output that's interlaced with the same field polarity throughout. Use Follow, default top or Follow, default bottom to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. If the source is progressive, the output will be interlaced with top field first or bottom field first, depending on which of the Follow options you choose." } }, "ProresParControl": { - "base": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses the PAR from your input video for your output.
To specify a different PAR in the console, choose any value other than Follow source. To specify a different PAR by editing the JSON job specification, choose SPECIFIED. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings.", + "base": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source, uses the PAR from your input video for your output. To specify a different PAR, choose any value other than Follow source. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings.", "refs": { - "ProresSettings$ParControl": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses the PAR from your input video for your output. To specify a different PAR in the console, choose any value other than Follow source. To specify a different PAR by editing the JSON job specification, choose SPECIFIED. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings." + "ProresSettings$ParControl": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source, uses the PAR from your input video for your output. To specify a different PAR, choose any value other than Follow source. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings." } }, "ProresScanTypeConversionMode": { - "base": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing (INTERLACED), for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode (interlaceMode) to a value other than Progressive (PROGRESSIVE).", + "base": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing, for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine to None or Soft. You can't use optimized interlacing for hard telecine outputs. 
You must also set Interlace mode to a value other than Progressive.", "refs": { - "ProresSettings$ScanTypeConversionMode": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing (INTERLACED), for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode (interlaceMode) to a value other than Progressive (PROGRESSIVE)." + "ProresSettings$ScanTypeConversionMode": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing, for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine to None or Soft. You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode to a value other than Progressive." } }, "ProresSettings": { - "base": "Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the value PRORES.", + "base": "Required when you set Codec to the value PRORES.", "refs": { - "VideoCodecSettings$ProresSettings": "Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the value PRORES." + "VideoCodecSettings$ProresSettings": "Required when you set Codec to the value PRORES." } }, "ProresSlowPal": { - "base": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output. When you enable slow PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio to keep it synchronized with the video. Note that enabling this setting will slightly reduce the duration of your video. Required settings: You must also set Framerate to 25. In your JSON job specification, set (framerateControl) to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to 1.", + "base": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output. When you enable slow PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio to keep it synchronized with the video. Note that enabling this setting will slightly reduce the duration of your video. 
Required settings: You must also set Framerate to 25.", "refs": { - "ProresSettings$SlowPal": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output. When you enable slow PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio to keep it synchronized with the video. Note that enabling this setting will slightly reduce the duration of your video. Required settings: You must also set Framerate to 25. In your JSON job specification, set (framerateControl) to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to 1." + "ProresSettings$SlowPal": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output. When you enable slow PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio to keep it synchronized with the video. Note that enabling this setting will slightly reduce the duration of your video. Required settings: You must also set Framerate to 25." } }, "ProresTelecine": { - "base": "When you do frame rate conversion from 23.976 frames per second (fps) to 29.97 fps, and your output scan type is interlaced, you can optionally enable hard telecine (HARD) to create a smoother picture. When you keep the default value, None (NONE), MediaConvert does a standard frame rate conversion to 29.97 without doing anything with the field polarity to create a smoother picture.", + "base": "When you do frame rate conversion from 23.976 frames per second (fps) to 29.97 fps, and your output scan type is interlaced, you can optionally enable hard telecine to create a smoother picture. When you keep the default value, None, MediaConvert does a standard frame rate conversion to 29.97 without doing anything with the field polarity to create a smoother picture.", "refs": { - "ProresSettings$Telecine": "When you do frame rate conversion from 23.976 frames per second (fps) to 29.97 fps, and your output scan type is interlaced, you can optionally enable hard telecine (HARD) to create a smoother picture. When you keep the default value, None (NONE), MediaConvert does a standard frame rate conversion to 29.97 without doing anything with the field polarity to create a smoother picture." + "ProresSettings$Telecine": "When you do frame rate conversion from 23.976 frames per second (fps) to 29.97 fps, and your output scan type is interlaced, you can optionally enable hard telecine to create a smoother picture. When you keep the default value, None, MediaConvert does a standard frame rate conversion to 29.97 without doing anything with the field polarity to create a smoother picture." } }, "PutPolicyRequest": { @@ -3243,16 +3243,16 @@ "Rectangle": { "base": "Use Rectangle to identify a specific area of the video frame.", "refs": { - "Input$Crop": "Use Cropping selection (crop) to specify the video area that the service will include in the output video frame. If you specify a value here, it will override any value that you specify in the output setting Cropping selection (crop).", - "Input$Position": "Use Selection placement (position) to define the video area in your output frame. The area outside of the rectangle that you specify here is black. If you specify a value here, it will override any value that you specify in the output setting Selection placement (position). If you specify a value here, this will override any AFD values in your input, even if you set Respond to AFD (RespondToAfd) to Respond (RESPOND). 
If you specify a value here, this will ignore anything that you specify for the setting Scaling Behavior (scalingBehavior).", - "InputTemplate$Crop": "Use Cropping selection (crop) to specify the video area that the service will include in the output video frame. If you specify a value here, it will override any value that you specify in the output setting Cropping selection (crop).", - "InputTemplate$Position": "Use Selection placement (position) to define the video area in your output frame. The area outside of the rectangle that you specify here is black. If you specify a value here, it will override any value that you specify in the output setting Selection placement (position). If you specify a value here, this will override any AFD values in your input, even if you set Respond to AFD (RespondToAfd) to Respond (RESPOND). If you specify a value here, this will ignore anything that you specify for the setting Scaling Behavior (scalingBehavior).", - "VideoDescription$Crop": "Use Cropping selection (crop) to specify the video area that the service will include in the output video frame.", - "VideoDescription$Position": "Use Selection placement (position) to define the video area in your output frame. The area outside of the rectangle that you specify here is black." + "Input$Crop": "Use Cropping selection to specify the video area that the service will include in the output video frame. If you specify a value here, it will override any value that you specify in the output setting Cropping selection.", + "Input$Position": "Use Selection placement to define the video area in your output frame. The area outside of the rectangle that you specify here is black. If you specify a value here, it will override any value that you specify in the output setting Selection placement. If you specify a value here, this will override any AFD values in your input, even if you set Respond to AFD to Respond. If you specify a value here, this will ignore anything that you specify for the setting Scaling Behavior.", + "InputTemplate$Crop": "Use Cropping selection to specify the video area that the service will include in the output video frame. If you specify a value here, it will override any value that you specify in the output setting Cropping selection.", + "InputTemplate$Position": "Use Selection placement to define the video area in your output frame. The area outside of the rectangle that you specify here is black. If you specify a value here, it will override any value that you specify in the output setting Selection placement. If you specify a value here, this will override any AFD values in your input, even if you set Respond to AFD to Respond. If you specify a value here, this will ignore anything that you specify for the setting Scaling Behavior.", + "VideoDescription$Crop": "Use Cropping selection to specify the video area that the service will include in the output video frame.", + "VideoDescription$Position": "Use Selection placement to define the video area in your output frame. The area outside of the rectangle that you specify here is black." } }, "RemixSettings": { - "base": "Use Manual audio remixing (RemixSettings) to adjust audio levels for each audio channel in each output of your job. With audio remixing, you can output more or fewer audio channels than your input audio source provides.", + "base": "Use Manual audio remixing to adjust audio levels for each audio channel in each output of your job. 
With audio remixing, you can output more or fewer audio channels than your input audio source provides.", "refs": { "AudioDescription$RemixSettings": "Advanced audio remixing settings.", "AudioSelector$RemixSettings": "Use these settings to reorder the audio channels of one input to match those of another input. This allows you to combine the two files into a single output, one after the other." @@ -3297,9 +3297,9 @@ } }, "RespondToAfd": { - "base": "Use Respond to AFD (RespondToAfd) to specify how the service changes the video itself in response to AFD values in the input. * Choose Respond to clip the input video frame according to the AFD value, input display aspect ratio, and output display aspect ratio. * Choose Passthrough to include the input AFD values. Do not choose this when AfdSignaling is set to (NONE). A preferred implementation of this workflow is to set RespondToAfd to (NONE) and set AfdSignaling to (AUTO). * Choose None to remove all input AFD values from this output.", + "base": "Use Respond to AFD to specify how the service changes the video itself in response to AFD values in the input. * Choose Respond to clip the input video frame according to the AFD value, input display aspect ratio, and output display aspect ratio. * Choose Passthrough to include the input AFD values. Do not choose this when AfdSignaling is set to NONE. A preferred implementation of this workflow is to set RespondToAfd to NONE and set AfdSignaling to AUTO. * Choose None to remove all input AFD values from this output.", "refs": { - "VideoDescription$RespondToAfd": "Use Respond to AFD (RespondToAfd) to specify how the service changes the video itself in response to AFD values in the input. * Choose Respond to clip the input video frame according to the AFD value, input display aspect ratio, and output display aspect ratio. * Choose Passthrough to include the input AFD values. Do not choose this when AfdSignaling is set to (NONE). A preferred implementation of this workflow is to set RespondToAfd to (NONE) and set AfdSignaling to (AUTO). * Choose None to remove all input AFD values from this output." + "VideoDescription$RespondToAfd": "Use Respond to AFD to specify how the service changes the video itself in response to AFD values in the input. * Choose Respond to clip the input video frame according to the AFD value, input display aspect ratio, and output display aspect ratio. * Choose Passthrough to include the input AFD values. Do not choose this when AfdSignaling is set to NONE. A preferred implementation of this workflow is to set RespondToAfd to NONE and set AfdSignaling to AUTO. * Choose None to remove all input AFD values from this output." } }, "RuleType": { @@ -3333,9 +3333,9 @@ } }, "S3ServerSideEncryptionType": { - "base": "Specify how you want your data keys managed. AWS uses data keys to encrypt your content. AWS also encrypts the data keys themselves, using a customer master key (CMK), and then stores the encrypted data keys alongside your encrypted content. Use this setting to specify which AWS service manages the CMK. For simplest set up, choose Amazon S3 (SERVER_SIDE_ENCRYPTION_S3). If you want your master key to be managed by AWS Key Management Service (KMS), choose AWS KMS (SERVER_SIDE_ENCRYPTION_KMS). By default, when you choose AWS KMS, KMS uses the AWS managed customer master key (CMK) associated with Amazon S3 to encrypt your data keys. You can optionally choose to specify a different, customer managed CMK.
Do so by specifying the Amazon Resource Name (ARN) of the key for the setting KMS ARN (kmsKeyArn).", + "base": "Specify how you want your data keys managed. AWS uses data keys to encrypt your content. AWS also encrypts the data keys themselves, using a customer master key (CMK), and then stores the encrypted data keys alongside your encrypted content. Use this setting to specify which AWS service manages the CMK. For the simplest setup, choose Amazon S3. If you want your master key to be managed by AWS Key Management Service (KMS), choose AWS KMS. By default, when you choose AWS KMS, KMS uses the AWS managed customer master key (CMK) associated with Amazon S3 to encrypt your data keys. You can optionally choose to specify a different, customer managed CMK. Do so by specifying the Amazon Resource Name (ARN) of the key for the setting KMS ARN.", "refs": { - "S3EncryptionSettings$EncryptionType": "Specify how you want your data keys managed. AWS uses data keys to encrypt your content. AWS also encrypts the data keys themselves, using a customer master key (CMK), and then stores the encrypted data keys alongside your encrypted content. Use this setting to specify which AWS service manages the CMK. For simplest set up, choose Amazon S3 (SERVER_SIDE_ENCRYPTION_S3). If you want your master key to be managed by AWS Key Management Service (KMS), choose AWS KMS (SERVER_SIDE_ENCRYPTION_KMS). By default, when you choose AWS KMS, KMS uses the AWS managed customer master key (CMK) associated with Amazon S3 to encrypt your data keys. You can optionally choose to specify a different, customer managed CMK. Do so by specifying the Amazon Resource Name (ARN) of the key for the setting KMS ARN (kmsKeyArn)." + "S3EncryptionSettings$EncryptionType": "Specify how you want your data keys managed. AWS uses data keys to encrypt your content. AWS also encrypts the data keys themselves, using a customer master key (CMK), and then stores the encrypted data keys alongside your encrypted content. Use this setting to specify which AWS service manages the CMK. For the simplest setup, choose Amazon S3. If you want your master key to be managed by AWS Key Management Service (KMS), choose AWS KMS. By default, when you choose AWS KMS, KMS uses the AWS managed customer master key (CMK) associated with Amazon S3 to encrypt your data keys. You can optionally choose to specify a different, customer managed CMK. Do so by specifying the Amazon Resource Name (ARN) of the key for the setting KMS ARN." } }, "SampleRangeConversion": { @@ -3345,21 +3345,21 @@ } }, "ScalingBehavior": { - "base": "Specify how the service handles outputs that have a different aspect ratio from the input aspect ratio. Choose Stretch to output (STRETCH_TO_OUTPUT) to have the service stretch your video image to fit. Keep the setting Default (DEFAULT) to have the service letterbox your video instead. This setting overrides any value that you specify for the setting Selection placement (position) in this output.", + "base": "Specify how the service handles outputs that have a different aspect ratio from the input aspect ratio. Choose Stretch to output to have the service stretch your video image to fit. Keep the setting Default to have the service letterbox your video instead. This setting overrides any value that you specify for the setting Selection placement in this output.", "refs": { - "VideoDescription$ScalingBehavior": "Specify how the service handles outputs that have a different aspect ratio from the input aspect ratio.
Choose Stretch to output (STRETCH_TO_OUTPUT) to have the service stretch your video image to fit. Keep the setting Default (DEFAULT) to have the service letterbox your video instead. This setting overrides any value that you specify for the setting Selection placement (position) in this output." + "VideoDescription$ScalingBehavior": "Specify how the service handles outputs that have a different aspect ratio from the input aspect ratio. Choose Stretch to output to have the service stretch your video image to fit. Keep the setting Default to have the service letterbox your video instead. This setting overrides any value that you specify for the setting Selection placement in this output." } }, "SccDestinationFramerate": { - "base": "Set Framerate (SccDestinationFramerate) to make sure that the captions and the video are synchronized in the output. Specify a frame rate that matches the frame rate of the associated video. If the video frame rate is 29.97, choose 29.97 dropframe (FRAMERATE_29_97_DROPFRAME) only if the video has video_insertion=true and drop_frame_timecode=true; otherwise, choose 29.97 non-dropframe (FRAMERATE_29_97_NON_DROPFRAME).", + "base": "Set Framerate to make sure that the captions and the video are synchronized in the output. Specify a frame rate that matches the frame rate of the associated video. If the video frame rate is 29.97, choose 29.97 dropframe only if the video has video_insertion=true and drop_frame_timecode=true; otherwise, choose 29.97 non-dropframe.", "refs": { - "SccDestinationSettings$Framerate": "Set Framerate (SccDestinationFramerate) to make sure that the captions and the video are synchronized in the output. Specify a frame rate that matches the frame rate of the associated video. If the video frame rate is 29.97, choose 29.97 dropframe (FRAMERATE_29_97_DROPFRAME) only if the video has video_insertion=true and drop_frame_timecode=true; otherwise, choose 29.97 non-dropframe (FRAMERATE_29_97_NON_DROPFRAME)." + "SccDestinationSettings$Framerate": "Set Framerate to make sure that the captions and the video are synchronized in the output. Specify a frame rate that matches the frame rate of the associated video. If the video frame rate is 29.97, choose 29.97 dropframe only if the video has video_insertion=true and drop_frame_timecode=true; otherwise, choose 29.97 non-dropframe." } }, "SccDestinationSettings": { - "base": "Settings related to SCC captions. SCC is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/scc-srt-output-captions.html. When you work directly in your JSON job specification, include this object and any required children when you set destinationType to SCC.", + "base": "Settings related to SCC captions. SCC is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/scc-srt-output-captions.html.", "refs": { - "CaptionDestinationSettings$SccDestinationSettings": "Settings related to SCC captions. SCC is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. 
For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/scc-srt-output-captions.html. When you work directly in your JSON job specification, include this object and any required children when you set destinationType to SCC." + "CaptionDestinationSettings$SccDestinationSettings": "Settings related to SCC captions. SCC is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/scc-srt-output-captions.html." } }, "SimulateReservedQueue": { @@ -3384,15 +3384,15 @@ } }, "SrtDestinationSettings": { - "base": "Settings related to SRT captions. SRT is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. When you work directly in your JSON job specification, include this object and any required children when you set destinationType to SRT.", + "base": "Settings related to SRT captions. SRT is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video.", "refs": { - "CaptionDestinationSettings$SrtDestinationSettings": "Settings related to SRT captions. SRT is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. When you work directly in your JSON job specification, include this object and any required children when you set destinationType to SRT." + "CaptionDestinationSettings$SrtDestinationSettings": "Settings related to SRT captions. SRT is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video." } }, "SrtStylePassthrough": { - "base": "Set Style passthrough (StylePassthrough) to ENABLED to use the available style, color, and position information from your input captions. MediaConvert uses default settings for any missing style and position information in your input captions. Set Style passthrough to DISABLED, or leave blank, to ignore the style and position information from your input captions and use simplified output captions.", + "base": "Set Style passthrough to ENABLED to use the available style, color, and position information from your input captions. MediaConvert uses default settings for any missing style and position information in your input captions. Set Style passthrough to DISABLED, or leave blank, to ignore the style and position information from your input captions and use simplified output captions.", "refs": { - "SrtDestinationSettings$StylePassthrough": "Set Style passthrough (StylePassthrough) to ENABLED to use the available style, color, and position information from your input captions. MediaConvert uses default settings for any missing style and position information in your input captions. Set Style passthrough to DISABLED, or leave blank, to ignore the style and position information from your input captions and use simplified output captions." + "SrtDestinationSettings$StylePassthrough": "Set Style passthrough to ENABLED to use the available style, color, and position information from your input captions. 
MediaConvert uses default settings for any missing style and position information in your input captions. Set Style passthrough to DISABLED, or leave blank, to ignore the style and position information from your input captions and use simplified output captions." } }, "StaticKeyProvider": { @@ -3423,9 +3423,9 @@ } }, "TeletextDestinationSettings": { - "base": "Settings related to teletext captions. Set up teletext captions in the same output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/teletext-output-captions.html. When you work directly in your JSON job specification, include this object and any required children when you set destinationType to TELETEXT.", + "base": "Settings related to teletext captions. Set up teletext captions in the same output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/teletext-output-captions.html.", "refs": { - "CaptionDestinationSettings$TeletextDestinationSettings": "Settings related to teletext captions. Set up teletext captions in the same output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/teletext-output-captions.html. When you work directly in your JSON job specification, include this object and any required children when you set destinationType to TELETEXT." + "CaptionDestinationSettings$TeletextDestinationSettings": "Settings related to teletext captions. Set up teletext captions in the same output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/teletext-output-captions.html." } }, "TeletextPageType": { @@ -3447,9 +3447,9 @@ } }, "TimecodeBurninPosition": { - "base": "Use Position (Position) under under Timecode burn-in (TimecodeBurnIn) to specify the location the burned-in timecode on output video.", + "base": "Use Position under Timecode burn-in to specify the location of the burned-in timecode on output video.", "refs": { - "TimecodeBurnin$Position": "Use Position (Position) under under Timecode burn-in (TimecodeBurnIn) to specify the location the burned-in timecode on output video." + "TimecodeBurnin$Position": "Use Position under Timecode burn-in to specify the location of the burned-in timecode on output video." } }, "TimecodeConfig": { @@ -3460,22 +3460,22 @@ } }, "TimecodeSource": { - "base": "Use Source (TimecodeSource) to set how timecodes are handled within this job. To make sure that your video, audio, captions, and markers are synchronized and that time-based features, such as image inserter, work correctly, choose the Timecode source option that matches your assets. All timecodes are in a 24-hour format with frame number (HH:MM:SS:FF). * Embedded (EMBEDDED) - Use the timecode that is in the input video. If no embedded timecode is in the source, the service will use Start at 0 (ZEROBASED) instead. * Start at 0 (ZEROBASED) - Set the timecode of the initial frame to 00:00:00:00. * Specified Start (SPECIFIEDSTART) - Set the timecode of the initial frame to a value other than zero. You use Start timecode (Start) to provide this value.", + "base": "Use Source to set how timecodes are handled within this job. To make sure that your video, audio, captions, and markers are synchronized and that time-based features, such as image inserter, work correctly, choose the Timecode source option that matches your assets. All timecodes are in a 24-hour format with frame number (HH:MM:SS:FF). * Embedded - Use the timecode that is in the input video.
If no embedded timecode is in the source, the service will use Start at 0 instead. * Start at 0 - Set the timecode of the initial frame to 00:00:00:00. * Specified Start - Set the timecode of the initial frame to a value other than zero. You use Start timecode to provide this value.", "refs": { - "TimecodeConfig$Source": "Use Source (TimecodeSource) to set how timecodes are handled within this job. To make sure that your video, audio, captions, and markers are synchronized and that time-based features, such as image inserter, work correctly, choose the Timecode source option that matches your assets. All timecodes are in a 24-hour format with frame number (HH:MM:SS:FF). * Embedded (EMBEDDED) - Use the timecode that is in the input video. If no embedded timecode is in the source, the service will use Start at 0 (ZEROBASED) instead. * Start at 0 (ZEROBASED) - Set the timecode of the initial frame to 00:00:00:00. * Specified Start (SPECIFIEDSTART) - Set the timecode of the initial frame to a value other than zero. You use Start timecode (Start) to provide this value." + "TimecodeConfig$Source": "Use Source to set how timecodes are handled within this job. To make sure that your video, audio, captions, and markers are synchronized and that time-based features, such as image inserter, work correctly, choose the Timecode source option that matches your assets. All timecodes are in a 24-hour format with frame number (HH:MM:SS:FF). * Embedded - Use the timecode that is in the input video. If no embedded timecode is in the source, the service will use Start at 0 instead. * Start at 0 - Set the timecode of the initial frame to 00:00:00:00. * Specified Start - Set the timecode of the initial frame to a value other than zero. You use Start timecode to provide this value." } }, "TimedMetadata": { - "base": "Set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH) to include ID3 metadata in this output. This includes ID3 metadata from the following features: ID3 timestamp period (timedMetadataId3Period), and Custom ID3 metadata inserter (timedMetadataInsertion). To exclude this ID3 metadata in this output: set ID3 metadata to None (NONE) or leave blank.", + "base": "Set ID3 metadata to Passthrough to include ID3 metadata in this output. This includes ID3 metadata from the following features: ID3 timestamp period, and Custom ID3 metadata inserter. To exclude this ID3 metadata in this output: set ID3 metadata to None or leave blank.", "refs": { - "M3u8Settings$TimedMetadata": "Set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH) to include ID3 metadata in this output. This includes ID3 metadata from the following features: ID3 timestamp period (timedMetadataId3Period), and Custom ID3 metadata inserter (timedMetadataInsertion). To exclude this ID3 metadata in this output: set ID3 metadata to None (NONE) or leave blank." + "M3u8Settings$TimedMetadata": "Set ID3 metadata to Passthrough to include ID3 metadata in this output. This includes ID3 metadata from the following features: ID3 timestamp period, and Custom ID3 metadata inserter. To exclude this ID3 metadata in this output: set ID3 metadata to None or leave blank." } }, "TimedMetadataInsertion": { - "base": "Insert user-defined custom ID3 metadata (id3) at timecodes (timecode) that you specify. In each output that you want to include this metadata, you must set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH).", + "base": "Insert user-defined custom ID3 metadata at timecodes that you specify. 
In each output that you want to include this metadata, you must set ID3 metadata to Passthrough.", "refs": { - "JobSettings$TimedMetadataInsertion": "Insert user-defined custom ID3 metadata (id3) at timecodes (timecode) that you specify. In each output that you want to include this metadata, you must set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH).", - "JobTemplateSettings$TimedMetadataInsertion": "Insert user-defined custom ID3 metadata (id3) at timecodes (timecode) that you specify. In each output that you want to include this metadata, you must set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH)." + "JobSettings$TimedMetadataInsertion": "Insert user-defined custom ID3 metadata at timecodes that you specify. In each output that you want to include this metadata, you must set ID3 metadata to Passthrough.", + "JobTemplateSettings$TimedMetadataInsertion": "Insert user-defined custom ID3 metadata at timecodes that you specify. In each output that you want to include this metadata, you must set ID3 metadata to Passthrough." } }, "Timing": { @@ -3496,9 +3496,9 @@ } }, "TtmlDestinationSettings": { - "base": "Settings related to TTML captions. TTML is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html. When you work directly in your JSON job specification, include this object and any required children when you set destinationType to TTML.", + "base": "Settings related to TTML captions. TTML is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html.", "refs": { - "CaptionDestinationSettings$TtmlDestinationSettings": "Settings related to TTML captions. TTML is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html. When you work directly in your JSON job specification, include this object and any required children when you set destinationType to TTML." + "CaptionDestinationSettings$TtmlDestinationSettings": "Settings related to TTML captions. TTML is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html." } }, "TtmlStylePassthrough": { @@ -3556,15 +3556,15 @@ } }, "Vc3Class": { - "base": "Specify the VC3 class to choose the quality characteristics for this output. VC3 class, together with the settings Framerate (framerateNumerator and framerateDenominator) and Resolution (height and width), determine your output bitrate. For example, say that your video resolution is 1920x1080 and your framerate is 29.97. Then Class 145 (CLASS_145) gives you an output with a bitrate of approximately 145 Mbps and Class 220 (CLASS_220) gives you and output with a bitrate of approximately 220 Mbps. 
VC3 class also specifies the color bit depth of your output.", + "base": "Specify the VC3 class to choose the quality characteristics for this output. VC3 class, together with the settings Framerate (framerateNumerator and framerateDenominator) and Resolution (height and width), determines your output bitrate. For example, say that your video resolution is 1920x1080 and your framerate is 29.97. Then Class 145 gives you an output with a bitrate of approximately 145 Mbps and Class 220 gives you an output with a bitrate of approximately 220 Mbps. VC3 class also specifies the color bit depth of your output.", "refs": { - "Vc3Settings$Vc3Class": "Specify the VC3 class to choose the quality characteristics for this output. VC3 class, together with the settings Framerate (framerateNumerator and framerateDenominator) and Resolution (height and width), determine your output bitrate. For example, say that your video resolution is 1920x1080 and your framerate is 29.97. Then Class 145 (CLASS_145) gives you an output with a bitrate of approximately 145 Mbps and Class 220 (CLASS_220) gives you and output with a bitrate of approximately 220 Mbps. VC3 class also specifies the color bit depth of your output." + "Vc3Settings$Vc3Class": "Specify the VC3 class to choose the quality characteristics for this output. VC3 class, together with the settings Framerate (framerateNumerator and framerateDenominator) and Resolution (height and width), determines your output bitrate. For example, say that your video resolution is 1920x1080 and your framerate is 29.97. Then Class 145 gives you an output with a bitrate of approximately 145 Mbps and Class 220 gives you an output with a bitrate of approximately 220 Mbps. VC3 class also specifies the color bit depth of your output." } }, "Vc3FramerateControl": { - "base": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator.", + "base": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction.", "refs": { - "Vc3Settings$FramerateControl": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions.
If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator." + "Vc3Settings$FramerateControl": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction." } }, "Vc3FramerateConversionAlgorithm": { @@ -3580,27 +3580,27 @@ } }, "Vc3ScanTypeConversionMode": { - "base": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing (INTERLACED), for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode (interlaceMode) to a value other than Progressive (PROGRESSIVE).", + "base": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing, for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine to None or Soft. You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode to a value other than Progressive.", "refs": { - "Vc3Settings$ScanTypeConversionMode": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing (INTERLACED), for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. 
When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode (interlaceMode) to a value other than Progressive (PROGRESSIVE)." + "Vc3Settings$ScanTypeConversionMode": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing, for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine to None or Soft. You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode to a value other than Progressive." } }, "Vc3Settings": { - "base": "Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the value VC3", + "base": "Required when you set Codec to the value VC3", "refs": { - "VideoCodecSettings$Vc3Settings": "Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the value VC3" + "VideoCodecSettings$Vc3Settings": "Required when you set Codec to the value VC3" } }, "Vc3SlowPal": { - "base": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output by relabeling the video frames and resampling your audio. Note that enabling this setting will slightly reduce the duration of your video. Related settings: You must also set Framerate to 25. In your JSON job specification, set (framerateControl) to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to 1.", + "base": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output by relabeling the video frames and resampling your audio. Note that enabling this setting will slightly reduce the duration of your video. Related settings: You must also set Framerate to 25.", "refs": { - "Vc3Settings$SlowPal": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output by relabeling the video frames and resampling your audio. Note that enabling this setting will slightly reduce the duration of your video. Related settings: You must also set Framerate to 25. In your JSON job specification, set (framerateControl) to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to 1." + "Vc3Settings$SlowPal": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output by relabeling the video frames and resampling your audio. Note that enabling this setting will slightly reduce the duration of your video. Related settings: You must also set Framerate to 25." 
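For SDK users, the VC3 entries above (Vc3Class, FramerateControl, SlowPal) map directly onto the Go SDK's generated types. The sketch below is illustrative only and is not part of this release's diff; it follows the slow PAL recipe spelled out in the removed text (framerateControl SPECIFIED, framerateNumerator 25, framerateDenominator 1), and the enum constant names (Vc3ClassClass1458bit, Vc3FramerateControlSpecified, Vc3SlowPalEnabled) assume the SDK's usual generated naming pattern.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	// A slow PAL VC3 output per the descriptions above: the frame rate is
	// pinned to 25/1 via SPECIFIED, with slow PAL relabeling enabled.
	vc3 := &mediaconvert.Vc3Settings{
		// Class 145 targets roughly 145 Mbps (per the example in the
		// Vc3Class description above) and fixes the color bit depth.
		Vc3Class:             aws.String(mediaconvert.Vc3ClassClass1458bit),
		FramerateControl:     aws.String(mediaconvert.Vc3FramerateControlSpecified),
		FramerateNumerator:   aws.Int64(25),
		FramerateDenominator: aws.Int64(1),
		SlowPal:              aws.String(mediaconvert.Vc3SlowPalEnabled),
	}
	fmt.Println(vc3)
}
```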
} }, "Vc3Telecine": { - "base": "When you do frame rate conversion from 23.976 frames per second (fps) to 29.97 fps, and your output scan type is interlaced, you can optionally enable hard telecine (HARD) to create a smoother picture. When you keep the default value, None (NONE), MediaConvert does a standard frame rate conversion to 29.97 without doing anything with the field polarity to create a smoother picture.", + "base": "When you do frame rate conversion from 23.976 frames per second (fps) to 29.97 fps, and your output scan type is interlaced, you can optionally enable hard telecine to create a smoother picture. When you keep the default value, None, MediaConvert does a standard frame rate conversion to 29.97 without doing anything with the field polarity to create a smoother picture.", "refs": { - "Vc3Settings$Telecine": "When you do frame rate conversion from 23.976 frames per second (fps) to 29.97 fps, and your output scan type is interlaced, you can optionally enable hard telecine (HARD) to create a smoother picture. When you keep the default value, None (NONE), MediaConvert does a standard frame rate conversion to 29.97 without doing anything with the field polarity to create a smoother picture." + "Vc3Settings$Telecine": "When you do frame rate conversion from 23.976 frames per second (fps) to 29.97 fps, and your output scan type is interlaced, you can optionally enable hard telecine to create a smoother picture. When you keep the default value, None, MediaConvert does a standard frame rate conversion to 29.97 without doing anything with the field polarity to create a smoother picture." } }, "VchipAction": { @@ -3612,17 +3612,17 @@ "VideoCodec": { "base": "Type of video codec", "refs": { - "VideoCodecSettings$Codec": "Specifies the video codec. This must be equal to one of the enum values defined by the object VideoCodec. To passthrough the video stream of your input JPEG2000, VC-3, AVC-INTRA or Apple ProRes video without any video encoding: Choose Passthrough. If you have multiple input videos, note that they must have identical encoding attributes. When you choose Passthrough, your output container must be MXF or QuickTime MOV." + "VideoCodecSettings$Codec": "Specifies the video codec. This must be equal to one of the enum values defined by the object VideoCodec. To passthrough the video stream of your input JPEG2000, VC-3, AVC-INTRA or Apple ProRes video without any video encoding: Choose Passthrough. If you have multiple input videos, note that they must have identical encoding attributes. When you choose Passthrough, your output container must be MXF or QuickTime MOV." } }, "VideoCodecSettings": { - "base": "Video codec settings, (CodecSettings) under (VideoDescription), contains the group of settings related to video encoding. The settings in this group vary depending on the value that you choose for Video codec (Codec). For each codec enum that you choose, define the corresponding settings object. The following lists the codec enum, settings object pairs. * AV1, Av1Settings * AVC_INTRA, AvcIntraSettings * FRAME_CAPTURE, FrameCaptureSettings * H_264, H264Settings * H_265, H265Settings * MPEG2, Mpeg2Settings * PRORES, ProresSettings * VC3, Vc3Settings * VP8, Vp8Settings * VP9, Vp9Settings * XAVC, XavcSettings", + "base": "Video codec settings contains the group of settings related to video encoding. The settings in this group vary depending on the value that you choose for Video codec. For each codec enum that you choose, define the corresponding settings object. 
The following lists the codec enum, settings object pairs. * AV1, Av1Settings * AVC_INTRA, AvcIntraSettings * FRAME_CAPTURE, FrameCaptureSettings * H_264, H264Settings * H_265, H265Settings * MPEG2, Mpeg2Settings * PRORES, ProresSettings * VC3, Vc3Settings * VP8, Vp8Settings * VP9, Vp9Settings * XAVC, XavcSettings", "refs": { - "VideoDescription$CodecSettings": "Video codec settings, (CodecSettings) under (VideoDescription), contains the group of settings related to video encoding. The settings in this group vary depending on the value that you choose for Video codec (Codec). For each codec enum that you choose, define the corresponding settings object. The following lists the codec enum, settings object pairs. * AV1, Av1Settings * AVC_INTRA, AvcIntraSettings * FRAME_CAPTURE, FrameCaptureSettings * H_264, H264Settings * H_265, H265Settings * MPEG2, Mpeg2Settings * PRORES, ProresSettings * VC3, Vc3Settings * VP8, Vp8Settings * VP9, Vp9Settings * XAVC, XavcSettings" + "VideoDescription$CodecSettings": "Video codec settings contains the group of settings related to video encoding. The settings in this group vary depending on the value that you choose for Video codec. For each codec enum that you choose, define the corresponding settings object. The following lists the codec enum, settings object pairs. * AV1, Av1Settings * AVC_INTRA, AvcIntraSettings * FRAME_CAPTURE, FrameCaptureSettings * H_264, H264Settings * H_265, H265Settings * MPEG2, Mpeg2Settings * PRORES, ProresSettings * VC3, Vc3Settings * VP8, Vp8Settings * VP9, Vp9Settings * XAVC, XavcSettings" } }, "VideoDescription": { - "base": "Settings related to video encoding of your output. The specific video settings depend on the video codec that you choose. When you work directly in your JSON job specification, include one instance of Video description (VideoDescription) per output.", + "base": "Settings related to video encoding of your output. The specific video settings depend on the video codec that you choose.", "refs": { "Output$VideoDescription": "VideoDescription contains a group of video encoding settings. The specific video settings depend on the video codec that you choose for the property codec. Include one instance of VideoDescription per output.", "PresetSettings$VideoDescription": "VideoDescription contains a group of video encoding settings. The specific video settings depend on the video codec that you choose for the property codec. Include one instance of VideoDescription per output." @@ -3635,9 +3635,9 @@ } }, "VideoPreprocessor": { - "base": "Find additional transcoding features under Preprocessors (VideoPreprocessors). Enable the features at each output individually. These features are disabled by default.", + "base": "Find additional transcoding features under Preprocessors. Enable the features at each output individually. These features are disabled by default.", "refs": { - "VideoDescription$VideoPreprocessors": "Find additional transcoding features under Preprocessors (VideoPreprocessors). Enable the features at each output individually. These features are disabled by default." + "VideoDescription$VideoPreprocessors": "Find additional transcoding features under Preprocessors. Enable the features at each output individually. These features are disabled by default." } }, "VideoSelector": { @@ -3648,9 +3648,9 @@ } }, "VideoTimecodeInsertion": { - "base": "Applies only to H.264, H.265, MPEG2, and ProRes outputs. Only enable Timecode insertion when the input frame rate is identical to the output frame rate. 
To include timecodes in this output, set Timecode insertion (VideoTimecodeInsertion) to PIC_TIMING_SEI. To leave them out, set it to DISABLED. Default is DISABLED. When the service inserts timecodes in an output, by default, it uses any embedded timecodes from the input. If none are present, the service will set the timecode for the first output frame to zero. To change this default behavior, adjust the settings under Timecode configuration (TimecodeConfig). In the console, these settings are located under Job > Job settings > Timecode configuration. Note - Timecode source under input settings (InputTimecodeSource) does not affect the timecodes that are inserted in the output. Source under Job settings > Timecode configuration (TimecodeSource) does.", + "base": "Applies only to H.264, H.265, MPEG2, and ProRes outputs. Only enable Timecode insertion when the input frame rate is identical to the output frame rate. To include timecodes in this output, set Timecode insertion to PIC_TIMING_SEI. To leave them out, set it to DISABLED. Default is DISABLED. When the service inserts timecodes in an output, by default, it uses any embedded timecodes from the input. If none are present, the service will set the timecode for the first output frame to zero. To change this default behavior, adjust the settings under Timecode configuration. In the console, these settings are located under Job > Job settings > Timecode configuration. Note - Timecode source under input settings does not affect the timecodes that are inserted in the output. Source under Job settings > Timecode configuration does.", "refs": { - "VideoDescription$TimecodeInsertion": "Applies only to H.264, H.265, MPEG2, and ProRes outputs. Only enable Timecode insertion when the input frame rate is identical to the output frame rate. To include timecodes in this output, set Timecode insertion (VideoTimecodeInsertion) to PIC_TIMING_SEI. To leave them out, set it to DISABLED. Default is DISABLED. When the service inserts timecodes in an output, by default, it uses any embedded timecodes from the input. If none are present, the service will set the timecode for the first output frame to zero. To change this default behavior, adjust the settings under Timecode configuration (TimecodeConfig). In the console, these settings are located under Job > Job settings > Timecode configuration. Note - Timecode source under input settings (InputTimecodeSource) does not affect the timecodes that are inserted in the output. Source under Job settings > Timecode configuration (TimecodeSource) does." + "VideoDescription$TimecodeInsertion": "Applies only to H.264, H.265, MPEG2, and ProRes outputs. Only enable Timecode insertion when the input frame rate is identical to the output frame rate. To include timecodes in this output, set Timecode insertion to PIC_TIMING_SEI. To leave them out, set it to DISABLED. Default is DISABLED. When the service inserts timecodes in an output, by default, it uses any embedded timecodes from the input. If none are present, the service will set the timecode for the first output frame to zero. To change this default behavior, adjust the settings under Timecode configuration. In the console, these settings are located under Job > Job settings > Timecode configuration. Note - Timecode source under input settings does not affect the timecodes that are inserted in the output. Source under Job settings > Timecode configuration does." 
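To make the codec enum / settings object pairing and the timecode insertion rule above concrete, here is another illustrative sketch (not part of this release's diff). ProRes is used because, per the description above, PIC_TIMING_SEI applies only to H.264, H.265, MPEG2, and ProRes outputs; constant names again assume the SDK's generated naming.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	videoDesc := &mediaconvert.VideoDescription{
		// Pair the codec enum with its settings object: PRORES -> ProresSettings.
		CodecSettings: &mediaconvert.VideoCodecSettings{
			Codec:          aws.String(mediaconvert.VideoCodecProres),
			ProresSettings: &mediaconvert.ProresSettings{},
		},
		// PIC_TIMING_SEI writes timecodes into the output; per the note
		// above, only enable this when the input and output frame rates
		// are identical.
		TimecodeInsertion: aws.String(mediaconvert.VideoTimecodeInsertionPicTimingSei),
	}
	fmt.Println(videoDesc)
}
```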
} }, "VorbisSettings": { @@ -3660,9 +3660,9 @@ } }, "Vp8FramerateControl": { - "base": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator.", + "base": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction.", "refs": { - "Vp8Settings$FramerateControl": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator." + "Vp8Settings$FramerateControl": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction." } }, "Vp8FramerateConversionAlgorithm": { @@ -3672,15 +3672,15 @@ } }, "Vp8ParControl": { - "base": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses the PAR from your input video for your output. To specify a different PAR in the console, choose any value other than Follow source. To specify a different PAR by editing the JSON job specification, choose SPECIFIED. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings.", + "base": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source, uses the PAR from your input video for your output. 
To specify a different PAR in the console, choose any value other than Follow source. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings.", "refs": { - "Vp8Settings$ParControl": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses the PAR from your input video for your output. To specify a different PAR in the console, choose any value other than Follow source. To specify a different PAR by editing the JSON job specification, choose SPECIFIED. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings." + "Vp8Settings$ParControl": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source, uses the PAR from your input video for your output. To specify a different PAR in the console, choose any value other than Follow source. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings." } }, "Vp8QualityTuningLevel": { - "base": "Optional. Use Quality tuning level (qualityTuningLevel) to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, multi-pass encoding.", + "base": "Optional. Use Quality tuning level to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, multi-pass encoding.", "refs": { - "Vp8Settings$QualityTuningLevel": "Optional. Use Quality tuning level (qualityTuningLevel) to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, multi-pass encoding." + "Vp8Settings$QualityTuningLevel": "Optional. Use Quality tuning level to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, multi-pass encoding." } }, "Vp8RateControlMode": { @@ -3690,15 +3690,15 @@ } }, "Vp8Settings": { - "base": "Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the value VP8.", + "base": "Required when you set Codec to the value VP8.", "refs": { - "VideoCodecSettings$Vp8Settings": "Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the value VP8." + "VideoCodecSettings$Vp8Settings": "Required when you set Codec to the value VP8." } }, "Vp9FramerateControl": { - "base": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator.", + "base": "If you are using the console, use the Framerate setting to specify the frame rate for this output. 
If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction.", "refs": { - "Vp9Settings$FramerateControl": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator." + "Vp9Settings$FramerateControl": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction." } }, "Vp9FramerateConversionAlgorithm": { @@ -3708,15 +3708,15 @@ } }, "Vp9ParControl": { - "base": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses the PAR from your input video for your output. To specify a different PAR in the console, choose any value other than Follow source. To specify a different PAR by editing the JSON job specification, choose SPECIFIED. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings.", + "base": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source, uses the PAR from your input video for your output. To specify a different PAR in the console, choose any value other than Follow source. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings.", "refs": { "Vp9Settings$ParControl": "Optional. Specify how the service determines the pixel aspect ratio for this output. The default behavior is to use the same pixel aspect ratio as your input video." } }, "Vp9QualityTuningLevel": { - "base": "Optional. Use Quality tuning level (qualityTuningLevel) to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, multi-pass encoding.", + "base": "Optional. Use Quality tuning level to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, multi-pass encoding.", "refs": { - "Vp9Settings$QualityTuningLevel": "Optional. Use Quality tuning level (qualityTuningLevel) to choose how you want to trade off encoding speed for output video quality. 
The default behavior is faster, lower quality, multi-pass encoding." + "Vp9Settings$QualityTuningLevel": "Optional. Use Quality tuning level to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, multi-pass encoding." } }, "Vp9RateControlMode": { @@ -3726,9 +3726,9 @@ } }, "Vp9Settings": { - "base": "Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the value VP9.", + "base": "Required when you set Codec to the value VP9.", "refs": { - "VideoCodecSettings$Vp9Settings": "Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the value VP9." + "VideoCodecSettings$Vp9Settings": "Required when you set Codec to the value VP9." } }, "WarningGroup": { @@ -3750,9 +3750,9 @@ } }, "WavSettings": { - "base": "Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value WAV.", + "base": "Required when you set Codec to the value WAV.", "refs": { - "AudioCodecSettings$WavSettings": "Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value WAV." + "AudioCodecSettings$WavSettings": "Required when you set Codec to the value WAV." } }, "WebvttAccessibilitySubs": { @@ -3762,9 +3762,9 @@ } }, "WebvttDestinationSettings": { - "base": "Settings related to WebVTT captions. WebVTT is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html. When you work directly in your JSON job specification, include this object and any required children when you set destinationType to WebVTT.", + "base": "Settings related to WebVTT captions. WebVTT is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html.", "refs": { - "CaptionDestinationSettings$WebvttDestinationSettings": "Settings related to WebVTT captions. WebVTT is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html. When you work directly in your JSON job specification, include this object and any required children when you set destinationType to WebVTT." + "CaptionDestinationSettings$WebvttDestinationSettings": "Settings related to WebVTT captions. WebVTT is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html." } }, "WebvttHlsSourceSettings": { @@ -3774,9 +3774,9 @@ } }, "WebvttStylePassthrough": { - "base": "To use the available style, color, and position information from your input captions: Set Style passthrough (stylePassthrough) to Enabled (ENABLED). MediaConvert uses default settings when style and position information is missing from your input captions. To recreate the input captions exactly: Set Style passthrough to Strict (STRICT). 
MediaConvert automatically applies timing adjustments, including adjustments for frame rate conversion, ad avails, and input clipping. Your input captions format must be WebVTT. To ignore the style and position information from your input captions and use simplified output captions: Set Style passthrough to Disabled (DISABLED), or leave blank.", + "base": "To use the available style, color, and position information from your input captions: Set Style passthrough to Enabled. MediaConvert uses default settings when style and position information is missing from your input captions. To recreate the input captions exactly: Set Style passthrough to Strict. MediaConvert automatically applies timing adjustments, including adjustments for frame rate conversion, ad avails, and input clipping. Your input captions format must be WebVTT. To ignore the style and position information from your input captions and use simplified output captions: Set Style passthrough to Disabled, or leave blank.", "refs": { - "WebvttDestinationSettings$StylePassthrough": "To use the available style, color, and position information from your input captions: Set Style passthrough (stylePassthrough) to Enabled (ENABLED). MediaConvert uses default settings when style and position information is missing from your input captions. To recreate the input captions exactly: Set Style passthrough to Strict (STRICT). MediaConvert automatically applies timing adjustments, including adjustments for frame rate conversion, ad avails, and input clipping. Your input captions format must be WebVTT. To ignore the style and position information from your input captions and use simplified output captions: Set Style passthrough to Disabled (DISABLED), or leave blank." + "WebvttDestinationSettings$StylePassthrough": "To use the available style, color, and position information from your input captions: Set Style passthrough to Enabled. MediaConvert uses default settings when style and position information is missing from your input captions. To recreate the input captions exactly: Set Style passthrough to Strict. MediaConvert automatically applies timing adjustments, including adjustments for frame rate conversion, ad avails, and input clipping. Your input captions format must be WebVTT. To ignore the style and position information from your input captions and use simplified output captions: Set Style passthrough to Disabled, or leave blank." } }, "Xavc4kIntraCbgProfileClass": { @@ -3786,9 +3786,9 @@ } }, "Xavc4kIntraCbgProfileSettings": { - "base": "Required when you set (Profile) under (VideoDescription)>(CodecSettings)>(XavcSettings) to the value XAVC_4K_INTRA_CBG.", + "base": "Required when you set Profile to the value XAVC_4K_INTRA_CBG.", "refs": { - "XavcSettings$Xavc4kIntraCbgProfileSettings": "Required when you set (Profile) under (VideoDescription)>(CodecSettings)>(XavcSettings) to the value XAVC_4K_INTRA_CBG." + "XavcSettings$Xavc4kIntraCbgProfileSettings": "Required when you set Profile to the value XAVC_4K_INTRA_CBG." } }, "Xavc4kIntraVbrProfileClass": { @@ -3798,9 +3798,9 @@ } }, "Xavc4kIntraVbrProfileSettings": { - "base": "Required when you set (Profile) under (VideoDescription)>(CodecSettings)>(XavcSettings) to the value XAVC_4K_INTRA_VBR.", + "base": "Required when you set Profile to the value XAVC_4K_INTRA_VBR.", "refs": { - "XavcSettings$Xavc4kIntraVbrProfileSettings": "Required when you set (Profile) under (VideoDescription)>(CodecSettings)>(XavcSettings) to the value XAVC_4K_INTRA_VBR." 
+ "XavcSettings$Xavc4kIntraVbrProfileSettings": "Required when you set Profile to the value XAVC_4K_INTRA_VBR." } }, "Xavc4kProfileBitrateClass": { @@ -3816,21 +3816,21 @@ } }, "Xavc4kProfileQualityTuningLevel": { - "base": "Optional. Use Quality tuning level (qualityTuningLevel) to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, single-pass encoding.", + "base": "Optional. Use Quality tuning level to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, single-pass encoding.", "refs": { - "Xavc4kProfileSettings$QualityTuningLevel": "Optional. Use Quality tuning level (qualityTuningLevel) to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, single-pass encoding." + "Xavc4kProfileSettings$QualityTuningLevel": "Optional. Use Quality tuning level to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, single-pass encoding." } }, "Xavc4kProfileSettings": { - "base": "Required when you set (Profile) under (VideoDescription)>(CodecSettings)>(XavcSettings) to the value XAVC_4K.", + "base": "Required when you set Profile to the value XAVC_4K.", "refs": { - "XavcSettings$Xavc4kProfileSettings": "Required when you set (Profile) under (VideoDescription)>(CodecSettings)>(XavcSettings) to the value XAVC_4K." + "XavcSettings$Xavc4kProfileSettings": "Required when you set Profile to the value XAVC_4K." } }, "XavcAdaptiveQuantization": { - "base": "Keep the default value, Auto (AUTO), for this setting to have MediaConvert automatically apply the best types of quantization for your video content. When you want to apply your quantization settings manually, you must set Adaptive quantization (adaptiveQuantization) to a value other than Auto (AUTO). Use this setting to specify the strength of any adaptive quantization filters that you enable. If you don't want MediaConvert to do any adaptive quantization in this transcode, set Adaptive quantization to Off (OFF). Related settings: The value that you choose here applies to the following settings: Flicker adaptive quantization (flickerAdaptiveQuantization), Spatial adaptive quantization (spatialAdaptiveQuantization), and Temporal adaptive quantization (temporalAdaptiveQuantization).", + "base": "Keep the default value, Auto, for this setting to have MediaConvert automatically apply the best types of quantization for your video content. When you want to apply your quantization settings manually, you must set Adaptive quantization to a value other than Auto. Use this setting to specify the strength of any adaptive quantization filters that you enable. If you don't want MediaConvert to do any adaptive quantization in this transcode, set Adaptive quantization to Off. Related settings: The value that you choose here applies to the following settings: Flicker adaptive quantization (flickerAdaptiveQuantization), Spatial adaptive quantization, and Temporal adaptive quantization.", "refs": { - "XavcSettings$AdaptiveQuantization": "Keep the default value, Auto (AUTO), for this setting to have MediaConvert automatically apply the best types of quantization for your video content. When you want to apply your quantization settings manually, you must set Adaptive quantization (adaptiveQuantization) to a value other than Auto (AUTO). 
Use this setting to specify the strength of any adaptive quantization filters that you enable. If you don't want MediaConvert to do any adaptive quantization in this transcode, set Adaptive quantization to Off (OFF). Related settings: The value that you choose here applies to the following settings: Flicker adaptive quantization (flickerAdaptiveQuantization), Spatial adaptive quantization (spatialAdaptiveQuantization), and Temporal adaptive quantization (temporalAdaptiveQuantization)." + "XavcSettings$AdaptiveQuantization": "Keep the default value, Auto, for this setting to have MediaConvert automatically apply the best types of quantization for your video content. When you want to apply your quantization settings manually, you must set Adaptive quantization to a value other than Auto. Use this setting to specify the strength of any adaptive quantization filters that you enable. If you don't want MediaConvert to do any adaptive quantization in this transcode, set Adaptive quantization to Off. Related settings: The value that you choose here applies to the following settings: Flicker adaptive quantization (flickerAdaptiveQuantization), Spatial adaptive quantization, and Temporal adaptive quantization." } }, "XavcEntropyEncoding": { @@ -3840,16 +3840,16 @@ } }, "XavcFlickerAdaptiveQuantization": { - "base": "The best way to set up adaptive quantization is to keep the default value, Auto (AUTO), for the setting Adaptive quantization (XavcAdaptiveQuantization). When you do so, MediaConvert automatically applies the best types of quantization for your video content. Include this setting in your JSON job specification only when you choose to change the default value for Adaptive quantization. Enable this setting to have the encoder reduce I-frame pop. I-frame pop appears as a visual flicker that can arise when the encoder saves bits by copying some macroblocks many times from frame to frame, and then refreshes them at the I-frame. When you enable this setting, the encoder updates these macroblocks slightly more often to smooth out the flicker. This setting is disabled by default. Related setting: In addition to enabling this setting, you must also set Adaptive quantization (adaptiveQuantization) to a value other than Off (OFF) or Auto (AUTO). Use Adaptive quantization to adjust the degree of smoothing that Flicker adaptive quantization provides.", + "base": "The best way to set up adaptive quantization is to keep the default value, Auto, for the setting Adaptive quantization. When you do so, MediaConvert automatically applies the best types of quantization for your video content. Include this setting in your JSON job specification only when you choose to change the default value for Adaptive quantization. Enable this setting to have the encoder reduce I-frame pop. I-frame pop appears as a visual flicker that can arise when the encoder saves bits by copying some macroblocks many times from frame to frame, and then refreshes them at the I-frame. When you enable this setting, the encoder updates these macroblocks slightly more often to smooth out the flicker. This setting is disabled by default. Related setting: In addition to enabling this setting, you must also set Adaptive quantization to a value other than Off or Auto. 
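The XAVC entries above follow the same pattern: the Profile enum value must be paired with its matching settings object, and manual adaptive quantization only applies when Adaptive quantization is something other than Auto. An illustrative sketch, not part of this release's diff; constant names such as Xavc4kProfileQualityTuningLevelMultiPassHq assume the SDK's generated naming.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	xavc := &mediaconvert.XavcSettings{
		// Choosing Profile XAVC_4K requires the matching settings object.
		Profile: aws.String(mediaconvert.XavcProfileXavc4k),
		Xavc4kProfileSettings: &mediaconvert.Xavc4kProfileSettings{
			// Slower, higher-quality multi-pass encoding instead of the
			// default faster single pass.
			QualityTuningLevel: aws.String(mediaconvert.Xavc4kProfileQualityTuningLevelMultiPassHq),
		},
		// Any value other than AUTO (or OFF) lets the manual adaptive
		// quantization settings take effect, per the description above.
		AdaptiveQuantization: aws.String(mediaconvert.XavcAdaptiveQuantizationHigh),
	}
	fmt.Println(xavc)
}
```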
Use Adaptive quantization to adjust the degree of smoothing that Flicker adaptive quantization provides.", "refs": { - "Xavc4kProfileSettings$FlickerAdaptiveQuantization": "The best way to set up adaptive quantization is to keep the default value, Auto (AUTO), for the setting Adaptive quantization (XavcAdaptiveQuantization). When you do so, MediaConvert automatically applies the best types of quantization for your video content. Include this setting in your JSON job specification only when you choose to change the default value for Adaptive quantization. Enable this setting to have the encoder reduce I-frame pop. I-frame pop appears as a visual flicker that can arise when the encoder saves bits by copying some macroblocks many times from frame to frame, and then refreshes them at the I-frame. When you enable this setting, the encoder updates these macroblocks slightly more often to smooth out the flicker. This setting is disabled by default. Related setting: In addition to enabling this setting, you must also set Adaptive quantization (adaptiveQuantization) to a value other than Off (OFF) or Auto (AUTO). Use Adaptive quantization to adjust the degree of smoothing that Flicker adaptive quantization provides.", - "XavcHdProfileSettings$FlickerAdaptiveQuantization": "The best way to set up adaptive quantization is to keep the default value, Auto (AUTO), for the setting Adaptive quantization (XavcAdaptiveQuantization). When you do so, MediaConvert automatically applies the best types of quantization for your video content. Include this setting in your JSON job specification only when you choose to change the default value for Adaptive quantization. Enable this setting to have the encoder reduce I-frame pop. I-frame pop appears as a visual flicker that can arise when the encoder saves bits by copying some macroblocks many times from frame to frame, and then refreshes them at the I-frame. When you enable this setting, the encoder updates these macroblocks slightly more often to smooth out the flicker. This setting is disabled by default. Related setting: In addition to enabling this setting, you must also set Adaptive quantization (adaptiveQuantization) to a value other than Off (OFF) or Auto (AUTO). Use Adaptive quantization to adjust the degree of smoothing that Flicker adaptive quantization provides." + "Xavc4kProfileSettings$FlickerAdaptiveQuantization": "The best way to set up adaptive quantization is to keep the default value, Auto, for the setting Adaptive quantization. When you do so, MediaConvert automatically applies the best types of quantization for your video content. Include this setting in your JSON job specification only when you choose to change the default value for Adaptive quantization. Enable this setting to have the encoder reduce I-frame pop. I-frame pop appears as a visual flicker that can arise when the encoder saves bits by copying some macroblocks many times from frame to frame, and then refreshes them at the I-frame. When you enable this setting, the encoder updates these macroblocks slightly more often to smooth out the flicker. This setting is disabled by default. Related setting: In addition to enabling this setting, you must also set Adaptive quantization to a value other than Off or Auto. 
Use Adaptive quantization to adjust the degree of smoothing that Flicker adaptive quantization provides.", + "XavcHdProfileSettings$FlickerAdaptiveQuantization": "The best way to set up adaptive quantization is to keep the default value, Auto, for the setting Adaptive quantization. When you do so, MediaConvert automatically applies the best types of quantization for your video content. Include this setting in your JSON job specification only when you choose to change the default value for Adaptive quantization. Enable this setting to have the encoder reduce I-frame pop. I-frame pop appears as a visual flicker that can arise when the encoder saves bits by copying some macroblocks many times from frame to frame, and then refreshes them at the I-frame. When you enable this setting, the encoder updates these macroblocks slightly more often to smooth out the flicker. This setting is disabled by default. Related setting: In addition to enabling this setting, you must also set Adaptive quantization to a value other than Off or Auto. Use Adaptive quantization to adjust the degree of smoothing that Flicker adaptive quantization provides." } }, "XavcFramerateControl": { - "base": "If you are using the console, use the Frame rate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list. The framerates shown in the dropdown list are decimal approximations of fractions. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate that you specify in the settings FramerateNumerator and FramerateDenominator.", + "base": "If you are using the console, use the Frame rate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list. The framerates shown in the dropdown list are decimal approximations of fractions.", "refs": { - "XavcSettings$FramerateControl": "If you are using the console, use the Frame rate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list. The framerates shown in the dropdown list are decimal approximations of fractions. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate that you specify in the settings FramerateNumerator and FramerateDenominator." + "XavcSettings$FramerateControl": "If you are using the console, use the Frame rate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list. The framerates shown in the dropdown list are decimal approximations of fractions." 
} }, "XavcFramerateConversionAlgorithm": { @@ -3859,10 +3859,10 @@ } }, "XavcGopBReference": { - "base": "Specify whether the encoder uses B-frames as reference frames for other pictures in the same GOP. Choose Allow (ENABLED) to allow the encoder to use B-frames as reference frames. Choose Don't allow (DISABLED) to prevent the encoder from using B-frames as reference frames.", + "base": "Specify whether the encoder uses B-frames as reference frames for other pictures in the same GOP. Choose Allow to allow the encoder to use B-frames as reference frames. Choose Don't allow to prevent the encoder from using B-frames as reference frames.", "refs": { - "Xavc4kProfileSettings$GopBReference": "Specify whether the encoder uses B-frames as reference frames for other pictures in the same GOP. Choose Allow (ENABLED) to allow the encoder to use B-frames as reference frames. Choose Don't allow (DISABLED) to prevent the encoder from using B-frames as reference frames.", - "XavcHdProfileSettings$GopBReference": "Specify whether the encoder uses B-frames as reference frames for other pictures in the same GOP. Choose Allow (ENABLED) to allow the encoder to use B-frames as reference frames. Choose Don't allow (DISABLED) to prevent the encoder from using B-frames as reference frames." + "Xavc4kProfileSettings$GopBReference": "Specify whether the encoder uses B-frames as reference frames for other pictures in the same GOP. Choose Allow to allow the encoder to use B-frames as reference frames. Choose Don't allow to prevent the encoder from using B-frames as reference frames.", + "XavcHdProfileSettings$GopBReference": "Specify whether the encoder uses B-frames as reference frames for other pictures in the same GOP. Choose Allow to allow the encoder to use B-frames as reference frames. Choose Don't allow to prevent the encoder from using B-frames as reference frames." } }, "XavcHdIntraCbgProfileClass": { @@ -3872,9 +3872,9 @@ } }, "XavcHdIntraCbgProfileSettings": { - "base": "Required when you set (Profile) under (VideoDescription)>(CodecSettings)>(XavcSettings) to the value XAVC_HD_INTRA_CBG.", + "base": "Required when you set Profile to the value XAVC_HD_INTRA_CBG.", "refs": { - "XavcSettings$XavcHdIntraCbgProfileSettings": "Required when you set (Profile) under (VideoDescription)>(CodecSettings)>(XavcSettings) to the value XAVC_HD_INTRA_CBG." + "XavcSettings$XavcHdIntraCbgProfileSettings": "Required when you set Profile to the value XAVC_HD_INTRA_CBG." } }, "XavcHdProfileBitrateClass": { @@ -3884,27 +3884,27 @@ } }, "XavcHdProfileQualityTuningLevel": { - "base": "Optional. Use Quality tuning level (qualityTuningLevel) to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, single-pass encoding.", + "base": "Optional. Use Quality tuning level to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, single-pass encoding.", "refs": { - "XavcHdProfileSettings$QualityTuningLevel": "Optional. Use Quality tuning level (qualityTuningLevel) to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, single-pass encoding." + "XavcHdProfileSettings$QualityTuningLevel": "Optional. Use Quality tuning level to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, single-pass encoding." 
} }, "XavcHdProfileSettings": { - "base": "Required when you set (Profile) under (VideoDescription)>(CodecSettings)>(XavcSettings) to the value XAVC_HD.", + "base": "Required when you set Profile to the value XAVC_HD.", "refs": { - "XavcSettings$XavcHdProfileSettings": "Required when you set (Profile) under (VideoDescription)>(CodecSettings)>(XavcSettings) to the value XAVC_HD." + "XavcSettings$XavcHdProfileSettings": "Required when you set Profile to the value XAVC_HD." } }, "XavcHdProfileTelecine": { - "base": "Ignore this setting unless you set Frame rate (framerateNumerator divided by framerateDenominator) to 29.970. If your input framerate is 23.976, choose Hard (HARD). Otherwise, keep the default value None (NONE). For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/working-with-telecine-and-inverse-telecine.html.", + "base": "Ignore this setting unless you set Frame rate (framerateNumerator divided by framerateDenominator) to 29.970. If your input framerate is 23.976, choose Hard. Otherwise, keep the default value None. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/working-with-telecine-and-inverse-telecine.html.", "refs": { - "XavcHdProfileSettings$Telecine": "Ignore this setting unless you set Frame rate (framerateNumerator divided by framerateDenominator) to 29.970. If your input framerate is 23.976, choose Hard (HARD). Otherwise, keep the default value None (NONE). For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/working-with-telecine-and-inverse-telecine.html." + "XavcHdProfileSettings$Telecine": "Ignore this setting unless you set Frame rate (framerateNumerator divided by framerateDenominator) to 29.970. If your input framerate is 23.976, choose Hard. Otherwise, keep the default value None. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/working-with-telecine-and-inverse-telecine.html." } }, "XavcInterlaceMode": { - "base": "Choose the scan line type for the output. Keep the default value, Progressive (PROGRESSIVE) to create a progressive output, regardless of the scan type of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) to create an output that's interlaced with the same field polarity throughout. Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. If the source is progressive, the output will be interlaced with top field bottom field first, depending on which of the Follow options you choose.", + "base": "Choose the scan line type for the output. Keep the default value, Progressive to create a progressive output, regardless of the scan type of your input. Use Top field first or Bottom field first to create an output that's interlaced with the same field polarity throughout. Use Follow, default top or Follow, default bottom to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. 
If the source is progressive, the output will be interlaced with top field or bottom field first, depending on which of the Follow options you choose.", "refs": { - "XavcHdProfileSettings$InterlaceMode": "Choose the scan line type for the output. Keep the default value, Progressive (PROGRESSIVE) to create a progressive output, regardless of the scan type of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) to create an output that's interlaced with the same field polarity throughout. Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. If the source is progressive, the output will be interlaced with top field bottom field first, depending on which of the Follow options you choose." + "XavcHdProfileSettings$InterlaceMode": "Choose the scan line type for the output. Keep the default value, Progressive to create a progressive output, regardless of the scan type of your input. Use Top field first or Bottom field first to create an output that's interlaced with the same field polarity throughout. Use Follow, default top or Follow, default bottom to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. If the source is progressive, the output will be interlaced with top field or bottom field first, depending on which of the Follow options you choose." } }, "XavcProfile": { @@ -3914,40 +3914,40 @@ } }, "XavcSettings": { - "base": "Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the value XAVC.", + "base": "Required when you set Codec to the value XAVC.", "refs": { - "VideoCodecSettings$XavcSettings": "Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the value XAVC." + "VideoCodecSettings$XavcSettings": "Required when you set Codec to the value XAVC." } }, "XavcSlowPal": { - "base": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output by relabeling the video frames and resampling your audio. Note that enabling this setting will slightly reduce the duration of your video. Related settings: You must also set Frame rate to 25. In your JSON job specification, set (framerateControl) to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to 1.", + "base": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output by relabeling the video frames and resampling your audio. Note that enabling this setting will slightly reduce the duration of your video. Related settings: You must also set Frame rate to 25.", "refs": { - "XavcSettings$SlowPal": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output by relabeling the video frames and resampling your audio. Note that enabling this setting will slightly reduce the duration of your video. Related settings: You must also set Frame rate to 25. 
In your JSON job specification, set (framerateControl) to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to 1." + "XavcSettings$SlowPal": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output by relabeling the video frames and resampling your audio. Note that enabling this setting will slightly reduce the duration of your video. Related settings: You must also set Frame rate to 25." } }, "XavcSpatialAdaptiveQuantization": { - "base": "The best way to set up adaptive quantization is to keep the default value, Auto (AUTO), for the setting Adaptive quantization (adaptiveQuantization). When you do so, MediaConvert automatically applies the best types of quantization for your video content. Include this setting in your JSON job specification only when you choose to change the default value for Adaptive quantization. For this setting, keep the default value, Enabled (ENABLED), to adjust quantization within each frame based on spatial variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to disable this feature. Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization (adaptiveQuantization) depending on your content. For homogeneous content, such as cartoons and video games, set it to Low. For content with a wider variety of textures, set it to High or Higher.", + "base": "The best way to set up adaptive quantization is to keep the default value, Auto, for the setting Adaptive quantization. When you do so, MediaConvert automatically applies the best types of quantization for your video content. Include this setting in your JSON job specification only when you choose to change the default value for Adaptive quantization. For this setting, keep the default value, Enabled, to adjust quantization within each frame based on spatial variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to disable this feature. Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization depending on your content. For homogeneous content, such as cartoons and video games, set it to Low. 
For content with a wider variety of textures, set it to High or Higher.", "refs": { - "XavcSettings$SpatialAdaptiveQuantization": "The best way to set up adaptive quantization is to keep the default value, Auto (AUTO), for the setting Adaptive quantization (adaptiveQuantization). When you do so, MediaConvert automatically applies the best types of quantization for your video content. Include this setting in your JSON job specification only when you choose to change the default value for Adaptive quantization. For this setting, keep the default value, Enabled (ENABLED), to adjust quantization within each frame based on spatial variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to disable this feature. Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization (adaptiveQuantization) depending on your content. For homogeneous content, such as cartoons and video games, set it to Low. For content with a wider variety of textures, set it to High or Higher." + "XavcSettings$SpatialAdaptiveQuantization": "The best way to set up adaptive quantization is to keep the default value, Auto, for the setting Adaptive quantization. When you do so, MediaConvert automatically applies the best types of quantization for your video content. Include this setting in your JSON job specification only when you choose to change the default value for Adaptive quantization. For this setting, keep the default value, Enabled, to adjust quantization within each frame based on spatial variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to disable this feature. Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization depending on your content. For homogeneous content, such as cartoons and video games, set it to Low. For content with a wider variety of textures, set it to High or Higher." } }, "XavcTemporalAdaptiveQuantization": { - "base": "The best way to set up adaptive quantization is to keep the default value, Auto (AUTO), for the setting Adaptive quantization (adaptiveQuantization). When you do so, MediaConvert automatically applies the best types of quantization for your video content. Include this setting in your JSON job specification only when you choose to change the default value for Adaptive quantization. 
For this setting, keep the default value, Enabled (ENABLED), to adjust quantization within each frame based on temporal variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas of the frame that aren't moving and uses more bits on complex objects with sharp edges that move a lot. For example, this feature improves the readability of text tickers on newscasts and scoreboards on sports matches. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen that doesn't have moving objects with sharp edges, such as sports athletes' faces, you might choose to disable this feature. Related setting: When you enable temporal adaptive quantization, adjust the strength of the filter with the setting Adaptive quantization (adaptiveQuantization).", + "base": "The best way to set up adaptive quantization is to keep the default value, Auto, for the setting Adaptive quantization. When you do so, MediaConvert automatically applies the best types of quantization for your video content. Include this setting in your JSON job specification only when you choose to change the default value for Adaptive quantization. For this setting, keep the default value, Enabled, to adjust quantization within each frame based on temporal variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas of the frame that aren't moving and uses more bits on complex objects with sharp edges that move a lot. For example, this feature improves the readability of text tickers on newscasts and scoreboards on sports matches. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen that doesn't have moving objects with sharp edges, such as sports athletes' faces, you might choose to disable this feature. Related setting: When you enable temporal adaptive quantization, adjust the strength of the filter with the setting Adaptive quantization.", "refs": { - "XavcSettings$TemporalAdaptiveQuantization": "The best way to set up adaptive quantization is to keep the default value, Auto (AUTO), for the setting Adaptive quantization (adaptiveQuantization). When you do so, MediaConvert automatically applies the best types of quantization for your video content. Include this setting in your JSON job specification only when you choose to change the default value for Adaptive quantization. For this setting, keep the default value, Enabled (ENABLED), to adjust quantization within each frame based on temporal variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas of the frame that aren't moving and uses more bits on complex objects with sharp edges that move a lot. For example, this feature improves the readability of text tickers on newscasts and scoreboards on sports matches. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen that doesn't have moving objects with sharp edges, such as sports athletes' faces, you might choose to disable this feature. 
Related setting: When you enable temporal adaptive quantization, adjust the strength of the filter with the setting Adaptive quantization (adaptiveQuantization)." + "XavcSettings$TemporalAdaptiveQuantization": "The best way to set up adaptive quantization is to keep the default value, Auto, for the setting Adaptive quantization. When you do so, MediaConvert automatically applies the best types of quantization for your video content. Include this setting in your JSON job specification only when you choose to change the default value for Adaptive quantization. For this setting, keep the default value, Enabled, to adjust quantization within each frame based on temporal variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas of the frame that aren't moving and uses more bits on complex objects with sharp edges that move a lot. For example, this feature improves the readability of text tickers on newscasts and scoreboards on sports matches. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen that doesn't have moving objects with sharp edges, such as sports athletes' faces, you might choose to disable this feature. Related setting: When you enable temporal adaptive quantization, adjust the strength of the filter with the setting Adaptive quantization." } }, "__doubleMin0": { "base": null, "refs": { "Av1Settings$GopSize": "Specify the GOP length (keyframe interval) in frames. With AV1, MediaConvert doesn't support GOP length in seconds. This value must be greater than zero and preferably equal to 1 + ((numberBFrames + 1) * x), where x is an integer value.", - "H264Settings$GopSize": "Use this setting only when you set GOP mode control (GopSizeUnits) to Specified, frames (FRAMES) or Specified, seconds (SECONDS). Specify the GOP length using a whole number of frames or a decimal value of seconds. MediaConvert will interpret this value as frames or seconds depending on the value you choose for GOP mode control (GopSizeUnits). If you want to allow MediaConvert to automatically determine GOP size, leave GOP size blank and set GOP mode control to Auto (AUTO). If your output group specifies HLS, DASH, or CMAF, leave GOP size blank and set GOP mode control to Auto in each output in your output group.", - "H265Settings$GopSize": "Use this setting only when you set GOP mode control (GopSizeUnits) to Specified, frames (FRAMES) or Specified, seconds (SECONDS). Specify the GOP length using a whole number of frames or a decimal value of seconds. MediaConvert will interpret this value as frames or seconds depending on the value you choose for GOP mode control (GopSizeUnits). If you want to allow MediaConvert to automatically determine GOP size, leave GOP size blank and set GOP mode control to Auto (AUTO). If your output group specifies HLS, DASH, or CMAF, leave GOP size blank and set GOP mode control to Auto in each output in your output group.", + "H264Settings$GopSize": "Use this setting only when you set GOP mode control to Specified, frames or Specified, seconds. Specify the GOP length using a whole number of frames or a decimal value of seconds. MediaConvert will interpret this value as frames or seconds depending on the value you choose for GOP mode control. If you want to allow MediaConvert to automatically determine GOP size, leave GOP size blank and set GOP mode control to Auto. If your output group specifies HLS, DASH, or CMAF, leave GOP size blank and set GOP mode control to Auto in each output in your output group.",
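For the H264Settings$GopSize entry above, the frames-versus-seconds interpretation and the Auto behavior look like this in the SDK's generated types. A sketch, assuming aws-sdk-go v1 and raw enum strings in place of the generated constants:

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

// fixedGop requests explicit two-second GOPs: GopSize is interpreted
// according to GopSizeUnits.
func fixedGop() *mediaconvert.H264Settings {
	return &mediaconvert.H264Settings{
		GopSizeUnits: aws.String("SECONDS"),
		GopSize:      aws.Float64(2.0),
	}
}

// autoGop lets MediaConvert determine GOP size, the recommendation above
// for HLS, DASH, and CMAF output groups: leave GopSize unset.
func autoGop() *mediaconvert.H264Settings {
	return &mediaconvert.H264Settings{
		GopSizeUnits: aws.String("AUTO"),
	}
}
```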
+ "H265Settings$GopSize": "Use this setting only when you set GOP mode control to Specified, frames or Specified, seconds. Specify the GOP length using a whole number of frames or a decimal value of seconds. MediaConvert will interpret this value as frames or seconds depending on the value you choose for GOP mode control. If you want to allow MediaConvert to automatically determine GOP size, leave GOP size blank and set GOP mode control to Auto. If your output group specifies HLS, DASH, or CMAF, leave GOP size blank and set GOP mode control to Auto in each output in your output group.", "KantarWatermarkSettings$FileOffset": "Optional. Specify an offset, in whole seconds, from the start of your output and the beginning of the watermarking. When you don't specify an offset, Kantar defaults to zero.", "M2tsSettings$FragmentTime": "The length, in seconds, of each fragment. Only used with EBP markers.", "M2tsSettings$NullPacketBitrate": "Value in bits per second of extra null packets to insert into the transport stream. This can be used if a downstream encryption system requires periodic null packets.", "M2tsSettings$SegmentationTime": "Specify the length, in seconds, of each segment. Required unless markers is set to _none_.", - "Mpeg2Settings$GopSize": "Specify the interval between keyframes, in seconds or frames, for this output. Default: 12 Related settings: When you specify the GOP size in seconds, set GOP mode control (GopSizeUnits) to Specified, seconds (SECONDS). The default value for GOP mode control (GopSizeUnits) is Frames (FRAMES).", + "Mpeg2Settings$GopSize": "Specify the interval between keyframes, in seconds or frames, for this output. Default: 12. Related settings: When you specify the GOP size in seconds, set GOP mode control to Specified, seconds. The default value for GOP mode control is Frames.", "Vp8Settings$GopSize": "GOP Length (keyframe interval) in frames. Must be greater than zero.", "Vp9Settings$GopSize": "GOP Length (keyframe interval) in frames. Must be greater than zero." } @@ -3974,14 +3974,14 @@ "__doubleMinNegative59Max0": { "base": null, "refs": { - "AudioNormalizationSettings$TargetLkfs": "When you use Audio normalization (AudioNormalizationSettings), optionally use this setting to specify a target loudness. If you don't specify a value here, the encoder chooses a value for you, based on the algorithm that you choose for Algorithm (algorithm). If you choose algorithm 1770-1, the encoder will choose -24 LKFS; otherwise, the encoder will choose -23 LKFS." + "AudioNormalizationSettings$TargetLkfs": "When you use Audio normalization, optionally use this setting to specify a target loudness. If you don't specify a value here, the encoder chooses a value for you, based on the algorithm that you choose for Algorithm. If you choose algorithm 1770-1, the encoder will choose -24 LKFS; otherwise, the encoder will choose -23 LKFS." } }, "__doubleMinNegative60Max3": { "base": null, "refs": { - "Eac3Settings$LoRoCenterMixLevel": "Specify a value for the following Dolby Digital Plus setting: Left only/Right only center mix (Lo/Ro center). MediaConvert uses this value for downmixing. How the service uses this value depends on the value that you choose for Stereo downmix (Eac3StereoDownmix). 
Valid values: 3.0, 1.5, 0.0, -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. This setting applies only if you keep the default value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2) for the setting Coding mode (Eac3CodingMode). If you choose a different value for Coding mode, the service ignores Left only/Right only center (loRoCenterMixLevel).", - "Eac3Settings$LtRtCenterMixLevel": "Specify a value for the following Dolby Digital Plus setting: Left total/Right total center mix (Lt/Rt center). MediaConvert uses this value for downmixing. How the service uses this value depends on the value that you choose for Stereo downmix (Eac3StereoDownmix). Valid values: 3.0, 1.5, 0.0, -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. This setting applies only if you keep the default value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2) for the setting Coding mode (Eac3CodingMode). If you choose a different value for Coding mode, the service ignores Left total/Right total center (ltRtCenterMixLevel)." + "Eac3Settings$LoRoCenterMixLevel": "Specify a value for the following Dolby Digital Plus setting: Left only/Right only center mix. MediaConvert uses this value for downmixing. How the service uses this value depends on the value that you choose for Stereo downmix. Valid values: 3.0, 1.5, 0.0, -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. This setting applies only if you keep the default value of 3/2 - L, R, C, Ls, Rs for the setting Coding mode. If you choose a different value for Coding mode, the service ignores Left only/Right only center.", + "Eac3Settings$LtRtCenterMixLevel": "Specify a value for the following Dolby Digital Plus setting: Left total/Right total center mix. MediaConvert uses this value for downmixing. How the service uses this value depends on the value that you choose for Stereo downmix. Valid values: 3.0, 1.5, 0.0, -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. This setting applies only if you keep the default value of 3/2 - L, R, C, Ls, Rs for the setting Coding mode. If you choose a different value for Coding mode, the service ignores Left total/Right total center." } }, "__doubleMinNegative60Max6": { @@ -3993,17 +3993,17 @@ "__doubleMinNegative60MaxNegative1": { "base": null, "refs": { - "Eac3AtmosSettings$LoRoSurroundMixLevel": "Specify a value for the following Dolby Atmos setting: Left only/Right only (Lo/Ro surround). MediaConvert uses this value for downmixing. Default value: -3 dB (ATMOS_STORAGE_DDP_MIXLEV_MINUS_3_DB). Valid values: -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. Related setting: How the service uses this value depends on the value that you choose for Stereo downmix (Eac3AtmosStereoDownmix). Related setting: To have MediaConvert use this value, keep the default value, Custom (SPECIFIED) for the setting Downmix control (DownmixControl). Otherwise, MediaConvert ignores Left only/Right only surround (LoRoSurroundMixLevel).", - "Eac3AtmosSettings$LtRtSurroundMixLevel": "Specify a value for the following Dolby Atmos setting: Left total/Right total surround mix (Lt/Rt surround). MediaConvert uses this value for downmixing. Default value: -3 dB (ATMOS_STORAGE_DDP_MIXLEV_MINUS_3_DB) Valid values: -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. Related setting: How the service uses this value depends on the value that you choose for Stereo downmix (Eac3AtmosStereoDownmix). 
Related setting: To have MediaConvert use this value, keep the default value, Custom (SPECIFIED) for the setting Downmix control (DownmixControl). Otherwise, the service ignores Left total/Right total surround (LtRtSurroundMixLevel).", - "Eac3Settings$LoRoSurroundMixLevel": "Specify a value for the following Dolby Digital Plus setting: Left only/Right only (Lo/Ro surround). MediaConvert uses this value for downmixing. How the service uses this value depends on the value that you choose for Stereo downmix (Eac3StereoDownmix). Valid values: -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. This setting applies only if you keep the default value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2) for the setting Coding mode (Eac3CodingMode). If you choose a different value for Coding mode, the service ignores Left only/Right only surround (loRoSurroundMixLevel).", - "Eac3Settings$LtRtSurroundMixLevel": "Specify a value for the following Dolby Digital Plus setting: Left total/Right total surround mix (Lt/Rt surround). MediaConvert uses this value for downmixing. How the service uses this value depends on the value that you choose for Stereo downmix (Eac3StereoDownmix). Valid values: -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. This setting applies only if you keep the default value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2) for the setting Coding mode (Eac3CodingMode). If you choose a different value for Coding mode, the service ignores Left total/Right total surround (ltRtSurroundMixLevel)." + "Eac3AtmosSettings$LoRoSurroundMixLevel": "Specify a value for the following Dolby Atmos setting: Left only/Right only. MediaConvert uses this value for downmixing. Default value: -3 dB. Valid values: -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. Related setting: How the service uses this value depends on the value that you choose for Stereo downmix. Related setting: To have MediaConvert use this value, keep the default value, Custom, for the setting Downmix control. Otherwise, MediaConvert ignores Left only/Right only surround.", + "Eac3AtmosSettings$LtRtSurroundMixLevel": "Specify a value for the following Dolby Atmos setting: Left total/Right total surround mix (Lt/Rt surround). MediaConvert uses this value for downmixing. Default value: -3 dB. Valid values: -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. Related setting: How the service uses this value depends on the value that you choose for Stereo downmix. Related setting: To have MediaConvert use this value, keep the default value, Custom, for the setting Downmix control. Otherwise, the service ignores Left total/Right total surround.", + "Eac3Settings$LoRoSurroundMixLevel": "Specify a value for the following Dolby Digital Plus setting: Left only/Right only. MediaConvert uses this value for downmixing. How the service uses this value depends on the value that you choose for Stereo downmix. Valid values: -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. This setting applies only if you keep the default value of 3/2 - L, R, C, Ls, Rs for the setting Coding mode. If you choose a different value for Coding mode, the service ignores Left only/Right only surround.", + "Eac3Settings$LtRtSurroundMixLevel": "Specify a value for the following Dolby Digital Plus setting: Left total/Right total surround mix. MediaConvert uses this value for downmixing. How the service uses this value depends on the value that you choose for Stereo downmix. Valid values: -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. This setting applies only if you keep the default value of 3/2 - L, R, C, Ls, Rs for the setting Coding mode. If you choose a different value for Coding mode, the service ignores Left total/Right total surround." } },
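A sketch of the Dolby Digital Plus downmix entries above, using the SDK's Eac3Settings type from aws-sdk-go v1. The -3.0 values are an illustrative pick from the listed valid values, and raw enum strings stand in for the generated constants:

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

// eac3Downmix sets the surround mix levels that apply only with the
// default 3/2 coding mode; how they are used depends on StereoDownmix.
func eac3Downmix() *mediaconvert.Eac3Settings {
	return &mediaconvert.Eac3Settings{
		CodingMode:    aws.String("CODING_MODE_3_2"), // keep 3/2 - L, R, C, Ls, Rs
		StereoDownmix: aws.String("LO_RO"),
		// Valid values: -1.5, -3.0, -4.5, -6.0, and -60 (mutes the channel).
		LoRoSurroundMixLevel: aws.Float64(-3.0),
		LtRtSurroundMixLevel: aws.Float64(-3.0),
	}
}
```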
"__doubleMinNegative6Max3": { "base": null, "refs": { - "Eac3AtmosSettings$LoRoCenterMixLevel": "Specify a value for the following Dolby Atmos setting: Left only/Right only center mix (Lo/Ro center). MediaConvert uses this value for downmixing. Default value: -3 dB (ATMOS_STORAGE_DDP_MIXLEV_MINUS_3_DB). Valid values: 3.0, 1.5, 0.0, -1.5, -3.0, -4.5, and -6.0. Related setting: How the service uses this value depends on the value that you choose for Stereo downmix (Eac3AtmosStereoDownmix). Related setting: To have MediaConvert use this value, keep the default value, Custom (SPECIFIED) for the setting Downmix control (DownmixControl). Otherwise, MediaConvert ignores Left only/Right only center (LoRoCenterMixLevel).", - "Eac3AtmosSettings$LtRtCenterMixLevel": "Specify a value for the following Dolby Atmos setting: Left total/Right total center mix (Lt/Rt center). MediaConvert uses this value for downmixing. Default value: -3 dB (ATMOS_STORAGE_DDP_MIXLEV_MINUS_3_DB) Valid values: 3.0, 1.5, 0.0, -1.5, -3.0, -4.5, and -6.0. Related setting: How the service uses this value depends on the value that you choose for Stereo downmix (Eac3AtmosStereoDownmix). Related setting: To have MediaConvert use this value, keep the default value, Custom (SPECIFIED) for the setting Downmix control (DownmixControl). Otherwise, MediaConvert ignores Left total/Right total center (LtRtCenterMixLevel)." + "Eac3AtmosSettings$LoRoCenterMixLevel": "Specify a value for the following Dolby Atmos setting: Left only/Right only center mix (Lo/Ro center). MediaConvert uses this value for downmixing. Default value: -3 dB. Valid values: 3.0, 1.5, 0.0, -1.5, -3.0, -4.5, and -6.0. Related setting: How the service uses this value depends on the value that you choose for Stereo downmix. Related setting: To have MediaConvert use this value, keep the default value, Custom, for the setting Downmix control. Otherwise, MediaConvert ignores Left only/Right only center.", + "Eac3AtmosSettings$LtRtCenterMixLevel": "Specify a value for the following Dolby Atmos setting: Left total/Right total center mix (Lt/Rt center). MediaConvert uses this value for downmixing. Default value: -3 dB. Valid values: 3.0, 1.5, 0.0, -1.5, -3.0, -4.5, and -6.0. Related setting: How the service uses this value depends on the value that you choose for Stereo downmix. Related setting: To have MediaConvert use this value, keep the default value, Custom, for the setting Downmix control. Otherwise, MediaConvert ignores Left total/Right total center." } }, "__doubleMinNegative8Max0": { @@ -4040,14 +4040,14 @@ "__integerMin0Max1": { "base": null, "refs": { - "Mp4Settings$CttsVersion": "Ignore this setting unless compliance to the CTTS box version specification matters in your workflow. Specify a value of 1 to set your CTTS box version to 1 and make your output compliant with the specification. When you specify a value of 1, you must also set CSLG atom (cslgAtom) to the value INCLUDE. Keep the default value 0 to set your CTTS box version to 0. This can provide backward compatibility for some players and packagers." + "Mp4Settings$CttsVersion": "Ignore this setting unless compliance with the CTTS box version specification matters in your workflow. Specify a value of 1 to set your CTTS box version to 1 and make your output compliant with the specification. When you specify a value of 1, you must also set CSLG atom to the value INCLUDE. Keep the default value 0 to set your CTTS box version to 0. This can provide backward compatibility for some players and packagers." } },
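The Mp4Settings$CttsVersion entry above couples two fields: CTTS box version 1 requires the CSLG atom. A minimal sketch with aws-sdk-go v1, raw enum string in place of the generated constant:

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

// specCompliantCtts sets CTTS box version 1 for specification compliance,
// which in turn requires including the CSLG atom.
func specCompliantCtts() *mediaconvert.Mp4Settings {
	return &mediaconvert.Mp4Settings{
		CttsVersion: aws.Int64(1),
		CslgAtom:    aws.String("INCLUDE"), // required when CttsVersion is 1
	}
}
```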
"__integerMin0Max10": { "base": null, "refs": { - "BurninDestinationSettings$OutlineSize": "Specify the Outline size (OutlineSize) of the caption text, in pixels. Leave Outline size blank and set Style passthrough (StylePassthrough) to enabled to use the outline size data from your input captions, if present.", - "DvbSubDestinationSettings$OutlineSize": "Specify the Outline size (OutlineSize) of the caption text, in pixels. Leave Outline size blank and set Style passthrough (StylePassthrough) to enabled to use the outline size data from your input captions, if present. Within your job settings, all of your DVB-Sub settings must be identical." + "BurninDestinationSettings$OutlineSize": "Specify the Outline size of the caption text, in pixels. Leave Outline size blank and set Style passthrough to enabled to use the outline size data from your input captions, if present.", + "DvbSubDestinationSettings$OutlineSize": "Specify the Outline size of the caption text, in pixels. Leave Outline size blank and set Style passthrough to enabled to use the outline size data from your input captions, if present. Within your job settings, all of your DVB-Sub settings must be identical." } }, "__integerMin0Max100": { @@ -4058,10 +4058,10 @@ "H264Settings$HrdBufferInitialFillPercentage": "Percentage of the buffer that should initially be filled (HRD buffer model).", "H265Settings$HrdBufferFinalFillPercentage": "If your downstream systems have strict buffer requirements: Specify the minimum percentage of the HRD buffer that's available at the end of each encoded video segment. For the best video quality: Set to 0 or leave blank to automatically determine the final buffer fill percentage.", "H265Settings$HrdBufferInitialFillPercentage": "Percentage of the buffer that should initially be filled (HRD buffer model).", - "InsertableImage$Opacity": "Use Opacity (Opacity) to specify how much of the underlying video shows through the inserted image. 0 is transparent and 100 is fully opaque. Default is 50.", + "InsertableImage$Opacity": "Use Opacity to specify how much of the underlying video shows through the inserted image. 0 is transparent and 100 is fully opaque. Default is 50.", "Mpeg2Settings$HrdBufferFinalFillPercentage": "If your downstream systems have strict buffer requirements: Specify the minimum percentage of the HRD buffer that's available at the end of each encoded video segment. For the best video quality: Set to 0 or leave blank to automatically determine the final buffer fill percentage.", "Mpeg2Settings$HrdBufferInitialFillPercentage": "Percentage of the buffer that should initially be filled (HRD buffer model).", - "VideoDescription$Sharpness": "Use Sharpness (Sharpness) setting to specify the strength of anti-aliasing. This setting changes the width of the anti-alias filter kernel used for scaling. Sharpness only applies if your output resolution is different from your input resolution. 0 is the softest setting, 100 the sharpest, and 50 recommended for most content." + "VideoDescription$Sharpness": "Use the Sharpness setting to specify the strength of anti-aliasing. This setting changes the width of the anti-alias filter kernel used for scaling. 
Sharpness only applies if your output resolution is different from your input resolution. 0 is the softest setting, 100 the sharpest, and 50 recommended for most content." } }, "__integerMin0Max1000": { @@ -4091,9 +4091,9 @@ "base": null, "refs": { "ClipLimits$MinimumYUV": "Specify the Minimum YUV color sample limit. MediaConvert conforms any pixels in your input below the value that you specify to typical limited range bounds. Enter an integer from 0 to 128. Leave blank to use the default value 64. The value that you enter applies to 10-bit ranges. For 8-bit ranges, MediaConvert automatically scales this value down. When you specify a value for Minimum YUV, you must set Sample range conversion to Limited range clip.", - "H264Settings$Softness": "Ignore this setting unless you need to comply with a specification that requires a specific value. If you don't have a specification requirement, we recommend that you adjust the softness of your output by using a lower value for the setting Sharpness (sharpness) or by enabling a noise reducer filter (noiseReducerFilter). The Softness (softness) setting specifies the quantization matrices that the encoder uses. Keep the default value, 0, for flat quantization. Choose the value 1 or 16 to use the default JVT softening quantization matricies from the H.264 specification. Choose a value from 17 to 128 to use planar interpolation. Increasing values from 17 to 128 result in increasing reduction of high-frequency data. The value 128 results in the softest video.", - "Mpeg2Settings$Softness": "Ignore this setting unless you need to comply with a specification that requires a specific value. If you don't have a specification requirement, we recommend that you adjust the softness of your output by using a lower value for the setting Sharpness (sharpness) or by enabling a noise reducer filter (noiseReducerFilter). The Softness (softness) setting specifies the quantization matrices that the encoder uses. Keep the default value, 0, to use the AWS Elemental default matrices. Choose a value from 17 to 128 to use planar interpolation. Increasing values from 17 to 128 result in increasing reduction of high-frequency data. The value 128 results in the softest video.", - "XavcSettings$Softness": "Ignore this setting unless your downstream workflow requires that you specify it explicitly. Otherwise, we recommend that you adjust the softness of your output by using a lower value for the setting Sharpness (sharpness) or by enabling a noise reducer filter (noiseReducerFilter). The Softness (softness) setting specifies the quantization matrices that the encoder uses. Keep the default value, 0, for flat quantization. Choose the value 1 or 16 to use the default JVT softening quantization matricies from the H.264 specification. Choose a value from 17 to 128 to use planar interpolation. Increasing values from 17 to 128 result in increasing reduction of high-frequency data. The value 128 results in the softest video." + "H264Settings$Softness": "Ignore this setting unless you need to comply with a specification that requires a specific value. If you don't have a specification requirement, we recommend that you adjust the softness of your output by using a lower value for the setting Sharpness or by enabling a noise reducer filter. The Softness setting specifies the quantization matrices that the encoder uses. Keep the default value, 0, for flat quantization. Choose the value 1 or 16 to use the default JVT softening quantization matrices from the H.264 specification. 
Choose a value from 17 to 128 to use planar interpolation. Increasing values from 17 to 128 result in increasing reduction of high-frequency data. The value 128 results in the softest video.", + "Mpeg2Settings$Softness": "Ignore this setting unless you need to comply with a specification that requires a specific value. If you don't have a specification requirement, we recommend that you adjust the softness of your output by using a lower value for the setting Sharpness or by enabling a noise reducer filter. The Softness setting specifies the quantization matrices that the encoder uses. Keep the default value, 0, to use the AWS Elemental default matrices. Choose a value from 17 to 128 to use planar interpolation. Increasing values from 17 to 128 result in increasing reduction of high-frequency data. The value 128 results in the softest video.", + "XavcSettings$Softness": "Ignore this setting unless your downstream workflow requires that you specify it explicitly. Otherwise, we recommend that you adjust the softness of your output by using a lower value for the setting Sharpness or by enabling a noise reducer filter. The Softness setting specifies the quantization matrices that the encoder uses. Keep the default value, 0, for flat quantization. Choose the value 1 or 16 to use the default JVT softening quantization matrices from the H.264 specification. Choose a value from 17 to 128 to use planar interpolation. Increasing values from 17 to 128 result in increasing reduction of high-frequency data. The value 128 results in the softest video." } }, "__integerMin0Max1466400000": { @@ -4106,7 +4106,7 @@ "base": null, "refs": { "Av1Settings$NumberBFramesBetweenReferenceFrames": "Specify the number of B-frames, in the range of 0-15. For AV1 encoding, we recommend using 7 or 15. Choose a larger number for a lower bitrate and smaller file size; choose a smaller number for better video quality.", - "VideoDescription$FixedAfd": "Applies only if you set AFD Signaling(AfdSignaling) to Fixed (FIXED). Use Fixed (FixedAfd) to specify a four-bit AFD value which the service will write on all frames of this video output." + "VideoDescription$FixedAfd": "Applies only if you set AFD Signaling to Fixed. Use Fixed to specify a four-bit AFD value which the service will write on all frames of this video output." } }, "__integerMin0Max16": { @@ -4119,16 +4119,16 @@ "__integerMin0Max2147483647": { "base": null, "refs": { - "BurninDestinationSettings$XPosition": "Specify the horizontal position (XPosition) of the captions, relative to the left side of the output in pixels. A value of 10 would result in the captions starting 10 pixels from the left of the output. If no explicit x_position is provided, the horizontal caption position will be determined by the alignment parameter.", - "BurninDestinationSettings$YPosition": "Specify the vertical position (YPosition) of the captions, relative to the top of the output in pixels. A value of 10 would result in the captions starting 10 pixels from the top of the output. If no explicit y_position is provided, the caption will be positioned towards the bottom of the output.", + "BurninDestinationSettings$XPosition": "Specify the horizontal position of the captions, relative to the left side of the output in pixels. A value of 10 would result in the captions starting 10 pixels from the left of the output. 
If no explicit x_position is provided, the horizontal caption position will be determined by the alignment parameter.", + "BurninDestinationSettings$YPosition": "Specify the vertical position of the captions, relative to the top of the output in pixels. A value of 10 would result in the captions starting 10 pixels from the top of the output. If no explicit y_position is provided, the caption will be positioned towards the bottom of the output.", "CmafGroupSettings$MinBufferTime": "Minimum time of initially buffered media that is needed to ensure smooth playout.", "DashIsoGroupSettings$MinBufferTime": "Minimum time of initially buffered media that is needed to ensure smooth playout.", - "DvbSubDestinationSettings$DdsXCoordinate": "Use this setting, along with DDS y-coordinate (ddsYCoordinate), to specify the upper left corner of the display definition segment (DDS) display window. With this setting, specify the distance, in pixels, between the left side of the frame and the left side of the DDS display window. Keep the default value, 0, to have MediaConvert automatically choose this offset. Related setting: When you use this setting, you must set DDS handling (ddsHandling) to a value other than None (NONE). MediaConvert uses these values to determine whether to write page position data to the DDS or to the page composition segment (PCS). All burn-in and DVB-Sub font settings must match.", - "DvbSubDestinationSettings$DdsYCoordinate": "Use this setting, along with DDS x-coordinate (ddsXCoordinate), to specify the upper left corner of the display definition segment (DDS) display window. With this setting, specify the distance, in pixels, between the top of the frame and the top of the DDS display window. Keep the default value, 0, to have MediaConvert automatically choose this offset. Related setting: When you use this setting, you must set DDS handling (ddsHandling) to a value other than None (NONE). MediaConvert uses these values to determine whether to write page position data to the DDS or to the page composition segment (PCS). All burn-in and DVB-Sub font settings must match.", - "DvbSubDestinationSettings$XPosition": "Specify the horizontal position (XPosition) of the captions, relative to the left side of the outputin pixels. A value of 10 would result in the captions starting 10 pixels from the left ofthe output. If no explicit x_position is provided, the horizontal caption position will bedetermined by the alignment parameter. Within your job settings, all of your DVB-Sub settings must be identical.", - "DvbSubDestinationSettings$YPosition": "Specify the vertical position (YPosition) of the captions, relative to the top of the output in pixels. A value of 10 would result in the captions starting 10 pixels from the top of the output. If no explicit y_position is provided, the caption will be positioned towards the bottom of the output. Within your job settings, all of your DVB-Sub settings must be identical.", - "H264Settings$GopClosedCadence": "Specify the relative frequency of open to closed GOPs in this output. For example, if you want to allow four open GOPs and then require a closed GOP, set this value to 5. We recommend that you have the transcoder automatically choose this value for you based on characteristics of your input video. To enable this automatic behavior, keep the default value by leaving this setting out of your JSON job specification. In the console, do this by keeping the default empty value. 
If you do explicitly specify a value, for segmented outputs, don't set this value to 0.", - "H265Settings$GopClosedCadence": "Specify the relative frequency of open to closed GOPs in this output. For example, if you want to allow four open GOPs and then require a closed GOP, set this value to 5. We recommend that you have the transcoder automatically choose this value for you based on characteristics of your input video. To enable this automatic behavior, keep the default value by leaving this setting out of your JSON job specification. In the console, do this by keeping the default empty value. If you do explicitly specify a value, for segmented outputs, don't set this value to 0.", + "DvbSubDestinationSettings$DdsXCoordinate": "Use this setting, along with DDS y-coordinate, to specify the upper left corner of the display definition segment (DDS) display window. With this setting, specify the distance, in pixels, between the left side of the frame and the left side of the DDS display window. Keep the default value, 0, to have MediaConvert automatically choose this offset. Related setting: When you use this setting, you must set DDS handling to a value other than None. MediaConvert uses these values to determine whether to write page position data to the DDS or to the page composition segment. All burn-in and DVB-Sub font settings must match.", + "DvbSubDestinationSettings$DdsYCoordinate": "Use this setting, along with DDS x-coordinate, to specify the upper left corner of the display definition segment (DDS) display window. With this setting, specify the distance, in pixels, between the top of the frame and the top of the DDS display window. Keep the default value, 0, to have MediaConvert automatically choose this offset. Related setting: When you use this setting, you must set DDS handling to a value other than None. MediaConvert uses these values to determine whether to write page position data to the DDS or to the page composition segment (PCS). All burn-in and DVB-Sub font settings must match.", + "DvbSubDestinationSettings$XPosition": "Specify the horizontal position of the captions, relative to the left side of the output in pixels. A value of 10 would result in the captions starting 10 pixels from the left of the output. If no explicit x_position is provided, the horizontal caption position will be determined by the alignment parameter. Within your job settings, all of your DVB-Sub settings must be identical.", + "DvbSubDestinationSettings$YPosition": "Specify the vertical position of the captions, relative to the top of the output in pixels. A value of 10 would result in the captions starting 10 pixels from the top of the output. If no explicit y_position is provided, the caption will be positioned towards the bottom of the output. Within your job settings, all of your DVB-Sub settings must be identical.", + "H264Settings$GopClosedCadence": "Specify the relative frequency of open to closed GOPs in this output. For example, if you want to allow four open GOPs and then require a closed GOP, set this value to 5. We recommend that you have the transcoder automatically choose this value for you based on characteristics of your input video. In the console, do this by keeping the default empty value. If you do explicitly specify a value, for segmented outputs, don't set this value to 0.", + "H265Settings$GopClosedCadence": "Specify the relative frequency of open to closed GOPs in this output. 
For example, if you want to allow four open GOPs and then require a closed GOP, set this value to 5. We recommend that you have the transcoder automatically choose this value for you based on characteristics of your input video. To enable this automatic behavior, keep the default empty value. If you do explicitly specify a value, for segmented outputs, don't set this value to 0.", "Hdr10Metadata$MaxLuminance": "Nominal maximum mastering display luminance in units of 0.0001 candelas per square meter. This setting doesn't have a default value; you must specify a value that is suitable for the content.", "Hdr10Metadata$MinLuminance": "Nominal minimum mastering display luminance in units of 0.0001 candelas per square meter.", "HlsGroupSettings$MinSegmentLength": "When set, Minimum Segment Size is enforced by looking ahead and back within the specified range for a nearby avail and extending the segment size if needed.", @@ -4146,7 +4146,7 @@ "MotionImageInsertionOffset$ImageX": "Set the distance, in pixels, between the overlay and the left edge of the video frame.", "MotionImageInsertionOffset$ImageY": "Set the distance, in pixels, between the overlay and the top edge of the video frame.", "Mpeg2Settings$GopClosedCadence": "Specify the relative frequency of open to closed GOPs in this output. For example, if you want to allow four open GOPs and then require a closed GOP, set this value to 5. When you create a streaming output, we recommend that you keep the default value, 1, so that players starting mid-stream receive an IDR frame as quickly as possible. Don't set this value to 0; that would break output segmenting.", - "MxfXavcProfileSettings$MaxAncDataSize": "Specify a value for this setting only for outputs that you set up with one of these two XAVC profiles: XAVC HD Intra CBG (XAVC_HD_INTRA_CBG) or XAVC 4K Intra CBG (XAVC_4K_INTRA_CBG). Specify the amount of space in each frame that the service reserves for ancillary data, such as teletext captions. The default value for this setting is 1492 bytes per frame. This should be sufficient to prevent overflow unless you have multiple pages of teletext captions data. If you have a large amount of teletext data, specify a larger number.", + "MxfXavcProfileSettings$MaxAncDataSize": "Specify a value for this setting only for outputs that you set up with one of these two XAVC profiles: XAVC HD Intra CBG or XAVC 4K Intra CBG. Specify the amount of space in each frame that the service reserves for ancillary data, such as teletext captions. The default value for this setting is 1492 bytes per frame. This should be sufficient to prevent overflow unless you have multiple pages of teletext captions data. If you have a large amount of teletext data, specify a larger number.", "Rectangle$X": "The distance, in pixels, between the rectangle and the left edge of the video frame. Specify only even numbers.", "Rectangle$Y": "The distance, in pixels, between the rectangle and the top edge of the video frame. Specify only even numbers.", "Xavc4kProfileSettings$GopClosedCadence": "Frequency of closed GOPs. In streaming applications, it is recommended that this be set to 1 so a decoder joining mid-stream will receive an IDR frame as quickly as possible. Setting this value to 0 will break output segmenting.", @@ -4157,12 +4157,12 @@ "base": null, "refs": { "AudioDescription$AudioType": "Applies only if Follow Input Audio Type is unchecked (false). A number between 0 and 255. 
The following are defined in ISO-IEC 13818-1: 0 = Undefined, 1 = Clean Effects, 2 = Hearing Impaired, 3 = Visually Impaired Commentary, 4-255 = Reserved.", - "BurninDestinationSettings$BackgroundOpacity": "Specify the opacity of the background rectangle. Enter a value from 0 to 255, where 0 is transparent and 255 is opaque. If Style passthrough (StylePassthrough) is set to enabled, leave blank to pass through the background style information in your input captions to your output captions. If Style passthrough is set to disabled, leave blank to use a value of 0 and remove all backgrounds from your output captions.", + "BurninDestinationSettings$BackgroundOpacity": "Specify the opacity of the background rectangle. Enter a value from 0 to 255, where 0 is transparent and 255 is opaque. If Style passthrough is set to enabled, leave blank to pass through the background style information in your input captions to your output captions. If Style passthrough is set to disabled, leave blank to use a value of 0 and remove all backgrounds from your output captions.", "BurninDestinationSettings$FontOpacity": "Specify the opacity of the burned-in captions. 255 is opaque; 0 is transparent.", - "BurninDestinationSettings$ShadowOpacity": "Specify the opacity of the shadow. Enter a value from 0 to 255, where 0 is transparent and 255 is opaque. If Style passthrough (StylePassthrough) is set to Enabled, leave Shadow opacity (ShadowOpacity) blank to pass through the shadow style information in your input captions to your output captions. If Style passthrough is set to disabled, leave blank to use a value of 0 and remove all shadows from your output captions.", - "DvbSubDestinationSettings$BackgroundOpacity": "Specify the opacity of the background rectangle. Enter a value from 0 to 255, where 0 is transparent and 255 is opaque. If Style passthrough (StylePassthrough) is set to enabled, leave blank to pass through the background style information in your input captions to your output captions. If Style passthrough is set to disabled, leave blank to use a value of 0 and remove all backgrounds from your output captions. Within your job settings, all of your DVB-Sub settings must be identical.", + "BurninDestinationSettings$ShadowOpacity": "Specify the opacity of the shadow. Enter a value from 0 to 255, where 0 is transparent and 255 is opaque. If Style passthrough is set to Enabled, leave Shadow opacity blank to pass through the shadow style information in your input captions to your output captions. If Style passthrough is set to disabled, leave blank to use a value of 0 and remove all shadows from your output captions.", + "DvbSubDestinationSettings$BackgroundOpacity": "Specify the opacity of the background rectangle. Enter a value from 0 to 255, where 0 is transparent and 255 is opaque. If Style passthrough is set to enabled, leave blank to pass through the background style information in your input captions to your output captions. If Style passthrough is set to disabled, leave blank to use a value of 0 and remove all backgrounds from your output captions. Within your job settings, all of your DVB-Sub settings must be identical.", "DvbSubDestinationSettings$FontOpacity": "Specify the opacity of the burned-in captions. 255 is opaque; 0 is transparent.\nWithin your job settings, all of your DVB-Sub settings must be identical.", - "DvbSubDestinationSettings$ShadowOpacity": "Specify the opacity of the shadow. Enter a value from 0 to 255, where 0 is transparent and 255 is opaque. 
If Style passthrough (StylePassthrough) is set to Enabled, leave Shadow opacity (ShadowOpacity) blank to pass through the shadow style information in your input captions to your output captions. If Style passthrough is set to disabled, leave blank to use a value of 0 and remove all shadows from your output captions. Within your job settings, all of your DVB-Sub settings must be identical." + "DvbSubDestinationSettings$ShadowOpacity": "Specify the opacity of the shadow. Enter a value from 0 to 255, where 0 is transparent and 255 is opaque. If Style passthrough is set to Enabled, leave Shadow opacity blank to pass through the shadow style information in your input captions to your output captions. If Style passthrough is set to disabled, leave blank to use a value of 0 and remove all shadows from your output captions. Within your job settings, all of your DVB-Sub settings must be identical." } }, "__integerMin0Max3": { @@ -4175,9 +4175,9 @@ "__integerMin0Max30": { "base": null, "refs": { - "H264Settings$MinIInterval": "Use this setting only when you also enable Scene change detection (SceneChangeDetect). This setting determines how the encoder manages the spacing between I-frames that it inserts as part of the I-frame cadence and the I-frames that it inserts for Scene change detection. We recommend that you have the transcoder automatically choose this value for you based on characteristics of your input video. To enable this automatic behavior, keep the default value by leaving this setting out of your JSON job specification. In the console, do this by keeping the default empty value. When you explicitly specify a value for this setting, the encoder determines whether to skip a cadence-driven I-frame by the value you set. For example, if you set Min I interval (minIInterval) to 5 and a cadence-driven I-frame would fall within 5 frames of a scene-change I-frame, then the encoder skips the cadence-driven I-frame. In this way, one GOP is shrunk slightly and one GOP is stretched slightly. When the cadence-driven I-frames are farther from the scene-change I-frame than the value you set, then the encoder leaves all I-frames in place and the GOPs surrounding the scene change are smaller than the usual cadence GOPs.", - "H265Settings$MinIInterval": "Use this setting only when you also enable Scene change detection (SceneChangeDetect). This setting determines how the encoder manages the spacing between I-frames that it inserts as part of the I-frame cadence and the I-frames that it inserts for Scene change detection. We recommend that you have the transcoder automatically choose this value for you based on characteristics of your input video. To enable this automatic behavior, keep the default value by leaving this setting out of your JSON job specification. In the console, do this by keeping the default empty value. When you explicitly specify a value for this setting, the encoder determines whether to skip a cadence-driven I-frame by the value you set. For example, if you set Min I interval (minIInterval) to 5 and a cadence-driven I-frame would fall within 5 frames of a scene-change I-frame, then the encoder skips the cadence-driven I-frame. In this way, one GOP is shrunk slightly and one GOP is stretched slightly. 
When the cadence-driven I-frames are farther from the scene-change I-frame than the value you set, then the encoder leaves all I-frames in place and the GOPs surrounding the scene change are smaller than the usual cadence GOPs.", - "Mpeg2Settings$MinIInterval": "Use this setting only when you also enable Scene change detection (SceneChangeDetect). This setting determines how the encoder manages the spacing between I-frames that it inserts as part of the I-frame cadence and the I-frames that it inserts for Scene change detection. When you specify a value for this setting, the encoder determines whether to skip a cadence-driven I-frame by the value you set. For example, if you set Min I interval (minIInterval) to 5 and a cadence-driven I-frame would fall within 5 frames of a scene-change I-frame, then the encoder skips the cadence-driven I-frame. In this way, one GOP is shrunk slightly and one GOP is stretched slightly. When the cadence-driven I-frames are farther from the scene-change I-frame than the value you set, then the encoder leaves all I-frames in place and the GOPs surrounding the scene change are smaller than the usual cadence GOPs." + "H264Settings$MinIInterval": "Use this setting only when you also enable Scene change detection. This setting determines how the encoder manages the spacing between I-frames that it inserts as part of the I-frame cadence and the I-frames that it inserts for Scene change detection. We recommend that you have the transcoder automatically choose this value for you based on characteristics of your input video. To enable this automatic behavior, keep the default empty value. When you explicitly specify a value for this setting, the encoder determines whether to skip a cadence-driven I-frame by the value you set. For example, if you set Min I interval to 5 and a cadence-driven I-frame would fall within 5 frames of a scene-change I-frame, then the encoder skips the cadence-driven I-frame. In this way, one GOP is shrunk slightly and one GOP is stretched slightly. When the cadence-driven I-frames are farther from the scene-change I-frame than the value you set, then the encoder leaves all I-frames in place and the GOPs surrounding the scene change are smaller than the usual cadence GOPs.", + "H265Settings$MinIInterval": "Use this setting only when you also enable Scene change detection. This setting determines how the encoder manages the spacing between I-frames that it inserts as part of the I-frame cadence and the I-frames that it inserts for Scene change detection. We recommend that you have the transcoder automatically choose this value for you based on characteristics of your input video. To enable this automatic behavior, keep the default empty value. When you explicitly specify a value for this setting, the encoder determines whether to skip a cadence-driven I-frame by the value you set. For example, if you set Min I interval to 5 and a cadence-driven I-frame would fall within 5 frames of a scene-change I-frame, then the encoder skips the cadence-driven I-frame. In this way, one GOP is shrunk slightly and one GOP is stretched slightly. When the cadence-driven I-frames are farther from the scene-change I-frame than the value you set, then the encoder leaves all I-frames in place and the GOPs surrounding the scene change are smaller than the usual cadence GOPs.", + "Mpeg2Settings$MinIInterval": "Use this setting only when you also enable Scene change detection. 
This setting determines how the encoder manages the spacing between I-frames that it inserts as part of the I-frame cadence and the I-frames that it inserts for Scene change detection. When you specify a value for this setting, the encoder determines whether to skip a cadence-driven I-frame by the value you set. For example, if you set Min I interval to 5 and a cadence-driven I-frame would fall within 5 frames of a scene-change I-frame, then the encoder skips the cadence-driven I-frame. In this way, one GOP is shrunk slightly and one GOP is stretched slightly. When the cadence-driven I-frames are farther from the scene-change I-frame than the value you set, then the encoder leaves all I-frames in place and the GOPs surrounding the scene change are smaller than the usual cadence GOPs." } }, "__integerMin0Max30000": { @@ -4208,7 +4208,7 @@ "__integerMin0Max4194303": { "base": null, "refs": { - "NexGuardFileMarkerSettings$Payload": "Specify the payload ID that you want associated with this output. Valid values vary depending on your Nagra NexGuard forensic watermarking workflow. Required when you include Nagra NexGuard File Marker watermarking (NexGuardWatermarkingSettings) in your job. For PreRelease Content (NGPR/G2), specify an integer from 1 through 4,194,303. You must generate a unique ID for each asset you watermark, and keep a record of which ID you have assigned to each asset. Neither Nagra nor MediaConvert keep track of the relationship between output files and your IDs. For OTT Streaming, create two adaptive bitrate (ABR) stacks for each asset. Do this by setting up two output groups. For one output group, set the value of Payload ID (payload) to 0 in every output. For the other output group, set Payload ID (payload) to 1 in every output." + "NexGuardFileMarkerSettings$Payload": "Specify the payload ID that you want associated with this output. Valid values vary depending on your Nagra NexGuard forensic watermarking workflow. Required when you include Nagra NexGuard File Marker watermarking in your job. For PreRelease Content (NGPR/G2), specify an integer from 1 through 4,194,303. You must generate a unique ID for each asset you watermark, and keep a record of which ID you have assigned to each asset. Neither Nagra nor MediaConvert keep track of the relationship between output files and your IDs. For OTT Streaming, create two adaptive bitrate (ABR) stacks for each asset. Do this by setting up two output groups. For one output group, set the value of Payload ID to 0 in every output. For the other output group, set Payload ID to 1 in every output." } }, "__integerMin0Max47185920": { @@ -4260,7 +4260,7 @@ "DvbNitSettings$NetworkId": "The numeric value placed in the Network Information Table (NIT).", "Hdr10Metadata$MaxContentLightLevel": "Maximum light level among all samples in the coded video sequence, in units of candelas per square meter. This setting doesn't have a default value; you must specify a value that is suitable for the content.", "Hdr10Metadata$MaxFrameAverageLightLevel": "Maximum average light level of any frame in the coded video sequence, in units of candelas per square meter. This setting doesn't have a default value; you must specify a value that is suitable for the content.", - "M2tsSettings$ProgramNumber": "Use Program number (programNumber) to specify the program number used in the program map table (PMT) for this output. Default is 1. 
Program numbers and program map tables are parts of MPEG-2 transport stream containers, used for organizing data.", + "M2tsSettings$ProgramNumber": "Use Program number to specify the program number used in the program map table (PMT) for this output. Default is 1. Program numbers and program map tables are parts of MPEG-2 transport stream containers, used for organizing data.", "M2tsSettings$TransportStreamId": "Specify the ID for the transport stream itself in the program map table for this output. Transport stream IDs and program map tables are parts of MPEG-2 transport stream containers, used for organizing data.", "M3u8Settings$ProgramNumber": "The value of the program number field in the Program Map Table.", "M3u8Settings$TransportStreamId": "The value of the transport stream ID field in the Program Map Table." @@ -4277,20 +4277,20 @@ "__integerMin0Max8": { "base": null, "refs": { - "AudioSelector$ProgramSelection": "Use this setting for input streams that contain Dolby E, to have the service extract specific program data from the track. To select multiple programs, create multiple selectors with the same Track and different Program numbers. In the console, this setting is visible when you set Selector type to Track. Choose the program number from the dropdown list. If you are sending a JSON file, provide the program ID, which is part of the audio metadata. If your input file has incorrect metadata, you can choose All channels instead of a program number to have the service ignore the program IDs and include all the programs in the track." + "AudioSelector$ProgramSelection": "Use this setting for input streams that contain Dolby E, to have the service extract specific program data from the track. To select multiple programs, create multiple selectors with the same Track and different Program numbers. In the console, this setting is visible when you set Selector type to Track. Choose the program number from the dropdown list. If your input file has incorrect metadata, you can choose All channels instead of a program number to have the service ignore the program IDs and include all the programs in the track." } }, "__integerMin0Max9": { "base": null, "refs": { - "Mp3Settings$VbrQuality": "Required when you set Bitrate control mode (rateControlMode) to VBR. Specify the audio quality of this MP3 output from 0 (highest quality) to 9 (lowest quality)." + "Mp3Settings$VbrQuality": "Required when you set Bitrate control mode to VBR. Specify the audio quality of this MP3 output from 0 (highest quality) to 9 (lowest quality)." } }, "__integerMin0Max96": { "base": null, "refs": { - "BurninDestinationSettings$FontSize": "Specify the Font size (FontSize) in pixels. Must be a positive integer. Set to 0, or leave blank, for automatic font size.", - "DvbSubDestinationSettings$FontSize": "Specify the Font size (FontSize) in pixels. Must be a positive integer. Set to 0, or leave blank, for automatic font size. Within your job settings, all of your DVB-Sub settings must be identical." + "BurninDestinationSettings$FontSize": "Specify the Font size in pixels. Must be a positive integer. Set to 0, or leave blank, for automatic font size.", + "DvbSubDestinationSettings$FontSize": "Specify the Font size in pixels. Must be a positive integer. Set to 0, or leave blank, for automatic font size. Within your job settings, all of your DVB-Sub settings must be identical." 
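For reference, the Min I interval guidance above maps onto the generated Go shapes in this SDK. The following is a minimal sketch, assuming only the H264Settings fields documented in this model (SceneChangeDetect, MinIInterval); the surrounding job settings are omitted, and the snippet is illustrative rather than part of this changeset.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	// Recommended: enable scene change detection and leave MinIInterval
	// unset (nil) so the encoder chooses the spacing automatically.
	auto := &mediaconvert.H264Settings{
		SceneChangeDetect: aws.String(mediaconvert.H264SceneChangeDetectEnabled),
	}

	// Explicit: a cadence-driven I-frame that would land within 5 frames
	// of a scene-change I-frame is skipped, shrinking one GOP slightly
	// and stretching its neighbor.
	explicit := &mediaconvert.H264Settings{
		SceneChangeDetect: aws.String(mediaconvert.H264SceneChangeDetectEnabled),
		MinIInterval:      aws.Int64(5),
	}

	fmt.Println(auto, explicit)
}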
} }, "__integerMin0Max99": { @@ -4360,7 +4360,7 @@ "__integerMin10Max48": { "base": null, "refs": { - "TimecodeBurnin$FontSize": "Use Font Size (FontSize) to set the font size of any burned-in timecode. Valid values are 10, 16, 32, 48." + "TimecodeBurnin$FontSize": "Use Font size to set the font size of any burned-in timecode. Valid values are 10, 16, 32, 48." } }, "__integerMin16000Max320000": { @@ -4378,8 +4378,8 @@ "__integerMin16Max24": { "base": null, "refs": { - "AiffSettings$BitDepth": "Specify Bit depth (BitDepth), in bits per sample, to choose the encoding quality for this audio track.", - "WavSettings$BitDepth": "Specify Bit depth (BitDepth), in bits per sample, to choose the encoding quality for this audio track." + "AiffSettings$BitDepth": "Specify Bit depth, in bits per sample, to choose the encoding quality for this audio track.", + "WavSettings$BitDepth": "Specify Bit depth, in bits per sample, to choose the encoding quality for this audio track." } }, "__integerMin1Max1": { @@ -4391,9 +4391,9 @@ "__integerMin1Max10": { "base": null, "refs": { - "Av1QvbrSettings$QvbrQualityLevel": "Use this setting only when you set Rate control mode (RateControlMode) to QVBR. Specify the target quality level for this output. MediaConvert determines the right number of bits to use for each part of the video to maintain the video quality that you specify. When you keep the default value, AUTO, MediaConvert picks a quality level for you, based on characteristics of your input video. If you prefer to specify a quality level, specify a number from 1 through 10. Use higher numbers for greater quality. Level 10 results in nearly lossless compression. The quality level for most broadcast-quality transcodes is between 6 and 9. Optionally, to specify a value between whole numbers, also provide a value for the setting qvbrQualityLevelFineTune. For example, if you want your QVBR quality level to be 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune to .33.", - "H264QvbrSettings$QvbrQualityLevel": "Use this setting only when you set Rate control mode (RateControlMode) to QVBR. Specify the target quality level for this output. MediaConvert determines the right number of bits to use for each part of the video to maintain the video quality that you specify. When you keep the default value, AUTO, MediaConvert picks a quality level for you, based on characteristics of your input video. If you prefer to specify a quality level, specify a number from 1 through 10. Use higher numbers for greater quality. Level 10 results in nearly lossless compression. The quality level for most broadcast-quality transcodes is between 6 and 9. Optionally, to specify a value between whole numbers, also provide a value for the setting qvbrQualityLevelFineTune. For example, if you want your QVBR quality level to be 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune to .33.", - "H265QvbrSettings$QvbrQualityLevel": "Use this setting only when you set Rate control mode (RateControlMode) to QVBR. Specify the target quality level for this output. MediaConvert determines the right number of bits to use for each part of the video to maintain the video quality that you specify. When you keep the default value, AUTO, MediaConvert picks a quality level for you, based on characteristics of your input video. If you prefer to specify a quality level, specify a number from 1 through 10. Use higher numbers for greater quality. Level 10 results in nearly lossless compression. 
The quality level for most broadcast-quality transcodes is between 6 and 9. Optionally, to specify a value between whole numbers, also provide a value for the setting qvbrQualityLevelFineTune. For example, if you want your QVBR quality level to be 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune to .33." + "Av1QvbrSettings$QvbrQualityLevel": "Use this setting only when you set Rate control mode to QVBR. Specify the target quality level for this output. MediaConvert determines the right number of bits to use for each part of the video to maintain the video quality that you specify. When you keep the default value, AUTO, MediaConvert picks a quality level for you, based on characteristics of your input video. If you prefer to specify a quality level, specify a number from 1 through 10. Use higher numbers for greater quality. Level 10 results in nearly lossless compression. The quality level for most broadcast-quality transcodes is between 6 and 9. Optionally, to specify a value between whole numbers, also provide a value for the setting qvbrQualityLevelFineTune. For example, if you want your QVBR quality level to be 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune to .33.", + "H264QvbrSettings$QvbrQualityLevel": "Use this setting only when you set Rate control mode to QVBR. Specify the target quality level for this output. MediaConvert determines the right number of bits to use for each part of the video to maintain the video quality that you specify. When you keep the default value, AUTO, MediaConvert picks a quality level for you, based on characteristics of your input video. If you prefer to specify a quality level, specify a number from 1 through 10. Use higher numbers for greater quality. Level 10 results in nearly lossless compression. The quality level for most broadcast-quality transcodes is between 6 and 9. Optionally, to specify a value between whole numbers, also provide a value for the setting qvbrQualityLevelFineTune. For example, if you want your QVBR quality level to be 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune to .33.", + "H265QvbrSettings$QvbrQualityLevel": "Use this setting only when you set Rate control mode to QVBR. Specify the target quality level for this output. MediaConvert determines the right number of bits to use for each part of the video to maintain the video quality that you specify. When you keep the default value, AUTO, MediaConvert picks a quality level for you, based on characteristics of your input video. If you prefer to specify a quality level, specify a number from 1 through 10. Use higher numbers for greater quality. Level 10 results in nearly lossless compression. The quality level for most broadcast-quality transcodes is between 6 and 9. Optionally, to specify a value between whole numbers, also provide a value for the setting qvbrQualityLevelFineTune. For example, if you want your QVBR quality level to be 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune to .33." } }, "__integerMin1Max100": { @@ -4415,7 +4415,7 @@ "base": null, "refs": { "AvcIntraSettings$FramerateDenominator": "When you use the API for transcode jobs that use frame rate conversion, specify the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use FramerateDenominator to specify the denominator of this fraction. In this example, use 1001 for the value of FramerateDenominator. 
When you use the console for transcode jobs that use frame rate conversion, provide the value as a decimal number for Framerate. In this example, specify 23.976.", - "CaptionSourceFramerate$FramerateDenominator": "Specify the denominator of the fraction that represents the frame rate for the setting Caption source frame rate (CaptionSourceFramerate). Use this setting along with the setting Framerate numerator (framerateNumerator).", + "CaptionSourceFramerate$FramerateDenominator": "Specify the denominator of the fraction that represents the frame rate for the setting Caption source frame rate. Use this setting along with the setting Framerate numerator.", "Mpeg2Settings$FramerateDenominator": "When you use the API for transcode jobs that use frame rate conversion, specify the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use FramerateDenominator to specify the denominator of this fraction. In this example, use 1001 for the value of FramerateDenominator. When you use the console for transcode jobs that use frame rate conversion, provide the value as a decimal number for Framerate. In this example, specify 23.976.", "Vc3Settings$FramerateDenominator": "When you use the API for transcode jobs that use frame rate conversion, specify the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use FramerateDenominator to specify the denominator of this fraction. In this example, use 1001 for the value of FramerateDenominator. When you use the console for transcode jobs that use frame rate conversion, provide the value as a decimal number for Framerate. In this example, specify 23.976.", "XavcSettings$FramerateDenominator": "When you use the API for transcode jobs that use frame rate conversion, specify the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use FramerateDenominator to specify the denominator of this fraction. In this example, use 1001 for the value of FramerateDenominator. When you use the console for transcode jobs that use frame rate conversion, provide the value as a decimal number for Frame rate. In this example, specify 23.976." @@ -4430,9 +4430,9 @@ "__integerMin1Max2": { "base": null, "refs": { - "Mp2Settings$Channels": "Set Channels to specify the number of channels in this output audio track. Choosing Mono in the console will give you 1 output channel; choosing Stereo will give you 2. In the API, valid values are 1 and 2.", - "Mp3Settings$Channels": "Specify the number of channels in this output audio track. Choosing Mono on the console gives you 1 output channel; choosing Stereo gives you 2. In the API, valid values are 1 and 2.", - "OpusSettings$Channels": "Specify the number of channels in this output audio track. Choosing Mono on the console gives you 1 output channel; choosing Stereo gives you 2. In the API, valid values are 1 and 2.", + "Mp2Settings$Channels": "Set Channels to specify the number of channels in this output audio track. Choosing Mono will give you 1 output channel; choosing Stereo will give you 2. In the API, valid values are 1 and 2.", + "Mp3Settings$Channels": "Specify the number of channels in this output audio track. Choosing Mono gives you 1 output channel; choosing Stereo gives you 2. In the API, valid values are 1 and 2.", + "OpusSettings$Channels": "Specify the number of channels in this output audio track. Choosing Mono gives you 1 output channel; choosing Stereo gives you 2. In the API, valid values are 1 and 2.", "VorbisSettings$Channels": "Optional. 
Specify the number of channels in this output audio track. Choosing Mono on the console gives you 1 output channel; choosing Stereo gives you 2. In the API, valid values are 1 and 2. The default value is 2." } }, @@ -4464,44 +4464,44 @@ "refs": { "Av1Settings$FramerateDenominator": "When you use the API for transcode jobs that use frame rate conversion, specify the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use FramerateDenominator to specify the denominator of this fraction. In this example, use 1001 for the value of FramerateDenominator. When you use the console for transcode jobs that use frame rate conversion, provide the value as a decimal number for Framerate. In this example, specify 23.976.", "Av1Settings$FramerateNumerator": "When you use the API for transcode jobs that use frame rate conversion, specify the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use FramerateNumerator to specify the numerator of this fraction. In this example, use 24000 for the value of FramerateNumerator. When you use the console for transcode jobs that use frame rate conversion, provide the value as a decimal number for Framerate. In this example, specify 23.976.", - "CmafGroupSettings$FragmentLength": "Specify the length, in whole seconds, of the mp4 fragments. When you don't specify a value, MediaConvert defaults to 2. Related setting: Use Fragment length control (FragmentLengthControl) to specify whether the encoder enforces this value strictly.", - "CmafGroupSettings$SegmentLength": "Specify the length, in whole seconds, of each segment. When you don't specify a value, MediaConvert defaults to 10. Related settings: Use Segment length control (SegmentLengthControl) to specify whether the encoder enforces this value strictly. Use Segment control (CmafSegmentControl) to specify whether MediaConvert creates separate segment files or one content file that has metadata to mark the segment boundaries.", + "CmafGroupSettings$FragmentLength": "Specify the length, in whole seconds, of the mp4 fragments. When you don't specify a value, MediaConvert defaults to 2. Related setting: Use Fragment length control to specify whether the encoder enforces this value strictly.", + "CmafGroupSettings$SegmentLength": "Specify the length, in whole seconds, of each segment. When you don't specify a value, MediaConvert defaults to 10. Related settings: Use Segment length control to specify whether the encoder enforces this value strictly. Use Segment control to specify whether MediaConvert creates separate segment files or one content file that has metadata to mark the segment boundaries.", "DashIsoGroupSettings$FragmentLength": "Length of fragments to generate (in seconds). Fragment length must be compatible with GOP size and Framerate. Note that fragments will end on the next keyframe after this number of seconds, so actual fragment length may be longer. When Emit Single File is checked, the fragmentation is internal to a single output file and it does not cause the creation of many output files as in other output types.", - "DashIsoGroupSettings$SegmentLength": "Specify the length, in whole seconds, of each segment. When you don't specify a value, MediaConvert defaults to 30. Related settings: Use Segment length control (SegmentLengthControl) to specify whether the encoder enforces this value strictly. 
Use Segment control (DashIsoSegmentControl) to specify whether MediaConvert creates separate segment files or one content file that has metadata to mark the segment boundaries.", - "DvbSubDestinationSettings$Height": "Specify the height, in pixels, of this set of DVB-Sub captions. The default value is 576 pixels. Related setting: When you use this setting, you must set DDS handling (ddsHandling) to a value other than None (NONE). All burn-in and DVB-Sub font settings must match.", - "DvbSubDestinationSettings$Width": "Specify the width, in pixels, of this set of DVB-Sub captions. The default value is 720 pixels. Related setting: When you use this setting, you must set DDS handling (ddsHandling) to a value other than None (NONE). All burn-in and DVB-Sub font settings must match.", + "DashIsoGroupSettings$SegmentLength": "Specify the length, in whole seconds, of each segment. When you don't specify a value, MediaConvert defaults to 30. Related settings: Use Segment length control to specify whether the encoder enforces this value strictly. Use Segment control to specify whether MediaConvert creates separate segment files or one content file that has metadata to mark the segment boundaries.", + "DvbSubDestinationSettings$Height": "Specify the height, in pixels, of this set of DVB-Sub captions. The default value is 576 pixels. Related setting: When you use this setting, you must set DDS handling to a value other than None. All burn-in and DVB-Sub font settings must match.", + "DvbSubDestinationSettings$Width": "Specify the width, in pixels, of this set of DVB-Sub captions. The default value is 720 pixels. Related setting: When you use this setting, you must set DDS handling to a value other than None. All burn-in and DVB-Sub font settings must match.", "DvbSubSourceSettings$Pid": "When using DVB-Sub with Burn-in, use this PID for the source content. Unused for DVB-Sub passthrough. All DVB-Sub content is passed through, regardless of selectors.", "FrameCaptureSettings$FramerateDenominator": "Frame capture will encode the first frame of the output stream, then one frame every framerateDenominator/framerateNumerator seconds. For example, settings of framerateNumerator = 1 and framerateDenominator = 3 (a rate of 1/3 frame per second) will capture the first frame, then 1 frame every 3s. Files will be named as filename.n.jpg where n is the 0-based sequence number of each Capture.", "FrameCaptureSettings$FramerateNumerator": "Frame capture will encode the first frame of the output stream, then one frame every framerateDenominator/framerateNumerator seconds. For example, settings of framerateNumerator = 1 and framerateDenominator = 3 (a rate of 1/3 frame per second) will capture the first frame, then 1 frame every 3s. Files will be named as filename.NNNNNNN.jpg where N is the 0-based frame sequence number zero padded to 7 decimal places.", "H264Settings$FramerateDenominator": "When you use the API for transcode jobs that use frame rate conversion, specify the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use FramerateDenominator to specify the denominator of this fraction. In this example, use 1001 for the value of FramerateDenominator. When you use the console for transcode jobs that use frame rate conversion, provide the value as a decimal number for Framerate. In this example, specify 23.976.", "H264Settings$FramerateNumerator": "When you use the API for transcode jobs that use frame rate conversion, specify the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. 
Use FramerateNumerator to specify the numerator of this fraction. In this example, use 24000 for the value of FramerateNumerator. When you use the console for transcode jobs that use frame rate conversion, provide the value as a decimal number for Framerate. In this example, specify 23.976.", - "H264Settings$ParDenominator": "Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parDenominator is 33.", - "H264Settings$ParNumerator": "Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parNumerator is 40.", + "H264Settings$ParDenominator": "Required when you set Pixel aspect ratio to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parDenominator is 33.", + "H264Settings$ParNumerator": "Required when you set Pixel aspect ratio to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parNumerator is 40.", "H265Settings$FramerateDenominator": "When you use the API for transcode jobs that use frame rate conversion, specify the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use FramerateDenominator to specify the denominator of this fraction. In this example, use 1001 for the value of FramerateDenominator. When you use the console for transcode jobs that use frame rate conversion, provide the value as a decimal number for Framerate. In this example, specify 23.976.", "H265Settings$FramerateNumerator": "When you use the API for transcode jobs that use frame rate conversion, specify the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use FramerateNumerator to specify the numerator of this fraction. In this example, use 24000 for the value of FramerateNumerator. When you use the console for transcode jobs that use frame rate conversion, provide the value as a decimal number for Framerate. In this example, specify 23.976.", - "H265Settings$ParDenominator": "Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parDenominator is 33.", - "H265Settings$ParNumerator": "Required when you set Pixel aspect ratio (parControl) to SPECIFIED. 
On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parNumerator is 40.", - "HlsGroupSettings$SegmentLength": "Specify the length, in whole seconds, of each segment. When you don't specify a value, MediaConvert defaults to 10. Related settings: Use Segment length control (SegmentLengthControl) to specify whether the encoder enforces this value strictly. Use Segment control (HlsSegmentControl) to specify whether MediaConvert creates separate segment files or one content file that has metadata to mark the segment boundaries.", - "HlsGroupSettings$SegmentsPerSubdirectory": "Specify the number of segments to write to a subdirectory before starting a new one. You must also set Directory structure to Subdirectory per stream for this setting to have an effect.", - "Input$ProgramNumber": "Use Program (programNumber) to select a specific program from within a multi-program transport stream. Note that Quad 4K is not currently supported. Default is the first program within the transport stream. If the program you specify doesn't exist, the transcoding service will use this default.", - "InputTemplate$ProgramNumber": "Use Program (programNumber) to select a specific program from within a multi-program transport stream. Note that Quad 4K is not currently supported. Default is the first program within the transport stream. If the program you specify doesn't exist, the transcoding service will use this default.", - "Mpeg2Settings$ParDenominator": "Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parDenominator is 33.", - "Mpeg2Settings$ParNumerator": "Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parNumerator is 40.", - "MsSmoothGroupSettings$FragmentLength": "Specify how you want MediaConvert to determine the fragment length. Choose Exact (EXACT) to have the encoder use the exact length that you specify with the setting Fragment length (FragmentLength). This might result in extra I-frames. Choose Multiple of GOP (GOP_MULTIPLE) to have the encoder round up the segment lengths to match the next GOP boundary.", + "H265Settings$ParDenominator": "Required when you set Pixel aspect ratio to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parDenominator is 33.", + "H265Settings$ParNumerator": "Required when you set Pixel aspect ratio to SPECIFIED. On the console, this corresponds to any value other than Follow source. 
When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parNumerator is 40.", + "HlsGroupSettings$SegmentLength": "Specify the length, in whole seconds, of each segment. When you don't specify a value, MediaConvert defaults to 10. Related settings: Use Segment length control to specify whether the encoder enforces this value strictly. Use Segment control to specify whether MediaConvert creates separate segment files or one content file that has metadata to mark the segment boundaries.", + "HlsGroupSettings$SegmentsPerSubdirectory": "Specify the number of segments to write to a subdirectory before starting a new one. You must also set Directory structure to Subdirectory per stream for this setting to have an effect.", + "Input$ProgramNumber": "Use Program to select a specific program from within a multi-program transport stream. Note that Quad 4K is not currently supported. Default is the first program within the transport stream. If the program you specify doesn't exist, the transcoding service will use this default.", + "InputTemplate$ProgramNumber": "Use Program to select a specific program from within a multi-program transport stream. Note that Quad 4K is not currently supported. Default is the first program within the transport stream. If the program you specify doesn't exist, the transcoding service will use this default.", + "Mpeg2Settings$ParDenominator": "Required when you set Pixel aspect ratio to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parDenominator is 33.", + "Mpeg2Settings$ParNumerator": "Required when you set Pixel aspect ratio to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parNumerator is 40.", + "MsSmoothGroupSettings$FragmentLength": "Specify how you want MediaConvert to determine the fragment length. Choose Exact to have the encoder use the exact length that you specify with the setting Fragment length. This might result in extra I-frames. Choose Multiple of GOP to have the encoder round up the segment lengths to match the next GOP boundary.", "ProresSettings$FramerateDenominator": "When you use the API for transcode jobs that use frame rate conversion, specify the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use FramerateDenominator to specify the denominator of this fraction. In this example, use 1001 for the value of FramerateDenominator. When you use the console for transcode jobs that use frame rate conversion, provide the value as a decimal number for Framerate. In this example, specify 23.976.", "ProresSettings$FramerateNumerator": "When you use the API for transcode jobs that use frame rate conversion, specify the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use FramerateNumerator to specify the numerator of this fraction. In this example, use 24000 for the value of FramerateNumerator. 
When you use the console for transcode jobs that use frame rate conversion, provide the value as a decimal number for Framerate. In this example, specify 23.976.", - "ProresSettings$ParDenominator": "Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parDenominator is 33.", - "ProresSettings$ParNumerator": "Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parNumerator is 40.", + "ProresSettings$ParDenominator": "Required when you set Pixel aspect ratio to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parDenominator is 33.", + "ProresSettings$ParNumerator": "Required when you set Pixel aspect ratio to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parNumerator is 40.", "TrackSourceSettings$TrackNumber": "Use this setting to select a single captions track from a source. Track numbers correspond to the order in the captions source file. For IMF sources, track numbering is based on the order that the captions appear in the CPL. For example, use 1 to select the captions asset that is listed first in the CPL. To include more than one captions track in your job outputs, create multiple input captions selectors. Specify one track per selector.", - "VideoSelector$Pid": "Use PID (Pid) to select specific video data from an input file. Specify this value as an integer; the system automatically converts it to the hexidecimal value. For example, 257 selects PID 0x101. A PID, or packet identifier, is an identifier for a set of data in an MPEG-2 transport stream container.", + "VideoSelector$Pid": "Use PID to select specific video data from an input file. Specify this value as an integer; the system automatically converts it to the hexadecimal value. For example, 257 selects PID 0x101. A PID, or packet identifier, is an identifier for a set of data in an MPEG-2 transport stream container.", "Vp8Settings$FramerateDenominator": "When you use the API for transcode jobs that use frame rate conversion, specify the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use FramerateDenominator to specify the denominator of this fraction. In this example, use 1001 for the value of FramerateDenominator. When you use the console for transcode jobs that use frame rate conversion, provide the value as a decimal number for Framerate. 
In this example, specify 23.976.", "Vp8Settings$FramerateNumerator": "When you use the API for transcode jobs that use frame rate conversion, specify the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use FramerateNumerator to specify the numerator of this fraction. In this example, use 24000 for the value of FramerateNumerator. When you use the console for transcode jobs that use frame rate conversion, provide the value as a decimal number for Framerate. In this example, specify 23.976.", - "Vp8Settings$ParDenominator": "Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parDenominator is 33.", - "Vp8Settings$ParNumerator": "Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parNumerator is 40.", + "Vp8Settings$ParDenominator": "Required when you set Pixel aspect ratio to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parDenominator is 33.", + "Vp8Settings$ParNumerator": "Required when you set Pixel aspect ratio to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parNumerator is 40.", "Vp9Settings$FramerateDenominator": "When you use the API for transcode jobs that use frame rate conversion, specify the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use FramerateDenominator to specify the denominator of this fraction. In this example, use 1001 for the value of FramerateDenominator. When you use the console for transcode jobs that use frame rate conversion, provide the value as a decimal number for Framerate. In this example, specify 23.976.", "Vp9Settings$FramerateNumerator": "When you use the API for transcode jobs that use frame rate conversion, specify the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use FramerateNumerator to specify the numerator of this fraction. In this example, use 24000 for the value of FramerateNumerator. When you use the console for transcode jobs that use frame rate conversion, provide the value as a decimal number for Framerate. In this example, specify 23.976.", - "Vp9Settings$ParDenominator": "Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. 
For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parDenominator is 33.", - "Vp9Settings$ParNumerator": "Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parNumerator is 40.", + "Vp9Settings$ParDenominator": "Required when you set Pixel aspect ratio to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parDenominator is 33.", + "Vp9Settings$ParNumerator": "Required when you set Pixel aspect ratio to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parNumerator is 40.", "__listOf__integerMin1Max2147483647$member": null } }, @@ -4545,7 +4545,7 @@ "__integerMin1Max6": { "base": null, "refs": { - "EmbeddedDestinationSettings$Destination708ServiceNumber": "Ignore this setting unless your input captions are SCC format and you want both 608 and 708 captions embedded in your output stream. Optionally, specify the 708 service number for each output captions channel. Choose a different number for each channel. To use this setting, also set Force 608 to 708 upconvert (Convert608To708) to Upconvert (UPCONVERT) in your input captions selector settings. If you choose to upconvert but don't specify a 708 service number, MediaConvert uses the number that you specify for CC channel number (destination608ChannelNumber) for the 708 service number. For more information, see https://docs.aws.amazon.com/console/mediaconvert/dual-scc-to-embedded.", + "EmbeddedDestinationSettings$Destination708ServiceNumber": "Ignore this setting unless your input captions are SCC format and you want both 608 and 708 captions embedded in your output stream. Optionally, specify the 708 service number for each output captions channel. Choose a different number for each channel. To use this setting, also set Force 608 to 708 upconvert to Upconvert in your input captions selector settings. If you choose to upconvert but don't specify a 708 service number, MediaConvert uses the number that you specify for CC channel number for the 708 service number. For more information, see https://docs.aws.amazon.com/console/mediaconvert/dual-scc-to-embedded.", "H264Settings$NumberReferenceFrames": "Number of reference frames to use. The encoder may use more than requested if using B-frames and/or interlaced encoding.", "H265Settings$NumberReferenceFrames": "Number of reference frames to use. The encoder may use more than requested if using B-frames and/or interlaced encoding." } @@ -4553,7 +4553,7 @@ "__integerMin1Max60000": { "base": null, "refs": { - "CaptionSourceFramerate$FramerateNumerator": "Specify the numerator of the fraction that represents the frame rate for the setting Caption source frame rate (CaptionSourceFramerate). 
Use this setting along with the setting Framerate denominator (framerateDenominator)." + "CaptionSourceFramerate$FramerateNumerator": "Specify the numerator of the fraction that represents the frame rate for the setting Caption source frame rate. Use this setting along with the setting Framerate denominator." } }, "__integerMin1Max64": { @@ -4636,7 +4636,7 @@ "refs": { "M2tsScte35Esam$Scte35EsamPid": "Packet Identifier (PID) of the SCTE-35 stream in the transport stream generated by ESAM.", "M2tsSettings$DvbTeletextPid": "Specify the packet identifier (PID) for DVB teletext data you include in this output. Default is 499.", - "M2tsSettings$PcrPid": "Specify the packet identifier (PID) for the program clock reference (PCR) in this output. If you do not specify a value, the service will use the value for Video PID (VideoPid).", + "M2tsSettings$PcrPid": "Specify the packet identifier (PID) for the program clock reference (PCR) in this output. If you do not specify a value, the service will use the value for Video PID.", "M2tsSettings$PmtPid": "Specify the packet identifier (PID) for the program map table (PMT) itself. Default is 480.", "M2tsSettings$PrivateMetadataPid": "Specify the packet identifier (PID) of the private metadata stream. Default is 503.", "M2tsSettings$Scte35Pid": "Specify the packet identifier (PID) of the SCTE-35 stream in the transport stream.", @@ -4701,7 +4701,7 @@ "__integerMin6000Max1024000": { "base": null, "refs": { - "AacSettings$Bitrate": "Specify the average bitrate in bits per second. The set of valid values for this setting is: 6000, 8000, 10000, 12000, 14000, 16000, 20000, 24000, 28000, 32000, 40000, 48000, 56000, 64000, 80000, 96000, 112000, 128000, 160000, 192000, 224000, 256000, 288000, 320000, 384000, 448000, 512000, 576000, 640000, 768000, 896000, 1024000. The value you set is also constrained by the values that you choose for Profile (codecProfile), Bitrate control mode (codingMode), and Sample rate (sampleRate). Default values depend on Bitrate control mode and Profile." + "AacSettings$Bitrate": "Specify the average bitrate in bits per second. The set of valid values for this setting is: 6000, 8000, 10000, 12000, 14000, 16000, 20000, 24000, 28000, 32000, 40000, 48000, 56000, 64000, 80000, 96000, 112000, 128000, 160000, 192000, 224000, 256000, 288000, 320000, 384000, 448000, 512000, 576000, 640000, 768000, 896000, 1024000. The value you set is also constrained by the values that you choose for Profile, Bitrate control mode, and Sample rate. Default values depend on Bitrate control mode and Profile." } }, "__integerMin64000Max640000": { @@ -4752,8 +4752,8 @@ "__integerMin96Max600": { "base": null, "refs": { - "BurninDestinationSettings$FontResolution": "Specify the Font resolution (FontResolution) in DPI (dots per inch).", - "DvbSubDestinationSettings$FontResolution": "Specify the Font resolution (FontResolution) in DPI (dots per inch).\nWithin your job settings, all of your DVB-Sub settings must be identical." + "BurninDestinationSettings$FontResolution": "Specify the Font resolution in DPI (dots per inch).", + "DvbSubDestinationSettings$FontResolution": "Specify the Font resolution in DPI (dots per inch).\nWithin your job settings, all of your DVB-Sub settings must be identical." 
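The 24000 / 1001 fraction that recurs in the frame rate descriptions above translates to the API as follows. This is a minimal sketch against the generated Go shapes; setting FramerateControl to SPECIFIED is an assumption here, and in the console you would enter the decimal 23.976 instead.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	// 23.976 fps expressed as a fraction: numerator 24000, denominator 1001.
	h264 := &mediaconvert.H264Settings{
		FramerateControl:     aws.String(mediaconvert.H264FramerateControlSpecified),
		FramerateNumerator:   aws.Int64(24000),
		FramerateDenominator: aws.Int64(1001),
	}
	fmt.Println(h264)
}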
} }, "__integerMinNegative1000Max1000": { @@ -4786,12 +4786,12 @@ "refs": { "AudioSelector$Offset": "Specifies a time delta in milliseconds to offset the audio from the input video.", "BurninDestinationSettings$ShadowXOffset": "Specify the horizontal offset of the shadow, relative to the captions in pixels. A value of -2 would result in a shadow offset 2 pixels to the left.", - "BurninDestinationSettings$ShadowYOffset": "Specify the vertical offset of the shadow relative to the captions in pixels. A value of -2 would result in a shadow offset 2 pixels above the text. Leave Shadow y-offset (ShadowYOffset) blank and set Style passthrough (StylePassthrough) to enabled to use the shadow y-offset data from your input captions, if present.", + "BurninDestinationSettings$ShadowYOffset": "Specify the vertical offset of the shadow relative to the captions in pixels. A value of -2 would result in a shadow offset 2 pixels above the text. Leave Shadow y-offset blank and set Style passthrough to enabled to use the shadow y-offset data from your input captions, if present.", "DvbSubDestinationSettings$ShadowXOffset": "Specify the horizontal offset of the shadow, relative to the captions in pixels. A value of -2 would result in a shadow offset 2 pixels to the left. Within your job settings, all of your DVB-Sub settings must be identical.", - "DvbSubDestinationSettings$ShadowYOffset": "Specify the vertical offset of the shadow relative to the captions in pixels. A value of -2 would result in a shadow offset 2 pixels above the text. Leave Shadow y-offset (ShadowYOffset) blank and set Style passthrough (StylePassthrough) to enabled to use the shadow y-offset data from your input captions, if present. Within your job settings, all of your DVB-Sub settings must be identical.", - "FileSourceSettings$TimeDelta": "Optional. Use this setting when you need to adjust the sync between your sidecar captions and your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/time-delta-use-cases.html. Enter a positive or negative number to modify the times in the captions file. For example, type 15 to add 15 seconds to all the times in the captions file. Type -5 to subtract 5 seconds from the times in the captions file. You can optionally specify your time delta in milliseconds instead of seconds. When you do so, set the related setting, Time delta units (TimeDeltaUnits) to Milliseconds (MILLISECONDS). Note that, when you specify a time delta for timecode-based caption sources, such as SCC and STL, and your time delta isn't a multiple of the input frame rate, MediaConvert snaps the captions to the nearest frame. For example, when your input video frame rate is 25 fps and you specify 1010ms for time delta, MediaConvert delays your captions by 1000 ms.", + "DvbSubDestinationSettings$ShadowYOffset": "Specify the vertical offset of the shadow relative to the captions in pixels. A value of -2 would result in a shadow offset 2 pixels above the text. Leave Shadow y-offset blank and set Style passthrough to enabled to use the shadow y-offset data from your input captions, if present. Within your job settings, all of your DVB-Sub settings must be identical.", + "FileSourceSettings$TimeDelta": "Optional. Use this setting when you need to adjust the sync between your sidecar captions and your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/time-delta-use-cases.html. Enter a positive or negative number to modify the times in the captions file. 
For example, type 15 to add 15 seconds to all the times in the captions file. Type -5 to subtract 5 seconds from the times in the captions file. You can optionally specify your time delta in milliseconds instead of seconds. When you do so, set the related setting, Time delta units to Milliseconds. Note that, when you specify a time delta for timecode-based caption sources, such as SCC and STL, and your time delta isn't a multiple of the input frame rate, MediaConvert snaps the captions to the nearest frame. For example, when your input video frame rate is 25 fps and you specify 1010ms for time delta, MediaConvert delays your captions by 1000 ms.", "HlsCaptionLanguageMapping$CaptionChannel": "Caption channel.", - "HlsGroupSettings$TimedMetadataId3Period": "Specify the interval in seconds to write ID3 timestamps in your output. The first timestamp starts at the output timecode and date, and increases incrementally with each ID3 timestamp. To use the default interval of 10 seconds: Leave blank. To include this metadata in your output: Set ID3 timestamp frame type (timedMetadataId3Frame) to PRIV (PRIV) or TDRL (TDRL), and set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH).", + "HlsGroupSettings$TimedMetadataId3Period": "Specify the interval in seconds to write ID3 timestamps in your output. The first timestamp starts at the output timecode and date, and increases incrementally with each ID3 timestamp. To use the default interval of 10 seconds: Leave blank. To include this metadata in your output: Set ID3 timestamp frame type to PRIV or TDRL, and set ID3 metadata to Passthrough.", "HlsGroupSettings$TimestampDeltaMilliseconds": "Provides an extra millisecond delta offset to fine tune the timestamps.", "VideoSelector$ProgramNumber": "Selects a specific program from within a multi-program transport stream. Note that Quad 4K is not currently supported." } @@ -4840,8 +4840,8 @@ "__listOfAudioDescription": { "base": null, "refs": { - "Output$AudioDescriptions": "(AudioDescriptions) contains groups of audio encoding settings organized by audio codec. Include one instance of (AudioDescriptions) per output. (AudioDescriptions) can contain multiple groups of encoding settings.", - "PresetSettings$AudioDescriptions": "(AudioDescriptions) contains groups of audio encoding settings organized by audio codec. Include one instance of (AudioDescriptions) per output. (AudioDescriptions) can contain multiple groups of encoding settings." + "Output$AudioDescriptions": "Contains groups of audio encoding settings organized by audio codec. Include one instance of AudioDescriptions per output. Can contain multiple groups of encoding settings.", + "PresetSettings$AudioDescriptions": "Contains groups of audio encoding settings organized by audio codec. Include one instance of AudioDescriptions per output. Can contain multiple groups of encoding settings." } }, "__listOfAutomatedAbrRule": { @@ -4853,7 +4853,7 @@ "__listOfCaptionDescription": { "base": null, "refs": { - "Output$CaptionDescriptions": "(CaptionDescriptions) contains groups of captions settings. For each output that has captions, include one instance of (CaptionDescriptions). (CaptionDescriptions) can contain multiple groups of captions settings." + "Output$CaptionDescriptions": "Contains groups of captions settings. For each output that has captions, include one instance of CaptionDescriptions. Can contain multiple groups of captions settings." 
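The time delta behavior described above (1010 ms against a 25 fps input snapping to a 1000 ms delay) can be sketched with the generated Go shapes. The S3 path is hypothetical, and the FileSourceTimeDeltaUnitsMilliseconds constant name is assumed from this model's Time delta units enum.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	// Delay every caption time in the sidecar file by 1010 ms. For a
	// timecode-based source such as SCC at 25 fps, MediaConvert snaps
	// this to the nearest frame, i.e. a 1000 ms delay.
	fs := &mediaconvert.FileSourceSettings{
		SourceFile:     aws.String("s3://amzn-s3-demo-bucket/captions.scc"), // hypothetical path
		TimeDelta:      aws.Int64(1010),
		TimeDeltaUnits: aws.String(mediaconvert.FileSourceTimeDeltaUnitsMilliseconds),
	}
	fmt.Println(fs)
}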
} }, "__listOfCaptionDescriptionPreset": { @@ -4923,20 +4923,20 @@ "__listOfInput": { "base": null, "refs": { - "JobSettings$Inputs": "Use Inputs (inputs) to define source file used in the transcode job. There can be multiple inputs add in a job. These inputs will be concantenated together to create the output." + "JobSettings$Inputs": "Use Inputs to define source file used in the transcode job. There can be multiple inputs add in a job. These inputs will be concantenated together to create the output." } }, "__listOfInputClipping": { "base": null, "refs": { - "Input$InputClippings": "(InputClippings) contains sets of start and end times that together specify a portion of the input to be used in the outputs. If you provide only a start time, the clip will be the entire input from that point to the end. If you provide only an end time, it will be the entire input up to that point. When you specify more than one input clip, the transcoding service creates the job outputs by stringing the clips together in the order you specify them.", - "InputTemplate$InputClippings": "(InputClippings) contains sets of start and end times that together specify a portion of the input to be used in the outputs. If you provide only a start time, the clip will be the entire input from that point to the end. If you provide only an end time, it will be the entire input up to that point. When you specify more than one input clip, the transcoding service creates the job outputs by stringing the clips together in the order you specify them." + "Input$InputClippings": "Contains sets of start and end times that together specify a portion of the input to be used in the outputs. If you provide only a start time, the clip will be the entire input from that point to the end. If you provide only an end time, it will be the entire input up to that point. When you specify more than one input clip, the transcoding service creates the job outputs by stringing the clips together in the order you specify them.", + "InputTemplate$InputClippings": "Contains sets of start and end times that together specify a portion of the input to be used in the outputs. If you provide only a start time, the clip will be the entire input from that point to the end. If you provide only an end time, it will be the entire input up to that point. When you specify more than one input clip, the transcoding service creates the job outputs by stringing the clips together in the order you specify them." } }, "__listOfInputTemplate": { "base": null, "refs": { - "JobTemplateSettings$Inputs": "Use Inputs (inputs) to define the source file used in the transcode job. There can only be one input in a job template. Using the API, you can include multiple inputs when referencing a job template." + "JobTemplateSettings$Inputs": "Use Inputs to define the source file used in the transcode job. There can only be one input in a job template. Using the API, you can include multiple inputs when referencing a job template." } }, "__listOfInsertableImage": { @@ -4984,8 +4984,8 @@ "__listOfOutputGroup": { "base": null, "refs": { - "JobSettings$OutputGroups": "(OutputGroups) contains one group of settings for each set of outputs that share a common package type. All unpackaged files (MPEG-4, MPEG-2 TS, Quicktime, MXF, and no container) are grouped in a single output group as well. Required in (OutputGroups) is a group of settings that apply to the whole group. This required object depends on the value you set for (Type) under (OutputGroups)>(OutputGroupSettings). 
Type, settings object pairs are as follows. * FILE_GROUP_SETTINGS, FileGroupSettings * HLS_GROUP_SETTINGS, HlsGroupSettings * DASH_ISO_GROUP_SETTINGS, DashIsoGroupSettings * MS_SMOOTH_GROUP_SETTINGS, MsSmoothGroupSettings * CMAF_GROUP_SETTINGS, CmafGroupSettings", - "JobTemplateSettings$OutputGroups": "(OutputGroups) contains one group of settings for each set of outputs that share a common package type. All unpackaged files (MPEG-4, MPEG-2 TS, Quicktime, MXF, and no container) are grouped in a single output group as well. Required in (OutputGroups) is a group of settings that apply to the whole group. This required object depends on the value you set for (Type) under (OutputGroups)>(OutputGroupSettings). Type, settings object pairs are as follows. * FILE_GROUP_SETTINGS, FileGroupSettings * HLS_GROUP_SETTINGS, HlsGroupSettings * DASH_ISO_GROUP_SETTINGS, DashIsoGroupSettings * MS_SMOOTH_GROUP_SETTINGS, MsSmoothGroupSettings * CMAF_GROUP_SETTINGS, CmafGroupSettings" + "JobSettings$OutputGroups": "Contains one group of settings for each set of outputs that share a common package type. All unpackaged files (MPEG-4, MPEG-2 TS, Quicktime, MXF, and no container) are grouped in a single output group as well. Required in OutputGroups is a group of settings that apply to the whole group. This required object depends on the value you set for Type. Type, settings object pairs are as follows. * FILE_GROUP_SETTINGS, FileGroupSettings * HLS_GROUP_SETTINGS, HlsGroupSettings * DASH_ISO_GROUP_SETTINGS, DashIsoGroupSettings * MS_SMOOTH_GROUP_SETTINGS, MsSmoothGroupSettings * CMAF_GROUP_SETTINGS, CmafGroupSettings", + "JobTemplateSettings$OutputGroups": "Contains one group of settings for each set of outputs that share a common package type. All unpackaged files (MPEG-4, MPEG-2 TS, Quicktime, MXF, and no container) are grouped in a single output group as well. Required in OutputGroups is a group of settings that apply to the whole group. This required object depends on the value you set for Type. Type, settings object pairs are as follows. * FILE_GROUP_SETTINGS, FileGroupSettings * HLS_GROUP_SETTINGS, HlsGroupSettings * DASH_ISO_GROUP_SETTINGS, DashIsoGroupSettings * MS_SMOOTH_GROUP_SETTINGS, MsSmoothGroupSettings * CMAF_GROUP_SETTINGS, CmafGroupSettings" } }, "__listOfOutputGroupDetail": { @@ -5015,7 +5015,7 @@ "__listOfTeletextPageType": { "base": null, "refs": { - "TeletextDestinationSettings$PageTypes": "Specify the page types for this Teletext page. If you don't specify a value here, the service sets the page type to the default value Subtitle (PAGE_TYPE_SUBTITLE). If you pass through the entire set of Teletext data, don't use this field. When you pass through a set of Teletext pages, your output has the same page types as your input." + "TeletextDestinationSettings$PageTypes": "Specify the page types for this Teletext page. If you don't specify a value here, the service sets the page type to the default value Subtitle. If you pass through the entire set of Teletext data, don't use this field. When you pass through a set of Teletext pages, your output has the same page types as your input." } }, "__listOfWarningGroup": { @@ -5034,7 +5034,7 @@ "base": null, "refs": { "AudioSelector$Pids": "Selects a specific PID from within an audio source (e.g. 257 selects PID 0x101).", - "AudioSelector$Tracks": "Identify a track from the input audio to include in this selector by entering the track index number. To include several tracks in a single audio selector, specify multiple tracks as follows. 
Using the console, enter a comma-separated list. For examle, type \"1,2,3\" to include tracks 1 through 3. Specifying directly in your JSON job file, provide the track numbers in an array. For example, \"tracks\": [1,2,3]." + "AudioSelector$Tracks": "Identify a track from the input audio to include in this selector by entering the track index number. To include several tracks in a single audio selector, specify multiple tracks as follows. Using the console, enter a comma-separated list. For example, type \"1,2,3\" to include tracks 1 through 3." } }, "__listOf__integerMin32Max8182": { @@ -5091,15 +5091,15 @@ "__mapOfAudioSelector": { "base": null, "refs": { - "Input$AudioSelectors": "Use Audio selectors (AudioSelectors) to specify a track or set of tracks from the input that you will use in your outputs. You can use multiple Audio selectors per input.", - "InputTemplate$AudioSelectors": "Use Audio selectors (AudioSelectors) to specify a track or set of tracks from the input that you will use in your outputs. You can use multiple Audio selectors per input." + "Input$AudioSelectors": "Use Audio selectors to specify a track or set of tracks from the input that you will use in your outputs. You can use multiple Audio selectors per input.", + "InputTemplate$AudioSelectors": "Use Audio selectors to specify a track or set of tracks from the input that you will use in your outputs. You can use multiple Audio selectors per input." } }, "__mapOfAudioSelectorGroup": { "base": null, "refs": { - "Input$AudioSelectorGroups": "Use audio selector groups to combine multiple sidecar audio inputs so that you can assign them to a single output audio tab (AudioDescription). Note that, if you're working with embedded audio, it's simpler to assign multiple input tracks into a single audio selector rather than use an audio selector group.", - "InputTemplate$AudioSelectorGroups": "Use audio selector groups to combine multiple sidecar audio inputs so that you can assign them to a single output audio tab (AudioDescription). Note that, if you're working with embedded audio, it's simpler to assign multiple input tracks into a single audio selector rather than use an audio selector group." + "Input$AudioSelectorGroups": "Use audio selector groups to combine multiple sidecar audio inputs so that you can assign them to a single output audio tab. Note that, if you're working with embedded audio, it's simpler to assign multiple input tracks into a single audio selector rather than use an audio selector group.", + "InputTemplate$AudioSelectorGroups": "Use audio selector groups to combine multiple sidecar audio inputs so that you can assign them to a single output audio tab. Note that, if you're working with embedded audio, it's simpler to assign multiple input tracks into a single audio selector rather than use an audio selector group." } }, "__mapOfCaptionSelector": { @@ -5131,8 +5131,8 @@ "CaptionDescription$LanguageDescription": "Specify a label for this set of output captions. For example, \"English\", \"Director commentary\", or \"track_2\". For streaming outputs, MediaConvert passes this information into destination manifests for display on the end-viewer's player device. For outputs in other output groups, the service ignores this setting.", "CaptionDescriptionPreset$LanguageDescription": "Specify a label for this set of output captions. For example, \"English\", \"Director commentary\", or \"track_2\". 
For streaming outputs, MediaConvert passes this information into destination manifests for display on the end-viewer's player device. For outputs in other output groups, the service ignores this setting.", "CmafGroupSettings$BaseUrl": "A partial URI prefix that will be put in the manifest file at the top level BaseURL element. Can be used if streams are delivered from a different URL than the manifest file.", - "CmfcSettings$AudioGroupId": "Specify the audio rendition group for this audio rendition. Specify up to one value for each audio output in your output group. This value appears in your HLS parent manifest in the EXT-X-MEDIA tag of TYPE=AUDIO, as the value for the GROUP-ID attribute. For example, if you specify \"audio_aac_1\" for Audio group ID, it appears in your manifest like this: #EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID=\"audio_aac_1\". Related setting: To associate the rendition group that this audio track belongs to with a video rendition, include the same value that you provide here for that video output's setting Audio rendition sets (audioRenditionSets).", - "CmfcSettings$AudioRenditionSets": "List the audio rendition groups that you want included with this video rendition. Use a comma-separated list. For example, say you want to include the audio rendition groups that have the audio group IDs \"audio_aac_1\" and \"audio_dolby\". Then you would specify this value: \"audio_aac_1,audio_dolby\". Related setting: The rendition groups that you include in your comma-separated list should all match values that you specify in the setting Audio group ID (AudioGroupId) for audio renditions in the same output group as this video rendition. Default behavior: If you don't specify anything here and for Audio group ID, MediaConvert puts each audio variant in its own audio rendition group and associates it with every video variant. Each value in your list appears in your HLS parent manifest in the EXT-X-STREAM-INF tag as the value for the AUDIO attribute. To continue the previous example, say that the file name for the child manifest for your video rendition is \"amazing_video_1.m3u8\". Then, in your parent manifest, each value will appear on separate lines, like this: #EXT-X-STREAM-INF:AUDIO=\"audio_aac_1\"... amazing_video_1.m3u8 #EXT-X-STREAM-INF:AUDIO=\"audio_dolby\"... amazing_video_1.m3u8", + "CmfcSettings$AudioGroupId": "Specify the audio rendition group for this audio rendition. Specify up to one value for each audio output in your output group. This value appears in your HLS parent manifest in the EXT-X-MEDIA tag of TYPE=AUDIO, as the value for the GROUP-ID attribute. For example, if you specify \"audio_aac_1\" for Audio group ID, it appears in your manifest like this: #EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID=\"audio_aac_1\". Related setting: To associate the rendition group that this audio track belongs to with a video rendition, include the same value that you provide here for that video output's setting Audio rendition sets.", + "CmfcSettings$AudioRenditionSets": "List the audio rendition groups that you want included with this video rendition. Use a comma-separated list. For example, say you want to include the audio rendition groups that have the audio group IDs \"audio_aac_1\" and \"audio_dolby\". Then you would specify this value: \"audio_aac_1,audio_dolby\". Related setting: The rendition groups that you include in your comma-separated list should all match values that you specify in the setting Audio group ID for audio renditions in the same output group as this video rendition. 
Default behavior: If you don't specify anything here and for Audio group ID, MediaConvert puts each audio variant in its own audio rendition group and associates it with every video variant. Each value in your list appears in your HLS parent manifest in the EXT-X-STREAM-INF tag as the value for the AUDIO attribute. To continue the previous example, say that the file name for the child manifest for your video rendition is \"amazing_video_1.m3u8\". Then, in your parent manifest, each value will appear on separate lines, like this: #EXT-X-STREAM-INF:AUDIO=\"audio_aac_1\"... amazing_video_1.m3u8 #EXT-X-STREAM-INF:AUDIO=\"audio_dolby\"... amazing_video_1.m3u8", "CreateJobRequest$ClientRequestToken": "Prevent duplicate jobs from being created and ensure idempotency for your requests. A client request token can be any string that includes up to 64 ASCII characters. If you reuse a client request token within one minute of a successful request, the API returns the job details of the original request instead. For more information see https://docs.aws.amazon.com/mediaconvert/latest/apireference/idempotency.html.", "CreateJobRequest$JobTemplate": "Optional. When you create a job, you can either specify a job template or specify the transcoding settings individually.", "CreateJobRequest$Queue": "Optional. When you create a job, you can specify a queue to send it to. If you don't specify, the job will go to the default queue. For more about queues, see the User Guide topic at https://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html.", @@ -5192,9 +5192,9 @@ "ListQueuesResponse$NextToken": "Use this string to request the next batch of queues.", "ListTagsForResourceRequest$Arn": "The Amazon Resource Name (ARN) of the resource that you want to list tags for. To get the ARN, send a GET request with the resource name.", "Mp4Settings$Mp4MajorBrand": "Overrides the \"Major Brand\" field in the output file. Usually not necessary to specify.", - "NielsenConfiguration$DistributorId": "Use Distributor ID (DistributorID) to specify the distributor ID that is assigned to your organization by Neilsen.", - "Output$Extension": "Use Extension (Extension) to specify the file extension for outputs in File output groups. If you do not specify a value, the service will use default extensions by container type as follows * MPEG-2 transport stream, m2ts * Quicktime, mov * MXF container, mxf * MPEG-4 container, mp4 * WebM container, webm * No Container, the service will use codec extensions (e.g. AAC, H265, H265, AC3)", - "OutputGroup$CustomName": "Use Custom Group Name (CustomName) to specify a name for the output group. This value is displayed on the console and can make your job settings JSON more human-readable. It does not affect your outputs. Use up to twelve characters that are either letters, numbers, spaces, or underscores.", + "NielsenConfiguration$DistributorId": "Use Distributor ID to specify the distributor ID that is assigned to your organization by Nielsen.", + "Output$Extension": "Use Extension to specify the file extension for outputs in File output groups. If you do not specify a value, the service will use default extensions by container type as follows * MPEG-2 transport stream, m2ts * Quicktime, mov * MXF container, mxf * MPEG-4 container, mp4 * WebM container, webm * No Container, the service will use codec extensions (e.g. AAC, H265, AC3)", + "OutputGroup$CustomName": "Use Custom Group Name to specify a name for the output group.
This value is displayed on the console and can make your job settings JSON more human-readable. It does not affect your outputs. Use up to twelve characters that are either letters, numbers, spaces, or underscores.", "OutputGroup$Name": "Name of the output group", "Preset$Arn": "An identifier for this resource that is unique within all of AWS.", "Preset$Category": "An optional category you create to organize your presets.", @@ -5228,16 +5228,16 @@ "__stringMax1000": { "base": null, "refs": { - "CmfcSettings$TimedMetadataSchemeIdUri": "Specify the event message box (eMSG) scheme ID URI (scheme_id_uri) for ID3 timed metadata in your output. For more information, see ISO/IEC 23009-1:2022 section 5.10.3.3.4 Semantics. Leave blank to use the default value: https://aomedia.org/emsg/ID3 When you specify a value for ID3 metadata scheme ID URI, you must also set ID3 metadata (timedMetadata) to Passthrough.", - "CmfcSettings$TimedMetadataValue": "Specify the event message box (eMSG) value for ID3 timed metadata in your output. For more information, see ISO/IEC 23009-1:2022 section 5.10.3.3.4 Semantics. When you specify a value for ID3 Metadata Value, you must also set ID3 metadata (timedMetadata) to Passthrough.", - "MpdSettings$TimedMetadataSchemeIdUri": "Specify the event message box (eMSG) scheme ID URI (scheme_id_uri) for ID3 timed metadata in your output. For more information, see ISO/IEC 23009-1:2022 section 5.10.3.3.4 Semantics. Leave blank to use the default value: https://aomedia.org/emsg/ID3 When you specify a value for ID3 metadata scheme ID URI, you must also set ID3 metadata (timedMetadata) to Passthrough.", - "MpdSettings$TimedMetadataValue": "Specify the event message box (eMSG) value for ID3 timed metadata in your output. For more information, see ISO/IEC 23009-1:2022 section 5.10.3.3.4 Semantics. When you specify a value for ID3 Metadata Value, you must also set ID3 metadata (timedMetadata) to Passthrough." + "CmfcSettings$TimedMetadataSchemeIdUri": "Specify the event message box (eMSG) scheme ID URI for ID3 timed metadata in your output. For more information, see ISO/IEC 23009-1:2022 section 5.10.3.3.4 Semantics. Leave blank to use the default value: https://aomedia.org/emsg/ID3 When you specify a value for ID3 metadata scheme ID URI, you must also set ID3 metadata to Passthrough.", + "CmfcSettings$TimedMetadataValue": "Specify the event message box (eMSG) value for ID3 timed metadata in your output. For more information, see ISO/IEC 23009-1:2022 section 5.10.3.3.4 Semantics. When you specify a value for ID3 Metadata Value, you must also set ID3 metadata to Passthrough.", + "MpdSettings$TimedMetadataSchemeIdUri": "Specify the event message box (eMSG) scheme ID URI for ID3 timed metadata in your output. For more information, see ISO/IEC 23009-1:2022 section 5.10.3.3.4 Semantics. Leave blank to use the default value: https://aomedia.org/emsg/ID3 When you specify a value for ID3 metadata scheme ID URI, you must also set ID3 metadata to Passthrough.", + "MpdSettings$TimedMetadataValue": "Specify the event message box (eMSG) value for ID3 timed metadata in your output. For more information, see ISO/IEC 23009-1:2022 section 5.10.3.3.4 Semantics. When you specify a value for ID3 Metadata Value, you must also set ID3 metadata to Passthrough." } }, "__stringMin0": { "base": null, "refs": { - "Output$Preset": "Use Preset (Preset) to specify a preset for your transcoding settings. Provide the system or custom preset name. 
You can specify either Preset (Preset) or Container settings (ContainerSettings), but not both." + "Output$Preset": "Use Preset to specify a preset for your transcoding settings. Provide the system or custom preset name. You can specify either Preset or Container settings, but not both." } }, "__stringMin1": { @@ -5248,16 +5248,16 @@ "DashAdditionalManifest$ManifestNameModifier": "Specify a name modifier that the service adds to the name of this manifest to make it different from the file names of the other main manifests in the output group. For example, say that the default main manifest for your DASH group is film-name.mpd. If you enter \"-no-premium\" for this setting, then the file name the service generates for this top-level manifest is film-name-no-premium.mpd.", "HlsAdditionalManifest$ManifestNameModifier": "Specify a name modifier that the service adds to the name of this manifest to make it different from the file names of the other main manifests in the output group. For example, say that the default main manifest for your HLS group is film-name.m3u8. If you enter \"-no-premium\" for this setting, then the file name the service generates for this top-level manifest is film-name-no-premium.m3u8. For HLS output groups, specify a manifestNameModifier that is different from the nameModifier of the output. The service uses the output name modifier to create unique names for the individual variant manifests.", "MsSmoothAdditionalManifest$ManifestNameModifier": "Specify a name modifier that the service adds to the name of this manifest to make it different from the file names of the other main manifests in the output group. For example, say that the default main manifest for your Microsoft Smooth group is film-name.ismv. If you enter \"-no-premium\" for this setting, then the file name the service generates for this top-level manifest is film-name-no-premium.ismv.", - "Output$NameModifier": "Use Name modifier (NameModifier) to have the service add a string to the end of each output filename. You specify the base filename as part of your destination URI. When you create multiple outputs in the same output group, Name modifier (NameModifier) is required. Name modifier also accepts format identifiers. For DASH ISO outputs, if you use the format identifiers $Number$ or $Time$ in one output, you must use them in the same way in all outputs of the output group.", + "Output$NameModifier": "Use Name modifier to have the service add a string to the end of each output filename. You specify the base filename as part of your destination URI. When you create multiple outputs in the same output group, Name modifier is required. Name modifier also accepts format identifiers. For DASH ISO outputs, if you use the format identifiers $Number$ or $Time$ in one output, you must use them in the same way in all outputs of the output group.", "__listOf__stringMin1$member": null } }, "__stringMin11Max11Pattern01D20305D205D": { "base": null, "refs": { - "Input$TimecodeStart": "Specify the timecode that you want the service to use for this input's initial frame. To use this setting, you must set the Timecode source setting, located under the input settings (InputTimecodeSource), to Specified start (SPECIFIEDSTART). For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode.", - "InputTemplate$TimecodeStart": "Specify the timecode that you want the service to use for this input's initial frame. 
To use this setting, you must set the Timecode source setting, located under the input settings (InputTimecodeSource), to Specified start (SPECIFIEDSTART). For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode.", - "MotionImageInserter$StartTime": "Specify when the motion overlay begins. Use timecode format (HH:MM:SS:FF or HH:MM:SS;FF). Make sure that the timecode you provide here takes into account how you have set up your timecode configuration under both job settings and input settings. The simplest way to do that is to set both to start at 0. If you need to set up your job to follow timecodes embedded in your source that don't start at zero, make sure that you specify a start time that is after the first embedded timecode. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/setting-up-timecode.html Find job-wide and input timecode configuration settings in your JSON job settings specification at settings>timecodeConfig>source and settings>inputs>timecodeSource." + "Input$TimecodeStart": "Specify the timecode that you want the service to use for this input's initial frame. To use this setting, you must set the Timecode source setting, located under the input settings, to Specified start. For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode.", + "InputTemplate$TimecodeStart": "Specify the timecode that you want the service to use for this input's initial frame. To use this setting, you must set the Timecode source setting, located under the input settings, to Specified start. For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode.", + "MotionImageInserter$StartTime": "Specify when the motion overlay begins. Use timecode format (HH:MM:SS:FF or HH:MM:SS;FF). Make sure that the timecode you provide here takes into account how you have set up your timecode configuration under both job settings and input settings. The simplest way to do that is to set both to start at 0. If you need to set up your job to follow timecodes embedded in your source that don't start at zero, make sure that you specify a start time that is after the first embedded timecode. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/setting-up-timecode.html" } }, "__stringMin14PatternS3BmpBMPPngPNGHttpsBmpBMPPngPNG": { @@ -5300,7 +5300,7 @@ "__stringMin1Max100000": { "base": null, "refs": { - "NexGuardFileMarkerSettings$License": "Use the base64 license string that Nagra provides you. Enter it directly in your JSON job specification or in the console. Required when you include Nagra NexGuard File Marker watermarking (NexGuardWatermarkingSettings) in your job." + "NexGuardFileMarkerSettings$License": "Use the base64 license string that Nagra provides you. Enter it directly in your JSON job specification or in the console. Required when you include Nagra NexGuard File Marker watermarking in your job." } }, "__stringMin1Max20": { @@ -5317,7 +5317,7 @@ "DvbNitSettings$NetworkName": "The network name text placed in the network_name_descriptor inside the Network Information Table. Maximum length is 256 characters.", "DvbSdtSettings$ServiceName": "The service name placed in the service_descriptor in the Service Description Table. Maximum length is 256 characters.", "DvbSdtSettings$ServiceProviderName": "The service provider name placed in the service_descriptor in the Service Description Table. 
Maximum length is 256 characters.", - "NexGuardFileMarkerSettings$Preset": "Enter one of the watermarking preset strings that Nagra provides you. Required when you include Nagra NexGuard File Marker watermarking (NexGuardWatermarkingSettings) in your job." + "NexGuardFileMarkerSettings$Preset": "Enter one of the watermarking preset strings that Nagra provides you. Required when you include Nagra NexGuard File Marker watermarking in your job." } }, "__stringMin1Max50": { @@ -5367,7 +5367,7 @@ "base": null, "refs": { "TeletextDestinationSettings$PageNumber": "Set pageNumber to the Teletext page number for the destination captions for this output. This value must be a three-digit hexadecimal string; strings ending in -FF are invalid. If you are passing through the entire set of Teletext data, do not use this field.", - "TeletextSourceSettings$PageNumber": "Use Page Number (PageNumber) to specify the three-digit hexadecimal page number that will be used for Teletext captions. Do not use this setting if you are passing through teletext from the input source to output." + "TeletextSourceSettings$PageNumber": "Use Page Number to specify the three-digit hexadecimal page number that will be used for Teletext captions. Do not use this setting if you are passing through teletext from the input source to output." } }, "__stringMin3Max3PatternAZaZ3": { @@ -5394,17 +5394,17 @@ "__stringPattern": { "base": null, "refs": { - "TimecodeBurnin$Prefix": "Use Prefix (Prefix) to place ASCII characters before any burned-in timecode. For example, a prefix of \"EZ-\" will result in the timecode \"EZ-00:00:00:00\". Provide either the characters themselves or the ASCII code equivalents. The supported range of characters is 0x20 through 0x7e. This includes letters, numbers, and all special characters represented on a standard English keyboard." + "TimecodeBurnin$Prefix": "Use Prefix to place ASCII characters before any burned-in timecode. For example, a prefix of \"EZ-\" will result in the timecode \"EZ-00:00:00:00\". Provide either the characters themselves or the ASCII code equivalents. The supported range of characters is 0x20 through 0x7e. This includes letters, numbers, and all special characters represented on a standard English keyboard." } }, "__stringPattern010920405090509092": { "base": null, "refs": { - "Id3Insertion$Timecode": "Provide a Timecode (TimeCode) in HH:MM:SS:FF or HH:MM:SS;FF format.", - "InputClipping$EndTimecode": "Set End timecode (EndTimecode) to the end of the portion of the input you are clipping. The frame corresponding to the End timecode value is included in the clip. Start timecode or End timecode may be left blank, but not both. Use the format HH:MM:SS:FF or HH:MM:SS;FF, where HH is the hour, MM is the minute, SS is the second, and FF is the frame number. When choosing this value, take into account your setting for timecode source under input settings (InputTimecodeSource). For example, if you have embedded timecodes that start at 01:00:00:00 and you want your clip to end six minutes into the video, use 01:06:00:00.", - "InputClipping$StartTimecode": "Set Start timecode (StartTimecode) to the beginning of the portion of the input you are clipping. The frame corresponding to the Start timecode value is included in the clip. Start timecode or End timecode may be left blank, but not both. Use the format HH:MM:SS:FF or HH:MM:SS;FF, where HH is the hour, MM is the minute, SS is the second, and FF is the frame number. 
When choosing this value, take into account your setting for Input timecode source. For example, if you have embedded timecodes that start at 01:00:00:00 and you want your clip to begin five minutes into the video, use 01:05:00:00.", - "TimecodeConfig$Anchor": "If you use an editing platform that relies on an anchor timecode, use Anchor Timecode (Anchor) to specify a timecode that will match the input video frame to the output video frame. Use 24-hour format with frame number, (HH:MM:SS:FF) or (HH:MM:SS;FF). This setting ignores frame rate conversion. System behavior for Anchor Timecode varies depending on your setting for Source (TimecodeSource). * If Source (TimecodeSource) is set to Specified Start (SPECIFIEDSTART), the first input frame is the specified value in Start Timecode (Start). Anchor Timecode (Anchor) and Start Timecode (Start) are used calculate output timecode. * If Source (TimecodeSource) is set to Start at 0 (ZEROBASED) the first frame is 00:00:00:00. * If Source (TimecodeSource) is set to Embedded (EMBEDDED), the first frame is the timecode value on the first input frame of the input.", - "TimecodeConfig$Start": "Only use when you set Source (TimecodeSource) to Specified start (SPECIFIEDSTART). Use Start timecode (Start) to specify the timecode for the initial frame. Use 24-hour format with frame number, (HH:MM:SS:FF) or (HH:MM:SS;FF)." + "Id3Insertion$Timecode": "Provide a Timecode in HH:MM:SS:FF or HH:MM:SS;FF format.", + "InputClipping$EndTimecode": "Set End timecode to the end of the portion of the input you are clipping. The frame corresponding to the End timecode value is included in the clip. Start timecode or End timecode may be left blank, but not both. Use the format HH:MM:SS:FF or HH:MM:SS;FF, where HH is the hour, MM is the minute, SS is the second, and FF is the frame number. When choosing this value, take into account your setting for timecode source under input settings. For example, if you have embedded timecodes that start at 01:00:00:00 and you want your clip to end six minutes into the video, use 01:06:00:00.", + "InputClipping$StartTimecode": "Set Start timecode to the beginning of the portion of the input you are clipping. The frame corresponding to the Start timecode value is included in the clip. Start timecode or End timecode may be left blank, but not both. Use the format HH:MM:SS:FF or HH:MM:SS;FF, where HH is the hour, MM is the minute, SS is the second, and FF is the frame number. When choosing this value, take into account your setting for Input timecode source. For example, if you have embedded timecodes that start at 01:00:00:00 and you want your clip to begin five minutes into the video, use 01:05:00:00.", + "TimecodeConfig$Anchor": "If you use an editing platform that relies on an anchor timecode, use Anchor Timecode to specify a timecode that will match the input video frame to the output video frame. Use 24-hour format with frame number, (HH:MM:SS:FF) or (HH:MM:SS;FF). This setting ignores frame rate conversion. System behavior for Anchor Timecode varies depending on your setting for Source. * If Source is set to Specified Start, the first input frame is the specified value in Start Timecode. Anchor Timecode and Start Timecode are used to calculate output timecode. * If Source is set to Start at 0, the first frame is 00:00:00:00. * If Source is set to Embedded, the first frame is the timecode value on the first input frame of the input.", + "TimecodeConfig$Start": "Only use when you set Source to Specified start.
Use Start timecode to specify the timecode for the initial frame. Use 24-hour format with frame number, (HH:MM:SS:FF) or (HH:MM:SS;FF)." } }, "__stringPattern01D20305D205D": { @@ -5416,7 +5416,7 @@ "__stringPattern0940191020191209301": { "base": null, "refs": { - "TimecodeConfig$TimestampOffset": "Only applies to outputs that support program-date-time stamp. Use Timestamp offset (TimestampOffset) to overwrite the timecode date without affecting the time and frame number. Provide the new date as a string in the format \"yyyy-mm-dd\". To use Time stamp offset, you must also enable Insert program-date-time (InsertProgramDateTime) in the output settings. For example, if the date part of your timecodes is 2002-1-25 and you want to change it to one year later, set Timestamp offset (TimestampOffset) to 2003-1-25." + "TimecodeConfig$TimestampOffset": "Only applies to outputs that support program-date-time stamp. Use Timestamp offset to overwrite the timecode date without affecting the time and frame number. Provide the new date as a string in the format \"yyyy-mm-dd\". To use Timestamp offset, you must also enable Insert program-date-time in the output settings. For example, if the date part of your timecodes is 2002-1-25 and you want to change it to one year later, set Timestamp offset to 2003-1-25." } }, "__stringPattern09aFAF809aFAF409aFAF409aFAF409aFAF12": { @@ -5428,26 +5428,26 @@ "__stringPattern0xAFaF0908190908": { "base": null, "refs": { - "NielsenNonLinearWatermarkSettings$CbetSourceId": "Use the CSID that Nielsen provides to you. This CBET source ID should be unique to your Nielsen account but common to all of your output assets that have CBET watermarking. Required when you choose a value for the setting Watermark types (ActiveWatermarkProcess) that includes CBET." + "NielsenNonLinearWatermarkSettings$CbetSourceId": "Use the CSID that Nielsen provides to you. This CBET source ID should be unique to your Nielsen account but common to all of your output assets that have CBET watermarking. Required when you choose a value for the setting Watermark types that includes CBET." } }, "__stringPatternAZaZ0902": { "base": null, "refs": { - "Id3Insertion$Id3": "Use ID3 tag (Id3) to provide a fully formed ID3 tag in base64-encode format.", - "S3EncryptionSettings$KmsEncryptionContext": "Optionally, specify the encryption context that you want to use alongside your KMS key. AWS KMS uses this encryption context as additional authenticated data (AAD) to support authenticated encryption. This value must be a base64-encoded UTF-8 string holding JSON which represents a string-string map. To use this setting, you must also set Server-side encryption (S3ServerSideEncryptionType) to AWS KMS (SERVER_SIDE_ENCRYPTION_KMS). For more information about encryption context, see: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context." + "Id3Insertion$Id3": "Use ID3 tag to provide a fully formed ID3 tag in base64-encoded format.", + "S3EncryptionSettings$KmsEncryptionContext": "Optionally, specify the encryption context that you want to use alongside your KMS key. AWS KMS uses this encryption context as additional authenticated data (AAD) to support authenticated encryption. This value must be a base64-encoded UTF-8 string holding JSON which represents a string-string map. To use this setting, you must also set Server-side encryption to AWS KMS. For more information about encryption context, see: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context."
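As a rough illustration of the S3EncryptionSettings fields documented above, here is a minimal aws-sdk-go sketch; the KMS key ARN and the encryption-context JSON are placeholder values, and the enum value is written as a string literal:

```go
package main

import (
	"encoding/base64"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	// The encryption context must be a base64-encoded UTF-8 string holding
	// JSON that represents a string-string map.
	encCtx := base64.StdEncoding.EncodeToString([]byte(`{"project":"example"}`))

	dest := &mediaconvert.DestinationSettings{
		S3Settings: &mediaconvert.S3DestinationSettings{
			Encryption: &mediaconvert.S3EncryptionSettings{
				// Server-side encryption must be AWS KMS for the key ARN and
				// encryption context to take effect.
				EncryptionType:       aws.String("SERVER_SIDE_ENCRYPTION_KMS"),
				KmsKeyArn:            aws.String("arn:aws:kms:us-east-1:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"),
				KmsEncryptionContext: aws.String(encCtx),
			},
		},
	}
	fmt.Println(dest)
}
```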
} }, "__stringPatternAZaZ0932": { "base": null, "refs": { - "StaticKeyProvider$StaticKeyValue": "Relates to DRM implementation. Use a 32-character hexidecimal string to specify Key Value (StaticKeyValue)." + "StaticKeyProvider$StaticKeyValue": "Relates to DRM implementation. Use a 32-character hexidecimal string to specify Key Value." } }, "__stringPatternAZaZ23AZaZ": { "base": null, "refs": { - "AudioDescription$CustomLanguageCode": "Specify the language for this audio output track. The service puts this language code into your output audio track when you set Language code control (AudioLanguageCodeControl) to Use configured (USE_CONFIGURED). The service also uses your specified custom language code when you set Language code control (AudioLanguageCodeControl) to Follow input (FOLLOW_INPUT), but your input file doesn't specify a language code. For all outputs, you can use an ISO 639-2 or ISO 639-3 code. For streaming outputs, you can also use any other code in the full RFC-5646 specification. Streaming outputs are those that are in one of the following output groups: CMAF, DASH ISO, Apple HLS, or Microsoft Smooth Streaming.", + "AudioDescription$CustomLanguageCode": "Specify the language for this audio output track. The service puts this language code into your output audio track when you set Language code control to Use configured. The service also uses your specified custom language code when you set Language code control to Follow input, but your input file doesn't specify a language code. For all outputs, you can use an ISO 639-2 or ISO 639-3 code. For streaming outputs, you can also use any other code in the full RFC-5646 specification. Streaming outputs are those that are in one of the following output groups: CMAF, DASH ISO, Apple HLS, or Microsoft Smooth Streaming.", "CaptionDescription$CustomLanguageCode": "Specify the language for this captions output track. For most captions output formats, the encoder puts this language information in the output captions metadata. If your output captions format is DVB-Sub or Burn in, the encoder uses this language information when automatically selecting the font script for rendering the captions text. For all outputs, you can use an ISO 639-2 or ISO 639-3 code. For streaming outputs, you can also use any other code in the full RFC-5646 specification. Streaming outputs are those that are in one of the following output groups: CMAF, DASH ISO, Apple HLS, or Microsoft Smooth Streaming.", "CaptionDescriptionPreset$CustomLanguageCode": "Specify the language for this captions output track. For most captions output formats, the encoder puts this language information in the output captions metadata. If your output captions format is DVB-Sub or Burn in, the encoder uses this language information when automatically selecting the font script for rendering the captions text. For all outputs, you can use an ISO 639-2 or ISO 639-3 code. For streaming outputs, you can also use any other code in the full RFC-5646 specification. Streaming outputs are those that are in one of the following output groups: CMAF, DASH ISO, Apple HLS, or Microsoft Smooth Streaming." } @@ -5462,7 +5462,7 @@ "__stringPatternArnAwsUsGovCnKmsAZ26EastWestCentralNorthSouthEastWest1912D12KeyAFAF098AFAF094AFAF094AFAF094AFAF0912MrkAFAF0932": { "base": null, "refs": { - "S3EncryptionSettings$KmsKeyArn": "Optionally, specify the customer master key (CMK) that you want to use to encrypt the data key that AWS uses to encrypt your output content. Enter the Amazon Resource Name (ARN) of the CMK. 
To use this setting, you must also set Server-side encryption (S3ServerSideEncryptionType) to AWS KMS (SERVER_SIDE_ENCRYPTION_KMS). If you set Server-side encryption to AWS KMS but don't specify a CMK here, AWS uses the AWS managed CMK associated with Amazon S3." + "S3EncryptionSettings$KmsKeyArn": "Optionally, specify the customer master key (CMK) that you want to use to encrypt the data key that AWS uses to encrypt your output content. Enter the Amazon Resource Name (ARN) of the CMK. To use this setting, you must also set Server-side encryption to AWS KMS. If you set Server-side encryption to AWS KMS but don't specify a CMK here, AWS uses the AWS managed CMK associated with Amazon S3." } }, "__stringPatternDD": { @@ -5494,14 +5494,14 @@ "__stringPatternS3": { "base": null, "refs": { - "CmafGroupSettings$Destination": "Use Destination (Destination) to specify the S3 output location and the output filename base. Destination accepts format identifiers. If you do not specify the base filename in the URI, the service will use the filename of the input file. If your job has multiple inputs, the service uses the filename of the first input file.", - "DashIsoGroupSettings$Destination": "Use Destination (Destination) to specify the S3 output location and the output filename base. Destination accepts format identifiers. If you do not specify the base filename in the URI, the service will use the filename of the input file. If your job has multiple inputs, the service uses the filename of the first input file.", - "FileGroupSettings$Destination": "Use Destination (Destination) to specify the S3 output location and the output filename base. Destination accepts format identifiers. If you do not specify the base filename in the URI, the service will use the filename of the input file. If your job has multiple inputs, the service uses the filename of the first input file.", - "HlsGroupSettings$Destination": "Use Destination (Destination) to specify the S3 output location and the output filename base. Destination accepts format identifiers. If you do not specify the base filename in the URI, the service will use the filename of the input file. If your job has multiple inputs, the service uses the filename of the first input file.", + "CmafGroupSettings$Destination": "Use Destination to specify the S3 output location and the output filename base. Destination accepts format identifiers. If you do not specify the base filename in the URI, the service will use the filename of the input file. If your job has multiple inputs, the service uses the filename of the first input file.", + "DashIsoGroupSettings$Destination": "Use Destination to specify the S3 output location and the output filename base. Destination accepts format identifiers. If you do not specify the base filename in the URI, the service will use the filename of the input file. If your job has multiple inputs, the service uses the filename of the first input file.", + "FileGroupSettings$Destination": "Use Destination to specify the S3 output location and the output filename base. Destination accepts format identifiers. If you do not specify the base filename in the URI, the service will use the filename of the input file. If your job has multiple inputs, the service uses the filename of the first input file.", + "HlsGroupSettings$Destination": "Use Destination to specify the S3 output location and the output filename base. Destination accepts format identifiers. 
If you do not specify the base filename in the URI, the service will use the filename of the input file. If your job has multiple inputs, the service uses the filename of the first input file.", "KantarWatermarkSettings$LogDestination": "Optional. Specify the Amazon S3 bucket where you want MediaConvert to store your Kantar watermark XML logs. When you don't specify a bucket, MediaConvert doesn't save these logs. Note that your MediaConvert service role must provide access to this location. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/iam-role.html", - "MsSmoothGroupSettings$Destination": "Use Destination (Destination) to specify the S3 output location and the output filename base. Destination accepts format identifiers. If you do not specify the base filename in the URI, the service will use the filename of the input file. If your job has multiple inputs, the service uses the filename of the first input file.", - "NielsenNonLinearWatermarkSettings$AdiFilename": "Optional. Use this setting when you want the service to include an ADI file in the Nielsen metadata .zip file. To provide an ADI file, store it in Amazon S3 and provide a URL to it here. The URL should be in the following format: S3://bucket/path/ADI-file. For more information about the metadata .zip file, see the setting Metadata destination (metadataDestination).", - "NielsenNonLinearWatermarkSettings$MetadataDestination": "Specify the Amazon S3 location where you want MediaConvert to save your Nielsen non-linear metadata .zip file. This Amazon S3 bucket must be in the same Region as the one where you do your MediaConvert transcoding. If you want to include an ADI file in this .zip file, use the setting ADI file (adiFilename) to specify it. MediaConvert delivers the Nielsen metadata .zip files only to your metadata destination Amazon S3 bucket. It doesn't deliver the .zip files to Nielsen. You are responsible for delivering the metadata .zip files to Nielsen." + "MsSmoothGroupSettings$Destination": "Use Destination to specify the S3 output location and the output filename base. Destination accepts format identifiers. If you do not specify the base filename in the URI, the service will use the filename of the input file. If your job has multiple inputs, the service uses the filename of the first input file.", + "NielsenNonLinearWatermarkSettings$AdiFilename": "Optional. Use this setting when you want the service to include an ADI file in the Nielsen metadata .zip file. To provide an ADI file, store it in Amazon S3 and provide a URL to it here. The URL should be in the following format: S3://bucket/path/ADI-file. For more information about the metadata .zip file, see the setting Metadata destination.", + "NielsenNonLinearWatermarkSettings$MetadataDestination": "Specify the Amazon S3 location where you want MediaConvert to save your Nielsen non-linear metadata .zip file. This Amazon S3 bucket must be in the same Region as the one where you do your MediaConvert transcoding. If you want to include an ADI file in this .zip file, use the setting ADI file to specify it. MediaConvert delivers the Nielsen metadata .zip files only to your metadata destination Amazon S3 bucket. It doesn't deliver the .zip files to Nielsen. You are responsible for delivering the metadata .zip files to Nielsen." 
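The Destination, Custom Group Name, Extension, and Name modifier settings described in the entries above fit together roughly as in this aws-sdk-go sketch; the bucket path, group name, and modifier are placeholders:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	og := &mediaconvert.OutputGroup{
		// Custom group name: up to twelve letters, numbers, spaces, or underscores.
		CustomName: aws.String("mp4 outputs"),
		OutputGroupSettings: &mediaconvert.OutputGroupSettings{
			Type: aws.String("FILE_GROUP_SETTINGS"),
			FileGroupSettings: &mediaconvert.FileGroupSettings{
				// Destination: the S3 output location plus the output filename base.
				Destination: aws.String("s3://DOC-EXAMPLE-BUCKET/outputs/film-name"),
			},
		},
		Outputs: []*mediaconvert.Output{{
			// Name modifier is appended to the base filename; it is required
			// when an output group contains more than one output.
			NameModifier: aws.String("-1080p"),
			Extension:    aws.String("mp4"),
		}},
	}
	fmt.Println(og)
}
```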
} }, "__stringPatternS3ASSETMAPXml": { @@ -5514,7 +5514,7 @@ "base": null, "refs": { "AudioSelector$ExternalAudioFileInput": "Specifies audio data from an external file source.", - "Input$FileInput": "Specify the source file for your transcoding job. You can use multiple inputs in a single job. The service concatenates these inputs, in the order that you specify them in the job, to create the outputs. If your input format is IMF, specify your input by providing the path to your CPL. For example, \"s3://bucket/vf/cpl.xml\". If the CPL is in an incomplete IMP, make sure to use *Supplemental IMPs* (SupplementalImps) to specify any supplemental IMPs that contain assets referenced by the CPL." + "Input$FileInput": "Specify the source file for your transcoding job. You can use multiple inputs in a single job. The service concatenates these inputs, in the order that you specify them in the job, to create the outputs. If your input format is IMF, specify your input by providing the path to your CPL. For example, \"s3://bucket/vf/cpl.xml\". If the CPL is in an incomplete IMP, make sure to use *Supplemental IMPs* to specify any supplemental IMPs that contain assets referenced by the CPL." } }, "__stringPatternSNManifestConfirmConditionNotificationNS": { @@ -5526,7 +5526,7 @@ "__stringPatternSNSignalProcessingNotificationNS": { "base": null, "refs": { - "EsamSignalProcessingNotification$SccXml": "Provide your ESAM SignalProcessingNotification XML document inside your JSON job settings. Form the XML document as per OC-SP-ESAM-API-I03-131025. The transcoder will use the signal processing instructions in the message that you supply. Provide your ESAM SignalProcessingNotification XML document inside your JSON job settings. For your MPEG2-TS file outputs, if you want the service to place SCTE-35 markers at the insertion points you specify in the XML document, you must also enable SCTE-35 ESAM (scte35Esam). Note that you can either specify an ESAM XML document or enable SCTE-35 passthrough. You can't do both." + "EsamSignalProcessingNotification$SccXml": "Provide your ESAM SignalProcessingNotification XML document inside your JSON job settings. Form the XML document as per OC-SP-ESAM-API-I03-131025. The transcoder will use the signal processing instructions in the message that you supply. For your MPEG2-TS file outputs, if you want the service to place SCTE-35 markers at the insertion points you specify in the XML document, you must also enable SCTE-35 ESAM. Note that you can either specify an ESAM XML document or enable SCTE-35 passthrough. You can't do both." } }, "__stringPatternW": { @@ -5560,4 +5560,4 @@ } } } -} \ No newline at end of file +} diff --git a/models/apis/omics/2022-11-28/docs-2.json b/models/apis/omics/2022-11-28/docs-2.json index 47277a27153..bb9dfa1fb29 100644 --- a/models/apis/omics/2022-11-28/docs-2.json +++ b/models/apis/omics/2022-11-28/docs-2.json @@ -1,6 +1,6 @@ { "version": "2.0", - "service": "

This is the Amazon Omics API Reference. For an introduction to the service, see What is Amazon Omics? in the Amazon Omics User Guide.

", + "service": "

This is the AWS HealthOmics API Reference. For an introduction to the service, see What is AWS HealthOmics? in the AWS HealthOmics User Guide.

", "operations": { "AbortMultipartReadSetUpload": "

Stops a multipart upload.

", "BatchDeleteReadSet": "

Deletes one or more read sets.

", diff --git a/models/apis/omics/2022-11-28/endpoint-tests-1.json b/models/apis/omics/2022-11-28/endpoint-tests-1.json index 8ff05e264f8..de9c6b65001 100644 --- a/models/apis/omics/2022-11-28/endpoint-tests-1.json +++ b/models/apis/omics/2022-11-28/endpoint-tests-1.json @@ -8,9 +8,9 @@ } }, "params": { - "UseFIPS": true, "UseDualStack": true, - "Region": "us-gov-east-1" + "Region": "us-gov-east-1", + "UseFIPS": true } }, { @@ -21,9 +21,9 @@ } }, "params": { - "UseFIPS": true, "UseDualStack": false, - "Region": "us-gov-east-1" + "Region": "us-gov-east-1", + "UseFIPS": true } }, { @@ -34,9 +34,9 @@ } }, "params": { - "UseFIPS": false, "UseDualStack": true, - "Region": "us-gov-east-1" + "Region": "us-gov-east-1", + "UseFIPS": false } }, { @@ -47,9 +47,9 @@ } }, "params": { - "UseFIPS": false, "UseDualStack": false, - "Region": "us-gov-east-1" + "Region": "us-gov-east-1", + "UseFIPS": false } }, { @@ -60,9 +60,9 @@ } }, "params": { - "UseFIPS": true, "UseDualStack": true, - "Region": "cn-north-1" + "Region": "cn-north-1", + "UseFIPS": true } }, { @@ -73,9 +73,9 @@ } }, "params": { - "UseFIPS": true, "UseDualStack": false, - "Region": "cn-north-1" + "Region": "cn-north-1", + "UseFIPS": true } }, { @@ -86,9 +86,9 @@ } }, "params": { - "UseFIPS": false, "UseDualStack": true, - "Region": "cn-north-1" + "Region": "cn-north-1", + "UseFIPS": false } }, { @@ -99,9 +99,9 @@ } }, "params": { - "UseFIPS": false, "UseDualStack": false, - "Region": "cn-north-1" + "Region": "cn-north-1", + "UseFIPS": false } }, { @@ -110,9 +110,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseFIPS": true, "UseDualStack": true, - "Region": "us-iso-east-1" + "Region": "us-iso-east-1", + "UseFIPS": true } }, { @@ -123,9 +123,9 @@ } }, "params": { - "UseFIPS": true, "UseDualStack": false, - "Region": "us-iso-east-1" + "Region": "us-iso-east-1", + "UseFIPS": true } }, { @@ -134,9 +134,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseFIPS": false, "UseDualStack": true, - "Region": "us-iso-east-1" + "Region": "us-iso-east-1", + "UseFIPS": false } }, { @@ -147,9 +147,9 @@ } }, "params": { - "UseFIPS": false, "UseDualStack": false, - "Region": "us-iso-east-1" + "Region": "us-iso-east-1", + "UseFIPS": false } }, { @@ -160,9 +160,9 @@ } }, "params": { - "UseFIPS": true, "UseDualStack": true, - "Region": "us-east-1" + "Region": "us-east-1", + "UseFIPS": true } }, { @@ -173,9 +173,9 @@ } }, "params": { - "UseFIPS": true, "UseDualStack": false, - "Region": "us-east-1" + "Region": "us-east-1", + "UseFIPS": true } }, { @@ -186,9 +186,9 @@ } }, "params": { - "UseFIPS": false, "UseDualStack": true, - "Region": "us-east-1" + "Region": "us-east-1", + "UseFIPS": false } }, { @@ -199,9 +199,9 @@ } }, "params": { - "UseFIPS": false, "UseDualStack": false, - "Region": "us-east-1" + "Region": "us-east-1", + "UseFIPS": false } }, { @@ -210,9 +210,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseFIPS": true, "UseDualStack": true, - "Region": "us-isob-east-1" + "Region": "us-isob-east-1", + "UseFIPS": true } }, { @@ -223,9 +223,9 @@ } }, "params": { - "UseFIPS": true, "UseDualStack": false, - "Region": "us-isob-east-1" + "Region": "us-isob-east-1", + "UseFIPS": true } }, { @@ -234,9 +234,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseFIPS": false, "UseDualStack": true, - "Region": "us-isob-east-1" + 
"Region": "us-isob-east-1", + "UseFIPS": false } }, { @@ -247,9 +247,9 @@ } }, "params": { - "UseFIPS": false, "UseDualStack": false, - "Region": "us-isob-east-1" + "Region": "us-isob-east-1", + "UseFIPS": false } }, { @@ -260,9 +260,9 @@ } }, "params": { - "UseFIPS": false, "UseDualStack": false, "Region": "us-east-1", + "UseFIPS": false, "Endpoint": "https://example.com" } }, @@ -272,9 +272,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseFIPS": true, "UseDualStack": false, "Region": "us-east-1", + "UseFIPS": true, "Endpoint": "https://example.com" } }, @@ -284,9 +284,9 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseFIPS": false, "UseDualStack": true, "Region": "us-east-1", + "UseFIPS": false, "Endpoint": "https://example.com" } } diff --git a/models/apis/opensearchserverless/2021-11-01/api-2.json b/models/apis/opensearchserverless/2021-11-01/api-2.json index 42cb440fb74..7d2c9256f75 100644 --- a/models/apis/opensearchserverless/2021-11-01/api-2.json +++ b/models/apis/opensearchserverless/2021-11-01/api-2.json @@ -1074,7 +1074,8 @@ "CollectionType": { "enum": [ "SEARCH", - "TIMESERIES" + "TIMESERIES", + "VECTORSEARCH" ], "type": "string" }, @@ -2505,7 +2506,7 @@ "type": "string" }, "samlMetadata": { - "max": 20480, + "max": 51200, "min": 1, "pattern": "[\\u0009\\u000A\\u000D\\u0020-\\u007E\\u00A1-\\u00FF]+", "type": "string" diff --git a/models/apis/opensearchserverless/2021-11-01/docs-2.json b/models/apis/opensearchserverless/2021-11-01/docs-2.json index 82005ac2c44..efb00e97528 100644 --- a/models/apis/opensearchserverless/2021-11-01/docs-2.json +++ b/models/apis/opensearchserverless/2021-11-01/docs-2.json @@ -588,7 +588,7 @@ } }, "OcuLimitExceededException": { - "base": "

OCU Limit Exceeded for service limits

", + "base": "

Thrown when the collection you're attempting to create results in a number of search or indexing OCUs that exceeds the account limit.

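As context, a minimal aws-sdk-go sketch of the CreateCollection call that can raise this exception, using the VECTORSEARCH collection type added in this release; the collection name is a placeholder:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/opensearchserverless"
)

func main() {
	svc := opensearchserverless.New(session.Must(session.NewSession()))

	// Creating a collection consumes search and indexing OCUs; if the result
	// would exceed the account limit, the call returns OcuLimitExceededException.
	out, err := svc.CreateCollection(&opensearchserverless.CreateCollectionInput{
		Name: aws.String("my-vector-collection"),
		Type: aws.String("VECTORSEARCH"),
	})
	if err != nil {
		fmt.Println("create failed:", err)
		return
	}
	fmt.Println(out)
}
```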
", "refs": { } }, diff --git a/models/apis/polly/2016-06-10/api-2.json b/models/apis/polly/2016-06-10/api-2.json index a16374dee0b..ddda560128b 100644 --- a/models/apis/polly/2016-06-10/api-2.json +++ b/models/apis/polly/2016-06-10/api-2.json @@ -382,7 +382,8 @@ "yue-CN", "ar-AE", "fi-FI", - "en-IE" + "en-IE", + "nl-BE" ] }, "LanguageCodeList":{ @@ -860,7 +861,8 @@ "Kazuha", "Tomoko", "Niamh", - "Sofie" + "Sofie", + "Lisa" ] }, "VoiceList":{ diff --git a/models/apis/route53/2013-04-01/docs-2.json b/models/apis/route53/2013-04-01/docs-2.json index fd3bc9480ff..8a048a2234e 100644 --- a/models/apis/route53/2013-04-01/docs-2.json +++ b/models/apis/route53/2013-04-01/docs-2.json @@ -5,7 +5,7 @@ "ActivateKeySigningKey": "

Activates a key-signing key (KSK) so that it can be used for signing by DNSSEC. This operation changes the KSK status to ACTIVE.

", "AssociateVPCWithHostedZone": "

Associates an Amazon VPC with a private hosted zone.

To perform the association, the VPC and the private hosted zone must already exist. You can't convert a public hosted zone into a private hosted zone.

If you want to associate a VPC that was created by using one Amazon Web Services account with a private hosted zone that was created by using a different account, the Amazon Web Services account that created the private hosted zone must first submit a CreateVPCAssociationAuthorization request. Then the account that created the VPC must submit an AssociateVPCWithHostedZone request.

When granting access, the hosted zone and the Amazon VPC must belong to the same partition. A partition is a group of Amazon Web Services Regions. Each Amazon Web Services account is scoped to one partition.

The following are the supported partitions:

  • aws - Amazon Web Services Regions

  • aws-cn - China Regions

  • aws-us-gov - Amazon Web Services GovCloud (US) Region

For more information, see Access Management in the Amazon Web Services General Reference.

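A sketch of the two-step cross-account flow described above, in aws-sdk-go; the hosted zone ID and VPC ID are placeholders, and in practice each step runs under a different account's credentials (both use the default session here for brevity):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/route53"
)

func main() {
	vpc := &route53.VPC{
		VPCId:     aws.String("vpc-0123456789abcdef0"),
		VPCRegion: aws.String("us-east-1"),
	}

	// Step 1: the account that created the private hosted zone authorizes the association.
	zoneOwner := route53.New(session.Must(session.NewSession()))
	if _, err := zoneOwner.CreateVPCAssociationAuthorization(&route53.CreateVPCAssociationAuthorizationInput{
		HostedZoneId: aws.String("Z0123456789EXAMPLE"),
		VPC:          vpc,
	}); err != nil {
		fmt.Println("authorize failed:", err)
		return
	}

	// Step 2: the account that created the VPC completes the association.
	vpcOwner := route53.New(session.Must(session.NewSession()))
	if _, err := vpcOwner.AssociateVPCWithHostedZone(&route53.AssociateVPCWithHostedZoneInput{
		HostedZoneId: aws.String("Z0123456789EXAMPLE"),
		VPC:          vpc,
	}); err != nil {
		fmt.Println("associate failed:", err)
		return
	}
	fmt.Println("association complete")
}
```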
", "ChangeCidrCollection": "

Creates, changes, or deletes CIDR blocks within a collection. Contains authoritative IP information mapping blocks to one or multiple locations.

A change request can update multiple locations in a collection at a time, which is helpful if you want to move one or more CIDR blocks from one location to another in one transaction, without downtime.

Limits

The max number of CIDR blocks included in the request is 1000. As a result, big updates require multiple API calls.

PUT and DELETE_IF_EXISTS

Use ChangeCidrCollection to perform the following actions:

  • PUT: Create a CIDR block within the specified collection.

  • DELETE_IF_EXISTS: Delete an existing CIDR block from the collection.

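A minimal aws-sdk-go sketch of a PUT change against an existing collection; the collection ID, location name, and CIDR block are placeholders:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/route53"
)

func main() {
	svc := route53.New(session.Must(session.NewSession()))

	// PUT creates CIDR blocks under a named location within the collection;
	// DELETE_IF_EXISTS would remove them instead.
	_, err := svc.ChangeCidrCollection(&route53.ChangeCidrCollectionInput{
		Id: aws.String("cidr-collection-id"),
		Changes: []*route53.CidrCollectionChange{{
			Action:       aws.String("PUT"),
			LocationName: aws.String("office-a"),
			CidrList:     []*string{aws.String("192.0.2.0/24")},
		}},
	})
	if err != nil {
		fmt.Println("change failed:", err)
		return
	}
	fmt.Println("CIDR block added")
}
```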
", - "ChangeResourceRecordSets": "

Creates, changes, or deletes a resource record set, which contains authoritative DNS information for a specified domain name or subdomain name. For example, you can use ChangeResourceRecordSets to create a resource record set that routes traffic for test.example.com to a web server that has an IP address of 192.0.2.44.

Deleting Resource Record Sets

To delete a resource record set, you must specify all the same values that you specified when you created it.

Change Batches and Transactional Changes

The request body must include a document with a ChangeResourceRecordSetsRequest element. The request body contains a list of change items, known as a change batch. Change batches are considered transactional changes. Route 53 validates the changes in the request and then either makes all or none of the changes in the change batch request. This ensures that DNS routing isn't adversely affected by partial changes to the resource record sets in a hosted zone.

For example, suppose a change batch request contains two changes: it deletes the CNAME resource record set for www.example.com and creates an alias resource record set for www.example.com. If validation for both records succeeds, Route 53 deletes the first resource record set and creates the second resource record set in a single operation. If validation for either the DELETE or the CREATE action fails, then the request is canceled, and the original CNAME record continues to exist.

If you try to delete the same resource record set more than once in a single change batch, Route 53 returns an InvalidChangeBatch error.

Traffic Flow

To create resource record sets for complex routing configurations, use either the traffic flow visual editor in the Route 53 console or the API actions for traffic policies and traffic policy instances. Save the configuration as a traffic policy, then associate the traffic policy with one or more domain names (such as example.com) or subdomain names (such as www.example.com), in the same hosted zone or in multiple hosted zones. You can roll back the updates if the new configuration isn't performing as expected. For more information, see Using Traffic Flow to Route DNS Traffic in the Amazon Route 53 Developer Guide.

Create, Delete, and Upsert

Use ChangeResourceRecordsSetsRequest to perform the following actions:

  • CREATE: Creates a resource record set that has the specified values.

  • DELETE: Deletes an existing resource record set that has the specified values.

  • UPSERT: If a resource set exists Route 53 updates it with the values in the request.

Syntaxes for Creating, Updating, and Deleting Resource Record Sets

The syntax for a request depends on the type of resource record set that you want to create, delete, or update, such as weighted, alias, or failover. The XML elements in your request must appear in the order listed in the syntax.

For an example for each type of resource record set, see \"Examples.\"

Don't refer to the syntax in the \"Parameter Syntax\" section, which includes all of the elements for every kind of resource record set that you can create, delete, or update by using ChangeResourceRecordSets.

Change Propagation to Route 53 DNS Servers

When you submit a ChangeResourceRecordSets request, Route 53 propagates your changes to all of the Route 53 authoritative DNS servers. While your changes are propagating, GetChange returns a status of PENDING. When propagation is complete, GetChange returns a status of INSYNC. Changes generally propagate to all Route 53 name servers within 60 seconds. For more information, see GetChange.

Limits on ChangeResourceRecordSets Requests

For information about the limits on a ChangeResourceRecordSets request, see Limits in the Amazon Route 53 Developer Guide.

", + "ChangeResourceRecordSets": "

Creates, changes, or deletes a resource record set, which contains authoritative DNS information for a specified domain name or subdomain name. For example, you can use ChangeResourceRecordSets to create a resource record set that routes traffic for test.example.com to a web server that has an IP address of 192.0.2.44.

Deleting Resource Record Sets

To delete a resource record set, you must specify all the same values that you specified when you created it.

Change Batches and Transactional Changes

The request body must include a document with a ChangeResourceRecordSetsRequest element. The request body contains a list of change items, known as a change batch. Change batches are considered transactional changes. Route 53 validates the changes in the request and then either makes all or none of the changes in the change batch request. This ensures that DNS routing isn't adversely affected by partial changes to the resource record sets in a hosted zone.

For example, suppose a change batch request contains two changes: it deletes the CNAME resource record set for www.example.com and creates an alias resource record set for www.example.com. If validation for both records succeeds, Route 53 deletes the first resource record set and creates the second resource record set in a single operation. If validation for either the DELETE or the CREATE action fails, then the request is canceled, and the original CNAME record continues to exist.

If you try to delete the same resource record set more than once in a single change batch, Route 53 returns an InvalidChangeBatch error.

Traffic Flow

To create resource record sets for complex routing configurations, use either the traffic flow visual editor in the Route 53 console or the API actions for traffic policies and traffic policy instances. Save the configuration as a traffic policy, then associate the traffic policy with one or more domain names (such as example.com) or subdomain names (such as www.example.com), in the same hosted zone or in multiple hosted zones. You can roll back the updates if the new configuration isn't performing as expected. For more information, see Using Traffic Flow to Route DNS Traffic in the Amazon Route 53 Developer Guide.

Create, Delete, and Upsert

Use ChangeResourceRecordSetsRequest to perform the following actions:

  • CREATE: Creates a resource record set that has the specified values.

  • DELETE: Deletes an existing resource record set that has the specified values.

  • UPSERT: If a resource record set doesn't already exist, Route 53 creates it. If a resource record set does exist, Route 53 updates it with the values in the request.

Syntaxes for Creating, Updating, and Deleting Resource Record Sets

The syntax for a request depends on the type of resource record set that you want to create, delete, or update, such as weighted, alias, or failover. The XML elements in your request must appear in the order listed in the syntax.

For an example for each type of resource record set, see \"Examples.\"

Don't refer to the syntax in the \"Parameter Syntax\" section, which includes all of the elements for every kind of resource record set that you can create, delete, or update by using ChangeResourceRecordSets.

Change Propagation to Route 53 DNS Servers

When you submit a ChangeResourceRecordSets request, Route 53 propagates your changes to all of the Route 53 authoritative DNS servers managing the hosted zone. While your changes are propagating, GetChange returns a status of PENDING. When propagation is complete, GetChange returns a status of INSYNC. Changes generally propagate to all Route 53 name servers managing the hosted zone within 60 seconds. For more information, see GetChange.

Limits on ChangeResourceRecordSets Requests

For information about the limits on a ChangeResourceRecordSets request, see Limits in the Amazon Route 53 Developer Guide.
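
To make the CREATE/DELETE/UPSERT actions above concrete, here is a minimal sketch using this SDK's route53 client; the hosted zone ID is a placeholder and error handling is kept to a bare minimum:

  package main

  import (
      "fmt"
      "log"

      "github.com/aws/aws-sdk-go/aws"
      "github.com/aws/aws-sdk-go/aws/session"
      "github.com/aws/aws-sdk-go/service/route53"
  )

  func main() {
      svc := route53.New(session.Must(session.NewSession()))

      // UPSERT creates the record if it doesn't exist, or updates it if it does.
      out, err := svc.ChangeResourceRecordSets(&route53.ChangeResourceRecordSetsInput{
          HostedZoneId: aws.String("Z3M3LMPEXAMPLE"), // placeholder hosted zone ID
          ChangeBatch: &route53.ChangeBatch{
              Comment: aws.String("Route test.example.com to 192.0.2.44"),
              Changes: []*route53.Change{{
                  Action: aws.String(route53.ChangeActionUpsert),
                  ResourceRecordSet: &route53.ResourceRecordSet{
                      Name: aws.String("test.example.com"),
                      Type: aws.String(route53.RRTypeA),
                      TTL:  aws.Int64(300),
                      ResourceRecords: []*route53.ResourceRecord{
                          {Value: aws.String("192.0.2.44")},
                      },
                  },
              }},
          },
      })
      if err != nil {
          log.Fatal(err)
      }
      // The change starts out PENDING; see GetChange for polling to INSYNC.
      fmt.Println(aws.StringValue(out.ChangeInfo.Id), aws.StringValue(out.ChangeInfo.Status))
  }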

", "ChangeTagsForResource": "

Adds, edits, or deletes tags for a health check or a hosted zone.

For information about using tags for cost allocation, see Using Cost Allocation Tags in the Billing and Cost Management User Guide.

", "CreateCidrCollection": "

Creates a CIDR collection in the current Amazon Web Services account.

", "CreateHealthCheck": "

Creates a new health check.

For information about adding health checks to resource record sets, see HealthCheckId in ChangeResourceRecordSets.

ELB Load Balancers

If you're registering EC2 instances with an Elastic Load Balancing (ELB) load balancer, do not create Amazon Route 53 health checks for the EC2 instances. When you register an EC2 instance with a load balancer, you configure settings for an ELB health check, which performs a similar function to a Route 53 health check.

Private Hosted Zones

You can associate health checks with failover resource record sets in a private hosted zone. Note the following:

  • Route 53 health checkers are outside the VPC. To check the health of an endpoint within a VPC by IP address, you must assign a public IP address to the instance in the VPC.

  • You can configure a health checker to check the health of an external resource that the instance relies on, such as a database server.

  • You can create a CloudWatch metric, associate an alarm with the metric, and then create a health check that is based on the state of the alarm. For example, you might create a CloudWatch metric that checks the status of the Amazon EC2 StatusCheckFailed metric, add an alarm to the metric, and then create a health check that is based on the state of the alarm. For information about creating CloudWatch metrics and alarms by using the CloudWatch console, see the Amazon CloudWatch User Guide.
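
As one way to wire up the alarm-based option in the last bullet, a hedged sketch with this SDK's route53 client; the alarm name, region, and caller reference are placeholders:

  package main

  import (
      "fmt"
      "log"

      "github.com/aws/aws-sdk-go/aws"
      "github.com/aws/aws-sdk-go/aws/session"
      "github.com/aws/aws-sdk-go/service/route53"
  )

  func main() {
      svc := route53.New(session.Must(session.NewSession()))

      // Health check whose status tracks the state of an existing CloudWatch alarm.
      out, err := svc.CreateHealthCheck(&route53.CreateHealthCheckInput{
          CallerReference: aws.String("example-cw-health-check-001"), // must be unique per request
          HealthCheckConfig: &route53.HealthCheckConfig{
              Type: aws.String(route53.HealthCheckTypeCloudwatchMetric),
              AlarmIdentifier: &route53.AlarmIdentifier{
                  Name:   aws.String("StatusCheckFailedAlarm"), // placeholder alarm name
                  Region: aws.String("us-east-1"),
              },
              // What to report while the alarm has insufficient data.
              InsufficientDataHealthStatus: aws.String(route53.InsufficientDataHealthStatusLastKnownStatus),
          },
      })
      if err != nil {
          log.Fatal(err)
      }
      fmt.Println(aws.StringValue(out.HealthCheck.Id))
  }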

", @@ -14,7 +14,7 @@ "CreateQueryLoggingConfig": "

Creates a configuration for DNS query logging. After you create a query logging configuration, Amazon Route 53 begins to publish log data to an Amazon CloudWatch Logs log group.

DNS query logs contain information about the queries that Route 53 receives for a specified public hosted zone, such as the following:

  • Route 53 edge location that responded to the DNS query

  • Domain or subdomain that was requested

  • DNS record type, such as A or AAAA

  • DNS response code, such as NoError or ServFail

Log Group and Resource Policy

Before you create a query logging configuration, perform the following operations.

If you create a query logging configuration using the Route 53 console, Route 53 performs these operations automatically.

  1. Create a CloudWatch Logs log group, and make note of the ARN, which you specify when you create a query logging configuration. Note the following:

    • You must create the log group in the us-east-1 region.

    • You must use the same Amazon Web Services account to create the log group and the hosted zone that you want to configure query logging for.

    • When you create log groups for query logging, we recommend that you use a consistent prefix, for example:

      /aws/route53/hosted zone name

      In the next step, you'll create a resource policy, which controls access to one or more log groups and the associated Amazon Web Services resources, such as Route 53 hosted zones. There's a limit on the number of resource policies that you can create, so we recommend that you use a consistent prefix so you can use the same resource policy for all the log groups that you create for query logging.

  2. Create a CloudWatch Logs resource policy, and give it the permissions that Route 53 needs to create log streams and to send query logs to log streams. For the value of Resource, specify the ARN for the log group that you created in the previous step. To use the same resource policy for all the CloudWatch Logs log groups that you created for query logging configurations, replace the hosted zone name with *, for example:

    arn:aws:logs:us-east-1:123412341234:log-group:/aws/route53/*

    To avoid the confused deputy problem, a security issue where an entity without a permission for an action can coerce a more-privileged entity to perform it, you can optionally limit the permissions that a service has to a resource in a resource-based policy by supplying the following values:

    • For aws:SourceArn, supply the hosted zone ARN used in creating the query logging configuration. For example, aws:SourceArn: arn:aws:route53:::hostedzone/hosted zone ID.

    • For aws:SourceAccount, supply the account ID for the account that creates the query logging configuration. For example, aws:SourceAccount:111111111111.

    For more information, see The confused deputy problem in the Amazon Web Services IAM User Guide.

    You can't use the CloudWatch console to create or edit a resource policy. You must use the CloudWatch API, one of the Amazon Web Services SDKs, or the CLI.

Log Streams and Edge Locations

When Route 53 finishes creating the configuration for DNS query logging, it does the following:

  • Creates a log stream for an edge location the first time that the edge location responds to DNS queries for the specified hosted zone. That log stream is used to log all queries that Route 53 responds to for that edge location.

  • Begins to send query logs to the applicable log stream.

The name of each log stream is in the following format:

hosted zone ID/edge location code

The edge location code is a three-letter code and an arbitrarily assigned number, for example, DFW3. The three-letter code typically corresponds with the International Air Transport Association airport code for an airport near the edge location. (These abbreviations might change in the future.) For a list of edge locations, see \"The Route 53 Global Network\" on the Route 53 Product Details page.

Queries That Are Logged

Query logs contain only the queries that DNS resolvers forward to Route 53. If a DNS resolver has already cached the response to a query (such as the IP address for a load balancer for example.com), the resolver will continue to return the cached response. It doesn't forward another query to Route 53 until the TTL for the corresponding resource record set expires. Depending on how many DNS queries are submitted for a resource record set, and depending on the TTL for that resource record set, query logs might contain information about only one query out of every several thousand queries that are submitted to DNS. For more information about how DNS works, see Routing Internet Traffic to Your Website or Web Application in the Amazon Route 53 Developer Guide.

Log File Format

For a list of the values in each query log and the format of each value, see Logging DNS Queries in the Amazon Route 53 Developer Guide.

Pricing

For information about charges for query logs, see Amazon CloudWatch Pricing.

How to Stop Logging

If you want Route 53 to stop sending query logs to CloudWatch Logs, delete the query logging configuration. For more information, see DeleteQueryLoggingConfig.
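
Putting the log-group, resource-policy, and configuration steps together, a minimal sketch with this SDK's cloudwatchlogs and route53 clients; the hosted zone ID and log group ARN are placeholders (the account ID reuses the example above):

  package main

  import (
      "fmt"
      "log"

      "github.com/aws/aws-sdk-go/aws"
      "github.com/aws/aws-sdk-go/aws/session"
      "github.com/aws/aws-sdk-go/service/cloudwatchlogs"
      "github.com/aws/aws-sdk-go/service/route53"
  )

  func main() {
      // The log group and resource policy must live in us-east-1.
      sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))

      // Step 2 above: grant Route 53 permission to create log streams and put events.
      logs := cloudwatchlogs.New(sess)
      policy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow",
        "Principal":{"Service":"route53.amazonaws.com"},
        "Action":["logs:CreateLogStream","logs:PutLogEvents"],
        "Resource":"arn:aws:logs:us-east-1:123412341234:log-group:/aws/route53/*"}]}`
      if _, err := logs.PutResourcePolicy(&cloudwatchlogs.PutResourcePolicyInput{
          PolicyName:     aws.String("route53-query-logging"),
          PolicyDocument: aws.String(policy),
      }); err != nil {
          log.Fatal(err)
      }

      // Then create the query logging configuration itself.
      r53 := route53.New(sess)
      out, err := r53.CreateQueryLoggingConfig(&route53.CreateQueryLoggingConfigInput{
          HostedZoneId:              aws.String("Z3M3LMPEXAMPLE"), // placeholder public hosted zone
          CloudWatchLogsLogGroupArn: aws.String("arn:aws:logs:us-east-1:123412341234:log-group:/aws/route53/example.com"),
      })
      if err != nil {
          log.Fatal(err)
      }
      fmt.Println(aws.StringValue(out.QueryLoggingConfig.Id))
  }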

", "CreateReusableDelegationSet": "

Creates a delegation set (a group of four name servers) that can be reused by multiple hosted zones that were created by the same Amazon Web Services account.

You can also create a reusable delegation set that uses the four name servers that are associated with an existing hosted zone. Specify the hosted zone ID in the CreateReusableDelegationSet request.

You can't associate a reusable delegation set with a private hosted zone.

For information about using a reusable delegation set to configure white label name servers, see Configuring White Label Name Servers.

The process for migrating existing hosted zones to use a reusable delegation set is comparable to the process for configuring white label name servers. You need to perform the following steps:

  1. Create a reusable delegation set.

  2. Recreate hosted zones, and reduce the TTL to 60 seconds or less.

  3. Recreate resource record sets in the new hosted zones.

  4. Change the registrar's name servers to use the name servers for the new hosted zones.

  5. Monitor traffic for the website or application.

  6. Change TTLs back to their original values.

If you want to migrate existing hosted zones to use a reusable delegation set, the existing hosted zones can't use any of the name servers that are assigned to the reusable delegation set. If one or more hosted zones do use one or more name servers that are assigned to the reusable delegation set, you can do one of the following:

  • For small numbers of hosted zones—up to a few hundred—it's relatively easy to create reusable delegation sets until you get one that has four name servers that don't overlap with any of the name servers in your hosted zones.

  • For larger numbers of hosted zones, the easiest solution is to use more than one reusable delegation set.

  • For larger numbers of hosted zones, you can also migrate hosted zones that have overlapping name servers to hosted zones that don't have overlapping name servers, then migrate the hosted zones again to use the reusable delegation set.
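
Step 1 of the migration outline above might look like the following sketch; the caller reference is a placeholder, and HostedZoneId is omitted so Route 53 assigns four fresh name servers:

  package main

  import (
      "fmt"
      "log"

      "github.com/aws/aws-sdk-go/aws"
      "github.com/aws/aws-sdk-go/aws/session"
      "github.com/aws/aws-sdk-go/service/route53"
  )

  func main() {
      svc := route53.New(session.Must(session.NewSession()))

      // Create a reusable delegation set. Set HostedZoneId instead to reuse
      // the name servers of an existing hosted zone.
      out, err := svc.CreateReusableDelegationSet(&route53.CreateReusableDelegationSetInput{
          CallerReference: aws.String("example-delegation-set-001"), // must be unique
      })
      if err != nil {
          log.Fatal(err)
      }
      fmt.Println(aws.StringValue(out.DelegationSet.Id), aws.StringValueSlice(out.DelegationSet.NameServers))
  }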

", "CreateTrafficPolicy": "

Creates a traffic policy, which you use to create multiple DNS resource record sets for one domain name (such as example.com) or one subdomain name (such as www.example.com).

", - "CreateTrafficPolicyInstance": "

Creates resource record sets in a specified hosted zone based on the settings in a specified traffic policy version. In addition, CreateTrafficPolicyInstance associates the resource record sets with a specified domain name (such as example.com) or subdomain name (such as www.example.com). Amazon Route 53 responds to DNS queries for the domain or subdomain name by using the resource record sets that CreateTrafficPolicyInstance created.

", + "CreateTrafficPolicyInstance": "

Creates resource record sets in a specified hosted zone based on the settings in a specified traffic policy version. In addition, CreateTrafficPolicyInstance associates the resource record sets with a specified domain name (such as example.com) or subdomain name (such as www.example.com). Amazon Route 53 responds to DNS queries for the domain or subdomain name by using the resource record sets that CreateTrafficPolicyInstance created.

After you submit a CreateTrafficPolicyInstance request, there's a brief delay while Amazon Route 53 creates the resource record sets that are specified in the traffic policy definition. Use GetTrafficPolicyInstance with the id of the new traffic policy instance to confirm that the CreateTrafficPolicyInstance request completed successfully. For more information, see the State response element.
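
A sketch of that confirmation step, polling GetTrafficPolicyInstance until the instance leaves the Creating state; the instance ID is a placeholder:

  package main

  import (
      "fmt"
      "log"
      "time"

      "github.com/aws/aws-sdk-go/aws"
      "github.com/aws/aws-sdk-go/aws/session"
      "github.com/aws/aws-sdk-go/service/route53"
  )

  func main() {
      svc := route53.New(session.Must(session.NewSession()))

      instanceID := aws.String("traffic-policy-instance-id") // placeholder id from CreateTrafficPolicyInstance
      for {
          out, err := svc.GetTrafficPolicyInstance(&route53.GetTrafficPolicyInstanceInput{Id: instanceID})
          if err != nil {
              log.Fatal(err)
          }
          state := aws.StringValue(out.TrafficPolicyInstance.State)
          fmt.Println("state:", state)
          if state != "Creating" { // documented values: Creating, Applied, Failed
              break
          }
          time.Sleep(5 * time.Second)
      }
  }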

", "CreateTrafficPolicyVersion": "

Creates a new version of an existing traffic policy. When you create a new version of a traffic policy, you specify the ID of the traffic policy that you want to update and a JSON-formatted document that describes the new version. You use traffic policies to create multiple DNS resource record sets for one domain name (such as example.com) or one subdomain name (such as www.example.com). You can create a maximum of 1000 versions of a traffic policy. If you reach the limit and need to create another version, you'll need to start a new traffic policy.

", "CreateVPCAssociationAuthorization": "

Authorizes the Amazon Web Services account that created a specified VPC to submit an AssociateVPCWithHostedZone request to associate the VPC with a specified hosted zone that was created by a different account. To submit a CreateVPCAssociationAuthorization request, you must use the account that created the hosted zone. After you authorize the association, use the account that created the VPC to submit an AssociateVPCWithHostedZone request.

If you want to associate multiple VPCs that you created by using one account with a hosted zone that you created by using a different account, you must submit one authorization request for each VPC.

", "DeactivateKeySigningKey": "

Deactivates a key-signing key (KSK) so that it will not be used for signing by DNSSEC. This operation changes the KSK status to INACTIVE.

", @@ -31,7 +31,7 @@ "DisassociateVPCFromHostedZone": "

Disassociates an Amazon Virtual Private Cloud (Amazon VPC) from an Amazon Route 53 private hosted zone. Note the following:

  • You can't disassociate the last Amazon VPC from a private hosted zone.

  • You can't convert a private hosted zone into a public hosted zone.

  • You can submit a DisassociateVPCFromHostedZone request using either the account that created the hosted zone or the account that created the Amazon VPC.

  • Some services, such as Cloud Map and Amazon Elastic File System (Amazon EFS) automatically create hosted zones and associate VPCs with the hosted zones. A service can create a hosted zone using your account or using its own account. You can disassociate a VPC from a hosted zone only if the service created the hosted zone using your account.

    When you run DisassociateVPCFromHostedZone, check which value is set: if the hosted zone has a value for OwningAccount, you can use DisassociateVPCFromHostedZone; if it has a value for OwningService, you can't.

When revoking access, the hosted zone and the Amazon VPC must belong to the same partition. A partition is a group of Amazon Web Services Regions. Each Amazon Web Services account is scoped to one partition.

The following are the supported partitions:

  • aws - Amazon Web Services Regions

  • aws-cn - China Regions

  • aws-us-gov - Amazon Web Services GovCloud (US) Region

For more information, see Access Management in the Amazon Web Services General Reference.

", "EnableHostedZoneDNSSEC": "

Enables DNSSEC signing in a specific hosted zone.

", "GetAccountLimit": "

Gets the specified limit for the current account, for example, the maximum number of health checks that you can create using the account.

For the default limit, see Limits in the Amazon Route 53 Developer Guide. To request a higher limit, open a case.

You can also view account limits in Amazon Web Services Trusted Advisor. Sign in to the Amazon Web Services Management Console and open the Trusted Advisor console at https://console.aws.amazon.com/trustedadvisor/. Then choose Service limits in the navigation pane.
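
For example, a sketch that reads the health-check limit with this SDK; the limit type constant matches the example in the first sentence:

  package main

  import (
      "fmt"
      "log"

      "github.com/aws/aws-sdk-go/aws"
      "github.com/aws/aws-sdk-go/aws/session"
      "github.com/aws/aws-sdk-go/service/route53"
  )

  func main() {
      svc := route53.New(session.Must(session.NewSession()))

      // Ask for the health-check limit; other AccountLimitType constants exist too.
      out, err := svc.GetAccountLimit(&route53.GetAccountLimitInput{
          Type: aws.String(route53.AccountLimitTypeMaxHealthChecksByOwner),
      })
      if err != nil {
          log.Fatal(err)
      }
      fmt.Printf("using %d of %d health checks\n",
          aws.Int64Value(out.Count), aws.Int64Value(out.Limit.Value))
  }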

", - "GetChange": "

Returns the current status of a change batch request. The status is one of the following values:

  • PENDING indicates that the changes in this request have not propagated to all Amazon Route 53 DNS servers. This is the initial status of all change batch requests.

  • INSYNC indicates that the changes have propagated to all Route 53 DNS servers.

", + "GetChange": "

Returns the current status of a change batch request. The status is one of the following values:

  • PENDING indicates that the changes in this request have not propagated to all Amazon Route 53 DNS servers managing the hosted zone. This is the initial status of all change batch requests.

  • INSYNC indicates that the changes have propagated to all Route 53 DNS servers managing the hosted zone.
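
A sketch of polling GetChange until the change batch is INSYNC; the change ID is a placeholder. (The SDK also ships a WaitUntilResourceRecordSetsChanged waiter that wraps the same loop.)

  package main

  import (
      "fmt"
      "log"
      "time"

      "github.com/aws/aws-sdk-go/aws"
      "github.com/aws/aws-sdk-go/aws/session"
      "github.com/aws/aws-sdk-go/service/route53"
  )

  func main() {
      svc := route53.New(session.Must(session.NewSession()))

      changeID := aws.String("/change/C2682N5HXP0BZ4") // placeholder id from ChangeResourceRecordSets
      for {
          out, err := svc.GetChange(&route53.GetChangeInput{Id: changeID})
          if err != nil {
              log.Fatal(err)
          }
          if aws.StringValue(out.ChangeInfo.Status) == route53.ChangeStatusInsync {
              fmt.Println("change is INSYNC")
              return
          }
          time.Sleep(10 * time.Second) // changes generally propagate within 60 seconds
      }
  }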

", "GetCheckerIpRanges": "

Route 53 does not perform authorization for this API because it retrieves information that is already available to the public.

GetCheckerIpRanges still works, but we recommend that you download ip-ranges.json, which includes IP address ranges for all Amazon Web Services services. For more information, see IP Address Ranges of Amazon Route 53 Servers in the Amazon Route 53 Developer Guide.

", "GetDNSSEC": "

Returns information about DNSSEC for a specific hosted zone, including the key-signing keys (KSKs) in the hosted zone.

", "GetGeoLocation": "

Gets information about whether a specified geographic location is supported for Amazon Route 53 geolocation resource record sets.

Route 53 does not perform authorization for this API because it retrieves information that is already available to the public.

Use the following syntax to determine whether a continent is supported for geolocation:

GET /2013-04-01/geolocation?continentcode=two-letter abbreviation for a continent

Use the following syntax to determine whether a country is supported for geolocation:

GET /2013-04-01/geolocation?countrycode=two-character country code

Use the following syntax to determine whether a subdivision of a country is supported for geolocation:

GET /2013-04-01/geolocation?countrycode=two-character country code&subdivisioncode=subdivision code
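
The SDK equivalent of the raw GET requests above might look like this sketch; the country and subdivision codes are illustrative:

  package main

  import (
      "fmt"
      "log"

      "github.com/aws/aws-sdk-go/aws"
      "github.com/aws/aws-sdk-go/aws/session"
      "github.com/aws/aws-sdk-go/service/route53"
  )

  func main() {
      svc := route53.New(session.Must(session.NewSession()))

      // Equivalent of GET /2013-04-01/geolocation?countrycode=...&subdivisioncode=...
      out, err := svc.GetGeoLocation(&route53.GetGeoLocationInput{
          CountryCode:     aws.String("US"),
          SubdivisionCode: aws.String("WA"),
      })
      if err != nil {
          log.Fatal(err) // a NoSuchGeoLocation error means the location isn't supported
      }
      fmt.Println(aws.StringValue(out.GeoLocationDetails.SubdivisionName))
  }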

", @@ -46,7 +46,7 @@ "GetReusableDelegationSet": "

Retrieves information about a specified reusable delegation set, including the four name servers that are assigned to the delegation set.

", "GetReusableDelegationSetLimit": "

Gets the maximum number of hosted zones that you can associate with the specified reusable delegation set.

For the default limit, see Limits in the Amazon Route 53 Developer Guide. To request a higher limit, open a case.

", "GetTrafficPolicy": "

Gets information about a specific traffic policy version.

For information about how deleting a traffic policy affects the response from GetTrafficPolicy, see DeleteTrafficPolicy.

", - "GetTrafficPolicyInstance": "

Gets information about a specified traffic policy instance.

After you submit a CreateTrafficPolicyInstance or an UpdateTrafficPolicyInstance request, there's a brief delay while Amazon Route 53 creates the resource record sets that are specified in the traffic policy definition. For more information, see the State response element.

In the Route 53 console, traffic policy instances are known as policy records.

", + "GetTrafficPolicyInstance": "

Gets information about a specified traffic policy instance.

Use GetTrafficPolicyInstance with the id of the new traffic policy instance to confirm that a CreateTrafficPolicyInstance or UpdateTrafficPolicyInstance request completed successfully. For more information, see the State response element.

In the Route 53 console, traffic policy instances are known as policy records.

", "GetTrafficPolicyInstanceCount": "

Gets the number of traffic policy instances that are associated with the current Amazon Web Services account.

", "ListCidrBlocks": "

Returns a paginated list of location objects and their CIDR blocks.

", "ListCidrCollections": "

Returns a paginated list of CIDR collections in the Amazon Web Services account (metadata only).

", @@ -67,11 +67,11 @@ "ListTrafficPolicyInstancesByPolicy": "

Gets information about the traffic policy instances that you created by using a specified traffic policy version.

After you submit a CreateTrafficPolicyInstance or an UpdateTrafficPolicyInstance request, there's a brief delay while Amazon Route 53 creates the resource record sets that are specified in the traffic policy definition. For more information, see the State response element.

Route 53 returns a maximum of 100 items in each response. If you have a lot of traffic policy instances, you can use the MaxItems parameter to list them in groups of up to 100.

", "ListTrafficPolicyVersions": "

Gets information about all of the versions for a specified traffic policy.

Traffic policy versions are listed in numerical order by VersionNumber.

", "ListVPCAssociationAuthorizations": "

Gets a list of the VPCs that were created by other accounts and that can be associated with a specified hosted zone because you've submitted one or more CreateVPCAssociationAuthorization requests.

The response includes a VPCs element with a VPC child element for each VPC that can be associated with the hosted zone.

", - "TestDNSAnswer": "

Gets the value that Amazon Route 53 returns in response to a DNS request for a specified record name and type. You can optionally specify the IP address of a DNS resolver, an EDNS0 client subnet IP address, and a subnet mask.

This call only supports querying public hosted zones.

", + "TestDNSAnswer": "

Gets the value that Amazon Route 53 returns in response to a DNS request for a specified record name and type. You can optionally specify the IP address of a DNS resolver, an EDNS0 client subnet IP address, and a subnet mask.

This call only supports querying public hosted zones.

TestDnsAnswer returns information similar to what you would expect to find in the answer section of dig command output. Therefore, if you query for the name servers of a subdomain that point to the parent name servers, those name servers will not be returned.
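
A hedged sketch of the call with this SDK's route53 client; the hosted zone ID is a placeholder, and the EDNS0 fields are optional:

  package main

  import (
      "fmt"
      "log"

      "github.com/aws/aws-sdk-go/aws"
      "github.com/aws/aws-sdk-go/aws/session"
      "github.com/aws/aws-sdk-go/service/route53"
  )

  func main() {
      svc := route53.New(session.Must(session.NewSession()))

      // Ask what Route 53 would answer for an A query, optionally simulating
      // a specific EDNS0 client subnet.
      out, err := svc.TestDNSAnswer(&route53.TestDNSAnswerInput{
          HostedZoneId:          aws.String("Z3M3LMPEXAMPLE"), // placeholder public hosted zone
          RecordName:            aws.String("test.example.com"),
          RecordType:            aws.String(route53.RRTypeA),
          EDNS0ClientSubnetIP:   aws.String("192.0.2.0"),
          EDNS0ClientSubnetMask: aws.String("24"),
      })
      if err != nil {
          log.Fatal(err)
      }
      fmt.Println(aws.StringValue(out.ResponseCode), aws.StringValueSlice(out.RecordData))
  }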

", "UpdateHealthCheck": "

Updates an existing health check. Note that some values can't be updated.

For more information about updating health checks, see Creating, Updating, and Deleting Health Checks in the Amazon Route 53 Developer Guide.

", "UpdateHostedZoneComment": "

Updates the comment for a specified hosted zone.

", "UpdateTrafficPolicyComment": "

Updates the comment for a specified traffic policy version.

", - "UpdateTrafficPolicyInstance": "

Updates the resource record sets in a specified hosted zone that were created based on the settings in a specified traffic policy version.

When you update a traffic policy instance, Amazon Route 53 continues to respond to DNS queries for the root resource record set name (such as example.com) while it replaces one group of resource record sets with another. Route 53 performs the following operations:

  1. Route 53 creates a new group of resource record sets based on the specified traffic policy. This is true regardless of how significant the differences are between the existing resource record sets and the new resource record sets.

  2. When all of the new resource record sets have been created, Route 53 starts to respond to DNS queries for the root resource record set name (such as example.com) by using the new resource record sets.

  3. Route 53 deletes the old group of resource record sets that are associated with the root resource record set name.

" + "UpdateTrafficPolicyInstance": "

Updates the resource record sets in a specified hosted zone that were created based on the settings in a specified traffic policy version.

After you submit an UpdateTrafficPolicyInstance request, there's a brief delay while Route 53 creates the resource record sets that are specified in the traffic policy definition. Use GetTrafficPolicyInstance with the id of the updated traffic policy instance to confirm that the UpdateTrafficPolicyInstance request completed successfully. For more information, see the State response element.

When you update a traffic policy instance, Amazon Route 53 continues to respond to DNS queries for the root resource record set name (such as example.com) while it replaces one group of resource record sets with another. Route 53 performs the following operations:

  1. Route 53 creates a new group of resource record sets based on the specified traffic policy. This is true regardless of how significant the differences are between the existing resource record sets and the new resource record sets.

  2. When all of the new resource record sets have been created, Route 53 starts to respond to DNS queries for the root resource record set name (such as example.com) by using the new resource record sets.

  3. Route 53 deletes the old group of resource record sets that are associated with the root resource record set name.
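
A minimal sketch of the call itself; every ID here is a placeholder, and the TTL and version values are illustrative:

  package main

  import (
      "fmt"
      "log"

      "github.com/aws/aws-sdk-go/aws"
      "github.com/aws/aws-sdk-go/aws/session"
      "github.com/aws/aws-sdk-go/service/route53"
  )

  func main() {
      svc := route53.New(session.Must(session.NewSession()))

      // Point an existing instance at a different traffic policy version.
      out, err := svc.UpdateTrafficPolicyInstance(&route53.UpdateTrafficPolicyInstanceInput{
          Id:                   aws.String("traffic-policy-instance-id"), // placeholder
          TTL:                  aws.Int64(60),
          TrafficPolicyId:      aws.String("traffic-policy-id"), // placeholder
          TrafficPolicyVersion: aws.Int64(2),
      })
      if err != nil {
          log.Fatal(err)
      }
      fmt.Println(aws.StringValue(out.TrafficPolicyInstance.State))
  }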

" }, "shapes": { "ARN": { @@ -2001,7 +2001,7 @@ "AssociateVPCWithHostedZoneRequest$HostedZoneId": "

The ID of the private hosted zone that you want to associate an Amazon VPC with.

Note that you can't associate a VPC with a hosted zone that doesn't have an existing VPC association.

", "ChangeInfo$Id": "

This element contains an ID that you use when performing a GetChange action to get detailed information about the change.

", "ChangeResourceRecordSetsRequest$HostedZoneId": "

The ID of the hosted zone that contains the resource record sets that you want to change.

", - "CreateHostedZoneRequest$DelegationSetId": "

If you want to associate a reusable delegation set with this hosted zone, the ID that Amazon Route 53 assigned to the reusable delegation set when you created it. For more information about reusable delegation sets, see CreateReusableDelegationSet.

", + "CreateHostedZoneRequest$DelegationSetId": "

If you want to associate a reusable delegation set with this hosted zone, the ID that Amazon Route 53 assigned to the reusable delegation set when you created it. For more information about reusable delegation sets, see CreateReusableDelegationSet.

If you are using a reusable delegation set to create a public hosted zone for a subdomain, make sure that the parent hosted zone doesn't use one or more of the same name servers. If you have overlapping name servers, the operation causes a ConflictingDomainsExist error.

", "CreateKeySigningKeyRequest$HostedZoneId": "

The unique string (ID) used to identify a hosted zone.

", "CreateQueryLoggingConfigRequest$HostedZoneId": "

The ID of the hosted zone that you want to log queries for. You can log queries only for public hosted zones.

", "CreateReusableDelegationSetRequest$HostedZoneId": "

If you want to mark the delegation set for an existing hosted zone as reusable, the ID for that hosted zone.

", diff --git a/models/apis/route53/2013-04-01/endpoint-rule-set-1.json b/models/apis/route53/2013-04-01/endpoint-rule-set-1.json index e956969175b..eef1699a130 100644 --- a/models/apis/route53/2013-04-01/endpoint-rule-set-1.json +++ b/models/apis/route53/2013-04-01/endpoint-rule-set-1.json @@ -3,7 +3,7 @@ "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,14 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -62,64 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "stringEquals", + "fn": "isSet", "argv": [ { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - }, - "aws" + "ref": "Region" + } ] } ], @@ -128,22 +111,13 @@ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ { - "ref": "UseDualStack" - }, - true - ] + "ref": "Region" + } + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -151,1276 +125,581 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsFIPS" + "name" ] - } + }, + "aws" + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false ] }, { "fn": "booleanEquals", "argv": [ - true, + { + "ref": "UseDualStack" + }, + false + ] + } + ], + "endpoint": { + "url": "https://route53.amazonaws.com", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "route53", + "signingRegion": "us-east-1" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsDualStack" + "name" ] - } + }, + "aws" ] - } - ], - "type": "tree", - "rules": [ + }, { - "conditions": [], - "endpoint": { - "url": "https://route-53-fips.{Region}.api.aws", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "us-east-1", - "signingName": "route53" - } - ] + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" }, - "headers": {} - 
}, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ + true + ] + }, { - "ref": "UseFIPS" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + false + ] + } + ], + "endpoint": { + "url": "https://route53-fips.amazonaws.com", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "route53", + "signingRegion": "us-east-1" + } + ] }, - true - ] - } - ], - "type": "tree", - "rules": [ + "headers": {} + }, + "type": "endpoint" + }, { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsFIPS" + "name" ] - } + }, + "aws-cn" ] - } - ], - "type": "tree", - "rules": [ + }, { - "conditions": [], - "endpoint": { - "url": "https://route53-fips.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "us-east-1", - "signingName": "route53" - } - ] + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" }, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ + false + ] + }, { - "ref": "UseDualStack" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + false + ] + } + ], + "endpoint": { + "url": "https://route53.amazonaws.com.cn", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "route53", + "signingRegion": "cn-northwest-1" + } + ] }, - true - ] - } - ], - "type": "tree", - "rules": [ + "headers": {} + }, + "type": "endpoint" + }, { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsDualStack" + "name" ] - } + }, + "aws-us-gov" + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + false ] } ], - "type": "tree", - "rules": [ + "endpoint": { + "url": "https://route53.us-gov.amazonaws.com", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "route53", + "signingRegion": "us-gov-west-1" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ { - "conditions": [], - "endpoint": { - "url": "https://route-53.{Region}.api.aws", - "properties": { - "authSchemes": [ + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ { - "name": "sigv4", - "signingRegion": "us-east-1", - "signingName": "route53" - } + "ref": "PartitionResult" + }, + "name" ] }, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" - } - ] - }, - { - "conditions": [], - "endpoint": { - "url": "https://route53.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "us-east-1", - "signingName": "route53" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" + "aws-us-gov" + ] }, - "name" - ] - }, - "aws-cn" - ] - } - ], - "type": "tree", - "rules": [ - { - 
"conditions": [ - { - "fn": "booleanEquals", - "argv": [ { - "ref": "UseFIPS" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ { - "ref": "UseDualStack" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + false + ] + } + ], + "endpoint": { + "url": "https://route53.us-gov.amazonaws.com", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "route53", + "signingRegion": "us-gov-west-1" + } + ] }, - true - ] - } - ], - "type": "tree", - "rules": [ + "headers": {} + }, + "type": "endpoint" + }, { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsFIPS" + "name" ] - } + }, + "aws-iso" + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false ] }, { "fn": "booleanEquals", "argv": [ - true, + { + "ref": "UseDualStack" + }, + false + ] + } + ], + "endpoint": { + "url": "https://route53.c2s.ic.gov", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "route53", + "signingRegion": "us-iso-east-1" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsDualStack" + "name" ] - } + }, + "aws-iso-b" ] - } - ], - "type": "tree", - "rules": [ + }, { - "conditions": [], - "endpoint": { - "url": "https://route-53-fips.{Region}.api.amazonwebservices.com.cn", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "cn-northwest-1", - "signingName": "route53" - } - ] + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" }, - "headers": {} - }, - "type": "endpoint" + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + false + ] } - ] + ], + "endpoint": { + "url": "https://route53.sc2s.sgov.gov", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "route53", + "signingRegion": "us-isob-east-1" + } + ] + }, + "headers": {} + }, + "type": "endpoint" }, { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ + "conditions": [ { - "ref": "UseFIPS" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } + "ref": "UseDualStack" + }, + true ] } ], "type": "tree", "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://route-53-fips.{Region}.amazonaws.com.cn", - "properties": { - "authSchemes": [ + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, { - "name": "sigv4", - "signingRegion": "cn-northwest-1", - "signingName": "route53" + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] } ] }, - "headers": {} - }, - "type": "endpoint" + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": 
"https://route53-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } ] }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } + "ref": "UseFIPS" + }, + true ] } ], "type": "tree", "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://route-53.{Region}.api.amazonwebservices.com.cn", - "properties": { - "authSchemes": [ + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, { - "name": "sigv4", - "signingRegion": "cn-northwest-1", - "signingName": "route53" + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] } ] - }, - "headers": {} - }, - "type": "endpoint" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://route53-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" - } - ] - }, - { - "conditions": [], - "endpoint": { - "url": "https://route53.amazonaws.com.cn", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "cn-northwest-1", - "signingName": "route53" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - }, - "aws-us-gov" - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } + "ref": "UseDualStack" + }, + true ] } ], "type": "tree", "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://route-53-fips.{Region}.api.aws", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "us-gov-west-1", - "signingName": "route53" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", 
+ "fn": "booleanEquals", "argv": [ + true, { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://route53.us-gov.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "us-gov-west-1", - "signingName": "route53" + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] } ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, + } + ], + "type": "tree", + "rules": [ { - "fn": "getAttr", - "argv": [ + "conditions": [], + "type": "tree", + "rules": [ { - "ref": "PartitionResult" - }, - "supportsDualStack" + "conditions": [], + "endpoint": { + "url": "https://route53.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } ] } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], - "endpoint": { - "url": "https://route-53.{Region}.api.aws", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "us-gov-west-1", - "signingName": "route53" - } - ] - }, - "headers": {} - }, - "type": "endpoint" + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] }, { "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" - } - ] - }, - { - "conditions": [], - "endpoint": { - "url": "https://route53.us-gov.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "us-gov-west-1", - "signingName": "route53" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - }, - "aws-iso" - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://route-53-fips.{Region}.c2s.ic.gov", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "us-iso-east-1", - "signingName": "route53" - } - ] - }, + "url": "https://route53.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, "headers": {} }, "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" } ] - }, - { - "conditions": [], - "endpoint": { - "url": "https://route53.c2s.ic.gov", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "us-iso-east-1", - "signingName": "route53" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - }, - "aws-iso-b" - ] - } - ], - "type": "tree", - "rules": [ 
- { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://route-53-fips.{Region}.sc2s.sgov.gov", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "us-isob-east-1", - "signingName": "route53" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [], - "endpoint": { - "url": "https://route53.sc2s.sgov.gov", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "us-isob-east-1", - "signingName": "route53" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://route53-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "aws-global" - ] - } - ], - "endpoint": { - "url": "https://route53-fips.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "us-east-1", - "signingName": "route53" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "aws-us-gov-global" - ] - } - ], - "endpoint": { - "url": "https://route53.us-gov.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "us-gov-west-1", - "signingName": "route53" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [], - "endpoint": { - "url": "https://route53-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - 
} - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://route53.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "aws-global" - ] - } - ], - "endpoint": { - "url": "https://route53.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "us-east-1", - "signingName": "route53" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "aws-cn-global" - ] - } - ], - "endpoint": { - "url": "https://route53.amazonaws.com.cn", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "cn-northwest-1", - "signingName": "route53" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "aws-us-gov-global" - ] - } - ], - "endpoint": { - "url": "https://route53.us-gov.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "us-gov-west-1", - "signingName": "route53" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "aws-iso-global" - ] - } - ], - "endpoint": { - "url": "https://route53.c2s.ic.gov", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "us-iso-east-1", - "signingName": "route53" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "aws-iso-b-global" - ] - } - ], - "endpoint": { - "url": "https://route53.sc2s.sgov.gov", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "us-isob-east-1", - "signingName": "route53" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [], - "endpoint": { - "url": "https://route53.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git a/models/apis/route53/2013-04-01/endpoint-tests-1.json b/models/apis/route53/2013-04-01/endpoint-tests-1.json index b5c63bf822e..fe838c14551 100644 --- a/models/apis/route53/2013-04-01/endpoint-tests-1.json +++ b/models/apis/route53/2013-04-01/endpoint-tests-1.json @@ -1,5 +1,119 @@ { "testCases": [ + { + "documentation": "For region aws-global with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "route53", + "signingRegion": "us-east-1" + } + ] + }, + "url": "https://route53.amazonaws.com" + } + }, + "params": { + "Region": "aws-global", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region aws-global with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + 
{ + "name": "sigv4", + "signingName": "route53", + "signingRegion": "us-east-1" + } + ] + }, + "url": "https://route53-fips.amazonaws.com" + } + }, + "params": { + "Region": "aws-global", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://route53-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "route53", + "signingRegion": "us-east-1" + } + ] + }, + "url": "https://route53-fips.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://route53.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "route53", + "signingRegion": "us-east-1" + } + ] + }, + "url": "https://route53.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, { "documentation": "For region aws-cn-global with FIPS disabled and DualStack disabled", "expect": { @@ -17,13 +131,52 @@ } }, "params": { - "UseDualStack": false, + "Region": "aws-cn-global", "UseFIPS": false, - "Region": "aws-cn-global" + "UseDualStack": false } }, { - "documentation": "For region aws-global with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://route53-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://route53-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://route53.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { "properties": { @@ -31,17 +184,131 @@ { "name": "sigv4", "signingName": "route53", - "signingRegion": "us-east-1" + "signingRegion": "cn-northwest-1" } ] }, - "url": "https://route53.amazonaws.com" + "url": "https://route53.amazonaws.com.cn" } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region aws-us-gov-global with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "route53", + "signingRegion": "us-gov-west-1" + } + ] + }, + "url": 
"https://route53.us-gov.amazonaws.com" + } + }, + "params": { + "Region": "aws-us-gov-global", "UseFIPS": false, - "Region": "aws-global" + "UseDualStack": false + } + }, + { + "documentation": "For region aws-us-gov-global with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "route53", + "signingRegion": "us-gov-west-1" + } + ] + }, + "url": "https://route53.us-gov.amazonaws.com" + } + }, + "params": { + "Region": "aws-us-gov-global", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://route53-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "route53", + "signingRegion": "us-gov-west-1" + } + ] + }, + "url": "https://route53.us-gov.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://route53.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "route53", + "signingRegion": "us-gov-west-1" + } + ] + }, + "url": "https://route53.us-gov.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { @@ -61,9 +328,66 @@ } }, "params": { - "UseDualStack": false, + "Region": "aws-iso-global", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://route53-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "aws-iso-global" + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "route53", + "signingRegion": "us-iso-east-1" + } + ] + }, + "url": "https://route53.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { @@ -83,13 +407,48 @@ } }, "params": { - "UseDualStack": false, + "Region": "aws-iso-b-global", "UseFIPS": false, - "Region": "aws-iso-b-global" + 
"UseDualStack": false } }, { - "documentation": "For region aws-us-gov-global with FIPS disabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://route53-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { "properties": { @@ -97,30 +456,43 @@ { "name": "sigv4", "signingName": "route53", - "signingRegion": "us-gov-west-1" + "signingRegion": "us-isob-east-1" } ] }, - "url": "https://route53.us-gov.amazonaws.com" + "url": "https://route53.sc2s.sgov.gov" } }, "params": { - "UseDualStack": false, + "Region": "us-isob-east-1", "UseFIPS": false, - "Region": "aws-us-gov-global" + "UseDualStack": false } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { + "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { "UseFIPS": false, - "Region": "us-east-1", + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -130,9 +502,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -142,11 +514,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/models/endpoints/endpoints.json b/models/endpoints/endpoints.json index 33033479758..186f785124e 100644 --- a/models/endpoints/endpoints.json +++ b/models/endpoints/endpoints.json @@ -14430,67 +14430,67 @@ "endpoints" : { "af-south-1" : { "variants" : [ { - "hostname" : "servicediscovery.af-south-1.amazonaws.com", + "hostname" : "servicediscovery.af-south-1.api.aws", "tags" : [ "dualstack" ] } ] }, "ap-east-1" : { "variants" : [ { - "hostname" : "servicediscovery.ap-east-1.amazonaws.com", + "hostname" : "servicediscovery.ap-east-1.api.aws", "tags" : [ "dualstack" ] } ] }, "ap-northeast-1" : { "variants" : [ { - "hostname" : "servicediscovery.ap-northeast-1.amazonaws.com", + "hostname" 
: "servicediscovery.ap-northeast-1.api.aws", "tags" : [ "dualstack" ] } ] }, "ap-northeast-2" : { "variants" : [ { - "hostname" : "servicediscovery.ap-northeast-2.amazonaws.com", + "hostname" : "servicediscovery.ap-northeast-2.api.aws", "tags" : [ "dualstack" ] } ] }, "ap-northeast-3" : { "variants" : [ { - "hostname" : "servicediscovery.ap-northeast-3.amazonaws.com", + "hostname" : "servicediscovery.ap-northeast-3.api.aws", "tags" : [ "dualstack" ] } ] }, "ap-south-1" : { "variants" : [ { - "hostname" : "servicediscovery.ap-south-1.amazonaws.com", + "hostname" : "servicediscovery.ap-south-1.api.aws", "tags" : [ "dualstack" ] } ] }, "ap-south-2" : { "variants" : [ { - "hostname" : "servicediscovery.ap-south-2.amazonaws.com", + "hostname" : "servicediscovery.ap-south-2.api.aws", "tags" : [ "dualstack" ] } ] }, "ap-southeast-1" : { "variants" : [ { - "hostname" : "servicediscovery.ap-southeast-1.amazonaws.com", + "hostname" : "servicediscovery.ap-southeast-1.api.aws", "tags" : [ "dualstack" ] } ] }, "ap-southeast-2" : { "variants" : [ { - "hostname" : "servicediscovery.ap-southeast-2.amazonaws.com", + "hostname" : "servicediscovery.ap-southeast-2.api.aws", "tags" : [ "dualstack" ] } ] }, "ap-southeast-3" : { "variants" : [ { - "hostname" : "servicediscovery.ap-southeast-3.amazonaws.com", + "hostname" : "servicediscovery.ap-southeast-3.api.aws", "tags" : [ "dualstack" ] } ] }, "ap-southeast-4" : { "variants" : [ { - "hostname" : "servicediscovery.ap-southeast-4.amazonaws.com", + "hostname" : "servicediscovery.ap-southeast-4.api.aws", "tags" : [ "dualstack" ] } ] }, @@ -14499,7 +14499,10 @@ "hostname" : "servicediscovery-fips.ca-central-1.amazonaws.com", "tags" : [ "fips" ] }, { - "hostname" : "servicediscovery.ca-central-1.amazonaws.com", + "hostname" : "servicediscovery-fips.ca-central-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "servicediscovery.ca-central-1.api.aws", "tags" : [ "dualstack" ] } ] }, @@ -14512,67 +14515,67 @@ }, "eu-central-1" : { "variants" : [ { - "hostname" : "servicediscovery.eu-central-1.amazonaws.com", + "hostname" : "servicediscovery.eu-central-1.api.aws", "tags" : [ "dualstack" ] } ] }, "eu-central-2" : { "variants" : [ { - "hostname" : "servicediscovery.eu-central-2.amazonaws.com", + "hostname" : "servicediscovery.eu-central-2.api.aws", "tags" : [ "dualstack" ] } ] }, "eu-north-1" : { "variants" : [ { - "hostname" : "servicediscovery.eu-north-1.amazonaws.com", + "hostname" : "servicediscovery.eu-north-1.api.aws", "tags" : [ "dualstack" ] } ] }, "eu-south-1" : { "variants" : [ { - "hostname" : "servicediscovery.eu-south-1.amazonaws.com", + "hostname" : "servicediscovery.eu-south-1.api.aws", "tags" : [ "dualstack" ] } ] }, "eu-south-2" : { "variants" : [ { - "hostname" : "servicediscovery.eu-south-2.amazonaws.com", + "hostname" : "servicediscovery.eu-south-2.api.aws", "tags" : [ "dualstack" ] } ] }, "eu-west-1" : { "variants" : [ { - "hostname" : "servicediscovery.eu-west-1.amazonaws.com", + "hostname" : "servicediscovery.eu-west-1.api.aws", "tags" : [ "dualstack" ] } ] }, "eu-west-2" : { "variants" : [ { - "hostname" : "servicediscovery.eu-west-2.amazonaws.com", + "hostname" : "servicediscovery.eu-west-2.api.aws", "tags" : [ "dualstack" ] } ] }, "eu-west-3" : { "variants" : [ { - "hostname" : "servicediscovery.eu-west-3.amazonaws.com", + "hostname" : "servicediscovery.eu-west-3.api.aws", "tags" : [ "dualstack" ] } ] }, "me-central-1" : { "variants" : [ { - "hostname" : "servicediscovery.me-central-1.amazonaws.com", + "hostname" : 
"servicediscovery.me-central-1.api.aws", "tags" : [ "dualstack" ] } ] }, "me-south-1" : { "variants" : [ { - "hostname" : "servicediscovery.me-south-1.amazonaws.com", + "hostname" : "servicediscovery.me-south-1.api.aws", "tags" : [ "dualstack" ] } ] }, "sa-east-1" : { "variants" : [ { - "hostname" : "servicediscovery.sa-east-1.amazonaws.com", + "hostname" : "servicediscovery.sa-east-1.api.aws", "tags" : [ "dualstack" ] } ] }, @@ -14581,7 +14584,10 @@ "hostname" : "servicediscovery-fips.us-east-1.amazonaws.com", "tags" : [ "fips" ] }, { - "hostname" : "servicediscovery.us-east-1.amazonaws.com", + "hostname" : "servicediscovery-fips.us-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "servicediscovery.us-east-1.api.aws", "tags" : [ "dualstack" ] } ] }, @@ -14597,7 +14603,10 @@ "hostname" : "servicediscovery-fips.us-east-2.amazonaws.com", "tags" : [ "fips" ] }, { - "hostname" : "servicediscovery.us-east-2.amazonaws.com", + "hostname" : "servicediscovery-fips.us-east-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "servicediscovery.us-east-2.api.aws", "tags" : [ "dualstack" ] } ] }, @@ -14613,7 +14622,10 @@ "hostname" : "servicediscovery-fips.us-west-1.amazonaws.com", "tags" : [ "fips" ] }, { - "hostname" : "servicediscovery.us-west-1.amazonaws.com", + "hostname" : "servicediscovery-fips.us-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "servicediscovery.us-west-1.api.aws", "tags" : [ "dualstack" ] } ] }, @@ -14629,7 +14641,10 @@ "hostname" : "servicediscovery-fips.us-west-2.amazonaws.com", "tags" : [ "fips" ] }, { - "hostname" : "servicediscovery.us-west-2.amazonaws.com", + "hostname" : "servicediscovery-fips.us-west-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "servicediscovery.us-west-2.api.aws", "tags" : [ "dualstack" ] } ] }, @@ -18996,13 +19011,13 @@ "endpoints" : { "cn-north-1" : { "variants" : [ { - "hostname" : "servicediscovery.cn-north-1.amazonaws.com.cn", + "hostname" : "servicediscovery.cn-north-1.api.amazonwebservices.com.cn", "tags" : [ "dualstack" ] } ] }, "cn-northwest-1" : { "variants" : [ { - "hostname" : "servicediscovery.cn-northwest-1.amazonaws.com.cn", + "hostname" : "servicediscovery.cn-northwest-1.api.amazonwebservices.com.cn", "tags" : [ "dualstack" ] } ] } @@ -22879,6 +22894,9 @@ }, "us-gov-east-1" : { "variants" : [ { + "hostname" : "servicediscovery-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "servicediscovery-fips.us-gov-east-1.amazonaws.com", "tags" : [ "fips" ] }, { @@ -22895,6 +22913,9 @@ }, "us-gov-west-1" : { "variants" : [ { + "hostname" : "servicediscovery-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "servicediscovery-fips.us-gov-west-1.amazonaws.com", "tags" : [ "fips" ] }, { diff --git a/service/cloudcontrolapi/api.go b/service/cloudcontrolapi/api.go index b732003fbb1..c9c40121fc1 100644 --- a/service/cloudcontrolapi/api.go +++ b/service/cloudcontrolapi/api.go @@ -1515,14 +1515,19 @@ type CreateResourceInput struct { // // Cloud Control API currently supports JSON as a structured data format. // - //
Specify the desired state as one of the following: - // - //   • A - // JSON blob - // - //   • A local path containing the desired state - // in JSON data format - // - // For more information, see Composing - // the desired state of the resource in the Amazon Web Services Cloud - // Control API User Guide. - // - // For more information about the properties - // of a specific resource, refer to the related topic for the resource in - // the Resource - // and property types reference in the CloudFormation Users Guide.
+ // Specify the desired state as one of the following: + // + // * A JSON blob + // + // * A local path containing the desired state in JSON data format + // + // For more information, see Composing the desired state of the resource (https://docs.aws.amazon.com/cloudcontrolapi/latest/userguide/resource-operations-create.html#resource-operations-create-desiredstate) + // in the Amazon Web Services Cloud Control API User Guide. + // + // For more information about the properties of a specific resource, refer to + // the related topic for the resource in the Resource and property types reference + // (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html) + // in the CloudFormation Users Guide. // // DesiredState is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by CreateResourceInput's diff --git a/service/entityresolution/api.go b/service/entityresolution/api.go new file mode 100644 index 00000000000..8b6773f4dda --- /dev/null +++ b/service/entityresolution/api.go @@ -0,0 +1,5232 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package entityresolution + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/restjson" +) + +const opCreateMatchingWorkflow = "CreateMatchingWorkflow" + +// CreateMatchingWorkflowRequest generates a "aws/request.Request" representing the +// client's request for the CreateMatchingWorkflow operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateMatchingWorkflow for more information on using the CreateMatchingWorkflow +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the CreateMatchingWorkflowRequest method. +// req, resp := client.CreateMatchingWorkflowRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/entityresolution-2018-05-10/CreateMatchingWorkflow +func (c *EntityResolution) CreateMatchingWorkflowRequest(input *CreateMatchingWorkflowInput) (req *request.Request, output *CreateMatchingWorkflowOutput) { + op := &request.Operation{ + Name: opCreateMatchingWorkflow, + HTTPMethod: "POST", + HTTPPath: "/matchingworkflows", + } + + if input == nil { + input = &CreateMatchingWorkflowInput{} + } + + output = &CreateMatchingWorkflowOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateMatchingWorkflow API operation for AWS EntityResolution. +// +// Creates a MatchingWorkflow object which stores the configuration of the data +// processing job to be run. It is important to note that there should not be +// a pre-existing MatchingWorkflow with the same name. To modify an existing +// workflow, utilize the UpdateMatchingWorkflow API. +// +// Returns awserr.Error for service API and SDK errors. 
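The reformatted CreateResource documentation above says DesiredState takes either an inline JSON blob or a local path to one. A minimal sketch of the inline form (the AWS::Logs::LogGroup type and its properties are illustrative assumptions, not taken from this diff):

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudcontrolapi"
)

func main() {
	svc := cloudcontrolapi.New(session.Must(session.NewSession()))

	// DesiredState as an inline JSON blob describing the resource's properties.
	out, err := svc.CreateResource(&cloudcontrolapi.CreateResourceInput{
		TypeName:     aws.String("AWS::Logs::LogGroup"),           // assumed example type
		DesiredState: aws.String(`{"LogGroupName":"example-lg"}`), // assumed example state
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println(aws.StringValue(out.ProgressEvent.OperationStatus))
}
```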
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS EntityResolution's +// API operation CreateMatchingWorkflow for usage and error information. +// +// Returned Error Types: +// +// - ThrottlingException +// The request was denied due to request throttling. HTTP Status Code: 429 +// +// - InternalServerException +// This exception occurs when there is an internal failure in the AWS Entity +// Resolution service. HTTP Status Code: 500 +// +// - AccessDeniedException +// You do not have sufficient access to perform this action. HTTP Status Code: +// 403 +// +// - ExceedsLimitException +// The request was rejected because it attempted to create resources beyond +// the current AWS Entity Resolution account limits. The error message describes +// the limit exceeded. HTTP Status Code: 402 +// +// - ConflictException +// The request could not be processed because of conflict in the current state +// of the resource. Example: Workflow already exists, Schema already exists, +// Workflow is currently running, etc. HTTP Status Code: 400 +// +// - ValidationException +// The input fails to satisfy the constraints specified by AWS Entity Resolution. +// HTTP Status Code: 400 +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/entityresolution-2018-05-10/CreateMatchingWorkflow +func (c *EntityResolution) CreateMatchingWorkflow(input *CreateMatchingWorkflowInput) (*CreateMatchingWorkflowOutput, error) { + req, out := c.CreateMatchingWorkflowRequest(input) + return out, req.Send() +} + +// CreateMatchingWorkflowWithContext is the same as CreateMatchingWorkflow with the addition of +// the ability to pass a context and additional request options. +// +// See CreateMatchingWorkflow for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EntityResolution) CreateMatchingWorkflowWithContext(ctx aws.Context, input *CreateMatchingWorkflowInput, opts ...request.Option) (*CreateMatchingWorkflowOutput, error) { + req, out := c.CreateMatchingWorkflowRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateSchemaMapping = "CreateSchemaMapping" + +// CreateSchemaMappingRequest generates a "aws/request.Request" representing the +// client's request for the CreateSchemaMapping operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateSchemaMapping for more information on using the CreateSchemaMapping +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the CreateSchemaMappingRequest method. 
+// req, resp := client.CreateSchemaMappingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/entityresolution-2018-05-10/CreateSchemaMapping +func (c *EntityResolution) CreateSchemaMappingRequest(input *CreateSchemaMappingInput) (req *request.Request, output *CreateSchemaMappingOutput) { + op := &request.Operation{ + Name: opCreateSchemaMapping, + HTTPMethod: "POST", + HTTPPath: "/schemas", + } + + if input == nil { + input = &CreateSchemaMappingInput{} + } + + output = &CreateSchemaMappingOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateSchemaMapping API operation for AWS EntityResolution. +// +// Creates a schema mapping, which defines the schema of the input customer +// records table. The SchemaMapping also provides Entity Resolution with some +// metadata about the table, such as the attribute types of the columns and +// which columns to match on. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS EntityResolution's +// API operation CreateSchemaMapping for usage and error information. +// +// Returned Error Types: +// +// - ThrottlingException +// The request was denied due to request throttling. HTTP Status Code: 429 +// +// - InternalServerException +// This exception occurs when there is an internal failure in the AWS Entity +// Resolution service. HTTP Status Code: 500 +// +// - AccessDeniedException +// You do not have sufficient access to perform this action. HTTP Status Code: +// 403 +// +// - ExceedsLimitException +// The request was rejected because it attempted to create resources beyond +// the current AWS Entity Resolution account limits. The error message describes +// the limit exceeded. HTTP Status Code: 402 +// +// - ConflictException +// The request could not be processed because of conflict in the current state +// of the resource. Example: Workflow already exists, Schema already exists, +// Workflow is currently running, etc. HTTP Status Code: 400 +// +// - ValidationException +// The input fails to satisfy the constraints specified by AWS Entity Resolution. +// HTTP Status Code: 400 +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/entityresolution-2018-05-10/CreateSchemaMapping +func (c *EntityResolution) CreateSchemaMapping(input *CreateSchemaMappingInput) (*CreateSchemaMappingOutput, error) { + req, out := c.CreateSchemaMappingRequest(input) + return out, req.Send() +} + +// CreateSchemaMappingWithContext is the same as CreateSchemaMapping with the addition of +// the ability to pass a context and additional request options. +// +// See CreateSchemaMapping for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EntityResolution) CreateSchemaMappingWithContext(ctx aws.Context, input *CreateSchemaMappingInput, opts ...request.Option) (*CreateSchemaMappingOutput, error) { + req, out := c.CreateSchemaMappingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opDeleteMatchingWorkflow = "DeleteMatchingWorkflow" + +// DeleteMatchingWorkflowRequest generates a "aws/request.Request" representing the +// client's request for the DeleteMatchingWorkflow operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteMatchingWorkflow for more information on using the DeleteMatchingWorkflow +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteMatchingWorkflowRequest method. +// req, resp := client.DeleteMatchingWorkflowRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/entityresolution-2018-05-10/DeleteMatchingWorkflow +func (c *EntityResolution) DeleteMatchingWorkflowRequest(input *DeleteMatchingWorkflowInput) (req *request.Request, output *DeleteMatchingWorkflowOutput) { + op := &request.Operation{ + Name: opDeleteMatchingWorkflow, + HTTPMethod: "DELETE", + HTTPPath: "/matchingworkflows/{workflowName}", + } + + if input == nil { + input = &DeleteMatchingWorkflowInput{} + } + + output = &DeleteMatchingWorkflowOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteMatchingWorkflow API operation for AWS EntityResolution. +// +// Deletes the MatchingWorkflow with a given name. This operation will succeed +// even if a workflow with the given name does not exist. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS EntityResolution's +// API operation DeleteMatchingWorkflow for usage and error information. +// +// Returned Error Types: +// +// - ThrottlingException +// The request was denied due to request throttling. HTTP Status Code: 429 +// +// - InternalServerException +// This exception occurs when there is an internal failure in the AWS Entity +// Resolution service. HTTP Status Code: 500 +// +// - AccessDeniedException +// You do not have sufficient access to perform this action. HTTP Status Code: +// 403 +// +// - ValidationException +// The input fails to satisfy the constraints specified by AWS Entity Resolution. +// HTTP Status Code: 400 +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/entityresolution-2018-05-10/DeleteMatchingWorkflow +func (c *EntityResolution) DeleteMatchingWorkflow(input *DeleteMatchingWorkflowInput) (*DeleteMatchingWorkflowOutput, error) { + req, out := c.DeleteMatchingWorkflowRequest(input) + return out, req.Send() +} + +// DeleteMatchingWorkflowWithContext is the same as DeleteMatchingWorkflow with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteMatchingWorkflow for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
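Each WithContext variant above follows the same contract: the context must be non-nil and bounds the request's lifetime. A minimal sketch using a deadline with DeleteMatchingWorkflow (the workflow name is a placeholder):

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/entityresolution"
)

func main() {
	client := entityresolution.New(session.Must(session.NewSession()))

	// Bound the call to 30 seconds; the SDK cancels the in-flight request
	// when the context expires.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	_, err := client.DeleteMatchingWorkflowWithContext(ctx, &entityresolution.DeleteMatchingWorkflowInput{
		WorkflowName: aws.String("example-workflow"), // placeholder name
	})
	if err != nil {
		log.Fatal(err)
	}
}
```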
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EntityResolution) DeleteMatchingWorkflowWithContext(ctx aws.Context, input *DeleteMatchingWorkflowInput, opts ...request.Option) (*DeleteMatchingWorkflowOutput, error) { + req, out := c.DeleteMatchingWorkflowRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteSchemaMapping = "DeleteSchemaMapping" + +// DeleteSchemaMappingRequest generates a "aws/request.Request" representing the +// client's request for the DeleteSchemaMapping operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteSchemaMapping for more information on using the DeleteSchemaMapping +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteSchemaMappingRequest method. +// req, resp := client.DeleteSchemaMappingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/entityresolution-2018-05-10/DeleteSchemaMapping +func (c *EntityResolution) DeleteSchemaMappingRequest(input *DeleteSchemaMappingInput) (req *request.Request, output *DeleteSchemaMappingOutput) { + op := &request.Operation{ + Name: opDeleteSchemaMapping, + HTTPMethod: "DELETE", + HTTPPath: "/schemas/{schemaName}", + } + + if input == nil { + input = &DeleteSchemaMappingInput{} + } + + output = &DeleteSchemaMappingOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteSchemaMapping API operation for AWS EntityResolution. +// +// Deletes the SchemaMapping with a given name. This operation will succeed +// even if a schema with the given name does not exist. This operation will +// fail if there is a DataIntegrationWorkflow object that references the SchemaMapping +// in the workflow's InputSourceConfig. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS EntityResolution's +// API operation DeleteSchemaMapping for usage and error information. +// +// Returned Error Types: +// +// - ThrottlingException +// The request was denied due to request throttling. HTTP Status Code: 429 +// +// - InternalServerException +// This exception occurs when there is an internal failure in the AWS Entity +// Resolution service. HTTP Status Code: 500 +// +// - AccessDeniedException +// You do not have sufficient access to perform this action. HTTP Status Code: +// 403 +// +// - ConflictException +// The request could not be processed because of conflict in the current state +// of the resource. Example: Workflow already exists, Schema already exists, +// Workflow is currently running, etc. HTTP Status Code: 400 +// +// - ValidationException +// The input fails to satisfy the constraints specified by AWS Entity Resolution. 
+// HTTP Status Code: 400 +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/entityresolution-2018-05-10/DeleteSchemaMapping +func (c *EntityResolution) DeleteSchemaMapping(input *DeleteSchemaMappingInput) (*DeleteSchemaMappingOutput, error) { + req, out := c.DeleteSchemaMappingRequest(input) + return out, req.Send() +} + +// DeleteSchemaMappingWithContext is the same as DeleteSchemaMapping with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteSchemaMapping for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EntityResolution) DeleteSchemaMappingWithContext(ctx aws.Context, input *DeleteSchemaMappingInput, opts ...request.Option) (*DeleteSchemaMappingOutput, error) { + req, out := c.DeleteSchemaMappingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetMatchId = "GetMatchId" + +// GetMatchIdRequest generates a "aws/request.Request" representing the +// client's request for the GetMatchId operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetMatchId for more information on using the GetMatchId +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetMatchIdRequest method. +// req, resp := client.GetMatchIdRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/entityresolution-2018-05-10/GetMatchId +func (c *EntityResolution) GetMatchIdRequest(input *GetMatchIdInput) (req *request.Request, output *GetMatchIdOutput) { + op := &request.Operation{ + Name: opGetMatchId, + HTTPMethod: "POST", + HTTPPath: "/matchingworkflows/{workflowName}/matches", + } + + if input == nil { + input = &GetMatchIdInput{} + } + + output = &GetMatchIdOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetMatchId API operation for AWS EntityResolution. +// +// Returns the corresponding Match ID of a customer record if the record has +// been processed. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS EntityResolution's +// API operation GetMatchId for usage and error information. +// +// Returned Error Types: +// +// - ThrottlingException +// The request was denied due to request throttling. HTTP Status Code: 429 +// +// - InternalServerException +// This exception occurs when there is an internal failure in the AWS Entity +// Resolution service. HTTP Status Code: 500 +// +// - ResourceNotFoundException +// The resource could not be found. HTTP Status Code: 404 +// +// - AccessDeniedException +// You do not have sufficient access to perform this action. 
HTTP Status Code: +// 403 +// +// - ValidationException +// The input fails to satisfy the constraints specified by AWS Entity Resolution. +// HTTP Status Code: 400 +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/entityresolution-2018-05-10/GetMatchId +func (c *EntityResolution) GetMatchId(input *GetMatchIdInput) (*GetMatchIdOutput, error) { + req, out := c.GetMatchIdRequest(input) + return out, req.Send() +} + +// GetMatchIdWithContext is the same as GetMatchId with the addition of +// the ability to pass a context and additional request options. +// +// See GetMatchId for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EntityResolution) GetMatchIdWithContext(ctx aws.Context, input *GetMatchIdInput, opts ...request.Option) (*GetMatchIdOutput, error) { + req, out := c.GetMatchIdRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetMatchingJob = "GetMatchingJob" + +// GetMatchingJobRequest generates a "aws/request.Request" representing the +// client's request for the GetMatchingJob operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetMatchingJob for more information on using the GetMatchingJob +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetMatchingJobRequest method. +// req, resp := client.GetMatchingJobRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/entityresolution-2018-05-10/GetMatchingJob +func (c *EntityResolution) GetMatchingJobRequest(input *GetMatchingJobInput) (req *request.Request, output *GetMatchingJobOutput) { + op := &request.Operation{ + Name: opGetMatchingJob, + HTTPMethod: "GET", + HTTPPath: "/matchingworkflows/{workflowName}/jobs/{jobId}", + } + + if input == nil { + input = &GetMatchingJobInput{} + } + + output = &GetMatchingJobOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetMatchingJob API operation for AWS EntityResolution. +// +// Gets the status, metrics, and errors (if there are any) that are associated +// with a job. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS EntityResolution's +// API operation GetMatchingJob for usage and error information. +// +// Returned Error Types: +// +// - ThrottlingException +// The request was denied due to request throttling. HTTP Status Code: 429 +// +// - InternalServerException +// This exception occurs when there is an internal failure in the AWS Entity +// Resolution service. HTTP Status Code: 500 +// +// - ResourceNotFoundException +// The resource could not be found. 
HTTP Status Code: 404 +// +// - AccessDeniedException +// You do not have sufficient access to perform this action. HTTP Status Code: +// 403 +// +// - ValidationException +// The input fails to satisfy the constraints specified by AWS Entity Resolution. +// HTTP Status Code: 400 +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/entityresolution-2018-05-10/GetMatchingJob +func (c *EntityResolution) GetMatchingJob(input *GetMatchingJobInput) (*GetMatchingJobOutput, error) { + req, out := c.GetMatchingJobRequest(input) + return out, req.Send() +} + +// GetMatchingJobWithContext is the same as GetMatchingJob with the addition of +// the ability to pass a context and additional request options. +// +// See GetMatchingJob for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EntityResolution) GetMatchingJobWithContext(ctx aws.Context, input *GetMatchingJobInput, opts ...request.Option) (*GetMatchingJobOutput, error) { + req, out := c.GetMatchingJobRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetMatchingWorkflow = "GetMatchingWorkflow" + +// GetMatchingWorkflowRequest generates a "aws/request.Request" representing the +// client's request for the GetMatchingWorkflow operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetMatchingWorkflow for more information on using the GetMatchingWorkflow +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetMatchingWorkflowRequest method. +// req, resp := client.GetMatchingWorkflowRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/entityresolution-2018-05-10/GetMatchingWorkflow +func (c *EntityResolution) GetMatchingWorkflowRequest(input *GetMatchingWorkflowInput) (req *request.Request, output *GetMatchingWorkflowOutput) { + op := &request.Operation{ + Name: opGetMatchingWorkflow, + HTTPMethod: "GET", + HTTPPath: "/matchingworkflows/{workflowName}", + } + + if input == nil { + input = &GetMatchingWorkflowInput{} + } + + output = &GetMatchingWorkflowOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetMatchingWorkflow API operation for AWS EntityResolution. +// +// Returns the MatchingWorkflow with a given name, if it exists. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS EntityResolution's +// API operation GetMatchingWorkflow for usage and error information. +// +// Returned Error Types: +// +// - ThrottlingException +// The request was denied due to request throttling. 
HTTP Status Code: 429 +// +// - InternalServerException +// This exception occurs when there is an internal failure in the AWS Entity +// Resolution service. HTTP Status Code: 500 +// +// - ResourceNotFoundException +// The resource could not be found. HTTP Status Code: 404 +// +// - AccessDeniedException +// You do not have sufficient access to perform this action. HTTP Status Code: +// 403 +// +// - ValidationException +// The input fails to satisfy the constraints specified by AWS Entity Resolution. +// HTTP Status Code: 400 +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/entityresolution-2018-05-10/GetMatchingWorkflow +func (c *EntityResolution) GetMatchingWorkflow(input *GetMatchingWorkflowInput) (*GetMatchingWorkflowOutput, error) { + req, out := c.GetMatchingWorkflowRequest(input) + return out, req.Send() +} + +// GetMatchingWorkflowWithContext is the same as GetMatchingWorkflow with the addition of +// the ability to pass a context and additional request options. +// +// See GetMatchingWorkflow for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EntityResolution) GetMatchingWorkflowWithContext(ctx aws.Context, input *GetMatchingWorkflowInput, opts ...request.Option) (*GetMatchingWorkflowOutput, error) { + req, out := c.GetMatchingWorkflowRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetSchemaMapping = "GetSchemaMapping" + +// GetSchemaMappingRequest generates a "aws/request.Request" representing the +// client's request for the GetSchemaMapping operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetSchemaMapping for more information on using the GetSchemaMapping +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetSchemaMappingRequest method. +// req, resp := client.GetSchemaMappingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/entityresolution-2018-05-10/GetSchemaMapping +func (c *EntityResolution) GetSchemaMappingRequest(input *GetSchemaMappingInput) (req *request.Request, output *GetSchemaMappingOutput) { + op := &request.Operation{ + Name: opGetSchemaMapping, + HTTPMethod: "GET", + HTTPPath: "/schemas/{schemaName}", + } + + if input == nil { + input = &GetSchemaMappingInput{} + } + + output = &GetSchemaMappingOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetSchemaMapping API operation for AWS EntityResolution. +// +// Returns the SchemaMapping of a given name. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
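The error-handling boilerplate repeated above boils down to one pattern: assert the returned error to awserr.Error and branch on its Code. A minimal sketch against GetMatchingWorkflow (the ErrCode constants follow the SDK's standard codegen naming and are assumed to be emitted for this new package; the workflow name is a placeholder):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/entityresolution"
)

func main() {
	client := entityresolution.New(session.Must(session.NewSession()))

	_, err := client.GetMatchingWorkflow(&entityresolution.GetMatchingWorkflowInput{
		WorkflowName: aws.String("example-workflow"), // placeholder
	})
	if err != nil {
		// Runtime type assertion, as the generated docs describe.
		if aerr, ok := err.(awserr.Error); ok {
			switch aerr.Code() {
			case entityresolution.ErrCodeResourceNotFoundException:
				fmt.Println("workflow does not exist:", aerr.Message())
			case entityresolution.ErrCodeThrottlingException:
				fmt.Println("throttled, retry later:", aerr.Message())
			default:
				log.Fatal(aerr.Code(), ": ", aerr.Message())
			}
			return
		}
		log.Fatal(err)
	}
}
```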
+// +// See the AWS API reference guide for AWS EntityResolution's +// API operation GetSchemaMapping for usage and error information. +// +// Returned Error Types: +// +// - ThrottlingException +// The request was denied due to request throttling. HTTP Status Code: 429 +// +// - InternalServerException +// This exception occurs when there is an internal failure in the AWS Entity +// Resolution service. HTTP Status Code: 500 +// +// - ResourceNotFoundException +// The resource could not be found. HTTP Status Code: 404 +// +// - AccessDeniedException +// You do not have sufficient access to perform this action. HTTP Status Code: +// 403 +// +// - ValidationException +// The input fails to satisfy the constraints specified by AWS Entity Resolution. +// HTTP Status Code: 400 +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/entityresolution-2018-05-10/GetSchemaMapping +func (c *EntityResolution) GetSchemaMapping(input *GetSchemaMappingInput) (*GetSchemaMappingOutput, error) { + req, out := c.GetSchemaMappingRequest(input) + return out, req.Send() +} + +// GetSchemaMappingWithContext is the same as GetSchemaMapping with the addition of +// the ability to pass a context and additional request options. +// +// See GetSchemaMapping for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EntityResolution) GetSchemaMappingWithContext(ctx aws.Context, input *GetSchemaMappingInput, opts ...request.Option) (*GetSchemaMappingOutput, error) { + req, out := c.GetSchemaMappingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListMatchingJobs = "ListMatchingJobs" + +// ListMatchingJobsRequest generates a "aws/request.Request" representing the +// client's request for the ListMatchingJobs operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListMatchingJobs for more information on using the ListMatchingJobs +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListMatchingJobsRequest method. 
+// req, resp := client.ListMatchingJobsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/entityresolution-2018-05-10/ListMatchingJobs +func (c *EntityResolution) ListMatchingJobsRequest(input *ListMatchingJobsInput) (req *request.Request, output *ListMatchingJobsOutput) { + op := &request.Operation{ + Name: opListMatchingJobs, + HTTPMethod: "GET", + HTTPPath: "/matchingworkflows/{workflowName}/jobs", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListMatchingJobsInput{} + } + + output = &ListMatchingJobsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListMatchingJobs API operation for AWS EntityResolution. +// +// Lists all jobs for a given workflow. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS EntityResolution's +// API operation ListMatchingJobs for usage and error information. +// +// Returned Error Types: +// +// - ThrottlingException +// The request was denied due to request throttling. HTTP Status Code: 429 +// +// - InternalServerException +// This exception occurs when there is an internal failure in the AWS Entity +// Resolution service. HTTP Status Code: 500 +// +// - ResourceNotFoundException +// The resource could not be found. HTTP Status Code: 404 +// +// - AccessDeniedException +// You do not have sufficient access to perform this action. HTTP Status Code: +// 403 +// +// - ValidationException +// The input fails to satisfy the constraints specified by AWS Entity Resolution. +// HTTP Status Code: 400 +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/entityresolution-2018-05-10/ListMatchingJobs +func (c *EntityResolution) ListMatchingJobs(input *ListMatchingJobsInput) (*ListMatchingJobsOutput, error) { + req, out := c.ListMatchingJobsRequest(input) + return out, req.Send() +} + +// ListMatchingJobsWithContext is the same as ListMatchingJobs with the addition of +// the ability to pass a context and additional request options. +// +// See ListMatchingJobs for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EntityResolution) ListMatchingJobsWithContext(ctx aws.Context, input *ListMatchingJobsInput, opts ...request.Option) (*ListMatchingJobsOutput, error) { + req, out := c.ListMatchingJobsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListMatchingJobsPages iterates over the pages of a ListMatchingJobs operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListMatchingJobs method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListMatchingJobs operation. 
+// pageNum := 0 +// err := client.ListMatchingJobsPages(params, +// func(page *entityresolution.ListMatchingJobsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *EntityResolution) ListMatchingJobsPages(input *ListMatchingJobsInput, fn func(*ListMatchingJobsOutput, bool) bool) error { + return c.ListMatchingJobsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListMatchingJobsPagesWithContext same as ListMatchingJobsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EntityResolution) ListMatchingJobsPagesWithContext(ctx aws.Context, input *ListMatchingJobsInput, fn func(*ListMatchingJobsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListMatchingJobsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListMatchingJobsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListMatchingJobsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListMatchingWorkflows = "ListMatchingWorkflows" + +// ListMatchingWorkflowsRequest generates a "aws/request.Request" representing the +// client's request for the ListMatchingWorkflows operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListMatchingWorkflows for more information on using the ListMatchingWorkflows +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListMatchingWorkflowsRequest method. +// req, resp := client.ListMatchingWorkflowsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/entityresolution-2018-05-10/ListMatchingWorkflows +func (c *EntityResolution) ListMatchingWorkflowsRequest(input *ListMatchingWorkflowsInput) (req *request.Request, output *ListMatchingWorkflowsOutput) { + op := &request.Operation{ + Name: opListMatchingWorkflows, + HTTPMethod: "GET", + HTTPPath: "/matchingworkflows", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListMatchingWorkflowsInput{} + } + + output = &ListMatchingWorkflowsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListMatchingWorkflows API operation for AWS EntityResolution. +// +// Returns a list of all the MatchingWorkflows that have been created for an +// AWS account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for AWS EntityResolution's +// API operation ListMatchingWorkflows for usage and error information. +// +// Returned Error Types: +// +// - ThrottlingException +// The request was denied due to request throttling. HTTP Status Code: 429 +// +// - InternalServerException +// This exception occurs when there is an internal failure in the AWS Entity +// Resolution service. HTTP Status Code: 500 +// +// - AccessDeniedException +// You do not have sufficient access to perform this action. HTTP Status Code: +// 403 +// +// - ValidationException +// The input fails to satisfy the constraints specified by AWS Entity Resolution. +// HTTP Status Code: 400 +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/entityresolution-2018-05-10/ListMatchingWorkflows +func (c *EntityResolution) ListMatchingWorkflows(input *ListMatchingWorkflowsInput) (*ListMatchingWorkflowsOutput, error) { + req, out := c.ListMatchingWorkflowsRequest(input) + return out, req.Send() +} + +// ListMatchingWorkflowsWithContext is the same as ListMatchingWorkflows with the addition of +// the ability to pass a context and additional request options. +// +// See ListMatchingWorkflows for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EntityResolution) ListMatchingWorkflowsWithContext(ctx aws.Context, input *ListMatchingWorkflowsInput, opts ...request.Option) (*ListMatchingWorkflowsOutput, error) { + req, out := c.ListMatchingWorkflowsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListMatchingWorkflowsPages iterates over the pages of a ListMatchingWorkflows operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListMatchingWorkflows method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListMatchingWorkflows operation. +// pageNum := 0 +// err := client.ListMatchingWorkflowsPages(params, +// func(page *entityresolution.ListMatchingWorkflowsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *EntityResolution) ListMatchingWorkflowsPages(input *ListMatchingWorkflowsInput, fn func(*ListMatchingWorkflowsOutput, bool) bool) error { + return c.ListMatchingWorkflowsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListMatchingWorkflowsPagesWithContext same as ListMatchingWorkflowsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EntityResolution) ListMatchingWorkflowsPagesWithContext(ctx aws.Context, input *ListMatchingWorkflowsInput, fn func(*ListMatchingWorkflowsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListMatchingWorkflowsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListMatchingWorkflowsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListMatchingWorkflowsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListSchemaMappings = "ListSchemaMappings" + +// ListSchemaMappingsRequest generates a "aws/request.Request" representing the +// client's request for the ListSchemaMappings operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListSchemaMappings for more information on using the ListSchemaMappings +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListSchemaMappingsRequest method. +// req, resp := client.ListSchemaMappingsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/entityresolution-2018-05-10/ListSchemaMappings +func (c *EntityResolution) ListSchemaMappingsRequest(input *ListSchemaMappingsInput) (req *request.Request, output *ListSchemaMappingsOutput) { + op := &request.Operation{ + Name: opListSchemaMappings, + HTTPMethod: "GET", + HTTPPath: "/schemas", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListSchemaMappingsInput{} + } + + output = &ListSchemaMappingsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListSchemaMappings API operation for AWS EntityResolution. +// +// Returns a list of all the SchemaMappings that have been created for an AWS +// account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS EntityResolution's +// API operation ListSchemaMappings for usage and error information. +// +// Returned Error Types: +// +// - ThrottlingException +// The request was denied due to request throttling. HTTP Status Code: 429 +// +// - InternalServerException +// This exception occurs when there is an internal failure in the AWS Entity +// Resolution service. HTTP Status Code: 500 +// +// - AccessDeniedException +// You do not have sufficient access to perform this action. HTTP Status Code: +// 403 +// +// - ValidationException +// The input fails to satisfy the constraints specified by AWS Entity Resolution. 
+// HTTP Status Code: 400 +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/entityresolution-2018-05-10/ListSchemaMappings +func (c *EntityResolution) ListSchemaMappings(input *ListSchemaMappingsInput) (*ListSchemaMappingsOutput, error) { + req, out := c.ListSchemaMappingsRequest(input) + return out, req.Send() +} + +// ListSchemaMappingsWithContext is the same as ListSchemaMappings with the addition of +// the ability to pass a context and additional request options. +// +// See ListSchemaMappings for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EntityResolution) ListSchemaMappingsWithContext(ctx aws.Context, input *ListSchemaMappingsInput, opts ...request.Option) (*ListSchemaMappingsOutput, error) { + req, out := c.ListSchemaMappingsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListSchemaMappingsPages iterates over the pages of a ListSchemaMappings operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListSchemaMappings method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListSchemaMappings operation. +// pageNum := 0 +// err := client.ListSchemaMappingsPages(params, +// func(page *entityresolution.ListSchemaMappingsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *EntityResolution) ListSchemaMappingsPages(input *ListSchemaMappingsInput, fn func(*ListSchemaMappingsOutput, bool) bool) error { + return c.ListSchemaMappingsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListSchemaMappingsPagesWithContext same as ListSchemaMappingsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EntityResolution) ListSchemaMappingsPagesWithContext(ctx aws.Context, input *ListSchemaMappingsInput, fn func(*ListSchemaMappingsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListSchemaMappingsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListSchemaMappingsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListSchemaMappingsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See ListTagsForResource for more information on using the ListTagsForResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/entityresolution-2018-05-10/ListTagsForResource +func (c *EntityResolution) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "GET", + HTTPPath: "/tags/{resourceArn}", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + output = &ListTagsForResourceOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTagsForResource API operation for AWS EntityResolution. +// +// Displays the tags associated with an AWS Entity Resolution resource. In Entity +// Resolution, SchemaMapping, and MatchingWorkflow can be tagged. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS EntityResolution's +// API operation ListTagsForResource for usage and error information. +// +// Returned Error Types: +// +// - InternalServerException +// This exception occurs when there is an internal failure in the AWS Entity +// Resolution service. HTTP Status Code: 500 +// +// - ResourceNotFoundException +// The resource could not be found. HTTP Status Code: 404 +// +// - ValidationException +// The input fails to satisfy the constraints specified by AWS Entity Resolution. +// HTTP Status Code: 400 +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/entityresolution-2018-05-10/ListTagsForResource +func (c *EntityResolution) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + return out, req.Send() +} + +// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// the ability to pass a context and additional request options. +// +// See ListTagsForResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EntityResolution) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStartMatchingJob = "StartMatchingJob" + +// StartMatchingJobRequest generates a "aws/request.Request" representing the +// client's request for the StartMatchingJob operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See StartMatchingJob for more information on using the StartMatchingJob +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the StartMatchingJobRequest method. +// req, resp := client.StartMatchingJobRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/entityresolution-2018-05-10/StartMatchingJob +func (c *EntityResolution) StartMatchingJobRequest(input *StartMatchingJobInput) (req *request.Request, output *StartMatchingJobOutput) { + op := &request.Operation{ + Name: opStartMatchingJob, + HTTPMethod: "POST", + HTTPPath: "/matchingworkflows/{workflowName}/jobs", + } + + if input == nil { + input = &StartMatchingJobInput{} + } + + output = &StartMatchingJobOutput{} + req = c.newRequest(op, input, output) + return +} + +// StartMatchingJob API operation for AWS EntityResolution. +// +// Starts the MatchingJob of a workflow. The workflow must have previously been +// created using the CreateMatchingWorkflow endpoint. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS EntityResolution's +// API operation StartMatchingJob for usage and error information. +// +// Returned Error Types: +// +// - ThrottlingException +// The request was denied due to request throttling. HTTP Status Code: 429 +// +// - InternalServerException +// This exception occurs when there is an internal failure in the AWS Entity +// Resolution service. HTTP Status Code: 500 +// +// - ResourceNotFoundException +// The resource could not be found. HTTP Status Code: 404 +// +// - AccessDeniedException +// You do not have sufficient access to perform this action. HTTP Status Code: +// 403 +// +// - ExceedsLimitException +// The request was rejected because it attempted to create resources beyond +// the current AWS Entity Resolution account limits. The error message describes +// the limit exceeded. HTTP Status Code: 402 +// +// - ConflictException +// The request could not be processed because of conflict in the current state +// of the resource. Example: Workflow already exists, Schema already exists, +// Workflow is currently running, etc. HTTP Status Code: 400 +// +// - ValidationException +// The input fails to satisfy the constraints specified by AWS Entity Resolution. +// HTTP Status Code: 400 +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/entityresolution-2018-05-10/StartMatchingJob +func (c *EntityResolution) StartMatchingJob(input *StartMatchingJobInput) (*StartMatchingJobOutput, error) { + req, out := c.StartMatchingJobRequest(input) + return out, req.Send() +} + +// StartMatchingJobWithContext is the same as StartMatchingJob with the addition of +// the ability to pass a context and additional request options. +// +// See StartMatchingJob for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EntityResolution) StartMatchingJobWithContext(ctx aws.Context, input *StartMatchingJobInput, opts ...request.Option) (*StartMatchingJobOutput, error) { + req, out := c.StartMatchingJobRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opTagResource = "TagResource" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagResource for more information on using the TagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the TagResourceRequest method. +// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/entityresolution-2018-05-10/TagResource +func (c *EntityResolution) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/tags/{resourceArn}", + } + + if input == nil { + input = &TagResourceInput{} + } + + output = &TagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// TagResource API operation for AWS EntityResolution. +// +// Assigns one or more tags (key-value pairs) to the specified AWS Entity Resolution +// resource. Tags can help you organize and categorize your resources. You can +// also use them to scope user permissions by granting a user permission to +// access or change only resources with certain tag values. In Entity Resolution, +// SchemaMapping, and MatchingWorkflow can be tagged. Tags don't have any semantic +// meaning to AWS and are interpreted strictly as strings of characters. You +// can use the TagResource action with a resource that already has tags. If +// you specify a new tag key, this tag is appended to the list of tags associated +// with the resource. If you specify a tag key that is already associated with +// the resource, the new tag value that you specify replaces the previous value +// for that tag. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS EntityResolution's +// API operation TagResource for usage and error information. +// +// Returned Error Types: +// +// - InternalServerException +// This exception occurs when there is an internal failure in the AWS Entity +// Resolution service. HTTP Status Code: 500 +// +// - ResourceNotFoundException +// The resource could not be found. HTTP Status Code: 404 +// +// - ValidationException +// The input fails to satisfy the constraints specified by AWS Entity Resolution. 
+// HTTP Status Code: 400 +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/entityresolution-2018-05-10/TagResource +func (c *EntityResolution) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + return out, req.Send() +} + +// TagResourceWithContext is the same as TagResource with the addition of +// the ability to pass a context and additional request options. +// +// See TagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EntityResolution) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUntagResource = "UntagResource" + +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UntagResource for more information on using the UntagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the UntagResourceRequest method. +// req, resp := client.UntagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/entityresolution-2018-05-10/UntagResource +func (c *EntityResolution) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "DELETE", + HTTPPath: "/tags/{resourceArn}", + } + + if input == nil { + input = &UntagResourceInput{} + } + + output = &UntagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UntagResource API operation for AWS EntityResolution. +// +// Removes one or more tags from the specified AWS Entity Resolution resource. +// In Entity Resolution, SchemaMapping, and MatchingWorkflow can be tagged. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS EntityResolution's +// API operation UntagResource for usage and error information. +// +// Returned Error Types: +// +// - InternalServerException +// This exception occurs when there is an internal failure in the AWS Entity +// Resolution service. HTTP Status Code: 500 +// +// - ResourceNotFoundException +// The resource could not be found. 
HTTP Status Code: 404 +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/entityresolution-2018-05-10/UntagResource +func (c *EntityResolution) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + return out, req.Send() +} + +// UntagResourceWithContext is the same as UntagResource with the addition of +// the ability to pass a context and additional request options. +// +// See UntagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EntityResolution) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateMatchingWorkflow = "UpdateMatchingWorkflow" + +// UpdateMatchingWorkflowRequest generates a "aws/request.Request" representing the +// client's request for the UpdateMatchingWorkflow operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateMatchingWorkflow for more information on using the UpdateMatchingWorkflow +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the UpdateMatchingWorkflowRequest method. +// req, resp := client.UpdateMatchingWorkflowRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/entityresolution-2018-05-10/UpdateMatchingWorkflow +func (c *EntityResolution) UpdateMatchingWorkflowRequest(input *UpdateMatchingWorkflowInput) (req *request.Request, output *UpdateMatchingWorkflowOutput) { + op := &request.Operation{ + Name: opUpdateMatchingWorkflow, + HTTPMethod: "PUT", + HTTPPath: "/matchingworkflows/{workflowName}", + } + + if input == nil { + input = &UpdateMatchingWorkflowInput{} + } + + output = &UpdateMatchingWorkflowOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateMatchingWorkflow API operation for AWS EntityResolution. +// +// Updates an existing MatchingWorkflow. This method is identical to CreateMatchingWorkflow, +// except it uses an HTTP PUT request instead of a POST request, and the MatchingWorkflow +// must already exist for the method to succeed. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS EntityResolution's +// API operation UpdateMatchingWorkflow for usage and error information. +// +// Returned Error Types: +// +// - ThrottlingException +// The request was denied due to request throttling. 
HTTP Status Code: 429 +// +// - InternalServerException +// This exception occurs when there is an internal failure in the AWS Entity +// Resolution service. HTTP Status Code: 500 +// +// - ResourceNotFoundException +// The resource could not be found. HTTP Status Code: 404 +// +// - AccessDeniedException +// You do not have sufficient access to perform this action. HTTP Status Code: +// 403 +// +// - ValidationException +// The input fails to satisfy the constraints specified by AWS Entity Resolution. +// HTTP Status Code: 400 +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/entityresolution-2018-05-10/UpdateMatchingWorkflow +func (c *EntityResolution) UpdateMatchingWorkflow(input *UpdateMatchingWorkflowInput) (*UpdateMatchingWorkflowOutput, error) { + req, out := c.UpdateMatchingWorkflowRequest(input) + return out, req.Send() +} + +// UpdateMatchingWorkflowWithContext is the same as UpdateMatchingWorkflow with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateMatchingWorkflow for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EntityResolution) UpdateMatchingWorkflowWithContext(ctx aws.Context, input *UpdateMatchingWorkflowInput, opts ...request.Option) (*UpdateMatchingWorkflowOutput, error) { + req, out := c.UpdateMatchingWorkflowRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// You do not have sufficient access to perform this action. HTTP Status Code: +// 403 +type AccessDeniedException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AccessDeniedException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AccessDeniedException) GoString() string { + return s.String() +} + +func newErrorAccessDeniedException(v protocol.ResponseMetadata) error { + return &AccessDeniedException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *AccessDeniedException) Code() string { + return "AccessDeniedException" +} + +// Message returns the exception's message. +func (s *AccessDeniedException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *AccessDeniedException) OrigErr() error { + return nil +} + +func (s *AccessDeniedException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *AccessDeniedException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. 
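+//
+// Editor's note: a sketch, not part of this diff, of inspecting a typed error
+// such as AccessDeniedException with errors.As; the "client" and "input"
+// variables are assumed.
+//
+// _, err := client.StartMatchingJob(input)
+// var denied *entityresolution.AccessDeniedException
+// if errors.As(err, &denied) {
+//     log.Printf("denied (HTTP %d, request id %s): %s",
+//         denied.StatusCode(), denied.RequestID(), denied.Message())
+// }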
+func (s *AccessDeniedException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The request could not be processed because of conflict in the current state +// of the resource. Example: Workflow already exists, Schema already exists, +// Workflow is currently running, etc. HTTP Status Code: 400 +type ConflictException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ConflictException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ConflictException) GoString() string { + return s.String() +} + +func newErrorConflictException(v protocol.ResponseMetadata) error { + return &ConflictException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ConflictException) Code() string { + return "ConflictException" +} + +// Message returns the exception's message. +func (s *ConflictException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ConflictException) OrigErr() error { + return nil +} + +func (s *ConflictException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ConflictException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ConflictException) RequestID() string { + return s.RespMetadata.RequestID +} + +type CreateMatchingWorkflowInput struct { + _ struct{} `type:"structure"` + + // A description of the workflow. + Description *string `locationName:"description" type:"string"` + + // An object which defines an incremental run type and has only incrementalRunType + // as a field. + IncrementalRunConfig *IncrementalRunConfig `locationName:"incrementalRunConfig" type:"structure"` + + // A list of InputSource objects, which have the fields InputSourceARN and SchemaName. + // + // InputSourceConfig is a required field + InputSourceConfig []*InputSource `locationName:"inputSourceConfig" min:"1" type:"list" required:"true"` + + // A list of OutputSource objects, each of which contains fields OutputS3Path, + // ApplyNormalization, and Output. + // + // OutputSourceConfig is a required field + OutputSourceConfig []*OutputSource `locationName:"outputSourceConfig" min:"1" type:"list" required:"true"` + + // An object which defines the resolutionType and the ruleBasedProperties + // + // ResolutionTechniques is a required field + ResolutionTechniques *ResolutionTechniques `locationName:"resolutionTechniques" type:"structure" required:"true"` + + // The Amazon Resource Name (ARN) of the IAM role. AWS Entity Resolution assumes + // this role to create resources on your behalf as part of workflow execution. 
+ // + // RoleArn is a required field + RoleArn *string `locationName:"roleArn" type:"string" required:"true"` + + // The tags used to organize, track, or control access for this resource. + Tags map[string]*string `locationName:"tags" type:"map"` + + // The name of the workflow. There cannot be multiple DataIntegrationWorkflows + // with the same name. + // + // WorkflowName is a required field + WorkflowName *string `locationName:"workflowName" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateMatchingWorkflowInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateMatchingWorkflowInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateMatchingWorkflowInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateMatchingWorkflowInput"} + if s.InputSourceConfig == nil { + invalidParams.Add(request.NewErrParamRequired("InputSourceConfig")) + } + if s.InputSourceConfig != nil && len(s.InputSourceConfig) < 1 { + invalidParams.Add(request.NewErrParamMinLen("InputSourceConfig", 1)) + } + if s.OutputSourceConfig == nil { + invalidParams.Add(request.NewErrParamRequired("OutputSourceConfig")) + } + if s.OutputSourceConfig != nil && len(s.OutputSourceConfig) < 1 { + invalidParams.Add(request.NewErrParamMinLen("OutputSourceConfig", 1)) + } + if s.ResolutionTechniques == nil { + invalidParams.Add(request.NewErrParamRequired("ResolutionTechniques")) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.WorkflowName == nil { + invalidParams.Add(request.NewErrParamRequired("WorkflowName")) + } + if s.InputSourceConfig != nil { + for i, v := range s.InputSourceConfig { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "InputSourceConfig", i), err.(request.ErrInvalidParams)) + } + } + } + if s.OutputSourceConfig != nil { + for i, v := range s.OutputSourceConfig { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "OutputSourceConfig", i), err.(request.ErrInvalidParams)) + } + } + } + if s.ResolutionTechniques != nil { + if err := s.ResolutionTechniques.Validate(); err != nil { + invalidParams.AddNested("ResolutionTechniques", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDescription sets the Description field's value. +func (s *CreateMatchingWorkflowInput) SetDescription(v string) *CreateMatchingWorkflowInput { + s.Description = &v + return s +} + +// SetIncrementalRunConfig sets the IncrementalRunConfig field's value. +func (s *CreateMatchingWorkflowInput) SetIncrementalRunConfig(v *IncrementalRunConfig) *CreateMatchingWorkflowInput { + s.IncrementalRunConfig = v + return s +} + +// SetInputSourceConfig sets the InputSourceConfig field's value. 
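+//
+// Editor's note: a sketch of chaining the fluent setters when building this
+// input; only some required fields are shown, and the names and ARNs are
+// illustrative assumptions.
+//
+// input := (&entityresolution.CreateMatchingWorkflowInput{}).
+//     SetWorkflowName("my-workflow").
+//     SetRoleArn("arn:aws:iam::111122223333:role/entity-resolution-role").
+//     SetInputSourceConfig([]*entityresolution.InputSource{{
+//         InputSourceARN: aws.String("arn:aws:glue:us-east-1:111122223333:table/db/tbl"),
+//         SchemaName:     aws.String("my-schema"),
+//     }})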
+func (s *CreateMatchingWorkflowInput) SetInputSourceConfig(v []*InputSource) *CreateMatchingWorkflowInput { + s.InputSourceConfig = v + return s +} + +// SetOutputSourceConfig sets the OutputSourceConfig field's value. +func (s *CreateMatchingWorkflowInput) SetOutputSourceConfig(v []*OutputSource) *CreateMatchingWorkflowInput { + s.OutputSourceConfig = v + return s +} + +// SetResolutionTechniques sets the ResolutionTechniques field's value. +func (s *CreateMatchingWorkflowInput) SetResolutionTechniques(v *ResolutionTechniques) *CreateMatchingWorkflowInput { + s.ResolutionTechniques = v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *CreateMatchingWorkflowInput) SetRoleArn(v string) *CreateMatchingWorkflowInput { + s.RoleArn = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateMatchingWorkflowInput) SetTags(v map[string]*string) *CreateMatchingWorkflowInput { + s.Tags = v + return s +} + +// SetWorkflowName sets the WorkflowName field's value. +func (s *CreateMatchingWorkflowInput) SetWorkflowName(v string) *CreateMatchingWorkflowInput { + s.WorkflowName = &v + return s +} + +type CreateMatchingWorkflowOutput struct { + _ struct{} `type:"structure"` + + // A description of the workflow. + Description *string `locationName:"description" type:"string"` + + // An object which defines an incremental run type and has only incrementalRunType + // as a field. + IncrementalRunConfig *IncrementalRunConfig `locationName:"incrementalRunConfig" type:"structure"` + + // A list of InputSource objects, which have the fields InputSourceARN and SchemaName. + // + // InputSourceConfig is a required field + InputSourceConfig []*InputSource `locationName:"inputSourceConfig" min:"1" type:"list" required:"true"` + + // A list of OutputSource objects, each of which contains fields OutputS3Path, + // ApplyNormalization, and Output. + // + // OutputSourceConfig is a required field + OutputSourceConfig []*OutputSource `locationName:"outputSourceConfig" min:"1" type:"list" required:"true"` + + // An object which defines the resolutionType and the ruleBasedProperties + // + // ResolutionTechniques is a required field + ResolutionTechniques *ResolutionTechniques `locationName:"resolutionTechniques" type:"structure" required:"true"` + + // The Amazon Resource Name (ARN) of the IAM role. AWS Entity Resolution assumes + // this role to create resources on your behalf as part of workflow execution. + // + // RoleArn is a required field + RoleArn *string `locationName:"roleArn" type:"string" required:"true"` + + // The ARN (Amazon Resource Name) that Entity Resolution generated for the MatchingWorkflow. + // + // WorkflowArn is a required field + WorkflowArn *string `locationName:"workflowArn" type:"string" required:"true"` + + // The name of the workflow. + // + // WorkflowName is a required field + WorkflowName *string `locationName:"workflowName" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateMatchingWorkflowOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s CreateMatchingWorkflowOutput) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *CreateMatchingWorkflowOutput) SetDescription(v string) *CreateMatchingWorkflowOutput { + s.Description = &v + return s +} + +// SetIncrementalRunConfig sets the IncrementalRunConfig field's value. +func (s *CreateMatchingWorkflowOutput) SetIncrementalRunConfig(v *IncrementalRunConfig) *CreateMatchingWorkflowOutput { + s.IncrementalRunConfig = v + return s +} + +// SetInputSourceConfig sets the InputSourceConfig field's value. +func (s *CreateMatchingWorkflowOutput) SetInputSourceConfig(v []*InputSource) *CreateMatchingWorkflowOutput { + s.InputSourceConfig = v + return s +} + +// SetOutputSourceConfig sets the OutputSourceConfig field's value. +func (s *CreateMatchingWorkflowOutput) SetOutputSourceConfig(v []*OutputSource) *CreateMatchingWorkflowOutput { + s.OutputSourceConfig = v + return s +} + +// SetResolutionTechniques sets the ResolutionTechniques field's value. +func (s *CreateMatchingWorkflowOutput) SetResolutionTechniques(v *ResolutionTechniques) *CreateMatchingWorkflowOutput { + s.ResolutionTechniques = v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *CreateMatchingWorkflowOutput) SetRoleArn(v string) *CreateMatchingWorkflowOutput { + s.RoleArn = &v + return s +} + +// SetWorkflowArn sets the WorkflowArn field's value. +func (s *CreateMatchingWorkflowOutput) SetWorkflowArn(v string) *CreateMatchingWorkflowOutput { + s.WorkflowArn = &v + return s +} + +// SetWorkflowName sets the WorkflowName field's value. +func (s *CreateMatchingWorkflowOutput) SetWorkflowName(v string) *CreateMatchingWorkflowOutput { + s.WorkflowName = &v + return s +} + +type CreateSchemaMappingInput struct { + _ struct{} `type:"structure"` + + // A description of the schema. + Description *string `locationName:"description" type:"string"` + + // A list of MappedInputFields. Each MappedInputField corresponds to a column + // the source data table, and contains column name plus additional information + // that Entity Resolution uses for matching. + MappedInputFields []*SchemaInputAttribute `locationName:"mappedInputFields" min:"2" type:"list"` + + // The name of the schema. There cannot be multiple SchemaMappings with the + // same name. + // + // SchemaName is a required field + SchemaName *string `locationName:"schemaName" type:"string" required:"true"` + + // The tags used to organize, track, or control access for this resource. + Tags map[string]*string `locationName:"tags" type:"map"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateSchemaMappingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateSchemaMappingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
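+//
+// Editor's note: Validate runs entirely client-side; a sketch of using it
+// before calling the API (the "input" variable is assumed):
+//
+// if err := input.Validate(); err != nil {
+//     // err is a request.ErrInvalidParams listing each violated constraint,
+//     // such as a missing SchemaName or fewer than two MappedInputFields.
+//     return err
+// }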
+func (s *CreateSchemaMappingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateSchemaMappingInput"} + if s.MappedInputFields != nil && len(s.MappedInputFields) < 2 { + invalidParams.Add(request.NewErrParamMinLen("MappedInputFields", 2)) + } + if s.SchemaName == nil { + invalidParams.Add(request.NewErrParamRequired("SchemaName")) + } + if s.MappedInputFields != nil { + for i, v := range s.MappedInputFields { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "MappedInputFields", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDescription sets the Description field's value. +func (s *CreateSchemaMappingInput) SetDescription(v string) *CreateSchemaMappingInput { + s.Description = &v + return s +} + +// SetMappedInputFields sets the MappedInputFields field's value. +func (s *CreateSchemaMappingInput) SetMappedInputFields(v []*SchemaInputAttribute) *CreateSchemaMappingInput { + s.MappedInputFields = v + return s +} + +// SetSchemaName sets the SchemaName field's value. +func (s *CreateSchemaMappingInput) SetSchemaName(v string) *CreateSchemaMappingInput { + s.SchemaName = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateSchemaMappingInput) SetTags(v map[string]*string) *CreateSchemaMappingInput { + s.Tags = v + return s +} + +type CreateSchemaMappingOutput struct { + _ struct{} `type:"structure"` + + // A description of the schema. + // + // Description is a required field + Description *string `locationName:"description" type:"string" required:"true"` + + // A list of MappedInputFields. Each MappedInputField corresponds to a column + // the source data table, and contains column name plus additional information + // that Entity Resolution uses for matching. + // + // MappedInputFields is a required field + MappedInputFields []*SchemaInputAttribute `locationName:"mappedInputFields" min:"2" type:"list" required:"true"` + + // The ARN (Amazon Resource Name) that Entity Resolution generated for the SchemaMapping. + // + // SchemaArn is a required field + SchemaArn *string `locationName:"schemaArn" type:"string" required:"true"` + + // The name of the schema. + // + // SchemaName is a required field + SchemaName *string `locationName:"schemaName" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateSchemaMappingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateSchemaMappingOutput) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *CreateSchemaMappingOutput) SetDescription(v string) *CreateSchemaMappingOutput { + s.Description = &v + return s +} + +// SetMappedInputFields sets the MappedInputFields field's value. 
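+//
+// Editor's note: a sketch of creating a schema mapping; the column names and
+// the UNIQUE_ID/EMAIL_ADDRESS attribute types are illustrative assumptions.
+//
+// out, err := client.CreateSchemaMapping(&entityresolution.CreateSchemaMappingInput{
+//     SchemaName: aws.String("my-schema"),
+//     MappedInputFields: []*entityresolution.SchemaInputAttribute{
+//         {FieldName: aws.String("id"), Type: aws.String("UNIQUE_ID")},
+//         {FieldName: aws.String("email"), Type: aws.String("EMAIL_ADDRESS")},
+//     },
+// })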
+func (s *CreateSchemaMappingOutput) SetMappedInputFields(v []*SchemaInputAttribute) *CreateSchemaMappingOutput {
+	s.MappedInputFields = v
+	return s
+}
+
+// SetSchemaArn sets the SchemaArn field's value.
+func (s *CreateSchemaMappingOutput) SetSchemaArn(v string) *CreateSchemaMappingOutput {
+	s.SchemaArn = &v
+	return s
+}
+
+// SetSchemaName sets the SchemaName field's value.
+func (s *CreateSchemaMappingOutput) SetSchemaName(v string) *CreateSchemaMappingOutput {
+	s.SchemaName = &v
+	return s
+}
+
+type DeleteMatchingWorkflowInput struct {
+	_ struct{} `type:"structure" nopayload:"true"`
+
+	// The name of the workflow to be deleted.
+	//
+	// WorkflowName is a required field
+	WorkflowName *string `location:"uri" locationName:"workflowName" type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteMatchingWorkflowInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteMatchingWorkflowInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteMatchingWorkflowInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DeleteMatchingWorkflowInput"}
+	if s.WorkflowName == nil {
+		invalidParams.Add(request.NewErrParamRequired("WorkflowName"))
+	}
+	if s.WorkflowName != nil && len(*s.WorkflowName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("WorkflowName", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetWorkflowName sets the WorkflowName field's value.
+func (s *DeleteMatchingWorkflowInput) SetWorkflowName(v string) *DeleteMatchingWorkflowInput {
+	s.WorkflowName = &v
+	return s
+}
+
+type DeleteMatchingWorkflowOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A successful operation message.
+	//
+	// Message is a required field
+	Message *string `locationName:"message" type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteMatchingWorkflowOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteMatchingWorkflowOutput) GoString() string {
+	return s.String()
+}
+
+// SetMessage sets the Message field's value.
+func (s *DeleteMatchingWorkflowOutput) SetMessage(v string) *DeleteMatchingWorkflowOutput {
+	s.Message = &v
+	return s
+}
+
+type DeleteSchemaMappingInput struct {
+	_ struct{} `type:"structure" nopayload:"true"`
+
+	// The name of the schema to delete.
+ // + // SchemaName is a required field + SchemaName *string `location:"uri" locationName:"schemaName" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteSchemaMappingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteSchemaMappingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteSchemaMappingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteSchemaMappingInput"} + if s.SchemaName == nil { + invalidParams.Add(request.NewErrParamRequired("SchemaName")) + } + if s.SchemaName != nil && len(*s.SchemaName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SchemaName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSchemaName sets the SchemaName field's value. +func (s *DeleteSchemaMappingInput) SetSchemaName(v string) *DeleteSchemaMappingInput { + s.SchemaName = &v + return s +} + +type DeleteSchemaMappingOutput struct { + _ struct{} `type:"structure"` + + // A successful operation message. + // + // Message is a required field + Message *string `locationName:"message" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteSchemaMappingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteSchemaMappingOutput) GoString() string { + return s.String() +} + +// SetMessage sets the Message field's value. +func (s *DeleteSchemaMappingOutput) SetMessage(v string) *DeleteSchemaMappingOutput { + s.Message = &v + return s +} + +// An object containing an error message, if there was an error. +type ErrorDetails struct { + _ struct{} `type:"structure"` + + // The error message from the job, if there is one. + ErrorMessage *string `locationName:"errorMessage" min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ErrorDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ErrorDetails) GoString() string { + return s.String() +} + +// SetErrorMessage sets the ErrorMessage field's value. 
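+//
+// Editor's note: a sketch of surfacing a failed job's reason; the "out"
+// variable is an assumed GetMatchingJobOutput.
+//
+// if out.ErrorDetails != nil {
+//     fmt.Println("job failed:", aws.StringValue(out.ErrorDetails.ErrorMessage))
+// }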
+func (s *ErrorDetails) SetErrorMessage(v string) *ErrorDetails { + s.ErrorMessage = &v + return s +} + +// The request was rejected because it attempted to create resources beyond +// the current AWS Entity Resolution account limits. The error message describes +// the limit exceeded. HTTP Status Code: 402 +type ExceedsLimitException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExceedsLimitException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExceedsLimitException) GoString() string { + return s.String() +} + +func newErrorExceedsLimitException(v protocol.ResponseMetadata) error { + return &ExceedsLimitException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ExceedsLimitException) Code() string { + return "ExceedsLimitException" +} + +// Message returns the exception's message. +func (s *ExceedsLimitException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ExceedsLimitException) OrigErr() error { + return nil +} + +func (s *ExceedsLimitException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ExceedsLimitException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ExceedsLimitException) RequestID() string { + return s.RespMetadata.RequestID +} + +type GetMatchIdInput struct { + _ struct{} `type:"structure"` + + // The record to fetch the Match ID for. + // + // Record is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by GetMatchIdInput's + // String and GoString methods. + // + // Record is a required field + Record map[string]*string `locationName:"record" type:"map" required:"true" sensitive:"true"` + + // The name of the workflow. + // + // WorkflowName is a required field + WorkflowName *string `location:"uri" locationName:"workflowName" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetMatchIdInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetMatchIdInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
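+//
+// Editor's note: a sketch of fetching the Match ID for a single record with
+// the GetMatchId operation defined earlier in this file; the workflow name
+// and record keys are illustrative assumptions.
+//
+// out, err := client.GetMatchId(&entityresolution.GetMatchIdInput{
+//     WorkflowName: aws.String("my-workflow"),
+//     Record:       map[string]*string{"email": aws.String("jdoe@example.com")},
+// })
+// if err == nil {
+//     fmt.Println("match id:", aws.StringValue(out.MatchId))
+// }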
+func (s *GetMatchIdInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetMatchIdInput"}
+	if s.Record == nil {
+		invalidParams.Add(request.NewErrParamRequired("Record"))
+	}
+	if s.WorkflowName == nil {
+		invalidParams.Add(request.NewErrParamRequired("WorkflowName"))
+	}
+	if s.WorkflowName != nil && len(*s.WorkflowName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("WorkflowName", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetRecord sets the Record field's value.
+func (s *GetMatchIdInput) SetRecord(v map[string]*string) *GetMatchIdInput {
+	s.Record = v
+	return s
+}
+
+// SetWorkflowName sets the WorkflowName field's value.
+func (s *GetMatchIdInput) SetWorkflowName(v string) *GetMatchIdInput {
+	s.WorkflowName = &v
+	return s
+}
+
+type GetMatchIdOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The unique identifier for this group of match records.
+	MatchId *string `locationName:"matchId" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetMatchIdOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetMatchIdOutput) GoString() string {
+	return s.String()
+}
+
+// SetMatchId sets the MatchId field's value.
+func (s *GetMatchIdOutput) SetMatchId(v string) *GetMatchIdOutput {
+	s.MatchId = &v
+	return s
+}
+
+type GetMatchingJobInput struct {
+	_ struct{} `type:"structure" nopayload:"true"`
+
+	// The ID of the job.
+	//
+	// JobId is a required field
+	JobId *string `location:"uri" locationName:"jobId" type:"string" required:"true"`
+
+	// The name of the workflow.
+	//
+	// WorkflowName is a required field
+	WorkflowName *string `location:"uri" locationName:"workflowName" type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetMatchingJobInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetMatchingJobInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetMatchingJobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetMatchingJobInput"} + if s.JobId == nil { + invalidParams.Add(request.NewErrParamRequired("JobId")) + } + if s.JobId != nil && len(*s.JobId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("JobId", 1)) + } + if s.WorkflowName == nil { + invalidParams.Add(request.NewErrParamRequired("WorkflowName")) + } + if s.WorkflowName != nil && len(*s.WorkflowName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("WorkflowName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetJobId sets the JobId field's value. +func (s *GetMatchingJobInput) SetJobId(v string) *GetMatchingJobInput { + s.JobId = &v + return s +} + +// SetWorkflowName sets the WorkflowName field's value. +func (s *GetMatchingJobInput) SetWorkflowName(v string) *GetMatchingJobInput { + s.WorkflowName = &v + return s +} + +type GetMatchingJobOutput struct { + _ struct{} `type:"structure"` + + // The time at which the job has finished. + EndTime *time.Time `locationName:"endTime" type:"timestamp"` + + // An object containing an error message, if there was an error. + ErrorDetails *ErrorDetails `locationName:"errorDetails" type:"structure"` + + // The ID of the job. + // + // JobId is a required field + JobId *string `locationName:"jobId" type:"string" required:"true"` + + // Metrics associated with the execution, specifically total records processed, + // unique IDs generated, and records the execution skipped. + Metrics *JobMetrics `locationName:"metrics" type:"structure"` + + // The time at which the job was started. + // + // StartTime is a required field + StartTime *time.Time `locationName:"startTime" type:"timestamp" required:"true"` + + // The current status of the job. Either running, succeeded, queued, or failed. + // + // Status is a required field + Status *string `locationName:"status" type:"string" required:"true" enum:"JobStatus"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetMatchingJobOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetMatchingJobOutput) GoString() string { + return s.String() +} + +// SetEndTime sets the EndTime field's value. +func (s *GetMatchingJobOutput) SetEndTime(v time.Time) *GetMatchingJobOutput { + s.EndTime = &v + return s +} + +// SetErrorDetails sets the ErrorDetails field's value. +func (s *GetMatchingJobOutput) SetErrorDetails(v *ErrorDetails) *GetMatchingJobOutput { + s.ErrorDetails = v + return s +} + +// SetJobId sets the JobId field's value. +func (s *GetMatchingJobOutput) SetJobId(v string) *GetMatchingJobOutput { + s.JobId = &v + return s +} + +// SetMetrics sets the Metrics field's value. +func (s *GetMatchingJobOutput) SetMetrics(v *JobMetrics) *GetMatchingJobOutput { + s.Metrics = v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *GetMatchingJobOutput) SetStartTime(v time.Time) *GetMatchingJobOutput { + s.StartTime = &v + return s +} + +// SetStatus sets the Status field's value. 
+func (s *GetMatchingJobOutput) SetStatus(v string) *GetMatchingJobOutput { + s.Status = &v + return s +} + +type GetMatchingWorkflowInput struct { + _ struct{} `type:"structure" nopayload:"true"` + + // The name of the workflow. + // + // WorkflowName is a required field + WorkflowName *string `location:"uri" locationName:"workflowName" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetMatchingWorkflowInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetMatchingWorkflowInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetMatchingWorkflowInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetMatchingWorkflowInput"} + if s.WorkflowName == nil { + invalidParams.Add(request.NewErrParamRequired("WorkflowName")) + } + if s.WorkflowName != nil && len(*s.WorkflowName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("WorkflowName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetWorkflowName sets the WorkflowName field's value. +func (s *GetMatchingWorkflowInput) SetWorkflowName(v string) *GetMatchingWorkflowInput { + s.WorkflowName = &v + return s +} + +type GetMatchingWorkflowOutput struct { + _ struct{} `type:"structure"` + + // The timestamp of when the workflow was created. + // + // CreatedAt is a required field + CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" required:"true"` + + // A description of the workflow. + Description *string `locationName:"description" type:"string"` + + // An object which defines an incremental run type and has only incrementalRunType + // as a field. + IncrementalRunConfig *IncrementalRunConfig `locationName:"incrementalRunConfig" type:"structure"` + + // A list of InputSource objects, which have the fields InputSourceARN and SchemaName. + // + // InputSourceConfig is a required field + InputSourceConfig []*InputSource `locationName:"inputSourceConfig" min:"1" type:"list" required:"true"` + + // A list of OutputSource objects, each of which contains fields OutputS3Path, + // ApplyNormalization, and Output. + // + // OutputSourceConfig is a required field + OutputSourceConfig []*OutputSource `locationName:"outputSourceConfig" min:"1" type:"list" required:"true"` + + // An object which defines the resolutionType and the ruleBasedProperties + // + // ResolutionTechniques is a required field + ResolutionTechniques *ResolutionTechniques `locationName:"resolutionTechniques" type:"structure" required:"true"` + + // The Amazon Resource Name (ARN) of the IAM role. AWS Entity Resolution assumes + // this role to access resources on your behalf. + // + // RoleArn is a required field + RoleArn *string `locationName:"roleArn" type:"string" required:"true"` + + // The tags used to organize, track, or control access for this resource. + Tags map[string]*string `locationName:"tags" type:"map"` + + // The timestamp of when the workflow was last updated. 
+ // + // UpdatedAt is a required field + UpdatedAt *time.Time `locationName:"updatedAt" type:"timestamp" required:"true"` + + // The ARN (Amazon Resource Name) that Entity Resolution generated for the MatchingWorkflow. + // + // WorkflowArn is a required field + WorkflowArn *string `locationName:"workflowArn" type:"string" required:"true"` + + // The name of the workflow. + // + // WorkflowName is a required field + WorkflowName *string `locationName:"workflowName" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetMatchingWorkflowOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetMatchingWorkflowOutput) GoString() string { + return s.String() +} + +// SetCreatedAt sets the CreatedAt field's value. +func (s *GetMatchingWorkflowOutput) SetCreatedAt(v time.Time) *GetMatchingWorkflowOutput { + s.CreatedAt = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *GetMatchingWorkflowOutput) SetDescription(v string) *GetMatchingWorkflowOutput { + s.Description = &v + return s +} + +// SetIncrementalRunConfig sets the IncrementalRunConfig field's value. +func (s *GetMatchingWorkflowOutput) SetIncrementalRunConfig(v *IncrementalRunConfig) *GetMatchingWorkflowOutput { + s.IncrementalRunConfig = v + return s +} + +// SetInputSourceConfig sets the InputSourceConfig field's value. +func (s *GetMatchingWorkflowOutput) SetInputSourceConfig(v []*InputSource) *GetMatchingWorkflowOutput { + s.InputSourceConfig = v + return s +} + +// SetOutputSourceConfig sets the OutputSourceConfig field's value. +func (s *GetMatchingWorkflowOutput) SetOutputSourceConfig(v []*OutputSource) *GetMatchingWorkflowOutput { + s.OutputSourceConfig = v + return s +} + +// SetResolutionTechniques sets the ResolutionTechniques field's value. +func (s *GetMatchingWorkflowOutput) SetResolutionTechniques(v *ResolutionTechniques) *GetMatchingWorkflowOutput { + s.ResolutionTechniques = v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *GetMatchingWorkflowOutput) SetRoleArn(v string) *GetMatchingWorkflowOutput { + s.RoleArn = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *GetMatchingWorkflowOutput) SetTags(v map[string]*string) *GetMatchingWorkflowOutput { + s.Tags = v + return s +} + +// SetUpdatedAt sets the UpdatedAt field's value. +func (s *GetMatchingWorkflowOutput) SetUpdatedAt(v time.Time) *GetMatchingWorkflowOutput { + s.UpdatedAt = &v + return s +} + +// SetWorkflowArn sets the WorkflowArn field's value. +func (s *GetMatchingWorkflowOutput) SetWorkflowArn(v string) *GetMatchingWorkflowOutput { + s.WorkflowArn = &v + return s +} + +// SetWorkflowName sets the WorkflowName field's value. +func (s *GetMatchingWorkflowOutput) SetWorkflowName(v string) *GetMatchingWorkflowOutput { + s.WorkflowName = &v + return s +} + +type GetSchemaMappingInput struct { + _ struct{} `type:"structure" nopayload:"true"` + + // The name of the schema to be retrieved. 
+	//
+	// SchemaName is a required field
+	SchemaName *string `location:"uri" locationName:"schemaName" type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetSchemaMappingInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetSchemaMappingInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetSchemaMappingInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetSchemaMappingInput"}
+	if s.SchemaName == nil {
+		invalidParams.Add(request.NewErrParamRequired("SchemaName"))
+	}
+	if s.SchemaName != nil && len(*s.SchemaName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("SchemaName", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetSchemaName sets the SchemaName field's value.
+func (s *GetSchemaMappingInput) SetSchemaName(v string) *GetSchemaMappingInput {
+	s.SchemaName = &v
+	return s
+}
+
+type GetSchemaMappingOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The timestamp of when the SchemaMapping was created.
+	//
+	// CreatedAt is a required field
+	CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" required:"true"`
+
+	// A description of the schema.
+	Description *string `locationName:"description" type:"string"`
+
+	// A list of MappedInputFields. Each MappedInputField corresponds to a column
+	// in the source data table, and contains the column name plus additional
+	// information that Entity Resolution uses for matching.
+	//
+	// MappedInputFields is a required field
+	MappedInputFields []*SchemaInputAttribute `locationName:"mappedInputFields" min:"2" type:"list" required:"true"`
+
+	// The ARN (Amazon Resource Name) that Entity Resolution generated for the SchemaMapping.
+	//
+	// SchemaArn is a required field
+	SchemaArn *string `locationName:"schemaArn" type:"string" required:"true"`
+
+	// The name of the schema.
+	//
+	// SchemaName is a required field
+	SchemaName *string `locationName:"schemaName" type:"string" required:"true"`
+
+	// The tags used to organize, track, or control access for this resource.
+	Tags map[string]*string `locationName:"tags" type:"map"`
+
+	// The timestamp of when the SchemaMapping was last updated.
+	//
+	// UpdatedAt is a required field
+	UpdatedAt *time.Time `locationName:"updatedAt" type:"timestamp" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetSchemaMappingOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetSchemaMappingOutput) GoString() string {
+	return s.String()
+}
+
+// SetCreatedAt sets the CreatedAt field's value.
+func (s *GetSchemaMappingOutput) SetCreatedAt(v time.Time) *GetSchemaMappingOutput {
+	s.CreatedAt = &v
+	return s
+}
+
+// SetDescription sets the Description field's value.
+func (s *GetSchemaMappingOutput) SetDescription(v string) *GetSchemaMappingOutput {
+	s.Description = &v
+	return s
+}
+
+// SetMappedInputFields sets the MappedInputFields field's value.
+func (s *GetSchemaMappingOutput) SetMappedInputFields(v []*SchemaInputAttribute) *GetSchemaMappingOutput {
+	s.MappedInputFields = v
+	return s
+}
+
+// SetSchemaArn sets the SchemaArn field's value.
+func (s *GetSchemaMappingOutput) SetSchemaArn(v string) *GetSchemaMappingOutput {
+	s.SchemaArn = &v
+	return s
+}
+
+// SetSchemaName sets the SchemaName field's value.
+func (s *GetSchemaMappingOutput) SetSchemaName(v string) *GetSchemaMappingOutput {
+	s.SchemaName = &v
+	return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *GetSchemaMappingOutput) SetTags(v map[string]*string) *GetSchemaMappingOutput {
+	s.Tags = v
+	return s
+}
+
+// SetUpdatedAt sets the UpdatedAt field's value.
+func (s *GetSchemaMappingOutput) SetUpdatedAt(v time.Time) *GetSchemaMappingOutput {
+	s.UpdatedAt = &v
+	return s
+}
+
+// An object which defines an incremental run type and has only incrementalRunType
+// as a field.
+type IncrementalRunConfig struct {
+	_ struct{} `type:"structure"`
+
+	// The type of incremental run. It takes only one value: IMMEDIATE.
+	IncrementalRunType *string `locationName:"incrementalRunType" type:"string" enum:"IncrementalRunType"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s IncrementalRunConfig) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s IncrementalRunConfig) GoString() string {
+	return s.String()
+}
+
+// SetIncrementalRunType sets the IncrementalRunType field's value.
+func (s *IncrementalRunConfig) SetIncrementalRunType(v string) *IncrementalRunConfig {
+	s.IncrementalRunType = &v
+	return s
+}
+
+// An object containing InputSourceARN, SchemaName, and ApplyNormalization.
+type InputSource struct {
+	_ struct{} `type:"structure"`
+
+	// Normalizes the attributes defined in the schema in the input data. For example,
+	// if an attribute has an AttributeType of PHONE_NUMBER, and the data in the
+	// input table is in a format of 1234567890, Entity Resolution will normalize
+	// this field in the output to (123)-456-7890.
+	ApplyNormalization *bool `locationName:"applyNormalization" type:"boolean"`
+
+	// A Glue table ARN for the input source table.
+	//
+	// InputSourceARN is a required field
+	InputSourceARN *string `locationName:"inputSourceARN" type:"string" required:"true"`
+
+	// The name of the schema.
+	//
+	// SchemaName is a required field
+	SchemaName *string `locationName:"schemaName" type:"string" required:"true"`
+}
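+
+// A construction sketch (illustrative, not part of the generated API): an
+// InputSource pointing at a Glue table, with normalization enabled. The ARN
+// and schema name are hypothetical.
+//
+//	src := (&InputSource{}).
+//		SetInputSourceARN("arn:aws:glue:us-east-1:123456789012:table/mydb/mytable").
+//		SetSchemaName("my-schema").
+//		SetApplyNormalization(true)
+
+// String returns the string representation.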
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InputSource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InputSource) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InputSource) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InputSource"} + if s.InputSourceARN == nil { + invalidParams.Add(request.NewErrParamRequired("InputSourceARN")) + } + if s.SchemaName == nil { + invalidParams.Add(request.NewErrParamRequired("SchemaName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetApplyNormalization sets the ApplyNormalization field's value. +func (s *InputSource) SetApplyNormalization(v bool) *InputSource { + s.ApplyNormalization = &v + return s +} + +// SetInputSourceARN sets the InputSourceARN field's value. +func (s *InputSource) SetInputSourceARN(v string) *InputSource { + s.InputSourceARN = &v + return s +} + +// SetSchemaName sets the SchemaName field's value. +func (s *InputSource) SetSchemaName(v string) *InputSource { + s.SchemaName = &v + return s +} + +// This exception occurs when there is an internal failure in the AWS Entity +// Resolution service. HTTP Status Code: 500 +type InternalServerException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InternalServerException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InternalServerException) GoString() string { + return s.String() +} + +func newErrorInternalServerException(v protocol.ResponseMetadata) error { + return &InternalServerException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InternalServerException) Code() string { + return "InternalServerException" +} + +// Message returns the exception's message. +func (s *InternalServerException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InternalServerException) OrigErr() error { + return nil +} + +func (s *InternalServerException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InternalServerException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. 
+func (s *InternalServerException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+// An object containing InputRecords, TotalRecordsProcessed, MatchIDs, and RecordsNotProcessed.
+type JobMetrics struct {
+	_ struct{} `type:"structure"`
+
+	// The total number of input records.
+	InputRecords *int64 `locationName:"inputRecords" type:"integer"`
+
+	// The total number of matchIDs generated.
+	MatchIDs *int64 `locationName:"matchIDs" type:"integer"`
+
+	// The total number of records that did not get processed.
+	RecordsNotProcessed *int64 `locationName:"recordsNotProcessed" type:"integer"`
+
+	// The total number of records processed.
+	TotalRecordsProcessed *int64 `locationName:"totalRecordsProcessed" type:"integer"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s JobMetrics) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s JobMetrics) GoString() string {
+	return s.String()
+}
+
+// SetInputRecords sets the InputRecords field's value.
+func (s *JobMetrics) SetInputRecords(v int64) *JobMetrics {
+	s.InputRecords = &v
+	return s
+}
+
+// SetMatchIDs sets the MatchIDs field's value.
+func (s *JobMetrics) SetMatchIDs(v int64) *JobMetrics {
+	s.MatchIDs = &v
+	return s
+}
+
+// SetRecordsNotProcessed sets the RecordsNotProcessed field's value.
+func (s *JobMetrics) SetRecordsNotProcessed(v int64) *JobMetrics {
+	s.RecordsNotProcessed = &v
+	return s
+}
+
+// SetTotalRecordsProcessed sets the TotalRecordsProcessed field's value.
+func (s *JobMetrics) SetTotalRecordsProcessed(v int64) *JobMetrics {
+	s.TotalRecordsProcessed = &v
+	return s
+}
+
+// An object containing the JobId, Status, StartTime, and EndTime of a job.
+type JobSummary struct {
+	_ struct{} `type:"structure"`
+
+	// The time at which the job has finished.
+	EndTime *time.Time `locationName:"endTime" type:"timestamp"`
+
+	// The ID of the job.
+	//
+	// JobId is a required field
+	JobId *string `locationName:"jobId" type:"string" required:"true"`
+
+	// The time at which the job was started.
+	//
+	// StartTime is a required field
+	StartTime *time.Time `locationName:"startTime" type:"timestamp" required:"true"`
+
+	// The current status of the job. Either running, succeeded, queued, or failed.
+	//
+	// Status is a required field
+	Status *string `locationName:"status" type:"string" required:"true" enum:"JobStatus"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s JobSummary) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s JobSummary) GoString() string {
+	return s.String()
+}
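+
+// A usage sketch (illustrative, not part of the generated API): retrieving a
+// job, reading its metrics, and branching on the modeled exceptions by their
+// code. Assumes `svc` is a configured *EntityResolution client; the names and
+// the "SUCCEEDED" status string are illustrative.
+//
+//	job, err := svc.GetMatchingJob(&GetMatchingJobInput{
+//		WorkflowName: aws.String("my-workflow"),
+//		JobId:        aws.String("my-job-id"),
+//	})
+//	if aerr, ok := err.(awserr.Error); ok {
+//		switch aerr.Code() {
+//		case "ResourceNotFoundException":
+//			// the workflow or job does not exist
+//		case "InternalServerException":
+//			// transient service failure
+//		}
+//	}
+//	if err == nil && aws.StringValue(job.Status) == "SUCCEEDED" && job.Metrics != nil {
+//		fmt.Println(aws.Int64Value(job.Metrics.MatchIDs))
+//	}
+
+// SetEndTime sets the EndTime field's value.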
+func (s *JobSummary) SetEndTime(v time.Time) *JobSummary {
+	s.EndTime = &v
+	return s
+}
+
+// SetJobId sets the JobId field's value.
+func (s *JobSummary) SetJobId(v string) *JobSummary {
+	s.JobId = &v
+	return s
+}
+
+// SetStartTime sets the StartTime field's value.
+func (s *JobSummary) SetStartTime(v time.Time) *JobSummary {
+	s.StartTime = &v
+	return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *JobSummary) SetStatus(v string) *JobSummary {
+	s.Status = &v
+	return s
+}
+
+type ListMatchingJobsInput struct {
+	_ struct{} `type:"structure" nopayload:"true"`
+
+	// The maximum number of objects returned per page.
+	MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"`
+
+	// The pagination token from the previous ListMatchingJobs API call.
+	NextToken *string `location:"querystring" locationName:"nextToken" min:"1" type:"string"`
+
+	// The name of the workflow whose jobs are to be listed.
+	//
+	// WorkflowName is a required field
+	WorkflowName *string `location:"uri" locationName:"workflowName" type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListMatchingJobsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListMatchingJobsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListMatchingJobsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListMatchingJobsInput"}
+	if s.MaxResults != nil && *s.MaxResults < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
+	}
+	if s.NextToken != nil && len(*s.NextToken) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("NextToken", 1))
+	}
+	if s.WorkflowName == nil {
+		invalidParams.Add(request.NewErrParamRequired("WorkflowName"))
+	}
+	if s.WorkflowName != nil && len(*s.WorkflowName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("WorkflowName", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *ListMatchingJobsInput) SetMaxResults(v int64) *ListMatchingJobsInput {
+	s.MaxResults = &v
+	return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListMatchingJobsInput) SetNextToken(v string) *ListMatchingJobsInput {
+	s.NextToken = &v
+	return s
+}
+
+// SetWorkflowName sets the WorkflowName field's value.
+func (s *ListMatchingJobsInput) SetWorkflowName(v string) *ListMatchingJobsInput {
+	s.WorkflowName = &v
+	return s
+}
+
+type ListMatchingJobsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A list of JobSummary objects, each of which contains the ID, status, start
+	// time, and end time of a job.
+	Jobs []*JobSummary `locationName:"jobs" type:"list"`
+
+	// The pagination token from the previous ListMatchingJobs API call.
+	NextToken *string `locationName:"nextToken" min:"1" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListMatchingJobsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListMatchingJobsOutput) GoString() string {
+	return s.String()
+}
+
+// SetJobs sets the Jobs field's value.
+func (s *ListMatchingJobsOutput) SetJobs(v []*JobSummary) *ListMatchingJobsOutput {
+	s.Jobs = v
+	return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListMatchingJobsOutput) SetNextToken(v string) *ListMatchingJobsOutput {
+	s.NextToken = &v
+	return s
+}
+
+type ListMatchingWorkflowsInput struct {
+	_ struct{} `type:"structure" nopayload:"true"`
+
+	// The maximum number of objects returned per page.
+	MaxResults *int64 `location:"querystring" locationName:"maxResults" type:"integer"`
+
+	// The pagination token from the previous ListMatchingWorkflows API call.
+	NextToken *string `location:"querystring" locationName:"nextToken" min:"1" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListMatchingWorkflowsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListMatchingWorkflowsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListMatchingWorkflowsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListMatchingWorkflowsInput"}
+	if s.NextToken != nil && len(*s.NextToken) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("NextToken", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *ListMatchingWorkflowsInput) SetMaxResults(v int64) *ListMatchingWorkflowsInput {
+	s.MaxResults = &v
+	return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListMatchingWorkflowsInput) SetNextToken(v string) *ListMatchingWorkflowsInput {
+	s.NextToken = &v
+	return s
+}
+
+type ListMatchingWorkflowsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The pagination token from the previous ListMatchingWorkflows API call.
+	NextToken *string `locationName:"nextToken" min:"1" type:"string"`
+
+	// A list of MatchingWorkflowSummary objects, each of which contains the fields
+	// WorkflowName, WorkflowArn, CreatedAt, and UpdatedAt.
+	WorkflowSummaries []*MatchingWorkflowSummary `locationName:"workflowSummaries" type:"list"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListMatchingWorkflowsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListMatchingWorkflowsOutput) GoString() string {
+	return s.String()
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListMatchingWorkflowsOutput) SetNextToken(v string) *ListMatchingWorkflowsOutput {
+	s.NextToken = &v
+	return s
+}
+
+// SetWorkflowSummaries sets the WorkflowSummaries field's value.
+func (s *ListMatchingWorkflowsOutput) SetWorkflowSummaries(v []*MatchingWorkflowSummary) *ListMatchingWorkflowsOutput {
+	s.WorkflowSummaries = v
+	return s
+}
+
+type ListSchemaMappingsInput struct {
+	_ struct{} `type:"structure" nopayload:"true"`
+
+	// The maximum number of objects returned per page.
+	MaxResults *int64 `location:"querystring" locationName:"maxResults" type:"integer"`
+
+	// The pagination token from the previous ListSchemaMappings API call.
+	NextToken *string `location:"querystring" locationName:"nextToken" min:"1" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListSchemaMappingsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListSchemaMappingsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListSchemaMappingsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListSchemaMappingsInput"}
+	if s.NextToken != nil && len(*s.NextToken) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("NextToken", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *ListSchemaMappingsInput) SetMaxResults(v int64) *ListSchemaMappingsInput {
+	s.MaxResults = &v
+	return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListSchemaMappingsInput) SetNextToken(v string) *ListSchemaMappingsInput {
+	s.NextToken = &v
+	return s
+}
+
+type ListSchemaMappingsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The pagination token from the previous ListSchemaMappings API call.
+	NextToken *string `locationName:"nextToken" min:"1" type:"string"`
+
+	// A list of SchemaMappingSummary objects, each of which contains the fields
+	// SchemaName, SchemaArn, CreatedAt, and UpdatedAt.
+	SchemaList []*SchemaMappingSummary `locationName:"schemaList" type:"list"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListSchemaMappingsOutput) String() string {
+	return awsutil.Prettify(s)
+}
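+
+// A pagination sketch (illustrative, not part of the generated API): listing
+// every job for a workflow by following NextToken manually. Assumes `svc` is
+// a configured *EntityResolution client; the same loop shape applies to
+// ListMatchingWorkflows and ListSchemaMappings.
+//
+//	input := &ListMatchingJobsInput{WorkflowName: aws.String("my-workflow")}
+//	for {
+//		page, err := svc.ListMatchingJobs(input)
+//		if err != nil {
+//			break // handle the error
+//		}
+//		for _, j := range page.Jobs {
+//			fmt.Println(aws.StringValue(j.JobId))
+//		}
+//		if page.NextToken == nil {
+//			break
+//		}
+//		input.NextToken = page.NextToken
+//	}
+
+// GoString returns the string representation.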
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListSchemaMappingsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListSchemaMappingsOutput) SetNextToken(v string) *ListSchemaMappingsOutput { + s.NextToken = &v + return s +} + +// SetSchemaList sets the SchemaList field's value. +func (s *ListSchemaMappingsOutput) SetSchemaList(v []*SchemaMappingSummary) *ListSchemaMappingsOutput { + s.SchemaList = v + return s +} + +type ListTagsForResourceInput struct { + _ struct{} `type:"structure" nopayload:"true"` + + // The ARN of the resource for which you want to view tags. + // + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"resourceArn" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListTagsForResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *ListTagsForResourceInput) SetResourceArn(v string) *ListTagsForResourceInput { + s.ResourceArn = &v + return s +} + +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + // The tags used to organize, track, or control access for this resource. + // + // Tags is a required field + Tags map[string]*string `locationName:"tags" type:"map" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListTagsForResourceOutput) GoString() string { + return s.String() +} + +// SetTags sets the Tags field's value. 
+func (s *ListTagsForResourceOutput) SetTags(v map[string]*string) *ListTagsForResourceOutput {
+	s.Tags = v
+	return s
+}
+
+// An object containing the fields WorkflowName, WorkflowArn, CreatedAt, and
+// UpdatedAt.
+type MatchingWorkflowSummary struct {
+	_ struct{} `type:"structure"`
+
+	// The timestamp of when the workflow was created.
+	//
+	// CreatedAt is a required field
+	CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" required:"true"`
+
+	// The timestamp of when the workflow was last updated.
+	//
+	// UpdatedAt is a required field
+	UpdatedAt *time.Time `locationName:"updatedAt" type:"timestamp" required:"true"`
+
+	// The ARN (Amazon Resource Name) that Entity Resolution generated for the MatchingWorkflow.
+	//
+	// WorkflowArn is a required field
+	WorkflowArn *string `locationName:"workflowArn" type:"string" required:"true"`
+
+	// The name of the workflow.
+	//
+	// WorkflowName is a required field
+	WorkflowName *string `locationName:"workflowName" type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s MatchingWorkflowSummary) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s MatchingWorkflowSummary) GoString() string {
+	return s.String()
+}
+
+// SetCreatedAt sets the CreatedAt field's value.
+func (s *MatchingWorkflowSummary) SetCreatedAt(v time.Time) *MatchingWorkflowSummary {
+	s.CreatedAt = &v
+	return s
+}
+
+// SetUpdatedAt sets the UpdatedAt field's value.
+func (s *MatchingWorkflowSummary) SetUpdatedAt(v time.Time) *MatchingWorkflowSummary {
+	s.UpdatedAt = &v
+	return s
+}
+
+// SetWorkflowArn sets the WorkflowArn field's value.
+func (s *MatchingWorkflowSummary) SetWorkflowArn(v string) *MatchingWorkflowSummary {
+	s.WorkflowArn = &v
+	return s
+}
+
+// SetWorkflowName sets the WorkflowName field's value.
+func (s *MatchingWorkflowSummary) SetWorkflowName(v string) *MatchingWorkflowSummary {
+	s.WorkflowName = &v
+	return s
+}
+
+// An object with the fields Name and Hashed, which selects a column to be
+// included in the output table, and whether the values of the column should
+// be hashed.
+type OutputAttribute struct {
+	_ struct{} `type:"structure"`
+
+	// Enables the ability to hash the column values in the output.
+	Hashed *bool `locationName:"hashed" type:"boolean"`
+
+	// A name of a column to be written to the output. This must be an InputField
+	// name in the schema mapping.
+	//
+	// Name is a required field
+	Name *string `locationName:"name" type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s OutputAttribute) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s OutputAttribute) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *OutputAttribute) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "OutputAttribute"}
+	if s.Name == nil {
+		invalidParams.Add(request.NewErrParamRequired("Name"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetHashed sets the Hashed field's value.
+func (s *OutputAttribute) SetHashed(v bool) *OutputAttribute {
+	s.Hashed = &v
+	return s
+}
+
+// SetName sets the Name field's value.
+func (s *OutputAttribute) SetName(v string) *OutputAttribute {
+	s.Name = &v
+	return s
+}
+
+// An object containing the fields OutputS3Path, KMSArn, ApplyNormalization,
+// and Output.
+type OutputSource struct {
+	_ struct{} `type:"structure"`
+
+	// Normalizes the attributes defined in the schema in the input data. For example,
+	// if an attribute has an AttributeType of PHONE_NUMBER, and the data in the
+	// input table is in a format of 1234567890, Entity Resolution will normalize
+	// this field in the output to (123)-456-7890.
+	ApplyNormalization *bool `locationName:"applyNormalization" type:"boolean"`
+
+	// Customer KMS ARN for encryption at rest. If not provided, the system will
+	// use an Entity Resolution managed KMS key.
+	KMSArn *string `type:"string"`
+
+	// A list of OutputAttribute objects, each of which has the fields Name and
+	// Hashed. Each of these objects selects a column to be included in the output
+	// table, and whether the values of the column should be hashed.
+	//
+	// Output is a required field
+	Output []*OutputAttribute `locationName:"output" type:"list" required:"true"`
+
+	// The S3 path to which Entity Resolution will write the output table.
+	//
+	// OutputS3Path is a required field
+	OutputS3Path *string `locationName:"outputS3Path" type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s OutputSource) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s OutputSource) GoString() string {
+	return s.String()
+}
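+
+// A construction sketch (illustrative, not part of the generated API): an
+// OutputSource that writes to S3 under a customer-managed KMS key and hashes
+// one output column. The bucket, key ARN, and column names are hypothetical.
+//
+//	out := (&OutputSource{}).
+//		SetOutputS3Path("s3://my-bucket/entity-resolution/").
+//		SetKMSArn("arn:aws:kms:us-east-1:123456789012:key/my-key-id").
+//		SetOutput([]*OutputAttribute{
+//			(&OutputAttribute{}).SetName("email").SetHashed(true),
+//			(&OutputAttribute{}).SetName("full_name"),
+//		})
+
+// Validate inspects the fields of the type to determine if they are valid.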
+func (s *OutputSource) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "OutputSource"}
+	if s.Output == nil {
+		invalidParams.Add(request.NewErrParamRequired("Output"))
+	}
+	if s.OutputS3Path == nil {
+		invalidParams.Add(request.NewErrParamRequired("OutputS3Path"))
+	}
+	if s.Output != nil {
+		for i, v := range s.Output {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Output", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetApplyNormalization sets the ApplyNormalization field's value.
+func (s *OutputSource) SetApplyNormalization(v bool) *OutputSource {
+	s.ApplyNormalization = &v
+	return s
+}
+
+// SetKMSArn sets the KMSArn field's value.
+func (s *OutputSource) SetKMSArn(v string) *OutputSource {
+	s.KMSArn = &v
+	return s
+}
+
+// SetOutput sets the Output field's value.
+func (s *OutputSource) SetOutput(v []*OutputAttribute) *OutputSource {
+	s.Output = v
+	return s
+}
+
+// SetOutputS3Path sets the OutputS3Path field's value.
+func (s *OutputSource) SetOutputS3Path(v string) *OutputSource {
+	s.OutputS3Path = &v
+	return s
+}
+
+// An object which defines the resolutionType and the ruleBasedProperties.
+type ResolutionTechniques struct {
+	_ struct{} `type:"structure"`
+
+	// There are two types of matching: RULE_MATCHING and ML_MATCHING.
+	ResolutionType *string `locationName:"resolutionType" type:"string" enum:"ResolutionType"`
+
+	// An object which defines the list of matching rules to run and has a field
+	// Rules, which is a list of rule objects.
+	RuleBasedProperties *RuleBasedProperties `locationName:"ruleBasedProperties" type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ResolutionTechniques) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ResolutionTechniques) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ResolutionTechniques) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ResolutionTechniques"}
+	if s.RuleBasedProperties != nil {
+		if err := s.RuleBasedProperties.Validate(); err != nil {
+			invalidParams.AddNested("RuleBasedProperties", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetResolutionType sets the ResolutionType field's value.
+func (s *ResolutionTechniques) SetResolutionType(v string) *ResolutionTechniques {
+	s.ResolutionType = &v
+	return s
+}
+
+// SetRuleBasedProperties sets the RuleBasedProperties field's value.
+func (s *ResolutionTechniques) SetRuleBasedProperties(v *RuleBasedProperties) *ResolutionTechniques {
+	s.RuleBasedProperties = v
+	return s
+}
+
+// The resource could not be found. HTTP Status Code: 404
+type ResourceNotFoundException struct {
+	_ struct{} `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	Message_ *string `locationName:"message" min:"1" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ResourceNotFoundException) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ResourceNotFoundException) GoString() string {
+	return s.String()
+}
+
+func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error {
+	return &ResourceNotFoundException{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *ResourceNotFoundException) Code() string {
+	return "ResourceNotFoundException"
+}
+
+// Message returns the exception's message.
+func (s *ResourceNotFoundException) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *ResourceNotFoundException) OrigErr() error {
+	return nil
+}
+
+func (s *ResourceNotFoundException) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *ResourceNotFoundException) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *ResourceNotFoundException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+// An object containing RuleName and MatchingKeys.
+type Rule struct {
+	_ struct{} `type:"structure"`
+
+	// A list of MatchingKeys. The MatchingKeys must have been defined in the SchemaMapping.
+	// Two records are considered to match according to this rule if all of the
+	// MatchingKeys match.
+	//
+	// MatchingKeys is a required field
+	MatchingKeys []*string `locationName:"matchingKeys" min:"1" type:"list" required:"true"`
+
+	// A name for the matching rule.
+	//
+	// RuleName is a required field
+	RuleName *string `locationName:"ruleName" type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Rule) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Rule) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Rule) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "Rule"}
+	if s.MatchingKeys == nil {
+		invalidParams.Add(request.NewErrParamRequired("MatchingKeys"))
+	}
+	if s.MatchingKeys != nil && len(s.MatchingKeys) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("MatchingKeys", 1))
+	}
+	if s.RuleName == nil {
+		invalidParams.Add(request.NewErrParamRequired("RuleName"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetMatchingKeys sets the MatchingKeys field's value.
+func (s *Rule) SetMatchingKeys(v []*string) *Rule {
+	s.MatchingKeys = v
+	return s
+}
+
+// SetRuleName sets the RuleName field's value.
+func (s *Rule) SetRuleName(v string) *Rule {
+	s.RuleName = &v
+	return s
+}
+
+// An object which defines the list of matching rules to run and has a field
+// Rules, which is a list of rule objects.
+type RuleBasedProperties struct {
+	_ struct{} `type:"structure"`
+
+	// You can choose either ONE_TO_ONE or MANY_TO_MANY as the AttributeMatchingModel.
+	// When choosing MANY_TO_MANY, the system can match attributes across the sub-types
+	// of an attribute type. For example, if the value of the Email field of Profile
+	// A and the value of the BusinessEmail field of Profile B match, the two profiles
+	// are matched on the Email type. When choosing ONE_TO_ONE, the system can only
+	// match if the sub-types are an exact match. For example, the two profiles are
+	// matched on the Email type only when the value of the Email field of Profile
+	// A and the value of the Email field of Profile B match.
+	//
+	// AttributeMatchingModel is a required field
+	AttributeMatchingModel *string `locationName:"attributeMatchingModel" type:"string" required:"true" enum:"AttributeMatchingModel"`
+
+	// A list of Rule objects, each of which has the fields RuleName and MatchingKeys.
+	//
+	// Rules is a required field
+	Rules []*Rule `locationName:"rules" min:"1" type:"list" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RuleBasedProperties) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RuleBasedProperties) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RuleBasedProperties) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "RuleBasedProperties"}
+	if s.AttributeMatchingModel == nil {
+		invalidParams.Add(request.NewErrParamRequired("AttributeMatchingModel"))
+	}
+	if s.Rules == nil {
+		invalidParams.Add(request.NewErrParamRequired("Rules"))
+	}
+	if s.Rules != nil && len(s.Rules) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Rules", 1))
+	}
+	if s.Rules != nil {
+		for i, v := range s.Rules {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
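+
+// A construction sketch (illustrative, not part of the generated API):
+// rule-based resolution in which two records match when both of the
+// hypothetical NAME and ADDRESS match keys agree. The enum strings are the
+// values documented above.
+//
+//	techniques := (&ResolutionTechniques{}).
+//		SetResolutionType("RULE_MATCHING").
+//		SetRuleBasedProperties((&RuleBasedProperties{}).
+//			SetAttributeMatchingModel("ONE_TO_ONE").
+//			SetRules([]*Rule{
+//				(&Rule{}).
+//					SetRuleName("name-and-address").
+//					SetMatchingKeys([]*string{aws.String("NAME"), aws.String("ADDRESS")}),
+//			}))
+
+// SetAttributeMatchingModel sets the AttributeMatchingModel field's value.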
+func (s *RuleBasedProperties) SetAttributeMatchingModel(v string) *RuleBasedProperties {
+	s.AttributeMatchingModel = &v
+	return s
+}
+
+// SetRules sets the Rules field's value.
+func (s *RuleBasedProperties) SetRules(v []*Rule) *RuleBasedProperties {
+	s.Rules = v
+	return s
+}
+
+// An object containing FieldName, Type, GroupName, and MatchKey.
+type SchemaInputAttribute struct {
+	_ struct{} `type:"structure"`
+
+	// A string containing the field name.
+	//
+	// FieldName is a required field
+	FieldName *string `locationName:"fieldName" type:"string" required:"true"`
+
+	// Instructs Entity Resolution to combine several columns into a unified column
+	// with the identical attribute type. For example, when working with columns
+	// such as first_name, middle_name, and last_name, assigning them a common GroupName
+	// will prompt Entity Resolution to concatenate them into a single value.
+	GroupName *string `locationName:"groupName" type:"string"`
+
+	// A key that allows grouping of multiple input attributes into a unified matching
+	// group. For example, consider a scenario where the source table contains
+	// various addresses, such as business_address and shipping_address. By assigning
+	// the MatchKey Address to both attributes, Entity Resolution will match records
+	// across these fields to create a consolidated matching group. If no MatchKey
+	// is specified for a column, it won't be utilized for matching purposes but
+	// will still be included in the output table.
+	MatchKey *string `locationName:"matchKey" type:"string"`
+
+	// The type of the attribute, selected from a list of values.
+	//
+	// Type is a required field
+	Type *string `locationName:"type" type:"string" required:"true" enum:"SchemaAttributeType"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s SchemaInputAttribute) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s SchemaInputAttribute) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *SchemaInputAttribute) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "SchemaInputAttribute"}
+	if s.FieldName == nil {
+		invalidParams.Add(request.NewErrParamRequired("FieldName"))
+	}
+	if s.Type == nil {
+		invalidParams.Add(request.NewErrParamRequired("Type"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetFieldName sets the FieldName field's value.
+func (s *SchemaInputAttribute) SetFieldName(v string) *SchemaInputAttribute {
+	s.FieldName = &v
+	return s
+}
+
+// SetGroupName sets the GroupName field's value.
+func (s *SchemaInputAttribute) SetGroupName(v string) *SchemaInputAttribute {
+	s.GroupName = &v
+	return s
+}
+
+// SetMatchKey sets the MatchKey field's value.
+func (s *SchemaInputAttribute) SetMatchKey(v string) *SchemaInputAttribute {
+	s.MatchKey = &v
+	return s
+}
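+
+// A construction sketch (illustrative, not part of the generated API): two
+// name columns grouped into one unified column, and an address column given a
+// match key. The field names are hypothetical, and the attribute type strings
+// are assumed values of the SchemaAttributeType enum.
+//
+//	first := (&SchemaInputAttribute{}).
+//		SetFieldName("first_name").SetType("NAME_FIRST").SetGroupName("full_name")
+//	last := (&SchemaInputAttribute{}).
+//		SetFieldName("last_name").SetType("NAME_LAST").SetGroupName("full_name")
+//	shipping := (&SchemaInputAttribute{}).
+//		SetFieldName("shipping_address").SetType("ADDRESS").SetMatchKey("Address")
+
+// SetType sets the Type field's value.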
+func (s *SchemaInputAttribute) SetType(v string) *SchemaInputAttribute { + s.Type = &v + return s +} + +// An object containing SchemaName, SchemaArn, CreatedAt, and UpdatedAt. +type SchemaMappingSummary struct { + _ struct{} `type:"structure"` + + // The timestamp of when the SchemaMapping was created. + // + // CreatedAt is a required field + CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" required:"true"` + + // The ARN (Amazon Resource Name) that Entity Resolution generated for the SchemaMapping. + // + // SchemaArn is a required field + SchemaArn *string `locationName:"schemaArn" type:"string" required:"true"` + + // The name of the schema. + // + // SchemaName is a required field + SchemaName *string `locationName:"schemaName" type:"string" required:"true"` + + // The timestamp of when the SchemaMapping was last updated. + // + // UpdatedAt is a required field + UpdatedAt *time.Time `locationName:"updatedAt" type:"timestamp" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SchemaMappingSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SchemaMappingSummary) GoString() string { + return s.String() +} + +// SetCreatedAt sets the CreatedAt field's value. +func (s *SchemaMappingSummary) SetCreatedAt(v time.Time) *SchemaMappingSummary { + s.CreatedAt = &v + return s +} + +// SetSchemaArn sets the SchemaArn field's value. +func (s *SchemaMappingSummary) SetSchemaArn(v string) *SchemaMappingSummary { + s.SchemaArn = &v + return s +} + +// SetSchemaName sets the SchemaName field's value. +func (s *SchemaMappingSummary) SetSchemaName(v string) *SchemaMappingSummary { + s.SchemaName = &v + return s +} + +// SetUpdatedAt sets the UpdatedAt field's value. +func (s *SchemaMappingSummary) SetUpdatedAt(v time.Time) *SchemaMappingSummary { + s.UpdatedAt = &v + return s +} + +type StartMatchingJobInput struct { + _ struct{} `type:"structure" nopayload:"true"` + + // The name of the matching workflow whose job you want to start. + // + // WorkflowName is a required field + WorkflowName *string `location:"uri" locationName:"workflowName" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StartMatchingJobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StartMatchingJobInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid.
+func (s *StartMatchingJobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartMatchingJobInput"} + if s.WorkflowName == nil { + invalidParams.Add(request.NewErrParamRequired("WorkflowName")) + } + if s.WorkflowName != nil && len(*s.WorkflowName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("WorkflowName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetWorkflowName sets the WorkflowName field's value. +func (s *StartMatchingJobInput) SetWorkflowName(v string) *StartMatchingJobInput { + s.WorkflowName = &v + return s +} + +type StartMatchingJobOutput struct { + _ struct{} `type:"structure"` + + // The ID of the job. + // + // JobId is a required field + JobId *string `locationName:"jobId" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StartMatchingJobOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StartMatchingJobOutput) GoString() string { + return s.String() +} + +// SetJobId sets the JobId field's value. +func (s *StartMatchingJobOutput) SetJobId(v string) *StartMatchingJobOutput { + s.JobId = &v + return s +} + +type TagResourceInput struct { + _ struct{} `type:"structure"` + + // The ARN of the resource that you want to tag. + // + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"resourceArn" type:"string" required:"true"` + + // The tags used to organize, track, or control access for this resource. + // + // Tags is a required field + Tags map[string]*string `locationName:"tags" type:"map" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *TagResourceInput) SetResourceArn(v string) *TagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTags sets the Tags field's value.
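+//
+// An illustrative sketch (not part of the generated code): tagging a workflow
+// by ARN. The ARN, tag key, and tag value below are hypothetical, and svc is
+// assumed to be an *entityresolution.EntityResolution client.
+//
+//	input := (&entityresolution.TagResourceInput{}).
+//	    SetResourceArn("arn:aws:entityresolution:us-east-1:123456789012:matchingworkflow/myWorkflow").
+//	    SetTags(map[string]*string{"team": aws.String("data")})
+//	_, err := svc.TagResource(input)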
+func (s *TagResourceInput) SetTags(v map[string]*string) *TagResourceInput { + s.Tags = v + return s +} + +type TagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TagResourceOutput) GoString() string { + return s.String() +} + +// The request was denied due to request throttling. HTTP Status Code: 429 +type ThrottlingException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ThrottlingException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ThrottlingException) GoString() string { + return s.String() +} + +func newErrorThrottlingException(v protocol.ResponseMetadata) error { + return &ThrottlingException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ThrottlingException) Code() string { + return "ThrottlingException" +} + +// Message returns the exception's message. +func (s *ThrottlingException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ThrottlingException) OrigErr() error { + return nil +} + +func (s *ThrottlingException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// StatusCode returns the HTTP status code for the request's response error. +func (s *ThrottlingException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for the request. +func (s *ThrottlingException) RequestID() string { + return s.RespMetadata.RequestID +} + +type UntagResourceInput struct { + _ struct{} `type:"structure" nopayload:"true"` + + // The ARN of the resource that you want to untag. + // + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"resourceArn" type:"string" required:"true"` + + // The list of tag keys to remove from the resource. + // + // TagKeys is a required field + TagKeys []*string `location:"querystring" locationName:"tagKeys" type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive".
+func (s UntagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UntagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UntagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *UntagResourceInput) SetResourceArn(v string) *UntagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTagKeys sets the TagKeys field's value. +func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { + s.TagKeys = v + return s +} + +type UntagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UntagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UntagResourceOutput) GoString() string { + return s.String() +} + +type UpdateMatchingWorkflowInput struct { + _ struct{} `type:"structure"` + + // A description of the workflow. + Description *string `locationName:"description" type:"string"` + + // An object which defines an incremental run type and has only incrementalRunType + // as a field. + IncrementalRunConfig *IncrementalRunConfig `locationName:"incrementalRunConfig" type:"structure"` + + // A list of InputSource objects, which have the fields InputSourceARN and SchemaName. + // + // InputSourceConfig is a required field + InputSourceConfig []*InputSource `locationName:"inputSourceConfig" min:"1" type:"list" required:"true"` + + // A list of OutputSource objects, each of which contains fields OutputS3Path, + // ApplyNormalization, and Output. + // + // OutputSourceConfig is a required field + OutputSourceConfig []*OutputSource `locationName:"outputSourceConfig" min:"1" type:"list" required:"true"` + + // An object which defines the resolutionType and the ruleBasedProperties. + // + // ResolutionTechniques is a required field + ResolutionTechniques *ResolutionTechniques `locationName:"resolutionTechniques" type:"structure" required:"true"` + + // The Amazon Resource Name (ARN) of the IAM role. AWS Entity Resolution assumes + // this role to create resources on your behalf as part of workflow execution. + // + // RoleArn is a required field + RoleArn *string `locationName:"roleArn" type:"string" required:"true"` + + // The name of the workflow to be updated.
+ // + // WorkflowName is a required field + WorkflowName *string `location:"uri" locationName:"workflowName" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateMatchingWorkflowInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateMatchingWorkflowInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateMatchingWorkflowInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateMatchingWorkflowInput"} + if s.InputSourceConfig == nil { + invalidParams.Add(request.NewErrParamRequired("InputSourceConfig")) + } + if s.InputSourceConfig != nil && len(s.InputSourceConfig) < 1 { + invalidParams.Add(request.NewErrParamMinLen("InputSourceConfig", 1)) + } + if s.OutputSourceConfig == nil { + invalidParams.Add(request.NewErrParamRequired("OutputSourceConfig")) + } + if s.OutputSourceConfig != nil && len(s.OutputSourceConfig) < 1 { + invalidParams.Add(request.NewErrParamMinLen("OutputSourceConfig", 1)) + } + if s.ResolutionTechniques == nil { + invalidParams.Add(request.NewErrParamRequired("ResolutionTechniques")) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.WorkflowName == nil { + invalidParams.Add(request.NewErrParamRequired("WorkflowName")) + } + if s.WorkflowName != nil && len(*s.WorkflowName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("WorkflowName", 1)) + } + if s.InputSourceConfig != nil { + for i, v := range s.InputSourceConfig { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "InputSourceConfig", i), err.(request.ErrInvalidParams)) + } + } + } + if s.OutputSourceConfig != nil { + for i, v := range s.OutputSourceConfig { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "OutputSourceConfig", i), err.(request.ErrInvalidParams)) + } + } + } + if s.ResolutionTechniques != nil { + if err := s.ResolutionTechniques.Validate(); err != nil { + invalidParams.AddNested("ResolutionTechniques", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDescription sets the Description field's value. +func (s *UpdateMatchingWorkflowInput) SetDescription(v string) *UpdateMatchingWorkflowInput { + s.Description = &v + return s +} + +// SetIncrementalRunConfig sets the IncrementalRunConfig field's value. +func (s *UpdateMatchingWorkflowInput) SetIncrementalRunConfig(v *IncrementalRunConfig) *UpdateMatchingWorkflowInput { + s.IncrementalRunConfig = v + return s +} + +// SetInputSourceConfig sets the InputSourceConfig field's value. +func (s *UpdateMatchingWorkflowInput) SetInputSourceConfig(v []*InputSource) *UpdateMatchingWorkflowInput { + s.InputSourceConfig = v + return s +} + +// SetOutputSourceConfig sets the OutputSourceConfig field's value. 
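+//
+// An illustrative sketch (not part of the generated code): switching a
+// workflow to rule-based resolution. The names and ARN are hypothetical, svc
+// is an assumed client, SetResolutionType is assumed from the setter naming
+// convention, and the required input/output source lists are elided here, so
+// a real call must also set them.
+//
+//	input := (&entityresolution.UpdateMatchingWorkflowInput{}).
+//	    SetWorkflowName("myWorkflow").
+//	    SetRoleArn("arn:aws:iam::123456789012:role/entityresolution-workflow").
+//	    SetResolutionTechniques((&entityresolution.ResolutionTechniques{}).
+//	        SetResolutionType(entityresolution.ResolutionTypeRuleMatching))
+//	_, err := svc.UpdateMatchingWorkflow(input)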
+func (s *UpdateMatchingWorkflowInput) SetOutputSourceConfig(v []*OutputSource) *UpdateMatchingWorkflowInput { + s.OutputSourceConfig = v + return s +} + +// SetResolutionTechniques sets the ResolutionTechniques field's value. +func (s *UpdateMatchingWorkflowInput) SetResolutionTechniques(v *ResolutionTechniques) *UpdateMatchingWorkflowInput { + s.ResolutionTechniques = v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *UpdateMatchingWorkflowInput) SetRoleArn(v string) *UpdateMatchingWorkflowInput { + s.RoleArn = &v + return s +} + +// SetWorkflowName sets the WorkflowName field's value. +func (s *UpdateMatchingWorkflowInput) SetWorkflowName(v string) *UpdateMatchingWorkflowInput { + s.WorkflowName = &v + return s +} + +type UpdateMatchingWorkflowOutput struct { + _ struct{} `type:"structure"` + + // A description of the workflow. + Description *string `locationName:"description" type:"string"` + + // An object which defines an incremental run type and has only incrementalRunType + // as a field. + IncrementalRunConfig *IncrementalRunConfig `locationName:"incrementalRunConfig" type:"structure"` + + // A list of InputSource objects, which have the fields InputSourceARN and SchemaName. + // + // InputSourceConfig is a required field + InputSourceConfig []*InputSource `locationName:"inputSourceConfig" min:"1" type:"list" required:"true"` + + // A list of OutputSource objects, each of which contains fields OutputS3Path, + // ApplyNormalization, and Output. + // + // OutputSourceConfig is a required field + OutputSourceConfig []*OutputSource `locationName:"outputSourceConfig" min:"1" type:"list" required:"true"` + + // An object which defines the resolutionType and the ruleBasedProperties. + // + // ResolutionTechniques is a required field + ResolutionTechniques *ResolutionTechniques `locationName:"resolutionTechniques" type:"structure" required:"true"` + + // The Amazon Resource Name (ARN) of the IAM role. AWS Entity Resolution assumes + // this role to create resources on your behalf as part of workflow execution. + // + // RoleArn is a required field + RoleArn *string `locationName:"roleArn" type:"string" required:"true"` + + // The name of the workflow. + // + // WorkflowName is a required field + WorkflowName *string `locationName:"workflowName" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateMatchingWorkflowOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateMatchingWorkflowOutput) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *UpdateMatchingWorkflowOutput) SetDescription(v string) *UpdateMatchingWorkflowOutput { + s.Description = &v + return s +} + +// SetIncrementalRunConfig sets the IncrementalRunConfig field's value. +func (s *UpdateMatchingWorkflowOutput) SetIncrementalRunConfig(v *IncrementalRunConfig) *UpdateMatchingWorkflowOutput { + s.IncrementalRunConfig = v + return s +} + +// SetInputSourceConfig sets the InputSourceConfig field's value.
+func (s *UpdateMatchingWorkflowOutput) SetInputSourceConfig(v []*InputSource) *UpdateMatchingWorkflowOutput { + s.InputSourceConfig = v + return s +} + +// SetOutputSourceConfig sets the OutputSourceConfig field's value. +func (s *UpdateMatchingWorkflowOutput) SetOutputSourceConfig(v []*OutputSource) *UpdateMatchingWorkflowOutput { + s.OutputSourceConfig = v + return s +} + +// SetResolutionTechniques sets the ResolutionTechniques field's value. +func (s *UpdateMatchingWorkflowOutput) SetResolutionTechniques(v *ResolutionTechniques) *UpdateMatchingWorkflowOutput { + s.ResolutionTechniques = v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *UpdateMatchingWorkflowOutput) SetRoleArn(v string) *UpdateMatchingWorkflowOutput { + s.RoleArn = &v + return s +} + +// SetWorkflowName sets the WorkflowName field's value. +func (s *UpdateMatchingWorkflowOutput) SetWorkflowName(v string) *UpdateMatchingWorkflowOutput { + s.WorkflowName = &v + return s +} + +// The input fails to satisfy the constraints specified by AWS Entity Resolution. +// HTTP Status Code: 400 +type ValidationException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ValidationException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ValidationException) GoString() string { + return s.String() +} + +func newErrorValidationException(v protocol.ResponseMetadata) error { + return &ValidationException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ValidationException) Code() string { + return "ValidationException" +} + +// Message returns the exception's message. +func (s *ValidationException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ValidationException) OrigErr() error { + return nil +} + +func (s *ValidationException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// StatusCode returns the HTTP status code for the request's response error. +func (s *ValidationException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for the request.
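+//
+// An illustrative sketch (not part of the generated code): inspecting a
+// returned typed error with errors.As. The svc and input variables are
+// assumed to exist.
+//
+//	if _, err := svc.StartMatchingJob(input); err != nil {
+//	    var ve *entityresolution.ValidationException
+//	    if errors.As(err, &ve) {
+//	        log.Println(ve.Code(), ve.Message(), ve.RequestID())
+//	    }
+//	}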
+func (s *ValidationException) RequestID() string { + return s.RespMetadata.RequestID +} + +const ( + // AttributeMatchingModelOneToOne is a AttributeMatchingModel enum value + AttributeMatchingModelOneToOne = "ONE_TO_ONE" + + // AttributeMatchingModelManyToMany is a AttributeMatchingModel enum value + AttributeMatchingModelManyToMany = "MANY_TO_MANY" +) + +// AttributeMatchingModel_Values returns all elements of the AttributeMatchingModel enum +func AttributeMatchingModel_Values() []string { + return []string{ + AttributeMatchingModelOneToOne, + AttributeMatchingModelManyToMany, + } +} + +const ( + // IncrementalRunTypeImmediate is a IncrementalRunType enum value + IncrementalRunTypeImmediate = "IMMEDIATE" +) + +// IncrementalRunType_Values returns all elements of the IncrementalRunType enum +func IncrementalRunType_Values() []string { + return []string{ + IncrementalRunTypeImmediate, + } +} + +const ( + // JobStatusRunning is a JobStatus enum value + JobStatusRunning = "RUNNING" + + // JobStatusSucceeded is a JobStatus enum value + JobStatusSucceeded = "SUCCEEDED" + + // JobStatusFailed is a JobStatus enum value + JobStatusFailed = "FAILED" + + // JobStatusQueued is a JobStatus enum value + JobStatusQueued = "QUEUED" +) + +// JobStatus_Values returns all elements of the JobStatus enum +func JobStatus_Values() []string { + return []string{ + JobStatusRunning, + JobStatusSucceeded, + JobStatusFailed, + JobStatusQueued, + } +} + +const ( + // ResolutionTypeRuleMatching is a ResolutionType enum value + ResolutionTypeRuleMatching = "RULE_MATCHING" + + // ResolutionTypeMlMatching is a ResolutionType enum value + ResolutionTypeMlMatching = "ML_MATCHING" +) + +// ResolutionType_Values returns all elements of the ResolutionType enum +func ResolutionType_Values() []string { + return []string{ + ResolutionTypeRuleMatching, + ResolutionTypeMlMatching, + } +} + +const ( + // SchemaAttributeTypeName is a SchemaAttributeType enum value + SchemaAttributeTypeName = "NAME" + + // SchemaAttributeTypeNameFirst is a SchemaAttributeType enum value + SchemaAttributeTypeNameFirst = "NAME_FIRST" + + // SchemaAttributeTypeNameMiddle is a SchemaAttributeType enum value + SchemaAttributeTypeNameMiddle = "NAME_MIDDLE" + + // SchemaAttributeTypeNameLast is a SchemaAttributeType enum value + SchemaAttributeTypeNameLast = "NAME_LAST" + + // SchemaAttributeTypeAddress is a SchemaAttributeType enum value + SchemaAttributeTypeAddress = "ADDRESS" + + // SchemaAttributeTypeAddressStreet1 is a SchemaAttributeType enum value + SchemaAttributeTypeAddressStreet1 = "ADDRESS_STREET1" + + // SchemaAttributeTypeAddressStreet2 is a SchemaAttributeType enum value + SchemaAttributeTypeAddressStreet2 = "ADDRESS_STREET2" + + // SchemaAttributeTypeAddressStreet3 is a SchemaAttributeType enum value + SchemaAttributeTypeAddressStreet3 = "ADDRESS_STREET3" + + // SchemaAttributeTypeAddressCity is a SchemaAttributeType enum value + SchemaAttributeTypeAddressCity = "ADDRESS_CITY" + + // SchemaAttributeTypeAddressState is a SchemaAttributeType enum value + SchemaAttributeTypeAddressState = "ADDRESS_STATE" + + // SchemaAttributeTypeAddressCountry is a SchemaAttributeType enum value + SchemaAttributeTypeAddressCountry = "ADDRESS_COUNTRY" + + // SchemaAttributeTypeAddressPostalcode is a SchemaAttributeType enum value + SchemaAttributeTypeAddressPostalcode = "ADDRESS_POSTALCODE" + + // SchemaAttributeTypePhone is a SchemaAttributeType enum value + SchemaAttributeTypePhone = "PHONE" + + // SchemaAttributeTypePhoneNumber is a SchemaAttributeType enum 
value + SchemaAttributeTypePhoneNumber = "PHONE_NUMBER" + + // SchemaAttributeTypePhoneCountrycode is a SchemaAttributeType enum value + SchemaAttributeTypePhoneCountrycode = "PHONE_COUNTRYCODE" + + // SchemaAttributeTypeEmailAddress is a SchemaAttributeType enum value + SchemaAttributeTypeEmailAddress = "EMAIL_ADDRESS" + + // SchemaAttributeTypeUniqueId is a SchemaAttributeType enum value + SchemaAttributeTypeUniqueId = "UNIQUE_ID" + + // SchemaAttributeTypeDate is a SchemaAttributeType enum value + SchemaAttributeTypeDate = "DATE" + + // SchemaAttributeTypeString is a SchemaAttributeType enum value + SchemaAttributeTypeString = "STRING" +) + +// SchemaAttributeType_Values returns all elements of the SchemaAttributeType enum +func SchemaAttributeType_Values() []string { + return []string{ + SchemaAttributeTypeName, + SchemaAttributeTypeNameFirst, + SchemaAttributeTypeNameMiddle, + SchemaAttributeTypeNameLast, + SchemaAttributeTypeAddress, + SchemaAttributeTypeAddressStreet1, + SchemaAttributeTypeAddressStreet2, + SchemaAttributeTypeAddressStreet3, + SchemaAttributeTypeAddressCity, + SchemaAttributeTypeAddressState, + SchemaAttributeTypeAddressCountry, + SchemaAttributeTypeAddressPostalcode, + SchemaAttributeTypePhone, + SchemaAttributeTypePhoneNumber, + SchemaAttributeTypePhoneCountrycode, + SchemaAttributeTypeEmailAddress, + SchemaAttributeTypeUniqueId, + SchemaAttributeTypeDate, + SchemaAttributeTypeString, + } +} diff --git a/service/entityresolution/doc.go b/service/entityresolution/doc.go new file mode 100644 index 00000000000..3e9a9c5435e --- /dev/null +++ b/service/entityresolution/doc.go @@ -0,0 +1,44 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package entityresolution provides the client and types for making API +// requests to AWS EntityResolution. +// +// Welcome to the AWS Entity Resolution API Reference. +// +// AWS Entity Resolution is an AWS service that provides pre-configured entity +// resolution capabilities that enable developers and analysts at advertising +// and marketing companies to build an accurate and complete view of their consumers. +// +// With AWS Entity Resolution, you can match source records +// containing consumer identifiers, such as name, email address, and phone number. +// This holds true even when these records have incomplete or conflicting identifiers. +// For example, AWS Entity Resolution can effectively match a source record +// from a customer relationship management (CRM) system, which includes account +// information like first name, last name, postal address, phone number, and +// email address, with a source record from a marketing system containing campaign +// information, such as username and email address. +// +// To learn more about AWS Entity Resolution concepts, procedures, and best +// practices, see the AWS Entity Resolution User Guide (https://docs.aws.amazon.com/entityresolution/latest/userguide/what-is-service.html). +// +// See https://docs.aws.amazon.com/goto/WebAPI/entityresolution-2018-05-10 for more information on this service. +// +// See entityresolution package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/entityresolution/ +// +// # Using the Client +// +// To contact AWS EntityResolution with the SDK, use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently.
+// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the AWS EntityResolution client EntityResolution for more +// information on creating a client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/entityresolution/#New +package entityresolution diff --git a/service/entityresolution/entityresolutioniface/interface.go b/service/entityresolution/entityresolutioniface/interface.go new file mode 100644 index 00000000000..e9573d0469b --- /dev/null +++ b/service/entityresolution/entityresolutioniface/interface.go @@ -0,0 +1,137 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package entityresolutioniface provides an interface to enable mocking the AWS EntityResolution service client +// for testing your code. +// +// It is important to note that this interface will have breaking changes +// when the service model is updated and adds new API operations, paginators, +// and waiters. +package entityresolutioniface + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/entityresolution" +) + +// EntityResolutionAPI provides an interface to enable mocking the +// entityresolution.EntityResolution service client's API operations, +// paginators, and waiters. This makes unit testing your code that calls out +// to the SDK's service client easier. +// +// The best way to use this interface is so that the SDK's service client's calls +// can be stubbed out for unit testing your code with the SDK without needing +// to inject custom request handlers into the SDK's request pipeline. +// +// // myFunc uses an SDK service client to make a request to +// // AWS EntityResolution. +// func myFunc(svc entityresolutioniface.EntityResolutionAPI) bool { +// // Make svc.CreateMatchingWorkflow request +// } +// +// func main() { +// sess := session.New() +// svc := entityresolution.New(sess) +// +// myFunc(svc) +// } +// +// In your _test.go file: +// +// // Define a mock struct to be used in your unit tests of myFunc. +// type mockEntityResolutionClient struct { +// entityresolutioniface.EntityResolutionAPI +// } +// func (m *mockEntityResolutionClient) CreateMatchingWorkflow(input *entityresolution.CreateMatchingWorkflowInput) (*entityresolution.CreateMatchingWorkflowOutput, error) { +// // mock response/functionality +// } +// +// func TestMyFunc(t *testing.T) { +// // Setup Test +// mockSvc := &mockEntityResolutionClient{} +// +// myFunc(mockSvc) +// +// // Verify myFunc's functionality +// } +// +// It is important to note that this interface will have breaking changes +// when the service model is updated and adds new API operations, paginators, +// and waiters. It's suggested to use the pattern above for testing, or to use +// tooling to generate mocks to satisfy the interfaces.
+type EntityResolutionAPI interface { + CreateMatchingWorkflow(*entityresolution.CreateMatchingWorkflowInput) (*entityresolution.CreateMatchingWorkflowOutput, error) + CreateMatchingWorkflowWithContext(aws.Context, *entityresolution.CreateMatchingWorkflowInput, ...request.Option) (*entityresolution.CreateMatchingWorkflowOutput, error) + CreateMatchingWorkflowRequest(*entityresolution.CreateMatchingWorkflowInput) (*request.Request, *entityresolution.CreateMatchingWorkflowOutput) + + CreateSchemaMapping(*entityresolution.CreateSchemaMappingInput) (*entityresolution.CreateSchemaMappingOutput, error) + CreateSchemaMappingWithContext(aws.Context, *entityresolution.CreateSchemaMappingInput, ...request.Option) (*entityresolution.CreateSchemaMappingOutput, error) + CreateSchemaMappingRequest(*entityresolution.CreateSchemaMappingInput) (*request.Request, *entityresolution.CreateSchemaMappingOutput) + + DeleteMatchingWorkflow(*entityresolution.DeleteMatchingWorkflowInput) (*entityresolution.DeleteMatchingWorkflowOutput, error) + DeleteMatchingWorkflowWithContext(aws.Context, *entityresolution.DeleteMatchingWorkflowInput, ...request.Option) (*entityresolution.DeleteMatchingWorkflowOutput, error) + DeleteMatchingWorkflowRequest(*entityresolution.DeleteMatchingWorkflowInput) (*request.Request, *entityresolution.DeleteMatchingWorkflowOutput) + + DeleteSchemaMapping(*entityresolution.DeleteSchemaMappingInput) (*entityresolution.DeleteSchemaMappingOutput, error) + DeleteSchemaMappingWithContext(aws.Context, *entityresolution.DeleteSchemaMappingInput, ...request.Option) (*entityresolution.DeleteSchemaMappingOutput, error) + DeleteSchemaMappingRequest(*entityresolution.DeleteSchemaMappingInput) (*request.Request, *entityresolution.DeleteSchemaMappingOutput) + + GetMatchId(*entityresolution.GetMatchIdInput) (*entityresolution.GetMatchIdOutput, error) + GetMatchIdWithContext(aws.Context, *entityresolution.GetMatchIdInput, ...request.Option) (*entityresolution.GetMatchIdOutput, error) + GetMatchIdRequest(*entityresolution.GetMatchIdInput) (*request.Request, *entityresolution.GetMatchIdOutput) + + GetMatchingJob(*entityresolution.GetMatchingJobInput) (*entityresolution.GetMatchingJobOutput, error) + GetMatchingJobWithContext(aws.Context, *entityresolution.GetMatchingJobInput, ...request.Option) (*entityresolution.GetMatchingJobOutput, error) + GetMatchingJobRequest(*entityresolution.GetMatchingJobInput) (*request.Request, *entityresolution.GetMatchingJobOutput) + + GetMatchingWorkflow(*entityresolution.GetMatchingWorkflowInput) (*entityresolution.GetMatchingWorkflowOutput, error) + GetMatchingWorkflowWithContext(aws.Context, *entityresolution.GetMatchingWorkflowInput, ...request.Option) (*entityresolution.GetMatchingWorkflowOutput, error) + GetMatchingWorkflowRequest(*entityresolution.GetMatchingWorkflowInput) (*request.Request, *entityresolution.GetMatchingWorkflowOutput) + + GetSchemaMapping(*entityresolution.GetSchemaMappingInput) (*entityresolution.GetSchemaMappingOutput, error) + GetSchemaMappingWithContext(aws.Context, *entityresolution.GetSchemaMappingInput, ...request.Option) (*entityresolution.GetSchemaMappingOutput, error) + GetSchemaMappingRequest(*entityresolution.GetSchemaMappingInput) (*request.Request, *entityresolution.GetSchemaMappingOutput) + + ListMatchingJobs(*entityresolution.ListMatchingJobsInput) (*entityresolution.ListMatchingJobsOutput, error) + ListMatchingJobsWithContext(aws.Context, *entityresolution.ListMatchingJobsInput, ...request.Option) (*entityresolution.ListMatchingJobsOutput, 
error) + ListMatchingJobsRequest(*entityresolution.ListMatchingJobsInput) (*request.Request, *entityresolution.ListMatchingJobsOutput) + + ListMatchingJobsPages(*entityresolution.ListMatchingJobsInput, func(*entityresolution.ListMatchingJobsOutput, bool) bool) error + ListMatchingJobsPagesWithContext(aws.Context, *entityresolution.ListMatchingJobsInput, func(*entityresolution.ListMatchingJobsOutput, bool) bool, ...request.Option) error + + ListMatchingWorkflows(*entityresolution.ListMatchingWorkflowsInput) (*entityresolution.ListMatchingWorkflowsOutput, error) + ListMatchingWorkflowsWithContext(aws.Context, *entityresolution.ListMatchingWorkflowsInput, ...request.Option) (*entityresolution.ListMatchingWorkflowsOutput, error) + ListMatchingWorkflowsRequest(*entityresolution.ListMatchingWorkflowsInput) (*request.Request, *entityresolution.ListMatchingWorkflowsOutput) + + ListMatchingWorkflowsPages(*entityresolution.ListMatchingWorkflowsInput, func(*entityresolution.ListMatchingWorkflowsOutput, bool) bool) error + ListMatchingWorkflowsPagesWithContext(aws.Context, *entityresolution.ListMatchingWorkflowsInput, func(*entityresolution.ListMatchingWorkflowsOutput, bool) bool, ...request.Option) error + + ListSchemaMappings(*entityresolution.ListSchemaMappingsInput) (*entityresolution.ListSchemaMappingsOutput, error) + ListSchemaMappingsWithContext(aws.Context, *entityresolution.ListSchemaMappingsInput, ...request.Option) (*entityresolution.ListSchemaMappingsOutput, error) + ListSchemaMappingsRequest(*entityresolution.ListSchemaMappingsInput) (*request.Request, *entityresolution.ListSchemaMappingsOutput) + + ListSchemaMappingsPages(*entityresolution.ListSchemaMappingsInput, func(*entityresolution.ListSchemaMappingsOutput, bool) bool) error + ListSchemaMappingsPagesWithContext(aws.Context, *entityresolution.ListSchemaMappingsInput, func(*entityresolution.ListSchemaMappingsOutput, bool) bool, ...request.Option) error + + ListTagsForResource(*entityresolution.ListTagsForResourceInput) (*entityresolution.ListTagsForResourceOutput, error) + ListTagsForResourceWithContext(aws.Context, *entityresolution.ListTagsForResourceInput, ...request.Option) (*entityresolution.ListTagsForResourceOutput, error) + ListTagsForResourceRequest(*entityresolution.ListTagsForResourceInput) (*request.Request, *entityresolution.ListTagsForResourceOutput) + + StartMatchingJob(*entityresolution.StartMatchingJobInput) (*entityresolution.StartMatchingJobOutput, error) + StartMatchingJobWithContext(aws.Context, *entityresolution.StartMatchingJobInput, ...request.Option) (*entityresolution.StartMatchingJobOutput, error) + StartMatchingJobRequest(*entityresolution.StartMatchingJobInput) (*request.Request, *entityresolution.StartMatchingJobOutput) + + TagResource(*entityresolution.TagResourceInput) (*entityresolution.TagResourceOutput, error) + TagResourceWithContext(aws.Context, *entityresolution.TagResourceInput, ...request.Option) (*entityresolution.TagResourceOutput, error) + TagResourceRequest(*entityresolution.TagResourceInput) (*request.Request, *entityresolution.TagResourceOutput) + + UntagResource(*entityresolution.UntagResourceInput) (*entityresolution.UntagResourceOutput, error) + UntagResourceWithContext(aws.Context, *entityresolution.UntagResourceInput, ...request.Option) (*entityresolution.UntagResourceOutput, error) + UntagResourceRequest(*entityresolution.UntagResourceInput) (*request.Request, *entityresolution.UntagResourceOutput) + + UpdateMatchingWorkflow(*entityresolution.UpdateMatchingWorkflowInput) 
(*entityresolution.UpdateMatchingWorkflowOutput, error) + UpdateMatchingWorkflowWithContext(aws.Context, *entityresolution.UpdateMatchingWorkflowInput, ...request.Option) (*entityresolution.UpdateMatchingWorkflowOutput, error) + UpdateMatchingWorkflowRequest(*entityresolution.UpdateMatchingWorkflowInput) (*request.Request, *entityresolution.UpdateMatchingWorkflowOutput) +} + +var _ EntityResolutionAPI = (*entityresolution.EntityResolution)(nil) diff --git a/service/entityresolution/errors.go b/service/entityresolution/errors.go new file mode 100644 index 00000000000..b2f1fe07866 --- /dev/null +++ b/service/entityresolution/errors.go @@ -0,0 +1,69 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package entityresolution + +import ( + "github.com/aws/aws-sdk-go/private/protocol" +) + +const ( + + // ErrCodeAccessDeniedException for service response error code + // "AccessDeniedException". + // + // You do not have sufficient access to perform this action. HTTP Status Code: + // 403 + ErrCodeAccessDeniedException = "AccessDeniedException" + + // ErrCodeConflictException for service response error code + // "ConflictException". + // + // The request could not be processed because of a conflict in the current state + // of the resource. Example: Workflow already exists, Schema already exists, + // Workflow is currently running, etc. HTTP Status Code: 400 + ErrCodeConflictException = "ConflictException" + + // ErrCodeExceedsLimitException for service response error code + // "ExceedsLimitException". + // + // The request was rejected because it attempted to create resources beyond + // the current AWS Entity Resolution account limits. The error message describes + // the limit exceeded. HTTP Status Code: 402 + ErrCodeExceedsLimitException = "ExceedsLimitException" + + // ErrCodeInternalServerException for service response error code + // "InternalServerException". + // + // This exception occurs when there is an internal failure in the AWS Entity + // Resolution service. HTTP Status Code: 500 + ErrCodeInternalServerException = "InternalServerException" + + // ErrCodeResourceNotFoundException for service response error code + // "ResourceNotFoundException". + // + // The resource could not be found. HTTP Status Code: 404 + ErrCodeResourceNotFoundException = "ResourceNotFoundException" + + // ErrCodeThrottlingException for service response error code + // "ThrottlingException". + // + // The request was denied due to request throttling. HTTP Status Code: 429 + ErrCodeThrottlingException = "ThrottlingException" + + // ErrCodeValidationException for service response error code + // "ValidationException". + // + // The input fails to satisfy the constraints specified by AWS Entity Resolution.
+ // HTTP Status Code: 400 + ErrCodeValidationException = "ValidationException" +) + +var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ + "AccessDeniedException": newErrorAccessDeniedException, + "ConflictException": newErrorConflictException, + "ExceedsLimitException": newErrorExceedsLimitException, + "InternalServerException": newErrorInternalServerException, + "ResourceNotFoundException": newErrorResourceNotFoundException, + "ThrottlingException": newErrorThrottlingException, + "ValidationException": newErrorValidationException, +} diff --git a/service/entityresolution/service.go b/service/entityresolution/service.go new file mode 100644 index 00000000000..1b31b2c8138 --- /dev/null +++ b/service/entityresolution/service.go @@ -0,0 +1,106 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package entityresolution + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/restjson" +) + +// EntityResolution provides the API operation methods for making requests to +// AWS EntityResolution. See this package's package overview docs +// for details on the service. +// +// EntityResolution methods are safe to use concurrently. It is not safe to +// mutate any of the struct's properties though. +type EntityResolution struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// Service information constants +const ( + ServiceName = "EntityResolution" // Name of service. + EndpointsID = "entityresolution" // ID to lookup a service endpoint with. + ServiceID = "EntityResolution" // ServiceID is a unique identifier of a specific service. +) + +// New creates a new instance of the EntityResolution client with a session. +// If additional configuration is needed for the client instance, use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// +// mySession := session.Must(session.NewSession()) +// +// // Create an EntityResolution client from just a session. +// svc := entityresolution.New(mySession) +// +// // Create an EntityResolution client with additional configuration +// svc := entityresolution.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *EntityResolution { + c := p.ClientConfig(EndpointsID, cfgs...) + if c.SigningNameDerived || len(c.SigningName) == 0 { + c.SigningName = "entityresolution" + } + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion) +} + +// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName, resolvedRegion string) *EntityResolution { + svc := &EntityResolution{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + ServiceID: ServiceID, + SigningName: signingName, + SigningRegion: signingRegion, + PartitionID: partitionID, + Endpoint: endpoint, + APIVersion: "2018-05-10", + ResolvedRegion: resolvedRegion, + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed( + protocol.NewUnmarshalErrorHandler(restjson.NewUnmarshalTypedError(exceptionFromCode)).NamedHandler(), + ) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for an EntityResolution operation and runs any +// custom request initialization. +func (c *EntityResolution) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/service/glue/api.go b/service/glue/api.go index ff259a66f61..6ca868f88e6 100644 --- a/service/glue/api.go +++ b/service/glue/api.go @@ -21513,7 +21513,7 @@ func (s *AmazonRedshiftSource) SetName(v string) *AmazonRedshiftSource { type AmazonRedshiftTarget struct { _ struct{} `type:"structure"` - // Specifies the data of the Amazon Reshift target node. + // Specifies the data of the Amazon Redshift target node. Data *AmazonRedshiftNodeData `type:"structure"` // The nodes that are inputs to the data target. @@ -25921,6 +25921,12 @@ type CodeGenConfigurationNode struct { // DynamicFrames. The output is the selected DynamicFrame SelectFromCollection *SelectFromCollection `type:"structure"` + // Specifies a Snowflake data source. + SnowflakeSource *SnowflakeSource `type:"structure"` + + // Specifies a target that writes to a Snowflake data source. + SnowflakeTarget *SnowflakeTarget `type:"structure"` + // Specifies a connector to an Apache Spark data source. SparkConnectorSource *SparkConnectorSource `type:"structure"` @@ -26265,6 +26271,16 @@ func (s *CodeGenConfigurationNode) Validate() error { invalidParams.AddNested("SelectFromCollection", err.(request.ErrInvalidParams)) } } + if s.SnowflakeSource != nil { + if err := s.SnowflakeSource.Validate(); err != nil { + invalidParams.AddNested("SnowflakeSource", err.(request.ErrInvalidParams)) + } + } + if s.SnowflakeTarget != nil { + if err := s.SnowflakeTarget.Validate(); err != nil { + invalidParams.AddNested("SnowflakeTarget", err.(request.ErrInvalidParams)) + } + } if s.SparkConnectorSource != nil { if err := s.SparkConnectorSource.Validate(); err != nil { invalidParams.AddNested("SparkConnectorSource", err.(request.ErrInvalidParams)) @@ -26668,6 +26684,18 @@ func (s *CodeGenConfigurationNode) SetSelectFromCollection(v *SelectFromCollecti return s } +// SetSnowflakeSource sets the SnowflakeSource field's value. +func (s *CodeGenConfigurationNode) SetSnowflakeSource(v *SnowflakeSource) *CodeGenConfigurationNode { + s.SnowflakeSource = v + return s +} + +// SetSnowflakeTarget sets the SnowflakeTarget field's value.
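+//
+// An illustrative sketch (not part of the generated code): wiring a Snowflake
+// source into a job's node graph. The node, database, schema, and table names
+// below are hypothetical.
+//
+//	node := (&glue.CodeGenConfigurationNode{}).
+//	    SetSnowflakeSource((&glue.SnowflakeSource{}).
+//	        SetName("snowflake_src").
+//	        SetData((&glue.SnowflakeNodeData{}).
+//	            SetSourceType("table").
+//	            SetDatabase("analytics").
+//	            SetSchema("public").
+//	            SetTable("customers")))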
+func (s *CodeGenConfigurationNode) SetSnowflakeTarget(v *SnowflakeTarget) *CodeGenConfigurationNode { + s.SnowflakeTarget = v + return s +} + // SetSparkConnectorSource sets the SparkConnectorSource field's value. func (s *CodeGenConfigurationNode) SetSparkConnectorSource(v *SparkConnectorSource) *CodeGenConfigurationNode { s.SparkConnectorSource = v @@ -64138,6 +64166,386 @@ func (s *SkewedInfo) SetSkewedColumnValues(v []*string) *SkewedInfo { return s } +// Specifies configuration for Snowflake nodes in Glue Studio. +type SnowflakeNodeData struct { + _ struct{} `type:"structure"` + + // Specifies what action to take when writing to a table with preexisting data. + // Valid values: append, merge, truncate, drop. + Action *string `type:"string"` + + // Specifies additional options passed to the Snowflake connector. If options + // are specified elsewhere in this node, this will take precedence. + AdditionalOptions map[string]*string `type:"map"` + + // Specifies whether automatic query pushdown is enabled. If pushdown is enabled, + // then when a query is run on Spark, if part of the query can be "pushed down" + // to the Snowflake server, it is pushed down. This improves performance of + // some queries. + AutoPushdown *bool `type:"boolean"` + + // Specifies a Glue Data Catalog Connection to a Snowflake endpoint. + Connection *Option `type:"structure"` + + // Specifies a Snowflake database for your node to use. + Database *string `type:"string"` + + // Not currently used. + IamRole *Option `type:"structure"` + + // Specifies a merge action. Valid values: simple, custom. If simple, merge + // behavior is defined by MergeWhenMatched and MergeWhenNotMatched. If custom, + // it is defined by MergeClause. + MergeAction *string `type:"string"` + + // A SQL statement that specifies a custom merge behavior. + MergeClause *string `type:"string"` + + // Specifies how to resolve records that match preexisting data when merging. + // Valid values: update, delete. + MergeWhenMatched *string `type:"string"` + + // Specifies how to process records that do not match preexisting data when + // merging. Valid values: insert, none. + MergeWhenNotMatched *string `type:"string"` + + // A SQL string run after the Snowflake connector performs its standard actions. + PostAction *string `type:"string"` + + // A SQL string run before the Snowflake connector performs its standard actions. + PreAction *string `type:"string"` + + // A SQL string used to retrieve data with the query SourceType. + SampleQuery *string `type:"string"` + + // Specifies a Snowflake database schema for your node to use. + Schema *string `type:"string"` + + // Specifies the columns combined to identify a record when detecting matches + // for merges and upserts. A list of structures with value, label, and description + // keys. Each structure describes a column. + SelectedColumns []*Option `type:"list"` + + // Specifies how the data to be retrieved is specified. Valid values: "table", "query". + SourceType *string `type:"string"` + + // The name of a staging table used when performing merge or upsert append actions. + // Data is written to this table, then moved to Table by a generated postaction. + StagingTable *string `type:"string"` + + // Specifies a Snowflake table for your node to use. + Table *string `type:"string"` + + // Manually defines the target schema for the node. A list of structures with + // value, label, and description keys. Each structure defines a column. + TableSchema []*Option `type:"list"` + + // Not currently used.
+ TempDir *string `type:"string"` + + // Used when Action is append. Specifies the resolution behavior when a row + // already exists. If true, preexisting rows will be updated. If false, those + // rows will be inserted. + Upsert *bool `type:"boolean"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SnowflakeNodeData) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SnowflakeNodeData) GoString() string { + return s.String() +} + +// SetAction sets the Action field's value. +func (s *SnowflakeNodeData) SetAction(v string) *SnowflakeNodeData { + s.Action = &v + return s +} + +// SetAdditionalOptions sets the AdditionalOptions field's value. +func (s *SnowflakeNodeData) SetAdditionalOptions(v map[string]*string) *SnowflakeNodeData { + s.AdditionalOptions = v + return s +} + +// SetAutoPushdown sets the AutoPushdown field's value. +func (s *SnowflakeNodeData) SetAutoPushdown(v bool) *SnowflakeNodeData { + s.AutoPushdown = &v + return s +} + +// SetConnection sets the Connection field's value. +func (s *SnowflakeNodeData) SetConnection(v *Option) *SnowflakeNodeData { + s.Connection = v + return s +} + +// SetDatabase sets the Database field's value. +func (s *SnowflakeNodeData) SetDatabase(v string) *SnowflakeNodeData { + s.Database = &v + return s +} + +// SetIamRole sets the IamRole field's value. +func (s *SnowflakeNodeData) SetIamRole(v *Option) *SnowflakeNodeData { + s.IamRole = v + return s +} + +// SetMergeAction sets the MergeAction field's value. +func (s *SnowflakeNodeData) SetMergeAction(v string) *SnowflakeNodeData { + s.MergeAction = &v + return s +} + +// SetMergeClause sets the MergeClause field's value. +func (s *SnowflakeNodeData) SetMergeClause(v string) *SnowflakeNodeData { + s.MergeClause = &v + return s +} + +// SetMergeWhenMatched sets the MergeWhenMatched field's value. +func (s *SnowflakeNodeData) SetMergeWhenMatched(v string) *SnowflakeNodeData { + s.MergeWhenMatched = &v + return s +} + +// SetMergeWhenNotMatched sets the MergeWhenNotMatched field's value. +func (s *SnowflakeNodeData) SetMergeWhenNotMatched(v string) *SnowflakeNodeData { + s.MergeWhenNotMatched = &v + return s +} + +// SetPostAction sets the PostAction field's value. +func (s *SnowflakeNodeData) SetPostAction(v string) *SnowflakeNodeData { + s.PostAction = &v + return s +} + +// SetPreAction sets the PreAction field's value. +func (s *SnowflakeNodeData) SetPreAction(v string) *SnowflakeNodeData { + s.PreAction = &v + return s +} + +// SetSampleQuery sets the SampleQuery field's value. +func (s *SnowflakeNodeData) SetSampleQuery(v string) *SnowflakeNodeData { + s.SampleQuery = &v + return s +} + +// SetSchema sets the Schema field's value. +func (s *SnowflakeNodeData) SetSchema(v string) *SnowflakeNodeData { + s.Schema = &v + return s +} + +// SetSelectedColumns sets the SelectedColumns field's value. +func (s *SnowflakeNodeData) SetSelectedColumns(v []*Option) *SnowflakeNodeData { + s.SelectedColumns = v + return s +} + +// SetSourceType sets the SourceType field's value. 
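+//
+// An illustrative sketch (not part of the generated code): configuring a
+// merge write, per the field documentation above. The staging table name
+// below is hypothetical.
+//
+//	data := (&glue.SnowflakeNodeData{}).
+//	    SetAction("merge").
+//	    SetMergeAction("simple").
+//	    SetMergeWhenMatched("update").
+//	    SetMergeWhenNotMatched("insert").
+//	    SetStagingTable("customers_staging")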
+func (s *SnowflakeNodeData) SetSourceType(v string) *SnowflakeNodeData { + s.SourceType = &v + return s +} + +// SetStagingTable sets the StagingTable field's value. +func (s *SnowflakeNodeData) SetStagingTable(v string) *SnowflakeNodeData { + s.StagingTable = &v + return s +} + +// SetTable sets the Table field's value. +func (s *SnowflakeNodeData) SetTable(v string) *SnowflakeNodeData { + s.Table = &v + return s +} + +// SetTableSchema sets the TableSchema field's value. +func (s *SnowflakeNodeData) SetTableSchema(v []*Option) *SnowflakeNodeData { + s.TableSchema = v + return s +} + +// SetTempDir sets the TempDir field's value. +func (s *SnowflakeNodeData) SetTempDir(v string) *SnowflakeNodeData { + s.TempDir = &v + return s +} + +// SetUpsert sets the Upsert field's value. +func (s *SnowflakeNodeData) SetUpsert(v bool) *SnowflakeNodeData { + s.Upsert = &v + return s +} + +// Specifies a Snowflake data source. +type SnowflakeSource struct { + _ struct{} `type:"structure"` + + // Configuration for the Snowflake data source. + // + // Data is a required field + Data *SnowflakeNodeData `type:"structure" required:"true"` + + // The name of the Snowflake data source. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // Specifies user-defined schemas for your output data. + OutputSchemas []*GlueSchema `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SnowflakeSource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SnowflakeSource) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SnowflakeSource) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SnowflakeSource"} + if s.Data == nil { + invalidParams.Add(request.NewErrParamRequired("Data")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.OutputSchemas != nil { + for i, v := range s.OutputSchemas { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "OutputSchemas", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetData sets the Data field's value. +func (s *SnowflakeSource) SetData(v *SnowflakeNodeData) *SnowflakeSource { + s.Data = v + return s +} + +// SetName sets the Name field's value. +func (s *SnowflakeSource) SetName(v string) *SnowflakeSource { + s.Name = &v + return s +} + +// SetOutputSchemas sets the OutputSchemas field's value. +func (s *SnowflakeSource) SetOutputSchemas(v []*GlueSchema) *SnowflakeSource { + s.OutputSchemas = v + return s +} + +// Specifies a Snowflake target. +type SnowflakeTarget struct { + _ struct{} `type:"structure"` + + // Specifies the data of the Snowflake target node. + // + // Data is a required field + Data *SnowflakeNodeData `type:"structure" required:"true"` + + // The nodes that are inputs to the data target. 
+ Inputs []*string `min:"1" type:"list"` + + // The name of the Snowflake target. + // + // Name is a required field + Name *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SnowflakeTarget) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SnowflakeTarget) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SnowflakeTarget) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SnowflakeTarget"} + if s.Data == nil { + invalidParams.Add(request.NewErrParamRequired("Data")) + } + if s.Inputs != nil && len(s.Inputs) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Inputs", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetData sets the Data field's value. +func (s *SnowflakeTarget) SetData(v *SnowflakeNodeData) *SnowflakeTarget { + s.Data = v + return s +} + +// SetInputs sets the Inputs field's value. +func (s *SnowflakeTarget) SetInputs(v []*string) *SnowflakeTarget { + s.Inputs = v + return s +} + +// SetName sets the Name field's value. +func (s *SnowflakeTarget) SetName(v string) *SnowflakeTarget { + s.Name = &v + return s +} + // Specifies a field to sort by and a sort order. type SortCriterion struct { _ struct{} `type:"structure"` diff --git a/service/healthlake/api.go b/service/healthlake/api.go index 073df0c4f3d..8d8b374c002 100644 --- a/service/healthlake/api.go +++ b/service/healthlake/api.go @@ -56,7 +56,7 @@ func (c *HealthLake) CreateFHIRDatastoreRequest(input *CreateFHIRDatastoreInput) // CreateFHIRDatastore API operation for Amazon HealthLake. // -// Creates a Data Store that can ingest and export FHIR formatted data. +// Creates a data store that can ingest and export FHIR formatted data. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -145,7 +145,7 @@ func (c *HealthLake) DeleteFHIRDatastoreRequest(input *DeleteFHIRDatastoreInput) // DeleteFHIRDatastore API operation for Amazon HealthLake. // -// Deletes a Data Store. +// Deletes a data store. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -160,14 +160,14 @@ func (c *HealthLake) DeleteFHIRDatastoreRequest(input *DeleteFHIRDatastoreInput) // Access is denied. Your account is not authorized to perform this operation. // // - ConflictException -// The Data Store is in a transition state and the user requested action can +// The data store is in a transition state and the user requested action can // not be performed. // // - ValidationException // The user input parameter was invalid. // // - ResourceNotFoundException -// The requested Data Store was not found. +// The requested data store was not found. 
// // - ThrottlingException // The user has exceeded their maximum number of allowed calls to the given @@ -241,9 +241,9 @@ func (c *HealthLake) DescribeFHIRDatastoreRequest(input *DescribeFHIRDatastoreIn // DescribeFHIRDatastore API operation for Amazon HealthLake. // -// Gets the properties associated with the FHIR Data Store, including the Data -// Store ID, Data Store ARN, Data Store name, Data Store status, created at, -// Data Store type version, and Data Store endpoint. +// Gets the properties associated with the FHIR data store, including the data +// store ID, data store ARN, data store name, data store status, when the data +// store was created, data store type version, and the data store's endpoint. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -258,7 +258,7 @@ func (c *HealthLake) DescribeFHIRDatastoreRequest(input *DescribeFHIRDatastoreIn // The user input parameter was invalid. // // - ResourceNotFoundException -// The requested Data Store was not found. +// The requested data store was not found. // // - ThrottlingException // The user has exceeded their maximum number of allowed calls to the given @@ -348,7 +348,7 @@ func (c *HealthLake) DescribeFHIRExportJobRequest(input *DescribeFHIRExportJobIn // The user input parameter was invalid. // // - ResourceNotFoundException -// The requested Data Store was not found. +// The requested data store was not found. // // - ThrottlingException // The user has exceeded their maximum number of allowed calls to the given @@ -438,7 +438,7 @@ func (c *HealthLake) DescribeFHIRImportJobRequest(input *DescribeFHIRImportJobIn // The user input parameter was invalid. // // - ResourceNotFoundException -// The requested Data Store was not found. +// The requested data store was not found. // // - ThrottlingException // The user has exceeded their maximum number of allowed calls to the given @@ -518,8 +518,8 @@ func (c *HealthLake) ListFHIRDatastoresRequest(input *ListFHIRDatastoresInput) ( // ListFHIRDatastores API operation for Amazon HealthLake. // -// Lists all FHIR Data Stores that are in the user’s account, regardless of -// Data Store status. +// Lists all FHIR data stores that are in the user’s account, regardless of +// data store status. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -677,7 +677,7 @@ func (c *HealthLake) ListFHIRExportJobsRequest(input *ListFHIRExportJobsInput) ( // The user input parameter was invalid. // // - ResourceNotFoundException -// The requested Data Store was not found. +// The requested data store was not found. // // - AccessDeniedException // Access is denied. Your account is not authorized to perform this operation. @@ -826,7 +826,7 @@ func (c *HealthLake) ListFHIRImportJobsRequest(input *ListFHIRImportJobsInput) ( // The user input parameter was invalid. // // - ResourceNotFoundException -// The requested Data Store was not found. +// The requested data store was not found. // // - AccessDeniedException // Access is denied. Your account is not authorized to perform this operation. @@ -954,7 +954,7 @@ func (c *HealthLake) ListTagsForResourceRequest(input *ListTagsForResourceInput) // ListTagsForResource API operation for Amazon HealthLake. // -// Returns a list of all existing tags associated with a Data Store. 
+// Returns a list of all existing tags associated with a data store. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -969,7 +969,7 @@ func (c *HealthLake) ListTagsForResourceRequest(input *ListTagsForResourceInput) // The user input parameter was invalid. // // - ResourceNotFoundException -// The requested Data Store was not found. +// The requested data store was not found. // // See also, https://docs.aws.amazon.com/goto/WebAPI/healthlake-2017-07-01/ListTagsForResource func (c *HealthLake) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { @@ -1058,7 +1058,7 @@ func (c *HealthLake) StartFHIRExportJobRequest(input *StartFHIRExportJobInput) ( // Access is denied. Your account is not authorized to perform this operation. // // - ResourceNotFoundException -// The requested Data Store was not found. +// The requested data store was not found. // // - InternalServerException // Unknown error occurs in the service. @@ -1150,7 +1150,7 @@ func (c *HealthLake) StartFHIRImportJobRequest(input *StartFHIRImportJobInput) ( // Access is denied. Your account is not authorized to perform this operation. // // - ResourceNotFoundException -// The requested Data Store was not found. +// The requested data store was not found. // // - InternalServerException // Unknown error occurs in the service. @@ -1221,7 +1221,7 @@ func (c *HealthLake) TagResourceRequest(input *TagResourceInput) (req *request.R // TagResource API operation for Amazon HealthLake. // -// Adds a user specified key and value tag to a Data Store. +// Adds a user specified key and value tag to a data store. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1236,7 +1236,7 @@ func (c *HealthLake) TagResourceRequest(input *TagResourceInput) (req *request.R // The user input parameter was invalid. // // - ResourceNotFoundException -// The requested Data Store was not found. +// The requested data store was not found. // // See also, https://docs.aws.amazon.com/goto/WebAPI/healthlake-2017-07-01/TagResource func (c *HealthLake) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { @@ -1304,7 +1304,7 @@ func (c *HealthLake) UntagResourceRequest(input *UntagResourceInput) (req *reque // UntagResource API operation for Amazon HealthLake. // -// Removes tags from a Data Store. +// Removes tags from a data store. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1319,7 +1319,7 @@ func (c *HealthLake) UntagResourceRequest(input *UntagResourceInput) (req *reque // The user input parameter was invalid. // // - ResourceNotFoundException -// The requested Data Store was not found. +// The requested data store was not found. // // See also, https://docs.aws.amazon.com/goto/WebAPI/healthlake-2017-07-01/UntagResource func (c *HealthLake) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { @@ -1407,7 +1407,7 @@ func (s *AccessDeniedException) RequestID() string { return s.RespMetadata.RequestID } -// The Data Store is in a transition state and the user requested action can +// The data store is in a transition state and the user requested action can // not be performed. 
type ConflictException struct { _ struct{} `type:"structure"` @@ -1478,27 +1478,27 @@ type CreateFHIRDatastoreInput struct { // Optional user provided token used for ensuring idempotency. ClientToken *string `min:"1" type:"string" idempotencyToken:"true"` - // The user generated name for the Data Store. + // The user generated name for the data store. DatastoreName *string `min:"1" type:"string"` - // The FHIR version of the Data Store. The only supported version is R4. + // The FHIR version of the data store. The only supported version is R4. // // DatastoreTypeVersion is a required field DatastoreTypeVersion *string `type:"string" required:"true" enum:"FHIRVersion"` // The configuration of the identity provider that you want to use for your - // Data Store. + // data store. IdentityProviderConfiguration *IdentityProviderConfiguration `type:"structure"` - // Optional parameter to preload data upon creation of the Data Store. Currently, + // Optional parameter to preload data upon creation of the data store. Currently, // the only supported preloaded data is synthetic data generated from Synthea. PreloadDataConfig *PreloadDataConfig `type:"structure"` // The server-side encryption key configuration for a customer provided encryption - // key specified for creating a Data Store. + // key specified for creating a data store. SseConfiguration *SseConfiguration `type:"structure"` - // Resource tags that are applied to a Data Store when it is created. + // Resource tags that are applied to a data store when it is created. Tags []*Tag `type:"list"` } @@ -1609,25 +1609,24 @@ func (s *CreateFHIRDatastoreInput) SetTags(v []*Tag) *CreateFHIRDatastoreInput { type CreateFHIRDatastoreOutput struct { _ struct{} `type:"structure"` - // The Data Store ARN is generated during the creation of the Data Store and - // can be found in the output from the initial Data Store creation call. + // The data store ARN is generated during the creation of the data store and + // can be found in the output from the initial data store creation call. // // DatastoreArn is a required field DatastoreArn *string `type:"string" required:"true"` - // The AWS endpoint for the created Data Store. + // The AWS endpoint for the created data store. // // DatastoreEndpoint is a required field DatastoreEndpoint *string `min:"1" type:"string" required:"true"` - // The AWS-generated Data Store id. This id is in the output from the initial - // Data Store creation call. + // The AWS-generated data store id. This id is in the output from the initial + // data store creation call. // // DatastoreId is a required field DatastoreId *string `min:"1" type:"string" required:"true"` - // The status of the FHIR Data Store. Possible statuses are ‘CREATING’, - // ‘ACTIVE’, ‘DELETING’, ‘DELETED’. + // The status of the FHIR data store. // // DatastoreStatus is a required field DatastoreStatus *string `type:"string" required:"true" enum:"DatastoreStatus"` @@ -1675,22 +1674,22 @@ func (s *CreateFHIRDatastoreOutput) SetDatastoreStatus(v string) *CreateFHIRData return s } -// The filters applied to Data Store query. +// The filters applied to data store query. type DatastoreFilter struct { _ struct{} `type:"structure"` - // A filter that allows the user to set cutoff dates for records. All Data Stores + // A filter that allows the user to set cutoff dates for records. All data stores // created after the specified date will be included in the results. 
CreatedAfter *time.Time `type:"timestamp"` - // A filter that allows the user to set cutoff dates for records. All Data Stores + // A filter that allows the user to set cutoff dates for records. All data stores // created before the specified date will be included in the results. CreatedBefore *time.Time `type:"timestamp"` - // Allows the user to filter Data Store results by name. + // Allows the user to filter data store results by name. DatastoreName *string `min:"1" type:"string"` - // Allows the user to filter Data Store results by status. + // Allows the user to filter data store results by status. DatastoreStatus *string `type:"string" enum:"DatastoreStatus"` } @@ -1749,35 +1748,34 @@ func (s *DatastoreFilter) SetDatastoreStatus(v string) *DatastoreFilter { return s } -// Displays the properties of the Data Store, including the ID, ARN, name, and -// the status of the Data Store. +// Displays the properties of the data store, including the ID, ARN, name, and +// the status of the data store. type DatastoreProperties struct { _ struct{} `type:"structure"` - // The time that a Data Store was created. + // The time that a data store was created. CreatedAt *time.Time `type:"timestamp"` - // The Amazon Resource Name used in the creation of the Data Store. + // The Amazon Resource Name used in the creation of the data store. // // DatastoreArn is a required field DatastoreArn *string `type:"string" required:"true"` - // The AWS endpoint for the Data Store. Each Data Store will have it's own endpoint - // with Data Store ID in the endpoint URL. + // The AWS endpoint for the data store. Each data store will have its own endpoint + // with the data store ID in the endpoint URL. // // DatastoreEndpoint is a required field DatastoreEndpoint *string `type:"string" required:"true"` - // The AWS-generated ID number for the Data Store. + // The AWS-generated ID number for the data store. // // DatastoreId is a required field DatastoreId *string `min:"1" type:"string" required:"true"` - // The user-generated name for the Data Store. + // The user-generated name for the data store. DatastoreName *string `min:"1" type:"string"` - // The status of the Data Store. Possible statuses are 'CREATING', 'ACTIVE', - // 'DELETING', or 'DELETED'. + // The status of the data store. // // DatastoreStatus is a required field DatastoreStatus *string `type:"string" required:"true" enum:"DatastoreStatus"` @@ -1787,10 +1785,10 @@ type DatastoreProperties struct { // DatastoreTypeVersion is a required field DatastoreTypeVersion *string `type:"string" required:"true" enum:"FHIRVersion"` - // The identity provider that you selected when you created the Data Store. + // The identity provider that you selected when you created the data store. IdentityProviderConfiguration *IdentityProviderConfiguration `type:"structure"` - // The preloaded data configuration for the Data Store. Only data preloaded + // The preloaded data configuration for the data store. Only data preloaded // from Synthea is supported. PreloadDataConfig *PreloadDataConfig `type:"structure"` @@ -1880,7 +1878,7 @@ func (s *DatastoreProperties) SetSseConfiguration(v *SseConfiguration) *Datastor type DeleteFHIRDatastoreInput struct { _ struct{} `type:"structure"` - // The AWS-generated ID for the Data Store to be deleted. + // The AWS-generated ID for the data store to be deleted.
// // DatastoreId is a required field DatastoreId *string `min:"1" type:"string" required:"true"` @@ -1929,22 +1927,22 @@ func (s *DeleteFHIRDatastoreInput) SetDatastoreId(v string) *DeleteFHIRDatastore type DeleteFHIRDatastoreOutput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) that gives Amazon HealthLake access permission. + // The Amazon Resource Name (ARN) that gives AWS HealthLake access permission. // // DatastoreArn is a required field DatastoreArn *string `type:"string" required:"true"` - // The AWS endpoint for the Data Store the user has requested to be deleted. + // The AWS endpoint for the data store the user has requested to be deleted. // // DatastoreEndpoint is a required field DatastoreEndpoint *string `min:"1" type:"string" required:"true"` - // The AWS-generated ID for the Data Store to be deleted. + // The AWS-generated ID for the data store to be deleted. // // DatastoreId is a required field DatastoreId *string `min:"1" type:"string" required:"true"` - // The status of the Data Store that the user has requested to be deleted. + // The status of the data store that the user has requested to be deleted. // // DatastoreStatus is a required field DatastoreStatus *string `type:"string" required:"true" enum:"DatastoreStatus"` @@ -1995,7 +1993,7 @@ func (s *DeleteFHIRDatastoreOutput) SetDatastoreStatus(v string) *DeleteFHIRData type DescribeFHIRDatastoreInput struct { _ struct{} `type:"structure"` - // The AWS-generated Data Store ID. + // The AWS-generated data store ID. // // DatastoreId is a required field DatastoreId *string `min:"1" type:"string" required:"true"` @@ -2044,9 +2042,9 @@ func (s *DescribeFHIRDatastoreInput) SetDatastoreId(v string) *DescribeFHIRDatas type DescribeFHIRDatastoreOutput struct { _ struct{} `type:"structure"` - // All properties associated with a Data Store, including the Data Store ID, - // Data Store ARN, Data Store name, Data Store status, created at, Data Store - // type version, and Data Store endpoint. + // All properties associated with a data store, including the data store ID, + // data store ARN, data store name, data store status, when the data store was + // created, data store type version, and the data store's endpoint. // // DatastoreProperties is a required field DatastoreProperties *DatastoreProperties `type:"structure" required:"true"` @@ -2079,7 +2077,7 @@ func (s *DescribeFHIRDatastoreOutput) SetDatastoreProperties(v *DatastorePropert type DescribeFHIRExportJobInput struct { _ struct{} `type:"structure"` - // The AWS generated ID for the Data Store from which files are being exported + // The AWS generated ID for the data store from which files are being exported // from for an export job. // // DatastoreId is a required field @@ -2180,7 +2178,7 @@ func (s *DescribeFHIRExportJobOutput) SetExportJobProperties(v *ExportJobPropert type DescribeFHIRImportJobInput struct { _ struct{} `type:"structure"` - // The AWS-generated ID of the Data Store. + // The AWS-generated ID of the data store. // // DatastoreId is a required field DatastoreId *string `min:"1" type:"string" required:"true"` @@ -2285,7 +2283,7 @@ type ExportJobProperties struct { // The Amazon Resource Name used during the initiation of the job. DataAccessRoleArn *string `min:"20" type:"string"` - // The AWS generated ID for the Data Store from which files are being exported + // The AWS generated ID for the data store from which files are being exported // for an export job. 
// // DatastoreId is a required field @@ -2394,17 +2392,17 @@ func (s *ExportJobProperties) SetSubmitTime(v time.Time) *ExportJobProperties { return s } -// The identity provider configuration that you gave when the Data Store was +// The identity provider configuration that you gave when the data store was // created. type IdentityProviderConfiguration struct { _ struct{} `type:"structure"` - // The authorization strategy that you selected when you created the Data Store. + // The authorization strategy that you selected when you created the data store. // // AuthorizationStrategy is a required field AuthorizationStrategy *string `type:"string" required:"true" enum:"AuthorizationStrategy"` - // If you enabled fine-grained authorization when you created the Data Store. + // Indicates whether fine-grained authorization was enabled when you created the data store. FineGrainedAuthorizationEnabled *bool `type:"boolean"` // The Amazon Resource Name (ARN) of the Lambda function that you want to use @@ -2493,12 +2491,12 @@ func (s *IdentityProviderConfiguration) SetMetadata(v string) *IdentityProviderC } // Displays the properties of the import job, including the ID, Arn, Name, and -// the status of the Data Store. +// the status of the data store. type ImportJobProperties struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) that gives Amazon HealthLake access to your - // input data. + // The Amazon Resource Name (ARN) that gives AWS HealthLake access to your input + // data. DataAccessRoleArn *string `min:"20" type:"string"` // The datastore id used when the Import job was created. @@ -2624,7 +2622,7 @@ type InputDataConfig struct { _ struct{} `type:"structure"` // The S3Uri is the user specified S3 location of the FHIR data to be imported - // into Amazon HealthLake. + // into AWS HealthLake. S3Uri *string `type:"string"` } @@ -2716,18 +2714,18 @@ func (s *InternalServerException) RequestID() string { return s.RespMetadata.RequestID } -// The customer-managed-key(CMK) used when creating a Data Store. If a customer +// The customer-managed-key (CMK) used when creating a data store. If a customer // owned key is not specified, an AWS owned key will be used for encryption. type KmsEncryptionConfig struct { _ struct{} `type:"structure"` - // The type of customer-managed-key(CMK) used for encyrption. The two types + // The type of customer-managed-key (CMK) used for encryption. The two types // of supported CMKs are customer owned CMKs and AWS owned CMKs. // // CmkType is a required field CmkType *string `type:"string" required:"true" enum:"CmkType"` - // The KMS encryption key id/alias used to encrypt the Data Store contents at + // The KMS encryption key ID/alias used to encrypt the data store contents at // rest. KmsKeyId *string `min:"1" type:"string"` } @@ -2781,14 +2779,14 @@ func (s *KmsEncryptionConfig) SetKmsKeyId(v string) *KmsEncryptionConfig { return s } type ListFHIRDatastoresInput struct { _ struct{} `type:"structure"` - // Lists all filters associated with a FHIR Data Store request. + // Lists all filters associated with a FHIR data store request. Filter *DatastoreFilter `type:"structure"` - // The maximum number of Data Stores returned in a single page of a ListFHIRDatastoresRequest + // The maximum number of data stores returned in a single page of a ListFHIRDatastoresRequest // call. MaxResults *int64 `min:"1" type:"integer"` - // Fetches the next page of Data Stores when results are paginated. + // Fetches the next page of data stores when results are paginated.
NextToken *string `type:"string"` } @@ -2849,7 +2847,7 @@ func (s *ListFHIRDatastoresInput) SetNextToken(v string) *ListFHIRDatastoresInpu type ListFHIRDatastoresOutput struct { _ struct{} `type:"structure"` - // All properties associated with the listed Data Stores. + // All properties associated with the listed data stores. // // DatastorePropertiesList is a required field DatastorePropertiesList []*DatastoreProperties `type:"list" required:"true"` @@ -2891,8 +2889,8 @@ func (s *ListFHIRDatastoresOutput) SetNextToken(v string) *ListFHIRDatastoresOut type ListFHIRExportJobsInput struct { _ struct{} `type:"structure"` - // This parameter limits the response to the export job with the specified Data - // Store ID. + // This parameter limits the response to the export job with the specified data + // store ID. // // DatastoreId is a required field DatastoreId *string `min:"1" type:"string" required:"true"` @@ -3051,8 +3049,8 @@ func (s *ListFHIRExportJobsOutput) SetNextToken(v string) *ListFHIRExportJobsOut type ListFHIRImportJobsInput struct { _ struct{} `type:"structure"` - // This parameter limits the response to the import job with the specified Data - // Store ID. + // This parameter limits the response to the import job with the specified data + // store ID. // // DatastoreId is a required field DatastoreId *string `min:"1" type:"string" required:"true"` @@ -3211,7 +3209,7 @@ func (s *ListFHIRImportJobsOutput) SetNextToken(v string) *ListFHIRImportJobsOut type ListTagsForResourceInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name(ARN) of the Data Store for which tags are being + // The Amazon Resource Name (ARN) of the data store for which tags are being // added. // // ResourceARN is a required field ResourceARN *string `min:"1" type:"string" required:"true"` @@ -3261,7 +3259,7 @@ func (s *ListTagsForResourceInput) SetResourceARN(v string) *ListTagsForResource type ListTagsForResourceOutput struct { _ struct{} `type:"structure"` - // Returns a list of tags associated with a Data Store. + // Returns a list of tags associated with a data store. Tags []*Tag `type:"list"` } @@ -3336,7 +3334,7 @@ func (s *OutputDataConfig) SetS3Configuration(v *S3Configuration) *OutputDataCon return s } -// The input properties for the preloaded Data Store. Only data preloaded from +// The input properties for the preloaded data store. Only data preloaded from // Synthea is supported. type PreloadDataConfig struct { _ struct{} `type:"structure"` @@ -3384,7 +3382,7 @@ func (s *PreloadDataConfig) SetPreloadDataType(v string) *PreloadDataConfig { return s } -// The requested Data Store was not found. +// The requested data store was not found. type ResourceNotFoundException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` @@ -3459,7 +3457,7 @@ type S3Configuration struct { KmsKeyId *string `min:"1" type:"string" required:"true"` // The S3Uri is the user specified S3 location of the FHIR data to be imported - // into Amazon HealthLake. + // into AWS HealthLake. // // S3Uri is a required field S3Uri *string `type:"string" required:"true"` @@ -3578,7 +3576,7 @@ type StartFHIRExportJobInput struct { // DataAccessRoleArn is a required field DataAccessRoleArn *string `min:"20" type:"string" required:"true"` - // The AWS generated ID for the Data Store from which files are being exported + // The AWS generated ID for the data store from which files are being exported // for an export job.
// // DatastoreId is a required field @@ -3680,7 +3678,7 @@ func (s *StartFHIRExportJobInput) SetOutputDataConfig(v *OutputDataConfig) *Star type StartFHIRExportJobOutput struct { _ struct{} `type:"structure"` - // The AWS generated ID for the Data Store from which files are being exported + // The AWS generated ID for the data store from which files are being exported // for an export job. DatastoreId *string `min:"1" type:"string"` @@ -3738,12 +3736,12 @@ type StartFHIRImportJobInput struct { // Optional user provided token used for ensuring idempotency. ClientToken *string `min:"1" type:"string" idempotencyToken:"true"` - // The Amazon Resource Name (ARN) that gives Amazon HealthLake access permission. + // The Amazon Resource Name (ARN) that gives AWS HealthLake access permission. // // DataAccessRoleArn is a required field DataAccessRoleArn *string `min:"20" type:"string" required:"true"` - // The AWS-generated Data Store ID. + // The AWS-generated data store ID. // // DatastoreId is a required field DatastoreId *string `min:"1" type:"string" required:"true"` @@ -3858,7 +3856,7 @@ func (s *StartFHIRImportJobInput) SetJobOutputDataConfig(v *OutputDataConfig) *S type StartFHIRImportJobOutput struct { _ struct{} `type:"structure"` - // The AWS-generated Data Store ID. + // The AWS-generated data store ID. DatastoreId *string `min:"1" type:"string"` // The AWS-generated job ID. @@ -3976,13 +3974,13 @@ func (s *Tag) SetValue(v string) *Tag { return s } type TagResourceInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name(ARN)that gives Amazon HealthLake access to the Data - // Store which tags are being added to. + // The Amazon Resource Name (ARN) that gives AWS HealthLake access to the data + // store to which tags are being added. // // ResourceARN is a required field ResourceARN *string `min:"1" type:"string" required:"true"` - // The user specified key and value pair tags being added to a Data Store. + // The user specified key and value pair tags being added to a data store. // // Tags is a required field Tags []*Tag `type:"list" required:"true"` @@ -4137,13 +4135,13 @@ func (s *ThrottlingException) RequestID() string { return s.RespMetadata.RequestID } type UntagResourceInput struct { _ struct{} `type:"structure"` - // "The Amazon Resource Name(ARN) of the Data Store for which tags are being - // removed + // The Amazon Resource Name (ARN) of the data store for which tags are being + // removed. // // ResourceARN is a required field ResourceARN *string `min:"1" type:"string" required:"true"` - // The keys for the tags to be removed from the Healthlake Data Store. + // The keys for the tags to be removed from the HealthLake data store. // // TagKeys is a required field TagKeys []*string `type:"list" required:"true"` diff --git a/service/healthlake/doc.go b/service/healthlake/doc.go index 3d73533ea6f..0121660d6f1 100644 --- a/service/healthlake/doc.go +++ b/service/healthlake/doc.go @@ -3,7 +3,7 @@ // Package healthlake provides the client and types for making API // requests to Amazon HealthLake. // -// Amazon HealthLake is a HIPAA eligibile service that allows customers to store, +// AWS HealthLake is a HIPAA eligible service that allows customers to store, // transform, query, and analyze their FHIR-formatted data in a consistent fashion // in the cloud.
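// Editor's sketch (not generated code): creating a FHIR R4 data store with this
// package. The data store name is a placeholder; CreateFHIRDatastoreInput and
// the FHIRVersion enum are defined in api.go above.
//
//	sess := session.Must(session.NewSession())
//	svc := healthlake.New(sess)
//
//	out, err := svc.CreateFHIRDatastore(&healthlake.CreateFHIRDatastoreInput{
//		DatastoreName:        aws.String("my-fhir-store"),
//		DatastoreTypeVersion: aws.String(healthlake.FHIRVersionR4),
//	})
//	if err == nil {
//		fmt.Println(aws.StringValue(out.DatastoreEndpoint))
//	}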
// diff --git a/service/healthlake/errors.go b/service/healthlake/errors.go index add197b48f6..4d33f9d9466 100644 --- a/service/healthlake/errors.go +++ b/service/healthlake/errors.go @@ -17,7 +17,7 @@ const ( // ErrCodeConflictException for service response error code // "ConflictException". // - // The Data Store is in a transition state and the user requested action can + // The data store is in a transition state and the user requested action can // not be performed. ErrCodeConflictException = "ConflictException" @@ -30,7 +30,7 @@ const ( // ErrCodeResourceNotFoundException for service response error code // "ResourceNotFoundException". // - // The requested Data Store was not found. + // The requested data store was not found. ErrCodeResourceNotFoundException = "ResourceNotFoundException" // ErrCodeThrottlingException for service response error code diff --git a/service/managedblockchainquery/api.go b/service/managedblockchainquery/api.go new file mode 100644 index 00000000000..4d2b1059130 --- /dev/null +++ b/service/managedblockchainquery/api.go @@ -0,0 +1,3243 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package managedblockchainquery + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" +) + +const opBatchGetTokenBalance = "BatchGetTokenBalance" + +// BatchGetTokenBalanceRequest generates a "aws/request.Request" representing the +// client's request for the BatchGetTokenBalance operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See BatchGetTokenBalance for more information on using the BatchGetTokenBalance +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the BatchGetTokenBalanceRequest method. +// req, resp := client.BatchGetTokenBalanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/managedblockchain-query-2023-05-04/BatchGetTokenBalance +func (c *ManagedBlockchainQuery) BatchGetTokenBalanceRequest(input *BatchGetTokenBalanceInput) (req *request.Request, output *BatchGetTokenBalanceOutput) { + op := &request.Operation{ + Name: opBatchGetTokenBalance, + HTTPMethod: "POST", + HTTPPath: "/batch-get-token-balance", + } + + if input == nil { + input = &BatchGetTokenBalanceInput{} + } + + output = &BatchGetTokenBalanceOutput{} + req = c.newRequest(op, input, output) + return +} + +// BatchGetTokenBalance API operation for Amazon Managed Blockchain Query. +// +// Gets the token balance for a batch of tokens by using the GetTokenBalance +// action for every token in the request. +// +// Only the native tokens BTC, ETH, and the ERC-20, ERC-721, and ERC-1155 token +// standards are supported. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error.
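// Editor's sketch (not generated code): the runtime type assertion described
// above, switching on the generated error code constants from this package's
// errors.go. svc and input are a prepared client and request; the retry comment
// is illustrative only.
//
//	out, err := svc.BatchGetTokenBalance(input)
//	if err != nil {
//		if aerr, ok := err.(awserr.Error); ok {
//			switch aerr.Code() {
//			case managedblockchainquery.ErrCodeThrottlingException:
//				// back off and retry, or request a limit increase
//			default:
//				fmt.Println(aerr.Code(), aerr.Message())
//			}
//		}
//	}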
+// +// See the AWS API reference guide for Amazon Managed Blockchain Query's +// API operation BatchGetTokenBalance for usage and error information. +// +// Returned Error Types: +// +// - ThrottlingException +// The request or operation couldn't be performed because a service is throttling +// requests. The most common source of throttling errors is when you create +// resources that exceed your service limit for this resource type. Request +// a limit increase or delete unused resources, if possible. +// +// - ValidationException +// The resource passed is invalid. +// +// - ResourceNotFoundException +// The resource was not found. +// +// - AccessDeniedException +// The Amazon Web Services account doesn’t have access to this resource. +// +// - InternalServerException +// The request processing has failed because of an internal error in the service. +// +// - ServiceQuotaExceededException +// The service quota has been exceeded for this resource. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/managedblockchain-query-2023-05-04/BatchGetTokenBalance +func (c *ManagedBlockchainQuery) BatchGetTokenBalance(input *BatchGetTokenBalanceInput) (*BatchGetTokenBalanceOutput, error) { + req, out := c.BatchGetTokenBalanceRequest(input) + return out, req.Send() +} + +// BatchGetTokenBalanceWithContext is the same as BatchGetTokenBalance with the addition of +// the ability to pass a context and additional request options. +// +// See BatchGetTokenBalance for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ManagedBlockchainQuery) BatchGetTokenBalanceWithContext(ctx aws.Context, input *BatchGetTokenBalanceInput, opts ...request.Option) (*BatchGetTokenBalanceOutput, error) { + req, out := c.BatchGetTokenBalanceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetTokenBalance = "GetTokenBalance" + +// GetTokenBalanceRequest generates a "aws/request.Request" representing the +// client's request for the GetTokenBalance operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetTokenBalance for more information on using the GetTokenBalance +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetTokenBalanceRequest method. 
+// req, resp := client.GetTokenBalanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/managedblockchain-query-2023-05-04/GetTokenBalance +func (c *ManagedBlockchainQuery) GetTokenBalanceRequest(input *GetTokenBalanceInput) (req *request.Request, output *GetTokenBalanceOutput) { + op := &request.Operation{ + Name: opGetTokenBalance, + HTTPMethod: "POST", + HTTPPath: "/get-token-balance", + } + + if input == nil { + input = &GetTokenBalanceInput{} + } + + output = &GetTokenBalanceOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetTokenBalance API operation for Amazon Managed Blockchain Query. +// +// Gets the balance of a specific token, including native tokens, for a given +// address (wallet or contract) on the blockchain. +// +// Only the native tokens BTC, ETH, and the ERC-20, ERC-721, and ERC-1155 token +// standards are supported. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Managed Blockchain Query's +// API operation GetTokenBalance for usage and error information. +// +// Returned Error Types: +// +// - ThrottlingException +// The request or operation couldn't be performed because a service is throttling +// requests. The most common source of throttling errors is when you create +// resources that exceed your service limit for this resource type. Request +// a limit increase or delete unused resources, if possible. +// +// - ValidationException +// The resource passed is invalid. +// +// - ResourceNotFoundException +// The resource was not found. +// +// - AccessDeniedException +// The Amazon Web Services account doesn’t have access to this resource. +// +// - InternalServerException +// The request processing has failed because of an internal error in the service. +// +// - ServiceQuotaExceededException +// The service quota has been exceeded for this resource. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/managedblockchain-query-2023-05-04/GetTokenBalance +func (c *ManagedBlockchainQuery) GetTokenBalance(input *GetTokenBalanceInput) (*GetTokenBalanceOutput, error) { + req, out := c.GetTokenBalanceRequest(input) + return out, req.Send() +} + +// GetTokenBalanceWithContext is the same as GetTokenBalance with the addition of +// the ability to pass a context and additional request options. +// +// See GetTokenBalance for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ManagedBlockchainQuery) GetTokenBalanceWithContext(ctx aws.Context, input *GetTokenBalanceInput, opts ...request.Option) (*GetTokenBalanceOutput, error) { + req, out := c.GetTokenBalanceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetTransaction = "GetTransaction" + +// GetTransactionRequest generates a "aws/request.Request" representing the +// client's request for the GetTransaction operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully.
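// Editor's sketch (not generated code): bounding a call with a deadline through
// the WithContext variant; aws.Context is satisfied by a standard
// context.Context. The input's required fields are left as a placeholder
// comment because the input shape is defined later in this file.
//
//	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
//	defer cancel()
//
//	out, err := svc.GetTokenBalanceWithContext(ctx, &managedblockchainquery.GetTokenBalanceInput{
//		// populate the required fields for your address and token here
//	})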
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetTransaction for more information on using the GetTransaction +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetTransactionRequest method. +// req, resp := client.GetTransactionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/managedblockchain-query-2023-05-04/GetTransaction +func (c *ManagedBlockchainQuery) GetTransactionRequest(input *GetTransactionInput) (req *request.Request, output *GetTransactionOutput) { + op := &request.Operation{ + Name: opGetTransaction, + HTTPMethod: "POST", + HTTPPath: "/get-transaction", + } + + if input == nil { + input = &GetTransactionInput{} + } + + output = &GetTransactionOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetTransaction API operation for Amazon Managed Blockchain Query. +// +// Get the details of a transaction. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Managed Blockchain Query's +// API operation GetTransaction for usage and error information. +// +// Returned Error Types: +// +// - ThrottlingException +// The request or operation couldn't be performed because a service is throttling +// requests. The most common source of throttling errors is when you create +// resources that exceed your service limit for this resource type. Request +// a limit increase or delete unused resources, if possible. +// +// - ValidationException +// The resource passed is invalid. +// +// - ResourceNotFoundException +// The resource was not found. +// +// - AccessDeniedException +// The Amazon Web Services account doesn’t have access to this resource. +// +// - InternalServerException +// The request processing has failed because of an internal error in the service. +// +// - ServiceQuotaExceededException +// The service quota has been exceeded for this resource. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/managedblockchain-query-2023-05-04/GetTransaction +func (c *ManagedBlockchainQuery) GetTransaction(input *GetTransactionInput) (*GetTransactionOutput, error) { + req, out := c.GetTransactionRequest(input) + return out, req.Send() +} + +// GetTransactionWithContext is the same as GetTransaction with the addition of +// the ability to pass a context and additional request options. +// +// See GetTransaction for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ManagedBlockchainQuery) GetTransactionWithContext(ctx aws.Context, input *GetTransactionInput, opts ...request.Option) (*GetTransactionOutput, error) { + req, out := c.GetTransactionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opListTokenBalances = "ListTokenBalances" + +// ListTokenBalancesRequest generates a "aws/request.Request" representing the +// client's request for the ListTokenBalances operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListTokenBalances for more information on using the ListTokenBalances +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListTokenBalancesRequest method. +// req, resp := client.ListTokenBalancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/managedblockchain-query-2023-05-04/ListTokenBalances +func (c *ManagedBlockchainQuery) ListTokenBalancesRequest(input *ListTokenBalancesInput) (req *request.Request, output *ListTokenBalancesOutput) { + op := &request.Operation{ + Name: opListTokenBalances, + HTTPMethod: "POST", + HTTPPath: "/list-token-balances", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListTokenBalancesInput{} + } + + output = &ListTokenBalancesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTokenBalances API operation for Amazon Managed Blockchain Query. +// +// This action returns the following for a given blockchain network: +// +// - Lists all token balances owned by an address (either a contract address +// or a wallet address). +// +// - Lists all token balances for all tokens created by a contract. +// +// - Lists all token balances for a given token. +// +// You must always specify the network property of the tokenFilter when using +// this operation. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Managed Blockchain Query's +// API operation ListTokenBalances for usage and error information. +// +// Returned Error Types: +// +// - ThrottlingException +// The request or operation couldn't be performed because a service is throttling +// requests. The most common source of throttling errors is when you create +// resources that exceed your service limit for this resource type. Request +// a limit increase or delete unused resources, if possible. +// +// - ValidationException +// The resource passed is invalid. +// +// - AccessDeniedException +// The Amazon Web Services account doesn’t have access to this resource. +// +// - InternalServerException +// The request processing has failed because of an internal error in the service. +// +// - ServiceQuotaExceededException +// The service quota has been exceeded for this resource.
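// Editor's sketch (not generated code): draining every page for a prepared
// input. Per the note above, the input's tokenFilter must always specify the
// network; the filter fields themselves are omitted here.
//
//	err := svc.ListTokenBalancesPages(input,
//		func(page *managedblockchainquery.ListTokenBalancesOutput, lastPage bool) bool {
//			fmt.Println(page)
//			return true // continue until the final page
//		})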
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/managedblockchain-query-2023-05-04/ListTokenBalances +func (c *ManagedBlockchainQuery) ListTokenBalances(input *ListTokenBalancesInput) (*ListTokenBalancesOutput, error) { + req, out := c.ListTokenBalancesRequest(input) + return out, req.Send() +} + +// ListTokenBalancesWithContext is the same as ListTokenBalances with the addition of +// the ability to pass a context and additional request options. +// +// See ListTokenBalances for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ManagedBlockchainQuery) ListTokenBalancesWithContext(ctx aws.Context, input *ListTokenBalancesInput, opts ...request.Option) (*ListTokenBalancesOutput, error) { + req, out := c.ListTokenBalancesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListTokenBalancesPages iterates over the pages of a ListTokenBalances operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListTokenBalances method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListTokenBalances operation. +// pageNum := 0 +// err := client.ListTokenBalancesPages(params, +// func(page *managedblockchainquery.ListTokenBalancesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *ManagedBlockchainQuery) ListTokenBalancesPages(input *ListTokenBalancesInput, fn func(*ListTokenBalancesOutput, bool) bool) error { + return c.ListTokenBalancesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListTokenBalancesPagesWithContext same as ListTokenBalancesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ManagedBlockchainQuery) ListTokenBalancesPagesWithContext(ctx aws.Context, input *ListTokenBalancesInput, fn func(*ListTokenBalancesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListTokenBalancesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListTokenBalancesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListTokenBalancesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListTransactionEvents = "ListTransactionEvents" + +// ListTransactionEventsRequest generates a "aws/request.Request" representing the +// client's request for the ListTransactionEvents operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See ListTransactionEvents for more information on using the ListTransactionEvents +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListTransactionEventsRequest method. +// req, resp := client.ListTransactionEventsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/managedblockchain-query-2023-05-04/ListTransactionEvents +func (c *ManagedBlockchainQuery) ListTransactionEventsRequest(input *ListTransactionEventsInput) (req *request.Request, output *ListTransactionEventsOutput) { + op := &request.Operation{ + Name: opListTransactionEvents, + HTTPMethod: "POST", + HTTPPath: "/list-transaction-events", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListTransactionEventsInput{} + } + + output = &ListTransactionEventsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTransactionEvents API operation for Amazon Managed Blockchain Query. +// +// Lists the transaction events for a given transaction, returning an array of +// TransactionEvent objects. Each object contains details about one transaction +// event. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Managed Blockchain Query's +// API operation ListTransactionEvents for usage and error information. +// +// Returned Error Types: +// +// - ThrottlingException +// The request or operation couldn't be performed because a service is throttling +// requests. The most common source of throttling errors is when you create +// resources that exceed your service limit for this resource type. Request +// a limit increase or delete unused resources, if possible. +// +// - ValidationException +// The resource passed is invalid. +// +// - AccessDeniedException +// The Amazon Web Services account doesn’t have access to this resource. +// +// - InternalServerException +// The request processing has failed because of an internal error in the service. +// +// - ServiceQuotaExceededException +// The service quota has been exceeded for this resource. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/managedblockchain-query-2023-05-04/ListTransactionEvents +func (c *ManagedBlockchainQuery) ListTransactionEvents(input *ListTransactionEventsInput) (*ListTransactionEventsOutput, error) { + req, out := c.ListTransactionEventsRequest(input) + return out, req.Send() +} + +// ListTransactionEventsWithContext is the same as ListTransactionEvents with the addition of +// the ability to pass a context and additional request options. +// +// See ListTransactionEvents for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts.
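// Editor's sketch (not generated code): accumulating every event for one
// transaction with the context-aware paginator. The Events field name on the
// output is an assumption; the output shape is defined later in this file.
//
//	var events []*managedblockchainquery.TransactionEvent
//	err := svc.ListTransactionEventsPagesWithContext(ctx, input,
//		func(page *managedblockchainquery.ListTransactionEventsOutput, lastPage bool) bool {
//			events = append(events, page.Events...)
//			return true
//		})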
+func (c *ManagedBlockchainQuery) ListTransactionEventsWithContext(ctx aws.Context, input *ListTransactionEventsInput, opts ...request.Option) (*ListTransactionEventsOutput, error) {
+	req, out := c.ListTransactionEventsRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+// ListTransactionEventsPages iterates over the pages of a ListTransactionEvents operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListTransactionEvents method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+//	// Example iterating over at most 3 pages of a ListTransactionEvents operation.
+//	pageNum := 0
+//	err := client.ListTransactionEventsPages(params,
+//	    func(page *managedblockchainquery.ListTransactionEventsOutput, lastPage bool) bool {
+//	        pageNum++
+//	        fmt.Println(page)
+//	        return pageNum <= 3
+//	    })
+func (c *ManagedBlockchainQuery) ListTransactionEventsPages(input *ListTransactionEventsInput, fn func(*ListTransactionEventsOutput, bool) bool) error {
+	return c.ListTransactionEventsPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// ListTransactionEventsPagesWithContext is the same as ListTransactionEventsPages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *ManagedBlockchainQuery) ListTransactionEventsPagesWithContext(ctx aws.Context, input *ListTransactionEventsInput, fn func(*ListTransactionEventsOutput, bool) bool, opts ...request.Option) error {
+	p := request.Pagination{
+		NewRequest: func() (*request.Request, error) {
+			var inCpy *ListTransactionEventsInput
+			if input != nil {
+				tmp := *input
+				inCpy = &tmp
+			}
+			req, _ := c.ListTransactionEventsRequest(inCpy)
+			req.SetContext(ctx)
+			req.ApplyOptions(opts...)
+			return req, nil
+		},
+	}
+
+	for p.Next() {
+		if !fn(p.Page().(*ListTransactionEventsOutput), !p.HasNextPage()) {
+			break
+		}
+	}
+
+	return p.Err()
+}
+
+const opListTransactions = "ListTransactions"
+
+// ListTransactionsRequest generates a "aws/request.Request" representing the
+// client's request for the ListTransactions operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See ListTransactions for more information on using the ListTransactions
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//	// Example sending a request using the ListTransactionsRequest method.
+// req, resp := client.ListTransactionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/managedblockchain-query-2023-05-04/ListTransactions +func (c *ManagedBlockchainQuery) ListTransactionsRequest(input *ListTransactionsInput) (req *request.Request, output *ListTransactionsOutput) { + op := &request.Operation{ + Name: opListTransactions, + HTTPMethod: "POST", + HTTPPath: "/list-transactions", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListTransactionsInput{} + } + + output = &ListTransactionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTransactions API operation for Amazon Managed Blockchain Query. +// +// Lists all of the transactions on a given wallet address or to a specific +// contract. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Managed Blockchain Query's +// API operation ListTransactions for usage and error information. +// +// Returned Error Types: +// +// - ThrottlingException +// The request or operation couldn't be performed because a service is throttling +// requests. The most common source of throttling errors is when you create +// resources that exceed your service limit for this resource type. Request +// a limit increase or delete unused resources, if possible. +// +// - ValidationException +// The resource passed is invalid. +// +// - AccessDeniedException +// The Amazon Web Services account doesn’t have access to this resource. +// +// - InternalServerException +// The request processing has failed because of an internal error in the service. +// +// - ServiceQuotaExceededException +// The service quota has been exceeded for this resource. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/managedblockchain-query-2023-05-04/ListTransactions +func (c *ManagedBlockchainQuery) ListTransactions(input *ListTransactionsInput) (*ListTransactionsOutput, error) { + req, out := c.ListTransactionsRequest(input) + return out, req.Send() +} + +// ListTransactionsWithContext is the same as ListTransactions with the addition of +// the ability to pass a context and additional request options. +// +// See ListTransactions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ManagedBlockchainQuery) ListTransactionsWithContext(ctx aws.Context, input *ListTransactionsInput, opts ...request.Option) (*ListTransactionsOutput, error) { + req, out := c.ListTransactionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListTransactionsPages iterates over the pages of a ListTransactions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListTransactions method for more information on how to use this operation. 
+//
+// Note: This operation can generate multiple requests to a service.
+//
+//	// Example iterating over at most 3 pages of a ListTransactions operation.
+//	pageNum := 0
+//	err := client.ListTransactionsPages(params,
+//	    func(page *managedblockchainquery.ListTransactionsOutput, lastPage bool) bool {
+//	        pageNum++
+//	        fmt.Println(page)
+//	        return pageNum <= 3
+//	    })
+func (c *ManagedBlockchainQuery) ListTransactionsPages(input *ListTransactionsInput, fn func(*ListTransactionsOutput, bool) bool) error {
+	return c.ListTransactionsPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// ListTransactionsPagesWithContext is the same as ListTransactionsPages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *ManagedBlockchainQuery) ListTransactionsPagesWithContext(ctx aws.Context, input *ListTransactionsInput, fn func(*ListTransactionsOutput, bool) bool, opts ...request.Option) error {
+	p := request.Pagination{
+		NewRequest: func() (*request.Request, error) {
+			var inCpy *ListTransactionsInput
+			if input != nil {
+				tmp := *input
+				inCpy = &tmp
+			}
+			req, _ := c.ListTransactionsRequest(inCpy)
+			req.SetContext(ctx)
+			req.ApplyOptions(opts...)
+			return req, nil
+		},
+	}
+
+	for p.Next() {
+		if !fn(p.Page().(*ListTransactionsOutput), !p.HasNextPage()) {
+			break
+		}
+	}
+
+	return p.Err()
+}
+
+// The Amazon Web Services account doesn’t have access to this resource.
+type AccessDeniedException struct {
+	_            struct{}                  `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	// The container for the exception message.
+	Message_ *string `locationName:"message" min:"1" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AccessDeniedException) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AccessDeniedException) GoString() string {
+	return s.String()
+}
+
+func newErrorAccessDeniedException(v protocol.ResponseMetadata) error {
+	return &AccessDeniedException{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *AccessDeniedException) Code() string {
+	return "AccessDeniedException"
+}
+
+// Message returns the exception's message.
+func (s *AccessDeniedException) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *AccessDeniedException) OrigErr() error {
+	return nil
+}
+
+func (s *AccessDeniedException) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *AccessDeniedException) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for the request.
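+//
+// Code, Message, and RequestID are what callers typically log. A hedged
+// sketch (err is the error returned by any operation on this client;
+// errors.As and log are from the standard library):
+//
+//	var ade *managedblockchainquery.AccessDeniedException
+//	if errors.As(err, &ade) {
+//	    log.Printf("%s (request %s): %s", ade.Code(), ade.RequestID(), ade.Message())
+//	}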
+func (s *AccessDeniedException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+// Error generated from a failed BatchGetTokenBalance request.
+type BatchGetTokenBalanceErrorItem struct {
+	_ struct{} `type:"structure"`
+
+	// The container for time.
+	AtBlockchainInstant *BlockchainInstant `locationName:"atBlockchainInstant" type:"structure"`
+
+	// The error code associated with the error.
+	//
+	// ErrorCode is a required field
+	ErrorCode *string `locationName:"errorCode" type:"string" required:"true"`
+
+	// The message associated with the error.
+	//
+	// ErrorMessage is a required field
+	ErrorMessage *string `locationName:"errorMessage" type:"string" required:"true"`
+
+	// The type of error.
+	//
+	// ErrorType is a required field
+	ErrorType *string `locationName:"errorType" type:"string" required:"true" enum:"ErrorType"`
+
+	// The container for the identifier of the owner.
+	OwnerIdentifier *OwnerIdentifier `locationName:"ownerIdentifier" type:"structure"`
+
+	// The container for the identifier for the token, including the unique token
+	// ID and its blockchain network.
+	//
+	// Only the native tokens BTC, ETH, and the ERC-20, ERC-721, and ERC-1155 token
+	// standards are supported.
+	TokenIdentifier *TokenIdentifier `locationName:"tokenIdentifier" type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s BatchGetTokenBalanceErrorItem) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s BatchGetTokenBalanceErrorItem) GoString() string {
+	return s.String()
+}
+
+// SetAtBlockchainInstant sets the AtBlockchainInstant field's value.
+func (s *BatchGetTokenBalanceErrorItem) SetAtBlockchainInstant(v *BlockchainInstant) *BatchGetTokenBalanceErrorItem {
+	s.AtBlockchainInstant = v
+	return s
+}
+
+// SetErrorCode sets the ErrorCode field's value.
+func (s *BatchGetTokenBalanceErrorItem) SetErrorCode(v string) *BatchGetTokenBalanceErrorItem {
+	s.ErrorCode = &v
+	return s
+}
+
+// SetErrorMessage sets the ErrorMessage field's value.
+func (s *BatchGetTokenBalanceErrorItem) SetErrorMessage(v string) *BatchGetTokenBalanceErrorItem {
+	s.ErrorMessage = &v
+	return s
+}
+
+// SetErrorType sets the ErrorType field's value.
+func (s *BatchGetTokenBalanceErrorItem) SetErrorType(v string) *BatchGetTokenBalanceErrorItem {
+	s.ErrorType = &v
+	return s
+}
+
+// SetOwnerIdentifier sets the OwnerIdentifier field's value.
+func (s *BatchGetTokenBalanceErrorItem) SetOwnerIdentifier(v *OwnerIdentifier) *BatchGetTokenBalanceErrorItem {
+	s.OwnerIdentifier = v
+	return s
+}
+
+// SetTokenIdentifier sets the TokenIdentifier field's value.
+func (s *BatchGetTokenBalanceErrorItem) SetTokenIdentifier(v *TokenIdentifier) *BatchGetTokenBalanceErrorItem {
+	s.TokenIdentifier = v
+	return s
+}
+
+type BatchGetTokenBalanceInput struct {
+	_ struct{} `type:"structure"`
+
+	// An array of GetTokenBalanceInput objects whose balances are being requested.
+	GetTokenBalanceInputs []*BatchGetTokenBalanceInputItem `locationName:"getTokenBalanceInputs" min:"1" type:"list"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s BatchGetTokenBalanceInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s BatchGetTokenBalanceInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *BatchGetTokenBalanceInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "BatchGetTokenBalanceInput"}
+	if s.GetTokenBalanceInputs != nil && len(s.GetTokenBalanceInputs) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("GetTokenBalanceInputs", 1))
+	}
+	if s.GetTokenBalanceInputs != nil {
+		for i, v := range s.GetTokenBalanceInputs {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GetTokenBalanceInputs", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetGetTokenBalanceInputs sets the GetTokenBalanceInputs field's value.
+func (s *BatchGetTokenBalanceInput) SetGetTokenBalanceInputs(v []*BatchGetTokenBalanceInputItem) *BatchGetTokenBalanceInput {
+	s.GetTokenBalanceInputs = v
+	return s
+}
+
+// The container for the input for getting a token balance.
+type BatchGetTokenBalanceInputItem struct {
+	_ struct{} `type:"structure"`
+
+	// The container for time.
+	AtBlockchainInstant *BlockchainInstant `locationName:"atBlockchainInstant" type:"structure"`
+
+	// The container for the identifier of the owner.
+	//
+	// OwnerIdentifier is a required field
+	OwnerIdentifier *OwnerIdentifier `locationName:"ownerIdentifier" type:"structure" required:"true"`
+
+	// The container for the identifier for the token, including the unique token
+	// ID and its blockchain network.
+	//
+	// Only the native tokens BTC, ETH, and the ERC-20, ERC-721, and ERC-1155 token
+	// standards are supported.
+	//
+	// TokenIdentifier is a required field
+	TokenIdentifier *TokenIdentifier `locationName:"tokenIdentifier" type:"structure" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s BatchGetTokenBalanceInputItem) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s BatchGetTokenBalanceInputItem) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
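+//
+// For example, a sketch of building one item (the address is a placeholder,
+// the network value assumes QueryNetwork's BITCOIN_MAINNET, and the
+// TokenIdentifier setter is the generated one assumed to exist for its
+// Network field):
+//
+//	item := (&managedblockchainquery.BatchGetTokenBalanceInputItem{}).
+//	    SetOwnerIdentifier((&managedblockchainquery.OwnerIdentifier{}).
+//	        SetAddress("bc1q-placeholder")).
+//	    SetTokenIdentifier((&managedblockchainquery.TokenIdentifier{}).
+//	        SetNetwork("BITCOIN_MAINNET"))
+//	err := item.Validate() // non-nil unless both identifiers are set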
+func (s *BatchGetTokenBalanceInputItem) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "BatchGetTokenBalanceInputItem"}
+	if s.OwnerIdentifier == nil {
+		invalidParams.Add(request.NewErrParamRequired("OwnerIdentifier"))
+	}
+	if s.TokenIdentifier == nil {
+		invalidParams.Add(request.NewErrParamRequired("TokenIdentifier"))
+	}
+	if s.OwnerIdentifier != nil {
+		if err := s.OwnerIdentifier.Validate(); err != nil {
+			invalidParams.AddNested("OwnerIdentifier", err.(request.ErrInvalidParams))
+		}
+	}
+	if s.TokenIdentifier != nil {
+		if err := s.TokenIdentifier.Validate(); err != nil {
+			invalidParams.AddNested("TokenIdentifier", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetAtBlockchainInstant sets the AtBlockchainInstant field's value.
+func (s *BatchGetTokenBalanceInputItem) SetAtBlockchainInstant(v *BlockchainInstant) *BatchGetTokenBalanceInputItem {
+	s.AtBlockchainInstant = v
+	return s
+}
+
+// SetOwnerIdentifier sets the OwnerIdentifier field's value.
+func (s *BatchGetTokenBalanceInputItem) SetOwnerIdentifier(v *OwnerIdentifier) *BatchGetTokenBalanceInputItem {
+	s.OwnerIdentifier = v
+	return s
+}
+
+// SetTokenIdentifier sets the TokenIdentifier field's value.
+func (s *BatchGetTokenBalanceInputItem) SetTokenIdentifier(v *TokenIdentifier) *BatchGetTokenBalanceInputItem {
+	s.TokenIdentifier = v
+	return s
+}
+
+type BatchGetTokenBalanceOutput struct {
+	_ struct{} `type:"structure"`
+
+	// An array of BatchGetTokenBalanceErrorItem objects returned from the request.
+	//
+	// Errors is a required field
+	Errors []*BatchGetTokenBalanceErrorItem `locationName:"errors" type:"list" required:"true"`
+
+	// An array of BatchGetTokenBalanceOutputItem objects returned in the response.
+	//
+	// TokenBalances is a required field
+	TokenBalances []*BatchGetTokenBalanceOutputItem `locationName:"tokenBalances" type:"list" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s BatchGetTokenBalanceOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s BatchGetTokenBalanceOutput) GoString() string {
+	return s.String()
+}
+
+// SetErrors sets the Errors field's value.
+func (s *BatchGetTokenBalanceOutput) SetErrors(v []*BatchGetTokenBalanceErrorItem) *BatchGetTokenBalanceOutput {
+	s.Errors = v
+	return s
+}
+
+// SetTokenBalances sets the TokenBalances field's value.
+func (s *BatchGetTokenBalanceOutput) SetTokenBalances(v []*BatchGetTokenBalanceOutputItem) *BatchGetTokenBalanceOutput {
+	s.TokenBalances = v
+	return s
+}
+
+// The container for the properties of a token balance output.
+type BatchGetTokenBalanceOutputItem struct {
+	_ struct{} `type:"structure"`
+
+	// The container for time.
+	//
+	// AtBlockchainInstant is a required field
+	AtBlockchainInstant *BlockchainInstant `locationName:"atBlockchainInstant" type:"structure" required:"true"`
+
+	// The container for the token balance.
+	//
+	// Balance is a required field
+	Balance *string `locationName:"balance" type:"string" required:"true"`
+
+	// The container for time.
+	LastUpdatedTime *BlockchainInstant `locationName:"lastUpdatedTime" type:"structure"`
+
+	// The container for the identifier of the owner.
+	OwnerIdentifier *OwnerIdentifier `locationName:"ownerIdentifier" type:"structure"`
+
+	// The container for the identifier for the token, including the unique token
+	// ID and its blockchain network.
+	//
+	// Only the native tokens BTC, ETH, and the ERC-20, ERC-721, and ERC-1155 token
+	// standards are supported.
+	TokenIdentifier *TokenIdentifier `locationName:"tokenIdentifier" type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s BatchGetTokenBalanceOutputItem) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s BatchGetTokenBalanceOutputItem) GoString() string {
+	return s.String()
+}
+
+// SetAtBlockchainInstant sets the AtBlockchainInstant field's value.
+func (s *BatchGetTokenBalanceOutputItem) SetAtBlockchainInstant(v *BlockchainInstant) *BatchGetTokenBalanceOutputItem {
+	s.AtBlockchainInstant = v
+	return s
+}
+
+// SetBalance sets the Balance field's value.
+func (s *BatchGetTokenBalanceOutputItem) SetBalance(v string) *BatchGetTokenBalanceOutputItem {
+	s.Balance = &v
+	return s
+}
+
+// SetLastUpdatedTime sets the LastUpdatedTime field's value.
+func (s *BatchGetTokenBalanceOutputItem) SetLastUpdatedTime(v *BlockchainInstant) *BatchGetTokenBalanceOutputItem {
+	s.LastUpdatedTime = v
+	return s
+}
+
+// SetOwnerIdentifier sets the OwnerIdentifier field's value.
+func (s *BatchGetTokenBalanceOutputItem) SetOwnerIdentifier(v *OwnerIdentifier) *BatchGetTokenBalanceOutputItem {
+	s.OwnerIdentifier = v
+	return s
+}
+
+// SetTokenIdentifier sets the TokenIdentifier field's value.
+func (s *BatchGetTokenBalanceOutputItem) SetTokenIdentifier(v *TokenIdentifier) *BatchGetTokenBalanceOutputItem {
+	s.TokenIdentifier = v
+	return s
+}
+
+// The container for time.
+type BlockchainInstant struct {
+	_ struct{} `type:"structure"`
+
+	// The container of the timestamp of the blockchain instant.
+	//
+	// This timestamp will only be recorded up to the second.
+	Time *time.Time `locationName:"time" type:"timestamp"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s BlockchainInstant) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s BlockchainInstant) GoString() string {
+	return s.String()
+}
+
+// SetTime sets the Time field's value.
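+//
+// For example, a minimal sketch (the timestamp is illustrative; the service
+// records it only up to the second):
+//
+//	instant := (&managedblockchainquery.BlockchainInstant{}).
+//	    SetTime(time.Date(2023, 7, 26, 0, 0, 0, 0, time.UTC))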
+func (s *BlockchainInstant) SetTime(v time.Time) *BlockchainInstant {
+	s.Time = &v
+	return s
+}
+
+type GetTokenBalanceInput struct {
+	_ struct{} `type:"structure"`
+
+	// The time at which the token balance is requested, or the current time if
+	// a time is not provided in the request.
+	//
+	// This time will only be recorded up to the second.
+	AtBlockchainInstant *BlockchainInstant `locationName:"atBlockchainInstant" type:"structure"`
+
+	// The container for the identifier for the owner.
+	//
+	// OwnerIdentifier is a required field
+	OwnerIdentifier *OwnerIdentifier `locationName:"ownerIdentifier" type:"structure" required:"true"`
+
+	// The container for the identifier for the token, including the unique token
+	// ID and its blockchain network.
+	//
+	// TokenIdentifier is a required field
+	TokenIdentifier *TokenIdentifier `locationName:"tokenIdentifier" type:"structure" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetTokenBalanceInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetTokenBalanceInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetTokenBalanceInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetTokenBalanceInput"}
+	if s.OwnerIdentifier == nil {
+		invalidParams.Add(request.NewErrParamRequired("OwnerIdentifier"))
+	}
+	if s.TokenIdentifier == nil {
+		invalidParams.Add(request.NewErrParamRequired("TokenIdentifier"))
+	}
+	if s.OwnerIdentifier != nil {
+		if err := s.OwnerIdentifier.Validate(); err != nil {
+			invalidParams.AddNested("OwnerIdentifier", err.(request.ErrInvalidParams))
+		}
+	}
+	if s.TokenIdentifier != nil {
+		if err := s.TokenIdentifier.Validate(); err != nil {
+			invalidParams.AddNested("TokenIdentifier", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetAtBlockchainInstant sets the AtBlockchainInstant field's value.
+func (s *GetTokenBalanceInput) SetAtBlockchainInstant(v *BlockchainInstant) *GetTokenBalanceInput {
+	s.AtBlockchainInstant = v
+	return s
+}
+
+// SetOwnerIdentifier sets the OwnerIdentifier field's value.
+func (s *GetTokenBalanceInput) SetOwnerIdentifier(v *OwnerIdentifier) *GetTokenBalanceInput {
+	s.OwnerIdentifier = v
+	return s
+}
+
+// SetTokenIdentifier sets the TokenIdentifier field's value.
+func (s *GetTokenBalanceInput) SetTokenIdentifier(v *TokenIdentifier) *GetTokenBalanceInput {
+	s.TokenIdentifier = v
+	return s
+}
+
+type GetTokenBalanceOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The container for time.
+	//
+	// AtBlockchainInstant is a required field
+	AtBlockchainInstant *BlockchainInstant `locationName:"atBlockchainInstant" type:"structure" required:"true"`
+
+	// The container for the token balance.
+	//
+	// Balance is a required field
+	Balance *string `locationName:"balance" type:"string" required:"true"`
+
+	// The container for time.
+	LastUpdatedTime *BlockchainInstant `locationName:"lastUpdatedTime" type:"structure"`
+
+	// The container for the identifier of the owner.
+	OwnerIdentifier *OwnerIdentifier `locationName:"ownerIdentifier" type:"structure"`
+
+	// The container for the identifier for the token, including the unique token
+	// ID and its blockchain network.
+	//
+	// Only the native tokens BTC, ETH, and the ERC-20, ERC-721, and ERC-1155 token
+	// standards are supported.
+	TokenIdentifier *TokenIdentifier `locationName:"tokenIdentifier" type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetTokenBalanceOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetTokenBalanceOutput) GoString() string {
+	return s.String()
+}
+
+// SetAtBlockchainInstant sets the AtBlockchainInstant field's value.
+func (s *GetTokenBalanceOutput) SetAtBlockchainInstant(v *BlockchainInstant) *GetTokenBalanceOutput {
+	s.AtBlockchainInstant = v
+	return s
+}
+
+// SetBalance sets the Balance field's value.
+func (s *GetTokenBalanceOutput) SetBalance(v string) *GetTokenBalanceOutput {
+	s.Balance = &v
+	return s
+}
+
+// SetLastUpdatedTime sets the LastUpdatedTime field's value.
+func (s *GetTokenBalanceOutput) SetLastUpdatedTime(v *BlockchainInstant) *GetTokenBalanceOutput {
+	s.LastUpdatedTime = v
+	return s
+}
+
+// SetOwnerIdentifier sets the OwnerIdentifier field's value.
+func (s *GetTokenBalanceOutput) SetOwnerIdentifier(v *OwnerIdentifier) *GetTokenBalanceOutput {
+	s.OwnerIdentifier = v
+	return s
+}
+
+// SetTokenIdentifier sets the TokenIdentifier field's value.
+func (s *GetTokenBalanceOutput) SetTokenIdentifier(v *TokenIdentifier) *GetTokenBalanceOutput {
+	s.TokenIdentifier = v
+	return s
+}
+
+type GetTransactionInput struct {
+	_ struct{} `type:"structure"`
+
+	// The blockchain network where the transaction occurred.
+	//
+	// Network is a required field
+	Network *string `locationName:"network" type:"string" required:"true" enum:"QueryNetwork"`
+
+	// The hash of the transaction. It is generated whenever a transaction is verified
+	// and added to the blockchain.
+	//
+	// TransactionHash is a required field
+	TransactionHash *string `locationName:"transactionHash" type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetTransactionInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetTransactionInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
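+//
+// For example, a sketch (the hash is a placeholder; the network value
+// assumes QueryNetwork's ETHEREUM_MAINNET):
+//
+//	in := &managedblockchainquery.GetTransactionInput{
+//	    Network:         aws.String("ETHEREUM_MAINNET"),
+//	    TransactionHash: aws.String("0x-placeholder"),
+//	}
+//	err := in.Validate() // non-nil if Network or TransactionHash is unset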
+func (s *GetTransactionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetTransactionInput"} + if s.Network == nil { + invalidParams.Add(request.NewErrParamRequired("Network")) + } + if s.TransactionHash == nil { + invalidParams.Add(request.NewErrParamRequired("TransactionHash")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetNetwork sets the Network field's value. +func (s *GetTransactionInput) SetNetwork(v string) *GetTransactionInput { + s.Network = &v + return s +} + +// SetTransactionHash sets the TransactionHash field's value. +func (s *GetTransactionInput) SetTransactionHash(v string) *GetTransactionInput { + s.TransactionHash = &v + return s +} + +type GetTransactionOutput struct { + _ struct{} `type:"structure"` + + // Contains the details of the transaction. + // + // Transaction is a required field + Transaction *Transaction `locationName:"transaction" type:"structure" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetTransactionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetTransactionOutput) GoString() string { + return s.String() +} + +// SetTransaction sets the Transaction field's value. +func (s *GetTransactionOutput) SetTransaction(v *Transaction) *GetTransactionOutput { + s.Transaction = v + return s +} + +// The request processing has failed because of an internal error in the service. +type InternalServerException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // The container for the exception message. + Message_ *string `locationName:"message" min:"1" type:"string"` + + // The container of the retryAfterSeconds value. + RetryAfterSeconds *int64 `location:"header" locationName:"Retry-After" type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InternalServerException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InternalServerException) GoString() string { + return s.String() +} + +func newErrorInternalServerException(v protocol.ResponseMetadata) error { + return &InternalServerException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InternalServerException) Code() string { + return "InternalServerException" +} + +// Message returns the exception's message. +func (s *InternalServerException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. 
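+//
+// Because OrigErr is always nil, callers unwrap this type directly. A hedged
+// sketch of honoring the Retry-After hint carried by this exception (err is
+// the error from any operation on this client; errors.As is from the
+// standard library):
+//
+//	var ise *managedblockchainquery.InternalServerException
+//	if errors.As(err, &ise) && ise.RetryAfterSeconds != nil {
+//	    time.Sleep(time.Duration(*ise.RetryAfterSeconds) * time.Second)
+//	    // then retry the call
+//	}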
+func (s *InternalServerException) OrigErr() error {
+	return nil
+}
+
+func (s *InternalServerException) Error() string {
+	return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *InternalServerException) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for the request.
+func (s *InternalServerException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+type ListTokenBalancesInput struct {
+	_ struct{} `type:"structure"`
+
+	// The maximum number of token balances to return.
+	MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"`
+
+	// The pagination token that indicates the next set of results to retrieve.
+	NextToken *string `locationName:"nextToken" type:"string"`
+
+	// The contract or wallet address on the blockchain network by which to filter
+	// the request. You must specify the address property of the ownerFilter when
+	// listing balances of tokens owned by the address.
+	OwnerFilter *OwnerFilter `locationName:"ownerFilter" type:"structure"`
+
+	// The contract address or a token identifier on the blockchain network by which
+	// to filter the request. You must specify the contractAddress property of this
+	// container when listing tokens minted by a contract.
+	//
+	// You must always specify the network property of this container when using
+	// this operation.
+	//
+	// TokenFilter is a required field
+	TokenFilter *TokenFilter `locationName:"tokenFilter" type:"structure" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListTokenBalancesInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListTokenBalancesInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListTokenBalancesInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListTokenBalancesInput"}
+	if s.MaxResults != nil && *s.MaxResults < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
+	}
+	if s.TokenFilter == nil {
+		invalidParams.Add(request.NewErrParamRequired("TokenFilter"))
+	}
+	if s.OwnerFilter != nil {
+		if err := s.OwnerFilter.Validate(); err != nil {
+			invalidParams.AddNested("OwnerFilter", err.(request.ErrInvalidParams))
+		}
+	}
+	if s.TokenFilter != nil {
+		if err := s.TokenFilter.Validate(); err != nil {
+			invalidParams.AddNested("TokenFilter", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *ListTokenBalancesInput) SetMaxResults(v int64) *ListTokenBalancesInput {
+	s.MaxResults = &v
+	return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListTokenBalancesInput) SetNextToken(v string) *ListTokenBalancesInput {
+	s.NextToken = &v
+	return s
+}
+
+// SetOwnerFilter sets the OwnerFilter field's value.
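+//
+// The generated setters can be chained when building an input. A sketch
+// (addresses are placeholders; the network value assumes QueryNetwork's
+// ETHEREUM_MAINNET, and TokenFilter's setters are the generated ones
+// assumed to exist for its fields):
+//
+//	in := (&managedblockchainquery.ListTokenBalancesInput{}).
+//	    SetOwnerFilter((&managedblockchainquery.OwnerFilter{}).
+//	        SetAddress("0x-placeholder")).
+//	    SetTokenFilter((&managedblockchainquery.TokenFilter{}).
+//	        SetNetwork("ETHEREUM_MAINNET")).
+//	    SetMaxResults(50)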
+func (s *ListTokenBalancesInput) SetOwnerFilter(v *OwnerFilter) *ListTokenBalancesInput {
+	s.OwnerFilter = v
+	return s
+}
+
+// SetTokenFilter sets the TokenFilter field's value.
+func (s *ListTokenBalancesInput) SetTokenFilter(v *TokenFilter) *ListTokenBalancesInput {
+	s.TokenFilter = v
+	return s
+}
+
+type ListTokenBalancesOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The pagination token that indicates the next set of results to retrieve.
+	NextToken *string `locationName:"nextToken" type:"string"`
+
+	// An array of TokenBalance objects. Each object contains details about the
+	// token balance.
+	//
+	// TokenBalances is a required field
+	TokenBalances []*TokenBalance `locationName:"tokenBalances" type:"list" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListTokenBalancesOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListTokenBalancesOutput) GoString() string {
+	return s.String()
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListTokenBalancesOutput) SetNextToken(v string) *ListTokenBalancesOutput {
+	s.NextToken = &v
+	return s
+}
+
+// SetTokenBalances sets the TokenBalances field's value.
+func (s *ListTokenBalancesOutput) SetTokenBalances(v []*TokenBalance) *ListTokenBalancesOutput {
+	s.TokenBalances = v
+	return s
+}
+
+type ListTransactionEventsInput struct {
+	_ struct{} `type:"structure"`
+
+	// The maximum number of transaction events to list.
+	//
+	// Even if additional results can be retrieved, the request can return fewer
+	// results than maxResults or an empty array of results.
+	//
+	// To retrieve the next set of results, make another request with the returned
+	// nextToken value. The value of nextToken is null when there are no more results
+	// to return.
+	MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"`
+
+	// The blockchain network where the transaction events occurred.
+	//
+	// Network is a required field
+	Network *string `locationName:"network" type:"string" required:"true" enum:"QueryNetwork"`
+
+	// The pagination token that indicates the next set of results to retrieve.
+	NextToken *string `locationName:"nextToken" type:"string"`
+
+	// The hash of the transaction. It is generated whenever a transaction is verified
+	// and added to the blockchain.
+	//
+	// TransactionHash is a required field
+	TransactionHash *string `locationName:"transactionHash" type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListTransactionEventsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListTransactionEventsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListTransactionEventsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListTransactionEventsInput"}
+	if s.MaxResults != nil && *s.MaxResults < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
+	}
+	if s.Network == nil {
+		invalidParams.Add(request.NewErrParamRequired("Network"))
+	}
+	if s.TransactionHash == nil {
+		invalidParams.Add(request.NewErrParamRequired("TransactionHash"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *ListTransactionEventsInput) SetMaxResults(v int64) *ListTransactionEventsInput {
+	s.MaxResults = &v
+	return s
+}
+
+// SetNetwork sets the Network field's value.
+func (s *ListTransactionEventsInput) SetNetwork(v string) *ListTransactionEventsInput {
+	s.Network = &v
+	return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListTransactionEventsInput) SetNextToken(v string) *ListTransactionEventsInput {
+	s.NextToken = &v
+	return s
+}
+
+// SetTransactionHash sets the TransactionHash field's value.
+func (s *ListTransactionEventsInput) SetTransactionHash(v string) *ListTransactionEventsInput {
+	s.TransactionHash = &v
+	return s
+}
+
+type ListTransactionEventsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// An array of TransactionEvent objects. Each object contains details about
+	// the transaction event.
+	//
+	// Events is a required field
+	Events []*TransactionEvent `locationName:"events" type:"list" required:"true"`
+
+	// The pagination token that indicates the next set of results to retrieve.
+	NextToken *string `locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListTransactionEventsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListTransactionEventsOutput) GoString() string {
+	return s.String()
+}
+
+// SetEvents sets the Events field's value.
+func (s *ListTransactionEventsOutput) SetEvents(v []*TransactionEvent) *ListTransactionEventsOutput {
+	s.Events = v
+	return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListTransactionEventsOutput) SetNextToken(v string) *ListTransactionEventsOutput {
+	s.NextToken = &v
+	return s
+}
+
+type ListTransactionsInput struct {
+	_ struct{} `type:"structure"`
+
+	// The address (either a contract or a wallet) whose transactions are being
+	// requested.
+	//
+	// Address is a required field
+	Address *string `locationName:"address" type:"string" required:"true"`
+
+	// The container for time.
+	FromBlockchainInstant *BlockchainInstant `locationName:"fromBlockchainInstant" type:"structure"`
+
+	// The maximum number of transactions to list.
+	//
+	// Even if additional results can be retrieved, the request can return fewer
+	// results than maxResults or an empty array of results.
+	//
+	// To retrieve the next set of results, make another request with the returned
+	// nextToken value. The value of nextToken is null when there are no more results
+	// to return.
+	MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"`
+
+	// The blockchain network where the transactions occurred.
+	//
+	// Network is a required field
+	Network *string `locationName:"network" type:"string" required:"true" enum:"QueryNetwork"`
+
+	// The pagination token that indicates the next set of results to retrieve.
+	NextToken *string `locationName:"nextToken" type:"string"`
+
+	// Sorts items in ascending order if the first page starts at fromTime. Sorts
+	// items in descending order if the first page starts at toTime.
+	Sort *ListTransactionsSort `locationName:"sort" type:"structure"`
+
+	// The container for time.
+	ToBlockchainInstant *BlockchainInstant `locationName:"toBlockchainInstant" type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListTransactionsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListTransactionsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListTransactionsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListTransactionsInput"}
+	if s.Address == nil {
+		invalidParams.Add(request.NewErrParamRequired("Address"))
+	}
+	if s.MaxResults != nil && *s.MaxResults < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
+	}
+	if s.Network == nil {
+		invalidParams.Add(request.NewErrParamRequired("Network"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetAddress sets the Address field's value.
+func (s *ListTransactionsInput) SetAddress(v string) *ListTransactionsInput {
+	s.Address = &v
+	return s
+}
+
+// SetFromBlockchainInstant sets the FromBlockchainInstant field's value.
+func (s *ListTransactionsInput) SetFromBlockchainInstant(v *BlockchainInstant) *ListTransactionsInput {
+	s.FromBlockchainInstant = v
+	return s
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *ListTransactionsInput) SetMaxResults(v int64) *ListTransactionsInput {
+	s.MaxResults = &v
+	return s
+}
+
+// SetNetwork sets the Network field's value.
+func (s *ListTransactionsInput) SetNetwork(v string) *ListTransactionsInput {
+	s.Network = &v
+	return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListTransactionsInput) SetNextToken(v string) *ListTransactionsInput {
+	s.NextToken = &v
+	return s
+}
+
+// SetSort sets the Sort field's value.
+func (s *ListTransactionsInput) SetSort(v *ListTransactionsSort) *ListTransactionsInput {
+	s.Sort = v
+	return s
+}
+
+// SetToBlockchainInstant sets the ToBlockchainInstant field's value.
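+//
+// The from/to instants bound the listing window. A sketch using only the
+// setters above (the address is a placeholder; the network value assumes
+// QueryNetwork's ETHEREUM_MAINNET):
+//
+//	in := (&managedblockchainquery.ListTransactionsInput{}).
+//	    SetAddress("0x-placeholder").
+//	    SetNetwork("ETHEREUM_MAINNET").
+//	    SetFromBlockchainInstant((&managedblockchainquery.BlockchainInstant{}).
+//	        SetTime(time.Now().Add(-24 * time.Hour))).
+//	    SetToBlockchainInstant((&managedblockchainquery.BlockchainInstant{}).
+//	        SetTime(time.Now()))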
+func (s *ListTransactionsInput) SetToBlockchainInstant(v *BlockchainInstant) *ListTransactionsInput {
+	s.ToBlockchainInstant = v
+	return s
+}
+
+type ListTransactionsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The pagination token that indicates the next set of results to retrieve.
+	NextToken *string `locationName:"nextToken" type:"string"`
+
+	// The array of transactions returned by the request.
+	//
+	// Transactions is a required field
+	Transactions []*TransactionOutputItem `locationName:"transactions" type:"list" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListTransactionsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListTransactionsOutput) GoString() string {
+	return s.String()
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListTransactionsOutput) SetNextToken(v string) *ListTransactionsOutput {
+	s.NextToken = &v
+	return s
+}
+
+// SetTransactions sets the Transactions field's value.
+func (s *ListTransactionsOutput) SetTransactions(v []*TransactionOutputItem) *ListTransactionsOutput {
+	s.Transactions = v
+	return s
+}
+
+// The container for determining how the ListTransactions results will be sorted.
+type ListTransactionsSort struct {
+	_ struct{} `type:"structure"`
+
+	// The property by which the results will be sorted. Defaults to the value
+	// TRANSACTION_TIMESTAMP.
+	SortBy *string `locationName:"sortBy" type:"string" enum:"ListTransactionsSortBy"`
+
+	// The container for the sort order for ListTransactions. The SortOrder field
+	// only accepts the values ASCENDING and DESCENDING. If SortOrder is not provided,
+	// it defaults to ASCENDING.
+	SortOrder *string `locationName:"sortOrder" type:"string" enum:"SortOrder"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListTransactionsSort) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListTransactionsSort) GoString() string {
+	return s.String()
+}
+
+// SetSortBy sets the SortBy field's value.
+func (s *ListTransactionsSort) SetSortBy(v string) *ListTransactionsSort {
+	s.SortBy = &v
+	return s
+}
+
+// SetSortOrder sets the SortOrder field's value.
+func (s *ListTransactionsSort) SetSortOrder(v string) *ListTransactionsSort {
+	s.SortOrder = &v
+	return s
+}
+
+// The container for the owner information to filter by.
+type OwnerFilter struct {
+	_ struct{} `type:"structure"`
+
+	// The contract or wallet address.
+	//
+	// Address is a required field
+	Address *string `locationName:"address" type:"string" required:"true"`
+}
+
+// String returns the string representation.
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OwnerFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OwnerFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *OwnerFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OwnerFilter"} + if s.Address == nil { + invalidParams.Add(request.NewErrParamRequired("Address")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAddress sets the Address field's value. +func (s *OwnerFilter) SetAddress(v string) *OwnerFilter { + s.Address = &v + return s +} + +// The container for the identifier of the owner. +type OwnerIdentifier struct { + _ struct{} `type:"structure"` + + // The contract or wallet address for the owner. + // + // Address is a required field + Address *string `locationName:"address" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OwnerIdentifier) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OwnerIdentifier) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *OwnerIdentifier) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OwnerIdentifier"} + if s.Address == nil { + invalidParams.Add(request.NewErrParamRequired("Address")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAddress sets the Address field's value. +func (s *OwnerIdentifier) SetAddress(v string) *OwnerIdentifier { + s.Address = &v + return s +} + +// The resource was not found. +type ResourceNotFoundException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // The container for the exception message. + Message_ *string `locationName:"message" min:"1" type:"string"` + + // The resourceId of the resource that caused the exception. + // + // ResourceId is a required field + ResourceId *string `locationName:"resourceId" type:"string" required:"true"` + + // The resourceType of the resource that caused the exception. + // + // ResourceType is a required field + ResourceType *string `locationName:"resourceType" type:"string" required:"true" enum:"ResourceType"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s ResourceNotFoundException) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ResourceNotFoundException) GoString() string {
+	return s.String()
+}
+
+func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error {
+	return &ResourceNotFoundException{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *ResourceNotFoundException) Code() string {
+	return "ResourceNotFoundException"
+}
+
+// Message returns the exception's message.
+func (s *ResourceNotFoundException) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *ResourceNotFoundException) OrigErr() error {
+	return nil
+}
+
+func (s *ResourceNotFoundException) Error() string {
+	return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *ResourceNotFoundException) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for the request.
+func (s *ResourceNotFoundException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+// The service quota has been exceeded for this resource.
+type ServiceQuotaExceededException struct {
+	_            struct{}                  `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	// The container for the exception message.
+	Message_ *string `locationName:"message" min:"1" type:"string"`
+
+	// The container for the quotaCode.
+	//
+	// QuotaCode is a required field
+	QuotaCode *string `locationName:"quotaCode" type:"string" required:"true"`
+
+	// The resourceId of the resource that caused the exception.
+	//
+	// ResourceId is a required field
+	ResourceId *string `locationName:"resourceId" type:"string" required:"true"`
+
+	// The resourceType of the resource that caused the exception.
+	//
+	// ResourceType is a required field
+	ResourceType *string `locationName:"resourceType" type:"string" required:"true" enum:"ResourceType"`
+
+	// The container for the serviceCode.
+	//
+	// ServiceCode is a required field
+	ServiceCode *string `locationName:"serviceCode" type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ServiceQuotaExceededException) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ServiceQuotaExceededException) GoString() string {
+	return s.String()
+}
+
+func newErrorServiceQuotaExceededException(v protocol.ResponseMetadata) error {
+	return &ServiceQuotaExceededException{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *ServiceQuotaExceededException) Code() string {
+	return "ServiceQuotaExceededException"
+}
+
+// Message returns the exception's message.
+func (s *ServiceQuotaExceededException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ServiceQuotaExceededException) OrigErr() error { + return nil +} + +func (s *ServiceQuotaExceededException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ServiceQuotaExceededException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ServiceQuotaExceededException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The request or operation couldn't be performed because a service is throttling +// requests. The most common source of throttling errors is when you create +// resources that exceed your service limit for this resource type. Request +// a limit increase or delete unused resources, if possible. +type ThrottlingException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // The container for the exception message. + Message_ *string `locationName:"message" min:"1" type:"string"` + + // The container for the quotaCode. + // + // QuotaCode is a required field + QuotaCode *string `locationName:"quotaCode" type:"string" required:"true"` + + // The container of the retryAfterSeconds value. + RetryAfterSeconds *int64 `location:"header" locationName:"Retry-After" type:"integer"` + + // The container for the serviceCode. + // + // ServiceCode is a required field + ServiceCode *string `locationName:"serviceCode" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ThrottlingException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ThrottlingException) GoString() string { + return s.String() +} + +func newErrorThrottlingException(v protocol.ResponseMetadata) error { + return &ThrottlingException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ThrottlingException) Code() string { + return "ThrottlingException" +} + +// Message returns the exception's message. +func (s *ThrottlingException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ThrottlingException) OrigErr() error { + return nil +} + +func (s *ThrottlingException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ThrottlingException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ThrottlingException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The balance of the token. 
+type TokenBalance struct { + _ struct{} `type:"structure"` + + // The time for when the TokenBalance is requested or the current time if a + // time is not provided in the request. + // + // This time will only be recorded up to the second. + // + // AtBlockchainInstant is a required field + AtBlockchainInstant *BlockchainInstant `locationName:"atBlockchainInstant" type:"structure" required:"true"` + + // The container of the token balance. + // + // Balance is a required field + Balance *string `locationName:"balance" type:"string" required:"true"` + + // The timestamp of the last transaction at which the balance for the token + // in the wallet was updated. + LastUpdatedTime *BlockchainInstant `locationName:"lastUpdatedTime" type:"structure"` + + // The container for the identifier of the owner. + OwnerIdentifier *OwnerIdentifier `locationName:"ownerIdentifier" type:"structure"` + + // The identifier for the token, including the unique token ID and its blockchain + // network. + TokenIdentifier *TokenIdentifier `locationName:"tokenIdentifier" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TokenBalance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TokenBalance) GoString() string { + return s.String() +} + +// SetAtBlockchainInstant sets the AtBlockchainInstant field's value. +func (s *TokenBalance) SetAtBlockchainInstant(v *BlockchainInstant) *TokenBalance { + s.AtBlockchainInstant = v + return s +} + +// SetBalance sets the Balance field's value. +func (s *TokenBalance) SetBalance(v string) *TokenBalance { + s.Balance = &v + return s +} + +// SetLastUpdatedTime sets the LastUpdatedTime field's value. +func (s *TokenBalance) SetLastUpdatedTime(v *BlockchainInstant) *TokenBalance { + s.LastUpdatedTime = v + return s +} + +// SetOwnerIdentifier sets the OwnerIdentifier field's value. +func (s *TokenBalance) SetOwnerIdentifier(v *OwnerIdentifier) *TokenBalance { + s.OwnerIdentifier = v + return s +} + +// SetTokenIdentifier sets the TokenIdentifier field's value. +func (s *TokenBalance) SetTokenIdentifier(v *TokenIdentifier) *TokenBalance { + s.TokenIdentifier = v + return s +} + +// The container of the token filter like the contract address on a given blockchain +// network or a unique token identifier on a given blockchain network. +// +// You must always specify the network property of this container when using +// this operation. +type TokenFilter struct { + _ struct{} `type:"structure"` + + // This is the address of the contract. + ContractAddress *string `locationName:"contractAddress" type:"string"` + + // The blockchain network of the token. + // + // Network is a required field + Network *string `locationName:"network" type:"string" required:"true" enum:"QueryNetwork"` + + // The unique identifier of the token. + TokenId *string `locationName:"tokenId" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s TokenFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TokenFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TokenFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TokenFilter"} + if s.Network == nil { + invalidParams.Add(request.NewErrParamRequired("Network")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetContractAddress sets the ContractAddress field's value. +func (s *TokenFilter) SetContractAddress(v string) *TokenFilter { + s.ContractAddress = &v + return s +} + +// SetNetwork sets the Network field's value. +func (s *TokenFilter) SetNetwork(v string) *TokenFilter { + s.Network = &v + return s +} + +// SetTokenId sets the TokenId field's value. +func (s *TokenFilter) SetTokenId(v string) *TokenFilter { + s.TokenId = &v + return s +} + +// The container for the identifier for the token, including the unique token +// ID and its blockchain network. +// +// Only the native tokens BTC and ETH, and the ERC-20, ERC-721, and ERC-1155 +// token standards are supported. +type TokenIdentifier struct { + _ struct{} `type:"structure"` + + // This is the token's contract address. + ContractAddress *string `locationName:"contractAddress" type:"string"` + + // The blockchain network of the token. + // + // Network is a required field + Network *string `locationName:"network" type:"string" required:"true" enum:"QueryNetwork"` + + // The unique identifier of the token. + TokenId *string `locationName:"tokenId" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TokenIdentifier) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TokenIdentifier) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TokenIdentifier) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TokenIdentifier"} + if s.Network == nil { + invalidParams.Add(request.NewErrParamRequired("Network")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetContractAddress sets the ContractAddress field's value. +func (s *TokenIdentifier) SetContractAddress(v string) *TokenIdentifier { + s.ContractAddress = &v + return s +} + +// SetNetwork sets the Network field's value. +func (s *TokenIdentifier) SetNetwork(v string) *TokenIdentifier { + s.Network = &v + return s +} + +// SetTokenId sets the TokenId field's value.
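To see the filter types above in context, here is a minimal sketch of listing token balances. It assumes ListTokenBalancesInput exposes OwnerFilter and TokenFilter members and that the output carries a TokenBalances slice (neither is shown in this diff), and both addresses are placeholders:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/managedblockchainquery"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := managedblockchainquery.New(sess, aws.NewConfig().WithRegion("us-east-1"))

	// Network is the only required member of TokenFilter, and OwnerFilter
	// requires Address; Validate enforces both. The addresses below are
	// placeholders, not real wallet or contract addresses.
	input := &managedblockchainquery.ListTokenBalancesInput{
		OwnerFilter: (&managedblockchainquery.OwnerFilter{}).
			SetAddress("0x0000000000000000000000000000000000000000"),
		TokenFilter: (&managedblockchainquery.TokenFilter{}).
			SetNetwork(managedblockchainquery.QueryNetworkEthereumMainnet).
			SetContractAddress("0x0000000000000000000000000000000000000000"),
	}
	out, err := svc.ListTokenBalances(input)
	if err != nil {
		log.Fatal(err)
	}
	for _, tb := range out.TokenBalances {
		fmt.Println(aws.StringValue(tb.Balance))
	}
}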
+func (s *TokenIdentifier) SetTokenId(v string) *TokenIdentifier { + s.TokenId = &v + return s +} + +// There are two possible types of transactions used for this data type: +// +// - A Bitcoin transaction is a movement of BTC from one address to another. +// +// - An Ethereum transaction refers to an action initiated by an externally +// owned account, which is an account managed by a human, not a contract. +// For example, if Bob sends Alice 1 ETH, Bob's account must be debited and +// Alice's must be credited. This state-changing action occurs within a transaction. +type Transaction struct { + _ struct{} `type:"structure"` + + // The block hash is a unique identifier for a block. It is a fixed-size string + // that is calculated by using the information in the block. The block hash + // is used to verify the integrity of the data in the block. + BlockHash *string `locationName:"blockHash" type:"string"` + + // The block number in which the transaction is recorded. + BlockNumber *string `locationName:"blockNumber" type:"string"` + + // The blockchain address for the contract. + ContractAddress *string `locationName:"contractAddress" type:"string"` + + // The amount of gas used up to the specified point in the block. + CumulativeGasUsed *string `locationName:"cumulativeGasUsed" type:"string"` + + // The effective gas price. + EffectiveGasPrice *string `locationName:"effectiveGasPrice" type:"string"` + + // The initiator of the transaction. It is either in the form of a public key + // or a contract address. + From *string `locationName:"from" type:"string"` + + // The amount of gas used for the transaction. + GasUsed *string `locationName:"gasUsed" type:"string"` + + // The blockchain network where the transaction occurred. + // + // Network is a required field + Network *string `locationName:"network" type:"string" required:"true" enum:"QueryNetwork"` + + // The number of transactions in the block. + // + // NumberOfTransactions is a required field + NumberOfTransactions *int64 `locationName:"numberOfTransactions" type:"long" required:"true"` + + // The signature of the transaction. The X coordinate of a point R. + SignatureR *string `locationName:"signatureR" type:"string"` + + // The signature of the transaction. The Y coordinate of a point S. + SignatureS *string `locationName:"signatureS" type:"string"` + + // The signature of the transaction. The Z coordinate of a point V. + SignatureV *int64 `locationName:"signatureV" type:"integer"` + + // The status of the transaction. + // + // Status is a required field + Status *string `locationName:"status" type:"string" required:"true" enum:"QueryTransactionStatus"` + + // The blockchain address receiving the transaction. It can either be a public + // key or a contract address. + // + // To is a required field + To *string `locationName:"to" type:"string" required:"true"` + + // The transaction fee. + TransactionFee *string `locationName:"transactionFee" type:"string"` + + // The hash of the transaction. It is generated whenever a transaction is verified + // and added to the blockchain. + // + // TransactionHash is a required field + TransactionHash *string `locationName:"transactionHash" type:"string" required:"true"` + + // The unique identifier of the transaction. It is generated whenever a transaction + // is verified and added to the blockchain. + TransactionId *string `locationName:"transactionId" type:"string"` + + // The index of the transaction within a blockchain.
+ // + // TransactionIndex is a required field + TransactionIndex *int64 `locationName:"transactionIndex" type:"long" required:"true"` + + // The Timestamp of the transaction. + // + // TransactionTimestamp is a required field + TransactionTimestamp *time.Time `locationName:"transactionTimestamp" type:"timestamp" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Transaction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Transaction) GoString() string { + return s.String() +} + +// SetBlockHash sets the BlockHash field's value. +func (s *Transaction) SetBlockHash(v string) *Transaction { + s.BlockHash = &v + return s +} + +// SetBlockNumber sets the BlockNumber field's value. +func (s *Transaction) SetBlockNumber(v string) *Transaction { + s.BlockNumber = &v + return s +} + +// SetContractAddress sets the ContractAddress field's value. +func (s *Transaction) SetContractAddress(v string) *Transaction { + s.ContractAddress = &v + return s +} + +// SetCumulativeGasUsed sets the CumulativeGasUsed field's value. +func (s *Transaction) SetCumulativeGasUsed(v string) *Transaction { + s.CumulativeGasUsed = &v + return s +} + +// SetEffectiveGasPrice sets the EffectiveGasPrice field's value. +func (s *Transaction) SetEffectiveGasPrice(v string) *Transaction { + s.EffectiveGasPrice = &v + return s +} + +// SetFrom sets the From field's value. +func (s *Transaction) SetFrom(v string) *Transaction { + s.From = &v + return s +} + +// SetGasUsed sets the GasUsed field's value. +func (s *Transaction) SetGasUsed(v string) *Transaction { + s.GasUsed = &v + return s +} + +// SetNetwork sets the Network field's value. +func (s *Transaction) SetNetwork(v string) *Transaction { + s.Network = &v + return s +} + +// SetNumberOfTransactions sets the NumberOfTransactions field's value. +func (s *Transaction) SetNumberOfTransactions(v int64) *Transaction { + s.NumberOfTransactions = &v + return s +} + +// SetSignatureR sets the SignatureR field's value. +func (s *Transaction) SetSignatureR(v string) *Transaction { + s.SignatureR = &v + return s +} + +// SetSignatureS sets the SignatureS field's value. +func (s *Transaction) SetSignatureS(v string) *Transaction { + s.SignatureS = &v + return s +} + +// SetSignatureV sets the SignatureV field's value. +func (s *Transaction) SetSignatureV(v int64) *Transaction { + s.SignatureV = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *Transaction) SetStatus(v string) *Transaction { + s.Status = &v + return s +} + +// SetTo sets the To field's value. +func (s *Transaction) SetTo(v string) *Transaction { + s.To = &v + return s +} + +// SetTransactionFee sets the TransactionFee field's value. +func (s *Transaction) SetTransactionFee(v string) *Transaction { + s.TransactionFee = &v + return s +} + +// SetTransactionHash sets the TransactionHash field's value. +func (s *Transaction) SetTransactionHash(v string) *Transaction { + s.TransactionHash = &v + return s +} + +// SetTransactionId sets the TransactionId field's value. 
+func (s *Transaction) SetTransactionId(v string) *Transaction { + s.TransactionId = &v + return s +} + +// SetTransactionIndex sets the TransactionIndex field's value. +func (s *Transaction) SetTransactionIndex(v int64) *Transaction { + s.TransactionIndex = &v + return s +} + +// SetTransactionTimestamp sets the TransactionTimestamp field's value. +func (s *Transaction) SetTransactionTimestamp(v time.Time) *Transaction { + s.TransactionTimestamp = &v + return s +} + +// The container for the properties of a transaction event. +type TransactionEvent struct { + _ struct{} `type:"structure"` + + // The blockchain address for the contract. + ContractAddress *string `locationName:"contractAddress" type:"string"` + + // The type of transaction event. + // + // EventType is a required field + EventType *string `locationName:"eventType" type:"string" required:"true" enum:"QueryTransactionEventType"` + + // The wallet address initiating the transaction. It can either be a public + // key or a contract. + From *string `locationName:"from" type:"string"` + + // The blockchain network where the transaction occurred. + // + // Network is a required field + Network *string `locationName:"network" type:"string" required:"true" enum:"QueryNetwork"` + + // The wallet address receiving the transaction. It can either be a public key + // or a contract. + To *string `locationName:"to" type:"string"` + + // The unique identifier for the token involved in the transaction. + TokenId *string `locationName:"tokenId" type:"string"` + + // The hash of the transaction. It is generated whenever a transaction is verified + // and added to the blockchain. + // + // TransactionHash is a required field + TransactionHash *string `locationName:"transactionHash" type:"string" required:"true"` + + // The unique identifier of the transaction. It is generated whenever a transaction + // is verified and added to the blockchain. + TransactionId *string `locationName:"transactionId" type:"string"` + + // The value that was transacted. + Value *string `locationName:"value" type:"string"` + + // The position of the vout in the transaction output list. + VoutIndex *int64 `locationName:"voutIndex" type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TransactionEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TransactionEvent) GoString() string { + return s.String() +} + +// SetContractAddress sets the ContractAddress field's value. +func (s *TransactionEvent) SetContractAddress(v string) *TransactionEvent { + s.ContractAddress = &v + return s +} + +// SetEventType sets the EventType field's value. +func (s *TransactionEvent) SetEventType(v string) *TransactionEvent { + s.EventType = &v + return s +} + +// SetFrom sets the From field's value. +func (s *TransactionEvent) SetFrom(v string) *TransactionEvent { + s.From = &v + return s +} + +// SetNetwork sets the Network field's value. +func (s *TransactionEvent) SetNetwork(v string) *TransactionEvent { + s.Network = &v + return s +} + +// SetTo sets the To field's value.
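The TransactionEvent shape pairs with the ListTransactionEventsPages paginator declared in the interface file later in this diff. A short, hedged sketch of walking every event for one transaction; the input members (Network, TransactionHash) and the output's Events slice are assumptions inferred from the types above, and txHash is supplied by the caller:

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/managedblockchainquery"
)

// printEvents walks every page of events for a single transaction hash.
func printEvents(svc *managedblockchainquery.ManagedBlockchainQuery, txHash string) error {
	input := &managedblockchainquery.ListTransactionEventsInput{
		// Field names here are assumed for illustration; they are not shown in this diff.
		Network:         aws.String(managedblockchainquery.QueryNetworkBitcoinMainnet),
		TransactionHash: aws.String(txHash),
	}
	return svc.ListTransactionEventsPages(input,
		func(page *managedblockchainquery.ListTransactionEventsOutput, lastPage bool) bool {
			for _, ev := range page.Events {
				fmt.Println(aws.StringValue(ev.EventType), aws.StringValue(ev.Value))
			}
			return true // returning false would stop pagination early
		})
}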
+func (s *TransactionEvent) SetTo(v string) *TransactionEvent { + s.To = &v + return s +} + +// SetTokenId sets the TokenId field's value. +func (s *TransactionEvent) SetTokenId(v string) *TransactionEvent { + s.TokenId = &v + return s +} + +// SetTransactionHash sets the TransactionHash field's value. +func (s *TransactionEvent) SetTransactionHash(v string) *TransactionEvent { + s.TransactionHash = &v + return s +} + +// SetTransactionId sets the TransactionId field's value. +func (s *TransactionEvent) SetTransactionId(v string) *TransactionEvent { + s.TransactionId = &v + return s +} + +// SetValue sets the Value field's value. +func (s *TransactionEvent) SetValue(v string) *TransactionEvent { + s.Value = &v + return s +} + +// SetVoutIndex sets the VoutIndex field's value. +func (s *TransactionEvent) SetVoutIndex(v int64) *TransactionEvent { + s.VoutIndex = &v + return s +} + +// The container of the transaction output. +type TransactionOutputItem struct { + _ struct{} `type:"structure"` + + // The blockchain network where the transaction occurred. + // + // Network is a required field + Network *string `locationName:"network" type:"string" required:"true" enum:"QueryNetwork"` + + // The hash of the transaction. It is generated whenever a transaction is verified + // and added to the blockchain. + // + // TransactionHash is a required field + TransactionHash *string `locationName:"transactionHash" type:"string" required:"true"` + + // The time when the transaction occurred. + // + // TransactionTimestamp is a required field + TransactionTimestamp *time.Time `locationName:"transactionTimestamp" type:"timestamp" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TransactionOutputItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TransactionOutputItem) GoString() string { + return s.String() +} + +// SetNetwork sets the Network field's value. +func (s *TransactionOutputItem) SetNetwork(v string) *TransactionOutputItem { + s.Network = &v + return s +} + +// SetTransactionHash sets the TransactionHash field's value. +func (s *TransactionOutputItem) SetTransactionHash(v string) *TransactionOutputItem { + s.TransactionHash = &v + return s +} + +// SetTransactionTimestamp sets the TransactionTimestamp field's value. +func (s *TransactionOutputItem) SetTransactionTimestamp(v time.Time) *TransactionOutputItem { + s.TransactionTimestamp = &v + return s +} + +// The resource passed is invalid. +type ValidationException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // The container for the fieldList of the exception. + FieldList []*ValidationExceptionField `locationName:"fieldList" type:"list"` + + // The container for the exception message. + Message_ *string `locationName:"message" min:"1" type:"string"` + + // The container for the reason for the exception. + // + // Reason is a required field + Reason *string `locationName:"reason" type:"string" required:"true" enum:"ValidationExceptionReason"` +} + +// String returns the string representation.
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ValidationException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ValidationException) GoString() string { + return s.String() +} + +func newErrorValidationException(v protocol.ResponseMetadata) error { + return &ValidationException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ValidationException) Code() string { + return "ValidationException" +} + +// Message returns the exception's message. +func (s *ValidationException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ValidationException) OrigErr() error { + return nil +} + +func (s *ValidationException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ValidationException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ValidationException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The resource passed is invalid. +type ValidationExceptionField struct { + _ struct{} `type:"structure"` + + // The ValidationException message. + // + // Message is a required field + Message *string `locationName:"message" type:"string" required:"true"` + + // The name of the field that triggered the ValidationException. + // + // Name is a required field + Name *string `locationName:"name" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ValidationExceptionField) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ValidationExceptionField) GoString() string { + return s.String() +} + +// SetMessage sets the Message field's value. +func (s *ValidationExceptionField) SetMessage(v string) *ValidationExceptionField { + s.Message = &v + return s +} + +// SetName sets the Name field's value. 
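Because the unmarshal handler wired up in service.go below builds these concrete exception types (via exceptionFromCode), a caller can typically recover them from a returned error with errors.As. A minimal sketch, assuming the returned error is, or wraps, one of these types:

import (
	"errors"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/managedblockchainquery"
)

// inspect prints validation details, or sleeps for a throttling Retry-After hint.
func inspect(err error) {
	var ve *managedblockchainquery.ValidationException
	if errors.As(err, &ve) {
		for _, f := range ve.FieldList {
			fmt.Printf("invalid field %s: %s\n",
				aws.StringValue(f.Name), aws.StringValue(f.Message))
		}
		return
	}
	var te *managedblockchainquery.ThrottlingException
	if errors.As(err, &te) {
		// Honor the service's suggested backoff before retrying.
		time.Sleep(time.Duration(aws.Int64Value(te.RetryAfterSeconds)) * time.Second)
	}
}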
+func (s *ValidationExceptionField) SetName(v string) *ValidationExceptionField { + s.Name = &v + return s +} + +const ( + // ErrorTypeValidationException is a ErrorType enum value + ErrorTypeValidationException = "VALIDATION_EXCEPTION" + + // ErrorTypeResourceNotFoundException is a ErrorType enum value + ErrorTypeResourceNotFoundException = "RESOURCE_NOT_FOUND_EXCEPTION" +) + +// ErrorType_Values returns all elements of the ErrorType enum +func ErrorType_Values() []string { + return []string{ + ErrorTypeValidationException, + ErrorTypeResourceNotFoundException, + } +} + +const ( + // ListTransactionsSortByTransactionTimestamp is a ListTransactionsSortBy enum value + ListTransactionsSortByTransactionTimestamp = "TRANSACTION_TIMESTAMP" +) + +// ListTransactionsSortBy_Values returns all elements of the ListTransactionsSortBy enum +func ListTransactionsSortBy_Values() []string { + return []string{ + ListTransactionsSortByTransactionTimestamp, + } +} + +const ( + // QueryNetworkEthereumMainnet is a QueryNetwork enum value + QueryNetworkEthereumMainnet = "ETHEREUM_MAINNET" + + // QueryNetworkBitcoinMainnet is a QueryNetwork enum value + QueryNetworkBitcoinMainnet = "BITCOIN_MAINNET" +) + +// QueryNetwork_Values returns all elements of the QueryNetwork enum +func QueryNetwork_Values() []string { + return []string{ + QueryNetworkEthereumMainnet, + QueryNetworkBitcoinMainnet, + } +} + +const ( + // QueryTransactionEventTypeErc20Transfer is a QueryTransactionEventType enum value + QueryTransactionEventTypeErc20Transfer = "ERC20_TRANSFER" + + // QueryTransactionEventTypeErc20Mint is a QueryTransactionEventType enum value + QueryTransactionEventTypeErc20Mint = "ERC20_MINT" + + // QueryTransactionEventTypeErc20Burn is a QueryTransactionEventType enum value + QueryTransactionEventTypeErc20Burn = "ERC20_BURN" + + // QueryTransactionEventTypeErc20Deposit is a QueryTransactionEventType enum value + QueryTransactionEventTypeErc20Deposit = "ERC20_DEPOSIT" + + // QueryTransactionEventTypeErc20Withdrawal is a QueryTransactionEventType enum value + QueryTransactionEventTypeErc20Withdrawal = "ERC20_WITHDRAWAL" + + // QueryTransactionEventTypeErc721Transfer is a QueryTransactionEventType enum value + QueryTransactionEventTypeErc721Transfer = "ERC721_TRANSFER" + + // QueryTransactionEventTypeErc1155Transfer is a QueryTransactionEventType enum value + QueryTransactionEventTypeErc1155Transfer = "ERC1155_TRANSFER" + + // QueryTransactionEventTypeBitcoinVin is a QueryTransactionEventType enum value + QueryTransactionEventTypeBitcoinVin = "BITCOIN_VIN" + + // QueryTransactionEventTypeBitcoinVout is a QueryTransactionEventType enum value + QueryTransactionEventTypeBitcoinVout = "BITCOIN_VOUT" + + // QueryTransactionEventTypeInternalEthTransfer is a QueryTransactionEventType enum value + QueryTransactionEventTypeInternalEthTransfer = "INTERNAL_ETH_TRANSFER" + + // QueryTransactionEventTypeEthTransfer is a QueryTransactionEventType enum value + QueryTransactionEventTypeEthTransfer = "ETH_TRANSFER" +) + +// QueryTransactionEventType_Values returns all elements of the QueryTransactionEventType enum +func QueryTransactionEventType_Values() []string { + return []string{ + QueryTransactionEventTypeErc20Transfer, + QueryTransactionEventTypeErc20Mint, + QueryTransactionEventTypeErc20Burn, + QueryTransactionEventTypeErc20Deposit, + QueryTransactionEventTypeErc20Withdrawal, + QueryTransactionEventTypeErc721Transfer, + QueryTransactionEventTypeErc1155Transfer, + QueryTransactionEventTypeBitcoinVin, + 
QueryTransactionEventTypeBitcoinVout, + QueryTransactionEventTypeInternalEthTransfer, + QueryTransactionEventTypeEthTransfer, + } +} + +const ( + // QueryTransactionStatusFinal is a QueryTransactionStatus enum value + QueryTransactionStatusFinal = "FINAL" + + // QueryTransactionStatusFailed is a QueryTransactionStatus enum value + QueryTransactionStatusFailed = "FAILED" +) + +// QueryTransactionStatus_Values returns all elements of the QueryTransactionStatus enum +func QueryTransactionStatus_Values() []string { + return []string{ + QueryTransactionStatusFinal, + QueryTransactionStatusFailed, + } +} + +const ( + // ResourceTypeCollection is a ResourceType enum value + ResourceTypeCollection = "collection" +) + +// ResourceType_Values returns all elements of the ResourceType enum +func ResourceType_Values() []string { + return []string{ + ResourceTypeCollection, + } +} + +const ( + // SortOrderAscending is a SortOrder enum value + SortOrderAscending = "ASCENDING" + + // SortOrderDescending is a SortOrder enum value + SortOrderDescending = "DESCENDING" +) + +// SortOrder_Values returns all elements of the SortOrder enum +func SortOrder_Values() []string { + return []string{ + SortOrderAscending, + SortOrderDescending, + } +} + +const ( + // ValidationExceptionReasonUnknownOperation is a ValidationExceptionReason enum value + ValidationExceptionReasonUnknownOperation = "unknownOperation" + + // ValidationExceptionReasonCannotParse is a ValidationExceptionReason enum value + ValidationExceptionReasonCannotParse = "cannotParse" + + // ValidationExceptionReasonFieldValidationFailed is a ValidationExceptionReason enum value + ValidationExceptionReasonFieldValidationFailed = "fieldValidationFailed" + + // ValidationExceptionReasonOther is a ValidationExceptionReason enum value + ValidationExceptionReasonOther = "other" +) + +// ValidationExceptionReason_Values returns all elements of the ValidationExceptionReason enum +func ValidationExceptionReason_Values() []string { + return []string{ + ValidationExceptionReasonUnknownOperation, + ValidationExceptionReasonCannotParse, + ValidationExceptionReasonFieldValidationFailed, + ValidationExceptionReasonOther, + } +} diff --git a/service/managedblockchainquery/doc.go b/service/managedblockchainquery/doc.go new file mode 100644 index 00000000000..4bda751321e --- /dev/null +++ b/service/managedblockchainquery/doc.go @@ -0,0 +1,36 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package managedblockchainquery provides the client and types for making API +// requests to Amazon Managed Blockchain Query. +// +// Amazon Managed Blockchain (AMB) Query provides you with convenient access +// to multi-blockchain network data, which makes it easier for you to extract +// contextual data related to blockchain activity. You can use AMB Query to +// read data from public blockchain networks, such as Bitcoin Mainnet and Ethereum +// Mainnet. You can also get information such as the current and historical +// balances of addresses, or you can get a list of blockchain transactions for +// a given time period. Additionally, you can get details of a given transaction, +// such as transaction events, which you can further analyze or use in business +// logic for your applications. +// +// See https://docs.aws.amazon.com/goto/WebAPI/managedblockchain-query-2023-05-04 for more information on this service. +// +// See managedblockchainquery package documentation for more information. 
+// https://docs.aws.amazon.com/sdk-for-go/api/service/managedblockchainquery/ +// +// # Using the Client +// +// To contact Amazon Managed Blockchain Query with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the Amazon Managed Blockchain Query client ManagedBlockchainQuery for more +// information on creating client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/managedblockchainquery/#New +package managedblockchainquery diff --git a/service/managedblockchainquery/errors.go b/service/managedblockchainquery/errors.go new file mode 100644 index 00000000000..065a1290219 --- /dev/null +++ b/service/managedblockchainquery/errors.go @@ -0,0 +1,58 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package managedblockchainquery + +import ( + "github.com/aws/aws-sdk-go/private/protocol" +) + +const ( + + // ErrCodeAccessDeniedException for service response error code + // "AccessDeniedException". + // + // The Amazon Web Services account doesn’t have access to this resource. + ErrCodeAccessDeniedException = "AccessDeniedException" + + // ErrCodeInternalServerException for service response error code + // "InternalServerException". + // + // The request processing has failed because of an internal error in the service. + ErrCodeInternalServerException = "InternalServerException" + + // ErrCodeResourceNotFoundException for service response error code + // "ResourceNotFoundException". + // + // The resource was not found. + ErrCodeResourceNotFoundException = "ResourceNotFoundException" + + // ErrCodeServiceQuotaExceededException for service response error code + // "ServiceQuotaExceededException". + // + // The service quota has been exceeded for this resource. + ErrCodeServiceQuotaExceededException = "ServiceQuotaExceededException" + + // ErrCodeThrottlingException for service response error code + // "ThrottlingException". + // + // The request or operation couldn't be performed because a service is throttling + // requests. The most common source of throttling errors is when you create + // resources that exceed your service limit for this resource type. Request + // a limit increase or delete unused resources, if possible. + ErrCodeThrottlingException = "ThrottlingException" + + // ErrCodeValidationException for service response error code + // "ValidationException". + // + // The resource passed is invalid. 
+ ErrCodeValidationException = "ValidationException" +) + +var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ + "AccessDeniedException": newErrorAccessDeniedException, + "InternalServerException": newErrorInternalServerException, + "ResourceNotFoundException": newErrorResourceNotFoundException, + "ServiceQuotaExceededException": newErrorServiceQuotaExceededException, + "ThrottlingException": newErrorThrottlingException, + "ValidationException": newErrorValidationException, +} diff --git a/service/managedblockchainquery/managedblockchainqueryiface/interface.go b/service/managedblockchainquery/managedblockchainqueryiface/interface.go new file mode 100644 index 00000000000..50434cbca7d --- /dev/null +++ b/service/managedblockchainquery/managedblockchainqueryiface/interface.go @@ -0,0 +1,97 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package managedblockchainqueryiface provides an interface to enable mocking the Amazon Managed Blockchain Query service client +// for testing your code. +// +// It is important to note that this interface will have breaking changes +// when the service model is updated and adds new API operations, paginators, +// and waiters. +package managedblockchainqueryiface + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/managedblockchainquery" +) + +// ManagedBlockchainQueryAPI provides an interface to enable mocking the +// managedblockchainquery.ManagedBlockchainQuery service client's API operation, +// paginators, and waiters. This makes unit testing your code that calls out +// to the SDK's service client easier. +// +// The best way to use this interface is so the SDK's service client's calls +// can be stubbed out for unit testing your code with the SDK without needing +// to inject custom request handlers into the SDK's request pipeline. +// +// // myFunc uses an SDK service client to make a request to +// // Amazon Managed Blockchain Query. +// func myFunc(svc managedblockchainqueryiface.ManagedBlockchainQueryAPI) bool { +// // Make svc.BatchGetTokenBalance request +// } +// +// func main() { +// sess := session.New() +// svc := managedblockchainquery.New(sess) +// +// myFunc(svc) +// } +// +// In your _test.go file: +// +// // Define a mock struct to be used in your unit tests of myFunc. +// type mockManagedBlockchainQueryClient struct { +// managedblockchainqueryiface.ManagedBlockchainQueryAPI +// } +// func (m *mockManagedBlockchainQueryClient) BatchGetTokenBalance(input *managedblockchainquery.BatchGetTokenBalanceInput) (*managedblockchainquery.BatchGetTokenBalanceOutput, error) { +// // mock response/functionality +// } +// +// func TestMyFunc(t *testing.T) { +// // Setup Test +// mockSvc := &mockManagedBlockchainQueryClient{} +// +// myFunc(mockSvc) +// +// // Verify myFunc's functionality +// } +// +// It is important to note that this interface will have breaking changes +// when the service model is updated and adds new API operations, paginators, +// and waiters. It's suggested to use the pattern above for testing, or to use +// tooling to generate mocks to satisfy the interfaces.
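For callers that match on error code strings instead of concrete types, the ErrCode constants from errors.go above fit the v1 SDK's conventional awserr pattern; a brief sketch:

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/managedblockchainquery"
)

// describe maps a service error code to a short diagnostic message.
func describe(err error) {
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case managedblockchainquery.ErrCodeResourceNotFoundException:
			fmt.Println("resource not found:", aerr.Message())
		case managedblockchainquery.ErrCodeThrottlingException:
			fmt.Println("throttled; back off and retry:", aerr.Message())
		default:
			fmt.Println("service error:", aerr.Error())
		}
	}
}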
+type ManagedBlockchainQueryAPI interface { + BatchGetTokenBalance(*managedblockchainquery.BatchGetTokenBalanceInput) (*managedblockchainquery.BatchGetTokenBalanceOutput, error) + BatchGetTokenBalanceWithContext(aws.Context, *managedblockchainquery.BatchGetTokenBalanceInput, ...request.Option) (*managedblockchainquery.BatchGetTokenBalanceOutput, error) + BatchGetTokenBalanceRequest(*managedblockchainquery.BatchGetTokenBalanceInput) (*request.Request, *managedblockchainquery.BatchGetTokenBalanceOutput) + + GetTokenBalance(*managedblockchainquery.GetTokenBalanceInput) (*managedblockchainquery.GetTokenBalanceOutput, error) + GetTokenBalanceWithContext(aws.Context, *managedblockchainquery.GetTokenBalanceInput, ...request.Option) (*managedblockchainquery.GetTokenBalanceOutput, error) + GetTokenBalanceRequest(*managedblockchainquery.GetTokenBalanceInput) (*request.Request, *managedblockchainquery.GetTokenBalanceOutput) + + GetTransaction(*managedblockchainquery.GetTransactionInput) (*managedblockchainquery.GetTransactionOutput, error) + GetTransactionWithContext(aws.Context, *managedblockchainquery.GetTransactionInput, ...request.Option) (*managedblockchainquery.GetTransactionOutput, error) + GetTransactionRequest(*managedblockchainquery.GetTransactionInput) (*request.Request, *managedblockchainquery.GetTransactionOutput) + + ListTokenBalances(*managedblockchainquery.ListTokenBalancesInput) (*managedblockchainquery.ListTokenBalancesOutput, error) + ListTokenBalancesWithContext(aws.Context, *managedblockchainquery.ListTokenBalancesInput, ...request.Option) (*managedblockchainquery.ListTokenBalancesOutput, error) + ListTokenBalancesRequest(*managedblockchainquery.ListTokenBalancesInput) (*request.Request, *managedblockchainquery.ListTokenBalancesOutput) + + ListTokenBalancesPages(*managedblockchainquery.ListTokenBalancesInput, func(*managedblockchainquery.ListTokenBalancesOutput, bool) bool) error + ListTokenBalancesPagesWithContext(aws.Context, *managedblockchainquery.ListTokenBalancesInput, func(*managedblockchainquery.ListTokenBalancesOutput, bool) bool, ...request.Option) error + + ListTransactionEvents(*managedblockchainquery.ListTransactionEventsInput) (*managedblockchainquery.ListTransactionEventsOutput, error) + ListTransactionEventsWithContext(aws.Context, *managedblockchainquery.ListTransactionEventsInput, ...request.Option) (*managedblockchainquery.ListTransactionEventsOutput, error) + ListTransactionEventsRequest(*managedblockchainquery.ListTransactionEventsInput) (*request.Request, *managedblockchainquery.ListTransactionEventsOutput) + + ListTransactionEventsPages(*managedblockchainquery.ListTransactionEventsInput, func(*managedblockchainquery.ListTransactionEventsOutput, bool) bool) error + ListTransactionEventsPagesWithContext(aws.Context, *managedblockchainquery.ListTransactionEventsInput, func(*managedblockchainquery.ListTransactionEventsOutput, bool) bool, ...request.Option) error + + ListTransactions(*managedblockchainquery.ListTransactionsInput) (*managedblockchainquery.ListTransactionsOutput, error) + ListTransactionsWithContext(aws.Context, *managedblockchainquery.ListTransactionsInput, ...request.Option) (*managedblockchainquery.ListTransactionsOutput, error) + ListTransactionsRequest(*managedblockchainquery.ListTransactionsInput) (*request.Request, *managedblockchainquery.ListTransactionsOutput) + + ListTransactionsPages(*managedblockchainquery.ListTransactionsInput, func(*managedblockchainquery.ListTransactionsOutput, bool) bool) error + 
ListTransactionsPagesWithContext(aws.Context, *managedblockchainquery.ListTransactionsInput, func(*managedblockchainquery.ListTransactionsOutput, bool) bool, ...request.Option) error +} + +var _ ManagedBlockchainQueryAPI = (*managedblockchainquery.ManagedBlockchainQuery)(nil) diff --git a/service/managedblockchainquery/service.go b/service/managedblockchainquery/service.go new file mode 100644 index 00000000000..f99de1e145c --- /dev/null +++ b/service/managedblockchainquery/service.go @@ -0,0 +1,106 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package managedblockchainquery + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/restjson" +) + +// ManagedBlockchainQuery provides the API operation methods for making requests to +// Amazon Managed Blockchain Query. See this package's package overview docs +// for details on the service. +// +// ManagedBlockchainQuery methods are safe to use concurrently. It is not safe to +// mutate any of the struct's properties though. +type ManagedBlockchainQuery struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// Service information constants +const ( + ServiceName = "ManagedBlockchain Query" // Name of service. + EndpointsID = "managedblockchain-query" // ID to lookup a service endpoint with. + ServiceID = "ManagedBlockchain Query" // ServiceID is a unique identifier of a specific service. +) + +// New creates a new instance of the ManagedBlockchainQuery client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// +// mySession := session.Must(session.NewSession()) +// +// // Create a ManagedBlockchainQuery client from just a session. +// svc := managedblockchainquery.New(mySession) +// +// // Create a ManagedBlockchainQuery client with additional configuration +// svc := managedblockchainquery.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *ManagedBlockchainQuery { + c := p.ClientConfig(EndpointsID, cfgs...) + if c.SigningNameDerived || len(c.SigningName) == 0 { + c.SigningName = "managedblockchain-query" + } + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion) +} + +// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName, resolvedRegion string) *ManagedBlockchainQuery { + svc := &ManagedBlockchainQuery{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + ServiceID: ServiceID, + SigningName: signingName, + SigningRegion: signingRegion, + PartitionID: partitionID, + Endpoint: endpoint, + APIVersion: "2023-05-04", + ResolvedRegion: resolvedRegion, + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed( + protocol.NewUnmarshalErrorHandler(restjson.NewUnmarshalTypedError(exceptionFromCode)).NamedHandler(), + ) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a ManagedBlockchainQuery operation and runs any +// custom request initialization. +func (c *ManagedBlockchainQuery) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/service/mediaconvert/api.go b/service/mediaconvert/api.go index 02e0627d0e2..c0e41b5728a 100644 --- a/service/mediaconvert/api.go +++ b/service/mediaconvert/api.go @@ -2791,12 +2791,11 @@ func (c *MediaConvert) UpdateQueueWithContext(ctx aws.Context, input *UpdateQueu return out, req.Send() } -// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to -// the value AAC. The service accepts one of two mutually exclusive groups of -// AAC settings--VBR and CBR. To select one of these modes, set the value of -// Bitrate control mode (rateControlMode) to "VBR" or "CBR". In VBR mode, you -// control the audio quality with the setting VBR quality (vbrQuality). In CBR -// mode, you use the setting Bitrate (bitrate). Defaults and valid values depend +// Required when you set Codec to the value AAC. The service accepts one of +// two mutually exclusive groups of AAC settings--VBR and CBR. To select one +// of these modes, set the value of Bitrate control mode to "VBR" or "CBR". +// In VBR mode, you control the audio quality with the setting VBR quality. +// In CBR mode, you use the setting Bitrate. Defaults and valid values depend // on the rate control mode. type AacSettings struct { _ struct{} `type:"structure"` @@ -2817,9 +2816,8 @@ type AacSettings struct { // 32000, 40000, 48000, 56000, 64000, 80000, 96000, 112000, 128000, 160000, // 192000, 224000, 256000, 288000, 320000, 384000, 448000, 512000, 576000, 640000, // 768000, 896000, 1024000. The value you set is also constrained by the values - // that you choose for Profile (codecProfile), Bitrate control mode (codingMode), - // and Sample rate (sampleRate). Default values depend on Bitrate control mode - // and Profile. + // that you choose for Profile, Bitrate control mode, and Sample rate. Default + // values depend on Bitrate control mode and Profile. Bitrate *int64 `locationName:"bitrate" min:"6000" type:"integer"` // AAC Profile. 
@@ -2948,8 +2946,7 @@ func (s *AacSettings) SetVbrQuality(v string) *AacSettings { return s } -// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to -// the value AC3. +// Required when you set Codec to the value AC3. type Ac3Settings struct { _ struct{} `type:"structure"` @@ -2978,29 +2975,28 @@ type Ac3Settings struct { // Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert // uses when encoding the metadata in the Dolby Digital stream for the line // operating mode. Related setting: When you use this setting, MediaConvert - // ignores any value you provide for Dynamic range compression profile (DynamicRangeCompressionProfile). - // For information about the Dolby Digital DRC operating modes and profiles, - // see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf. + // ignores any value you provide for Dynamic range compression profile. For + // information about the Dolby Digital DRC operating modes and profiles, see + // the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf. DynamicRangeCompressionLine *string `locationName:"dynamicRangeCompressionLine" type:"string" enum:"Ac3DynamicRangeCompressionLine"` // When you want to add Dolby dynamic range compression (DRC) signaling to your // output stream, we recommend that you use the mode-specific settings instead - // of Dynamic range compression profile (DynamicRangeCompressionProfile). The - // mode-specific settings are Dynamic range compression profile, line mode (dynamicRangeCompressionLine) - // and Dynamic range compression profile, RF mode (dynamicRangeCompressionRf). - // Note that when you specify values for all three settings, MediaConvert ignores - // the value of this setting in favor of the mode-specific settings. If you - // do use this setting instead of the mode-specific settings, choose None (NONE) - // to leave out DRC signaling. Keep the default Film standard (FILM_STANDARD) - // to set the profile to Dolby's film standard profile for all operating modes. + // of Dynamic range compression profile. The mode-specific settings are Dynamic + // range compression profile, line mode and Dynamic range compression profile, + // RF mode. Note that when you specify values for all three settings, MediaConvert + // ignores the value of this setting in favor of the mode-specific settings. + // If you do use this setting instead of the mode-specific settings, choose + // None to leave out DRC signaling. Keep the default Film standard to set the + // profile to Dolby's film standard profile for all operating modes. DynamicRangeCompressionProfile *string `locationName:"dynamicRangeCompressionProfile" type:"string" enum:"Ac3DynamicRangeCompressionProfile"` // Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert // uses when encoding the metadata in the Dolby Digital stream for the RF operating // mode. Related setting: When you use this setting, MediaConvert ignores any - // value you provide for Dynamic range compression profile (DynamicRangeCompressionProfile). - // For information about the Dolby Digital DRC operating modes and profiles, - // see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf. 
+ // value you provide for Dynamic range compression profile. For information + // about the Dolby Digital DRC operating modes and profiles, see the Dynamic + // Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf. DynamicRangeCompressionRf *string `locationName:"dynamicRangeCompressionRf" type:"string" enum:"Ac3DynamicRangeCompressionRf"` // Applies a 120Hz lowpass filter to the LFE channel prior to encoding. Only @@ -3213,13 +3209,12 @@ func (s *AdvancedInputFilterSettings) SetSharpening(v string) *AdvancedInputFilt return s } -// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to -// the value AIFF. +// Required when you set Codec to the value AIFF. type AiffSettings struct { _ struct{} `type:"structure"` - // Specify Bit depth (BitDepth), in bits per sample, to choose the encoding - // quality for this audio track. + // Specify Bit depth, in bits per sample, to choose the encoding quality for + // this audio track. BitDepth *int64 `locationName:"bitDepth" min:"16" type:"integer"` // Specify the number of channels in this output audio track. Valid values are @@ -3363,10 +3358,10 @@ type AncillarySourceSettings struct { _ struct{} `type:"structure"` // Specify whether this set of input captions appears in your outputs in both - // 608 and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes - // the captions data in two ways: it passes the 608 data through using the 608 - // compatibility bytes fields of the 708 wrapper, and it also translates the - // 608 data into 708. + // 608 and 708 format. If you choose Upconvert, MediaConvert includes the captions + // data in two ways: it passes the 608 data through using the 608 compatibility + // bytes fields of the 708 wrapper, and it also translates the 608 data into + // 708. Convert608To708 *string `locationName:"convert608To708" type:"string" enum:"AncillaryConvert608To708"` // Specifies the 608 channel number in the ancillary data track from which to @@ -3547,43 +3542,37 @@ func (s *AudioChannelTaggingSettings) SetChannelTag(v string) *AudioChannelTaggi type AudioCodecSettings struct { _ struct{} `type:"structure"` - // Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to - // the value AAC. The service accepts one of two mutually exclusive groups of - // AAC settings--VBR and CBR. To select one of these modes, set the value of - // Bitrate control mode (rateControlMode) to "VBR" or "CBR". In VBR mode, you - // control the audio quality with the setting VBR quality (vbrQuality). In CBR - // mode, you use the setting Bitrate (bitrate). Defaults and valid values depend + // Required when you set Codec to the value AAC. The service accepts one of + // two mutually exclusive groups of AAC settings--VBR and CBR. To select one + // of these modes, set the value of Bitrate control mode to "VBR" or "CBR". + // In VBR mode, you control the audio quality with the setting VBR quality. + // In CBR mode, you use the setting Bitrate. Defaults and valid values depend // on the rate control mode. AacSettings *AacSettings `locationName:"aacSettings" type:"structure"` - // Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to - // the value AC3. + // Required when you set Codec to the value AC3. Ac3Settings *Ac3Settings `locationName:"ac3Settings" type:"structure"` - // Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to - // the value AIFF. 
+ // Required when you set Codec to the value AIFF. AiffSettings *AiffSettings `locationName:"aiffSettings" type:"structure"` // Choose the audio codec for this output. Note that the option Dolby Digital - // passthrough (PASSTHROUGH) applies only to Dolby Digital and Dolby Digital - // Plus audio inputs. Make sure that you choose a codec that's supported with - // your output container: https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers.html#reference-codecs-containers-output-audio + // passthrough applies only to Dolby Digital and Dolby Digital Plus audio inputs. + // Make sure that you choose a codec that's supported with your output container: + // https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers.html#reference-codecs-containers-output-audio // For audio-only outputs, make sure that both your input audio codec and your // output audio codec are supported for audio-only workflows. For more information, // see: https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers-input.html#reference-codecs-containers-input-audio-only // and https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers.html#audio-only-output Codec *string `locationName:"codec" type:"string" enum:"AudioCodec"` - // Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to - // the value EAC3_ATMOS. + // Required when you set Codec to the value EAC3_ATMOS. Eac3AtmosSettings *Eac3AtmosSettings `locationName:"eac3AtmosSettings" type:"structure"` - // Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to - // the value EAC3. + // Required when you set Codec to the value EAC3. Eac3Settings *Eac3Settings `locationName:"eac3Settings" type:"structure"` - // Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to - // the value MP2. + // Required when you set Codec to the value MP2. Mp2Settings *Mp2Settings `locationName:"mp2Settings" type:"structure"` // Required when you set Codec, under AudioDescriptions>CodecSettings, to the @@ -3598,8 +3587,7 @@ type AudioCodecSettings struct { // value Vorbis. VorbisSettings *VorbisSettings `locationName:"vorbisSettings" type:"structure"` - // Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to - // the value WAV. + // Required when you set Codec to the value WAV. WavSettings *WavSettings `locationName:"wavSettings" type:"structure"` } @@ -3799,14 +3787,14 @@ type AudioDescription struct { CodecSettings *AudioCodecSettings `locationName:"codecSettings" type:"structure"` // Specify the language for this audio output track. The service puts this language - // code into your output audio track when you set Language code control (AudioLanguageCodeControl) - // to Use configured (USE_CONFIGURED). The service also uses your specified - // custom language code when you set Language code control (AudioLanguageCodeControl) - // to Follow input (FOLLOW_INPUT), but your input file doesn't specify a language - // code. For all outputs, you can use an ISO 639-2 or ISO 639-3 code. For streaming - // outputs, you can also use any other code in the full RFC-5646 specification. - // Streaming outputs are those that are in one of the following output groups: - // CMAF, DASH ISO, Apple HLS, or Microsoft Smooth Streaming. + // code into your output audio track when you set Language code control to Use + // configured. 
The service also uses your specified custom language code when + // you set Language code control to Follow input, but your input file doesn't + // specify a language code. For all outputs, you can use an ISO 639-2 or ISO + // 639-3 code. For streaming outputs, you can also use any other code in the + // full RFC-5646 specification. Streaming outputs are those that are in one + // of the following output groups: CMAF, DASH ISO, Apple HLS, or Microsoft Smooth + // Streaming. CustomLanguageCode *string `locationName:"customLanguageCode" type:"string"` // Indicates the language of the audio output track. The ISO 639 language specified @@ -3816,11 +3804,11 @@ type AudioDescription struct { LanguageCode *string `locationName:"languageCode" type:"string" enum:"LanguageCode"` // Specify which source for language code takes precedence for this audio track. - // When you choose Follow input (FOLLOW_INPUT), the service uses the language - // code from the input track if it's present. If there's no languge code on - // the input track, the service uses the code that you specify in the setting - // Language code (languageCode or customLanguageCode). When you choose Use configured - // (USE_CONFIGURED), the service uses the language code that you specify. + // When you choose Follow input, the service uses the language code from the + // input track if it's present. If there's no language code on the input track, + // the service uses the code that you specify in the setting Language code. + // When you choose Use configured, the service uses the language code that you + // specify. LanguageCodeControl *string `locationName:"languageCodeControl" type:"string" enum:"AudioLanguageCodeControl"` // Advanced audio remixing settings. @@ -3974,11 +3962,11 @@ type AudioNormalizationSettings struct { // track loudness. PeakCalculation *string `locationName:"peakCalculation" type:"string" enum:"AudioNormalizationPeakCalculation"` - // When you use Audio normalization (AudioNormalizationSettings), optionally - // use this setting to specify a target loudness. If you don't specify a value - // here, the encoder chooses a value for you, based on the algorithm that you - // choose for Algorithm (algorithm). If you choose algorithm 1770-1, the encoder - // will choose -24 LKFS; otherwise, the encoder will choose -23 LKFS. + // When you use Audio normalization, optionally use this setting to specify + // a target loudness. If you don't specify a value here, the encoder chooses + // a value for you, based on the algorithm that you choose for Algorithm. If + // you choose algorithm 1770-1, the encoder will choose -24 LKFS; otherwise, + // the encoder will choose -23 LKFS. TargetLkfs *float64 `locationName:"targetLkfs" type:"double"` // Specify the True-peak limiter threshold in decibels relative to full scale @@ -4061,9 +4049,8 @@ func (s *AudioNormalizationSettings) SetTruePeakLimiterThreshold(v float64) *Aud return s } -// Use Audio selectors (AudioSelectors) to specify a track or set of tracks -// from the input that you will use in your outputs. You can use multiple Audio -// selectors per input. +// Use Audio selectors to specify a track or set of tracks from the input that +// you will use in your outputs. You can use multiple Audio selectors per input. type AudioSelector struct { _ struct{} `type:"structure"` @@ -4120,11 +4107,10 @@ type AudioSelector struct { // extract specific program data from the track.
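Editor's aside, not part of the generated file: the track-and-program selection described above looks like the following when built against these types. A minimal sketch; it assumes the aws-sdk-go v1 packages "github.com/aws/aws-sdk-go/aws" and "github.com/aws/aws-sdk-go/service/mediaconvert" are imported, and the enum string "TRACK" for Selector type is an assumption, since only the console label appears in this hunk.

    sel := &mediaconvert.AudioSelector{
        SelectorType:     aws.String("TRACK"),                               // assumed API value for the console's "Track"
        Tracks:           []*int64{aws.Int64(1), aws.Int64(2), aws.Int64(3)}, // tracks 1 through 3
        ProgramSelection: aws.Int64(2),                                      // extract program 2 from the selected track
    }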
To select multiple programs, // create multiple selectors with the same Track and different Program numbers. // In the console, this setting is visible when you set Selector type to Track. - // Choose the program number from the dropdown list. If you are sending a JSON - // file, provide the program ID, which is part of the audio metadata. If your - // input file has incorrect metadata, you can choose All channels instead of - // a program number to have the service ignore the program IDs and include all - // the programs in the track. + // Choose the program number from the dropdown list. If your input file has + // incorrect metadata, you can choose All channels instead of a program number + // to have the service ignore the program IDs and include all the programs in + // the track. ProgramSelection *int64 `locationName:"programSelection" type:"integer"` // Use these settings to reorder the audio channels of one input to match those @@ -4138,9 +4124,7 @@ type AudioSelector struct { // Identify a track from the input audio to include in this selector by entering // the track index number. To include several tracks in a single audio selector, // specify multiple tracks as follows. Using the console, enter a comma-separated - // list. For examle, type "1,2,3" to include tracks 1 through 3. Specifying - // directly in your JSON job file, provide the track numbers in an array. For - // example, "tracks": [1,2,3]. + // list. For example, type "1,2,3" to include tracks 1 through 3. Tracks []*int64 `locationName:"tracks" type:"list"` } @@ -4256,10 +4240,9 @@ func (s *AudioSelector) SetTracks(v []*int64) *AudioSelector { } // Use audio selector groups to combine multiple sidecar audio inputs so that -// you can assign them to a single output audio tab (AudioDescription). Note -// that, if you're working with embedded audio, it's simpler to assign multiple -// input tracks into a single audio selector rather than use an audio selector -// group. +// you can assign them to a single output audio tab. Note that, if you're working +// with embedded audio, it's simpler to assign multiple input tracks into a +// single audio selector rather than use an audio selector group. type AudioSelectorGroup struct { _ struct{} `type:"structure"` @@ -4607,21 +4590,21 @@ func (s *AutomatedEncodingSettings) SetAbrSettings(v *AutomatedAbrSettings) *Aut } // Settings for quality-defined variable bitrate encoding with the AV1 codec. -// Use these settings only when you set QVBR for Rate control mode (RateControlMode). +// Use these settings only when you set QVBR for Rate control mode. type Av1QvbrSettings struct { _ struct{} `type:"structure"` - // Use this setting only when you set Rate control mode (RateControlMode) to - // QVBR. Specify the target quality level for this output. MediaConvert determines - // the right number of bits to use for each part of the video to maintain the - // video quality that you specify. When you keep the default value, AUTO, MediaConvert - // picks a quality level for you, based on characteristics of your input video. - // If you prefer to specify a quality level, specify a number from 1 through - // 10. Use higher numbers for greater quality. Level 10 results in nearly lossless - // compression. The quality level for most broadcast-quality transcodes is between - // 6 and 9. Optionally, to specify a value between whole numbers, also provide - // a value for the setting qvbrQualityLevelFineTune. 
For example, if you want - // your QVBR quality level to be 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune + // Use this setting only when you set Rate control mode to QVBR. Specify the + // target quality level for this output. MediaConvert determines the right number + // of bits to use for each part of the video to maintain the video quality that + // you specify. When you keep the default value, AUTO, MediaConvert picks a + // quality level for you, based on characteristics of your input video. If you + // prefer to specify a quality level, specify a number from 1 through 10. Use + // higher numbers for greater quality. Level 10 results in nearly lossless compression. + // The quality level for most broadcast-quality transcodes is between 6 and + // 9. Optionally, to specify a value between whole numbers, also provide a value + // for the setting qvbrQualityLevelFineTune. For example, if you want your QVBR + // quality level to be 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune // to .33. QvbrQualityLevel *int64 `locationName:"qvbrQualityLevel" min:"1" type:"integer"` @@ -4683,24 +4666,18 @@ type Av1Settings struct { _ struct{} `type:"structure"` // Specify the strength of any adaptive quantization filters that you enable. - // The value that you choose here applies to Spatial adaptive quantization (spatialAdaptiveQuantization). + // The value that you choose here applies to Spatial adaptive quantization. AdaptiveQuantization *string `locationName:"adaptiveQuantization" type:"string" enum:"Av1AdaptiveQuantization"` - // Specify the Bit depth (Av1BitDepth). You can choose 8-bit (BIT_8) or 10-bit - // (BIT_10). + // Specify the Bit depth. You can choose 8-bit or 10-bit. BitDepth *string `locationName:"bitDepth" type:"string" enum:"Av1BitDepth"` - // If you are using the console, use the Framerate setting to specify the frame - // rate for this output. If you want to keep the same frame rate as the input - // video, choose Follow source. If you want to do frame rate conversion, choose - // a frame rate from the dropdown list or choose Custom. The framerates shown - // in the dropdown list are decimal approximations of fractions. If you choose - // Custom, specify your frame rate as a fraction. If you are creating your transcoding - // job specification as a JSON file without the console, use FramerateControl - // to specify which value the service uses for the frame rate for this output. - // Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate - // from the input. Choose SPECIFIED if you want the service to use the frame - // rate you specify in the settings FramerateNumerator and FramerateDenominator. + // Use the Framerate setting to specify the frame rate for this output. If you + // want to keep the same frame rate as the input video, choose Follow source. + // If you want to do frame rate conversion, choose a frame rate from the dropdown + // list or choose Custom. The framerates shown in the dropdown list are decimal + // approximations of fractions. If you choose Custom, specify your frame rate + // as a fraction. 
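Editor's aside: the fine-tune arithmetic in the comment above, sketched with these types (same assumed aws-sdk-go v1 imports; the field names RateControlMode, FramerateNumerator, and FramerateDenominator are taken from the setting names in the doc text rather than shown in this hunk, so treat them as assumptions):

    av1 := &mediaconvert.Av1Settings{
        RateControlMode: aws.String("QVBR"), // AV1 supports only quality-defined variable bitrate
        QvbrSettings: &mediaconvert.Av1QvbrSettings{
            QvbrQualityLevel:         aws.Int64(7),
            QvbrQualityLevelFineTune: aws.Float64(0.33), // 7 + 0.33 = QVBR quality level 7.33
        },
        FramerateControl:     aws.String("SPECIFIED"),
        FramerateNumerator:   aws.Int64(24000), // 23.976 fps written as a fraction
        FramerateDenominator: aws.Int64(1001),
    }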
FramerateControl *string `locationName:"framerateControl" type:"string" enum:"Av1FramerateControl"` // Choose the method that you want MediaConvert to use when increasing or decreasing @@ -4748,7 +4725,7 @@ type Av1Settings struct { NumberBFramesBetweenReferenceFrames *int64 `locationName:"numberBFramesBetweenReferenceFrames" type:"integer"` // Settings for quality-defined variable bitrate encoding with the H.265 codec. - // Use these settings only when you set QVBR for Rate control mode (RateControlMode). + // Use these settings only when you set QVBR for Rate control mode. QvbrSettings *Av1QvbrSettings `locationName:"qvbrSettings" type:"structure"` // 'With AV1 outputs, for rate control mode, MediaConvert supports only quality-defined @@ -4761,21 +4738,20 @@ type Av1Settings struct { // be less than or equal to half the number of macroblock rows. Slices *int64 `locationName:"slices" min:"1" type:"integer"` - // Keep the default value, Enabled (ENABLED), to adjust quantization within - // each frame based on spatial variation of content complexity. When you enable - // this feature, the encoder uses fewer bits on areas that can sustain more - // distortion with no noticeable visual degradation and uses more bits on areas - // where any small distortion will be noticeable. For example, complex textured - // blocks are encoded with fewer bits and smooth textured blocks are encoded - // with more bits. Enabling this feature will almost always improve your video - // quality. Note, though, that this feature doesn't take into account where - // the viewer's attention is likely to be. If viewers are likely to be focusing - // their attention on a part of the screen with a lot of complex texture, you - // might choose to disable this feature. Related setting: When you enable spatial - // adaptive quantization, set the value for Adaptive quantization (adaptiveQuantization) - // depending on your content. For homogeneous content, such as cartoons and - // video games, set it to Low. For content with a wider variety of textures, - // set it to High or Higher. + // Keep the default value, Enabled, to adjust quantization within each frame + // based on spatial variation of content complexity. When you enable this feature, + // the encoder uses fewer bits on areas that can sustain more distortion with + // no noticeable visual degradation and uses more bits on areas where any small + // distortion will be noticeable. For example, complex textured blocks are encoded + // with fewer bits and smooth textured blocks are encoded with more bits. Enabling + // this feature will almost always improve your video quality. Note, though, + // that this feature doesn't take into account where the viewer's attention + // is likely to be. If viewers are likely to be focusing their attention on + // a part of the screen with a lot of complex texture, you might choose to disable + // this feature. Related setting: When you enable spatial adaptive quantization, + // set the value for Adaptive quantization depending on your content. For homogeneous + // content, such as cartoons and video games, set it to Low. For content with + // a wider variety of textures, set it to High or Higher. SpatialAdaptiveQuantization *string `locationName:"spatialAdaptiveQuantization" type:"string" enum:"Av1SpatialAdaptiveQuantization"` } @@ -4965,8 +4941,8 @@ type AvcIntraSettings struct { // subsampling. 
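Editor's aside: spatial adaptive quantization and its strength control, paired the way the comment above recommends (sketch; "ENABLED" appears in the removed doc text, while the strength value "LOW" is assumed from the console label Low):

    av1 := &mediaconvert.Av1Settings{
        SpatialAdaptiveQuantization: aws.String("ENABLED"), // the default; almost always improves quality
        AdaptiveQuantization:        aws.String("LOW"),     // for homogeneous content such as cartoons and video games
    }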
AvcIntraClass *string `locationName:"avcIntraClass" type:"string" enum:"AvcIntraClass"` - // Optional when you set AVC-Intra class (avcIntraClass) to Class 4K/2K (CLASS_4K_2K). - // When you set AVC-Intra class to a different value, this object isn't allowed. + // Optional when you set AVC-Intra class to Class 4K/2K. When you set AVC-Intra + // class to a different value, this object isn't allowed. AvcIntraUhdSettings *AvcIntraUhdSettings `locationName:"avcIntraUhdSettings" type:"structure"` // If you are using the console, use the Framerate setting to specify the frame @@ -4974,12 +4950,7 @@ type AvcIntraSettings struct { // video, choose Follow source. If you want to do frame rate conversion, choose // a frame rate from the dropdown list or choose Custom. The framerates shown // in the dropdown list are decimal approximations of fractions. If you choose - // Custom, specify your frame rate as a fraction. If you are creating your transcoding - // job specification as a JSON file without the console, use FramerateControl - // to specify which value the service uses for the frame rate for this output. - // Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate - // from the input. Choose SPECIFIED if you want the service to use the frame - // rate you specify in the settings FramerateNumerator and FramerateDenominator. + // Custom, specify your frame rate as a fraction. FramerateControl *string `locationName:"framerateControl" type:"string" enum:"AvcIntraFramerateControl"` // Choose the method that you want MediaConvert to use when increasing or decreasing @@ -5012,32 +4983,30 @@ type AvcIntraSettings struct { FramerateNumerator *int64 `locationName:"framerateNumerator" min:"24" type:"integer"` // Choose the scan line type for the output. Keep the default value, Progressive - // (PROGRESSIVE) to create a progressive output, regardless of the scan type - // of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) - // to create an output that's interlaced with the same field polarity throughout. - // Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) - // to produce outputs with the same field polarity as the source. For jobs that - // have multiple inputs, the output field polarity might change over the course - // of the output. Follow behavior depends on the input scan type. If the source - // is interlaced, the output will be interlaced with the same polarity as the - // source. If the source is progressive, the output will be interlaced with - // top field bottom field first, depending on which of the Follow options you - // choose. + // to create a progressive output, regardless of the scan type of your input. + // Use Top field first or Bottom field first to create an output that's interlaced + // with the same field polarity throughout. Use Follow, default top or Follow, + // default bottom to produce outputs with the same field polarity as the source. + // For jobs that have multiple inputs, the output field polarity might change + // over the course of the output. Follow behavior depends on the input scan + // type. If the source is interlaced, the output will be interlaced with the + // same polarity as the source. If the source is progressive, the output will + // be interlaced with top field or bottom field first, depending on which of the + // Follow options you choose.
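Editor's aside: the class/UHD-settings dependency described above, in one place (sketch with the same assumed imports; CLASS_4K_2K and MULTI_PASS are the API values named in the removed doc text):

    avcIntra := &mediaconvert.AvcIntraSettings{
        AvcIntraClass: aws.String("CLASS_4K_2K"), // AvcIntraUhdSettings is only allowed with this class
        AvcIntraUhdSettings: &mediaconvert.AvcIntraUhdSettings{
            QualityTuningLevel: aws.String("MULTI_PASS"), // slower encode, output bitrate closer to target
        },
    }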
InterlaceMode *string `locationName:"interlaceMode" type:"string" enum:"AvcIntraInterlaceMode"` // Use this setting for interlaced outputs, when your output frame rate is half // of your input frame rate. In this situation, choose Optimized interlacing - // (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this - // case, each progressive frame from the input corresponds to an interlaced - // field in the output. Keep the default value, Basic interlacing (INTERLACED), - // for all other output frame rates. With basic interlacing, MediaConvert performs - // any frame rate conversion first and then interlaces the frames. When you - // choose Optimized interlacing and you set your output frame rate to a value - // that isn't suitable for optimized interlacing, MediaConvert automatically - // falls back to basic interlacing. Required settings: To use optimized interlacing, - // you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't - // use optimized interlacing for hard telecine outputs. You must also set Interlace - // mode (interlaceMode) to a value other than Progressive (PROGRESSIVE). + // to create a better quality interlaced output. In this case, each progressive + // frame from the input corresponds to an interlaced field in the output. Keep + // the default value, Basic interlacing, for all other output frame rates. With + // basic interlacing, MediaConvert performs any frame rate conversion first + // and then interlaces the frames. When you choose Optimized interlacing and + // you set your output frame rate to a value that isn't suitable for optimized + // interlacing, MediaConvert automatically falls back to basic interlacing. + // Required settings: To use optimized interlacing, you must set Telecine to + // None or Soft. You can't use optimized interlacing for hard telecine outputs. + // You must also set Interlace mode to a value other than Progressive. ScanTypeConversionMode *string `locationName:"scanTypeConversionMode" type:"string" enum:"AvcIntraScanTypeConversionMode"` // Ignore this setting unless your input frame rate is 23.976 or 24 frames per @@ -5045,17 +5014,14 @@ type AvcIntraSettings struct { // slow PAL, MediaConvert relabels the video frames to 25 fps and resamples // your audio to keep it synchronized with the video. Note that enabling this // setting will slightly reduce the duration of your video. Required settings: - // You must also set Framerate to 25. In your JSON job specification, set (framerateControl) - // to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to - // 1. + // You must also set Framerate to 25. SlowPal *string `locationName:"slowPal" type:"string" enum:"AvcIntraSlowPal"` // When you do frame rate conversion from 23.976 frames per second (fps) to // 29.97 fps, and your output scan type is interlaced, you can optionally enable - // hard telecine (HARD) to create a smoother picture. When you keep the default - // value, None (NONE), MediaConvert does a standard frame rate conversion to - // 29.97 without doing anything with the field polarity to create a smoother - // picture. + // hard telecine to create a smoother picture. When you keep the default value, + // None, MediaConvert does a standard frame rate conversion to 29.97 without + // doing anything with the field polarity to create a smoother picture. 
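Editor's aside: the "Required settings" rule for optimized interlacing, spelled out as a sketch (enum strings are the API values from the removed doc text):

    avcIntra := &mediaconvert.AvcIntraSettings{
        ScanTypeConversionMode: aws.String("INTERLACED_OPTIMIZE"),
        InterlaceMode:          aws.String("TOP_FIELD"), // must be anything other than PROGRESSIVE
        Telecine:               aws.String("NONE"),      // optimized interlacing requires NONE or SOFT
    }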
Telecine *string `locationName:"telecine" type:"string" enum:"AvcIntraTelecine"` } @@ -5153,17 +5119,17 @@ func (s *AvcIntraSettings) SetTelecine(v string) *AvcIntraSettings { return s } -// Optional when you set AVC-Intra class (avcIntraClass) to Class 4K/2K (CLASS_4K_2K). -// When you set AVC-Intra class to a different value, this object isn't allowed. +// Optional when you set AVC-Intra class to Class 4K/2K. When you set AVC-Intra +// class to a different value, this object isn't allowed. type AvcIntraUhdSettings struct { _ struct{} `type:"structure"` - // Optional. Use Quality tuning level (qualityTuningLevel) to choose how many - // transcoding passes MediaConvert does with your video. When you choose Multi-pass - // (MULTI_PASS), your video quality is better and your output bitrate is more - // accurate. That is, the actual bitrate of your output is closer to the target - // bitrate defined in the specification. When you choose Single-pass (SINGLE_PASS), - // your encoding time is faster. The default behavior is Single-pass (SINGLE_PASS). + // Optional. Use Quality tuning level to choose how many transcoding passes + // MediaConvert does with your video. When you choose Multi-pass, your video + // quality is better and your output bitrate is more accurate. That is, the + // actual bitrate of your output is closer to the target bitrate defined in + // the specification. When you choose Single-pass, your encoding time is faster. + // The default behavior is Single-pass. QualityTuningLevel *string `locationName:"qualityTuningLevel" type:"string" enum:"AvcIntraUhdQualityTuningLevel"` } @@ -5315,8 +5281,6 @@ func (s *BandwidthReductionFilter) SetStrength(v string) *BandwidthReductionFilt // writes the captions directly on your video frames, replacing pixels of video // content with the captions. Set up burn-in captions in the same output as // your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/burn-in-output-captions.html. -// When you work directly in your JSON job specification, include this object -// and any required children when you set destinationType to BURN_IN. type BurninDestinationSettings struct { _ struct{} `type:"structure"` @@ -5328,26 +5292,24 @@ type BurninDestinationSettings struct { // centered) relative to those coordinates. Alignment *string `locationName:"alignment" type:"string" enum:"BurninSubtitleAlignment"` - // Ignore this setting unless Style passthrough (StylePassthrough) is set to - // Enabled and Font color (FontColor) set to Black, Yellow, Red, Green, Blue, - // or Hex. Use Apply font color (ApplyFontColor) for additional font color controls. - // When you choose White text only (WHITE_TEXT_ONLY), or leave blank, your font - // color setting only applies to white text in your input captions. For example, - // if your font color setting is Yellow, and your input captions have red and - // white text, your output captions will have red and yellow text. When you - // choose ALL_TEXT, your font color setting applies to all of your output captions - // text. + // Ignore this setting unless Style passthrough is set to Enabled and Font color + // is set to Black, Yellow, Red, Green, Blue, or Hex. Use Apply font color for + // additional font color controls. When you choose White text only, or leave + // blank, your font color setting only applies to white text in your input captions.
+ // For example, if your font color setting is Yellow, and your input captions + // have red and white text, your output captions will have red and yellow text. + // When you choose ALL_TEXT, your font color setting applies to all of your + // output captions text. ApplyFontColor *string `locationName:"applyFontColor" type:"string" enum:"BurninSubtitleApplyFontColor"` // Specify the color of the rectangle behind the captions. Leave background - // color (BackgroundColor) blank and set Style passthrough (StylePassthrough) - // to enabled to use the background color data from your input captions, if - // present. + // color blank and set Style passthrough to enabled to use the background color + // data from your input captions, if present. BackgroundColor *string `locationName:"backgroundColor" type:"string" enum:"BurninSubtitleBackgroundColor"` // Specify the opacity of the background rectangle. Enter a value from 0 to - // 255, where 0 is transparent and 255 is opaque. If Style passthrough (StylePassthrough) - // is set to enabled, leave blank to pass through the background style information + // 255, where 0 is transparent and 255 is opaque. If Style passthrough is set + // to enabled, leave blank to pass through the background style information // in your input captions to your output captions. If Style passthrough is set // to disabled, leave blank to use a value of 0 and remove all backgrounds from // your output captions. @@ -5355,33 +5317,33 @@ type BurninDestinationSettings struct { // Specify the font that you want the service to use for your burn in captions // when your input captions specify a font that MediaConvert doesn't support. - // When you set Fallback font (FallbackFont) to best match (BEST_MATCH), or - // leave blank, MediaConvert uses a supported font that most closely matches - // the font that your input captions specify. When there are multiple unsupported - // fonts in your input captions, MediaConvert matches each font with the supported - // font that matches best. When you explicitly choose a replacement font, MediaConvert - // uses that font to replace all unsupported fonts from your input. + // When you set Fallback font to best match, or leave blank, MediaConvert uses + // a supported font that most closely matches the font that your input captions + // specify. When there are multiple unsupported fonts in your input captions, + // MediaConvert matches each font with the supported font that matches best. + // When you explicitly choose a replacement font, MediaConvert uses that font + // to replace all unsupported fonts from your input. FallbackFont *string `locationName:"fallbackFont" type:"string" enum:"BurninSubtitleFallbackFont"` - // Specify the color of the burned-in captions text. Leave Font color (FontColor) - // blank and set Style passthrough (StylePassthrough) to enabled to use the - // font color data from your input captions, if present. + // Specify the color of the burned-in captions text. Leave Font color blank + // and set Style passthrough to enabled to use the font color data from your + // input captions, if present. FontColor *string `locationName:"fontColor" type:"string" enum:"BurninSubtitleFontColor"` // Specify the opacity of the burned-in captions. 255 is opaque; 0 is transparent. FontOpacity *int64 `locationName:"fontOpacity" type:"integer"` - // Specify the Font resolution (FontResolution) in DPI (dots per inch). + // Specify the Font resolution in DPI (dots per inch). 
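Editor's aside: a typical style-passthrough burn-in setup (sketch with the same assumed imports; "ENABLED" is the API value quoted further down for Style passthrough):

    burnIn := &mediaconvert.BurninDestinationSettings{
        StylePassthrough: aws.String("ENABLED"), // reuse style, color, and position from the input captions
        FontSize:         aws.Int64(0),          // 0, or leave unset, for automatic font size
        FontResolution:   aws.Int64(96),         // DPI; 96 is this field's documented minimum
    }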
FontResolution *int64 `locationName:"fontResolution" min:"96" type:"integer"` - // Set Font script (FontScript) to Automatically determined (AUTOMATIC), or - // leave blank, to automatically determine the font script in your input captions. - // Otherwise, set to Simplified Chinese (HANS) or Traditional Chinese (HANT) - // if your input font script uses Simplified or Traditional Chinese. + // Set Font script to Automatically determined, or leave blank, to automatically + // determine the font script in your input captions. Otherwise, set to Simplified + // Chinese (HANS) or Traditional Chinese (HANT) if your input font script uses + // Simplified or Traditional Chinese. FontScript *string `locationName:"fontScript" type:"string" enum:"FontScript"` - // Specify the Font size (FontSize) in pixels. Must be a positive integer. Set - // to 0, or leave blank, for automatic font size. + // Specify the Font size in pixels. Must be a positive integer. Set to 0, or + // leave blank, for automatic font size. FontSize *int64 `locationName:"fontSize" type:"integer"` // Ignore this setting unless your Font color is set to Hex. Enter either six @@ -5391,27 +5353,26 @@ type BurninDestinationSettings struct { // value of 0xBB. HexFontColor *string `locationName:"hexFontColor" min:"6" type:"string"` - // Specify font outline color. Leave Outline color (OutlineColor) blank and - // set Style passthrough (StylePassthrough) to enabled to use the font outline - // color data from your input captions, if present. + // Specify font outline color. Leave Outline color blank and set Style passthrough + // to enabled to use the font outline color data from your input captions, if + // present. OutlineColor *string `locationName:"outlineColor" type:"string" enum:"BurninSubtitleOutlineColor"` - // Specify the Outline size (OutlineSize) of the caption text, in pixels. Leave - // Outline size blank and set Style passthrough (StylePassthrough) to enabled - // to use the outline size data from your input captions, if present. + // Specify the Outline size of the caption text, in pixels. Leave Outline size + // blank and set Style passthrough to enabled to use the outline size data from + // your input captions, if present. OutlineSize *int64 `locationName:"outlineSize" type:"integer"` // Specify the color of the shadow cast by the captions. Leave Shadow color - // (ShadowColor) blank and set Style passthrough (StylePassthrough) to enabled - // to use the shadow color data from your input captions, if present. + // blank and set Style passthrough to enabled to use the shadow color data from + // your input captions, if present. ShadowColor *string `locationName:"shadowColor" type:"string" enum:"BurninSubtitleShadowColor"` // Specify the opacity of the shadow. Enter a value from 0 to 255, where 0 is - // transparent and 255 is opaque. If Style passthrough (StylePassthrough) is - // set to Enabled, leave Shadow opacity (ShadowOpacity) blank to pass through - // the shadow style information in your input captions to your output captions. - // If Style passthrough is set to disabled, leave blank to use a value of 0 - // and remove all shadows from your output captions. + // transparent and 255 is opaque. If Style passthrough is set to Enabled, leave + // Shadow opacity blank to pass through the shadow style information in your + // input captions to your output captions. If Style passthrough is set to disabled, + // leave blank to use a value of 0 and remove all shadows from your output captions. 
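Editor's aside: the hex font color pair (sketch; the value "HEX" and the eight-digit example are assumptions beyond the truncated comment above, which keeps only the trailing "value of 0xBB"):

    burnIn := &mediaconvert.BurninDestinationSettings{
        FontColor:    aws.String("HEX"),
        HexFontColor: aws.String("1122AABB"), // assumed reading: red 0x11, green 0x22, blue 0xAA, alpha 0xBB
    }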
ShadowOpacity *int64 `locationName:"shadowOpacity" type:"integer"` // Specify the horizontal offset of the shadow, relative to the captions in @@ -5420,38 +5381,35 @@ type BurninDestinationSettings struct { // Specify the vertical offset of the shadow relative to the captions in pixels. // A value of -2 would result in a shadow offset 2 pixels above the text. Leave - // Shadow y-offset (ShadowYOffset) blank and set Style passthrough (StylePassthrough) - // to enabled to use the shadow y-offset data from your input captions, if present. + // Shadow y-offset blank and set Style passthrough to enabled to use the shadow + // y-offset data from your input captions, if present. ShadowYOffset *int64 `locationName:"shadowYOffset" type:"integer"` - // Set Style passthrough (StylePassthrough) to ENABLED to use the available - // style, color, and position information from your input captions. MediaConvert - // uses default settings for any missing style and position information in your - // input captions. Set Style passthrough to DISABLED, or leave blank, to ignore - // the style and position information from your input captions and use default - // settings: white text with black outlining, bottom-center positioning, and - // automatic sizing. Whether you set Style passthrough to enabled or not, you - // can also choose to manually override any of the individual style and position - // settings. + // Set Style passthrough to ENABLED to use the available style, color, and position + // information from your input captions. MediaConvert uses default settings + // for any missing style and position information in your input captions. Set + // Style passthrough to DISABLED, or leave blank, to ignore the style and position + // information from your input captions and use default settings: white text + // with black outlining, bottom-center positioning, and automatic sizing. Whether + // you set Style passthrough to enabled or not, you can also choose to manually + // override any of the individual style and position settings. StylePassthrough *string `locationName:"stylePassthrough" type:"string" enum:"BurnInSubtitleStylePassthrough"` - // Specify whether the text spacing (TeletextSpacing) in your captions is set - // by the captions grid, or varies depending on letter width. Choose fixed grid - // (FIXED_GRID) to conform to the spacing specified in the captions file more - // accurately. Choose proportional (PROPORTIONAL) to make the text easier to - // read for closed captions. + // Specify whether the text spacing in your captions is set by the captions + // grid, or varies depending on letter width. Choose fixed grid to conform to + // the spacing specified in the captions file more accurately. Choose proportional + // to make the text easier to read for closed captions. TeletextSpacing *string `locationName:"teletextSpacing" type:"string" enum:"BurninSubtitleTeletextSpacing"` - // Specify the horizontal position (XPosition) of the captions, relative to - // the left side of the output in pixels. A value of 10 would result in the - // captions starting 10 pixels from the left of the output. If no explicit x_position - // is provided, the horizontal caption position will be determined by the alignment - // parameter. + // Specify the horizontal position of the captions, relative to the left side + // of the output in pixels. A value of 10 would result in the captions starting + // 10 pixels from the left of the output. 
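Editor's aside: explicit caption positioning with the two offset fields documented here (sketch; both values echo the 10-pixel example in the comments):

    burnIn := &mediaconvert.BurninDestinationSettings{
        XPosition: aws.Int64(10), // captions start 10 px from the left edge of the output
        YPosition: aws.Int64(10), // and 10 px from the top edge
    }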
If no explicit x_position is provided, + // the horizontal caption position will be determined by the alignment parameter. XPosition *int64 `locationName:"xPosition" type:"integer"` - // Specify the vertical position (YPosition) of the captions, relative to the - // top of the output in pixels. A value of 10 would result in the captions starting - // 10 pixels from the top of the output. If no explicit y_position is provided, + // Specify the vertical position of the captions, relative to the top of the + // output in pixels. A value of 10 would result in the captions starting 10 + // pixels from the top of the output. If no explicit y_position is provided, // the caption will be positioned towards the bottom of the output. YPosition *int64 `locationName:"yPosition" type:"integer"` } @@ -5717,12 +5675,10 @@ type CaptionDescription struct { // Smooth Streaming. CustomLanguageCode *string `locationName:"customLanguageCode" type:"string"` - // Settings related to one captions tab on the MediaConvert console. In your - // job JSON, an instance of captions DestinationSettings is equivalent to one - // captions tab in the console. Usually, one captions tab corresponds to one - // output captions track. Depending on your output captions format, one tab - // might correspond to a set of output captions tracks. For more information, - // see https://docs.aws.amazon.com/mediaconvert/latest/ug/including-captions.html. + // Settings related to one captions tab on the MediaConvert console. Usually, + // one captions tab corresponds to one output captions track. Depending on your + // output captions format, one tab might correspond to a set of output captions + // tracks. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/including-captions.html. DestinationSettings *CaptionDestinationSettings `locationName:"destinationSettings" type:"structure"` // Specify the language of this captions output track. For most captions output @@ -5821,12 +5777,10 @@ type CaptionDescriptionPreset struct { // Smooth Streaming. CustomLanguageCode *string `locationName:"customLanguageCode" type:"string"` - // Settings related to one captions tab on the MediaConvert console. In your - // job JSON, an instance of captions DestinationSettings is equivalent to one - // captions tab in the console. Usually, one captions tab corresponds to one - // output captions track. Depending on your output captions format, one tab - // might correspond to a set of output captions tracks. For more information, - // see https://docs.aws.amazon.com/mediaconvert/latest/ug/including-captions.html. + // Settings related to one captions tab on the MediaConvert console. Usually, + // one captions tab corresponds to one output captions track. Depending on your + // output captions format, one tab might correspond to a set of output captions + // tracks. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/including-captions.html. DestinationSettings *CaptionDestinationSettings `locationName:"destinationSettings" type:"structure"` // Specify the language of this captions output track. For most captions output @@ -5901,12 +5855,10 @@ func (s *CaptionDescriptionPreset) SetLanguageDescription(v string) *CaptionDesc return s } -// Settings related to one captions tab on the MediaConvert console. In your -// job JSON, an instance of captions DestinationSettings is equivalent to one -// captions tab in the console. Usually, one captions tab corresponds to one -// output captions track. 
Depending on your output captions format, one tab -// might correspond to a set of output captions tracks. For more information, -// see https://docs.aws.amazon.com/mediaconvert/latest/ug/including-captions.html. +// Settings related to one captions tab on the MediaConvert console. Usually, +// one captions tab corresponds to one output captions track. Depending on your +// output captions format, one tab might correspond to a set of output captions +// tracks. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/including-captions.html. type CaptionDestinationSettings struct { _ struct{} `type:"structure"` @@ -5914,8 +5866,6 @@ type CaptionDestinationSettings struct { // writes the captions directly on your video frames, replacing pixels of video // content with the captions. Set up burn-in captions in the same output as // your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/burn-in-output-captions.html. - // When you work directly in your JSON job specification, include this object - // and any required children when you set destinationType to BURN_IN. BurninDestinationSettings *BurninDestinationSettings `locationName:"burninDestinationSettings" type:"structure"` // Specify the format for this set of captions on this output. The default format @@ -5923,68 +5873,50 @@ type CaptionDestinationSettings struct { // constrains your choice of output captions format. For more information, see // https://docs.aws.amazon.com/mediaconvert/latest/ug/captions-support-tables.html. // If you are using SCTE-20 and you want to create an output that complies with - // the SCTE-43 spec, choose SCTE-20 plus embedded (SCTE20_PLUS_EMBEDDED). To - // create a non-compliant output where the embedded captions come first, choose - // Embedded plus SCTE-20 (EMBEDDED_PLUS_SCTE20). + // the SCTE-43 spec, choose SCTE-20 plus embedded. To create a non-compliant + // output where the embedded captions come first, choose Embedded plus SCTE-20. DestinationType *string `locationName:"destinationType" type:"string" enum:"CaptionDestinationType"` // Settings related to DVB-Sub captions. Set up DVB-Sub captions in the same // output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/dvb-sub-output-captions.html. - // When you work directly in your JSON job specification, include this object - // and any required children when you set destinationType to DVB_SUB. DvbSubDestinationSettings *DvbSubDestinationSettings `locationName:"dvbSubDestinationSettings" type:"structure"` // Settings related to CEA/EIA-608 and CEA/EIA-708 (also called embedded or // ancillary) captions. Set up embedded captions in the same output as your // video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/embedded-output-captions.html. - // When you work directly in your JSON job specification, include this object - // and any required children when you set destinationType to EMBEDDED, EMBEDDED_PLUS_SCTE20, - // or SCTE20_PLUS_EMBEDDED. EmbeddedDestinationSettings *EmbeddedDestinationSettings `locationName:"embeddedDestinationSettings" type:"structure"` // Settings related to IMSC captions. IMSC is a sidecar format that holds captions // in a file that is separate from the video container. Set up sidecar captions // in the same output group, but different output from your video. For more // information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html. 
- // When you work directly in your JSON job specification, include this object - // and any required children when you set destinationType to IMSC. ImscDestinationSettings *ImscDestinationSettings `locationName:"imscDestinationSettings" type:"structure"` // Settings related to SCC captions. SCC is a sidecar format that holds captions // in a file that is separate from the video container. Set up sidecar captions // in the same output group, but different output from your video. For more // information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/scc-srt-output-captions.html. - // When you work directly in your JSON job specification, include this object - // and any required children when you set destinationType to SCC. SccDestinationSettings *SccDestinationSettings `locationName:"sccDestinationSettings" type:"structure"` // Settings related to SRT captions. SRT is a sidecar format that holds captions // in a file that is separate from the video container. Set up sidecar captions - // in the same output group, but different output from your video. When you - // work directly in your JSON job specification, include this object and any - // required children when you set destinationType to SRT. + // in the same output group, but different output from your video. SrtDestinationSettings *SrtDestinationSettings `locationName:"srtDestinationSettings" type:"structure"` // Settings related to teletext captions. Set up teletext captions in the same // output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/teletext-output-captions.html. - // When you work directly in your JSON job specification, include this object - // and any required children when you set destinationType to TELETEXT. TeletextDestinationSettings *TeletextDestinationSettings `locationName:"teletextDestinationSettings" type:"structure"` // Settings related to TTML captions. TTML is a sidecar format that holds captions // in a file that is separate from the video container. Set up sidecar captions // in the same output group, but different output from your video. For more // information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html. - // When you work directly in your JSON job specification, include this object - // and any required children when you set destinationType to TTML. TtmlDestinationSettings *TtmlDestinationSettings `locationName:"ttmlDestinationSettings" type:"structure"` // Settings related to WebVTT captions. WebVTT is a sidecar format that holds // captions in a file that is separate from the video container. Set up sidecar // captions in the same output group, but different output from your video. // For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html. - // When you work directly in your JSON job specification, include this object - // and any required children when you set destinationType to WebVTT. WebvttDestinationSettings *WebvttDestinationSettings `locationName:"webvttDestinationSettings" type:"structure"` } @@ -6181,21 +6113,19 @@ func (s *CaptionSelector) SetSourceSettings(v *CaptionSourceSettings) *CaptionSe // Ignore this setting unless your input captions format is SCC. To have the // service compensate for differing frame rates between your input captions // and input video, specify the frame rate of the captions file. Specify this -// value as a fraction. 
When you work directly in your JSON job specification, -// use the settings framerateNumerator and framerateDenominator. For example, -// you might specify 24 / 1 for 24 fps, 25 / 1 for 25 fps, 24000 / 1001 for -// 23.976 fps, or 30000 / 1001 for 29.97 fps. +// value as a fraction. For example, you might specify 24 / 1 for 24 fps, 25 +// / 1 for 25 fps, 24000 / 1001 for 23.976 fps, or 30000 / 1001 for 29.97 fps. type CaptionSourceFramerate struct { _ struct{} `type:"structure"` // Specify the denominator of the fraction that represents the frame rate for - // the setting Caption source frame rate (CaptionSourceFramerate). Use this - // setting along with the setting Framerate numerator (framerateNumerator). + // the setting Caption source frame rate. Use this setting along with the setting + // Framerate numerator. FramerateDenominator *int64 `locationName:"framerateDenominator" min:"1" type:"integer"` // Specify the numerator of the fraction that represents the frame rate for - // the setting Caption source frame rate (CaptionSourceFramerate). Use this - // setting along with the setting Framerate denominator (framerateDenominator). + // the setting Caption source frame rate. Use this setting along with the setting + // Framerate denominator. FramerateNumerator *int64 `locationName:"framerateNumerator" min:"1" type:"integer"` } @@ -6266,8 +6196,8 @@ type CaptionSourceSettings struct { // of FileSourceSettings. FileSourceSettings *FileSourceSettings `locationName:"fileSourceSettings" type:"structure"` - // Use Source (SourceType) to identify the format of your input captions. The - // service cannot auto-detect caption format. + // Use Source to identify the format of your input captions. The service cannot + // auto-detect caption format. SourceType *string `locationName:"sourceType" type:"string" enum:"CaptionSourceType"` // Settings specific to Teletext caption sources, including Page number. @@ -6396,17 +6326,17 @@ func (s *CaptionSourceSettings) SetWebvttHlsSourceSettings(v *WebvttHlsSourceSet return s } -// Channel mapping (ChannelMapping) contains the group of fields that hold the -// remixing value for each channel, in dB. Specify remix values to indicate -// how much of the content from your input audio channel you want in your output -// audio channels. Each instance of the InputChannels or InputChannelsFineTune -// array specifies these values for one output channel. Use one instance of -// this array for each output channel. In the console, each array corresponds -// to a column in the graphical depiction of the mapping matrix. The rows of -// the graphical matrix correspond to input channels. Valid values are within -// the range from -60 (mute) through 6. A setting of 0 passes the input channel -// unchanged to the output channel (no attenuation or amplification). Use InputChannels -// or InputChannelsFineTune to specify your remix values. Don't use both. +// Channel mapping contains the group of fields that hold the remixing value +// for each channel, in dB. Specify remix values to indicate how much of the +// content from your input audio channel you want in your output audio channels. +// Each instance of the InputChannels or InputChannelsFineTune array specifies +// these values for one output channel. Use one instance of this array for each +// output channel. In the console, each array corresponds to a column in the +// graphical depiction of the mapping matrix. The rows of the graphical matrix +// correspond to input channels.
Valid values are within the range from -60 +// (mute) through 6. A setting of 0 passes the input channel unchanged to the +// output channel (no attenuation or amplification). Use InputChannels or InputChannelsFineTune +// to specify your remix values. Don't use both. type ChannelMapping struct { _ struct{} `type:"structure"` @@ -6612,7 +6542,7 @@ type CmafEncryptionSettings struct { ConstantInitializationVector *string `locationName:"constantInitializationVector" min:"32" type:"string"` // Specify the encryption scheme that you want the service to use when encrypting - // your CMAF segments. Choose AES-CBC subsample (SAMPLE-AES) or AES_CTR (AES-CTR). + // your CMAF segments. Choose AES-CBC subsample or AES_CTR. EncryptionMethod *string `locationName:"encryptionMethod" type:"string" enum:"CmafEncryptionType"` // When you use DRM with CMAF outputs, choose whether the service writes the @@ -6700,9 +6630,6 @@ func (s *CmafEncryptionSettings) SetType(v string) *CmafEncryptionSettings { } // Settings related to your CMAF output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html. -// When you work directly in your JSON job specification, include this object -// and any required children when you set Type, under OutputGroupSettings, to -// CMAF_GROUP_SETTINGS. type CmafGroupSettings struct { _ struct{} `type:"structure"` @@ -6720,9 +6647,8 @@ type CmafGroupSettings struct { BaseUrl *string `locationName:"baseUrl" type:"string"` // Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no - // tag. Otherwise, keep the default value Enabled (ENABLED) and control caching - // in your video distribution set up. For example, use the Cache-Control http - // header. + // tag. Otherwise, keep the default value Enabled and control caching in your + // video distribution setup. For example, use the Cache-Control http header. ClientCache *string `locationName:"clientCache" type:"string" enum:"CmafClientCache"` // Specification to use (RFC-6381 or the default RFC-4281) during m3u8 playlist @@ -6738,11 +6664,11 @@ type CmafGroupSettings struct { // in each AdaptationSet: Choose Distinct. DashManifestStyle *string `locationName:"dashManifestStyle" type:"string" enum:"DashManifestStyle"` - // Use Destination (Destination) to specify the S3 output location and the output - // filename base. Destination accepts format identifiers. If you do not specify - // the base filename in the URI, the service will use the filename of the input - // file. If your job has multiple inputs, the service uses the filename of the - // first input file. + // Use Destination to specify the S3 output location and the output filename + // base. Destination accepts format identifiers. If you do not specify the base + // filename in the URI, the service will use the filename of the input file. + // If your job has multiple inputs, the service uses the filename of the first + // input file. Destination *string `locationName:"destination" type:"string"` // Settings associated with the destination. Will vary based on the type of @@ -6754,21 +6680,19 @@ type CmafGroupSettings struct { // Specify the length, in whole seconds, of the mp4 fragments. When you don't // specify a value, MediaConvert defaults to 2. Related setting: Use Fragment - // length control (FragmentLengthControl) to specify whether the encoder enforces - // this value strictly. + // length control to specify whether the encoder enforces this value strictly.
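Editor's aside: a minimal CMAF output group (sketch with the same assumed imports; the bucket path is hypothetical, and the On-demand/Single-file pairing follows the rule stated just below for MpdProfile):

    cmaf := &mediaconvert.CmafGroupSettings{
        Destination:    aws.String("s3://mybucket/cmaf/main"), // hypothetical S3 location plus filename base
        FragmentLength: aws.Int64(2),                          // the documented default
        SegmentLength:  aws.Int64(10),                         // the documented default
        SegmentControl: aws.String("SINGLE_FILE"),
        MpdProfile:     aws.String("ON_DEMAND_PROFILE"),       // On-demand requires Single file segment control
    }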
FragmentLength *int64 `locationName:"fragmentLength" min:"1" type:"integer"` // Specify whether MediaConvert generates images for trick play. Keep the default - // value, None (NONE), to not generate any images. Choose Thumbnail (THUMBNAIL) - // to generate tiled thumbnails. Choose Thumbnail and full frame (THUMBNAIL_AND_FULLFRAME) - // to generate tiled thumbnails and full-resolution images of single frames. - // When you enable Write HLS manifest (WriteHlsManifest), MediaConvert creates - // a child manifest for each set of images that you generate and adds corresponding - // entries to the parent manifest. When you enable Write DASH manifest (WriteDashManifest), - // MediaConvert adds an entry in the .mpd manifest for each set of images that - // you generate. A common application for these images is Roku trick mode. The - // thumbnails and full-frame images that MediaConvert creates with this feature - // are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md + // value, None, to not generate any images. Choose Thumbnail to generate tiled + // thumbnails. Choose Thumbnail and full frame to generate tiled thumbnails + // and full-resolution images of single frames. When you enable Write HLS manifest, + // MediaConvert creates a child manifest for each set of images that you generate + // and adds corresponding entries to the parent manifest. When you enable Write + // DASH manifest, MediaConvert adds an entry in the .mpd manifest for each set + // of images that you generate. A common application for these images is Roku + // trick mode. The thumbnails and full-frame images that MediaConvert creates + // with this feature are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md ImageBasedTrickPlay *string `locationName:"imageBasedTrickPlay" type:"string" enum:"CmafImageBasedTrickPlay"` // Tile and thumbnail settings applicable when imageBasedTrickPlay is ADVANCED @@ -6807,22 +6731,21 @@ type CmafGroupSettings struct { MpdManifestBandwidthType *string `locationName:"mpdManifestBandwidthType" type:"string" enum:"CmafMpdManifestBandwidthType"` // Specify whether your DASH profile is on-demand or main. When you choose Main - // profile (MAIN_PROFILE), the service signals urn:mpeg:dash:profile:isoff-main:2011 - // in your .mpd DASH manifest. When you choose On-demand (ON_DEMAND_PROFILE), - // the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd. - // When you choose On-demand, you must also set the output group setting Segment - // control (SegmentControl) to Single file (SINGLE_FILE). + // profile, the service signals urn:mpeg:dash:profile:isoff-main:2011 in your + // .mpd DASH manifest. When you choose On-demand, the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 + // in your .mpd. When you choose On-demand, you must also set the output group + // setting Segment control to Single file. MpdProfile *string `locationName:"mpdProfile" type:"string" enum:"CmafMpdProfile"` // Use this setting only when your output video stream has B-frames, which causes // the initial presentation time stamp (PTS) to be offset from the initial decode // time stamp (DTS). Specify how MediaConvert handles PTS when writing time - // stamps in output DASH manifests. Choose Match initial PTS (MATCH_INITIAL_PTS) - // when you want MediaConvert to use the initial PTS as the first time stamp - // in the manifest. 
Choose Zero-based (ZERO_BASED) to have MediaConvert ignore - // the initial PTS in the video stream and instead write the initial time stamp - // as zero in the manifest. For outputs that don't have B-frames, the time stamps - // in your DASH manifests start at zero regardless of your choice here. + // stamps in output DASH manifests. Choose Match initial PTS when you want MediaConvert + // to use the initial PTS as the first time stamp in the manifest. Choose Zero-based + // to have MediaConvert ignore the initial PTS in the video stream and instead + // write the initial time stamp as zero in the manifest. For outputs that don't + // have B-frames, the time stamps in your DASH manifests start at zero regardless + // of your choice here. PtsOffsetHandlingForBFrames *string `locationName:"ptsOffsetHandlingForBFrames" type:"string" enum:"CmafPtsOffsetHandlingForBFrames"` // When set to SINGLE_FILE, a single output file is generated, which is internally @@ -6832,17 +6755,16 @@ type CmafGroupSettings struct { // Specify the length, in whole seconds, of each segment. When you don't specify // a value, MediaConvert defaults to 10. Related settings: Use Segment length - // control (SegmentLengthControl) to specify whether the encoder enforces this - // value strictly. Use Segment control (CmafSegmentControl) to specify whether - // MediaConvert creates separate segment files or one content file that has - // metadata to mark the segment boundaries. + // control to specify whether the encoder enforces this value strictly. Use + // Segment control to specify whether MediaConvert creates separate segment + // files or one content file that has metadata to mark the segment boundaries. SegmentLength *int64 `locationName:"segmentLength" min:"1" type:"integer"` // Specify how you want MediaConvert to determine the segment length. Choose - // Exact (EXACT) to have the encoder use the exact length that you specify with - // the setting Segment length (SegmentLength). This might result in extra I-frames. - // Choose Multiple of GOP (GOP_MULTIPLE) to have the encoder round up the segment - // lengths to match the next GOP boundary. + // Exact to have the encoder use the exact length that you specify with the + // setting Segment length. This might result in extra I-frames. Choose Multiple + // of GOP to have the encoder round up the segment lengths to match the next + // GOP boundary. SegmentLengthControl *string `locationName:"segmentLengthControl" type:"string" enum:"CmafSegmentLengthControl"` // Include or exclude RESOLUTION attribute for video in EXT-X-STREAM-INF tag @@ -6874,12 +6796,12 @@ type CmafGroupSettings struct { // When set to ENABLED, an Apple HLS manifest will be generated for this output. WriteHlsManifest *string `locationName:"writeHlsManifest" type:"string" enum:"CmafWriteHLSManifest"` - // When you enable Precise segment duration in DASH manifests (writeSegmentTimelineInRepresentation), - // your DASH manifest shows precise segment durations. The segment duration - // information appears inside the SegmentTimeline element, inside SegmentTemplate - // at the Representation level. When this feature isn't enabled, the segment - // durations in your DASH manifest are approximate. The segment duration information - // appears in the duration attribute of the SegmentTemplate element. + // When you enable Precise segment duration in DASH manifests, your DASH manifest + // shows precise segment durations. 
The segment duration information appears + // inside the SegmentTimeline element, inside SegmentTemplate at the Representation + // level. When this feature isn't enabled, the segment durations in your DASH + // manifest are approximate. The segment duration information appears in the + // duration attribute of the SegmentTemplate element. WriteSegmentTimelineInRepresentation *string `locationName:"writeSegmentTimelineInRepresentation" type:"string" enum:"CmafWriteSegmentTimelineInRepresentation"` } @@ -7218,17 +7140,17 @@ type CmfcSettings struct { // Specify this setting only when your output will be consumed by a downstream // repackaging workflow that is sensitive to very small duration differences - // between video and audio. For this situation, choose Match video duration - // (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default - // codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, - // MediaConvert pads the output audio streams with silence or trims them to - // ensure that the total duration of each audio stream is at least as long as - // the total duration of the video stream. After padding or trimming, the audio - // stream duration is no more than one frame longer than the video stream. MediaConvert - // applies audio padding or trimming only to the end of the last segment of - // the output. For unsegmented outputs, MediaConvert adds padding only to the - // end of the file. When you keep the default value, any minor discrepancies - // between audio and video duration will depend on your output audio codec. + // between video and audio. For this situation, choose Match video duration. + // In all other cases, keep the default value, Default codec duration. When + // you choose Match video duration, MediaConvert pads the output audio streams + // with silence or trims them to ensure that the total duration of each audio + // stream is at least as long as the total duration of the video stream. After + // padding or trimming, the audio stream duration is no more than one frame + // longer than the video stream. MediaConvert applies audio padding or trimming + // only to the end of the last segment of the output. For unsegmented outputs, + // MediaConvert adds padding only to the end of the file. When you keep the + // default value, any minor discrepancies between audio and video duration will + // depend on your output audio codec. AudioDuration *string `locationName:"audioDuration" type:"string" enum:"CmfcAudioDuration"` // Specify the audio rendition group for this audio rendition. Specify up to @@ -7238,7 +7160,7 @@ type CmfcSettings struct { // for Audio group ID, it appears in your manifest like this: #EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID="audio_aac_1". // Related setting: To associate the rendition group that this audio track belongs // to with a video rendition, include the same value that you provide here for - // that video output's setting Audio rendition sets (audioRenditionSets). + // that video output's setting Audio rendition sets. AudioGroupId *string `locationName:"audioGroupId" type:"string"` // List the audio rendition groups that you want included with this video rendition. @@ -7246,15 +7168,15 @@ type CmfcSettings struct { // rendition groups that have the audio group IDs "audio_aac_1" and "audio_dolby". // Then you would specify this value: "audio_aac_1,audio_dolby". 
Related setting: // The rendition groups that you include in your comma-separated list should - // all match values that you specify in the setting Audio group ID (AudioGroupId) - // for audio renditions in the same output group as this video rendition. Default - // behavior: If you don't specify anything here and for Audio group ID, MediaConvert - // puts each audio variant in its own audio rendition group and associates it - // with every video variant. Each value in your list appears in your HLS parent - // manifest in the EXT-X-STREAM-INF tag as the value for the AUDIO attribute. - // To continue the previous example, say that the file name for the child manifest - // for your video rendition is "amazing_video_1.m3u8". Then, in your parent - // manifest, each value will appear on separate lines, like this: #EXT-X-STREAM-INF:AUDIO="audio_aac_1"... + // all match values that you specify in the setting Audio group ID for audio + // renditions in the same output group as this video rendition. Default behavior: + // If you don't specify anything here and for Audio group ID, MediaConvert puts + // each audio variant in its own audio rendition group and associates it with + // every video variant. Each value in your list appears in your HLS parent manifest + // in the EXT-X-STREAM-INF tag as the value for the AUDIO attribute. To continue + // the previous example, say that the file name for the child manifest for your + // video rendition is "amazing_video_1.m3u8". Then, in your parent manifest, + // each value will appear on separate lines, like this: #EXT-X-STREAM-INF:AUDIO="audio_aac_1"... // amazing_video_1.m3u8 #EXT-X-STREAM-INF:AUDIO="audio_dolby"... amazing_video_1.m3u8 AudioRenditionSets *string `locationName:"audioRenditionSets" type:"string"` @@ -7264,32 +7186,30 @@ type CmfcSettings struct { // writes for the DEFAULT and AUTOSELECT attributes of the EXT-X-MEDIA entry // for the audio variant. For more information about these attributes, see the // Apple documentation article https://developer.apple.com/documentation/http_live_streaming/example_playlists_for_http_live_streaming/adding_alternate_media_to_a_playlist. - // Choose Alternate audio, auto select, default (ALTERNATE_AUDIO_AUTO_SELECT_DEFAULT) - // to set DEFAULT=YES and AUTOSELECT=YES. Choose this value for only one variant - // in your output group. Choose Alternate audio, auto select, not default (ALTERNATE_AUDIO_AUTO_SELECT) - // to set DEFAULT=NO and AUTOSELECT=YES. Choose Alternate Audio, Not Auto Select - // to set DEFAULT=NO and AUTOSELECT=NO. When you don't specify a value for this - // setting, MediaConvert defaults to Alternate audio, auto select, default. - // When there is more than one variant in your output group, you must explicitly - // choose a value for this setting. + // Choose Alternate audio, auto select, default to set DEFAULT=YES and AUTOSELECT=YES. + // Choose this value for only one variant in your output group. Choose Alternate + // audio, auto select, not default to set DEFAULT=NO and AUTOSELECT=YES. Choose + // Alternate Audio, Not Auto Select to set DEFAULT=NO and AUTOSELECT=NO. When + // you don't specify a value for this setting, MediaConvert defaults to Alternate + // audio, auto select, default. When there is more than one variant in your + // output group, you must explicitly choose a value for this setting. 
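The rendition-group pairing described above is easier to see in code. A hypothetical sketch, with an invented group ID, of how the audio and video CmfcSettings reference each other:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	// Audio rendition: joins the group "audio_aac_1" and is written as
	// DEFAULT=YES,AUTOSELECT=YES in the parent manifest.
	audio := &mediaconvert.CmfcSettings{
		AudioGroupId:   aws.String("audio_aac_1"),
		AudioTrackType: aws.String("ALTERNATE_AUDIO_AUTO_SELECT_DEFAULT"),
	}
	// Video rendition: references that group, so it appears as the AUDIO
	// attribute of this variant's EXT-X-STREAM-INF tag.
	video := &mediaconvert.CmfcSettings{
		AudioRenditionSets: aws.String("audio_aac_1"),
	}
	fmt.Println(audio.String(), video.String())
}
```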
AudioTrackType *string `locationName:"audioTrackType" type:"string" enum:"CmfcAudioTrackType"` // Specify whether to flag this audio track as descriptive video service (DVS) - // in your HLS parent manifest. When you choose Flag (FLAG), MediaConvert includes + // in your HLS parent manifest. When you choose Flag, MediaConvert includes // the parameter CHARACTERISTICS="public.accessibility.describes-video" in the // EXT-X-MEDIA entry for this track. When you keep the default choice, Don't - // flag (DONT_FLAG), MediaConvert leaves this parameter out. The DVS flag can - // help with accessibility on Apple devices. For more information, see the Apple - // documentation. + // flag, MediaConvert leaves this parameter out. The DVS flag can help with + // accessibility on Apple devices. For more information, see the Apple documentation. DescriptiveVideoServiceFlag *string `locationName:"descriptiveVideoServiceFlag" type:"string" enum:"CmfcDescriptiveVideoServiceFlag"` - // Choose Include (INCLUDE) to have MediaConvert generate an HLS child manifest - // that lists only the I-frames for this rendition, in addition to your regular - // manifest for this rendition. You might use this manifest as part of a workflow - // that creates preview functions for your video. MediaConvert adds both the - // I-frame only child manifest and the regular child manifest to the parent - // manifest. When you don't need the I-frame only child manifest, keep the default - // value Exclude (EXCLUDE). + // Choose Include to have MediaConvert generate an HLS child manifest that lists + // only the I-frames for this rendition, in addition to your regular manifest + // for this rendition. You might use this manifest as part of a workflow that + // creates preview functions for your video. MediaConvert adds both the I-frame + // only child manifest and the regular child manifest to the parent manifest. + // When you don't need the I-frame only child manifest, keep the default value + // Exclude. IFrameOnlyManifest *string `locationName:"iFrameOnlyManifest" type:"string" enum:"CmfcIFrameOnlyManifest"` // To include key-length-value metadata in this output: Set KLV metadata insertion @@ -7307,45 +7227,44 @@ type CmfcSettings struct { // To leave these elements out of your output MPD manifest, set Manifest metadata // signaling to Disabled. To enable Manifest metadata signaling, you must also // set SCTE-35 source to Passthrough, ESAM SCTE-35 to insert, or ID3 metadata - // (TimedMetadata) to Passthrough. + // to Passthrough. ManifestMetadataSignaling *string `locationName:"manifestMetadataSignaling" type:"string" enum:"CmfcManifestMetadataSignaling"` // Use this setting only when you specify SCTE-35 markers from ESAM. Choose // INSERT to put SCTE-35 markers in this output at the insertion points that // you specify in an ESAM XML document. Provide the document in the setting - // SCC XML (sccXml). + // SCC XML. Scte35Esam *string `locationName:"scte35Esam" type:"string" enum:"CmfcScte35Esam"` // Ignore this setting unless you have SCTE-35 markers in your input video file. - // Choose Passthrough (PASSTHROUGH) if you want SCTE-35 markers that appear - // in your input to also appear in this output. Choose None (NONE) if you don't - // want those SCTE-35 markers in this output. + // Choose Passthrough if you want SCTE-35 markers that appear in your input + // to also appear in this output. Choose None if you don't want those SCTE-35 + // markers in this output. 
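A minimal sketch of the passthrough combination these comments describe, using the raw enum strings the comments name (illustrative only, not part of this diff):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	cmfc := &mediaconvert.CmfcSettings{
		Scte35Source:  aws.String("PASSTHROUGH"), // keep input SCTE-35 markers in this output
		TimedMetadata: aws.String("PASSTHROUGH"), // prerequisite for the other ID3 eMSG fields
	}
	fmt.Println(cmfc.String())
}
```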
Scte35Source *string `locationName:"scte35Source" type:"string" enum:"CmfcScte35Source"` - // To include ID3 metadata in this output: Set ID3 metadata (timedMetadata) - // to Passthrough (PASSTHROUGH). Specify this ID3 metadata in Custom ID3 metadata - // inserter (timedMetadataInsertion). MediaConvert writes each instance of ID3 - // metadata in a separate Event Message (eMSG) box. To exclude this ID3 metadata: - // Set ID3 metadata to None (NONE) or leave blank. + // To include ID3 metadata in this output: Set ID3 metadata to Passthrough. + // Specify this ID3 metadata in Custom ID3 metadata inserter. MediaConvert writes + // each instance of ID3 metadata in a separate Event Message (eMSG) box. To + // exclude this ID3 metadata: Set ID3 metadata to None or leave blank. TimedMetadata *string `locationName:"timedMetadata" type:"string" enum:"CmfcTimedMetadata"` // Specify the event message box (eMSG) version for ID3 timed metadata in your // output. For more information, see ISO/IEC 23009-1:2022 section 5.10.3.3.3 // Syntax. Leave blank to use the default value Version - // 1, you must also set ID3 metadata (timedMetadata) to Passthrough. + // 1, you must also set ID3 metadata to Passthrough. TimedMetadataBoxVersion *string `locationName:"timedMetadataBoxVersion" type:"string" enum:"CmfcTimedMetadataBoxVersion"` - // Specify the event message box (eMSG) scheme ID URI (scheme_id_uri) for ID3 - // timed metadata in your output. For more information, see ISO/IEC 23009-1:2022 - // section 5.10.3.3.4 Semantics. Leave blank to use the default value: https://aomedia.org/emsg/ID3 + // Specify the event message box (eMSG) scheme ID URI for ID3 timed metadata + // in your output. For more information, see ISO/IEC 23009-1:2022 section 5.10.3.3.4 + // Semantics. Leave blank to use the default value: https://aomedia.org/emsg/ID3 // When you specify a value for ID3 metadata scheme ID URI, you must also set - // ID3 metadata (timedMetadata) to Passthrough. + // ID3 metadata to Passthrough. TimedMetadataSchemeIdUri *string `locationName:"timedMetadataSchemeIdUri" type:"string"` // Specify the event message box (eMSG) value for ID3 timed metadata in your // output. For more information, see ISO/IEC 23009-1:2022 section 5.10.3.3.4 // Semantics. When you specify a value for ID3 Metadata Value, you must also - // set ID3 metadata (timedMetadata) to Passthrough. + // set ID3 metadata to Passthrough. TimedMetadataValue *string `locationName:"timedMetadataValue" type:"string"` } @@ -7483,13 +7402,11 @@ type ColorCorrector struct { // signaled in the output. These values don't affect the pixel values that are // encoded in the video stream. They are intended to help the downstream video // player display content in a way that reflects the intentions of the content - // creator. When you set Color space conversion (ColorSpaceConversion) to HDR - // 10 (FORCE_HDR10), these settings are required. You must set values for Max - // frame average light level (maxFrameAverageLightLevel) and Max content light - // level (maxContentLightLevel); these settings don't have a default value. - // The default values for the other HDR 10 metadata settings are defined by - // the P3D65 color space. For more information about MediaConvert HDR jobs, - // see https://docs.aws.amazon.com/console/mediaconvert/hdr. + // creator. When you set Color space conversion to HDR 10, these settings are + // required.
You must set values for Max frame average light level and Max content + // light level; these settings don't have a default value. The default values + // for the other HDR 10 metadata settings are defined by the P3D65 color space. + // For more information about MediaConvert HDR jobs, see https://docs.aws.amazon.com/console/mediaconvert/hdr. Hdr10Metadata *Hdr10Metadata `locationName:"hdr10Metadata" type:"structure"` // Specify how MediaConvert maps brightness and colors from your HDR input to @@ -7727,15 +7644,15 @@ type ContainerSettings struct { F4vSettings *F4vSettings `locationName:"f4vSettings" type:"structure"` // MPEG-2 TS container settings. These apply to outputs in a File output group - // when the output's container (ContainerType) is MPEG-2 Transport Stream (M2TS). - // In these assets, data is organized by the program map table (PMT). Each transport - // stream program contains subsets of data, including audio, video, and metadata. - // Each of these subsets of data has a numerical label called a packet identifier - // (PID). Each transport stream program corresponds to one MediaConvert output. - // The PMT lists the types of data in a program along with their PID. Downstream - // systems and players use the program map table to look up the PID for each - // type of data it accesses and then uses the PIDs to locate specific data within - // the asset. + // when the output's container is MPEG-2 Transport Stream (M2TS). In these assets, + // data is organized by the program map table (PMT). Each transport stream program + // contains subsets of data, including audio, video, and metadata. Each of these + // subsets of data has a numerical label called a packet identifier (PID). Each + // transport stream program corresponds to one MediaConvert output. The PMT + // lists the types of data in a program along with their PID. Downstream systems + // and players use the program map table to look up the PID for each type of + // data it accesses and then uses the PIDs to locate specific data within the + // asset. M2tsSettings *M2tsSettings `locationName:"m2tsSettings" type:"structure"` // These settings relate to the MPEG-2 transport stream (MPEG2-TS) container @@ -8649,10 +8566,10 @@ type DashIsoEncryptionSettings struct { // This setting can improve the compatibility of your output with video players // on obsolete devices. It applies only to DASH H.264 outputs with DRM encryption. - // Choose Unencrypted SEI (UNENCRYPTED_SEI) only to correct problems with playback - // on older devices. Otherwise, keep the default setting CENC v1 (CENC_V1). - // If you choose Unencrypted SEI, for that output, the service will exclude - // the access unit delimiter and will leave the SEI NAL units unencrypted. + // Choose Unencrypted SEI only to correct problems with playback on older devices. + // Otherwise, keep the default setting CENC v1. If you choose Unencrypted SEI, + // for that output, the service will exclude the access unit delimiter and will + // leave the SEI NAL units unencrypted. PlaybackDeviceCompatibility *string `locationName:"playbackDeviceCompatibility" type:"string" enum:"DashIsoPlaybackDeviceCompatibility"` // If your output group type is HLS, DASH, or Microsoft Smooth, use these settings @@ -8692,9 +8609,6 @@ func (s *DashIsoEncryptionSettings) SetSpekeKeyProvider(v *SpekeKeyProvider) *Da } // Settings related to your DASH output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html. 
-// When you work directly in your JSON job specification, include this object -// and any required children when you set Type, under OutputGroupSettings, to -// DASH_ISO_GROUP_SETTINGS. type DashIsoGroupSettings struct { _ struct{} `type:"structure"` @@ -8709,10 +8623,9 @@ type DashIsoGroupSettings struct { // the Dolby channel configuration tag, rather than the MPEG one. For example, // you might need to use this to make dynamic ad insertion work. Specify which // audio channel configuration scheme ID URI MediaConvert writes in your DASH - // manifest. Keep the default value, MPEG channel configuration (MPEG_CHANNEL_CONFIGURATION), - // to have MediaConvert write this: urn:mpeg:mpegB:cicp:ChannelConfiguration. - // Choose Dolby channel configuration (DOLBY_CHANNEL_CONFIGURATION) to have - // MediaConvert write this instead: tag:dolby.com,2014:dash:audio_channel_configuration:2011. + // manifest. Keep the default value, MPEG channel configuration, to have MediaConvert + // write this: urn:mpeg:mpegB:cicp:ChannelConfiguration. Choose Dolby channel + // configuration to have MediaConvert write this instead: tag:dolby.com,2014:dash:audio_channel_configuration:2011. AudioChannelConfigSchemeIdUri *string `locationName:"audioChannelConfigSchemeIdUri" type:"string" enum:"DashIsoGroupAudioChannelConfigSchemeIdUri"` // A partial URI prefix that will be put in the manifest (.mpd) file at the @@ -8729,11 +8642,11 @@ type DashIsoGroupSettings struct { // in each AdaptationSet: Choose Distinct. DashManifestStyle *string `locationName:"dashManifestStyle" type:"string" enum:"DashManifestStyle"` - // Use Destination (Destination) to specify the S3 output location and the output - // filename base. Destination accepts format identifiers. If you do not specify - // the base filename in the URI, the service will use the filename of the input - // file. If your job has multiple inputs, the service uses the filename of the - // first input file. + // Use Destination to specify the S3 output location and the output filename + // base. Destination accepts format identifiers. If you do not specify the base + // filename in the URI, the service will use the filename of the input file. + // If your job has multiple inputs, the service uses the filename of the first + // input file. Destination *string `locationName:"destination" type:"string"` // Settings associated with the destination. Will vary based on the type of @@ -8755,13 +8668,13 @@ type DashIsoGroupSettings struct { HbbtvCompliance *string `locationName:"hbbtvCompliance" type:"string" enum:"DashIsoHbbtvCompliance"` // Specify whether MediaConvert generates images for trick play. Keep the default - // value, None (NONE), to not generate any images. Choose Thumbnail (THUMBNAIL) - // to generate tiled thumbnails. Choose Thumbnail and full frame (THUMBNAIL_AND_FULLFRAME) - // to generate tiled thumbnails and full-resolution images of single frames. - // MediaConvert adds an entry in the .mpd manifest for each set of images that - // you generate. A common application for these images is Roku trick mode. The - // thumbnails and full-frame images that MediaConvert creates with this feature - // are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md + // value, None, to not generate any images. Choose Thumbnail to generate tiled + // thumbnails. Choose Thumbnail and full frame to generate tiled thumbnails + // and full-resolution images of single frames. 
MediaConvert adds an entry in + // the .mpd manifest for each set of images that you generate. A common application + // for these images is Roku trick mode. The thumbnails and full-frame images + // that MediaConvert creates with this feature are compatible with this Roku + // specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md ImageBasedTrickPlay *string `locationName:"imageBasedTrickPlay" type:"string" enum:"DashIsoImageBasedTrickPlay"` // Tile and thumbnail settings applicable when imageBasedTrickPlay is ADVANCED @@ -8793,22 +8706,21 @@ type DashIsoGroupSettings struct { MpdManifestBandwidthType *string `locationName:"mpdManifestBandwidthType" type:"string" enum:"DashIsoMpdManifestBandwidthType"` // Specify whether your DASH profile is on-demand or main. When you choose Main - // profile (MAIN_PROFILE), the service signals urn:mpeg:dash:profile:isoff-main:2011 - // in your .mpd DASH manifest. When you choose On-demand (ON_DEMAND_PROFILE), - // the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd. - // When you choose On-demand, you must also set the output group setting Segment - // control (SegmentControl) to Single file (SINGLE_FILE). + // profile, the service signals urn:mpeg:dash:profile:isoff-main:2011 in your + // .mpd DASH manifest. When you choose On-demand, the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 + // in your .mpd. When you choose On-demand, you must also set the output group + // setting Segment control to Single file. MpdProfile *string `locationName:"mpdProfile" type:"string" enum:"DashIsoMpdProfile"` // Use this setting only when your output video stream has B-frames, which causes // the initial presentation time stamp (PTS) to be offset from the initial decode // time stamp (DTS). Specify how MediaConvert handles PTS when writing time - // stamps in output DASH manifests. Choose Match initial PTS (MATCH_INITIAL_PTS) - // when you want MediaConvert to use the initial PTS as the first time stamp - // in the manifest. Choose Zero-based (ZERO_BASED) to have MediaConvert ignore - // the initial PTS in the video stream and instead write the initial time stamp - // as zero in the manifest. For outputs that don't have B-frames, the time stamps - // in your DASH manifests start at zero regardless of your choice here. + // stamps in output DASH manifests. Choose Match initial PTS when you want MediaConvert + // to use the initial PTS as the first time stamp in the manifest. Choose Zero-based + // to have MediaConvert ignore the initial PTS in the video stream and instead + // write the initial time stamp as zero in the manifest. For outputs that don't + // have B-frames, the time stamps in your DASH manifests start at zero regardless + // of your choice here. PtsOffsetHandlingForBFrames *string `locationName:"ptsOffsetHandlingForBFrames" type:"string" enum:"DashIsoPtsOffsetHandlingForBFrames"` // When set to SINGLE_FILE, a single output file is generated, which is internally @@ -8818,17 +8730,16 @@ type DashIsoGroupSettings struct { // Specify the length, in whole seconds, of each segment. When you don't specify // a value, MediaConvert defaults to 30. Related settings: Use Segment length - // control (SegmentLengthControl) to specify whether the encoder enforces this - // value strictly. Use Segment control (DashIsoSegmentControl) to specify whether - // MediaConvert creates separate segment files or one content file that has - // metadata to mark the segment boundaries. 
+ // control to specify whether the encoder enforces this value strictly. Use + // Segment control to specify whether MediaConvert creates separate segment + // files or one content file that has metadata to mark the segment boundaries. SegmentLength *int64 `locationName:"segmentLength" min:"1" type:"integer"` // Specify how you want MediaConvert to determine the segment length. Choose - // Exact (EXACT) to have the encoder use the exact length that you specify with - // the setting Segment length (SegmentLength). This might result in extra I-frames. - // Choose Multiple of GOP (GOP_MULTIPLE) to have the encoder round up the segment - // lengths to match the next GOP boundary. + // Exact to have the encoder use the exact length that you specify with the + // setting Segment length. This might result in extra I-frames. Choose Multiple + // of GOP to have the encoder round up the segment lengths to match the next + // GOP boundary. SegmentLengthControl *string `locationName:"segmentLengthControl" type:"string" enum:"DashIsoSegmentLengthControl"` // Specify the video sample composition time offset mode in the output fMP4 @@ -9159,10 +9070,10 @@ type Deinterlacer struct { // into progressive will probably result in lower quality video. Control *string `locationName:"control" type:"string" enum:"DeinterlacerControl"` - // Use Deinterlacer (DeinterlaceMode) to choose how the service will do deinterlacing. - // Default is Deinterlace. - Deinterlace converts interlaced to progressive. - // - Inverse telecine converts Hard Telecine 29.97i to progressive 23.976p. - // - Adaptive auto-detects and converts to progressive. + // Use Deinterlacer to choose how the service will do deinterlacing. Default + // is Deinterlace. - Deinterlace converts interlaced to progressive. - Inverse + // telecine converts Hard Telecine 29.97i to progressive 23.976p. - Adaptive + // auto-detects and converts to progressive. Mode *string `locationName:"mode" type:"string" enum:"DeinterlacerMode"` } @@ -9795,9 +9706,7 @@ func (s *DolbyVisionLevel6Metadata) SetMaxFall(v int64) *DolbyVisionLevel6Metada } // Use these settings to insert a DVB Network Information Table (NIT) in the -// transport stream of this output. When you work directly in your JSON job -// specification, include this object only when your job has a transport stream -// output and the container settings contain the object M2tsSettings. +// transport stream of this output. type DvbNitSettings struct { _ struct{} `type:"structure"` @@ -9866,9 +9775,7 @@ func (s *DvbNitSettings) SetNitInterval(v int64) *DvbNitSettings { } // Use these settings to insert a DVB Service Description Table (SDT) in the -// transport stream of this output. When you work directly in your JSON job -// specification, include this object only when your job has a transport stream -// output and the container settings contain the object M2tsSettings. +// transport stream of this output. type DvbSdtSettings struct { _ struct{} `type:"structure"` @@ -9956,8 +9863,6 @@ func (s *DvbSdtSettings) SetServiceProviderName(v string) *DvbSdtSettings { // Settings related to DVB-Sub captions. Set up DVB-Sub captions in the same // output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/dvb-sub-output-captions.html. -// When you work directly in your JSON job specification, include this object -// and any required children when you set destinationType to DVB_SUB.
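Before the DVB-Sub settings that follow, one sketch of the DASH ISO constraint noted above: choosing the On-demand profile requires single-file segment control. Illustrative only; the SegmentControl field name follows the SDK's usual pattern for the DashIsoSegmentControl enum referenced here:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	dash := &mediaconvert.DashIsoGroupSettings{
		SegmentLength: aws.Int64(30), // documented default
		// On-demand signals urn:mpeg:dash:profile:isoff-on-demand:2011 and,
		// per the comment above, requires Segment control = Single file.
		MpdProfile:     aws.String("ON_DEMAND_PROFILE"),
		SegmentControl: aws.String("SINGLE_FILE"),
	}
	fmt.Println(dash.String())
}
```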
type DvbSubDestinationSettings struct { _ struct{} `type:"structure"` @@ -9970,26 +9875,24 @@ type DvbSubDestinationSettings struct { // your DVB-Sub settings must be identical. Alignment *string `locationName:"alignment" type:"string" enum:"DvbSubtitleAlignment"` - // Ignore this setting unless Style Passthrough (StylePassthrough) is set to - // Enabled and Font color (FontColor) set to Black, Yellow, Red, Green, Blue, - // or Hex. Use Apply font color (ApplyFontColor) for additional font color controls. - // When you choose White text only (WHITE_TEXT_ONLY), or leave blank, your font - // color setting only applies to white text in your input captions. For example, - // if your font color setting is Yellow, and your input captions have red and - // white text, your output captions will have red and yellow text. When you - // choose ALL_TEXT, your font color setting applies to all of your output captions - // text. + // Ignore this setting unless Style Passthrough is set to Enabled and Font color + // set to Black, Yellow, Red, Green, Blue, or Hex. Use Apply font color for + // additional font color controls. When you choose White text only, or leave + // blank, your font color setting only applies to white text in your input captions. + // For example, if your font color setting is Yellow, and your input captions + // have red and white text, your output captions will have red and yellow text. + // When you choose ALL_TEXT, your font color setting applies to all of your + // output captions text. ApplyFontColor *string `locationName:"applyFontColor" type:"string" enum:"DvbSubtitleApplyFontColor"` // Specify the color of the rectangle behind the captions. Leave background - // color (BackgroundColor) blank and set Style passthrough (StylePassthrough) - // to enabled to use the background color data from your input captions, if - // present. + // color blank and set Style passthrough to enabled to use the background color + // data from your input captions, if present. BackgroundColor *string `locationName:"backgroundColor" type:"string" enum:"DvbSubtitleBackgroundColor"` // Specify the opacity of the background rectangle. Enter a value from 0 to - // 255, where 0 is transparent and 255 is opaque. If Style passthrough (StylePassthrough) - // is set to enabled, leave blank to pass through the background style information + // 255, where 0 is transparent and 255 is opaque. If Style passthrough is set + // to enabled, leave blank to pass through the background style information // in your input captions to your output captions. If Style passthrough is set // to disabled, leave blank to use a value of 0 and remove all backgrounds from // your output captions. Within your job settings, all of your DVB-Sub settings @@ -10008,68 +9911,68 @@ type DvbSubDestinationSettings struct { // must match. DdsHandling *string `locationName:"ddsHandling" type:"string" enum:"DvbddsHandling"` - // Use this setting, along with DDS y-coordinate (ddsYCoordinate), to specify - // the upper left corner of the display definition segment (DDS) display window. - // With this setting, specify the distance, in pixels, between the left side - // of the frame and the left side of the DDS display window. Keep the default - // value, 0, to have MediaConvert automatically choose this offset. Related - // setting: When you use this setting, you must set DDS handling (ddsHandling) - // to a value other than None (NONE). 
MediaConvert uses these values to determine - // whether to write page position data to the DDS or to the page composition - // segment (PCS). All burn-in and DVB-Sub font settings must match. + // Use this setting, along with DDS y-coordinate, to specify the upper left + // corner of the display definition segment (DDS) display window. With this + // setting, specify the distance, in pixels, between the left side of the frame + // and the left side of the DDS display window. Keep the default value, 0, to + // have MediaConvert automatically choose this offset. Related setting: When + // you use this setting, you must set DDS handling to a value other than None. + // MediaConvert uses these values to determine whether to write page position + // data to the DDS or to the page composition segment. All burn-in and DVB-Sub + // font settings must match. DdsXCoordinate *int64 `locationName:"ddsXCoordinate" type:"integer"` - // Use this setting, along with DDS x-coordinate (ddsXCoordinate), to specify - // the upper left corner of the display definition segment (DDS) display window. - // With this setting, specify the distance, in pixels, between the top of the - // frame and the top of the DDS display window. Keep the default value, 0, to - // have MediaConvert automatically choose this offset. Related setting: When - // you use this setting, you must set DDS handling (ddsHandling) to a value - // other than None (NONE). MediaConvert uses these values to determine whether - // to write page position data to the DDS or to the page composition segment - // (PCS). All burn-in and DVB-Sub font settings must match. + // Use this setting, along with DDS x-coordinate, to specify the upper left + // corner of the display definition segment (DDS) display window. With this + // setting, specify the distance, in pixels, between the top of the frame and + // the top of the DDS display window. Keep the default value, 0, to have MediaConvert + // automatically choose this offset. Related setting: When you use this setting, + // you must set DDS handling to a value other than None. MediaConvert uses these + // values to determine whether to write page position data to the DDS or to + // the page composition segment (PCS). All burn-in and DVB-Sub font settings + // must match. DdsYCoordinate *int64 `locationName:"ddsYCoordinate" type:"integer"` // Specify the font that you want the service to use for your burn in captions // when your input captions specify a font that MediaConvert doesn't support. - // When you set Fallback font (FallbackFont) to best match (BEST_MATCH), or - // leave blank, MediaConvert uses a supported font that most closely matches - // the font that your input captions specify. When there are multiple unsupported - // fonts in your input captions, MediaConvert matches each font with the supported - // font that matches best. When you explicitly choose a replacement font, MediaConvert - // uses that font to replace all unsupported fonts from your input. + // When you set Fallback font to best match, or leave blank, MediaConvert uses + // a supported font that most closely matches the font that your input captions + // specify. When there are multiple unsupported fonts in your input captions, + // MediaConvert matches each font with the supported font that matches best. + // When you explicitly choose a replacement font, MediaConvert uses that font + // to replace all unsupported fonts from your input. 
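A hypothetical sketch of the DDS display-window settings just described. Note that "SPECIFIED" is assumed to be one of the DvbddsHandling values other than NONE; this excerpt doesn't enumerate them:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	dvbSub := &mediaconvert.DvbSubDestinationSettings{
		// Assumption: "SPECIFIED" is a non-NONE DvbddsHandling value; the
		// DDS coordinates below are only honored when DDS handling isn't None.
		DdsHandling:    aws.String("SPECIFIED"),
		DdsXCoordinate: aws.Int64(0), // 0 = let MediaConvert choose the offset
		DdsYCoordinate: aws.Int64(0),
		Width:          aws.Int64(720), // documented default caption width
		Height:         aws.Int64(576), // documented default caption height
	}
	fmt.Println(dvbSub.String())
}
```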
FallbackFont *string `locationName:"fallbackFont" type:"string" enum:"DvbSubSubtitleFallbackFont"` - // Specify the color of the captions text. Leave Font color (FontColor) blank - // and set Style passthrough (StylePassthrough) to enabled to use the font color - // data from your input captions, if present. Within your job settings, all - // of your DVB-Sub settings must be identical. + // Specify the color of the captions text. Leave Font color blank and set Style + // passthrough to enabled to use the font color data from your input captions, + // if present. Within your job settings, all of your DVB-Sub settings must be + // identical. FontColor *string `locationName:"fontColor" type:"string" enum:"DvbSubtitleFontColor"` // Specify the opacity of the burned-in captions. 255 is opaque; 0 is transparent. Within // your job settings, all of your DVB-Sub settings must be identical. FontOpacity *int64 `locationName:"fontOpacity" type:"integer"` - // Specify the Font resolution (FontResolution) in DPI (dots per inch).Within - // your job settings, all of your DVB-Sub settings must be identical. + // Specify the Font resolution in DPI (dots per inch). Within your job settings, + // all of your DVB-Sub settings must be identical. FontResolution *int64 `locationName:"fontResolution" min:"96" type:"integer"` - // Set Font script (FontScript) to Automatically determined (AUTOMATIC), or - // leave blank, to automatically determine the font script in your input captions. - // Otherwise, set to Simplified Chinese (HANS) or Traditional Chinese (HANT) - // if your input font script uses Simplified or Traditional Chinese. Within - // your job settings, all of your DVB-Sub settings must be identical. + // Set Font script to Automatically determined, or leave blank, to automatically + // determine the font script in your input captions. Otherwise, set to Simplified + // Chinese (HANS) or Traditional Chinese (HANT) if your input font script uses + // Simplified or Traditional Chinese. Within your job settings, all of your + // DVB-Sub settings must be identical. FontScript *string `locationName:"fontScript" type:"string" enum:"FontScript"` - // Specify the Font size (FontSize) in pixels. Must be a positive integer. Set - // to 0, or leave blank, for automatic font size. Within your job settings, - // all of your DVB-Sub settings must be identical. + // Specify the Font size in pixels. Must be a positive integer. Set to 0, or + // leave blank, for automatic font size. Within your job settings, all of your + // DVB-Sub settings must be identical. FontSize *int64 `locationName:"fontSize" type:"integer"` // Specify the height, in pixels, of this set of DVB-Sub captions. The default // value is 576 pixels. Related setting: When you use this setting, you must - // set DDS handling (ddsHandling) to a value other than None (NONE). All burn-in - // and DVB-Sub font settings must match. + // set DDS handling to a value other than None. All burn-in and DVB-Sub font + // settings must match. Height *int64 `locationName:"height" min:"1" type:"integer"` // Ignore this setting unless your Font color is set to Hex. Enter either six @@ -10079,31 +9982,29 @@ type DvbSubDestinationSettings struct { // value of 0xBB. HexFontColor *string `locationName:"hexFontColor" min:"6" type:"string"` - // Specify font outline color. Leave Outline color (OutlineColor) blank and - // set Style passthrough (StylePassthrough) to enabled to use the font outline - // color data from your input captions, if present.
Within your job settings, - // all of your DVB-Sub settings must be identical. + // Specify font outline color. Leave Outline color blank and set Style passthrough + // to enabled to use the font outline color data from your input captions, if + // present. Within your job settings, all of your DVB-Sub settings must be identical. OutlineColor *string `locationName:"outlineColor" type:"string" enum:"DvbSubtitleOutlineColor"` - // Specify the Outline size (OutlineSize) of the caption text, in pixels. Leave - // Outline size blank and set Style passthrough (StylePassthrough) to enabled - // to use the outline size data from your input captions, if present. Within - // your job settings, all of your DVB-Sub settings must be identical. + // Specify the Outline size of the caption text, in pixels. Leave Outline size + // blank and set Style passthrough to enabled to use the outline size data from + // your input captions, if present. Within your job settings, all of your DVB-Sub + // settings must be identical. OutlineSize *int64 `locationName:"outlineSize" type:"integer"` // Specify the color of the shadow cast by the captions. Leave Shadow color - // (ShadowColor) blank and set Style passthrough (StylePassthrough) to enabled - // to use the shadow color data from your input captions, if present. Within - // your job settings, all of your DVB-Sub settings must be identical. + // blank and set Style passthrough to enabled to use the shadow color data from + // your input captions, if present. Within your job settings, all of your DVB-Sub + // settings must be identical. ShadowColor *string `locationName:"shadowColor" type:"string" enum:"DvbSubtitleShadowColor"` // Specify the opacity of the shadow. Enter a value from 0 to 255, where 0 is - // transparent and 255 is opaque. If Style passthrough (StylePassthrough) is - // set to Enabled, leave Shadow opacity (ShadowOpacity) blank to pass through - // the shadow style information in your input captions to your output captions. - // If Style passthrough is set to disabled, leave blank to use a value of 0 - // and remove all shadows from your output captions. Within your job settings, - // all of your DVB-Sub settings must be identical. + // transparent and 255 is opaque. If Style passthrough is set to Enabled, leave + // Shadow opacity blank to pass through the shadow style information in your + // input captions to your output captions. If Style passthrough is set to disabled, + // leave blank to use a value of 0 and remove all shadows from your output captions. + // Within your job settings, all of your DVB-Sub settings must be identical. ShadowOpacity *int64 `locationName:"shadowOpacity" type:"integer"` // Specify the horizontal offset of the shadow, relative to the captions in @@ -10113,20 +10014,19 @@ type DvbSubDestinationSettings struct { // Specify the vertical offset of the shadow relative to the captions in pixels. // A value of -2 would result in a shadow offset 2 pixels above the text. Leave - // Shadow y-offset (ShadowYOffset) blank and set Style passthrough (StylePassthrough) - // to enabled to use the shadow y-offset data from your input captions, if present. - // Within your job settings, all of your DVB-Sub settings must be identical. + // Shadow y-offset blank and set Style passthrough to enabled to use the shadow + // y-offset data from your input captions, if present. Within your job settings, + // all of your DVB-Sub settings must be identical. 
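And a short sketch of the style-passthrough pattern these comments keep returning to: enable Style passthrough and leave the individual style fields unset (illustrative only, not part of this diff):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	dvbSub := &mediaconvert.DvbSubDestinationSettings{
		// With style passthrough enabled, leave the color, outline, and
		// shadow fields nil so the style data present in the input captions
		// carries through to the output captions.
		StylePassthrough: aws.String("ENABLED"),
	}
	fmt.Println(dvbSub.String())
}
```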
ShadowYOffset *int64 `locationName:"shadowYOffset" type:"integer"` - // Set Style passthrough (StylePassthrough) to ENABLED to use the available - // style, color, and position information from your input captions. MediaConvert - // uses default settings for any missing style and position information in your - // input captions. Set Style passthrough to DISABLED, or leave blank, to ignore - // the style and position information from your input captions and use default - // settings: white text with black outlining, bottom-center positioning, and - // automatic sizing. Whether you set Style passthrough to enabled or not, you - // can also choose to manually override any of the individual style and position - // settings. + // Set Style passthrough to ENABLED to use the available style, color, and position + // information from your input captions. MediaConvert uses default settings + // for any missing style and position information in your input captions. Set + // Style passthrough to DISABLED, or leave blank, to ignore the style and position + // information from your input captions and use default settings: white text + // with black outlining, bottom-center positioning, and automatic sizing. Whether + // you set Style passthrough to enabled or not, you can also choose to manually + // override any of the individual style and position settings. StylePassthrough *string `locationName:"stylePassthrough" type:"string" enum:"DvbSubtitleStylePassthrough"` // Specify whether your DVB subtitles are standard or for hearing impaired. @@ -10134,31 +10034,29 @@ type DvbSubDestinationSettings struct { // dialogue. Choose standard if your subtitles include only dialogue. SubtitlingType *string `locationName:"subtitlingType" type:"string" enum:"DvbSubtitlingType"` - // Specify whether the Text spacing (TeletextSpacing) in your captions is set - // by the captions grid, or varies depending on letter width. Choose fixed grid - // (FIXED_GRID) to conform to the spacing specified in the captions file more - // accurately. Choose proportional (PROPORTIONAL) to make the text easier to - // read for closed captions. Within your job settings, all of your DVB-Sub settings - // must be identical. + // Specify whether the Text spacing in your captions is set by the captions + // grid, or varies depending on letter width. Choose fixed grid to conform to + // the spacing specified in the captions file more accurately. Choose proportional + // to make the text easier to read for closed captions. Within your job settings, + // all of your DVB-Sub settings must be identical. TeletextSpacing *string `locationName:"teletextSpacing" type:"string" enum:"DvbSubtitleTeletextSpacing"` // Specify the width, in pixels, of this set of DVB-Sub captions. The default // value is 720 pixels. Related setting: When you use this setting, you must - // set DDS handling (ddsHandling) to a value other than None (NONE). All burn-in - // and DVB-Sub font settings must match. + // set DDS handling to a value other than None. All burn-in and DVB-Sub font + // settings must match. Width *int64 `locationName:"width" min:"1" type:"integer"` - // Specify the horizontal position (XPosition) of the captions, relative to - // the left side of the outputin pixels. A value of 10 would result in the captions - // starting 10 pixels from the left ofthe output. If no explicit x_position - // is provided, the horizontal caption position will bedetermined by the alignment - // parameter. 
Within your job settings, all of your DVB-Sub settings must be - // identical. + // Specify the horizontal position of the captions, relative to the left side + // of the output in pixels. A value of 10 would result in the captions starting + // 10 pixels from the left of the output. If no explicit x_position is provided, + // the horizontal caption position will be determined by the alignment parameter. + // Within your job settings, all of your DVB-Sub settings must be identical. XPosition *int64 `locationName:"xPosition" type:"integer"` - // Specify the vertical position (YPosition) of the captions, relative to the - // top of the output in pixels. A value of 10 would result in the captions starting - // 10 pixels from the top of the output. If no explicit y_position is provided, + // Specify the vertical position of the captions, relative to the top of the + // output in pixels. A value of 10 would result in the captions starting 10 + // pixels from the top of the output. If no explicit y_position is provided, // the caption will be positioned towards the bottom of the output. Within your // job settings, all of your DVB-Sub settings must be identical. YPosition *int64 `locationName:"yPosition" type:"integer"` @@ -10420,9 +10318,7 @@ func (s *DvbSubSourceSettings) SetPid(v int64) *DvbSubSourceSettings { } // Use these settings to insert a DVB Time and Date Table (TDT) in the transport -// stream of this output. When you work directly in your JSON job specification, -// include this object only when your job has a transport stream output and -// the container settings contain the object M2tsSettings. +// stream of this output. type DvbTdtSettings struct { _ struct{} `type:"structure"` @@ -10468,8 +10364,7 @@ func (s *DvbTdtSettings) SetTdtInterval(v int64) *DvbTdtSettings { return s } -// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to -// the value EAC3_ATMOS. +// Required when you set Codec to the value EAC3_ATMOS. type Eac3AtmosSettings struct { _ struct{} `type:"structure"` @@ -10492,88 +10387,77 @@ type Eac3AtmosSettings struct { DialogueIntelligence *string `locationName:"dialogueIntelligence" type:"string" enum:"Eac3AtmosDialogueIntelligence"` // Specify whether MediaConvert should use any downmix metadata from your input - // file. Keep the default value, Custom (SPECIFIED) to provide downmix values - // in your job settings. Choose Follow source (INITIALIZE_FROM_SOURCE) to use - // the metadata from your input. Related settings--Use these settings to specify - // your downmix values: Left only/Right only surround (LoRoSurroundMixLevel), - // Left total/Right total surround (LtRtSurroundMixLevel), Left total/Right - // total center (LtRtCenterMixLevel), Left only/Right only center (LoRoCenterMixLevel), - // and Stereo downmix (StereoDownmix). When you keep Custom (SPECIFIED) for - // Downmix control (DownmixControl) and you don't specify values for the related - // settings, MediaConvert uses default values for those settings. + // file. Keep the default value, Custom, to provide downmix values in your job + // settings. Choose Follow source to use the metadata from your input. Related + // settings--Use these settings to specify your downmix values: Left only/Right + // only surround, Left total/Right total surround, Left total/Right total center, + // Left only/Right only center, and Stereo downmix.
When you keep Custom for + // Downmix control and you don't specify values for the related settings, MediaConvert + // uses default values for those settings. DownmixControl *string `locationName:"downmixControl" type:"string" enum:"Eac3AtmosDownmixControl"` // Choose the Dolby dynamic range control (DRC) profile that MediaConvert uses // when encoding the metadata in the Dolby stream for the line operating mode. - // Default value: Film light (ATMOS_STORAGE_DDP_COMPR_FILM_LIGHT) Related setting: - // To have MediaConvert use the value you specify here, keep the default value, - // Custom (SPECIFIED) for the setting Dynamic range control (DynamicRangeControl). - // Otherwise, MediaConvert ignores Dynamic range compression line (DynamicRangeCompressionLine). - // For information about the Dolby DRC operating modes and profiles, see the - // Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf. + // Default value: Film light. Related setting: To have MediaConvert use the value + // you specify here, keep the default value, Custom, for the setting Dynamic + // range control. Otherwise, MediaConvert ignores Dynamic range compression + // line. For information about the Dolby DRC operating modes and profiles, see + // the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf. DynamicRangeCompressionLine *string `locationName:"dynamicRangeCompressionLine" type:"string" enum:"Eac3AtmosDynamicRangeCompressionLine"` // Choose the Dolby dynamic range control (DRC) profile that MediaConvert uses // when encoding the metadata in the Dolby stream for the RF operating mode. - // Default value: Film light (ATMOS_STORAGE_DDP_COMPR_FILM_LIGHT) Related setting: - // To have MediaConvert use the value you specify here, keep the default value, - // Custom (SPECIFIED) for the setting Dynamic range control (DynamicRangeControl). - // Otherwise, MediaConvert ignores Dynamic range compression RF (DynamicRangeCompressionRf). - // For information about the Dolby DRC operating modes and profiles, see the - // Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf. + // Default value: Film light. Related setting: To have MediaConvert use the value + // you specify here, keep the default value, Custom, for the setting Dynamic + // range control. Otherwise, MediaConvert ignores Dynamic range compression + // RF. For information about the Dolby DRC operating modes and profiles, see + // the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf. DynamicRangeCompressionRf *string `locationName:"dynamicRangeCompressionRf" type:"string" enum:"Eac3AtmosDynamicRangeCompressionRf"` // Specify whether MediaConvert should use any dynamic range control metadata - // from your input file. Keep the default value, Custom (SPECIFIED), to provide - // dynamic range control values in your job settings. Choose Follow source (INITIALIZE_FROM_SOURCE) - // to use the metadata from your input. Related settings--Use these settings - // to specify your dynamic range control values: Dynamic range compression line - // (DynamicRangeCompressionLine) and Dynamic range compression RF (DynamicRangeCompressionRf).
- // When you keep the value Custom (SPECIFIED) for Dynamic range control (DynamicRangeControl) + // from your input file. Keep the default value, Custom, to provide dynamic + // range control values in your job settings. Choose Follow source to use the + // metadata from your input. Related settings--Use these settings to specify + // your dynamic range control values: Dynamic range compression line and Dynamic + // range compression RF. When you keep the value Custom for Dynamic range control // and you don't specify values for the related settings, MediaConvert uses // default values for those settings. DynamicRangeControl *string `locationName:"dynamicRangeControl" type:"string" enum:"Eac3AtmosDynamicRangeControl"` // Specify a value for the following Dolby Atmos setting: Left only/Right only // center mix (Lo/Ro center). MediaConvert uses this value for downmixing. Default - // value: -3 dB (ATMOS_STORAGE_DDP_MIXLEV_MINUS_3_DB). Valid values: 3.0, 1.5, - // 0.0, -1.5, -3.0, -4.5, and -6.0. Related setting: How the service uses this - // value depends on the value that you choose for Stereo downmix (Eac3AtmosStereoDownmix). - // Related setting: To have MediaConvert use this value, keep the default value, - // Custom (SPECIFIED) for the setting Downmix control (DownmixControl). Otherwise, - // MediaConvert ignores Left only/Right only center (LoRoCenterMixLevel). + // value: -3 dB. Valid values: 3.0, 1.5, 0.0, -1.5, -3.0, -4.5, and -6.0. Related + // setting: How the service uses this value depends on the value that you choose + // for Stereo downmix. Related setting: To have MediaConvert use this value, + // keep the default value, Custom for the setting Downmix control. Otherwise, + // MediaConvert ignores Left only/Right only center. LoRoCenterMixLevel *float64 `locationName:"loRoCenterMixLevel" type:"double"` - // Specify a value for the following Dolby Atmos setting: Left only/Right only - // (Lo/Ro surround). MediaConvert uses this value for downmixing. Default value: - // -3 dB (ATMOS_STORAGE_DDP_MIXLEV_MINUS_3_DB). Valid values: -1.5, -3.0, -4.5, - // -6.0, and -60. The value -60 mutes the channel. Related setting: How the - // service uses this value depends on the value that you choose for Stereo downmix - // (Eac3AtmosStereoDownmix). Related setting: To have MediaConvert use this - // value, keep the default value, Custom (SPECIFIED) for the setting Downmix - // control (DownmixControl). Otherwise, MediaConvert ignores Left only/Right - // only surround (LoRoSurroundMixLevel). + // Specify a value for the following Dolby Atmos setting: Left only/Right only. + // MediaConvert uses this value for downmixing. Default value: -3 dB. Valid + // values: -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. + // Related setting: How the service uses this value depends on the value that + // you choose for Stereo downmix. Related setting: To have MediaConvert use + // this value, keep the default value, Custom for the setting Downmix control. + // Otherwise, MediaConvert ignores Left only/Right only surround. LoRoSurroundMixLevel *float64 `locationName:"loRoSurroundMixLevel" type:"double"` // Specify a value for the following Dolby Atmos setting: Left total/Right total // center mix (Lt/Rt center). MediaConvert uses this value for downmixing. Default - // value: -3 dB (ATMOS_STORAGE_DDP_MIXLEV_MINUS_3_DB) Valid values: 3.0, 1.5, - // 0.0, -1.5, -3.0, -4.5, and -6.0. 
Related setting: How the service uses this - // value depends on the value that you choose for Stereo downmix (Eac3AtmosStereoDownmix). - // Related setting: To have MediaConvert use this value, keep the default value, - // Custom (SPECIFIED) for the setting Downmix control (DownmixControl). Otherwise, - // MediaConvert ignores Left total/Right total center (LtRtCenterMixLevel). + // value: -3 dB Valid values: 3.0, 1.5, 0.0, -1.5, -3.0, -4.5, and -6.0. Related + // setting: How the service uses this value depends on the value that you choose + // for Stereo downmix. Related setting: To have MediaConvert use this value, + // keep the default value, Custom for the setting Downmix control. Otherwise, + // MediaConvert ignores Left total/Right total center. LtRtCenterMixLevel *float64 `locationName:"ltRtCenterMixLevel" type:"double"` // Specify a value for the following Dolby Atmos setting: Left total/Right total // surround mix (Lt/Rt surround). MediaConvert uses this value for downmixing. - // Default value: -3 dB (ATMOS_STORAGE_DDP_MIXLEV_MINUS_3_DB) Valid values: - // -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. Related - // setting: How the service uses this value depends on the value that you choose - // for Stereo downmix (Eac3AtmosStereoDownmix). Related setting: To have MediaConvert - // use this value, keep the default value, Custom (SPECIFIED) for the setting - // Downmix control (DownmixControl). Otherwise, the service ignores Left total/Right - // total surround (LtRtSurroundMixLevel). + // Default value: -3 dB Valid values: -1.5, -3.0, -4.5, -6.0, and -60. The value + // -60 mutes the channel. Related setting: How the service uses this value depends + // on the value that you choose for Stereo downmix. Related setting: To have + // MediaConvert use this value, keep the default value, Custom for the setting + // Downmix control. Otherwise, the service ignores Left total/Right total surround. LtRtSurroundMixLevel *float64 `locationName:"ltRtSurroundMixLevel" type:"double"` // Choose how the service meters the loudness of your audio. @@ -10588,10 +10472,9 @@ type Eac3AtmosSettings struct { SpeechThreshold *int64 `locationName:"speechThreshold" type:"integer"` // Choose how the service does stereo downmixing. Default value: Not indicated - // (ATMOS_STORAGE_DDP_DMIXMOD_NOT_INDICATED) Related setting: To have MediaConvert - // use this value, keep the default value, Custom (SPECIFIED) for the setting - // Downmix control (DownmixControl). Otherwise, MediaConvert ignores Stereo - // downmix (StereoDownmix). + // Related setting: To have MediaConvert use this value, keep the default value, + // Custom for the setting Downmix control. Otherwise, MediaConvert ignores Stereo + // downmix. StereoDownmix *string `locationName:"stereoDownmix" type:"string" enum:"Eac3AtmosStereoDownmix"` // Specify whether your input audio has an additional center rear surround channel @@ -10735,8 +10618,7 @@ func (s *Eac3AtmosSettings) SetSurroundExMode(v string) *Eac3AtmosSettings { return s } -// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to -// the value EAC3. +// Required when you set Codec to the value EAC3. type Eac3Settings struct { _ struct{} `type:"structure"` @@ -10771,17 +10653,17 @@ type Eac3Settings struct { // Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert // uses when encoding the metadata in the Dolby Digital stream for the line // operating mode. 
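Editorial aside, not part of the upstream diff: a minimal Go sketch of how the Eac3AtmosSettings fields discussed above fit together in this SDK. The field names come from this file; the values are illustrative defaults, and "STEREO" is an assumed Eac3AtmosStereoDownmix enum value.

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	// Keep DownmixControl and DynamicRangeControl at SPECIFIED (shown in the
	// console as Custom) so the related fields below take effect; otherwise
	// MediaConvert ignores them, as the doc comments describe.
	atmos := &mediaconvert.Eac3AtmosSettings{
		DownmixControl:              aws.String("SPECIFIED"),
		DynamicRangeControl:         aws.String("SPECIFIED"),
		DynamicRangeCompressionLine: aws.String("FILM_LIGHT"), // documented default
		LoRoCenterMixLevel:          aws.Float64(-3.0),        // -3 dB, documented default
		LoRoSurroundMixLevel:        aws.Float64(-3.0),
		StereoDownmix:               aws.String("STEREO"), // assumed enum value
	}
	_ = atmos
}
```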
Related setting: When you use this setting, MediaConvert - // ignores any value you provide for Dynamic range compression profile (DynamicRangeCompressionProfile). - // For information about the Dolby Digital DRC operating modes and profiles, - // see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf. + // ignores any value you provide for Dynamic range compression profile. For + // information about the Dolby Digital DRC operating modes and profiles, see + // the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf. DynamicRangeCompressionLine *string `locationName:"dynamicRangeCompressionLine" type:"string" enum:"Eac3DynamicRangeCompressionLine"` // Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert // uses when encoding the metadata in the Dolby Digital stream for the RF operating // mode. Related setting: When you use this setting, MediaConvert ignores any - // value you provide for Dynamic range compression profile (DynamicRangeCompressionProfile). - // For information about the Dolby Digital DRC operating modes and profiles, - // see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf. + // value you provide for Dynamic range compression profile. For information + // about the Dolby Digital DRC operating modes and profiles, see the Dynamic + // Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf. DynamicRangeCompressionRf *string `locationName:"dynamicRangeCompressionRf" type:"string" enum:"Eac3DynamicRangeCompressionRf"` // When encoding 3/2 audio, controls whether the LFE channel is enabled @@ -10792,43 +10674,39 @@ type Eac3Settings struct { LfeFilter *string `locationName:"lfeFilter" type:"string" enum:"Eac3LfeFilter"` // Specify a value for the following Dolby Digital Plus setting: Left only/Right - // only center mix (Lo/Ro center). MediaConvert uses this value for downmixing. - // How the service uses this value depends on the value that you choose for - // Stereo downmix (Eac3StereoDownmix). Valid values: 3.0, 1.5, 0.0, -1.5, -3.0, - // -4.5, -6.0, and -60. The value -60 mutes the channel. This setting applies - // only if you keep the default value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2) - // for the setting Coding mode (Eac3CodingMode). If you choose a different value - // for Coding mode, the service ignores Left only/Right only center (loRoCenterMixLevel). + // only center mix. MediaConvert uses this value for downmixing. How the service + // uses this value depends on the value that you choose for Stereo downmix. + // Valid values: 3.0, 1.5, 0.0, -1.5, -3.0, -4.5, -6.0, and -60. The value -60 + // mutes the channel. This setting applies only if you keep the default value + // of 3/2 - L, R, C, Ls, Rs for the setting Coding mode. If you choose a different + // value for Coding mode, the service ignores Left only/Right only center. LoRoCenterMixLevel *float64 `locationName:"loRoCenterMixLevel" type:"double"` // Specify a value for the following Dolby Digital Plus setting: Left only/Right - // only (Lo/Ro surround). MediaConvert uses this value for downmixing. 
How the - // service uses this value depends on the value that you choose for Stereo downmix - // (Eac3StereoDownmix). Valid values: -1.5, -3.0, -4.5, -6.0, and -60. The value - // -60 mutes the channel. This setting applies only if you keep the default - // value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2) for the setting Coding mode - // (Eac3CodingMode). If you choose a different value for Coding mode, the service - // ignores Left only/Right only surround (loRoSurroundMixLevel). + // only. MediaConvert uses this value for downmixing. How the service uses this + // value depends on the value that you choose for Stereo downmix. Valid values: + // -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. This setting + // applies only if you keep the default value of 3/2 - L, R, C, Ls, Rs for the + // setting Coding mode. If you choose a different value for Coding mode, the + // service ignores Left only/Right only surround. LoRoSurroundMixLevel *float64 `locationName:"loRoSurroundMixLevel" type:"double"` // Specify a value for the following Dolby Digital Plus setting: Left total/Right - // total center mix (Lt/Rt center). MediaConvert uses this value for downmixing. - // How the service uses this value depends on the value that you choose for - // Stereo downmix (Eac3StereoDownmix). Valid values: 3.0, 1.5, 0.0, -1.5, -3.0, - // -4.5, -6.0, and -60. The value -60 mutes the channel. This setting applies - // only if you keep the default value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2) - // for the setting Coding mode (Eac3CodingMode). If you choose a different value - // for Coding mode, the service ignores Left total/Right total center (ltRtCenterMixLevel). + // total center mix. MediaConvert uses this value for downmixing. How the service + // uses this value depends on the value that you choose for Stereo downmix. + // Valid values: 3.0, 1.5, 0.0, -1.5, -3.0, -4.5, -6.0, and -60. The value -60 + // mutes the channel. This setting applies only if you keep the default value + // of 3/2 - L, R, C, Ls, Rs for the setting Coding mode. If you choose a different + // value for Coding mode, the service ignores Left total/Right total center. LtRtCenterMixLevel *float64 `locationName:"ltRtCenterMixLevel" type:"double"` // Specify a value for the following Dolby Digital Plus setting: Left total/Right - // total surround mix (Lt/Rt surround). MediaConvert uses this value for downmixing. - // How the service uses this value depends on the value that you choose for - // Stereo downmix (Eac3StereoDownmix). Valid values: -1.5, -3.0, -4.5, -6.0, - // and -60. The value -60 mutes the channel. This setting applies only if you - // keep the default value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2) for the - // setting Coding mode (Eac3CodingMode). If you choose a different value for - // Coding mode, the service ignores Left total/Right total surround (ltRtSurroundMixLevel). + // total surround mix. MediaConvert uses this value for downmixing. How the + // service uses this value depends on the value that you choose for Stereo downmix. + // Valid values: -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. + // This setting applies only if you keep the default value of 3/2 - L, R, C, + // Ls, Rs for the setting Coding mode. If you choose a different value for Coding + // mode, the service ignores Left total/Right total surround. 
LtRtSurroundMixLevel *float64 `locationName:"ltRtSurroundMixLevel" type:"double"` // When set to FOLLOW_INPUT, encoder metadata will be sourced from the DD, DD+, @@ -10850,9 +10728,9 @@ type Eac3Settings struct { SampleRate *int64 `locationName:"sampleRate" min:"48000" type:"integer"` // Choose how the service does stereo downmixing. This setting only applies - // if you keep the default value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2) - // for the setting Coding mode (Eac3CodingMode). If you choose a different value - // for Coding mode, the service ignores Stereo downmix (Eac3StereoDownmix). + // if you keep the default value of 3/2 - L, R, C, Ls, Rs for the setting Coding + // mode. If you choose a different value for Coding mode, the service ignores + // Stereo downmix. StereoDownmix *string `locationName:"stereoDownmix" type:"string" enum:"Eac3StereoDownmix"` // When encoding 3/2 audio, sets whether an extra center back surround channel @@ -11030,9 +10908,6 @@ func (s *Eac3Settings) SetSurroundMode(v string) *Eac3Settings { // Settings related to CEA/EIA-608 and CEA/EIA-708 (also called embedded or // ancillary) captions. Set up embedded captions in the same output as your // video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/embedded-output-captions.html. -// When you work directly in your JSON job specification, include this object -// and any required children when you set destinationType to EMBEDDED, EMBEDDED_PLUS_SCTE20, -// or SCTE20_PLUS_EMBEDDED. type EmbeddedDestinationSettings struct { _ struct{} `type:"structure"` @@ -11047,10 +10922,10 @@ type EmbeddedDestinationSettings struct { // both 608 and 708 captions embedded in your output stream. Optionally, specify // the 708 service number for each output captions channel. Choose a different // number for each channel. To use this setting, also set Force 608 to 708 upconvert - // (Convert608To708) to Upconvert (UPCONVERT) in your input captions selector - // settings. If you choose to upconvert but don't specify a 708 service number, - // MediaConvert uses the number that you specify for CC channel number (destination608ChannelNumber) - // for the 708 service number. For more information, see https://docs.aws.amazon.com/console/mediaconvert/dual-scc-to-embedded. + // to Upconvert in your input captions selector settings. If you choose to upconvert + // but don't specify a 708 service number, MediaConvert uses the number that + // you specify for CC channel number for the 708 service number. For more information, + // see https://docs.aws.amazon.com/console/mediaconvert/dual-scc-to-embedded. Destination708ServiceNumber *int64 `locationName:"destination708ServiceNumber" min:"1" type:"integer"` } @@ -11105,10 +10980,10 @@ type EmbeddedSourceSettings struct { _ struct{} `type:"structure"` // Specify whether this set of input captions appears in your outputs in both - // 608 and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes - // the captions data in two ways: it passes the 608 data through using the 608 - // compatibility bytes fields of the 708 wrapper, and it also translates the - // 608 data into 708. + // 608 and 708 format. If you choose Upconvert, MediaConvert includes the captions + // data in two ways: it passes the 608 data through using the 608 compatibility + // bytes fields of the 708 wrapper, and it also translates the 608 data into + // 708. 
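Aside, not part of the diff: the 608-to-708 upconvert relationship described above spans an input captions selector and an output destination. A sketch with this SDK's types; the channel and service numbers are illustrative.

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	// Input side: upconvert 608 captions so both 608 and 708 appear in outputs.
	src := &mediaconvert.EmbeddedSourceSettings{
		Convert608To708:        aws.String("UPCONVERT"),
		Source608ChannelNumber: aws.Int64(1),
	}

	// Output side: because upconvert is on, a 708 service number may be set;
	// if omitted, MediaConvert reuses the 608 channel number, per the docs above.
	dst := &mediaconvert.EmbeddedDestinationSettings{
		Destination608ChannelNumber: aws.Int64(1),
		Destination708ServiceNumber: aws.Int64(1),
	}
	_, _ = src, dst
}
```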
Convert608To708 *string `locationName:"convert608To708" type:"string" enum:"EmbeddedConvert608To708"` // Specifies the 608/708 channel number within the video track from which to @@ -11257,7 +11132,7 @@ type EsamSettings struct { // Specifies an ESAM ManifestConfirmConditionNotification XML as per OC-SP-ESAM-API-I03-131025. // The transcoder uses the manifest conditioning instructions that you provide - // in the setting MCC XML (mccXml). + // in the setting MCC XML. ManifestConfirmConditionNotification *EsamManifestConfirmConditionNotification `locationName:"manifestConfirmConditionNotification" type:"structure"` // Specifies the stream distance, in milliseconds, between the SCTE 35 messages @@ -11269,7 +11144,7 @@ type EsamSettings struct { // Specifies an ESAM SignalProcessingNotification XML as per OC-SP-ESAM-API-I03-131025. // The transcoder uses the signal processing instructions that you provide in - // the setting SCC XML (sccXml). + // the setting SCC XML. SignalProcessingNotification *EsamSignalProcessingNotification `locationName:"signalProcessingNotification" type:"structure"` } @@ -11316,12 +11191,10 @@ type EsamSignalProcessingNotification struct { // Provide your ESAM SignalProcessingNotification XML document inside your JSON // job settings. Form the XML document as per OC-SP-ESAM-API-I03-131025. The // transcoder will use the signal processing instructions in the message that - // you supply. Provide your ESAM SignalProcessingNotification XML document inside - // your JSON job settings. For your MPEG2-TS file outputs, if you want the service - // to place SCTE-35 markers at the insertion points you specify in the XML document, - // you must also enable SCTE-35 ESAM (scte35Esam). Note that you can either - // specify an ESAM XML document or enable SCTE-35 passthrough. You can't do - // both. + // you supply. For your MPEG2-TS file outputs, if you want the service to place + // SCTE-35 markers at the insertion points you specify in the XML document, + // you must also enable SCTE-35 ESAM. Note that you can either specify an ESAM + // XML document or enable SCTE-35 passthrough. You can't do both. SccXml *string `locationName:"sccXml" type:"string"` } @@ -11434,17 +11307,15 @@ func (s *F4vSettings) SetMoovPlacement(v string) *F4vSettings { // Settings related to your File output group. MediaConvert uses this group // of settings to generate a single standalone file, rather than a streaming -// package. When you work directly in your JSON job specification, include this -// object and any required children when you set Type, under OutputGroupSettings, -// to FILE_GROUP_SETTINGS. +// package. type FileGroupSettings struct { _ struct{} `type:"structure"` - // Use Destination (Destination) to specify the S3 output location and the output - // filename base. Destination accepts format identifiers. If you do not specify - // the base filename in the URI, the service will use the filename of the input - // file. If your job has multiple inputs, the service uses the filename of the - // first input file. + // Use Destination to specify the S3 output location and the output filename + // base. Destination accepts format identifiers. If you do not specify the base + // filename in the URI, the service will use the filename of the input file. + // If your job has multiple inputs, the service uses the filename of the first + // input file. Destination *string `locationName:"destination" type:"string"` // Settings associated with the destination. 
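Aside, not part of the diff: a sketch of the Destination behavior described above, using this SDK's types. The bucket and prefix are placeholders.

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	// "my-bucket" and the key prefix are placeholders. Because the URI ends in
	// a prefix with no basename, MediaConvert falls back to the filename of the
	// (first) input, as described above.
	fileGroup := &mediaconvert.FileGroupSettings{
		Destination: aws.String("s3://my-bucket/outputs/"),
	}
	_ = fileGroup
}
```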
Will vary based on the type of @@ -11490,10 +11361,10 @@ type FileSourceSettings struct { _ struct{} `type:"structure"` // Specify whether this set of input captions appears in your outputs in both - // 608 and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes - // the captions data in two ways: it passes the 608 data through using the 608 - // compatibility bytes fields of the 708 wrapper, and it also translates the - // 608 data into 708. + // 608 and 708 format. If you choose Upconvert, MediaConvert includes the captions + // data in two ways: it passes the 608 data through using the 608 compatibility + // bytes fields of the 708 wrapper, and it also translates the 608 data into + // 708. Convert608To708 *string `locationName:"convert608To708" type:"string" enum:"FileSourceConvert608To708"` // Choose the presentation style of your input SCC captions. To use the same @@ -11505,10 +11376,8 @@ type FileSourceSettings struct { // Ignore this setting unless your input captions format is SCC. To have the // service compensate for differing frame rates between your input captions // and input video, specify the frame rate of the captions file. Specify this - // value as a fraction. When you work directly in your JSON job specification, - // use the settings framerateNumerator and framerateDenominator. For example, - // you might specify 24 / 1 for 24 fps, 25 / 1 for 25 fps, 24000 / 1001 for - // 23.976 fps, or 30000 / 1001 for 29.97 fps. + // value as a fraction. For example, you might specify 24 / 1 for 24 fps, 25 + // / 1 for 25 fps, 24000 / 1001 for 23.976 fps, or 30000 / 1001 for 29.97 fps. Framerate *CaptionSourceFramerate `locationName:"framerate" type:"structure"` // External caption file used for loading captions. Accepted file extensions @@ -11521,19 +11390,18 @@ type FileSourceSettings struct { // For example, type 15 to add 15 seconds to all the times in the captions file. // Type -5 to subtract 5 seconds from the times in the captions file. You can // optionally specify your time delta in milliseconds instead of seconds. When - // you do so, set the related setting, Time delta units (TimeDeltaUnits) to - // Milliseconds (MILLISECONDS). Note that, when you specify a time delta for - // timecode-based caption sources, such as SCC and STL, and your time delta - // isn't a multiple of the input frame rate, MediaConvert snaps the captions - // to the nearest frame. For example, when your input video frame rate is 25 - // fps and you specify 1010ms for time delta, MediaConvert delays your captions - // by 1000 ms. + // you do so, set the related setting, Time delta units to Milliseconds. Note + // that, when you specify a time delta for timecode-based caption sources, such + // as SCC and STL, and your time delta isn't a multiple of the input frame rate, + // MediaConvert snaps the captions to the nearest frame. For example, when your + // input video frame rate is 25 fps and you specify 1010ms for time delta, MediaConvert + // delays your captions by 1000 ms. TimeDelta *int64 `locationName:"timeDelta" type:"integer"` - // When you use the setting Time delta (TimeDelta) to adjust the sync between - // your sidecar captions and your video, use this setting to specify the units - // for the delta that you specify. When you don't specify a value for Time delta - // units (TimeDeltaUnits), MediaConvert uses seconds by default. 
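Aside, not part of the diff: a sketch of the time-delta snapping example from the doc comment above, using this SDK's types; the S3 path is a placeholder.

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	// Delay sidecar SCC captions by roughly one second, in milliseconds. With a
	// 25 fps input, 1010 ms is not a multiple of the 40 ms frame duration, so
	// MediaConvert snaps the delay to 1000 ms, as the doc comment above notes.
	fileSource := &mediaconvert.FileSourceSettings{
		SourceFile:     aws.String("s3://my-bucket/captions/input.scc"), // placeholder
		TimeDelta:      aws.Int64(1010),
		TimeDeltaUnits: aws.String("MILLISECONDS"),
	}
	_ = fileSource
}
```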
+ // When you use the setting Time delta to adjust the sync between your sidecar + // captions and your video, use this setting to specify the units for the delta + // that you specify. When you don't specify a value for Time delta units, MediaConvert + // uses seconds by default. TimeDeltaUnits *string `locationName:"timeDeltaUnits" type:"string" enum:"FileSourceTimeDeltaUnits"` } @@ -11742,8 +11610,7 @@ func (s *ForceIncludeRenditionSize) SetWidth(v int64) *ForceIncludeRenditionSize return s } -// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to -// the value FRAME_CAPTURE. +// Required when you set Codec to the value FRAME_CAPTURE. type FrameCaptureSettings struct { _ struct{} `type:"structure"` @@ -12227,7 +12094,7 @@ func (s *GetQueueOutput) SetQueue(v *Queue) *GetQueueOutput { } // Settings for quality-defined variable bitrate encoding with the H.264 codec. -// Use these settings only when you set QVBR for Rate control mode (RateControlMode). +// Use these settings only when you set QVBR for Rate control mode. type H264QvbrSettings struct { _ struct{} `type:"structure"` @@ -12239,17 +12106,17 @@ type H264QvbrSettings struct { // by the number of seconds of encoded output. MaxAverageBitrate *int64 `locationName:"maxAverageBitrate" min:"1000" type:"integer"` - // Use this setting only when you set Rate control mode (RateControlMode) to - // QVBR. Specify the target quality level for this output. MediaConvert determines - // the right number of bits to use for each part of the video to maintain the - // video quality that you specify. When you keep the default value, AUTO, MediaConvert - // picks a quality level for you, based on characteristics of your input video. - // If you prefer to specify a quality level, specify a number from 1 through - // 10. Use higher numbers for greater quality. Level 10 results in nearly lossless - // compression. The quality level for most broadcast-quality transcodes is between - // 6 and 9. Optionally, to specify a value between whole numbers, also provide - // a value for the setting qvbrQualityLevelFineTune. For example, if you want - // your QVBR quality level to be 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune + // Use this setting only when you set Rate control mode to QVBR. Specify the + // target quality level for this output. MediaConvert determines the right number + // of bits to use for each part of the video to maintain the video quality that + // you specify. When you keep the default value, AUTO, MediaConvert picks a + // quality level for you, based on characteristics of your input video. If you + // prefer to specify a quality level, specify a number from 1 through 10. Use + // higher numbers for greater quality. Level 10 results in nearly lossless compression. + // The quality level for most broadcast-quality transcodes is between 6 and + // 9. Optionally, to specify a value between whole numbers, also provide a value + // for the setting qvbrQualityLevelFineTune. For example, if you want your QVBR + // quality level to be 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune // to .33. QvbrQualityLevel *int64 `locationName:"qvbrQualityLevel" min:"1" type:"integer"` @@ -12314,20 +12181,19 @@ func (s *H264QvbrSettings) SetQvbrQualityLevelFineTune(v float64) *H264QvbrSetti return s } -// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to -// the value H_264. +// Required when you set Codec to the value H_264. 
type H264Settings struct { _ struct{} `type:"structure"` - // Keep the default value, Auto (AUTO), for this setting to have MediaConvert - // automatically apply the best types of quantization for your video content. - // When you want to apply your quantization settings manually, you must set - // H264AdaptiveQuantization to a value other than Auto (AUTO). Use this setting - // to specify the strength of any adaptive quantization filters that you enable. - // If you don't want MediaConvert to do any adaptive quantization in this transcode, - // set Adaptive quantization (H264AdaptiveQuantization) to Off (OFF). Related - // settings: The value that you choose here applies to the following settings: - // H264FlickerAdaptiveQuantization, H264SpatialAdaptiveQuantization, and H264TemporalAdaptiveQuantization. + // Keep the default value, Auto, for this setting to have MediaConvert automatically + // apply the best types of quantization for your video content. When you want + // to apply your quantization settings manually, you must set H264AdaptiveQuantization + // to a value other than Auto. Use this setting to specify the strength of any + // adaptive quantization filters that you enable. If you don't want MediaConvert + // to do any adaptive quantization in this transcode, set Adaptive quantization + // to Off. Related settings: The value that you choose here applies to the following + // settings: H264FlickerAdaptiveQuantization, H264SpatialAdaptiveQuantization, + // and H264TemporalAdaptiveQuantization. AdaptiveQuantization *string `locationName:"adaptiveQuantization" type:"string" enum:"H264AdaptiveQuantization"` // The Bandwidth reduction filter increases the video quality of your output @@ -12346,7 +12212,7 @@ type H264Settings struct { Bitrate *int64 `locationName:"bitrate" min:"1000" type:"integer"` // Specify an H.264 level that is consistent with your output video settings. - // If you aren't sure what level to specify, choose Auto (AUTO). + // If you aren't sure what level to specify, choose Auto. CodecLevel *string `locationName:"codecLevel" type:"string" enum:"H264CodecLevel"` // H.264 Profile. High 4:2:2 and 10-bit profiles are only available with the @@ -12368,9 +12234,9 @@ type H264Settings struct { // The video encoding method for your MPEG-4 AVC output. Keep the default value, // PAFF, to have MediaConvert use PAFF encoding for interlaced outputs. Choose - // Force field (FORCE_FIELD) to disable PAFF encoding and create separate interlaced - // fields. Choose MBAFF to disable PAFF and have MediaConvert use MBAFF encoding - // for interlaced outputs. + // Force field to disable PAFF encoding and create separate interlaced fields. + // Choose MBAFF to disable PAFF and have MediaConvert use MBAFF encoding for + // interlaced outputs. FieldEncoding *string `locationName:"fieldEncoding" type:"string" enum:"H264FieldEncoding"` // Only use this setting when you change the default value, AUTO, for the setting @@ -12378,14 +12244,13 @@ type H264Settings struct { // and all other adaptive quantization from your JSON job specification, MediaConvert // automatically applies the best types of quantization for your video content. // When you set H264AdaptiveQuantization to a value other than AUTO, the default - // value for H264FlickerAdaptiveQuantization is Disabled (DISABLED). Change - // this value to Enabled (ENABLED) to reduce I-frame pop. 
I-frame pop appears - // as a visual flicker that can arise when the encoder saves bits by copying - // some macroblocks many times from frame to frame, and then refreshes them - // at the I-frame. When you enable this setting, the encoder updates these macroblocks - // slightly more often to smooth out the flicker. To manually enable or disable - // H264FlickerAdaptiveQuantization, you must set Adaptive quantization (H264AdaptiveQuantization) - // to a value other than AUTO. + // value for H264FlickerAdaptiveQuantization is Disabled. Change this value + // to Enabled to reduce I-frame pop. I-frame pop appears as a visual flicker + // that can arise when the encoder saves bits by copying some macroblocks many + // times from frame to frame, and then refreshes them at the I-frame. When you + // enable this setting, the encoder updates these macroblocks slightly more + // often to smooth out the flicker. To manually enable or disable H264FlickerAdaptiveQuantization, + // you must set Adaptive quantization to a value other than AUTO. FlickerAdaptiveQuantization *string `locationName:"flickerAdaptiveQuantization" type:"string" enum:"H264FlickerAdaptiveQuantization"` // If you are using the console, use the Framerate setting to specify the frame @@ -12393,12 +12258,7 @@ type H264Settings struct { // video, choose Follow source. If you want to do frame rate conversion, choose // a frame rate from the dropdown list or choose Custom. The framerates shown // in the dropdown list are decimal approximations of fractions. If you choose - // Custom, specify your frame rate as a fraction. If you are creating your transcoding - // job specification as a JSON file without the console, use FramerateControl - // to specify which value the service uses for the frame rate for this output. - // Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate - // from the input. Choose SPECIFIED if you want the service to use the frame - // rate you specify in the settings FramerateNumerator and FramerateDenominator. + // Custom, specify your frame rate as a fraction. FramerateControl *string `locationName:"framerateControl" type:"string" enum:"H264FramerateControl"` // Choose the method that you want MediaConvert to use when increasing or decreasing @@ -12440,34 +12300,30 @@ type H264Settings struct { // Specify the relative frequency of open to closed GOPs in this output. For // example, if you want to allow four open GOPs and then require a closed GOP, // set this value to 5. We recommend that you have the transcoder automatically - // choose this value for you based on characteristics of your input video. To - // enable this automatic behavior, keep the default value by leaving this setting - // out of your JSON job specification. In the console, do this by keeping the - // default empty value. If you do explicitly specify a value, for segmented - // outputs, don't set this value to 0. + // choose this value for you based on characteristics of your input video. In + // the console, do this by keeping the default empty value. If you do explicitly + // specify a value, for segmented outputs, don't set this value to 0. GopClosedCadence *int64 `locationName:"gopClosedCadence" type:"integer"` - // Use this setting only when you set GOP mode control (GopSizeUnits) to Specified, - // frames (FRAMES) or Specified, seconds (SECONDS). Specify the GOP length using - // a whole number of frames or a decimal value of seconds. 
MediaConvert will - // interpret this value as frames or seconds depending on the value you choose - // for GOP mode control (GopSizeUnits). If you want to allow MediaConvert to - // automatically determine GOP size, leave GOP size blank and set GOP mode control - // to Auto (AUTO). If your output group specifies HLS, DASH, or CMAF, leave - // GOP size blank and set GOP mode control to Auto in each output in your output - // group. + // Use this setting only when you set GOP mode control to Specified, frames + // or Specified, seconds. Specify the GOP length using a whole number of frames + // or a decimal value of seconds. MediaConvert will interpret this value as + // frames or seconds depending on the value you choose for GOP mode control. + // If you want to allow MediaConvert to automatically determine GOP size, leave + // GOP size blank and set GOP mode control to Auto. If your output group specifies + // HLS, DASH, or CMAF, leave GOP size blank and set GOP mode control to Auto + // in each output in your output group. GopSize *float64 `locationName:"gopSize" type:"double"` // Specify how the transcoder determines GOP size for this output. We recommend // that you have the transcoder automatically choose this value for you based // on characteristics of your input video. To enable this automatic behavior, - // choose Auto (AUTO) and and leave GOP size (GopSize) blank. By default, if - // you don't specify GOP mode control (GopSizeUnits), MediaConvert will use - // automatic behavior. If your output group specifies HLS, DASH, or CMAF, set - // GOP mode control to Auto and leave GOP size blank in each output in your - // output group. To explicitly specify the GOP length, choose Specified, frames - // (FRAMES) or Specified, seconds (SECONDS) and then provide the GOP length - // in the related setting GOP size (GopSize). + // choose Auto and leave GOP size blank. By default, if you don't specify + // GOP mode control, MediaConvert will use automatic behavior. If your output + // group specifies HLS, DASH, or CMAF, set GOP mode control to Auto and leave + // GOP size blank in each output in your output group. To explicitly specify + // the GOP length, choose Specified, frames or Specified, seconds and then provide + // the GOP length in the related setting GOP size. GopSizeUnits *string `locationName:"gopSizeUnits" type:"string" enum:"H264GopSizeUnits"` // If your downstream systems have strict buffer requirements: Specify the minimum @@ -12484,39 +12340,37 @@ type H264Settings struct { HrdBufferSize *int64 `locationName:"hrdBufferSize" type:"integer"` // Choose the scan line type for the output. Keep the default value, Progressive - // (PROGRESSIVE) to create a progressive output, regardless of the scan type - // of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) - // to create an output that's interlaced with the same field polarity throughout. - // Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) - // to produce outputs with the same field polarity as the source. For jobs that - // have multiple inputs, the output field polarity might change over the course - // of the output. Follow behavior depends on the input scan type. If the source - // is interlaced, the output will be interlaced with the same polarity as the - // source. If the source is progressive, the output will be interlaced with - // top field bottom field first, depending on which of the Follow options you - // choose.
+ // to create a progressive output, regardless of the scan type of your input. + // Use Top field first or Bottom field first to create an output that's interlaced + // with the same field polarity throughout. Use Follow, default top or Follow, + // default bottom to produce outputs with the same field polarity as the source. + // For jobs that have multiple inputs, the output field polarity might change + // over the course of the output. Follow behavior depends on the input scan + // type. If the source is interlaced, the output will be interlaced with the + // same polarity as the source. If the source is progressive, the output will + // be interlaced with top field or bottom field first, depending on which of the + // Follow options you choose. InterlaceMode *string `locationName:"interlaceMode" type:"string" enum:"H264InterlaceMode"` // Maximum bitrate in bits/second. For example, enter five megabits per second // as 5000000. Required when Rate control mode is QVBR. MaxBitrate *int64 `locationName:"maxBitrate" min:"1000" type:"integer"` - // Use this setting only when you also enable Scene change detection (SceneChangeDetect). - // This setting determines how the encoder manages the spacing between I-frames - // that it inserts as part of the I-frame cadence and the I-frames that it inserts - // for Scene change detection. We recommend that you have the transcoder automatically + // Use this setting only when you also enable Scene change detection. This setting + // determines how the encoder manages the spacing between I-frames that it inserts + // as part of the I-frame cadence and the I-frames that it inserts for Scene + // change detection. We recommend that you have the transcoder automatically // choose this value for you based on characteristics of your input video. To - // enable this automatic behavior, keep the default value by leaving this setting - // out of your JSON job specification. In the console, do this by keeping the - // default empty value. When you explicitly specify a value for this setting, - // the encoder determines whether to skip a cadence-driven I-frame by the value - // you set. For example, if you set Min I interval (minIInterval) to 5 and a - // cadence-driven I-frame would fall within 5 frames of a scene-change I-frame, - // then the encoder skips the cadence-driven I-frame. In this way, one GOP is - // shrunk slightly and one GOP is stretched slightly. When the cadence-driven - // I-frames are farther from the scene-change I-frame than the value you set, - // then the encoder leaves all I-frames in place and the GOPs surrounding the - // scene change are smaller than the usual cadence GOPs. + // enable this automatic behavior, keep the default empty value. + // When you explicitly specify a value for this setting, the encoder determines + // whether to skip a cadence-driven I-frame by the value you set. For example, + // if you set Min I interval to 5 and a cadence-driven I-frame would fall within + // 5 frames of a scene-change I-frame, then the encoder skips the cadence-driven + // I-frame. In this way, one GOP is shrunk slightly and one GOP is stretched + // slightly. When the cadence-driven I-frames are farther from the scene-change + // I-frame than the value you set, then the encoder leaves all I-frames in place + // and the GOPs surrounding the scene change are smaller than the usual cadence + // GOPs.
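Aside, not part of the diff: a sketch tying together the GOP, scene-change, and Min I interval guidance above; the values are illustrative.

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	h264 := &mediaconvert.H264Settings{
		// Recommended default: let the transcoder pick the GOP structure.
		GopSizeUnits: aws.String("AUTO"),
		// Insert I-frames at detected scene changes; TRANSITION_DETECTION is
		// the variant the docs suggest for QVBR outputs.
		SceneChangeDetect: aws.String("TRANSITION_DETECTION"),
		// Skip a cadence-driven I-frame that would land within 5 frames of a
		// scene-change I-frame, per the Min I interval example above.
		MinIInterval: aws.Int64(5),
	}
	_ = h264
}
```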
MinIInterval *int64 `locationName:"minIInterval" type:"integer"` // Specify the number of B-frames between reference frames in this output. For @@ -12531,28 +12385,25 @@ type H264Settings struct { NumberReferenceFrames *int64 `locationName:"numberReferenceFrames" min:"1" type:"integer"` // Optional. Specify how the service determines the pixel aspect ratio (PAR) - // for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), - // uses the PAR from your input video for your output. To specify a different - // PAR in the console, choose any value other than Follow source. To specify - // a different PAR by editing the JSON job specification, choose SPECIFIED. - // When you choose SPECIFIED for this setting, you must also specify values - // for the parNumerator and parDenominator settings. + // for this output. The default behavior, Follow source, uses the PAR from your + // input video for your output. To specify a different PAR in the console, choose + // any value other than Follow source. When you choose SPECIFIED for this setting, + // you must also specify values for the parNumerator and parDenominator settings. ParControl *string `locationName:"parControl" type:"string" enum:"H264ParControl"` - // Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the - // console, this corresponds to any value other than Follow source. When you - // specify an output pixel aspect ratio (PAR) that is different from your input - // video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC - // widescreen, you would specify the ratio 40:33. In this example, the value - // for parDenominator is 33. + // Required when you set Pixel aspect ratio to SPECIFIED. On the console, this + // corresponds to any value other than Follow source. When you specify an output + // pixel aspect ratio (PAR) that is different from your input video PAR, provide + // your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would + // specify the ratio 40:33. In this example, the value for parDenominator is + // 33. ParDenominator *int64 `locationName:"parDenominator" min:"1" type:"integer"` - // Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the - // console, this corresponds to any value other than Follow source. When you - // specify an output pixel aspect ratio (PAR) that is different from your input - // video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC - // widescreen, you would specify the ratio 40:33. In this example, the value - // for parNumerator is 40. + // Required when you set Pixel aspect ratio to SPECIFIED. On the console, this + // corresponds to any value other than Follow source. When you specify an output + // pixel aspect ratio (PAR) that is different from your input video PAR, provide + // your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would + // specify the ratio 40:33. In this example, the value for parNumerator is 40. ParNumerator *int64 `locationName:"parNumerator" min:"1" type:"integer"` // The Quality tuning level you choose represents a trade-off between the encoding @@ -12566,7 +12417,7 @@ type H264Settings struct { QualityTuningLevel *string `locationName:"qualityTuningLevel" type:"string" enum:"H264QualityTuningLevel"` // Settings for quality-defined variable bitrate encoding with the H.264 codec. - // Use these settings only when you set QVBR for Rate control mode (RateControlMode). + // Use these settings only when you set QVBR for Rate control mode.
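Aside, not part of the diff: the QVBR and PAR examples above translate to the following sketch with this SDK's types; the numbers are the ones used in the doc comments, not recommendations.

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	h264 := &mediaconvert.H264Settings{
		RateControlMode: aws.String("QVBR"),
		MaxBitrate:      aws.Int64(5000000), // required when Rate control mode is QVBR
		QvbrSettings: &mediaconvert.H264QvbrSettings{
			// Target quality 7.33: level 7 plus fine-tune .33, per the docs above.
			QvbrQualityLevel:         aws.Int64(7),
			QvbrQualityLevelFineTune: aws.Float64(0.33),
		},
		// The D1/DV NTSC widescreen 40:33 PAR used as the example above.
		ParControl:     aws.String("SPECIFIED"),
		ParNumerator:   aws.Int64(40),
		ParDenominator: aws.Int64(33),
	}
	_ = h264
}
```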
QvbrSettings *H264QvbrSettings `locationName:"qvbrSettings" type:"structure"` // Use this setting to specify whether this output has a variable bitrate (VBR), @@ -12578,24 +12429,22 @@ type H264Settings struct { // Use this setting for interlaced outputs, when your output frame rate is half // of your input frame rate. In this situation, choose Optimized interlacing - // (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this - // case, each progressive frame from the input corresponds to an interlaced - // field in the output. Keep the default value, Basic interlacing (INTERLACED), - // for all other output frame rates. With basic interlacing, MediaConvert performs - // any frame rate conversion first and then interlaces the frames. When you - // choose Optimized interlacing and you set your output frame rate to a value - // that isn't suitable for optimized interlacing, MediaConvert automatically - // falls back to basic interlacing. Required settings: To use optimized interlacing, - // you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't - // use optimized interlacing for hard telecine outputs. You must also set Interlace - // mode (interlaceMode) to a value other than Progressive (PROGRESSIVE). + // to create a better quality interlaced output. In this case, each progressive + // frame from the input corresponds to an interlaced field in the output. Keep + // the default value, Basic interlacing, for all other output frame rates. With + // basic interlacing, MediaConvert performs any frame rate conversion first + // and then interlaces the frames. When you choose Optimized interlacing and + // you set your output frame rate to a value that isn't suitable for optimized + // interlacing, MediaConvert automatically falls back to basic interlacing. + // Required settings: To use optimized interlacing, you must set Telecine to + // None or Soft. You can't use optimized interlacing for hard telecine outputs. + // You must also set Interlace mode to a value other than Progressive. ScanTypeConversionMode *string `locationName:"scanTypeConversionMode" type:"string" enum:"H264ScanTypeConversionMode"` // Enable this setting to insert I-frames at scene changes that the service // automatically detects. This improves video quality and is enabled by default. - // If this output uses QVBR, choose Transition detection (TRANSITION_DETECTION) - // for further video quality improvement. For more information about QVBR, see - // https://docs.aws.amazon.com/console/mediaconvert/cbr-vbr-qvbr. + // If this output uses QVBR, choose Transition detection for further video quality + // improvement. For more information about QVBR, see https://docs.aws.amazon.com/console/mediaconvert/cbr-vbr-qvbr. SceneChangeDetect *string `locationName:"sceneChangeDetect" type:"string" enum:"H264SceneChangeDetect"` // Number of slices per picture. Must be less than or equal to the number of @@ -12608,46 +12457,43 @@ type H264Settings struct { // slow PAL, MediaConvert relabels the video frames to 25 fps and resamples // your audio to keep it synchronized with the video. Note that enabling this // setting will slightly reduce the duration of your video. Required settings: - // You must also set Framerate to 25. In your JSON job specification, set (framerateControl) - // to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to - // 1. + // You must also set Framerate to 25. 
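Aside, not part of the diff: a sketch of the slow PAL requirement above, using this SDK's types.

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	// Slow PAL relabels 23.976 fps content as 25 fps; the doc comment requires
	// an explicit 25/1 output frame rate alongside it.
	h264 := &mediaconvert.H264Settings{
		SlowPal:              aws.String("ENABLED"),
		FramerateControl:     aws.String("SPECIFIED"),
		FramerateNumerator:   aws.Int64(25),
		FramerateDenominator: aws.Int64(1),
	}
	_ = h264
}
```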
SlowPal *string `locationName:"slowPal" type:"string" enum:"H264SlowPal"` // Ignore this setting unless you need to comply with a specification that requires // a specific value. If you don't have a specification requirement, we recommend // that you adjust the softness of your output by using a lower value for the - // setting Sharpness (sharpness) or by enabling a noise reducer filter (noiseReducerFilter). - // The Softness (softness) setting specifies the quantization matrices that - // the encoder uses. Keep the default value, 0, for flat quantization. Choose - // the value 1 or 16 to use the default JVT softening quantization matricies - // from the H.264 specification. Choose a value from 17 to 128 to use planar - // interpolation. Increasing values from 17 to 128 result in increasing reduction - // of high-frequency data. The value 128 results in the softest video. + // setting Sharpness or by enabling a noise reducer filter. The Softness setting + // specifies the quantization matrices that the encoder uses. Keep the default + // value, 0, for flat quantization. Choose the value 1 or 16 to use the default + // JVT softening quantization matrices from the H.264 specification. Choose + // a value from 17 to 128 to use planar interpolation. Increasing values from + // 17 to 128 result in increasing reduction of high-frequency data. The value + // 128 results in the softest video. Softness *int64 `locationName:"softness" type:"integer"` - // Only use this setting when you change the default value, Auto (AUTO), for - // the setting H264AdaptiveQuantization.
When you keep all defaults, excluding H264AdaptiveQuantization + // and all other adaptive quantization from your JSON job specification, MediaConvert + // automatically applies the best types of quantization for your video content. + // When you set H264AdaptiveQuantization to a value other than AUTO, the default + // value for H264SpatialAdaptiveQuantization is Enabled. Keep this default value + // to adjust quantization within each frame based on spatial variation of content + // complexity. When you enable this feature, the encoder uses fewer bits on + // areas that can sustain more distortion with no noticeable visual degradation + // and uses more bits on areas where any small distortion will be noticeable. + // For example, complex textured blocks are encoded with fewer bits and smooth + // textured blocks are encoded with more bits. Enabling this feature will almost + // always improve your video quality. Note, though, that this feature doesn't + // take into account where the viewer's attention is likely to be. If viewers + // are likely to be focusing their attention on a part of the screen with a + // lot of complex texture, you might choose to set H264SpatialAdaptiveQuantization + // to Disabled. Related setting: When you enable spatial adaptive quantization, + // set the value for Adaptive quantization depending on your content. For homogeneous + // content, such as cartoons and video games, set it to Low. For content with + // a wider variety of textures, set it to High or Higher. To manually enable + // or disable H264SpatialAdaptiveQuantization, you must set Adaptive quantization + // to a value other than AUTO. SpatialAdaptiveQuantization *string `locationName:"spatialAdaptiveQuantization" type:"string" enum:"H264SpatialAdaptiveQuantization"` // Produces a bitstream compliant with SMPTE RP-2027. @@ -12655,12 +12501,12 @@ type H264Settings struct { // When you do frame rate conversion from 23.976 frames per second (fps) to // 29.97 fps, and your output scan type is interlaced, you can optionally enable - // hard or soft telecine to create a smoother picture. Hard telecine (HARD) - // produces a 29.97i output. Soft telecine (SOFT) produces an output with a - // 23.976 output that signals to the video player device to do the conversion - // during play back. When you keep the default value, None (NONE), MediaConvert - // does a standard frame rate conversion to 29.97 without doing anything with - // the field polarity to create a smoother picture. + // hard or soft telecine to create a smoother picture. Hard telecine produces + // a 29.97i output. Soft telecine produces a 23.976 output that signals to + // the video player device to do the conversion during playback. + // When you keep the default value, None, MediaConvert does a standard frame + // rate conversion to 29.97 without doing anything with the field polarity to + // create a smoother picture. Telecine *string `locationName:"telecine" type:"string" enum:"H264Telecine"` // Only use this setting when you change the default value, AUTO, for the setting @@ -12668,22 +12514,21 @@ type H264Settings struct { // and all other adaptive quantization from your JSON job specification, MediaConvert // automatically applies the best types of quantization for your video content. // When you set H264AdaptiveQuantization to a value other than AUTO, the default - // value for H264TemporalAdaptiveQuantization is Enabled (ENABLED).
Keep this - // default value to adjust quantization within each frame based on temporal - // variation of content complexity. When you enable this feature, the encoder - // uses fewer bits on areas of the frame that aren't moving and uses more bits - // on complex objects with sharp edges that move a lot. For example, this feature - // improves the readability of text tickers on newscasts and scoreboards on - // sports matches. Enabling this feature will almost always improve your video - // quality. Note, though, that this feature doesn't take into account where - // the viewer's attention is likely to be. If viewers are likely to be focusing - // their attention on a part of the screen that doesn't have moving objects - // with sharp edges, such as sports athletes' faces, you might choose to set - // H264TemporalAdaptiveQuantization to Disabled (DISABLED). Related setting: - // When you enable temporal quantization, adjust the strength of the filter - // with the setting Adaptive quantization (adaptiveQuantization). To manually + // value for H264TemporalAdaptiveQuantization is Enabled. Keep this default + // value to adjust quantization within each frame based on temporal variation + // of content complexity. When you enable this feature, the encoder uses fewer + // bits on areas of the frame that aren't moving and uses more bits on complex + // objects with sharp edges that move a lot. For example, this feature improves + // the readability of text tickers on newscasts and scoreboards on sports matches. + // Enabling this feature will almost always improve your video quality. Note, + // though, that this feature doesn't take into account where the viewer's attention + // is likely to be. If viewers are likely to be focusing their attention on + // a part of the screen that doesn't have moving objects with sharp edges, such + // as sports athletes' faces, you might choose to set H264TemporalAdaptiveQuantization + // to Disabled. Related setting: When you enable temporal quantization, adjust + // the strength of the filter with the setting Adaptive quantization. To manually // enable or disable H264TemporalAdaptiveQuantization, you must set Adaptive - // quantization (H264AdaptiveQuantization) to a value other than AUTO. + // quantization to a value other than AUTO. TemporalAdaptiveQuantization *string `locationName:"temporalAdaptiveQuantization" type:"string" enum:"H264TemporalAdaptiveQuantization"` // Inserts timecode for each frame as 4 bytes of an unregistered SEI message. @@ -13000,7 +12845,7 @@ func (s *H264Settings) SetUnregisteredSeiTimecode(v string) *H264Settings { } // Settings for quality-defined variable bitrate encoding with the H.265 codec. -// Use these settings only when you set QVBR for Rate control mode (RateControlMode). +// Use these settings only when you set QVBR for Rate control mode. type H265QvbrSettings struct { _ struct{} `type:"structure"` @@ -13012,17 +12857,17 @@ type H265QvbrSettings struct { // by the number of seconds of encoded output. MaxAverageBitrate *int64 `locationName:"maxAverageBitrate" min:"1000" type:"integer"` - // Use this setting only when you set Rate control mode (RateControlMode) to - // QVBR. Specify the target quality level for this output. MediaConvert determines - // the right number of bits to use for each part of the video to maintain the - // video quality that you specify. When you keep the default value, AUTO, MediaConvert - // picks a quality level for you, based on characteristics of your input video. 
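Aside, not part of the diff: a sketch of the H.264 adaptive-quantization settings discussed in the preceding hunks; the strength value HIGH is illustrative.

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	// Choosing any value other than AUTO here is what enables manual control
	// of the three related filters, per the doc comments above.
	h264 := &mediaconvert.H264Settings{
		AdaptiveQuantization:         aws.String("HIGH"),
		SpatialAdaptiveQuantization:  aws.String("ENABLED"),
		TemporalAdaptiveQuantization: aws.String("ENABLED"),
		FlickerAdaptiveQuantization:  aws.String("DISABLED"),
	}
	_ = h264
}
```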
- // If you prefer to specify a quality level, specify a number from 1 through - // 10. Use higher numbers for greater quality. Level 10 results in nearly lossless - // compression. The quality level for most broadcast-quality transcodes is between - // 6 and 9. Optionally, to specify a value between whole numbers, also provide - // a value for the setting qvbrQualityLevelFineTune. For example, if you want - // your QVBR quality level to be 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune + // Use this setting only when you set Rate control mode to QVBR. Specify the + // target quality level for this output. MediaConvert determines the right number + // of bits to use for each part of the video to maintain the video quality that + // you specify. When you keep the default value, AUTO, MediaConvert picks a + // quality level for you, based on characteristics of your input video. If you + // prefer to specify a quality level, specify a number from 1 through 10. Use + // higher numbers for greater quality. Level 10 results in nearly lossless compression. + // The quality level for most broadcast-quality transcodes is between 6 and + // 9. Optionally, to specify a value between whole numbers, also provide a value + // for the setting qvbrQualityLevelFineTune. For example, if you want your QVBR + // quality level to be 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune // to .33. QvbrQualityLevel *int64 `locationName:"qvbrQualityLevel" min:"1" type:"integer"` @@ -13091,16 +12936,13 @@ func (s *H265QvbrSettings) SetQvbrQualityLevelFineTune(v float64) *H265QvbrSetti type H265Settings struct { _ struct{} `type:"structure"` - // When you set Adaptive Quantization (H265AdaptiveQuantization) to Auto (AUTO), - // or leave blank, MediaConvert automatically applies quantization to improve - // the video quality of your output. Set Adaptive Quantization to Low (LOW), - // Medium (MEDIUM), High (HIGH), Higher (HIGHER), or Max (MAX) to manually control - // the strength of the quantization filter. When you do, you can specify a value - // for Spatial Adaptive Quantization (H265SpatialAdaptiveQuantization), Temporal - // Adaptive Quantization (H265TemporalAdaptiveQuantization), and Flicker Adaptive - // Quantization (H265FlickerAdaptiveQuantization), to further control the quantization - // filter. Set Adaptive Quantization to Off (OFF) to apply no quantization to - // your output. + // When you set Adaptive Quantization to Auto, or leave blank, MediaConvert + // automatically applies quantization to improve the video quality of your output. + // Set Adaptive Quantization to Low, Medium, High, Higher, or Max to manually + // control the strength of the quantization filter. When you do, you can specify + // a value for Spatial Adaptive Quantization, Temporal Adaptive Quantization, + // and Flicker Adaptive Quantization, to further control the quantization filter. + // Set Adaptive Quantization to Off to apply no quantization to your output. AdaptiveQuantization *string `locationName:"adaptiveQuantization" type:"string" enum:"H265AdaptiveQuantization"` // Enables Alternate Transfer Function SEI message for outputs using Hybrid @@ -13146,20 +12988,15 @@ type H265Settings struct { // at the I-frame. When you enable this setting, the encoder updates these macroblocks // slightly more often to smooth out the flicker. This setting is disabled by // default. 
Related setting: In addition to enabling this setting, you must - // also set adaptiveQuantization to a value other than Off (OFF). + // also set adaptiveQuantization to a value other than Off. FlickerAdaptiveQuantization *string `locationName:"flickerAdaptiveQuantization" type:"string" enum:"H265FlickerAdaptiveQuantization"` - // If you are using the console, use the Framerate setting to specify the frame - // rate for this output. If you want to keep the same frame rate as the input - // video, choose Follow source. If you want to do frame rate conversion, choose - // a frame rate from the dropdown list or choose Custom. The framerates shown - // in the dropdown list are decimal approximations of fractions. If you choose - // Custom, specify your frame rate as a fraction. If you are creating your transcoding - // job specification as a JSON file without the console, use FramerateControl - // to specify which value the service uses for the frame rate for this output. - // Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate - // from the input. Choose SPECIFIED if you want the service to use the frame - // rate you specify in the settings FramerateNumerator and FramerateDenominator. + // Use the Framerate setting to specify the frame rate for this output. If you + // want to keep the same frame rate as the input video, choose Follow source. + // If you want to do frame rate conversion, choose a frame rate from the dropdown + // list or choose Custom. The framerates shown in the dropdown list are decimal + // approximations of fractions. If you choose Custom, specify your frame rate + // as a fraction. FramerateControl *string `locationName:"framerateControl" type:"string" enum:"H265FramerateControl"` // Choose the method that you want MediaConvert to use when increasing or decreasing @@ -13202,33 +13039,30 @@ type H265Settings struct { // example, if you want to allow four open GOPs and then require a closed GOP, // set this value to 5. We recommend that you have the transcoder automatically // choose this value for you based on characteristics of your input video. To - // enable this automatic behavior, keep the default value by leaving this setting - // out of your JSON job specification. In the console, do this by keeping the - // default empty value. If you do explicitly specify a value, for segmented - // outputs, don't set this value to 0. + // enable this automatic behavior, keep the default empty value. + // If you do explicitly specify a value, for segmented outputs, don't set this + // value to 0. GopClosedCadence *int64 `locationName:"gopClosedCadence" type:"integer"` - // Use this setting only when you set GOP mode control (GopSizeUnits) to Specified, - // frames (FRAMES) or Specified, seconds (SECONDS). Specify the GOP length using - // a whole number of frames or a decimal value of seconds. MediaConvert will - // interpret this value as frames or seconds depending on the value you choose - // for GOP mode control (GopSizeUnits). If you want to allow MediaConvert to - // automatically determine GOP size, leave GOP size blank and set GOP mode control - // to Auto (AUTO). If your output group specifies HLS, DASH, or CMAF, leave - // GOP size blank and set GOP mode control to Auto in each output in your output - // group. + // Use this setting only when you set GOP mode control to Specified, frames + // or Specified, seconds. Specify the GOP length using a whole number of frames + // or a decimal value of seconds. 
MediaConvert will interpret this value as + // frames or seconds depending on the value you choose for GOP mode control. + // If you want to allow MediaConvert to automatically determine GOP size, leave + // GOP size blank and set GOP mode control to Auto. If your output group specifies + // HLS, DASH, or CMAF, leave GOP size blank and set GOP mode control to Auto + // in each output in your output group. GopSize *float64 `locationName:"gopSize" type:"double"` // Specify how the transcoder determines GOP size for this output. We recommend // that you have the transcoder automatically choose this value for you based // on characteristics of your input video. To enable this automatic behavior, - // choose Auto (AUTO) and and leave GOP size (GopSize) blank. By default, if - // you don't specify GOP mode control (GopSizeUnits), MediaConvert will use - // automatic behavior. If your output group specifies HLS, DASH, or CMAF, set - // GOP mode control to Auto and leave GOP size blank in each output in your - // output group. To explicitly specify the GOP length, choose Specified, frames - // (FRAMES) or Specified, seconds (SECONDS) and then provide the GOP length - // in the related setting GOP size (GopSize). + // choose Auto and leave GOP size blank. By default, if you don't specify + // GOP mode control, MediaConvert will use automatic behavior. If your output + // group specifies HLS, DASH, or CMAF, set GOP mode control to Auto and leave + // GOP size blank in each output in your output group. To explicitly specify + // the GOP length, choose Specified, frames or Specified, seconds and then provide + // the GOP length in the related setting GOP size. GopSizeUnits *string `locationName:"gopSizeUnits" type:"string" enum:"H265GopSizeUnits"` // If your downstream systems have strict buffer requirements: Specify the minimum @@ -13245,39 +13079,37 @@ type H265Settings struct { HrdBufferSize *int64 `locationName:"hrdBufferSize" type:"integer"`
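// A minimal usage sketch, assuming the aws-sdk-go mediaconvert package that
// this generated file belongs to (the enum constant name follows the
// generator's convention and is an assumption): the automatic GOP behavior
// described above amounts to setting GOP mode control to Auto and leaving
// GOP size unset.
//
//	h265 := &mediaconvert.H265Settings{}
//	h265.SetGopSizeUnits(mediaconvert.H265GopSizeUnitsAuto)
//	// GopSize stays nil so MediaConvert chooses it from the input video.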
// Choose the scan line type for the output. Keep the default value, Progressive - // (PROGRESSIVE) to create a progressive output, regardless of the scan type - // of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) - // to create an output that's interlaced with the same field polarity throughout. - // Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) - // to produce outputs with the same field polarity as the source. For jobs that - // have multiple inputs, the output field polarity might change over the course - // of the output. Follow behavior depends on the input scan type. If the source - // is interlaced, the output will be interlaced with the same polarity as the - // source. If the source is progressive, the output will be interlaced with - // top field bottom field first, depending on which of the Follow options you - // choose. + // to create a progressive output, regardless of the scan type of your input. + // Use Top field first or Bottom field first to create an output that's interlaced + // with the same field polarity throughout. Use Follow, default top or Follow, + // default bottom to produce outputs with the same field polarity as the source. + // For jobs that have multiple inputs, the output field polarity might change + // over the course of the output. Follow behavior depends on the input scan + // type. If the source is interlaced, the output will be interlaced with the + // same polarity as the source. If the source is progressive, the output will + // be interlaced with top field first or bottom field first, depending on which + // of the Follow options you choose. InterlaceMode *string `locationName:"interlaceMode" type:"string" enum:"H265InterlaceMode"` // Maximum bitrate in bits/second. For example, enter five megabits per second // as 5000000. Required when Rate control mode is QVBR. MaxBitrate *int64 `locationName:"maxBitrate" min:"1000" type:"integer"` - // Use this setting only when you also enable Scene change detection (SceneChangeDetect). - // This setting determines how the encoder manages the spacing between I-frames - // that it inserts as part of the I-frame cadence and the I-frames that it inserts - // for Scene change detection. We recommend that you have the transcoder automatically + // Use this setting only when you also enable Scene change detection. This setting + // determines how the encoder manages the spacing between I-frames that it inserts + // as part of the I-frame cadence and the I-frames that it inserts for Scene + // change detection. We recommend that you have the transcoder automatically // choose this value for you based on characteristics of your input video. To - // enable this automatic behavior, keep the default value by leaving this setting - // out of your JSON job specification. In the console, do this by keeping the - // default empty value. When you explicitly specify a value for this setting, - // the encoder determines whether to skip a cadence-driven I-frame by the value - // you set. For example, if you set Min I interval (minIInterval) to 5 and a - // cadence-driven I-frame would fall within 5 frames of a scene-change I-frame, - // then the encoder skips the cadence-driven I-frame. In this way, one GOP is - // shrunk slightly and one GOP is stretched slightly. When the cadence-driven - // I-frames are farther from the scene-change I-frame than the value you set, - // then the encoder leaves all I-frames in place and the GOPs surrounding the - // scene change are smaller than the usual cadence GOPs. + // enable this automatic behavior, keep the default empty value. + // When you explicitly specify a value for this setting, the encoder determines + // whether to skip a cadence-driven I-frame by the value you set. For example, + // if you set Min I interval to 5 and a cadence-driven I-frame would fall within + // 5 frames of a scene-change I-frame, then the encoder skips the cadence-driven + // I-frame. In this way, one GOP is shrunk slightly and one GOP is stretched + // slightly. When the cadence-driven I-frames are farther from the scene-change + // I-frame than the value you set, then the encoder leaves all I-frames in place + // and the GOPs surrounding the scene change are smaller than the usual cadence + // GOPs. MinIInterval *int64 `locationName:"minIInterval" type:"integer"` // Specify the number of B-frames between reference frames in this output. For @@ -13292,37 +13124,34 @@ type H265Settings struct { NumberReferenceFrames *int64 `locationName:"numberReferenceFrames" min:"1" type:"integer"` // Optional. Specify how the service determines the pixel aspect ratio (PAR) - // for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), - // uses the PAR from your input video for your output. To specify a different - // PAR in the console, choose any value other than Follow source. To specify - // a different PAR by editing the JSON job specification, choose SPECIFIED. 
- // When you choose SPECIFIED for this setting, you must also specify values - // for the parNumerator and parDenominator settings. + // for this output. The default behavior, Follow source, uses the PAR from your + // input video for your output. To specify a different PAR, choose any value + // other than Follow source. When you choose SPECIFIED for this setting, you + // must also specify values for the parNumerator and parDenominator settings. ParControl *string `locationName:"parControl" type:"string" enum:"H265ParControl"` - // Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the - // console, this corresponds to any value other than Follow source. When you - // specify an output pixel aspect ratio (PAR) that is different from your input - // video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC - // widescreen, you would specify the ratio 40:33. In this example, the value - // for parDenominator is 33. + // Required when you set Pixel aspect ratio to SPECIFIED. On the console, this + // corresponds to any value other than Follow source. When you specify an output + // pixel aspect ratio (PAR) that is different from your input video PAR, provide + // your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would + // specify the ratio 40:33. In this example, the value for parDenominator is + // 33. ParDenominator *int64 `locationName:"parDenominator" min:"1" type:"integer"` - // Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the - // console, this corresponds to any value other than Follow source. When you - // specify an output pixel aspect ratio (PAR) that is different from your input - // video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC - // widescreen, you would specify the ratio 40:33. In this example, the value - // for parNumerator is 40. + // Required when you set Pixel aspect ratio to SPECIFIED. On the console, this + // corresponds to any value other than Follow source. When you specify an output + // pixel aspect ratio (PAR) that is different from your input video PAR, provide + // your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would + // specify the ratio 40:33. In this example, the value for parNumerator is 40. ParNumerator *int64 `locationName:"parNumerator" min:"1" type:"integer"` - // Optional. Use Quality tuning level (qualityTuningLevel) to choose how you - // want to trade off encoding speed for output video quality. The default behavior - // is faster, lower quality, single-pass encoding. + // Optional. Use Quality tuning level to choose how you want to trade off encoding + // speed for output video quality. The default behavior is faster, lower quality, + // single-pass encoding. QualityTuningLevel *string `locationName:"qualityTuningLevel" type:"string" enum:"H265QualityTuningLevel"` // Settings for quality-defined variable bitrate encoding with the H.265 codec. - // Use these settings only when you set QVBR for Rate control mode (RateControlMode). + // Use these settings only when you set QVBR for Rate control mode. QvbrSettings *H265QvbrSettings `locationName:"qvbrSettings" type:"structure"` // Use this setting to specify whether this output has a variable bitrate (VBR), @@ -13335,24 +13164,22 @@ type H265Settings struct { // Use this setting for interlaced outputs, when your output frame rate is half // of your input frame rate. 
In this situation, choose Optimized interlacing - // (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this - // case, each progressive frame from the input corresponds to an interlaced - // field in the output. Keep the default value, Basic interlacing (INTERLACED), - // for all other output frame rates. With basic interlacing, MediaConvert performs - // any frame rate conversion first and then interlaces the frames. When you - // choose Optimized interlacing and you set your output frame rate to a value - // that isn't suitable for optimized interlacing, MediaConvert automatically - // falls back to basic interlacing. Required settings: To use optimized interlacing, - // you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't - // use optimized interlacing for hard telecine outputs. You must also set Interlace - // mode (interlaceMode) to a value other than Progressive (PROGRESSIVE). + // to create a better quality interlaced output. In this case, each progressive + // frame from the input corresponds to an interlaced field in the output. Keep + // the default value, Basic interlacing, for all other output frame rates. With + // basic interlacing, MediaConvert performs any frame rate conversion first + // and then interlaces the frames. When you choose Optimized interlacing and + // you set your output frame rate to a value that isn't suitable for optimized + // interlacing, MediaConvert automatically falls back to basic interlacing. + // Required settings: To use optimized interlacing, you must set Telecine to + // None or Soft. You can't use optimized interlacing for hard telecine outputs. + // You must also set Interlace mode to a value other than Progressive. ScanTypeConversionMode *string `locationName:"scanTypeConversionMode" type:"string" enum:"H265ScanTypeConversionMode"` // Enable this setting to insert I-frames at scene changes that the service // automatically detects. This improves video quality and is enabled by default. - // If this output uses QVBR, choose Transition detection (TRANSITION_DETECTION) - // for further video quality improvement. For more information about QVBR, see - // https://docs.aws.amazon.com/console/mediaconvert/cbr-vbr-qvbr. + // If this output uses QVBR, choose Transition detection for further video quality + // improvement. For more information about QVBR, see https://docs.aws.amazon.com/console/mediaconvert/cbr-vbr-qvbr. SceneChangeDetect *string `locationName:"sceneChangeDetect" type:"string" enum:"H265SceneChangeDetect"` // Number of slices per picture. Must be less than or equal to the number of @@ -13365,50 +13192,45 @@ type H265Settings struct { // slow PAL, MediaConvert relabels the video frames to 25 fps and resamples // your audio to keep it synchronized with the video. Note that enabling this // setting will slightly reduce the duration of your video. Required settings: - // You must also set Framerate to 25. In your JSON job specification, set (framerateControl) - // to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to - // 1. + // You must also set Framerate to 25. SlowPal *string `locationName:"slowPal" type:"string" enum:"H265SlowPal"` - // Keep the default value, Enabled (ENABLED), to adjust quantization within - // each frame based on spatial variation of content complexity. 
When you enable - // this feature, the encoder uses fewer bits on areas that can sustain more - // distortion with no noticeable visual degradation and uses more bits on areas - // where any small distortion will be noticeable. For example, complex textured - // blocks are encoded with fewer bits and smooth textured blocks are encoded - // with more bits. Enabling this feature will almost always improve your video - // quality. Note, though, that this feature doesn't take into account where - // the viewer's attention is likely to be. If viewers are likely to be focusing - // their attention on a part of the screen with a lot of complex texture, you - // might choose to disable this feature. Related setting: When you enable spatial - // adaptive quantization, set the value for Adaptive quantization (adaptiveQuantization) - // depending on your content. For homogeneous content, such as cartoons and - // video games, set it to Low. For content with a wider variety of textures, - // set it to High or Higher. + // Keep the default value, Enabled, to adjust quantization within each frame + // based on spatial variation of content complexity. When you enable this feature, + // the encoder uses fewer bits on areas that can sustain more distortion with + // no noticeable visual degradation and uses more bits on areas where any small + // distortion will be noticeable. For example, complex textured blocks are encoded + // with fewer bits and smooth textured blocks are encoded with more bits. Enabling + // this feature will almost always improve your video quality. Note, though, + // that this feature doesn't take into account where the viewer's attention + // is likely to be. If viewers are likely to be focusing their attention on + // a part of the screen with a lot of complex texture, you might choose to disable + // this feature. Related setting: When you enable spatial adaptive quantization, + // set the value for Adaptive quantization depending on your content. For homogeneous + // content, such as cartoons and video games, set it to Low. For content with + // a wider variety of textures, set it to High or Higher. SpatialAdaptiveQuantization *string `locationName:"spatialAdaptiveQuantization" type:"string" enum:"H265SpatialAdaptiveQuantization"` - // This field applies only if the Streams > Advanced > Framerate (framerate) - // field is set to 29.970. This field works with the Streams > Advanced > Preprocessors - // > Deinterlacer field (deinterlace_mode) and the Streams > Advanced > Interlaced - // Mode field (interlace_mode) to identify the scan type for the output: Progressive, - // Interlaced, Hard Telecine or Soft Telecine. - Hard: produces 29.97i output - // from 23.976 input. - Soft: produces 23.976; the player converts this output - // to 29.97i. + // This field applies only if the Streams > Advanced > Framerate field is set + // to 29.970. This field works with the Streams > Advanced > Preprocessors > + // Deinterlacer field and the Streams > Advanced > Interlaced Mode field to + // identify the scan type for the output: Progressive, Interlaced, Hard Telecine + // or Soft Telecine. - Hard: produces 29.97i output from 23.976 input. - Soft: + // produces 23.976; the player converts this output to 29.97i. Telecine *string `locationName:"telecine" type:"string" enum:"H265Telecine"` - // Keep the default value, Enabled (ENABLED), to adjust quantization within - // each frame based on temporal variation of content complexity. 
When you enable - // this feature, the encoder uses fewer bits on areas of the frame that aren't - // moving and uses more bits on complex objects with sharp edges that move a - // lot. For example, this feature improves the readability of text tickers on - // newscasts and scoreboards on sports matches. Enabling this feature will almost - // always improve your video quality. Note, though, that this feature doesn't - // take into account where the viewer's attention is likely to be. If viewers - // are likely to be focusing their attention on a part of the screen that doesn't - // have moving objects with sharp edges, such as sports athletes' faces, you - // might choose to disable this feature. Related setting: When you enable temporal - // quantization, adjust the strength of the filter with the setting Adaptive - // quantization (adaptiveQuantization). + // Keep the default value, Enabled, to adjust quantization within each frame + // based on temporal variation of content complexity. When you enable this feature, + // the encoder uses fewer bits on areas of the frame that aren't moving and + // uses more bits on complex objects with sharp edges that move a lot. For example, + // this feature improves the readability of text tickers on newscasts and scoreboards + // on sports matches. Enabling this feature will almost always improve your + // video quality. Note, though, that this feature doesn't take into account + // where the viewer's attention is likely to be. If viewers are likely to be + // focusing their attention on a part of the screen that doesn't have moving + // objects with sharp edges, such as sports athletes' faces, you might choose + // to disable this feature. Related setting: When you enable temporal quantization, + // adjust the strength of the filter with the setting Adaptive quantization. TemporalAdaptiveQuantization *string `locationName:"temporalAdaptiveQuantization" type:"string" enum:"H265TemporalAdaptiveQuantization"` // Enables temporal layer identifiers in the encoded bitstream. Up to 3 layers @@ -14200,9 +14022,6 @@ func (s *HlsEncryptionSettings) SetType(v string) *HlsEncryptionSettings { } // Settings related to your HLS output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html. -// When you work directly in your JSON job specification, include this object -// and any required children when you set Type, under OutputGroupSettings, to -// HLS_GROUP_SETTINGS. type HlsGroupSettings struct { _ struct{} `type:"structure"` @@ -14218,9 +14037,9 @@ type HlsGroupSettings struct { AdditionalManifests []*HlsAdditionalManifest `locationName:"additionalManifests" type:"list"` // Ignore this setting unless you are using FairPlay DRM with Verimatrix and - // you encounter playback issues. Keep the default value, Include (INCLUDE), - // to output audio-only headers. Choose Exclude (EXCLUDE) to remove the audio-only - // headers from your audio segments. + // you encounter playback issues. Keep the default value, Include, to output + // audio-only headers. Choose Exclude to remove the audio-only headers from + // your audio segments. AudioOnlyHeader *string `locationName:"audioOnlyHeader" type:"string" enum:"HlsAudioOnlyHeader"` // A partial URI prefix that will be prepended to each output in the media .m3u8 @@ -14242,29 +14061,27 @@ type HlsGroupSettings struct { // line from the manifest. 
CaptionLanguageSetting *string `locationName:"captionLanguageSetting" type:"string" enum:"HlsCaptionLanguageSetting"` - // Set Caption segment length control (CaptionSegmentLengthControl) to Match - // video (MATCH_VIDEO) to create caption segments that align with the video - // segments from the first video output in this output group. For example, if - // the video segments are 2 seconds long, your WebVTT segments will also be - // 2 seconds long. Keep the default setting, Large segments (LARGE_SEGMENTS) + // Set Caption segment length control to Match video to create caption segments + // that align with the video segments from the first video output in this output + // group. For example, if the video segments are 2 seconds long, your WebVTT + // segments will also be 2 seconds long. Keep the default setting, Large segments, // to create caption segments that are 300 seconds long. CaptionSegmentLengthControl *string `locationName:"captionSegmentLengthControl" type:"string" enum:"HlsCaptionSegmentLengthControl"` // Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no - // tag. Otherwise, keep the default value Enabled (ENABLED) and control caching - // in your video distribution set up. For example, use the Cache-Control http - // header. + // tag. Otherwise, keep the default value Enabled and control caching in your + // video distribution setup. For example, use the Cache-Control HTTP header. ClientCache *string `locationName:"clientCache" type:"string" enum:"HlsClientCache"` // Specification to use (RFC-6381 or the default RFC-4281) during m3u8 playlist // generation. CodecSpecification *string `locationName:"codecSpecification" type:"string" enum:"HlsCodecSpecification"` - // Use Destination (Destination) to specify the S3 output location and the output - // filename base. Destination accepts format identifiers. If you do not specify - // the base filename in the URI, the service will use the filename of the input - // file. If your job has multiple inputs, the service uses the filename of the - // first input file. + // Use Destination to specify the S3 output location and the output filename + // base. Destination accepts format identifiers. If you do not specify the base + // filename in the URI, the service will use the filename of the input file. + // If your job has multiple inputs, the service uses the filename of the first + // input file. Destination *string `locationName:"destination" type:"string"` // Settings associated with the destination. Will vary based on the type of @@ -14278,14 +14095,13 @@ type HlsGroupSettings struct { Encryption *HlsEncryptionSettings `locationName:"encryption" type:"structure"` // Specify whether MediaConvert generates images for trick play. Keep the default - // value, None (NONE), to not generate any images. Choose Thumbnail (THUMBNAIL) - // to generate tiled thumbnails. Choose Thumbnail and full frame (THUMBNAIL_AND_FULLFRAME) - // to generate tiled thumbnails and full-resolution images of single frames. - // MediaConvert creates a child manifest for each set of images that you generate - // and adds corresponding entries to the parent manifest. A common application - // for these images is Roku trick mode. The thumbnails and full-frame images - // that MediaConvert creates with this feature are compatible with this Roku - // specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md + // value, None, to not generate any images. 
Choose Thumbnail to generate tiled + // thumbnails. Choose Thumbnail and full frame to generate tiled thumbnails + // and full-resolution images of single frames. MediaConvert creates a child + // manifest for each set of images that you generate and adds corresponding + // entries to the parent manifest. A common application for these images is + // Roku trick mode. The thumbnails and full-frame images that MediaConvert creates + // with this feature are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md ImageBasedTrickPlay *string `locationName:"imageBasedTrickPlay" type:"string" enum:"HlsImageBasedTrickPlay"` // Tile and thumbnail settings applicable when imageBasedTrickPlay is ADVANCED @@ -14347,17 +14163,16 @@ type HlsGroupSettings struct { // Specify the length, in whole seconds, of each segment. When you don't specify // a value, MediaConvert defaults to 10. Related settings: Use Segment length - // control (SegmentLengthControl) to specify whether the encoder enforces this - // value strictly. Use Segment control (HlsSegmentControl) to specify whether - // MediaConvert creates separate segment files or one content file that has - // metadata to mark the segment boundaries. + // control to specify whether the encoder enforces this value strictly. Use + // Segment control to specify whether MediaConvert creates separate segment + // files or one content file that has metadata to mark the segment boundaries. SegmentLength *int64 `locationName:"segmentLength" min:"1" type:"integer"` // Specify how you want MediaConvert to determine the segment length. Choose - // Exact (EXACT) to have the encoder use the exact length that you specify with - // the setting Segment length (SegmentLength). This might result in extra I-frames. - // Choose Multiple of GOP (GOP_MULTIPLE) to have the encoder round up the segment - // lengths to match the next GOP boundary. + // Exact to have the encoder use the exact length that you specify with the + // setting Segment length. This might result in extra I-frames. Choose Multiple + // of GOP to have the encoder round up the segment lengths to match the next + // GOP boundary. SegmentLengthControl *string `locationName:"segmentLengthControl" type:"string" enum:"HlsSegmentLengthControl"` // Specify the number of segments to write to a subdirectory before starting @@ -14379,19 +14194,16 @@ type HlsGroupSettings struct { // the actual duration of a track in a segment is longer than the target duration. TargetDurationCompatibilityMode *string `locationName:"targetDurationCompatibilityMode" type:"string" enum:"HlsTargetDurationCompatibilityMode"` - // Specify the type of the ID3 frame (timedMetadataId3Frame) to use for ID3 - // timestamps (timedMetadataId3Period) in your output. To include ID3 timestamps: - // Specify PRIV (PRIV) or TDRL (TDRL) and set ID3 metadata (timedMetadata) to - // Passthrough (PASSTHROUGH). To exclude ID3 timestamps: Set ID3 timestamp frame - // type to None (NONE). + // Specify the type of the ID3 frame to use for ID3 timestamps in your output. + // To include ID3 timestamps: Specify PRIV or TDRL and set ID3 metadata to Passthrough. + // To exclude ID3 timestamps: Set ID3 timestamp frame type to None. TimedMetadataId3Frame *string `locationName:"timedMetadataId3Frame" type:"string" enum:"HlsTimedMetadataId3Frame"` // Specify the interval in seconds to write ID3 timestamps in your output. 
The // first timestamp starts at the output timecode and date, and increases incrementally // with each ID3 timestamp. To use the default interval of 10 seconds: Leave // blank. To include this metadata in your output: Set ID3 timestamp frame type - // (timedMetadataId3Frame) to PRIV (PRIV) or TDRL (TDRL), and set ID3 metadata - // (timedMetadata) to Passthrough (PASSTHROUGH). + // to PRIV or TDRL, and set ID3 metadata to Passthrough. TimedMetadataId3Period *int64 `locationName:"timedMetadataId3Period" type:"integer"` // Provides an extra millisecond delta offset to fine tune the timestamps. @@ -14838,9 +14650,9 @@ type HlsSettings struct { // Use this setting only in audio-only outputs. Choose MPEG-2 Transport Stream // (M2TS) to create a file in an MPEG2-TS container. Keep the default value - // Automatic (AUTOMATIC) to create an audio-only file in a raw container. Regardless - // of the value that you specify here, if this output has video, the service - // will place the output into an MPEG2-TS container. + // Automatic to create an audio-only file in a raw container. Regardless of + // the value that you specify here, if this output has video, the service will + // place the output into an MPEG2-TS container. AudioOnlyContainer *string `locationName:"audioOnlyContainer" type:"string" enum:"HlsAudioOnlyContainer"` // List all the audio groups that are used with the video output stream. Input @@ -14861,21 +14673,20 @@ type HlsSettings struct { AudioTrackType *string `locationName:"audioTrackType" type:"string" enum:"HlsAudioTrackType"` // Specify whether to flag this audio track as descriptive video service (DVS) - // in your HLS parent manifest. When you choose Flag (FLAG), MediaConvert includes + // in your HLS parent manifest. When you choose Flag, MediaConvert includes // the parameter CHARACTERISTICS="public.accessibility.describes-video" in the // EXT-X-MEDIA entry for this track. When you keep the default choice, Don't - // flag (DONT_FLAG), MediaConvert leaves this parameter out. The DVS flag can - // help with accessibility on Apple devices. For more information, see the Apple - // documentation. + // flag, MediaConvert leaves this parameter out. The DVS flag can help with + // accessibility on Apple devices. For more information, see the Apple documentation. DescriptiveVideoServiceFlag *string `locationName:"descriptiveVideoServiceFlag" type:"string" enum:"HlsDescriptiveVideoServiceFlag"` - // Choose Include (INCLUDE) to have MediaConvert generate a child manifest that - // lists only the I-frames for this rendition, in addition to your regular manifest + // Choose Include to have MediaConvert generate a child manifest that lists + // only the I-frames for this rendition, in addition to your regular manifest // for this rendition. You might use this manifest as part of a workflow that // creates preview functions for your video. MediaConvert adds both the I-frame // only child manifest and the regular child manifest to the parent manifest. // When you don't need the I-frame only child manifest, keep the default value - // Exclude (EXCLUDE). + // Exclude. IFrameOnlyManifest *string `locationName:"iFrameOnlyManifest" type:"string" enum:"HlsIFrameOnlyManifest"` // Use this setting to add an identifying string to the filename of each segment. @@ -15017,17 +14828,17 @@ func (s *HopDestination) SetWaitMinutes(v int64) *HopDestination { return s } -// To insert ID3 tags in your output, specify two values. 
Use ID3 tag (Id3) -// to specify the base 64 encoded string and use Timecode (TimeCode) to specify -// the time when the tag should be inserted. To insert multiple ID3 tags in -// your output, create multiple instances of ID3 insertion (Id3Insertion). +// To insert ID3 tags in your output, specify two values. Use ID3 tag to specify +// the base64-encoded string and use Timecode to specify the time when the +// tag should be inserted. To insert multiple ID3 tags in your output, create +// multiple instances of ID3 insertion. type Id3Insertion struct { _ struct{} `type:"structure"` - // Use ID3 tag (Id3) to provide a fully formed ID3 tag in base64-encode format. + // Use ID3 tag to provide a fully formed ID3 tag in base64-encoded format. Id3 *string `locationName:"id3" type:"string"` - // Provide a Timecode (TimeCode) in HH:MM:SS:FF or HH:MM:SS;FF format. + // Provide a Timecode in HH:MM:SS:FF or HH:MM:SS;FF format. Timecode *string `locationName:"timecode" type:"string"` } @@ -15137,8 +14948,6 @@ func (s *ImageInserter) SetSdrReferenceWhiteLevel(v int64) *ImageInserter { // in a file that is separate from the video container. Set up sidecar captions // in the same output group, but different output from your video. For more // information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html. -// When you work directly in your JSON job specification, include this object -// and any required children when you set destinationType to IMSC. type ImscDestinationSettings struct { _ struct{} `type:"structure"` @@ -15216,30 +15025,26 @@ type Input struct { AdvancedInputFilterSettings *AdvancedInputFilterSettings `locationName:"advancedInputFilterSettings" type:"structure"` // Use audio selector groups to combine multiple sidecar audio inputs so that - // you can assign them to a single output audio tab (AudioDescription). Note - // that, if you're working with embedded audio, it's simpler to assign multiple - // input tracks into a single audio selector rather than use an audio selector - // group. + // you can assign them to a single output audio tab. Note that, if you're working + // with embedded audio, it's simpler to assign multiple input tracks into a + // single audio selector rather than use an audio selector group. AudioSelectorGroups map[string]*AudioSelectorGroup `locationName:"audioSelectorGroups" type:"map"` - // Use Audio selectors (AudioSelectors) to specify a track or set of tracks - // from the input that you will use in your outputs. You can use multiple Audio - // selectors per input. + // Use Audio selectors to specify a track or set of tracks from the input that + // you will use in your outputs. You can use multiple Audio selectors per input. AudioSelectors map[string]*AudioSelector `locationName:"audioSelectors" type:"map"` // Use captions selectors to specify the captions data from your input that // you use in your outputs. You can use up to 100 captions selectors per input. CaptionSelectors map[string]*CaptionSelector `locationName:"captionSelectors" type:"map"` - // Use Cropping selection (crop) to specify the video area that the service - // will include in the output video frame. If you specify a value here, it will - // override any value that you specify in the output setting Cropping selection - // (crop). + // Use Cropping selection to specify the video area that the service will include + // in the output video frame. 
If you specify a value here, it will override + // any value that you specify in the output setting Cropping selection. Crop *Rectangle `locationName:"crop" type:"structure"` - // Enable Deblock (InputDeblockFilter) to produce smoother motion in the output. - // Default is disabled. Only manually controllable for MPEG2 and uncompressed - // video inputs. + // Enable Deblock to produce smoother motion in the output. Default is disabled. + // Only manually controllable for MPEG2 and uncompressed video inputs. DeblockFilter *string `locationName:"deblockFilter" type:"string" enum:"InputDeblockFilter"` // Settings for decrypting any input files that you encrypt before you upload @@ -15248,9 +15053,8 @@ type Input struct { // your content. DecryptionSettings *InputDecryptionSettings `locationName:"decryptionSettings" type:"structure"` - // Enable Denoise (InputDenoiseFilter) to filter noise from the input. Default - // is disabled. Only applicable to MPEG2, H.264, H.265, and uncompressed video - // inputs. + // Enable Denoise to filter noise from the input. Default is disabled. Only + // applicable to MPEG2, H.264, H.265, and uncompressed video inputs. DenoiseFilter *string `locationName:"denoiseFilter" type:"string" enum:"InputDenoiseFilter"` // Use this setting only when your video source has Dolby Vision studio mastering @@ -15268,8 +15072,8 @@ type Input struct { // you specify them in the job, to create the outputs. If your input format // is IMF, specify your input by providing the path to your CPL. For example, // "s3://bucket/vf/cpl.xml". If the CPL is in an incomplete IMP, make sure to - // use *Supplemental IMPs* (SupplementalImps) to specify any supplemental IMPs - // that contain assets referenced by the CPL. + // use *Supplemental IMPs* to specify any supplemental IMPs that contain assets + // referenced by the CPL. FileInput *string `locationName:"fileInput" type:"string"` // Specify whether to apply input filtering to improve the video quality of @@ -15293,42 +15097,41 @@ type Input struct { // is disabled by default. ImageInserter *ImageInserter `locationName:"imageInserter" type:"structure"` - // (InputClippings) contains sets of start and end times that together specify - // a portion of the input to be used in the outputs. If you provide only a start - // time, the clip will be the entire input from that point to the end. If you - // provide only an end time, it will be the entire input up to that point. When - // you specify more than one input clip, the transcoding service creates the - // job outputs by stringing the clips together in the order you specify them. + // Contains sets of start and end times that together specify a portion of the + // input to be used in the outputs. If you provide only a start time, the clip + // will be the entire input from that point to the end. If you provide only + // an end time, it will be the entire input up to that point. When you specify + // more than one input clip, the transcoding service creates the job outputs + // by stringing the clips together in the order you specify them. InputClippings []*InputClipping `locationName:"inputClippings" type:"list"` // When you have a progressive segmented frame (PsF) input, use this setting // to flag the input as PsF. MediaConvert doesn't automatically detect PsF. // Therefore, flagging your input as PsF results in better preservation of video // quality when you do deinterlacing and frame rate conversion. If you don't - // specify, the default value is Auto (AUTO). 
Auto is the correct setting for - // all inputs that are not PsF. Don't set this value to PsF when your input - // is interlaced. Doing so creates horizontal interlacing artifacts. + // specify, the default value is Auto. Auto is the correct setting for all inputs + // that are not PsF. Don't set this value to PsF when your input is interlaced. + // Doing so creates horizontal interlacing artifacts. InputScanType *string `locationName:"inputScanType" type:"string" enum:"InputScanType"` - // Use Selection placement (position) to define the video area in your output - // frame. The area outside of the rectangle that you specify here is black. - // If you specify a value here, it will override any value that you specify - // in the output setting Selection placement (position). If you specify a value - // here, this will override any AFD values in your input, even if you set Respond - // to AFD (RespondToAfd) to Respond (RESPOND). If you specify a value here, - // this will ignore anything that you specify for the setting Scaling Behavior - // (scalingBehavior). + // Use Selection placement to define the video area in your output frame. The + // area outside of the rectangle that you specify here is black. If you specify + // a value here, it will override any value that you specify in the output setting + // Selection placement. If you specify a value here, this will override any + // AFD values in your input, even if you set Respond to AFD to Respond. If you + // specify a value here, this will ignore anything that you specify for the + // setting Scaling Behavior. Position *Rectangle `locationName:"position" type:"structure"` - // Use Program (programNumber) to select a specific program from within a multi-program - // transport stream. Note that Quad 4K is not currently supported. Default is - // the first program within the transport stream. If the program you specify - // doesn't exist, the transcoding service will use this default. + // Use Program to select a specific program from within a multi-program transport + // stream. Note that Quad 4K is not currently supported. Default is the first + // program within the transport stream. If the program you specify doesn't exist, + // the transcoding service will use this default. ProgramNumber *int64 `locationName:"programNumber" min:"1" type:"integer"` - // Set PSI control (InputPsiControl) for transport stream inputs to specify - // which data the demux process to scans. * Ignore PSI - Scan all PIDs for audio - // and video. * Use PSI - Scan only PSI data. + // Set PSI control for transport stream inputs to specify which data the demux + // process scans. * Ignore PSI - Scan all PIDs for audio and video. * Use PSI + // - Scan only PSI data. PsiControl *string `locationName:"psiControl" type:"string" enum:"InputPsiControl"` // Provide a list of any necessary supplemental IMPs. You need supplemental @@ -15339,22 +15142,21 @@ type Input struct { // service automatically detects it. SupplementalImps []*string `locationName:"supplementalImps" type:"list"` - // Use this Timecode source setting, located under the input settings (InputTimecodeSource), - // to specify how the service counts input video frames. This input frame count - // affects only the behavior of features that apply to a single input at a time, - // such as input clipping and synchronizing some captions formats. Choose Embedded - // (EMBEDDED) to use the timecodes in your input video. Choose Start at zero - // (ZEROBASED) to start the first frame at zero. 
Choose Specified start (SPECIFIEDSTART) - // to start the first frame at the timecode that you specify in the setting - // Start timecode (timecodeStart). If you don't specify a value for Timecode - // source, the service will use Embedded by default. For more information about - // timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode. + // Use this Timecode source setting, located under the input settings, to specify + // how the service counts input video frames. This input frame count affects + // only the behavior of features that apply to a single input at a time, such + // as input clipping and synchronizing some captions formats. Choose Embedded + // to use the timecodes in your input video. Choose Start at zero to start the + // first frame at zero. Choose Specified start to start the first frame at the + // timecode that you specify in the setting Start timecode. If you don't specify + // a value for Timecode source, the service will use Embedded by default. For + // more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode. TimecodeSource *string `locationName:"timecodeSource" type:"string" enum:"InputTimecodeSource"` // Specify the timecode that you want the service to use for this input's initial // frame. To use this setting, you must set the Timecode source setting, located - // under the input settings (InputTimecodeSource), to Specified start (SPECIFIEDSTART). - // For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode. + // under the input settings, to Specified start. For more information about + // timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode. TimecodeStart *string `locationName:"timecodeStart" min:"11" type:"string"` // When you include Video generator, MediaConvert creates a video input with @@ -15607,25 +15409,24 @@ func (s *Input) SetVideoSelector(v *VideoSelector) *Input { type InputClipping struct { _ struct{} `type:"structure"` - // Set End timecode (EndTimecode) to the end of the portion of the input you - // are clipping. The frame corresponding to the End timecode value is included - // in the clip. Start timecode or End timecode may be left blank, but not both. - // Use the format HH:MM:SS:FF or HH:MM:SS;FF, where HH is the hour, MM is the - // minute, SS is the second, and FF is the frame number. When choosing this - // value, take into account your setting for timecode source under input settings - // (InputTimecodeSource). For example, if you have embedded timecodes that start - // at 01:00:00:00 and you want your clip to end six minutes into the video, - // use 01:06:00:00. + // Set End timecode to the end of the portion of the input you are clipping. + // The frame corresponding to the End timecode value is included in the clip. + // Start timecode or End timecode may be left blank, but not both. Use the format + // HH:MM:SS:FF or HH:MM:SS;FF, where HH is the hour, MM is the minute, SS is + // the second, and FF is the frame number. When choosing this value, take into + // account your setting for timecode source under input settings. For example, + // if you have embedded timecodes that start at 01:00:00:00 and you want your + // clip to end six minutes into the video, use 01:06:00:00. EndTimecode *string `locationName:"endTimecode" type:"string"` - // Set Start timecode (StartTimecode) to the beginning of the portion of the - // input you are clipping. 
The frame corresponding to the Start timecode value - // is included in the clip. Start timecode or End timecode may be left blank, - // but not both. Use the format HH:MM:SS:FF or HH:MM:SS;FF, where HH is the - // hour, MM is the minute, SS is the second, and FF is the frame number. When - // choosing this value, take into account your setting for Input timecode source. - // For example, if you have embedded timecodes that start at 01:00:00:00 and - // you want your clip to begin five minutes into the video, use 01:05:00:00. + // Set Start timecode to the beginning of the portion of the input you are clipping. + // The frame corresponding to the Start timecode value is included in the clip. + // Start timecode or End timecode may be left blank, but not both. Use the format + // HH:MM:SS:FF or HH:MM:SS;FF, where HH is the hour, MM is the minute, SS is + // the second, and FF is the frame number. When choosing this value, take into + // account your setting for Input timecode source. For example, if you have + // embedded timecodes that start at 01:00:00:00 and you want your clip to begin + // five minutes into the video, use 01:05:00:00. StartTimecode *string `locationName:"startTimecode" type:"string"` } @@ -15772,35 +15573,30 @@ type InputTemplate struct { AdvancedInputFilterSettings *AdvancedInputFilterSettings `locationName:"advancedInputFilterSettings" type:"structure"` // Use audio selector groups to combine multiple sidecar audio inputs so that - // you can assign them to a single output audio tab (AudioDescription). Note - // that, if you're working with embedded audio, it's simpler to assign multiple - // input tracks into a single audio selector rather than use an audio selector - // group. + // you can assign them to a single output audio tab. Note that, if you're working + // with embedded audio, it's simpler to assign multiple input tracks into a + // single audio selector rather than use an audio selector group. AudioSelectorGroups map[string]*AudioSelectorGroup `locationName:"audioSelectorGroups" type:"map"` - // Use Audio selectors (AudioSelectors) to specify a track or set of tracks - // from the input that you will use in your outputs. You can use multiple Audio - // selectors per input. + // Use Audio selectors to specify a track or set of tracks from the input that + // you will use in your outputs. You can use multiple Audio selectors per input. AudioSelectors map[string]*AudioSelector `locationName:"audioSelectors" type:"map"` // Use captions selectors to specify the captions data from your input that // you use in your outputs. You can use up to 100 captions selectors per input. CaptionSelectors map[string]*CaptionSelector `locationName:"captionSelectors" type:"map"` - // Use Cropping selection (crop) to specify the video area that the service - // will include in the output video frame. If you specify a value here, it will - // override any value that you specify in the output setting Cropping selection - // (crop). + // Use Cropping selection to specify the video area that the service will include + // in the output video frame. If you specify a value here, it will override + // any value that you specify in the output setting Cropping selection. Crop *Rectangle `locationName:"crop" type:"structure"` - // Enable Deblock (InputDeblockFilter) to produce smoother motion in the output. - // Default is disabled. Only manually controllable for MPEG2 and uncompressed - // video inputs. + // Enable Deblock to produce smoother motion in the output. Default is disabled. 
+ // Only manually controllable for MPEG2 and uncompressed video inputs. DeblockFilter *string `locationName:"deblockFilter" type:"string" enum:"InputDeblockFilter"` - // Enable Denoise (InputDenoiseFilter) to filter noise from the input. Default - // is disabled. Only applicable to MPEG2, H.264, H.265, and uncompressed video - // inputs. + // Enable Denoise to filter noise from the input. Default is disabled. Only + // applicable to MPEG2, H.264, H.265, and uncompressed video inputs. DenoiseFilter *string `locationName:"denoiseFilter" type:"string" enum:"InputDenoiseFilter"` // Use this setting only when your video source has Dolby Vision studio mastering @@ -15834,60 +15630,58 @@ type InputTemplate struct { // is disabled by default. ImageInserter *ImageInserter `locationName:"imageInserter" type:"structure"` - // (InputClippings) contains sets of start and end times that together specify - // a portion of the input to be used in the outputs. If you provide only a start - // time, the clip will be the entire input from that point to the end. If you - // provide only an end time, it will be the entire input up to that point. When - // you specify more than one input clip, the transcoding service creates the - // job outputs by stringing the clips together in the order you specify them. + // Contains sets of start and end times that together specify a portion of the + // input to be used in the outputs. If you provide only a start time, the clip + // will be the entire input from that point to the end. If you provide only + // an end time, it will be the entire input up to that point. When you specify + // more than one input clip, the transcoding service creates the job outputs + // by stringing the clips together in the order you specify them. InputClippings []*InputClipping `locationName:"inputClippings" type:"list"` // When you have a progressive segmented frame (PsF) input, use this setting // to flag the input as PsF. MediaConvert doesn't automatically detect PsF. // Therefore, flagging your input as PsF results in better preservation of video // quality when you do deinterlacing and frame rate conversion. If you don't - // specify, the default value is Auto (AUTO). Auto is the correct setting for - // all inputs that are not PsF. Don't set this value to PsF when your input - // is interlaced. Doing so creates horizontal interlacing artifacts. + // specify, the default value is Auto. Auto is the correct setting for all inputs + // that are not PsF. Don't set this value to PsF when your input is interlaced. + // Doing so creates horizontal interlacing artifacts. InputScanType *string `locationName:"inputScanType" type:"string" enum:"InputScanType"` - // Use Selection placement (position) to define the video area in your output - // frame. The area outside of the rectangle that you specify here is black. - // If you specify a value here, it will override any value that you specify - // in the output setting Selection placement (position). If you specify a value - // here, this will override any AFD values in your input, even if you set Respond - // to AFD (RespondToAfd) to Respond (RESPOND). If you specify a value here, - // this will ignore anything that you specify for the setting Scaling Behavior - // (scalingBehavior). + // Use Selection placement to define the video area in your output frame. The + // area outside of the rectangle that you specify here is black. 
If you specify + // a value here, it will override any value that you specify in the output setting + // Selection placement. If you specify a value here, this will override any + // AFD values in your input, even if you set Respond to AFD to Respond. If you + // specify a value here, this will ignore anything that you specify for the + // setting Scaling Behavior. Position *Rectangle `locationName:"position" type:"structure"` - // Use Program (programNumber) to select a specific program from within a multi-program - // transport stream. Note that Quad 4K is not currently supported. Default is - // the first program within the transport stream. If the program you specify - // doesn't exist, the transcoding service will use this default. + // Use Program to select a specific program from within a multi-program transport + // stream. Note that Quad 4K is not currently supported. Default is the first + // program within the transport stream. If the program you specify doesn't exist, + // the transcoding service will use this default. ProgramNumber *int64 `locationName:"programNumber" min:"1" type:"integer"` - // Set PSI control (InputPsiControl) for transport stream inputs to specify - // which data the demux process to scans. * Ignore PSI - Scan all PIDs for audio - // and video. * Use PSI - Scan only PSI data. + // Set PSI control for transport stream inputs to specify which data the demux + // process scans. * Ignore PSI - Scan all PIDs for audio and video. * Use PSI + // - Scan only PSI data. PsiControl *string `locationName:"psiControl" type:"string" enum:"InputPsiControl"` - // Use this Timecode source setting, located under the input settings (InputTimecodeSource), - // to specify how the service counts input video frames. This input frame count - // affects only the behavior of features that apply to a single input at a time, - // such as input clipping and synchronizing some captions formats. Choose Embedded - // (EMBEDDED) to use the timecodes in your input video. Choose Start at zero - // (ZEROBASED) to start the first frame at zero. Choose Specified start (SPECIFIEDSTART) - // to start the first frame at the timecode that you specify in the setting - // Start timecode (timecodeStart). If you don't specify a value for Timecode - // source, the service will use Embedded by default. For more information about - // timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode. + // Use this Timecode source setting, located under the input settings, to specify + // how the service counts input video frames. This input frame count affects + // only the behavior of features that apply to a single input at a time, such + // as input clipping and synchronizing some captions formats. Choose Embedded + // to use the timecodes in your input video. Choose Start at zero to start the + // first frame at zero. Choose Specified start to start the first frame at the + // timecode that you specify in the setting Start timecode. If you don't specify + // a value for Timecode source, the service will use Embedded by default. For + // more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode. TimecodeSource *string `locationName:"timecodeSource" type:"string" enum:"InputTimecodeSource"` // Specify the timecode that you want the service to use for this input's initial // frame. To use this setting, you must set the Timecode source setting, located - // under the input settings (InputTimecodeSource), to Specified start (SPECIFIEDSTART). 
- // For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode. + // under the input settings, to Specified start. For more information about + // timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode. TimecodeStart *string `locationName:"timecodeStart" min:"11" type:"string"` // Input video selectors contain the video settings for the input. Each of your @@ -16187,9 +15981,8 @@ type InsertableImage struct { // for Layer appear on top of images with lower values for Layer. Layer *int64 `locationName:"layer" type:"integer"` - // Use Opacity (Opacity) to specify how much of the underlying video shows through - // the inserted image. 0 is transparent and 100 is fully opaque. Default is - // 50. + // Use Opacity to specify how much of the underlying video shows through the + // inserted image. 0 is transparent and 100 is fully opaque. Default is 50. Opacity *int64 `locationName:"opacity" type:"integer"` // Specify the timecode of the frame that you want the overlay to first appear @@ -16741,8 +16534,8 @@ type JobSettings struct { // 05h Content Advisory. ExtendedDataServices *ExtendedDataServices `locationName:"extendedDataServices" type:"structure"` - // Use Inputs (inputs) to define source file used in the transcode job. There - // can be multiple inputs add in a job. These inputs will be concantenated together + // Use Inputs to define the source files used in the transcode job. There can be + // multiple inputs added in a job. These inputs will be concatenated together // to create the output. Inputs []*Input `locationName:"inputs" type:"list"` @@ -16759,12 +16552,8 @@ MotionImageInserter *MotionImageInserter `locationName:"motionImageInserter" type:"structure"` // Settings for your Nielsen configuration. If you don't do Nielsen measurement - // and analytics, ignore these settings. When you enable Nielsen configuration - // (nielsenConfiguration), MediaConvert enables PCM to ID3 tagging for all outputs - // in the job. To enable Nielsen configuration programmatically, include an - // instance of nielsenConfiguration in your JSON job specification. Even if - // you don't include any children of nielsenConfiguration, you still enable - // the setting. + // and analytics, ignore these settings. When you enable Nielsen configuration, + // MediaConvert enables PCM to ID3 tagging for all outputs in the job. NielsenConfiguration *NielsenConfiguration `locationName:"nielsenConfiguration" type:"structure"` // Ignore these settings unless you are using Nielsen non-linear watermarking. @@ -16776,24 +16565,23 @@ type JobSettings struct { // Engine Version 1.2.7 Nielsen Watermark Authenticator [SID_TIC] Version [5.0.0] NielsenNonLinearWatermark *NielsenNonLinearWatermarkSettings `locationName:"nielsenNonLinearWatermark" type:"structure"` - // (OutputGroups) contains one group of settings for each set of outputs that - // share a common package type. All unpackaged files (MPEG-4, MPEG-2 TS, Quicktime, - // MXF, and no container) are grouped in a single output group as well. Required - // in (OutputGroups) is a group of settings that apply to the whole group. This - // required object depends on the value you set for (Type) under (OutputGroups)>(OutputGroupSettings). - // Type, settings object pairs are as follows.
* FILE_GROUP_SETTINGS, FileGroupSettings - // * HLS_GROUP_SETTINGS, HlsGroupSettings * DASH_ISO_GROUP_SETTINGS, DashIsoGroupSettings - // * MS_SMOOTH_GROUP_SETTINGS, MsSmoothGroupSettings * CMAF_GROUP_SETTINGS, - // CmafGroupSettings + // Contains one group of settings for each set of outputs that share a common + // package type. All unpackaged files (MPEG-4, MPEG-2 TS, Quicktime, MXF, and + // no container) are grouped in a single output group as well. Required in OutputGroups is + // a group of settings that apply to the whole group. This required object depends + // on the value you set for Type. Type, settings object pairs are as follows. + // * FILE_GROUP_SETTINGS, FileGroupSettings * HLS_GROUP_SETTINGS, HlsGroupSettings + // * DASH_ISO_GROUP_SETTINGS, DashIsoGroupSettings * MS_SMOOTH_GROUP_SETTINGS, + // MsSmoothGroupSettings * CMAF_GROUP_SETTINGS, CmafGroupSettings OutputGroups []*OutputGroup `locationName:"outputGroups" type:"list"` // These settings control how the service handles timecodes throughout the job. // These settings don't affect input clipping. TimecodeConfig *TimecodeConfig `locationName:"timecodeConfig" type:"structure"` - // Insert user-defined custom ID3 metadata (id3) at timecodes (timecode) that - // you specify. In each output that you want to include this metadata, you must - // set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH). + // Insert user-defined custom ID3 metadata at timecodes that you specify. In + // each output that you want to include this metadata, you must set ID3 metadata + // to Passthrough. TimedMetadataInsertion *TimedMetadataInsertion `locationName:"timedMetadataInsertion" type:"structure"` } @@ -17117,9 +16905,9 @@ type JobTemplateSettings struct { // 05h Content Advisory. ExtendedDataServices *ExtendedDataServices `locationName:"extendedDataServices" type:"structure"` - // Use Inputs (inputs) to define the source file used in the transcode job. - // There can only be one input in a job template. Using the API, you can include - // multiple inputs when referencing a job template. + // Use Inputs to define the source file used in the transcode job. There can + // only be one input in a job template. Using the API, you can include multiple + // inputs when referencing a job template. Inputs []*InputTemplate `locationName:"inputs" type:"list"` // Use these settings only when you use Kantar watermarking. Specify the values @@ -17135,12 +16923,8 @@ type JobTemplateSettings struct { MotionImageInserter *MotionImageInserter `locationName:"motionImageInserter" type:"structure"` // Settings for your Nielsen configuration. If you don't do Nielsen measurement - // and analytics, ignore these settings. When you enable Nielsen configuration - // (nielsenConfiguration), MediaConvert enables PCM to ID3 tagging for all outputs - // in the job. To enable Nielsen configuration programmatically, include an - // instance of nielsenConfiguration in your JSON job specification. Even if - // you don't include any children of nielsenConfiguration, you still enable - // the setting. + // and analytics, ignore these settings. When you enable Nielsen configuration, + // MediaConvert enables PCM to ID3 tagging for all outputs in the job. NielsenConfiguration *NielsenConfiguration `locationName:"nielsenConfiguration" type:"structure"` // Ignore these settings unless you are using Nielsen non-linear watermarking.
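To make the Type/settings-object pairing above concrete, here is a minimal Go sketch of a job settings value built with this SDK. It is illustrative only, not part of the diff: the OutputGroup, OutputGroupSettings, Type, and FileGroupSettings names come from the documentation above, while the Destination value and bucket path are assumed for the example.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	// The settings object must match Type: FILE_GROUP_SETTINGS pairs with
	// FileGroupSettings, HLS_GROUP_SETTINGS with HlsGroupSettings, and so on.
	settings := &mediaconvert.JobSettings{
		OutputGroups: []*mediaconvert.OutputGroup{{
			OutputGroupSettings: &mediaconvert.OutputGroupSettings{
				Type: aws.String("FILE_GROUP_SETTINGS"),
				FileGroupSettings: &mediaconvert.FileGroupSettings{
					// Destination (assumed here for illustration) is the S3
					// output location and output filename base.
					Destination: aws.String("s3://example-bucket/outputs/"),
				},
			},
		}},
	}
	fmt.Println(settings.String())
}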
@@ -17152,24 +16936,23 @@ type JobTemplateSettings struct { // Engine Version 1.2.7 Nielsen Watermark Authenticator [SID_TIC] Version [5.0.0] NielsenNonLinearWatermark *NielsenNonLinearWatermarkSettings `locationName:"nielsenNonLinearWatermark" type:"structure"` - // (OutputGroups) contains one group of settings for each set of outputs that - // share a common package type. All unpackaged files (MPEG-4, MPEG-2 TS, Quicktime, - // MXF, and no container) are grouped in a single output group as well. Required - // in (OutputGroups) is a group of settings that apply to the whole group. This - // required object depends on the value you set for (Type) under (OutputGroups)>(OutputGroupSettings). - // Type, settings object pairs are as follows. * FILE_GROUP_SETTINGS, FileGroupSettings - // * HLS_GROUP_SETTINGS, HlsGroupSettings * DASH_ISO_GROUP_SETTINGS, DashIsoGroupSettings - // * MS_SMOOTH_GROUP_SETTINGS, MsSmoothGroupSettings * CMAF_GROUP_SETTINGS, - // CmafGroupSettings + // Contains one group of settings for each set of outputs that share a common + // package type. All unpackaged files (MPEG-4, MPEG-2 TS, Quicktime, MXF, and + // no container) are grouped in a single output group as well. Required in OutputGroups is + // a group of settings that apply to the whole group. This required object depends + // on the value you set for Type. Type, settings object pairs are as follows. + // * FILE_GROUP_SETTINGS, FileGroupSettings * HLS_GROUP_SETTINGS, HlsGroupSettings + // * DASH_ISO_GROUP_SETTINGS, DashIsoGroupSettings * MS_SMOOTH_GROUP_SETTINGS, + // MsSmoothGroupSettings * CMAF_GROUP_SETTINGS, CmafGroupSettings OutputGroups []*OutputGroup `locationName:"outputGroups" type:"list"` // These settings control how the service handles timecodes throughout the job. // These settings don't affect input clipping. TimecodeConfig *TimecodeConfig `locationName:"timecodeConfig" type:"structure"` - // Insert user-defined custom ID3 metadata (id3) at timecodes (timecode) that - // you specify. In each output that you want to include this metadata, you must - // set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH). + // Insert user-defined custom ID3 metadata at timecodes that you specify. In + // each output that you want to include this metadata, you must set ID3 metadata + // to Passthrough. TimedMetadataInsertion *TimedMetadataInsertion `locationName:"timedMetadataInsertion" type:"structure"` } @@ -18119,7 +17902,7 @@ func (s *ListTagsForResourceOutput) SetResourceTags(v *ResourceTags) *ListTagsFo // Settings for SCTE-35 signals from ESAM. Include this in your job settings // to put SCTE-35 markers in your HLS and transport stream outputs at the insertion // points that you specify in an ESAM XML document. Provide the document in -// the setting SCC XML (sccXml). +// the setting SCC XML. type M2tsScte35Esam struct { _ struct{} `type:"structure"` @@ -18166,15 +17949,15 @@ func (s *M2tsScte35Esam) SetScte35EsamPid(v int64) *M2tsScte35Esam { } // MPEG-2 TS container settings. These apply to outputs in a File output group -// when the output's container (ContainerType) is MPEG-2 Transport Stream (M2TS). -// In these assets, data is organized by the program map table (PMT). Each transport -// stream program contains subsets of data, including audio, video, and metadata. -// Each of these subsets of data has a numerical label called a packet identifier -// (PID). Each transport stream program corresponds to one MediaConvert output. -// The PMT lists the types of data in a program along with their PID.
Downstream -// systems and players use the program map table to look up the PID for each -// type of data it accesses and then uses the PIDs to locate specific data within -// the asset. +// when the output's container is MPEG-2 Transport Stream (M2TS). In these assets, +// data is organized by the program map table (PMT). Each transport stream program +// contains subsets of data, including audio, video, and metadata. Each of these +// subsets of data has a numerical label called a packet identifier (PID). Each +// transport stream program corresponds to one MediaConvert output. The PMT +// lists the types of data in a program along with their PID. Downstream systems +// and players use the program map table to look up the PID for each type of +// data they access and then use the PIDs to locate specific data within the +// asset. type M2tsSettings struct { _ struct{} `type:"structure"` @@ -18183,17 +17966,17 @@ type M2tsSettings struct { // Specify this setting only when your output will be consumed by a downstream // repackaging workflow that is sensitive to very small duration differences - // between video and audio. For this situation, choose Match video duration - // (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default - // codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, - // MediaConvert pads the output audio streams with silence or trims them to - // ensure that the total duration of each audio stream is at least as long as - // the total duration of the video stream. After padding or trimming, the audio - // stream duration is no more than one frame longer than the video stream. MediaConvert - // applies audio padding or trimming only to the end of the last segment of - // the output. For unsegmented outputs, MediaConvert adds padding only to the - // end of the file. When you keep the default value, any minor discrepancies - // between audio and video duration will depend on your output audio codec. + // between video and audio. For this situation, choose Match video duration. + // In all other cases, keep the default value, Default codec duration. When + // you choose Match video duration, MediaConvert pads the output audio streams + // with silence or trims them to ensure that the total duration of each audio + // stream is at least as long as the total duration of the video stream. After + // padding or trimming, the audio stream duration is no more than one frame + // longer than the video stream. MediaConvert applies audio padding or trimming + // only to the end of the last segment of the output. For unsegmented outputs, + // MediaConvert adds padding only to the end of the file. When you keep the + // default value, any minor discrepancies between audio and video duration will + // depend on your output audio codec. AudioDuration *string `locationName:"audioDuration" type:"string" enum:"M2tsAudioDuration"` // The number of audio frames to insert for each PES packet. @@ -18217,19 +18000,15 @@ type M2tsSettings struct { // If you select ALIGN_TO_VIDEO, MediaConvert writes captions and data packets // with Presentation Timestamp (PTS) values greater than or equal to the first // video packet PTS (MediaConvert drops captions and data packets with lesser - // PTS values). Keep the default value (AUTO) to allow all PTS values. + // PTS values). Keep the default value to allow all PTS values.
DataPTSControl *string `locationName:"dataPTSControl" type:"string" enum:"M2tsDataPtsControl"` // Use these settings to insert a DVB Network Information Table (NIT) in the - // transport stream of this output. When you work directly in your JSON job - // specification, include this object only when your job has a transport stream - // output and the container settings contain the object M2tsSettings. + // transport stream of this output. DvbNitSettings *DvbNitSettings `locationName:"dvbNitSettings" type:"structure"` // Use these settings to insert a DVB Service Description Table (SDT) in the - // transport stream of this output. When you work directly in your JSON job - // specification, include this object only when your job has a transport stream - // output and the container settings contain the object M2tsSettings. + // transport stream of this output. DvbSdtSettings *DvbSdtSettings `locationName:"dvbSdtSettings" type:"structure"` // Specify the packet identifiers (PIDs) for DVB subtitle data included in this @@ -18237,9 +18016,7 @@ type M2tsSettings struct { DvbSubPids []*int64 `locationName:"dvbSubPids" type:"list"` // Use these settings to insert a DVB Time and Date Table (TDT) in the transport - // stream of this output. When you work directly in your JSON job specification, - // include this object only when your job has a transport stream output and - // the container settings contain the object M2tsSettings. + // stream of this output. DvbTdtSettings *DvbTdtSettings `locationName:"dvbTdtSettings" type:"structure"` // Specify the packet identifier (PID) for DVB teletext data you include in @@ -18263,9 +18040,9 @@ type M2tsSettings struct { // Controls whether to include the ES Rate field in the PES header. EsRateInPes *string `locationName:"esRateInPes" type:"string" enum:"M2tsEsRateInPes"` - // Keep the default value (DEFAULT) unless you know that your audio EBP markers - // are incorrectly appearing before your video EBP markers. To correct this - // problem, set this value to Force (FORCE). + // Keep the default value unless you know that your audio EBP markers are incorrectly + // appearing before your video EBP markers. To correct this problem, set this + // value to Force. ForceTsVideoEbpOrder *string `locationName:"forceTsVideoEbpOrder" type:"string" enum:"M2tsForceTsVideoEbpOrder"` // The length, in seconds, of each fragment. Only used with EBP markers. @@ -18309,7 +18086,7 @@ type M2tsSettings struct { // Specify the packet identifier (PID) for the program clock reference (PCR) // in this output. If you do not specify a value, the service will use the value - // for Video PID (VideoPid). + // for Video PID. PcrPid *int64 `locationName:"pcrPid" min:"32" type:"integer"` // Specify the number of milliseconds between instances of the program map table @@ -18324,10 +18101,10 @@ type M2tsSettings struct { // is 503. PrivateMetadataPid *int64 `locationName:"privateMetadataPid" min:"32" type:"integer"` - // Use Program number (programNumber) to specify the program number used in - // the program map table (PMT) for this output. Default is 1. Program numbers - // and program map tables are parts of MPEG-2 transport stream containers, used - // for organizing data. + // Use Program number to specify the program number used in the program map + // table (PMT) for this output. Default is 1. Program numbers and program map + // tables are parts of MPEG-2 transport stream containers, used for organizing + // data. 
ProgramNumber *int64 `locationName:"programNumber" type:"integer"` // When set to CBR, inserts null packets into transport stream to fill specified @@ -18337,19 +18114,19 @@ type M2tsSettings struct { // Include this in your job settings to put SCTE-35 markers in your HLS and // transport stream outputs at the insertion points that you specify in an ESAM - // XML document. Provide the document in the setting SCC XML (sccXml). + // XML document. Provide the document in the setting SCC XML. Scte35Esam *M2tsScte35Esam `locationName:"scte35Esam" type:"structure"` // Specify the packet identifier (PID) of the SCTE-35 stream in the transport // stream. Scte35Pid *int64 `locationName:"scte35Pid" min:"32" type:"integer"` - // For SCTE-35 markers from your input-- Choose Passthrough (PASSTHROUGH) if - // you want SCTE-35 markers that appear in your input to also appear in this - // output. Choose None (NONE) if you don't want SCTE-35 markers in this output. - // For SCTE-35 markers from an ESAM XML document-- Choose None (NONE). Also - // provide the ESAM XML as a string in the setting Signal processing notification - // XML (sccXml). Also enable ESAM SCTE-35 (include the property scte35Esam). + // For SCTE-35 markers from your input-- Choose Passthrough if you want SCTE-35 + // markers that appear in your input to also appear in this output. Choose None + // if you don't want SCTE-35 markers in this output. For SCTE-35 markers from + // an ESAM XML document-- Choose None. Also provide the ESAM XML as a string + // in the setting Signal processing notification XML. Also enable ESAM SCTE-35 + // (include the property scte35Esam). Scte35Source *string `locationName:"scte35Source" type:"string" enum:"M2tsScte35Source"` // Inserts segmentation markers at each segmentation_time period. rai_segstart @@ -18701,17 +18478,17 @@ type M3u8Settings struct { // Specify this setting only when your output will be consumed by a downstream // repackaging workflow that is sensitive to very small duration differences - // between video and audio. For this situation, choose Match video duration - // (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default - // codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, - // MediaConvert pads the output audio streams with silence or trims them to - // ensure that the total duration of each audio stream is at least as long as - // the total duration of the video stream. After padding or trimming, the audio - // stream duration is no more than one frame longer than the video stream. MediaConvert - // applies audio padding or trimming only to the end of the last segment of - // the output. For unsegmented outputs, MediaConvert adds padding only to the - // end of the file. When you keep the default value, any minor discrepancies - // between audio and video duration will depend on your output audio codec. + // between video and audio. For this situation, choose Match video duration. + // In all other cases, keep the default value, Default codec duration. When + // you choose Match video duration, MediaConvert pads the output audio streams + // with silence or trims them to ensure that the total duration of each audio + // stream is at least as long as the total duration of the video stream. After + // padding or trimming, the audio stream duration is no more than one frame + // longer than the video stream. MediaConvert applies audio padding or trimming + // only to the end of the last segment of the output. 
For unsegmented outputs, + // MediaConvert adds padding only to the end of the file. When you keep the + // default value, any minor discrepancies between audio and video duration will + // depend on your output audio codec. AudioDuration *string `locationName:"audioDuration" type:"string" enum:"M3u8AudioDuration"` // The number of audio frames to insert for each PES packet. @@ -18725,7 +18502,7 @@ type M3u8Settings struct { // If you select ALIGN_TO_VIDEO, MediaConvert writes captions and data packets // with Presentation Timestamp (PTS) values greater than or equal to the first // video packet PTS (MediaConvert drops captions and data packets with lesser - // PTS values). Keep the default value (AUTO) to allow all PTS values. + // PTS values). Keep the default value AUTO to allow all PTS values. DataPTSControl *string `locationName:"dataPTSControl" type:"string" enum:"M3u8DataPtsControl"` // Specify the maximum time, in milliseconds, between Program Clock References @@ -18767,21 +18544,19 @@ type M3u8Settings struct { // Packet Identifier (PID) of the SCTE-35 stream in the transport stream. Scte35Pid *int64 `locationName:"scte35Pid" min:"32" type:"integer"` - // For SCTE-35 markers from your input-- Choose Passthrough (PASSTHROUGH) if - // you want SCTE-35 markers that appear in your input to also appear in this - // output. Choose None (NONE) if you don't want SCTE-35 markers in this output. - // For SCTE-35 markers from an ESAM XML document-- Choose None (NONE) if you - // don't want manifest conditioning. Choose Passthrough (PASSTHROUGH) and choose - // Ad markers (adMarkers) if you do want manifest conditioning. In both cases, - // also provide the ESAM XML as a string in the setting Signal processing notification - // XML (sccXml). + // For SCTE-35 markers from your input-- Choose Passthrough if you want SCTE-35 + // markers that appear in your input to also appear in this output. Choose None + // if you don't want SCTE-35 markers in this output. For SCTE-35 markers from + // an ESAM XML document-- Choose None if you don't want manifest conditioning. + // Choose Passthrough and choose Ad markers if you do want manifest conditioning. + // In both cases, also provide the ESAM XML as a string in the setting Signal + // processing notification XML. Scte35Source *string `locationName:"scte35Source" type:"string" enum:"M3u8Scte35Source"` - // Set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH) to include - // ID3 metadata in this output. This includes ID3 metadata from the following - // features: ID3 timestamp period (timedMetadataId3Period), and Custom ID3 metadata - // inserter (timedMetadataInsertion). To exclude this ID3 metadata in this output: - // set ID3 metadata to None (NONE) or leave blank. + // Set ID3 metadata to Passthrough to include ID3 metadata in this output. This + // includes ID3 metadata from the following features: ID3 timestamp period, + // and Custom ID3 metadata inserter. To exclude this ID3 metadata in this output: + // set ID3 metadata to None or leave blank. TimedMetadata *string `locationName:"timedMetadata" type:"string" enum:"TimedMetadata"` // Packet Identifier (PID) of the ID3 metadata stream in the transport stream. @@ -19129,8 +18904,6 @@ type MotionImageInserter struct { // 0. If you need to set up your job to follow timecodes embedded in your source // that don't start at zero, make sure that you specify a start time that is // after the first embedded timecode. 
For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/setting-up-timecode.html - // Find job-wide and input timecode configuration settings in your JSON job - // settings specification at settings>timecodeConfig>source and settings>inputs>timecodeSource. StartTime *string `locationName:"startTime" min:"11" type:"string"` } @@ -19393,8 +19166,7 @@ func (s *MovSettings) SetReference(v string) *MovSettings { return s } -// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to -// the value MP2. +// Required when you set Codec to the value MP2. type Mp2Settings struct { _ struct{} `type:"structure"` @@ -19402,8 +19174,8 @@ type Mp2Settings struct { Bitrate *int64 `locationName:"bitrate" min:"32000" type:"integer"` // Set Channels to specify the number of channels in this output audio track. - // Choosing Mono in the console will give you 1 output channel; choosing Stereo - // will give you 2. In the API, valid values are 1 and 2. + // Choosing Mono will give you 1 output channel; choosing Stereo will give + // you 2. In the API, valid values are 1 and 2. Channels *int64 `locationName:"channels" min:"1" type:"integer"` // Sample rate in hz. @@ -19474,8 +19246,8 @@ type Mp3Settings struct { Bitrate *int64 `locationName:"bitrate" min:"16000" type:"integer"` // Specify the number of channels in this output audio track. Choosing Mono - // on the console gives you 1 output channel; choosing Stereo gives you 2. In - // the API, valid values are 1 and 2. + // gives you 1 output channel; choosing Stereo gives you 2. In the API, valid + // values are 1 and 2. Channels *int64 `locationName:"channels" min:"1" type:"integer"` // Specify whether the service encodes this MP3 audio output with a constant @@ -19485,9 +19257,8 @@ // Sample rate in hz. SampleRate *int64 `locationName:"sampleRate" min:"22050" type:"integer"` - // Required when you set Bitrate control mode (rateControlMode) to VBR. Specify - // the audio quality of this MP3 output from 0 (highest quality) to 9 (lowest - // quality). + // Required when you set Bitrate control mode to VBR. Specify the audio quality + // of this MP3 output from 0 (highest quality) to 9 (lowest quality). VbrQuality *int64 `locationName:"vbrQuality" type:"integer"` } @@ -19565,17 +19336,17 @@ type Mp4Settings struct { // Specify this setting only when your output will be consumed by a downstream // repackaging workflow that is sensitive to very small duration differences - // between video and audio. For this situation, choose Match video duration - // (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default - // codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, - // MediaConvert pads the output audio streams with silence or trims them to - // ensure that the total duration of each audio stream is at least as long as - // the total duration of the video stream. After padding or trimming, the audio - // stream duration is no more than one frame longer than the video stream. MediaConvert - // applies audio padding or trimming only to the end of the last segment of - // the output. For unsegmented outputs, MediaConvert adds padding only to the - // end of the file. When you keep the default value, any minor discrepancies - // between audio and video duration will depend on your output audio codec. + // between video and audio. For this situation, choose Match video duration. + // In all other cases, keep the default value, Default codec duration.
When + // you choose Match video duration, MediaConvert pads the output audio streams + // with silence or trims them to ensure that the total duration of each audio + // stream is at least as long as the total duration of the video stream. After + // padding or trimming, the audio stream duration is no more than one frame + // longer than the video stream. MediaConvert applies audio padding or trimming + // only to the end of the last segment of the output. For unsegmented outputs, + // MediaConvert adds padding only to the end of the file. When you keep the + // default value, any minor discrepancies between audio and video duration will + // depend on your output audio codec. AudioDuration *string `locationName:"audioDuration" type:"string" enum:"CmfcAudioDuration"` // When enabled, file composition times will start at zero, composition times @@ -19587,9 +19358,9 @@ type Mp4Settings struct { // Ignore this setting unless compliance to the CTTS box version specification // matters in your workflow. Specify a value of 1 to set your CTTS box version // to 1 and make your output compliant with the specification. When you specify - // a value of 1, you must also set CSLG atom (cslgAtom) to the value INCLUDE. - // Keep the default value 0 to set your CTTS box version to 0. This can provide - // backward compatibility for some players and packagers. + // a value of 1, you must also set CSLG atom to the value INCLUDE. Keep the + // default value 0 to set your CTTS box version to 0. This can provide backward + // compatibility for some players and packagers. CttsVersion *int64 `locationName:"cttsVersion" type:"integer"` // Inserts a free-space box immediately after the moov box. @@ -19664,35 +19435,34 @@ func (s *Mp4Settings) SetMp4MajorBrand(v string) *Mp4Settings { type MpdSettings struct { _ struct{} `type:"structure"` - // Optional. Choose Include (INCLUDE) to have MediaConvert mark up your DASH - // manifest with elements for embedded 608 captions. This markup isn't generally - // required, but some video players require it to discover and play embedded - // 608 captions. Keep the default value, Exclude (EXCLUDE), to leave these elements - // out. When you enable this setting, this is the markup that MediaConvert includes - // in your manifest: + // Optional. Choose Include to have MediaConvert mark up your DASH manifest + // with elements for embedded 608 captions. This markup isn't generally required, + // but some video players require it to discover and play embedded 608 captions. + // Keep the default value, Exclude, to leave these elements out. When you enable + // this setting, this is the markup that MediaConvert includes in your manifest: AccessibilityCaptionHints *string `locationName:"accessibilityCaptionHints" type:"string" enum:"MpdAccessibilityCaptionHints"` // Specify this setting only when your output will be consumed by a downstream // repackaging workflow that is sensitive to very small duration differences - // between video and audio. For this situation, choose Match video duration - // (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default - // codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, - // MediaConvert pads the output audio streams with silence or trims them to - // ensure that the total duration of each audio stream is at least as long as - // the total duration of the video stream. After padding or trimming, the audio - // stream duration is no more than one frame longer than the video stream. 
MediaConvert - // applies audio padding or trimming only to the end of the last segment of - // the output. For unsegmented outputs, MediaConvert adds padding only to the - // end of the file. When you keep the default value, any minor discrepancies - // between audio and video duration will depend on your output audio codec. + // between video and audio. For this situation, choose Match video duration. + // In all other cases, keep the default value, Default codec duration. When + // you choose Match video duration, MediaConvert pads the output audio streams + // with silence or trims them to ensure that the total duration of each audio + // stream is at least as long as the total duration of the video stream. After + // padding or trimming, the audio stream duration is no more than one frame + // longer than the video stream. MediaConvert applies audio padding or trimming + // only to the end of the last segment of the output. For unsegmented outputs, + // MediaConvert adds padding only to the end of the file. When you keep the + // default value, any minor discrepancies between audio and video duration will + // depend on your output audio codec. AudioDuration *string `locationName:"audioDuration" type:"string" enum:"MpdAudioDuration"` // Use this setting only in DASH output groups that include sidecar TTML or // IMSC captions. You specify sidecar captions in a separate output from your - // audio and video. Choose Raw (RAW) for captions in a single XML file in a - // raw container. Choose Fragmented MPEG-4 (FRAGMENTED_MP4) for captions in - // XML format contained within fragmented MP4 files. This set of fragmented - // MP4 files is separate from your video and audio fragmented MP4 files. + // audio and video. Choose Raw for captions in a single XML file in a raw container. + // Choose Fragmented MPEG-4 for captions in XML format contained within fragmented + // MP4 files. This set of fragmented MP4 files is separate from your video and + // audio fragmented MP4 files. CaptionContainerType *string `locationName:"captionContainerType" type:"string" enum:"MpdCaptionContainerType"` // To include key-length-value metadata in this output: Set KLV metadata insertion @@ -19710,45 +19480,44 @@ type MpdSettings struct { // To leave these elements out of your output MPD manifest, set Manifest metadata // signaling to Disabled. To enable Manifest metadata signaling, you must also // set SCTE-35 source to Passthrough, ESAM SCTE-35 to insert, or ID3 metadata - // (TimedMetadata) to Passthrough. + // to Passthrough. ManifestMetadataSignaling *string `locationName:"manifestMetadataSignaling" type:"string" enum:"MpdManifestMetadataSignaling"` // Use this setting only when you specify SCTE-35 markers from ESAM. Choose // INSERT to put SCTE-35 markers in this output at the insertion points that // you specify in an ESAM XML document. Provide the document in the setting - // SCC XML (sccXml). + // SCC XML. Scte35Esam *string `locationName:"scte35Esam" type:"string" enum:"MpdScte35Esam"` // Ignore this setting unless you have SCTE-35 markers in your input video file. - // Choose Passthrough (PASSTHROUGH) if you want SCTE-35 markers that appear - // in your input to also appear in this output. Choose None (NONE) if you don't - // want those SCTE-35 markers in this output. + // Choose Passthrough if you want SCTE-35 markers that appear in your input + // to also appear in this output. Choose None if you don't want those SCTE-35 + // markers in this output. 
Scte35Source *string `locationName:"scte35Source" type:"string" enum:"MpdScte35Source"` - // To include ID3 metadata in this output: Set ID3 metadata (timedMetadata) - // to Passthrough (PASSTHROUGH). Specify this ID3 metadata in Custom ID3 metadata - // inserter (timedMetadataInsertion). MediaConvert writes each instance of ID3 - // metadata in a separate Event Message (eMSG) box. To exclude this ID3 metadata: - // Set ID3 metadata to None (NONE) or leave blank. + // To include ID3 metadata in this output: Set ID3 metadata to Passthrough. + // Specify this ID3 metadata in Custom ID3 metadata inserter. MediaConvert writes + // each instance of ID3 metadata in a separate Event Message (eMSG) box. To + // exclude this ID3 metadata: Set ID3 metadata to None or leave blank. TimedMetadata *string `locationName:"timedMetadata" type:"string" enum:"MpdTimedMetadata"` // Specify the event message box (eMSG) version for ID3 timed metadata in your // output.For more information, see ISO/IEC 23009-1:2022 section 5.10.3.3.3 // Syntax.Leave blank to use the default value Version 0.When you specify Version - // 1, you must also set ID3 metadata (timedMetadata) to Passthrough. + // 1, you must also set ID3 metadata to Passthrough. TimedMetadataBoxVersion *string `locationName:"timedMetadataBoxVersion" type:"string" enum:"MpdTimedMetadataBoxVersion"` - // Specify the event message box (eMSG) scheme ID URI (scheme_id_uri) for ID3 - // timed metadata in your output. For more information, see ISO/IEC 23009-1:2022 - // section 5.10.3.3.4 Semantics. Leave blank to use the default value: https://aomedia.org/emsg/ID3 + // Specify the event message box (eMSG) scheme ID URI for ID3 timed metadata + // in your output. For more information, see ISO/IEC 23009-1:2022 section 5.10.3.3.4 + // Semantics. Leave blank to use the default value: https://aomedia.org/emsg/ID3 // When you specify a value for ID3 metadata scheme ID URI, you must also set - // ID3 metadata (timedMetadata) to Passthrough. + // ID3 metadata to Passthrough. TimedMetadataSchemeIdUri *string `locationName:"timedMetadataSchemeIdUri" type:"string"` // Specify the event message box (eMSG) value for ID3 timed metadata in your // output. For more information, see ISO/IEC 23009-1:2022 section 5.10.3.3.4 // Semantics. When you specify a value for ID3 Metadata Value, you must also - // set ID3 metadata (timedMetadata) to Passthrough. + // set ID3 metadata to Passthrough. TimedMetadataValue *string `locationName:"timedMetadataValue" type:"string"` } @@ -19836,15 +19605,13 @@ func (s *MpdSettings) SetTimedMetadataValue(v string) *MpdSettings { return s } -// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to -// the value MPEG2. +// Required when you set Codec to the value MPEG2. type Mpeg2Settings struct { _ struct{} `type:"structure"` // Specify the strength of any adaptive quantization filters that you enable. // The value that you choose here applies to the following settings: Spatial - // adaptive quantization (spatialAdaptiveQuantization), and Temporal adaptive - // quantization (temporalAdaptiveQuantization). + // adaptive quantization, and Temporal adaptive quantization. AdaptiveQuantization *string `locationName:"adaptiveQuantization" type:"string" enum:"Mpeg2AdaptiveQuantization"` // Specify the average bitrate in bits per second. Required for VBR and CBR. @@ -19852,17 +19619,17 @@ type Mpeg2Settings struct { // multiple of 1000. 
Bitrate *int64 `locationName:"bitrate" min:"1000" type:"integer"` - // Use Level (Mpeg2CodecLevel) to set the MPEG-2 level for the video output. + // Use Level to set the MPEG-2 level for the video output. CodecLevel *string `locationName:"codecLevel" type:"string" enum:"Mpeg2CodecLevel"` - // Use Profile (Mpeg2CodecProfile) to set the MPEG-2 profile for the video output. + // Use Profile to set the MPEG-2 profile for the video output. CodecProfile *string `locationName:"codecProfile" type:"string" enum:"Mpeg2CodecProfile"` // Choose Adaptive to improve subjective video quality for high-motion content. // This will cause the service to use fewer B-frames (which infer information // based on other frames) for high-motion portions of the video and more B-frames // for low-motion portions. The maximum number of B-frames is limited by the - // value you provide for the setting B frames between reference frames (numberBFramesBetweenReferenceFrames). + // value you provide for the setting B frames between reference frames. DynamicSubGop *string `locationName:"dynamicSubGop" type:"string" enum:"Mpeg2DynamicSubGop"` // If you are using the console, use the Framerate setting to specify the frame @@ -19870,12 +19637,7 @@ type Mpeg2Settings struct { // video, choose Follow source. If you want to do frame rate conversion, choose // a frame rate from the dropdown list or choose Custom. The framerates shown // in the dropdown list are decimal approximations of fractions. If you choose - // Custom, specify your frame rate as a fraction. If you are creating your transcoding - // job specification as a JSON file without the console, use FramerateControl - // to specify which value the service uses for the frame rate for this output. - // Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate - // from the input. Choose SPECIFIED if you want the service to use the frame - // rate you specify in the settings FramerateNumerator and FramerateDenominator. + // Custom, specify your frame rate as a fraction. FramerateControl *string `locationName:"framerateControl" type:"string" enum:"Mpeg2FramerateControl"` // Choose the method that you want MediaConvert to use when increasing or decreasing @@ -19917,12 +19679,12 @@ type Mpeg2Settings struct { // Specify the interval between keyframes, in seconds or frames, for this output. // Default: 12 Related settings: When you specify the GOP size in seconds, set - // GOP mode control (GopSizeUnits) to Specified, seconds (SECONDS). The default - // value for GOP mode control (GopSizeUnits) is Frames (FRAMES). + // GOP mode control to Specified, seconds. The default value for GOP mode control + // is Frames. GopSize *float64 `locationName:"gopSize" type:"double"` - // Specify the units for GOP size (GopSize). If you don't specify a value here, - // by default the encoder measures GOP size in frames. + // Specify the units for GOP size. If you don't specify a value here, by default + // the encoder measures GOP size in frames. GopSizeUnits *string `locationName:"gopSizeUnits" type:"string" enum:"Mpeg2GopSizeUnits"` // If your downstream systems have strict buffer requirements: Specify the minimum @@ -19939,41 +19701,39 @@ type Mpeg2Settings struct { HrdBufferSize *int64 `locationName:"hrdBufferSize" type:"integer"` // Choose the scan line type for the output. Keep the default value, Progressive - // (PROGRESSIVE) to create a progressive output, regardless of the scan type - // of your input. 
Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) - // to create an output that's interlaced with the same field polarity throughout. - // Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) - // to produce outputs with the same field polarity as the source. For jobs that - // have multiple inputs, the output field polarity might change over the course - // of the output. Follow behavior depends on the input scan type. If the source - // is interlaced, the output will be interlaced with the same polarity as the - // source. If the source is progressive, the output will be interlaced with - // top field bottom field first, depending on which of the Follow options you - // choose. + // to create a progressive output, regardless of the scan type of your input. + // Use Top field first or Bottom field first to create an output that's interlaced + // with the same field polarity throughout. Use Follow, default top or Follow, + // default bottom to produce outputs with the same field polarity as the source. + // For jobs that have multiple inputs, the output field polarity might change + // over the course of the output. Follow behavior depends on the input scan + // type. If the source is interlaced, the output will be interlaced with the + // same polarity as the source. If the source is progressive, the output will + // be interlaced with top field or bottom field first, depending on which of the + // Follow options you choose. InterlaceMode *string `locationName:"interlaceMode" type:"string" enum:"Mpeg2InterlaceMode"` - // Use Intra DC precision (Mpeg2IntraDcPrecision) to set quantization precision - // for intra-block DC coefficients. If you choose the value auto, the service - // will automatically select the precision based on the per-frame compression - // ratio. + // Use Intra DC precision to set quantization precision for intra-block DC coefficients. + // If you choose the value auto, the service will automatically select the precision + // based on the per-frame compression ratio. IntraDcPrecision *string `locationName:"intraDcPrecision" type:"string" enum:"Mpeg2IntraDcPrecision"` // Maximum bitrate in bits/second. For example, enter five megabits per second // as 5000000. MaxBitrate *int64 `locationName:"maxBitrate" min:"1000" type:"integer"` - // Use this setting only when you also enable Scene change detection (SceneChangeDetect). - // This setting determines how the encoder manages the spacing between I-frames - // that it inserts as part of the I-frame cadence and the I-frames that it inserts - // for Scene change detection. When you specify a value for this setting, the - // encoder determines whether to skip a cadence-driven I-frame by the value - // you set. For example, if you set Min I interval (minIInterval) to 5 and a - // cadence-driven I-frame would fall within 5 frames of a scene-change I-frame, - // then the encoder skips the cadence-driven I-frame. In this way, one GOP is - // shrunk slightly and one GOP is stretched slightly. When the cadence-driven - // I-frames are farther from the scene-change I-frame than the value you set, - // then the encoder leaves all I-frames in place and the GOPs surrounding the - // scene change are smaller than the usual cadence GOPs. + // Use this setting only when you also enable Scene change detection.
This setting + // determines how the encoder manages the spacing between I-frames that it inserts + // as part of the I-frame cadence and the I-frames that it inserts for Scene + // change detection. When you specify a value for this setting, the encoder + // determines whether to skip a cadence-driven I-frame by the value you set. + // For example, if you set Min I interval to 5 and a cadence-driven I-frame + // would fall within 5 frames of a scene-change I-frame, then the encoder skips + // the cadence-driven I-frame. In this way, one GOP is shrunk slightly and one + // GOP is stretched slightly. When the cadence-driven I-frames are farther from + // the scene-change I-frame than the value you set, then the encoder leaves + // all I-frames in place and the GOPs surrounding the scene change are smaller + // than the usual cadence GOPs. MinIInterval *int64 `locationName:"minIInterval" type:"integer"` // Specify the number of B-frames that MediaConvert puts between reference frames @@ -19982,52 +19742,48 @@ type Mpeg2Settings struct { NumberBFramesBetweenReferenceFrames *int64 `locationName:"numberBFramesBetweenReferenceFrames" type:"integer"` // Optional. Specify how the service determines the pixel aspect ratio (PAR) - // for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), - // uses the PAR from your input video for your output. To specify a different - // PAR in the console, choose any value other than Follow source. To specify - // a different PAR by editing the JSON job specification, choose SPECIFIED. - // When you choose SPECIFIED for this setting, you must also specify values - // for the parNumerator and parDenominator settings. + // for this output. The default behavior, Follow source, uses the PAR from your + // input video for your output. To specify a different PAR in the console, choose + // any value other than Follow source. When you choose SPECIFIED for this setting, + // you must also specify values for the parNumerator and parDenominator settings. ParControl *string `locationName:"parControl" type:"string" enum:"Mpeg2ParControl"` - // Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the - // console, this corresponds to any value other than Follow source. When you - // specify an output pixel aspect ratio (PAR) that is different from your input - // video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC - // widescreen, you would specify the ratio 40:33. In this example, the value - // for parDenominator is 33. + // Required when you set Pixel aspect ratio to SPECIFIED. On the console, this + // corresponds to any value other than Follow source. When you specify an output + // pixel aspect ratio (PAR) that is different from your input video PAR, provide + // your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would + // specify the ratio 40:33. In this example, the value for parDenominator is + // 33. ParDenominator *int64 `locationName:"parDenominator" min:"1" type:"integer"` - // Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the - // console, this corresponds to any value other than Follow source. When you - // specify an output pixel aspect ratio (PAR) that is different from your input - // video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC - // widescreen, you would specify the ratio 40:33. In this example, the value - // for parNumerator is 40. + // Required when you set Pixel aspect ratio to SPECIFIED. 
On the console, this + // corresponds to any value other than Follow source. When you specify an output + // pixel aspect ratio (PAR) that is different from your input video PAR, provide + // your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would + // specify the ratio 40:33. In this example, the value for parNumerator is 40. ParNumerator *int64 `locationName:"parNumerator" min:"1" type:"integer"` - // Optional. Use Quality tuning level (qualityTuningLevel) to choose how you - // want to trade off encoding speed for output video quality. The default behavior - // is faster, lower quality, single-pass encoding. + // Optional. Use Quality tuning level to choose how you want to trade off encoding + // speed for output video quality. The default behavior is faster, lower quality, + // single-pass encoding. QualityTuningLevel *string `locationName:"qualityTuningLevel" type:"string" enum:"Mpeg2QualityTuningLevel"` - // Use Rate control mode (Mpeg2RateControlMode) to specify whether the bitrate - // is variable (vbr) or constant (cbr). + // Use Rate control mode to specify whether the bitrate is variable (vbr) or + // constant (cbr). RateControlMode *string `locationName:"rateControlMode" type:"string" enum:"Mpeg2RateControlMode"` // Use this setting for interlaced outputs, when your output frame rate is half // of your input frame rate. In this situation, choose Optimized interlacing - // (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this - // case, each progressive frame from the input corresponds to an interlaced - // field in the output. Keep the default value, Basic interlacing (INTERLACED), - // for all other output frame rates. With basic interlacing, MediaConvert performs - // any frame rate conversion first and then interlaces the frames. When you - // choose Optimized interlacing and you set your output frame rate to a value - // that isn't suitable for optimized interlacing, MediaConvert automatically - // falls back to basic interlacing. Required settings: To use optimized interlacing, - // you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't - // use optimized interlacing for hard telecine outputs. You must also set Interlace - // mode (interlaceMode) to a value other than Progressive (PROGRESSIVE). + // to create a better quality interlaced output. In this case, each progressive + // frame from the input corresponds to an interlaced field in the output. Keep + // the default value, Basic interlacing, for all other output frame rates. With + // basic interlacing, MediaConvert performs any frame rate conversion first + // and then interlaces the frames. When you choose Optimized interlacing and + // you set your output frame rate to a value that isn't suitable for optimized + // interlacing, MediaConvert automatically falls back to basic interlacing. + // Required settings: To use optimized interlacing, you must set Telecine to + // None or Soft. You can't use optimized interlacing for hard telecine outputs. + // You must also set Interlace mode to a value other than Progressive. ScanTypeConversionMode *string `locationName:"scanTypeConversionMode" type:"string" enum:"Mpeg2ScanTypeConversionMode"` // Enable this setting to insert I-frames at scene changes that the service @@ -20039,67 +19795,63 @@ type Mpeg2Settings struct { // slow PAL, MediaConvert relabels the video frames to 25 fps and resamples // your audio to keep it synchronized with the video. 
Note that enabling this // setting will slightly reduce the duration of your video. Required settings: - // You must also set Framerate to 25. In your JSON job specification, set (framerateControl) - // to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to - // 1. + // You must also set Framerate to 25. SlowPal *string `locationName:"slowPal" type:"string" enum:"Mpeg2SlowPal"` // Ignore this setting unless you need to comply with a specification that requires // a specific value. If you don't have a specification requirement, we recommend // that you adjust the softness of your output by using a lower value for the - // setting Sharpness (sharpness) or by enabling a noise reducer filter (noiseReducerFilter). - // The Softness (softness) setting specifies the quantization matrices that - // the encoder uses. Keep the default value, 0, to use the AWS Elemental default - // matrices. Choose a value from 17 to 128 to use planar interpolation. Increasing - // values from 17 to 128 result in increasing reduction of high-frequency data. - // The value 128 results in the softest video. + // setting Sharpness or by enabling a noise reducer filter. The Softness setting + // specifies the quantization matrices that the encoder uses. Keep the default + // value, 0, to use the AWS Elemental default matrices. Choose a value from + // 17 to 128 to use planar interpolation. Increasing values from 17 to 128 result + // in increasing reduction of high-frequency data. The value 128 results in + // the softest video. Softness *int64 `locationName:"softness" type:"integer"` - // Keep the default value, Enabled (ENABLED), to adjust quantization within - // each frame based on spatial variation of content complexity. When you enable - // this feature, the encoder uses fewer bits on areas that can sustain more - // distortion with no noticeable visual degradation and uses more bits on areas - // where any small distortion will be noticeable. For example, complex textured - // blocks are encoded with fewer bits and smooth textured blocks are encoded - // with more bits. Enabling this feature will almost always improve your video - // quality. Note, though, that this feature doesn't take into account where - // the viewer's attention is likely to be. If viewers are likely to be focusing - // their attention on a part of the screen with a lot of complex texture, you - // might choose to disable this feature. Related setting: When you enable spatial - // adaptive quantization, set the value for Adaptive quantization (adaptiveQuantization) - // depending on your content. For homogeneous content, such as cartoons and - // video games, set it to Low. For content with a wider variety of textures, - // set it to High or Higher. + // Keep the default value, Enabled, to adjust quantization within each frame + // based on spatial variation of content complexity. When you enable this feature, + // the encoder uses fewer bits on areas that can sustain more distortion with + // no noticeable visual degradation and uses more bits on areas where any small + // distortion will be noticeable. For example, complex textured blocks are encoded + // with fewer bits and smooth textured blocks are encoded with more bits. Enabling + // this feature will almost always improve your video quality. Note, though, + // that this feature doesn't take into account where the viewer's attention + // is likely to be. 
If viewers are likely to be focusing their attention on + // a part of the screen with a lot of complex texture, you might choose to disable + // this feature. Related setting: When you enable spatial adaptive quantization, + // set the value for Adaptive quantization depending on your content. For homogeneous + // content, such as cartoons and video games, set it to Low. For content with + // a wider variety of textures, set it to High or Higher. SpatialAdaptiveQuantization *string `locationName:"spatialAdaptiveQuantization" type:"string" enum:"Mpeg2SpatialAdaptiveQuantization"` // Specify whether this output's video uses the D10 syntax. Keep the default - // value to not use the syntax. Related settings: When you choose D10 (D_10) - // for your MXF profile (profile), you must also set this value to D10 (D_10). + // value to not use the syntax. Related settings: When you choose D10 for your + // MXF profile, you must also set this value to D10. Syntax *string `locationName:"syntax" type:"string" enum:"Mpeg2Syntax"` // When you do frame rate conversion from 23.976 frames per second (fps) to // 29.97 fps, and your output scan type is interlaced, you can optionally enable - // hard or soft telecine to create a smoother picture. Hard telecine (HARD) - // produces a 29.97i output. Soft telecine (SOFT) produces an output with a - // 23.976 output that signals to the video player device to do the conversion - // during play back. When you keep the default value, None (NONE), MediaConvert - // does a standard frame rate conversion to 29.97 without doing anything with - // the field polarity to create a smoother picture. + // hard or soft telecine to create a smoother picture. Hard telecine produces + // a 29.97i output. Soft telecine produces a 23.976 output that signals to the + // video player device to do the conversion during playback. When you keep the + // default value, None, MediaConvert does a standard frame rate conversion to + // 29.97 without doing anything with the field polarity to create a smoother + // picture. Telecine *string `locationName:"telecine" type:"string" enum:"Mpeg2Telecine"` - // Keep the default value, Enabled (ENABLED), to adjust quantization within - // each frame based on temporal variation of content complexity. When you enable - // this feature, the encoder uses fewer bits on areas of the frame that aren't - // moving and uses more bits on complex objects with sharp edges that move a - // lot. For example, this feature improves the readability of text tickers on - // newscasts and scoreboards on sports matches. Enabling this feature will almost - // always improve your video quality. Note, though, that this feature doesn't - // take into account where the viewer's attention is likely to be. If viewers - // are likely to be focusing their attention on a part of the screen that doesn't - // have moving objects with sharp edges, such as sports athletes' faces, you - // might choose to disable this feature. Related setting: When you enable temporal - // quantization, adjust the strength of the filter with the setting Adaptive - // quantization (adaptiveQuantization). + // Keep the default value, Enabled, to adjust quantization within each frame + // based on temporal variation of content complexity. When you enable this feature, + // the encoder uses fewer bits on areas of the frame that aren't moving and + // uses more bits on complex objects with sharp edges that move a lot.
For example, + // this feature improves the readability of text tickers on newscasts and scoreboards + // on sports matches. Enabling this feature will almost always improve your + // video quality. Note, though, that this feature doesn't take into account + // where the viewer's attention is likely to be. If viewers are likely to be + // focusing their attention on a part of the screen that doesn't have moving + // objects with sharp edges, such as sports athletes' faces, you might choose + // to disable this feature. Related setting: When you enable temporal quantization, + // adjust the strength of the filter with the setting Adaptive quantization. TemporalAdaptiveQuantization *string `locationName:"temporalAdaptiveQuantization" type:"string" enum:"Mpeg2TemporalAdaptiveQuantization"` } @@ -20407,8 +20159,7 @@ func (s *MsSmoothAdditionalManifest) SetSelectedOutputs(v []*string) *MsSmoothAd return s } -// If you are using DRM, set DRM System (MsSmoothEncryptionSettings) to specify -// the value SpekeKeyProvider. +// If you are using DRM, set DRM System to specify the value SpekeKeyProvider. type MsSmoothEncryptionSettings struct { _ struct{} `type:"structure"` @@ -20444,9 +20195,6 @@ func (s *MsSmoothEncryptionSettings) SetSpekeKeyProvider(v *SpekeKeyProvider) *M // Settings related to your Microsoft Smooth Streaming output package. For more // information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html. -// When you work directly in your JSON job specification, include this object -// and any required children when you set Type, under OutputGroupSettings, to -// MS_SMOOTH_GROUP_SETTINGS. type MsSmoothGroupSettings struct { _ struct{} `type:"structure"` @@ -20461,37 +20209,36 @@ type MsSmoothGroupSettings struct { // a Microsoft Smooth output group into a single audio stream. AudioDeduplication *string `locationName:"audioDeduplication" type:"string" enum:"MsSmoothAudioDeduplication"` - // Use Destination (Destination) to specify the S3 output location and the output - // filename base. Destination accepts format identifiers. If you do not specify - // the base filename in the URI, the service will use the filename of the input - // file. If your job has multiple inputs, the service uses the filename of the - // first input file. + // Use Destination to specify the S3 output location and the output filename + // base. Destination accepts format identifiers. If you do not specify the base + // filename in the URI, the service will use the filename of the input file. + // If your job has multiple inputs, the service uses the filename of the first + // input file. Destination *string `locationName:"destination" type:"string"` // Settings associated with the destination. Will vary based on the type of // destination DestinationSettings *DestinationSettings `locationName:"destinationSettings" type:"structure"` - // If you are using DRM, set DRM System (MsSmoothEncryptionSettings) to specify - // the value SpekeKeyProvider. + // If you are using DRM, set DRM System to specify the value SpekeKeyProvider. Encryption *MsSmoothEncryptionSettings `locationName:"encryption" type:"structure"` // Specify how you want MediaConvert to determine the fragment length. Choose - // Exact (EXACT) to have the encoder use the exact length that you specify with - // the setting Fragment length (FragmentLength). This might result in extra - // I-frames. Choose Multiple of GOP (GOP_MULTIPLE) to have the encoder round - // up the segment lengths to match the next GOP boundary. 
+ // Exact to have the encoder use the exact length that you specify with the + // setting Fragment length. This might result in extra I-frames. Choose Multiple + // of GOP to have the encoder round up the segment lengths to match the next + // GOP boundary. FragmentLength *int64 `locationName:"fragmentLength" min:"1" type:"integer"` // Specify how you want MediaConvert to determine the fragment length. Choose - // Exact (EXACT) to have the encoder use the exact length that you specify with - // the setting Fragment length (FragmentLength). This might result in extra - // I-frames. Choose Multiple of GOP (GOP_MULTIPLE) to have the encoder round - // up the segment lengths to match the next GOP boundary. + // Exact to have the encoder use the exact length that you specify with the + // setting Fragment length. This might result in extra I-frames. Choose Multiple + // of GOP to have the encoder round up the segment lengths to match the next + // GOP boundary. FragmentLengthControl *string `locationName:"fragmentLengthControl" type:"string" enum:"MsSmoothFragmentLengthControl"` - // Use Manifest encoding (MsSmoothManifestEncoding) to specify the encoding - // format for the server and client manifest. Valid options are utf8 and utf16. + // Use Manifest encoding to specify the encoding format for the server and client + // manifest. Valid options are utf8 and utf16. ManifestEncoding *string `locationName:"manifestEncoding" type:"string" enum:"MsSmoothManifestEncoding"` } @@ -20590,13 +20337,12 @@ type MxfSettings struct { // Optional. When you have AFD signaling set up in your output video stream, // use this setting to choose whether to also include it in the MXF wrapper. - // Choose Don't copy (NO_COPY) to exclude AFD signaling from the MXF wrapper. - // Choose Copy from video stream (COPY_FROM_VIDEO) to copy the AFD values from - // the video stream for this output to the MXF wrapper. Regardless of which - // option you choose, the AFD values remain in the video stream. Related settings: - // To set up your output to include or exclude AFD values, see AfdSignaling, - // under VideoDescription. On the console, find AFD signaling under the output's - // video encoding settings. + // Choose Don't copy to exclude AFD signaling from the MXF wrapper. Choose Copy + // from video stream to copy the AFD values from the video stream for this output + // to the MXF wrapper. Regardless of which option you choose, the AFD values + // remain in the video stream. Related settings: To set up your output to include + // or exclude AFD values, see AfdSignaling, under VideoDescription. On the console, + // find AFD signaling under the output's video encoding settings. AfdSignaling *string `locationName:"afdSignaling" type:"string" enum:"MxfAfdSignaling"` // Specify the MXF profile, also called shim, for this output. To automatically @@ -20652,21 +20398,20 @@ type MxfXavcProfileSettings struct { _ struct{} `type:"structure"` // To create an output that complies with the XAVC file format guidelines for - // interoperability, keep the default value, Drop frames for compliance (DROP_FRAMES_FOR_COMPLIANCE). - // To include all frames from your input in this output, keep the default setting, - // Allow any duration (ALLOW_ANY_DURATION). The number of frames that MediaConvert - // excludes when you set this to Drop frames for compliance depends on the output - // frame rate and duration. + // interoperability, keep the default value, Drop frames for compliance. 
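To see how the Smooth Streaming settings above fit together, here is a minimal sketch using this package's types. The wire values EXACT and utf8 appear in the doc text above; the bucket path is a placeholder, and the fragment length unit (seconds) is an assumption, not something these comments state.

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

// smoothGroup sketches a Microsoft Smooth Streaming output group: an S3
// destination plus filename base, exact fragment lengths, and a utf8 manifest.
func smoothGroup() *mediaconvert.MsSmoothGroupSettings {
	return &mediaconvert.MsSmoothGroupSettings{
		Destination:           aws.String("s3://amzn-s3-demo-bucket/smooth/asset"), // placeholder path
		FragmentLength:        aws.Int64(2),                                        // assumed to be in seconds
		FragmentLengthControl: aws.String("EXACT"),
		ManifestEncoding:      aws.String("utf8"),
	}
}
```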
To + // include all frames from your input in this output, keep the default setting, + // Allow any duration. The number of frames that MediaConvert excludes when + // you set this to Drop frames for compliance depends on the output frame rate + // and duration. DurationMode *string `locationName:"durationMode" type:"string" enum:"MxfXavcDurationMode"` // Specify a value for this setting only for outputs that you set up with one - // of these two XAVC profiles: XAVC HD Intra CBG (XAVC_HD_INTRA_CBG) or XAVC - // 4K Intra CBG (XAVC_4K_INTRA_CBG). Specify the amount of space in each frame - // that the service reserves for ancillary data, such as teletext captions. - // The default value for this setting is 1492 bytes per frame. This should be - // sufficient to prevent overflow unless you have multiple pages of teletext - // captions data. If you have a large amount of teletext data, specify a larger - // number. + // of these two XAVC profiles: XAVC HD Intra CBG or XAVC 4K Intra CBG. Specify + // the amount of space in each frame that the service reserves for ancillary + // data, such as teletext captions. The default value for this setting is 1492 + // bytes per frame. This should be sufficient to prevent overflow unless you + // have multiple pages of teletext captions data. If you have a large amount + // of teletext data, specify a larger number. MaxAncDataSize *int64 `locationName:"maxAncDataSize" type:"integer"` } @@ -20708,26 +20453,24 @@ type NexGuardFileMarkerSettings struct { // Use the base64 license string that Nagra provides you. Enter it directly // in your JSON job specification or in the console. Required when you include - // Nagra NexGuard File Marker watermarking (NexGuardWatermarkingSettings) in - // your job. + // Nagra NexGuard File Marker watermarking in your job. License *string `locationName:"license" min:"1" type:"string"` // Specify the payload ID that you want associated with this output. Valid values // vary depending on your Nagra NexGuard forensic watermarking workflow. Required - // when you include Nagra NexGuard File Marker watermarking (NexGuardWatermarkingSettings) - // in your job. For PreRelease Content (NGPR/G2), specify an integer from 1 - // through 4,194,303. You must generate a unique ID for each asset you watermark, - // and keep a record of which ID you have assigned to each asset. Neither Nagra - // nor MediaConvert keep track of the relationship between output files and - // your IDs. For OTT Streaming, create two adaptive bitrate (ABR) stacks for - // each asset. Do this by setting up two output groups. For one output group, - // set the value of Payload ID (payload) to 0 in every output. For the other - // output group, set Payload ID (payload) to 1 in every output. + // when you include Nagra NexGuard File Marker watermarking in your job. For + // PreRelease Content (NGPR/G2), specify an integer from 1 through 4,194,303. + // You must generate a unique ID for each asset you watermark, and keep a record + // of which ID you have assigned to each asset. Neither Nagra nor MediaConvert + // keep track of the relationship between output files and your IDs. For OTT + // Streaming, create two adaptive bitrate (ABR) stacks for each asset. Do this + // by setting up two output groups. For one output group, set the value of Payload + // ID to 0 in every output. For the other output group, set Payload ID to 1 + // in every output. 
Payload *int64 `locationName:"payload" type:"integer"` // Enter one of the watermarking preset strings that Nagra provides you. Required - // when you include Nagra NexGuard File Marker watermarking (NexGuardWatermarkingSettings) - // in your job. + // when you include Nagra NexGuard File Marker watermarking in your job. Preset *string `locationName:"preset" min:"1" type:"string"` // Optional. Ignore this setting unless Nagra support directs you to specify @@ -20795,12 +20538,8 @@ func (s *NexGuardFileMarkerSettings) SetStrength(v string) *NexGuardFileMarkerSe } // Settings for your Nielsen configuration. If you don't do Nielsen measurement -// and analytics, ignore these settings. When you enable Nielsen configuration -// (nielsenConfiguration), MediaConvert enables PCM to ID3 tagging for all outputs -// in the job. To enable Nielsen configuration programmatically, include an -// instance of nielsenConfiguration in your JSON job specification. Even if -// you don't include any children of nielsenConfiguration, you still enable -// the setting. +// and analytics, ignore these settings. When you enable Nielsen configuration, +// MediaConvert enables PCM to ID3 tagging for all outputs in the job. type NielsenConfiguration struct { _ struct{} `type:"structure"` @@ -20808,8 +20547,8 @@ type NielsenConfiguration struct { // include this property, set the value to zero. BreakoutCode *int64 `locationName:"breakoutCode" type:"integer"` - // Use Distributor ID (DistributorID) to specify the distributor ID that is - // assigned to your organization by Neilsen. + // Use Distributor ID to specify the distributor ID that is assigned to your + // organization by Nielsen. DistributorId *string `locationName:"distributorId" type:"string"` } @@ -20854,17 +20593,16 @@ type NielsenNonLinearWatermarkSettings struct { _ struct{} `type:"structure"` // Choose the type of Nielsen watermarks that you want in your outputs. When - // you choose NAES 2 and NW (NAES2_AND_NW), you must provide a value for the - // setting SID (sourceId). When you choose CBET (CBET), you must provide a value - // for the setting CSID (cbetSourceId). When you choose NAES 2, NW, and CBET - // (NAES2_AND_NW_AND_CBET), you must provide values for both of these settings. + // you choose NAES 2 and NW, you must provide a value for the setting SID. When + // you choose CBET, you must provide a value for the setting CSID. When you + // choose NAES 2, NW, and CBET, you must provide values for both of these settings. ActiveWatermarkProcess *string `locationName:"activeWatermarkProcess" type:"string" enum:"NielsenActiveWatermarkProcessType"` // Optional. Use this setting when you want the service to include an ADI file // in the Nielsen metadata .zip file. To provide an ADI file, store it in Amazon // S3 and provide a URL to it here. The URL should be in the following format: // S3://bucket/path/ADI-file. For more information about the metadata .zip file, - // see the setting Metadata destination (metadataDestination). + // see the setting Metadata destination. AdiFilename *string `locationName:"adiFilename" type:"string"` // Use the asset ID that you provide to Nielsen to uniquely identify this asset. @@ -20878,7 +20616,7 @@ type NielsenNonLinearWatermarkSettings struct { // Use the CSID that Nielsen provides to you. This CBET source ID should be // unique to your Nielsen account but common to all of your output assets that // have CBET watermarking. 
Required when you choose a value for the setting - // Watermark types (ActiveWatermarkProcess) that includes CBET. + // Watermark types that includes CBET. CbetSourceId *string `locationName:"cbetSourceId" type:"string"` // Optional. If this asset uses an episode ID with Nielsen, provide it here. @@ -20887,11 +20625,10 @@ type NielsenNonLinearWatermarkSettings struct { // Specify the Amazon S3 location where you want MediaConvert to save your Nielsen // non-linear metadata .zip file. This Amazon S3 bucket must be in the same // Region as the one where you do your MediaConvert transcoding. If you want - // to include an ADI file in this .zip file, use the setting ADI file (adiFilename) - // to specify it. MediaConvert delivers the Nielsen metadata .zip files only - // to your metadata destination Amazon S3 bucket. It doesn't deliver the .zip - // files to Nielsen. You are responsible for delivering the metadata .zip files - // to Nielsen. + // to include an ADI file in this .zip file, use the setting ADI file to specify + // it. MediaConvert delivers the Nielsen metadata .zip files only to your metadata + // destination Amazon S3 bucket. It doesn't deliver the .zip files to Nielsen. + // You are responsible for delivering the metadata .zip files to Nielsen. MetadataDestination *string `locationName:"metadataDestination" type:"string"` // Use the SID that Nielsen provides to you. This source ID should be unique @@ -20902,9 +20639,9 @@ type NielsenNonLinearWatermarkSettings struct { SourceId *int64 `locationName:"sourceId" type:"integer"` // Required. Specify whether your source content already contains Nielsen non-linear - // watermarks. When you set this value to Watermarked (WATERMARKED), the service - // fails the job. Nielsen requires that you add non-linear watermarking to only - // clean content that doesn't already have non-linear Nielsen watermarks. + // watermarks. When you set this value to Watermarked, the service fails the + // job. Nielsen requires that you add non-linear watermarking to only clean + // content that doesn't already have non-linear Nielsen watermarks. SourceWatermarkStatus *string `locationName:"sourceWatermarkStatus" type:"string" enum:"NielsenSourceWatermarkStatusType"` // Specify the endpoint for the TIC server that you have deployed and configured @@ -20916,8 +20653,8 @@ type NielsenNonLinearWatermarkSettings struct { TicServerUrl *string `locationName:"ticServerUrl" type:"string"` // To create assets that have the same TIC values in each audio track, keep - // the default value Share TICs (SAME_TICS_PER_TRACK). To create assets that - // have unique TIC values for each audio track, choose Use unique TICs (RESERVE_UNIQUE_TICS_PER_TRACK). + // the default value Share TICs. To create assets that have unique TIC values + // for each audio track, choose Use unique TICs. UniqueTicPerAudioTrack *string `locationName:"uniqueTicPerAudioTrack" type:"string" enum:"NielsenUniqueTicPerAudioTrackType"` } @@ -21032,13 +20769,12 @@ func (s *NielsenNonLinearWatermarkSettings) SetUniqueTicPerAudioTrack(v string) type NoiseReducer struct { _ struct{} `type:"structure"` - // Use Noise reducer filter (NoiseReducerFilter) to select one of the following - // spatial image filtering functions. To use this setting, you must also enable - // Noise reducer (NoiseReducer). * Bilateral preserves edges while reducing - // noise. * Mean (softest), Gaussian, Lanczos, and Sharpen (sharpest) do convolution - // filtering. * Conserve does min/max noise reduction. 
* Spatial does frequency-domain - // filtering based on JND principles. * Temporal optimizes video quality for - // complex motion. + // Use Noise reducer filter to select one of the following spatial image filtering + // functions. To use this setting, you must also enable Noise reducer. * Bilateral + // preserves edges while reducing noise. * Mean (softest), Gaussian, Lanczos, + // and Sharpen (sharpest) do convolution filtering. * Conserve does min/max + // noise reduction. * Spatial does frequency-domain filtering based on JND principles. + // * Temporal optimizes video quality for complex motion. Filter *string `locationName:"filter" type:"string" enum:"NoiseReducerFilter"` // Settings for a noise reducer filter @@ -21221,22 +20957,20 @@ type NoiseReducerTemporalFilterSettings struct { // and creates better VQ for low bitrate outputs. AggressiveMode *int64 `locationName:"aggressiveMode" type:"integer"` - // When you set Noise reducer (noiseReducer) to Temporal (TEMPORAL), the bandwidth - // and sharpness of your output is reduced. You can optionally use Post temporal - // sharpening (postTemporalSharpening) to apply sharpening to the edges of your - // output. Note that Post temporal sharpening will also make the bandwidth reduction - // from the Noise reducer smaller. The default behavior, Auto (AUTO), allows - // the transcoder to determine whether to apply sharpening, depending on your - // input type and quality. When you set Post temporal sharpening to Enabled - // (ENABLED), specify how much sharpening is applied using Post temporal sharpening - // strength (postTemporalSharpeningStrength). Set Post temporal sharpening to - // Disabled (DISABLED) to not apply sharpening. + // When you set Noise reducer to Temporal, the bandwidth and sharpness of your + // output is reduced. You can optionally use Post temporal sharpening to apply + // sharpening to the edges of your output. Note that Post temporal sharpening + // will also make the bandwidth reduction from the Noise reducer smaller. The + // default behavior, Auto, allows the transcoder to determine whether to apply + // sharpening, depending on your input type and quality. When you set Post temporal + // sharpening to Enabled, specify how much sharpening is applied using Post + // temporal sharpening strength. Set Post temporal sharpening to Disabled to + // not apply sharpening. PostTemporalSharpening *string `locationName:"postTemporalSharpening" type:"string" enum:"NoiseFilterPostTemporalSharpening"` - // Use Post temporal sharpening strength (postTemporalSharpeningStrength) to - // define the amount of sharpening the transcoder applies to your output. Set - // Post temporal sharpening strength to Low (LOW), Medium (MEDIUM), or High - // (HIGH) to indicate the amount of sharpening. + // Use Post temporal sharpening strength to define the amount of sharpening + // the transcoder applies to your output. Set Post temporal sharpening strength + // to Low, Medium, or High to indicate the amount of sharpening. PostTemporalSharpeningStrength *string `locationName:"postTemporalSharpeningStrength" type:"string" enum:"NoiseFilterPostTemporalSharpeningStrength"` // The speed of the filter (higher number is faster). Low setting reduces bit @@ -21387,8 +21121,8 @@ type OpusSettings struct { Bitrate *int64 `locationName:"bitrate" min:"32000" type:"integer"` // Specify the number of channels in this output audio track. Choosing Mono - // on the console gives you 1 output channel; choosing Stereo gives you 2. 
In - // the API, valid values are 1 and 2. + // gives you 1 output channel; choosing Stereo gives you 2. In the API, valid + // values are 1 and 2. Channels *int64 `locationName:"channels" min:"1" type:"integer"` // Optional. Sample rate in hz. Valid values are 16000, 24000, and 48000. The @@ -21457,42 +21191,39 @@ func (s *OpusSettings) SetSampleRate(v int64) *OpusSettings { type Output struct { _ struct{} `type:"structure"` - // (AudioDescriptions) contains groups of audio encoding settings organized - // by audio codec. Include one instance of (AudioDescriptions) per output. (AudioDescriptions) - // can contain multiple groups of encoding settings. + // Contains groups of audio encoding settings organized by audio codec. Include + // one instance of AudioDescriptions per output. Can contain multiple groups of encoding settings. AudioDescriptions []*AudioDescription `locationName:"audioDescriptions" type:"list"` - // (CaptionDescriptions) contains groups of captions settings. For each output - // that has captions, include one instance of (CaptionDescriptions). (CaptionDescriptions) - // can contain multiple groups of captions settings. + // Contains groups of captions settings. For each output that has captions, + // include one instance of CaptionDescriptions. Can contain multiple groups + // of captions settings. CaptionDescriptions []*CaptionDescription `locationName:"captionDescriptions" type:"list"` // Container specific settings. ContainerSettings *ContainerSettings `locationName:"containerSettings" type:"structure"` - // Use Extension (Extension) to specify the file extension for outputs in File - // output groups. If you do not specify a value, the service will use default - // extensions by container type as follows * MPEG-2 transport stream, m2ts * - // Quicktime, mov * MXF container, mxf * MPEG-4 container, mp4 * WebM container, - // webm * No Container, the service will use codec extensions (e.g. AAC, H265, - // H265, AC3) + // Use Extension to specify the file extension for outputs in File output groups. + // If you do not specify a value, the service will use default extensions by + // container type as follows * MPEG-2 transport stream, m2ts * Quicktime, mov + // * MXF container, mxf * MPEG-4 container, mp4 * WebM container, webm * No + // Container, the service will use codec extensions (e.g. AAC, H265, AC3) Extension *string `locationName:"extension" type:"string"` - // Use Name modifier (NameModifier) to have the service add a string to the - // end of each output filename. You specify the base filename as part of your - // destination URI. When you create multiple outputs in the same output group, - // Name modifier (NameModifier) is required. Name modifier also accepts format - // identifiers. For DASH ISO outputs, if you use the format identifiers $Number$ - // or $Time$ in one output, you must use them in the same way in all outputs - // of the output group. + // Use Name modifier to have the service add a string to the end of each output + // filename. You specify the base filename as part of your destination URI. + // When you create multiple outputs in the same output group, Name modifier + // is required. Name modifier also accepts format identifiers. For DASH ISO + // outputs, if you use the format identifiers $Number$ or $Time$ in one output, + // you must use them in the same way in all outputs of the output group. NameModifier *string `locationName:"nameModifier" min:"1" type:"string"` // Specific settings for this type of output.
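Since Name modifier is required whenever one output group holds several outputs, here is a minimal sketch of two preset-based outputs; the preset names are hypothetical:

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

// twoOutputs returns two outputs for one output group. Each carries a Name
// modifier, and each uses Preset rather than ContainerSettings (the doc text
// above says you can specify one or the other, not both).
func twoOutputs() []*mediaconvert.Output {
	return []*mediaconvert.Output{
		{NameModifier: aws.String("_1080p"), Preset: aws.String("my-1080p-preset")},
		{NameModifier: aws.String("_720p"), Preset: aws.String("my-720p-preset")},
	}
}
```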
OutputSettings *OutputSettings `locationName:"outputSettings" type:"structure"` - // Use Preset (Preset) to specify a preset for your transcoding settings. Provide - // the system or custom preset name. You can specify either Preset (Preset) - // or Container settings (ContainerSettings), but not both. + // Use Preset to specify a preset for your transcoding settings. Provide the + // system or custom preset name. You can specify either Preset or Container + // settings, but not both. Preset *string `locationName:"preset" type:"string"` // VideoDescription contains a group of video encoding settings. The specific @@ -21703,10 +21434,10 @@ type OutputGroup struct { // for you, based on characteristics of your input video. AutomatedEncodingSettings *AutomatedEncodingSettings `locationName:"automatedEncodingSettings" type:"structure"` - // Use Custom Group Name (CustomName) to specify a name for the output group. - // This value is displayed on the console and can make your job settings JSON - // more human-readable. It does not affect your outputs. Use up to twelve characters - // that are either letters, numbers, spaces, or underscores. + // Use Custom Group Name to specify a name for the output group. This value + // is displayed on the console and can make your job settings JSON more human-readable. + // It does not affect your outputs. Use up to twelve characters that are either + // letters, numbers, spaces, or underscores. CustomName *string `locationName:"customName" type:"string"` // Name of the output group @@ -21835,35 +21566,21 @@ type OutputGroupSettings struct { _ struct{} `type:"structure"` // Settings related to your CMAF output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html. - // When you work directly in your JSON job specification, include this object - // and any required children when you set Type, under OutputGroupSettings, to - // CMAF_GROUP_SETTINGS. CmafGroupSettings *CmafGroupSettings `locationName:"cmafGroupSettings" type:"structure"` // Settings related to your DASH output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html. - // When you work directly in your JSON job specification, include this object - // and any required children when you set Type, under OutputGroupSettings, to - // DASH_ISO_GROUP_SETTINGS. DashIsoGroupSettings *DashIsoGroupSettings `locationName:"dashIsoGroupSettings" type:"structure"` // Settings related to your File output group. MediaConvert uses this group // of settings to generate a single standalone file, rather than a streaming - // package. When you work directly in your JSON job specification, include this - // object and any required children when you set Type, under OutputGroupSettings, - // to FILE_GROUP_SETTINGS. + // package. FileGroupSettings *FileGroupSettings `locationName:"fileGroupSettings" type:"structure"` // Settings related to your HLS output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html. - // When you work directly in your JSON job specification, include this object - // and any required children when you set Type, under OutputGroupSettings, to - // HLS_GROUP_SETTINGS. HlsGroupSettings *HlsGroupSettings `locationName:"hlsGroupSettings" type:"structure"` // Settings related to your Microsoft Smooth Streaming output package. For more // information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html. 
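Each of these child objects is paired with a matching Type value; a short sketch for the file group case, with a placeholder bucket path:

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

// fileGroup pairs Type with its matching child settings object. The wire
// value FILE_GROUP_SETTINGS comes from the doc text above.
func fileGroup() *mediaconvert.OutputGroupSettings {
	return &mediaconvert.OutputGroupSettings{
		Type: aws.String("FILE_GROUP_SETTINGS"),
		FileGroupSettings: &mediaconvert.FileGroupSettings{
			Destination: aws.String("s3://amzn-s3-demo-bucket/outputs/"), // placeholder path
		},
	}
}
```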
- // When you work directly in your JSON job specification, include this object - // and any required children when you set Type, under OutputGroupSettings, to - // MS_SMOOTH_GROUP_SETTINGS. MsSmoothGroupSettings *MsSmoothGroupSettings `locationName:"msSmoothGroupSettings" type:"structure"` // Type of output group (File group, Apple HLS, DASH ISO, Microsoft Smooth Streaming, @@ -22194,9 +21911,8 @@ func (s *Preset) SetType(v string) *Preset { type PresetSettings struct { _ struct{} `type:"structure"` - // (AudioDescriptions) contains groups of audio encoding settings organized - // by audio codec. Include one instance of (AudioDescriptions) per output. (AudioDescriptions) - // can contain multiple groups of encoding settings. + // Contains groups of audio encoding settings organized by audio codec. Include + // one instance of AudioDescriptions per output. Can contain multiple groups of encoding settings. AudioDescriptions []*AudioDescription `locationName:"audioDescriptions" type:"list"` // This object holds groups of settings related to captions for one output. @@ -22294,25 +22010,22 @@ func (s *PresetSettings) SetVideoDescription(v *VideoDescription) *PresetSetting return s } -// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to -// the value PRORES. +// Required when you set Codec to the value PRORES. type ProresSettings struct { _ struct{} `type:"structure"` // This setting applies only to ProRes 4444 and ProRes 4444 XQ outputs that // you create from inputs that use 4:4:4 chroma sampling. Set Preserve 4:4:4 - // sampling (PRESERVE_444_SAMPLING) to allow outputs to also use 4:4:4 chroma - // sampling. You must specify a value for this setting when your output codec - // profile supports 4:4:4 chroma sampling. Related Settings: For Apple ProRes - // outputs with 4:4:4 chroma sampling: Choose Preserve 4:4:4 sampling. Use when - // your input has 4:4:4 chroma sampling and your output codec Profile is Apple - // ProRes 4444 or 4444 XQ. Note that when you choose Preserve 4:4:4 sampling, - // you cannot include any of the following Preprocessors: Dolby Vision, HDR10+, - // or Noise reducer. + // sampling to allow outputs to also use 4:4:4 chroma sampling. You must specify + // a value for this setting when your output codec profile supports 4:4:4 chroma + // sampling. Related Settings: For Apple ProRes outputs with 4:4:4 chroma sampling: + // Choose Preserve 4:4:4 sampling. Use when your input has 4:4:4 chroma sampling + // and your output codec Profile is Apple ProRes 4444 or 4444 XQ. Note that + // when you choose Preserve 4:4:4 sampling, you cannot include any of the following + // Preprocessors: Dolby Vision, HDR10+, or Noise reducer. ChromaSampling *string `locationName:"chromaSampling" type:"string" enum:"ProresChromaSampling"` - // Use Profile (ProResCodecProfile) to specify the type of Apple ProRes codec - // to use for this output. + // Use Profile to specify the type of Apple ProRes codec to use for this output. CodecProfile *string `locationName:"codecProfile" type:"string" enum:"ProresCodecProfile"` // If you are using the console, use the Framerate setting to specify the frame // rate for this output. If you want to keep the same frame rate as the input // video, choose Follow source. If you want to do frame rate conversion, choose // a frame rate from the dropdown list or choose Custom. The framerates shown // in the dropdown list are decimal approximations of fractions. If you choose - // Custom, specify your frame rate as a fraction.
If you are creating your transcoding - // job specification as a JSON file without the console, use FramerateControl - // to specify which value the service uses for the frame rate for this output. - // Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate - // from the input. Choose SPECIFIED if you want the service to use the frame - // rate you specify in the settings FramerateNumerator and FramerateDenominator. + // Custom, specify your frame rate as a fraction. FramerateControl *string `locationName:"framerateControl" type:"string" enum:"ProresFramerateControl"` // Choose the method that you want MediaConvert to use when increasing or decreasing @@ -22358,57 +22066,52 @@ type ProresSettings struct { FramerateNumerator *int64 `locationName:"framerateNumerator" min:"1" type:"integer"` // Choose the scan line type for the output. Keep the default value, Progressive - // (PROGRESSIVE) to create a progressive output, regardless of the scan type - // of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) - // to create an output that's interlaced with the same field polarity throughout. - // Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) - // to produce outputs with the same field polarity as the source. For jobs that - // have multiple inputs, the output field polarity might change over the course - // of the output. Follow behavior depends on the input scan type. If the source - // is interlaced, the output will be interlaced with the same polarity as the - // source. If the source is progressive, the output will be interlaced with - // top field bottom field first, depending on which of the Follow options you - // choose. + // to create a progressive output, regardless of the scan type of your input. + // Use Top field first or Bottom field first to create an output that's interlaced + // with the same field polarity throughout. Use Follow, default top or Follow, + // default bottom to produce outputs with the same field polarity as the source. + // For jobs that have multiple inputs, the output field polarity might change + // over the course of the output. Follow behavior depends on the input scan + // type. If the source is interlaced, the output will be interlaced with the + // same polarity as the source. If the source is progressive, the output will + // be interlaced with top field or bottom field first, depending on which of the + // Follow options you choose. InterlaceMode *string `locationName:"interlaceMode" type:"string" enum:"ProresInterlaceMode"` // Optional. Specify how the service determines the pixel aspect ratio (PAR) - // for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), - // uses the PAR from your input video for your output. To specify a different - // PAR in the console, choose any value other than Follow source. To specify - // a different PAR by editing the JSON job specification, choose SPECIFIED. - // When you choose SPECIFIED for this setting, you must also specify values - // for the parNumerator and parDenominator settings. + // for this output. The default behavior, Follow source, uses the PAR from your + // input video for your output. To specify a different PAR, choose any value + // other than Follow source. When you choose SPECIFIED for this setting, you + // must also specify values for the parNumerator and parDenominator settings.
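The 40:33 example in the pixel aspect ratio documentation translates directly into code:

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

// d1DvNtscWidescreen reproduces the D1/DV NTSC widescreen example above:
// choosing SPECIFIED requires both parNumerator and parDenominator.
func d1DvNtscWidescreen() *mediaconvert.ProresSettings {
	return &mediaconvert.ProresSettings{
		ParControl:     aws.String("SPECIFIED"),
		ParNumerator:   aws.Int64(40),
		ParDenominator: aws.Int64(33),
	}
}
```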
ParControl *string `locationName:"parControl" type:"string" enum:"ProresParControl"` - // Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the - // console, this corresponds to any value other than Follow source. When you - // specify an output pixel aspect ratio (PAR) that is different from your input - // video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC - // widescreen, you would specify the ratio 40:33. In this example, the value - // for parDenominator is 33. + // Required when you set Pixel aspect ratio to SPECIFIED. On the console, this + // corresponds to any value other than Follow source. When you specify an output + // pixel aspect ratio (PAR) that is different from your input video PAR, provide + // your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would + // specify the ratio 40:33. In this example, the value for parDenominator is + // 33. ParDenominator *int64 `locationName:"parDenominator" min:"1" type:"integer"` - // Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the - // console, this corresponds to any value other than Follow source. When you - // specify an output pixel aspect ratio (PAR) that is different from your input - // video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC - // widescreen, you would specify the ratio 40:33. In this example, the value - // for parNumerator is 40. + // Required when you set Pixel aspect ratio to SPECIFIED. On the console, this + // corresponds to any value other than Follow source. When you specify an output + // pixel aspect ratio (PAR) that is different from your input video PAR, provide + // your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would + // specify the ratio 40:33. In this example, the value for parNumerator is 40. ParNumerator *int64 `locationName:"parNumerator" min:"1" type:"integer"` // Use this setting for interlaced outputs, when your output frame rate is half // of your input frame rate. In this situation, choose Optimized interlacing - // (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this - // case, each progressive frame from the input corresponds to an interlaced - // field in the output. Keep the default value, Basic interlacing (INTERLACED), - // for all other output frame rates. With basic interlacing, MediaConvert performs - // any frame rate conversion first and then interlaces the frames. When you - // choose Optimized interlacing and you set your output frame rate to a value - // that isn't suitable for optimized interlacing, MediaConvert automatically - // falls back to basic interlacing. Required settings: To use optimized interlacing, - // you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't - // use optimized interlacing for hard telecine outputs. You must also set Interlace - // mode (interlaceMode) to a value other than Progressive (PROGRESSIVE). + // to create a better quality interlaced output. In this case, each progressive + // frame from the input corresponds to an interlaced field in the output. Keep + // the default value, Basic interlacing, for all other output frame rates. With + // basic interlacing, MediaConvert performs any frame rate conversion first + // and then interlaces the frames. When you choose Optimized interlacing and + // you set your output frame rate to a value that isn't suitable for optimized + // interlacing, MediaConvert automatically falls back to basic interlacing. 
+ // Required settings: To use optimized interlacing, you must set Telecine to + // None or Soft. You can't use optimized interlacing for hard telecine outputs. + // You must also set Interlace mode to a value other than Progressive. ScanTypeConversionMode *string `locationName:"scanTypeConversionMode" type:"string" enum:"ProresScanTypeConversionMode"` // Ignore this setting unless your input frame rate is 23.976 or 24 frames per @@ -22416,17 +22119,14 @@ type ProresSettings struct { // slow PAL, MediaConvert relabels the video frames to 25 fps and resamples // your audio to keep it synchronized with the video. Note that enabling this // setting will slightly reduce the duration of your video. Required settings: - // You must also set Framerate to 25. In your JSON job specification, set (framerateControl) - // to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to - // 1. + // You must also set Framerate to 25. SlowPal *string `locationName:"slowPal" type:"string" enum:"ProresSlowPal"` // When you do frame rate conversion from 23.976 frames per second (fps) to // 29.97 fps, and your output scan type is interlaced, you can optionally enable - // hard telecine (HARD) to create a smoother picture. When you keep the default - // value, None (NONE), MediaConvert does a standard frame rate conversion to - // 29.97 without doing anything with the field polarity to create a smoother - // picture. + // hard telecine to create a smoother picture. When you keep the default value, + // None, MediaConvert does a standard frame rate conversion to 29.97 without + // doing anything with the field polarity to create a smoother picture. Telecine *string `locationName:"telecine" type:"string" enum:"ProresTelecine"` } @@ -22896,23 +22596,23 @@ func (s *Rectangle) SetY(v int64) *Rectangle { return s } -// Use Manual audio remixing (RemixSettings) to adjust audio levels for each -// audio channel in each output of your job. With audio remixing, you can output -// more or fewer audio channels than your input audio source provides. +// Use Manual audio remixing to adjust audio levels for each audio channel in +// each output of your job. With audio remixing, you can output more or fewer +// audio channels than your input audio source provides. type RemixSettings struct { _ struct{} `type:"structure"` - // Channel mapping (ChannelMapping) contains the group of fields that hold the - // remixing value for each channel, in dB. Specify remix values to indicate - // how much of the content from your input audio channel you want in your output - // audio channels. Each instance of the InputChannels or InputChannelsFineTune - // array specifies these values for one output channel. Use one instance of - // this array for each output channel. In the console, each array corresponds - // to a column in the graphical depiction of the mapping matrix. The rows of - // the graphical matrix correspond to input channels. Valid values are within - // the range from -60 (mute) through 6. A setting of 0 passes the input channel - // unchanged to the output channel (no attenuation or amplification). Use InputChannels - // or InputChannelsFineTune to specify your remix values. Don't use both. + // Channel mapping contains the group of fields that hold the remixing value + // for each channel, in dB. Specify remix values to indicate how much of the + // content from your input audio channel you want in your output audio channels. 
+ // Each instance of the InputChannels or InputChannelsFineTune array specifies + // these values for one output channel. Use one instance of this array for each + // output channel. In the console, each array corresponds to a column in the + // graphical depiction of the mapping matrix. The rows of the graphical matrix + // correspond to input channels. Valid values are within the range from -60 + // (mute) through 6. A setting of 0 passes the input channel unchanged to the + // output channel (no attenuation or amplification). Use InputChannels or InputChannelsFineTune + // to specify your remix values. Don't use both. ChannelMapping *ChannelMapping `locationName:"channelMapping" type:"structure"` // Specify the number of audio channels from your input that you want to use @@ -23284,30 +22984,28 @@ type S3EncryptionSettings struct { // your content. AWS also encrypts the data keys themselves, using a customer // master key (CMK), and then stores the encrypted data keys alongside your // encrypted content. Use this setting to specify which AWS service manages - // the CMK. For simplest set up, choose Amazon S3 (SERVER_SIDE_ENCRYPTION_S3). - // If you want your master key to be managed by AWS Key Management Service (KMS), - // choose AWS KMS (SERVER_SIDE_ENCRYPTION_KMS). By default, when you choose - // AWS KMS, KMS uses the AWS managed customer master key (CMK) associated with - // Amazon S3 to encrypt your data keys. You can optionally choose to specify - // a different, customer managed CMK. Do so by specifying the Amazon Resource - // Name (ARN) of the key for the setting KMS ARN (kmsKeyArn). + // the CMK. For the simplest setup, choose Amazon S3. If you want your master key + // to be managed by AWS Key Management Service (KMS), choose AWS KMS. By default, + // when you choose AWS KMS, KMS uses the AWS managed customer master key (CMK) + // associated with Amazon S3 to encrypt your data keys. You can optionally choose + // to specify a different, customer managed CMK. Do so by specifying the Amazon + // Resource Name (ARN) of the key for the setting KMS ARN. EncryptionType *string `locationName:"encryptionType" type:"string" enum:"S3ServerSideEncryptionType"` // Optionally, specify the encryption context that you want to use alongside // your KMS key. AWS KMS uses this encryption context as additional authenticated // data (AAD) to support authenticated encryption. This value must be a base64-encoded // UTF-8 string holding JSON which represents a string-string map. To use this - // setting, you must also set Server-side encryption (S3ServerSideEncryptionType) - // to AWS KMS (SERVER_SIDE_ENCRYPTION_KMS).
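As a concrete reading of the remix matrix described above, a sketch that folds stereo into mono. The -3 dB levels are arbitrary illustrative values within the documented -60 through 6 range, and the ChannelsIn/ChannelsOut counts are assumed to be the neighboring fields of this struct:

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

// stereoToMono mixes a two-channel input down to one output channel. Each
// OutputChannelMapping is one column of the console matrix; its InputChannels
// entries are the per-input-channel levels in dB.
func stereoToMono() *mediaconvert.RemixSettings {
	return &mediaconvert.RemixSettings{
		ChannelsIn:  aws.Int64(2),
		ChannelsOut: aws.Int64(1),
		ChannelMapping: &mediaconvert.ChannelMapping{
			OutputChannels: []*mediaconvert.OutputChannelMapping{
				{InputChannels: []*int64{aws.Int64(-3), aws.Int64(-3)}},
			},
		},
	}
}
```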
- // If you set Server-side encryption to AWS KMS but don't specify a CMK here, - // AWS uses the AWS managed CMK associated with Amazon S3. + // also set Server-side encryption to AWS KMS. If you set Server-side encryption + // to AWS KMS but don't specify a CMK here, AWS uses the AWS managed CMK associated + // with Amazon S3. KmsKeyArn *string `locationName:"kmsKeyArn" type:"string"` } @@ -23351,17 +23049,14 @@ func (s *S3EncryptionSettings) SetKmsKeyArn(v string) *S3EncryptionSettings { // in a file that is separate from the video container. Set up sidecar captions // in the same output group, but different output from your video. For more // information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/scc-srt-output-captions.html. -// When you work directly in your JSON job specification, include this object -// and any required children when you set destinationType to SCC. type SccDestinationSettings struct { _ struct{} `type:"structure"` - // Set Framerate (SccDestinationFramerate) to make sure that the captions and - // the video are synchronized in the output. Specify a frame rate that matches - // the frame rate of the associated video. If the video frame rate is 29.97, - // choose 29.97 dropframe (FRAMERATE_29_97_DROPFRAME) only if the video has - // video_insertion=true and drop_frame_timecode=true; otherwise, choose 29.97 - // non-dropframe (FRAMERATE_29_97_NON_DROPFRAME). + // Set Framerate to make sure that the captions and the video are synchronized + // in the output. Specify a frame rate that matches the frame rate of the associated + // video. If the video frame rate is 29.97, choose 29.97 dropframe only if the + // video has video_insertion=true and drop_frame_timecode=true; otherwise, choose + // 29.97 non-dropframe. Framerate *string `locationName:"framerate" type:"string" enum:"SccDestinationFramerate"` } @@ -23536,18 +23231,15 @@ func (s *SpekeKeyProviderCmaf) SetUrl(v string) *SpekeKeyProviderCmaf { // Settings related to SRT captions. SRT is a sidecar format that holds captions // in a file that is separate from the video container. Set up sidecar captions -// in the same output group, but different output from your video. When you -// work directly in your JSON job specification, include this object and any -// required children when you set destinationType to SRT. +// in the same output group, but different output from your video. type SrtDestinationSettings struct { _ struct{} `type:"structure"` - // Set Style passthrough (StylePassthrough) to ENABLED to use the available - // style, color, and position information from your input captions. MediaConvert - // uses default settings for any missing style and position information in your - // input captions. Set Style passthrough to DISABLED, or leave blank, to ignore - // the style and position information from your input captions and use simplified - // output captions. + // Set Style passthrough to ENABLED to use the available style, color, and position + // information from your input captions. MediaConvert uses default settings + // for any missing style and position information in your input captions. Set + // Style passthrough to DISABLED, or leave blank, to ignore the style and position + // information from your input captions and use simplified output captions. 
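A minimal sketch of the KMS variant of the S3 encryption settings described above; the key ARN is a hypothetical placeholder, and omitting KmsKeyArn falls back to the AWS managed CMK associated with Amazon S3:

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

// kmsEncryption encrypts outputs under a customer managed CMK. The wire value
// SERVER_SIDE_ENCRYPTION_KMS comes from the doc text above.
func kmsEncryption() *mediaconvert.S3EncryptionSettings {
	return &mediaconvert.S3EncryptionSettings{
		EncryptionType: aws.String("SERVER_SIDE_ENCRYPTION_KMS"),
		KmsKeyArn:      aws.String("arn:aws:kms:us-west-2:111122223333:key/1234abcd-EXAMPLE"), // placeholder ARN
	}
}
```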
StylePassthrough *string `locationName:"stylePassthrough" type:"string" enum:"SrtStylePassthrough"` } @@ -23589,7 +23281,7 @@ type StaticKeyProvider struct { KeyFormatVersions *string `locationName:"keyFormatVersions" type:"string"` // Relates to DRM implementation. Use a 32-character hexadecimal string to specify - // Key Value (StaticKeyValue). + // Key Value. StaticKeyValue *string `locationName:"staticKeyValue" type:"string"` // Relates to DRM implementation. The location of the license server used for @@ -23729,8 +23421,6 @@ func (s TagResourceOutput) GoString() string { // Settings related to teletext captions. Set up teletext captions in the same // output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/teletext-output-captions.html. -// When you work directly in your JSON job specification, include this object -// and any required children when you set destinationType to TELETEXT. type TeletextDestinationSettings struct { _ struct{} `type:"structure"` PageNumber *string `locationName:"pageNumber" min:"3" type:"string"` // Specify the page types for this Teletext page. If you don't specify a value - // here, the service sets the page type to the default value Subtitle (PAGE_TYPE_SUBTITLE). - // If you pass through the entire set of Teletext data, don't use this field. - // When you pass through a set of Teletext pages, your output has the same page - // types as your input. + // here, the service sets the page type to the default value Subtitle. If you + // pass through the entire set of Teletext data, don't use this field. When + // you pass through a set of Teletext pages, your output has the same page types + // as your input. PageTypes []*string `locationName:"pageTypes" type:"list" enum:"TeletextPageType"` } @@ -23795,9 +23485,9 @@ func (s *TeletextDestinationSettings) SetPageTypes(v []*string) *TeletextDestina type TeletextSourceSettings struct { _ struct{} `type:"structure"` - // Use Page Number (PageNumber) to specify the three-digit hexadecimal page - // number that will be used for Teletext captions. Do not use this setting if - // you are passing through teletext from the input source to output. + // Use Page Number to specify the three-digit hexadecimal page number that will + // be used for Teletext captions. Do not use this setting if you are passing + // through teletext from the input source to output. PageNumber *string `locationName:"pageNumber" min:"3" type:"string"` } @@ -23842,19 +23532,19 @@ func (s *TeletextSourceSettings) SetPageNumber(v string) *TeletextSourceSettings type TimecodeBurnin struct { _ struct{} `type:"structure"` - // Use Font Size (FontSize) to set the font size of any burned-in timecode. - // Valid values are 10, 16, 32, 48. + // Use Font size to set the font size of any burned-in timecode. Valid values + // are 10, 16, 32, 48. FontSize *int64 `locationName:"fontSize" min:"10" type:"integer"` - // Use Position (Position) under under Timecode burn-in (TimecodeBurnIn) to - // specify the location the burned-in timecode on output video. + // Use Position under Timecode burn-in to specify the location of the burned-in + // timecode on the output video. Position *string `locationName:"position" type:"string" enum:"TimecodeBurninPosition"` - // Use Prefix (Prefix) to place ASCII characters before any burned-in timecode. - // For example, a prefix of "EZ-" will result in the timecode "EZ-00:00:00:00".
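Putting the three burn-in fields together, a hedged sketch; the font size and prefix come from the values documented above, while TOP_CENTER is an assumed member of the TimecodeBurninPosition enum rather than a value stated here:

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

// burninTimecode burns a timecode such as EZ-00:00:00:00 into the video.
func burninTimecode() *mediaconvert.TimecodeBurnin {
	return &mediaconvert.TimecodeBurnin{
		FontSize: aws.Int64(32),            // valid values: 10, 16, 32, 48
		Position: aws.String("TOP_CENTER"), // assumption: one TimecodeBurninPosition value
		Prefix:   aws.String("EZ-"),
	}
}
```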
- // Provide either the characters themselves or the ASCII code equivalents. The - // supported range of characters is 0x20 through 0x7e. This includes letters, - // numbers, and all special characters represented on a standard English keyboard. + // Use Prefix to place ASCII characters before any burned-in timecode. For example, + // a prefix of "EZ-" will result in the timecode "EZ-00:00:00:00". Provide either + // the characters themselves or the ASCII code equivalents. The supported range + // of characters is 0x20 through 0x7e. This includes letters, numbers, and all + // special characters represented on a standard English keyboard. Prefix *string `locationName:"prefix" type:"string"` } @@ -23913,43 +23603,39 @@ type TimecodeConfig struct { _ struct{} `type:"structure"` // If you use an editing platform that relies on an anchor timecode, use Anchor - // Timecode (Anchor) to specify a timecode that will match the input video frame - // to the output video frame. Use 24-hour format with frame number, (HH:MM:SS:FF) - // or (HH:MM:SS;FF). This setting ignores frame rate conversion. System behavior - // for Anchor Timecode varies depending on your setting for Source (TimecodeSource). - // * If Source (TimecodeSource) is set to Specified Start (SPECIFIEDSTART), - // the first input frame is the specified value in Start Timecode (Start). Anchor - // Timecode (Anchor) and Start Timecode (Start) are used calculate output timecode. - // * If Source (TimecodeSource) is set to Start at 0 (ZEROBASED) the first frame - // is 00:00:00:00. * If Source (TimecodeSource) is set to Embedded (EMBEDDED), - // the first frame is the timecode value on the first input frame of the input. + // Timecode to specify a timecode that will match the input video frame to the + // output video frame. Use 24-hour format with frame number, (HH:MM:SS:FF) or + // (HH:MM:SS;FF). This setting ignores frame rate conversion. System behavior + // for Anchor Timecode varies depending on your setting for Source. * If Source + // is set to Specified Start, the first input frame is the specified value in + // Start Timecode. Anchor Timecode and Start Timecode are used to calculate output + // timecode. * If Source is set to Start at 0, the first frame is 00:00:00:00. + // * If Source is set to Embedded, the first frame is the timecode value on + // the first input frame of the input. Anchor *string `locationName:"anchor" type:"string"` - // Use Source (TimecodeSource) to set how timecodes are handled within this - // job. To make sure that your video, audio, captions, and markers are synchronized - // and that time-based features, such as image inserter, work correctly, choose - // the Timecode source option that matches your assets. All timecodes are in - // a 24-hour format with frame number (HH:MM:SS:FF). * Embedded (EMBEDDED) - - // Use the timecode that is in the input video. If no embedded timecode is in - // the source, the service will use Start at 0 (ZEROBASED) instead. * Start - // at 0 (ZEROBASED) - Set the timecode of the initial frame to 00:00:00:00. - // * Specified Start (SPECIFIEDSTART) - Set the timecode of the initial frame - // to a value other than zero. You use Start timecode (Start) to provide this - // value. + // Use Source to set how timecodes are handled within this job. To make sure + // that your video, audio, captions, and markers are synchronized and that time-based + // features, such as image inserter, work correctly, choose the Timecode source + // option that matches your assets.
All timecodes are in a 24-hour format with + // frame number (HH:MM:SS:FF). * Embedded - Use the timecode that is in the + // input video. If no embedded timecode is in the source, the service will use + // Start at 0 instead. * Start at 0 - Set the timecode of the initial frame + // to 00:00:00:00. * Specified Start - Set the timecode of the initial frame + // to a value other than zero. You use Start timecode to provide this value. Source *string `locationName:"source" type:"string" enum:"TimecodeSource"` - // Only use when you set Source (TimecodeSource) to Specified start (SPECIFIEDSTART). - // Use Start timecode (Start) to specify the timecode for the initial frame. - // Use 24-hour format with frame number, (HH:MM:SS:FF) or (HH:MM:SS;FF). + // Only use when you set Source to Specified start. Use Start timecode to specify + // the timecode for the initial frame. Use 24-hour format with frame number, + // (HH:MM:SS:FF) or (HH:MM:SS;FF). Start *string `locationName:"start" type:"string"` // Only applies to outputs that support program-date-time stamp. Use Timestamp - // offset (TimestampOffset) to overwrite the timecode date without affecting - // the time and frame number. Provide the new date as a string in the format - // "yyyy-mm-dd". To use Time stamp offset, you must also enable Insert program-date-time - // (InsertProgramDateTime) in the output settings. For example, if the date - // part of your timecodes is 2002-1-25 and you want to change it to one year - // later, set Timestamp offset (TimestampOffset) to 2003-1-25. + // offset to overwrite the timecode date without affecting the time and frame + // number. Provide the new date as a string in the format "yyyy-mm-dd". To use + // Timestamp offset, you must also enable Insert program-date-time in the output + // settings. For example, if the date part of your timecodes is 2002-1-25 and + // you want to change it to one year later, set Timestamp offset to 2003-1-25. TimestampOffset *string `locationName:"timestampOffset" type:"string"` } @@ -23995,9 +23681,9 @@ func (s *TimecodeConfig) SetTimestampOffset(v string) *TimecodeConfig { return s } -// Insert user-defined custom ID3 metadata (id3) at timecodes (timecode) that -// you specify. In each output that you want to include this metadata, you must -// set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH). +// Insert user-defined custom ID3 metadata at timecodes that you specify. In +// each output that you want to include this metadata, you must set ID3 metadata +// to Passthrough. type TimedMetadataInsertion struct { _ struct{} `type:"structure"` @@ -24200,8 +23886,6 @@ func (s *TrackSourceSettings) SetTrackNumber(v int64) *TrackSourceSettings { // in a file that is separate from the video container. Set up sidecar captions // in the same output group, but different output from your video. For more // information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html. -// When you work directly in your JSON job specification, include this object -// and any required children when you set destinationType to TTML. 
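For the Specified Start case above, a minimal sketch; Start is only consulted when Source is SPECIFIEDSTART, and both wire values and the HH:MM:SS:FF format come from the doc text above:

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

// specifiedStart makes the first output frame carry the timecode 01:00:00:00.
func specifiedStart() *mediaconvert.TimecodeConfig {
	return &mediaconvert.TimecodeConfig{
		Source: aws.String("SPECIFIEDSTART"),
		Start:  aws.String("01:00:00:00"),
	}
}
```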
type TtmlDestinationSettings struct { _ struct{} `type:"structure"` @@ -24753,8 +24437,7 @@ func (s *UpdateQueueOutput) SetQueue(v *Queue) *UpdateQueueOutput { return s } -// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to -// the value VC3 +// Required when you set Codec to the value VC3 type Vc3Settings struct { _ struct{} `type:"structure"` @@ -24763,12 +24446,7 @@ type Vc3Settings struct { // video, choose Follow source. If you want to do frame rate conversion, choose // a frame rate from the dropdown list or choose Custom. The framerates shown // in the dropdown list are decimal approximations of fractions. If you choose - // Custom, specify your frame rate as a fraction. If you are creating your transcoding - // job specification as a JSON file without the console, use FramerateControl - // to specify which value the service uses for the frame rate for this output. - // Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate - // from the input. Choose SPECIFIED if you want the service to use the frame - // rate you specify in the settings FramerateNumerator and FramerateDenominator. + // Custom, specify your frame rate as a fraction. FramerateControl *string `locationName:"framerateControl" type:"string" enum:"Vc3FramerateControl"` // Choose the method that you want MediaConvert to use when increasing or decreasing @@ -24806,44 +24484,39 @@ type Vc3Settings struct { // Use this setting for interlaced outputs, when your output frame rate is half // of your input frame rate. In this situation, choose Optimized interlacing - // (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this - // case, each progressive frame from the input corresponds to an interlaced - // field in the output. Keep the default value, Basic interlacing (INTERLACED), - // for all other output frame rates. With basic interlacing, MediaConvert performs - // any frame rate conversion first and then interlaces the frames. When you - // choose Optimized interlacing and you set your output frame rate to a value - // that isn't suitable for optimized interlacing, MediaConvert automatically - // falls back to basic interlacing. Required settings: To use optimized interlacing, - // you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't - // use optimized interlacing for hard telecine outputs. You must also set Interlace - // mode (interlaceMode) to a value other than Progressive (PROGRESSIVE). + // to create a better quality interlaced output. In this case, each progressive + // frame from the input corresponds to an interlaced field in the output. Keep + // the default value, Basic interlacing, for all other output frame rates. With + // basic interlacing, MediaConvert performs any frame rate conversion first + // and then interlaces the frames. When you choose Optimized interlacing and + // you set your output frame rate to a value that isn't suitable for optimized + // interlacing, MediaConvert automatically falls back to basic interlacing. + // Required settings: To use optimized interlacing, you must set Telecine to + // None or Soft. You can't use optimized interlacing for hard telecine outputs. + // You must also set Interlace mode to a value other than Progressive. ScanTypeConversionMode *string `locationName:"scanTypeConversionMode" type:"string" enum:"Vc3ScanTypeConversionMode"` // Ignore this setting unless your input frame rate is 23.976 or 24 frames per // second (fps). 
Enable slow PAL to create a 25 fps output by relabeling the // video frames and resampling your audio. Note that enabling this setting will // slightly reduce the duration of your video. Related settings: You must also - // set Framerate to 25. In your JSON job specification, set (framerateControl) - // to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to - // 1. + // set Framerate to 25. SlowPal *string `locationName:"slowPal" type:"string" enum:"Vc3SlowPal"` // When you do frame rate conversion from 23.976 frames per second (fps) to // 29.97 fps, and your output scan type is interlaced, you can optionally enable - // hard telecine (HARD) to create a smoother picture. When you keep the default - // value, None (NONE), MediaConvert does a standard frame rate conversion to - // 29.97 without doing anything with the field polarity to create a smoother - // picture. + // hard telecine to create a smoother picture. When you keep the default value, + // None, MediaConvert does a standard frame rate conversion to 29.97 without + // doing anything with the field polarity to create a smoother picture. Telecine *string `locationName:"telecine" type:"string" enum:"Vc3Telecine"` // Specify the VC3 class to choose the quality characteristics for this output. // VC3 class, together with the settings Framerate (framerateNumerator and framerateDenominator) // and Resolution (height and width), determine your output bitrate. For example, // say that your video resolution is 1920x1080 and your framerate is 29.97. - // Then Class 145 (CLASS_145) gives you an output with a bitrate of approximately - // 145 Mbps and Class 220 (CLASS_220) gives you and output with a bitrate of - // approximately 220 Mbps. VC3 class also specifies the color bit depth of your - // output. + // Then Class 145 gives you an output with a bitrate of approximately 145 Mbps + // and Class 220 gives you an output with a bitrate of approximately 220 Mbps. + // VC3 class also specifies the color bit depth of your output. Vc3Class *string `locationName:"vc3Class" type:"string" enum:"Vc3Class"` } @@ -24935,14 +24608,14 @@ func (s *Vc3Settings) SetVc3Class(v string) *Vc3Settings { return s } -// Video codec settings, (CodecSettings) under (VideoDescription), contains -// the group of settings related to video encoding. The settings in this group -// vary depending on the value that you choose for Video codec (Codec). For -// each codec enum that you choose, define the corresponding settings object. -// The following lists the codec enum, settings object pairs. * AV1, Av1Settings -// * AVC_INTRA, AvcIntraSettings * FRAME_CAPTURE, FrameCaptureSettings * H_264, -// H264Settings * H_265, H265Settings * MPEG2, Mpeg2Settings * PRORES, ProresSettings -// * VC3, Vc3Settings * VP8, Vp8Settings * VP9, Vp9Settings * XAVC, XavcSettings +// Video codec settings contains the group of settings related to video encoding. +// The settings in this group vary depending on the value that you choose for +// Video codec. For each codec enum that you choose, define the corresponding +// settings object. The following lists the codec enum, settings object pairs. 
+// * AV1, Av1Settings * AVC_INTRA, AvcIntraSettings * FRAME_CAPTURE, FrameCaptureSettings +// * H_264, H264Settings * H_265, H265Settings * MPEG2, Mpeg2Settings * PRORES, +// ProresSettings * VC3, Vc3Settings * VP8, Vp8Settings * VP9, Vp9Settings * +// XAVC, XavcSettings type VideoCodecSettings struct { _ struct{} `type:"structure"` @@ -24964,39 +24637,31 @@ type VideoCodecSettings struct { // must be MXF or QuickTime MOV. Codec *string `locationName:"codec" type:"string" enum:"VideoCodec"` - // Required when you set (Codec) under (VideoDescription)>(CodecSettings) to - // the value FRAME_CAPTURE. + // Required when you set Codec to the value FRAME_CAPTURE. FrameCaptureSettings *FrameCaptureSettings `locationName:"frameCaptureSettings" type:"structure"` - // Required when you set (Codec) under (VideoDescription)>(CodecSettings) to - // the value H_264. + // Required when you set Codec to the value H_264. H264Settings *H264Settings `locationName:"h264Settings" type:"structure"` // Settings for H265 codec H265Settings *H265Settings `locationName:"h265Settings" type:"structure"` - // Required when you set (Codec) under (VideoDescription)>(CodecSettings) to - // the value MPEG2. + // Required when you set Codec to the value MPEG2. Mpeg2Settings *Mpeg2Settings `locationName:"mpeg2Settings" type:"structure"` - // Required when you set (Codec) under (VideoDescription)>(CodecSettings) to - // the value PRORES. + // Required when you set Codec to the value PRORES. ProresSettings *ProresSettings `locationName:"proresSettings" type:"structure"` - // Required when you set (Codec) under (VideoDescription)>(CodecSettings) to - // the value VC3 + // Required when you set Codec to the value VC3 Vc3Settings *Vc3Settings `locationName:"vc3Settings" type:"structure"` - // Required when you set (Codec) under (VideoDescription)>(CodecSettings) to - // the value VP8. + // Required when you set Codec to the value VP8. Vp8Settings *Vp8Settings `locationName:"vp8Settings" type:"structure"` - // Required when you set (Codec) under (VideoDescription)>(CodecSettings) to - // the value VP9. + // Required when you set Codec to the value VP9. Vp9Settings *Vp9Settings `locationName:"vp9Settings" type:"structure"` - // Required when you set (Codec) under (VideoDescription)>(CodecSettings) to - // the value XAVC. + // Required when you set Codec to the value XAVC. XavcSettings *XavcSettings `locationName:"xavcSettings" type:"structure"` } @@ -25156,18 +24821,16 @@ func (s *VideoCodecSettings) SetXavcSettings(v *XavcSettings) *VideoCodecSetting } // Settings related to video encoding of your output. The specific video settings -// depend on the video codec that you choose. When you work directly in your -// JSON job specification, include one instance of Video description (VideoDescription) -// per output. +// depend on the video codec that you choose. type VideoDescription struct { _ struct{} `type:"structure"` // This setting only applies to H.264, H.265, and MPEG2 outputs. Use Insert - // AFD signaling (AfdSignaling) to specify whether the service includes AFD - // values in the output video data and what those values are. * Choose None - // to remove all AFD values from this output. * Choose Fixed to ignore input - // AFD values and instead encode the value specified in the job. * Choose Auto - // to calculate output AFD values based on the input AFD scaler data. + // AFD signaling to specify whether the service includes AFD values in the output + // video data and what those values are. 
* Choose None to remove all AFD values + // from this output. * Choose Fixed to ignore input AFD values and instead encode + // the value specified in the job. * Choose Auto to calculate output AFD values + // based on the input AFD scaler data. AfdSignaling *string `locationName:"afdSignaling" type:"string" enum:"AfdSignaling"` // The anti-alias filter is automatically applied to all outputs. The service @@ -25175,34 +24838,33 @@ type VideoDescription struct { // your job, the service will ignore the setting. AntiAlias *string `locationName:"antiAlias" type:"string" enum:"AntiAlias"` - // Video codec settings, (CodecSettings) under (VideoDescription), contains - // the group of settings related to video encoding. The settings in this group - // vary depending on the value that you choose for Video codec (Codec). For - // each codec enum that you choose, define the corresponding settings object. - // The following lists the codec enum, settings object pairs. * AV1, Av1Settings - // * AVC_INTRA, AvcIntraSettings * FRAME_CAPTURE, FrameCaptureSettings * H_264, - // H264Settings * H_265, H265Settings * MPEG2, Mpeg2Settings * PRORES, ProresSettings - // * VC3, Vc3Settings * VP8, Vp8Settings * VP9, Vp9Settings * XAVC, XavcSettings + // Video codec settings contains the group of settings related to video encoding. + // The settings in this group vary depending on the value that you choose for + // Video codec. For each codec enum that you choose, define the corresponding + // settings object. The following lists the codec enum, settings object pairs. + // * AV1, Av1Settings * AVC_INTRA, AvcIntraSettings * FRAME_CAPTURE, FrameCaptureSettings + // * H_264, H264Settings * H_265, H265Settings * MPEG2, Mpeg2Settings * PRORES, + // ProresSettings * VC3, Vc3Settings * VP8, Vp8Settings * VP9, Vp9Settings * + // XAVC, XavcSettings CodecSettings *VideoCodecSettings `locationName:"codecSettings" type:"structure"` - // Choose Insert (INSERT) for this setting to include color metadata in this - // output. Choose Ignore (IGNORE) to exclude color metadata from this output. - // If you don't specify a value, the service sets this to Insert by default. + // Choose Insert for this setting to include color metadata in this output. + // Choose Ignore to exclude color metadata from this output. If you don't specify + // a value, the service sets this to Insert by default. ColorMetadata *string `locationName:"colorMetadata" type:"string" enum:"ColorMetadata"` - // Use Cropping selection (crop) to specify the video area that the service - // will include in the output video frame. + // Use Cropping selection to specify the video area that the service will include + // in the output video frame. Crop *Rectangle `locationName:"crop" type:"structure"` // Applies only to 29.97 fps outputs. When this feature is enabled, the service // will use drop-frame timecode on outputs. If it is not possible to use drop-frame // timecode, the system will fall back to non-drop-frame. This setting is enabled - // by default when Timecode insertion (TimecodeInsertion) is enabled. + // by default when Timecode insertion is enabled. DropFrameTimecode *string `locationName:"dropFrameTimecode" type:"string" enum:"DropFrameTimecode"` - // Applies only if you set AFD Signaling(AfdSignaling) to Fixed (FIXED). Use - // Fixed (FixedAfd) to specify a four-bit AFD value which the service will write - // on all frames of this video output. + // Applies only if you set AFD Signaling to Fixed. 
Use Fixed to specify a four-bit + // AFD value which the service will write on all frames of this video output. FixedAfd *int64 `locationName:"fixedAfd" type:"integer"` // Use Height to define the video resolution height, in pixels, for this output. @@ -25212,52 +24874,47 @@ type VideoDescription struct { // to 1280, your output will be 1280x720. Height *int64 `locationName:"height" min:"32" type:"integer"` - // Use Selection placement (position) to define the video area in your output - // frame. The area outside of the rectangle that you specify here is black. + // Use Selection placement to define the video area in your output frame. The + // area outside of the rectangle that you specify here is black. Position *Rectangle `locationName:"position" type:"structure"` - // Use Respond to AFD (RespondToAfd) to specify how the service changes the - // video itself in response to AFD values in the input. * Choose Respond to - // clip the input video frame according to the AFD value, input display aspect - // ratio, and output display aspect ratio. * Choose Passthrough to include the - // input AFD values. Do not choose this when AfdSignaling is set to (NONE). - // A preferred implementation of this workflow is to set RespondToAfd to (NONE) - // and set AfdSignaling to (AUTO). * Choose None to remove all input AFD values - // from this output. + // Use Respond to AFD to specify how the service changes the video itself in + // response to AFD values in the input. * Choose Respond to clip the input video + // frame according to the AFD value, input display aspect ratio, and output + // display aspect ratio. * Choose Passthrough to include the input AFD values. + // Do not choose this when AfdSignaling is set to NONE. A preferred implementation + // of this workflow is to set RespondToAfd to None and set AfdSignaling to AUTO. + // * Choose None to remove all input AFD values from this output. RespondToAfd *string `locationName:"respondToAfd" type:"string" enum:"RespondToAfd"` // Specify how the service handles outputs that have a different aspect ratio - // from the input aspect ratio. Choose Stretch to output (STRETCH_TO_OUTPUT) - // to have the service stretch your video image to fit. Keep the setting Default - // (DEFAULT) to have the service letterbox your video instead. This setting - // overrides any value that you specify for the setting Selection placement - // (position) in this output. + // from the input aspect ratio. Choose Stretch to output to have the service + // stretch your video image to fit. Keep the setting Default to have the service + // letterbox your video instead. This setting overrides any value that you specify + // for the setting Selection placement in this output. ScalingBehavior *string `locationName:"scalingBehavior" type:"string" enum:"ScalingBehavior"` - // Use Sharpness (Sharpness) setting to specify the strength of anti-aliasing. - // This setting changes the width of the anti-alias filter kernel used for scaling. - // Sharpness only applies if your output resolution is different from your input - // resolution. 0 is the softest setting, 100 the sharpest, and 50 recommended - // for most content. + // Use Sharpness setting to specify the strength of anti-aliasing. This setting + // changes the width of the anti-alias filter kernel used for scaling. Sharpness + // only applies if your output resolution is different from your input resolution. + // 0 is the softest setting, 100 the sharpest, and 50 recommended for most content. 
Sharpness *int64 `locationName:"sharpness" type:"integer"` // Applies only to H.264, H.265, MPEG2, and ProRes outputs. Only enable Timecode // insertion when the input frame rate is identical to the output frame rate. - // To include timecodes in this output, set Timecode insertion (VideoTimecodeInsertion) - // to PIC_TIMING_SEI. To leave them out, set it to DISABLED. Default is DISABLED. - // When the service inserts timecodes in an output, by default, it uses any - // embedded timecodes from the input. If none are present, the service will - // set the timecode for the first output frame to zero. To change this default - // behavior, adjust the settings under Timecode configuration (TimecodeConfig). - // In the console, these settings are located under Job > Job settings > Timecode - // configuration. Note - Timecode source under input settings (InputTimecodeSource) - // does not affect the timecodes that are inserted in the output. Source under - // Job settings > Timecode configuration (TimecodeSource) does. + // To include timecodes in this output, set Timecode insertion to PIC_TIMING_SEI. + // To leave them out, set it to DISABLED. Default is DISABLED. When the service + // inserts timecodes in an output, by default, it uses any embedded timecodes + // from the input. If none are present, the service will set the timecode for + // the first output frame to zero. To change this default behavior, adjust the + // settings under Timecode configuration. In the console, these settings are + // located under Job > Job settings > Timecode configuration. Note - Timecode + // source under input settings does not affect the timecodes that are inserted + // in the output. Source under Job settings > Timecode configuration does. TimecodeInsertion *string `locationName:"timecodeInsertion" type:"string" enum:"VideoTimecodeInsertion"` - // Find additional transcoding features under Preprocessors (VideoPreprocessors). - // Enable the features at each output individually. These features are disabled - // by default. + // Find additional transcoding features under Preprocessors. Enable the features + // at each output individually. These features are disabled by default. VideoPreprocessors *VideoPreprocessor `locationName:"videoPreprocessors" type:"structure"` // Use Width to define the video resolution width, in pixels, for this output. @@ -25453,9 +25110,8 @@ func (s *VideoDetail) SetWidthInPx(v int64) *VideoDetail { return s } -// Find additional transcoding features under Preprocessors (VideoPreprocessors). -// Enable the features at each output individually. These features are disabled -// by default. +// Find additional transcoding features under Preprocessors. Enable the features +// at each output individually. These features are disabled by default. type VideoPreprocessor struct { _ struct{} `type:"structure"` @@ -25473,9 +25129,9 @@ type VideoPreprocessor struct { // Enable HDR10+ analysis and metadata injection. Compatible with HEVC only. Hdr10Plus *Hdr10Plus `locationName:"hdr10Plus" type:"structure"` - // Enable the Image inserter (ImageInserter) feature to include a graphic overlay - // on your video. Enable or disable this feature for each output individually. - // This setting is disabled by default. + // Enable the Image inserter feature to include a graphic overlay on your video. + // Enable or disable this feature for each output individually. This setting + // is disabled by default. 
ImageInserter *ImageInserter `locationName:"imageInserter" type:"structure"` // Enable the Noise reducer feature to remove noise from your video output if @@ -25626,21 +25282,19 @@ type VideoSelector struct { ColorSpace *string `locationName:"colorSpace" type:"string" enum:"ColorSpace"` // There are two sources for color metadata, the input file and the job input - // settings Color space (ColorSpace) and HDR master display information settings(Hdr10Metadata). - // The Color space usage setting determines which takes precedence. Choose Force - // (FORCE) to use color metadata from the input job settings. If you don't specify - // values for those settings, the service defaults to using metadata from your - // input. FALLBACK - Choose Fallback (FALLBACK) to use color metadata from the - // source when it is present. If there's no color metadata in your input file, - // the service defaults to using values you specify in the input settings. + // settings Color space and HDR master display information settings. The Color + // space usage setting determines which takes precedence. Choose Force to use + // color metadata from the input job settings. If you don't specify values for + // those settings, the service defaults to using metadata from your input. + // Choose Fallback to use color metadata from the source when it is present. + // If there's no color metadata in your input file, the service defaults to + // using values you specify in the input settings. ColorSpaceUsage *string `locationName:"colorSpaceUsage" type:"string" enum:"ColorSpaceUsage"` - // Set Embedded timecode override (embeddedTimecodeOverride) to Use MDPM (USE_MDPM) - // when your AVCHD input contains timecode tag data in the Modified Digital - // Video Pack Metadata (MDPM). When you do, we recommend you also set Timecode - // source (inputTimecodeSource) to Embedded (EMBEDDED). Leave Embedded timecode - // override blank, or set to None (NONE), when your input does not contain MDPM - // timecode. + // Set Embedded timecode override to Use MDPM when your AVCHD input contains + // timecode tag data in the Modified Digital Video Pack Metadata. When you do, + // we recommend you also set Timecode source to Embedded. Leave Embedded timecode + // override blank, or set to None, when your input does not contain MDPM timecode. EmbeddedTimecodeOverride *string `locationName:"embeddedTimecodeOverride" type:"string" enum:"EmbeddedTimecodeOverride"` // Use these settings to provide HDR 10 metadata that is missing or inaccurate @@ -25649,54 +25303,51 @@ type VideoSelector struct { // values during the HDR 10 mastering process. The valid range for each of these // settings is 0 to 50,000. Each increment represents 0.00002 in CIE1931 color // coordinate. Related settings - When you specify these values, you must also - // set Color space (ColorSpace) to HDR 10 (HDR10). To specify whether the the - // values you specify here take precedence over the values in the metadata of - // your input file, set Color space usage (ColorSpaceUsage). To specify whether - // color metadata is included in an output, set Color metadata (ColorMetadata). - // For more information about MediaConvert HDR jobs, see https://docs.aws.amazon.com/console/mediaconvert/hdr. + // set Color space to HDR 10. To specify whether the values you specify + // here take precedence over the values in the metadata of your input file, + // set Color space usage. To specify whether color metadata is included in an + // output, set Color metadata. 
For more information about MediaConvert HDR jobs, + // see https://docs.aws.amazon.com/console/mediaconvert/hdr. Hdr10Metadata *Hdr10Metadata `locationName:"hdr10Metadata" type:"structure"` // Use this setting if your input has video and audio durations that don't align, // and your output or player has strict alignment requirements. Examples: Input // audio track has a delayed start. Input video track ends before audio ends. - // When you set Pad video (padVideo) to Black (BLACK), MediaConvert generates - // black video frames so that output video and audio durations match. Black - // video frames are added at the beginning or end, depending on your input. - // To keep the default behavior and not generate black video, set Pad video - // to Disabled (DISABLED) or leave blank. + // When you set Pad video to Black, MediaConvert generates black video frames + // so that output video and audio durations match. Black video frames are added + // at the beginning or end, depending on your input. To keep the default behavior + // and not generate black video, set Pad video to Disabled or leave blank. PadVideo *string `locationName:"padVideo" type:"string" enum:"PadVideo"` - // Use PID (Pid) to select specific video data from an input file. Specify this - // value as an integer; the system automatically converts it to the hexidecimal - // value. For example, 257 selects PID 0x101. A PID, or packet identifier, is - // an identifier for a set of data in an MPEG-2 transport stream container. + // Use PID to select specific video data from an input file. Specify this value + // as an integer; the system automatically converts it to the hexadecimal value. + // For example, 257 selects PID 0x101. A PID, or packet identifier, is an identifier + // for a set of data in an MPEG-2 transport stream container. Pid *int64 `locationName:"pid" min:"1" type:"integer"` // Selects a specific program from within a multi-program transport stream. // Note that Quad 4K is not currently supported. ProgramNumber *int64 `locationName:"programNumber" type:"integer"` - // Use Rotate (InputRotate) to specify how the service rotates your video. You - // can choose automatic rotation or specify a rotation. You can specify a clockwise - // rotation of 0, 90, 180, or 270 degrees. If your input video container is - // .mov or .mp4 and your input has rotation metadata, you can choose Automatic - // to have the service rotate your video according to the rotation specified - // in the metadata. The rotation must be within one degree of 90, 180, or 270 - // degrees. If the rotation metadata specifies any other rotation, the service - // will default to no rotation. By default, the service does no rotation, even - // if your input video has rotation metadata. The service doesn't pass through - // rotation metadata. + // Use Rotate to specify how the service rotates your video. You can choose + // automatic rotation or specify a rotation. You can specify a clockwise rotation + // of 0, 90, 180, or 270 degrees. If your input video container is .mov or .mp4 + // and your input has rotation metadata, you can choose Automatic to have the + // service rotate your video according to the rotation specified in the metadata. + // The rotation must be within one degree of 90, 180, or 270 degrees. If the + // rotation metadata specifies any other rotation, the service will default + // to no rotation. By default, the service does no rotation, even if your input + // video has rotation metadata. The service doesn't pass through rotation metadata. 
Rotate *string `locationName:"rotate" type:"string" enum:"InputRotate"` // If the sample range metadata in your input video is accurate, or if you don't - // know about sample range, keep the default value, Follow (FOLLOW), for this - // setting. When you do, the service automatically detects your input sample - // range. If your input video has metadata indicating the wrong sample range, - // specify the accurate sample range here. When you do, MediaConvert ignores - // any sample range information in the input metadata. Regardless of whether - // MediaConvert uses the input sample range or the sample range that you specify, - // MediaConvert uses the sample range for transcoding and also writes it to - // the output metadata. + // know about sample range, keep the default value, Follow, for this setting. + // When you do, the service automatically detects your input sample range. If + // your input video has metadata indicating the wrong sample range, specify + // the accurate sample range here. When you do, MediaConvert ignores any sample + // range information in the input metadata. Regardless of whether MediaConvert + // uses the input sample range or the sample range that you specify, MediaConvert + // uses the sample range for transcoding and also writes it to the output metadata. SampleRange *string `locationName:"sampleRange" type:"string" enum:"InputSampleRange"` } @@ -25870,8 +25521,7 @@ func (s *VorbisSettings) SetVbrQuality(v int64) *VorbisSettings { return s } -// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to -// the value VP8. +// Required when you set Codec to the value VP8. type Vp8Settings struct { _ struct{} `type:"structure"` @@ -25884,12 +25534,7 @@ type Vp8Settings struct { // video, choose Follow source. If you want to do frame rate conversion, choose // a frame rate from the dropdown list or choose Custom. The framerates shown // in the dropdown list are decimal approximations of fractions. If you choose - // Custom, specify your frame rate as a fraction. If you are creating your transcoding - // job specification as a JSON file without the console, use FramerateControl - // to specify which value the service uses for the frame rate for this output. - // Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate - // from the input. Choose SPECIFIED if you want the service to use the frame - // rate you specify in the settings FramerateNumerator and FramerateDenominator. + // Custom, specify your frame rate as a fraction. FramerateControl *string `locationName:"framerateControl" type:"string" enum:"Vp8FramerateControl"` // Choose the method that you want MediaConvert to use when increasing or decreasing @@ -25935,33 +25580,30 @@ type Vp8Settings struct { MaxBitrate *int64 `locationName:"maxBitrate" min:"1000" type:"integer"` // Optional. Specify how the service determines the pixel aspect ratio (PAR) - // for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), - // uses the PAR from your input video for your output. To specify a different - // PAR in the console, choose any value other than Follow source. To specify - // a different PAR by editing the JSON job specification, choose SPECIFIED. - // When you choose SPECIFIED for this setting, you must also specify values - // for the parNumerator and parDenominator settings. + // for this output. The default behavior, Follow source, uses the PAR from your + // input video for your output. 
To specify a different PAR in the console, choose + // any value other than Follow source. When you choose SPECIFIED for this setting, + // you must also specify values for the parNumerator and parDenominator settings. ParControl *string `locationName:"parControl" type:"string" enum:"Vp8ParControl"` - // Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the - // console, this corresponds to any value other than Follow source. When you - // specify an output pixel aspect ratio (PAR) that is different from your input - // video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC - // widescreen, you would specify the ratio 40:33. In this example, the value - // for parDenominator is 33. + // Required when you set Pixel aspect ratio to SPECIFIED. On the console, this + // corresponds to any value other than Follow source. When you specify an output + // pixel aspect ratio (PAR) that is different from your input video PAR, provide + // your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would + // specify the ratio 40:33. In this example, the value for parDenominator is + // 33. ParDenominator *int64 `locationName:"parDenominator" min:"1" type:"integer"` - // Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the - // console, this corresponds to any value other than Follow source. When you - // specify an output pixel aspect ratio (PAR) that is different from your input - // video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC - // widescreen, you would specify the ratio 40:33. In this example, the value - // for parNumerator is 40. + // Required when you set Pixel aspect ratio to SPECIFIED. On the console, this + // corresponds to any value other than Follow source. When you specify an output + // pixel aspect ratio (PAR) that is different from your input video PAR, provide + // your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would + // specify the ratio 40:33. In this example, the value for parNumerator is 40. ParNumerator *int64 `locationName:"parNumerator" min:"1" type:"integer"` - // Optional. Use Quality tuning level (qualityTuningLevel) to choose how you - // want to trade off encoding speed for output video quality. The default behavior - // is faster, lower quality, multi-pass encoding. + // Optional. Use Quality tuning level to choose how you want to trade off encoding + // speed for output video quality. The default behavior is faster, lower quality, + // multi-pass encoding. QualityTuningLevel *string `locationName:"qualityTuningLevel" type:"string" enum:"Vp8QualityTuningLevel"` // With the VP8 codec, you can use only the variable bitrate (VBR) rate control @@ -26093,8 +25735,7 @@ func (s *Vp8Settings) SetRateControlMode(v string) *Vp8Settings { return s } -// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to -// the value VP9. +// Required when you set Codec to the value VP9. type Vp9Settings struct { _ struct{} `type:"structure"` @@ -26107,12 +25748,7 @@ type Vp9Settings struct { // video, choose Follow source. If you want to do frame rate conversion, choose // a frame rate from the dropdown list or choose Custom. The framerates shown // in the dropdown list are decimal approximations of fractions. If you choose - // Custom, specify your frame rate as a fraction. 
If you are creating your transcoding - // job specification as a JSON file without the console, use FramerateControl - // to specify which value the service uses for the frame rate for this output. - // Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate - // from the input. Choose SPECIFIED if you want the service to use the frame - // rate you specify in the settings FramerateNumerator and FramerateDenominator. + // Custom, specify your frame rate as a fraction. FramerateControl *string `locationName:"framerateControl" type:"string" enum:"Vp9FramerateControl"` // Choose the method that you want MediaConvert to use when increasing or decreasing @@ -26162,25 +25798,24 @@ type Vp9Settings struct { // input video. ParControl *string `locationName:"parControl" type:"string" enum:"Vp9ParControl"` - // Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the - // console, this corresponds to any value other than Follow source. When you - // specify an output pixel aspect ratio (PAR) that is different from your input - // video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC - // widescreen, you would specify the ratio 40:33. In this example, the value - // for parDenominator is 33. + // Required when you set Pixel aspect ratio to SPECIFIED. On the console, this + // corresponds to any value other than Follow source. When you specify an output + // pixel aspect ratio (PAR) that is different from your input video PAR, provide + // your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would + // specify the ratio 40:33. In this example, the value for parDenominator is + // 33. ParDenominator *int64 `locationName:"parDenominator" min:"1" type:"integer"` - // Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the - // console, this corresponds to any value other than Follow source. When you - // specify an output pixel aspect ratio (PAR) that is different from your input - // video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC - // widescreen, you would specify the ratio 40:33. In this example, the value - // for parNumerator is 40. + // Required when you set Pixel aspect ratio to SPECIFIED. On the console, this + // corresponds to any value other than Follow source. When you specify an output + // pixel aspect ratio (PAR) that is different from your input video PAR, provide + // your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would + // specify the ratio 40:33. In this example, the value for parNumerator is 40. ParNumerator *int64 `locationName:"parNumerator" min:"1" type:"integer"` - // Optional. Use Quality tuning level (qualityTuningLevel) to choose how you - // want to trade off encoding speed for output video quality. The default behavior - // is faster, lower quality, multi-pass encoding. + // Optional. Use Quality tuning level to choose how you want to trade off encoding + // speed for output video quality. The default behavior is faster, lower quality, + // multi-pass encoding. QualityTuningLevel *string `locationName:"qualityTuningLevel" type:"string" enum:"Vp9QualityTuningLevel"` // With the VP9 codec, you can use only the variable bitrate (VBR) rate control @@ -26358,13 +25993,12 @@ func (s *WarningGroup) SetCount(v int64) *WarningGroup { return s } -// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to -// the value WAV. +// Required when you set Codec to the value WAV. 
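To illustrate the codec/settings pairing rule that the comment above states for WAV, here is a small, hedged sketch against the generated API; the AudioCodecWav constant follows the SDK's usual enum naming, and the bit depth, channel count, and sample rate are arbitrary example values, not defaults from this diff:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	// WavSettings is required when Codec is WAV; here, 48 kHz stereo at 24 bits.
	wav := &mediaconvert.WavSettings{}
	wav.SetBitDepth(24)
	wav.SetChannels(2)
	wav.SetSampleRate(48000)

	audio := &mediaconvert.AudioCodecSettings{}
	audio.SetCodec(mediaconvert.AudioCodecWav)
	audio.SetWavSettings(wav)
	fmt.Println(audio)
}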
type WavSettings struct { _ struct{} `type:"structure"` - // Specify Bit depth (BitDepth), in bits per sample, to choose the encoding - // quality for this audio track. + // Specify Bit depth, in bits per sample, to choose the encoding quality for + // this audio track. BitDepth *int64 `locationName:"bitDepth" min:"16" type:"integer"` // Specify the number of channels in this output audio track. Valid values are @@ -26445,8 +26079,6 @@ func (s *WavSettings) SetSampleRate(v int64) *WavSettings { // captions in a file that is separate from the video container. Set up sidecar // captions in the same output group, but different output from your video. // For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html. -// When you work directly in your JSON job specification, include this object -// and any required children when you set destinationType to WebVTT. type WebvttDestinationSettings struct { _ struct{} `type:"structure"` @@ -26463,14 +26095,14 @@ type WebvttDestinationSettings struct { Accessibility *string `locationName:"accessibility" type:"string" enum:"WebvttAccessibilitySubs"` // To use the available style, color, and position information from your input - // captions: Set Style passthrough (stylePassthrough) to Enabled (ENABLED). - // MediaConvert uses default settings when style and position information is - // missing from your input captions. To recreate the input captions exactly: - // Set Style passthrough to Strict (STRICT). MediaConvert automatically applies - // timing adjustments, including adjustments for frame rate conversion, ad avails, - // and input clipping. Your input captions format must be WebVTT. To ignore - // the style and position information from your input captions and use simplified - // output captions: Set Style passthrough to Disabled (DISABLED), or leave blank. + // captions: Set Style passthrough to Enabled. MediaConvert uses default settings + // when style and position information is missing from your input captions. + // To recreate the input captions exactly: Set Style passthrough to Strict. + // MediaConvert automatically applies timing adjustments, including adjustments + // for frame rate conversion, ad avails, and input clipping. Your input captions + // format must be WebVTT. To ignore the style and position information from + // your input captions and use simplified output captions: Set Style passthrough + // to Disabled, or leave blank. StylePassthrough *string `locationName:"stylePassthrough" type:"string" enum:"WebvttStylePassthrough"` } @@ -26561,8 +26193,7 @@ func (s *WebvttHlsSourceSettings) SetRenditionName(v string) *WebvttHlsSourceSet return s } -// Required when you set (Profile) under (VideoDescription)>(CodecSettings)>(XavcSettings) -// to the value XAVC_4K_INTRA_CBG. +// Required when you set Profile to the value XAVC_4K_INTRA_CBG. type Xavc4kIntraCbgProfileSettings struct { _ struct{} `type:"structure"` @@ -26596,8 +26227,7 @@ func (s *Xavc4kIntraCbgProfileSettings) SetXavcClass(v string) *Xavc4kIntraCbgPr return s } -// Required when you set (Profile) under (VideoDescription)>(CodecSettings)>(XavcSettings) -// to the value XAVC_4K_INTRA_VBR. +// Required when you set Profile to the value XAVC_4K_INTRA_VBR. 
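The same Profile/settings-object pairing applies to the XAVC profiles: choosing XAVC_4K_INTRA_VBR means also supplying an Xavc4kIntraVbrProfileSettings object. A hedged sketch (the XavcProfileXavc4kIntraVbr constant name is assumed from the SDK's enum naming; the class field is left at its default rather than guessed):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	// Selecting the XAVC_4K_INTRA_VBR profile requires the matching settings
	// object, mirroring the "Required when you set Profile" comments above.
	xavc := &mediaconvert.XavcSettings{}
	xavc.SetProfile(mediaconvert.XavcProfileXavc4kIntraVbr)
	xavc.SetXavc4kIntraVbrProfileSettings(&mediaconvert.Xavc4kIntraVbrProfileSettings{})
	fmt.Println(xavc)
}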
type Xavc4kIntraVbrProfileSettings struct { _ struct{} `type:"structure"` @@ -26631,8 +26261,7 @@ func (s *Xavc4kIntraVbrProfileSettings) SetXavcClass(v string) *Xavc4kIntraVbrPr return s } -// Required when you set (Profile) under (VideoDescription)>(CodecSettings)>(XavcSettings) -// to the value XAVC_4K. +// Required when you set Profile to the value XAVC_4K. type Xavc4kProfileSettings struct { _ struct{} `type:"structure"` @@ -26647,25 +26276,24 @@ type Xavc4kProfileSettings struct { CodecProfile *string `locationName:"codecProfile" type:"string" enum:"Xavc4kProfileCodecProfile"` // The best way to set up adaptive quantization is to keep the default value, - // Auto (AUTO), for the setting Adaptive quantization (XavcAdaptiveQuantization). - // When you do so, MediaConvert automatically applies the best types of quantization - // for your video content. Include this setting in your JSON job specification - // only when you choose to change the default value for Adaptive quantization. - // Enable this setting to have the encoder reduce I-frame pop. I-frame pop appears - // as a visual flicker that can arise when the encoder saves bits by copying - // some macroblocks many times from frame to frame, and then refreshes them - // at the I-frame. When you enable this setting, the encoder updates these macroblocks - // slightly more often to smooth out the flicker. This setting is disabled by - // default. Related setting: In addition to enabling this setting, you must - // also set Adaptive quantization (adaptiveQuantization) to a value other than - // Off (OFF) or Auto (AUTO). Use Adaptive quantization to adjust the degree - // of smoothing that Flicker adaptive quantization provides. + // Auto, for the setting Adaptive quantization. When you do so, MediaConvert + // automatically applies the best types of quantization for your video content. + // Include this setting in your JSON job specification only when you choose + // to change the default value for Adaptive quantization. Enable this setting + // to have the encoder reduce I-frame pop. I-frame pop appears as a visual flicker + // that can arise when the encoder saves bits by copying some macroblocks many + // times from frame to frame, and then refreshes them at the I-frame. When you + // enable this setting, the encoder updates these macroblocks slightly more + // often to smooth out the flicker. This setting is disabled by default. Related + // setting: In addition to enabling this setting, you must also set Adaptive + // quantization to a value other than Off or Auto. Use Adaptive quantization + // to adjust the degree of smoothing that Flicker adaptive quantization provides. FlickerAdaptiveQuantization *string `locationName:"flickerAdaptiveQuantization" type:"string" enum:"XavcFlickerAdaptiveQuantization"` // Specify whether the encoder uses B-frames as reference frames for other pictures - // in the same GOP. Choose Allow (ENABLED) to allow the encoder to use B-frames - // as reference frames. Choose Don't allow (DISABLED) to prevent the encoder - // from using B-frames as reference frames. + // in the same GOP. Choose Allow to allow the encoder to use B-frames as reference + // frames. Choose Don't allow to prevent the encoder from using B-frames as + // reference frames. GopBReference *string `locationName:"gopBReference" type:"string" enum:"XavcGopBReference"` // Frequency of closed GOPs. 
In streaming applications, it is recommended that @@ -26679,9 +26307,9 @@ type Xavc4kProfileSettings struct { // calculates the default by doubling the bitrate of this output point. HrdBufferSize *int64 `locationName:"hrdBufferSize" type:"integer"` - // Optional. Use Quality tuning level (qualityTuningLevel) to choose how you - // want to trade off encoding speed for output video quality. The default behavior - // is faster, lower quality, single-pass encoding. + // Optional. Use Quality tuning level to choose how you want to trade off encoding + // speed for output video quality. The default behavior is faster, lower quality, + // single-pass encoding. QualityTuningLevel *string `locationName:"qualityTuningLevel" type:"string" enum:"Xavc4kProfileQualityTuningLevel"` // Number of slices per picture. Must be less than or equal to the number of @@ -26769,8 +26397,7 @@ func (s *Xavc4kProfileSettings) SetSlices(v int64) *Xavc4kProfileSettings { return s } -// Required when you set (Profile) under (VideoDescription)>(CodecSettings)>(XavcSettings) -// to the value XAVC_HD_INTRA_CBG. +// Required when you set Profile to the value XAVC_HD_INTRA_CBG. type XavcHdIntraCbgProfileSettings struct { _ struct{} `type:"structure"` @@ -26804,8 +26431,7 @@ func (s *XavcHdIntraCbgProfileSettings) SetXavcClass(v string) *XavcHdIntraCbgPr return s } -// Required when you set (Profile) under (VideoDescription)>(CodecSettings)>(XavcSettings) -// to the value XAVC_HD. +// Required when you set Profile to the value XAVC_HD. type XavcHdProfileSettings struct { _ struct{} `type:"structure"` @@ -26815,25 +26441,24 @@ type XavcHdProfileSettings struct { BitrateClass *string `locationName:"bitrateClass" type:"string" enum:"XavcHdProfileBitrateClass"` // The best way to set up adaptive quantization is to keep the default value, - // Auto (AUTO), for the setting Adaptive quantization (XavcAdaptiveQuantization). - // When you do so, MediaConvert automatically applies the best types of quantization - // for your video content. Include this setting in your JSON job specification - // only when you choose to change the default value for Adaptive quantization. - // Enable this setting to have the encoder reduce I-frame pop. I-frame pop appears - // as a visual flicker that can arise when the encoder saves bits by copying - // some macroblocks many times from frame to frame, and then refreshes them - // at the I-frame. When you enable this setting, the encoder updates these macroblocks - // slightly more often to smooth out the flicker. This setting is disabled by - // default. Related setting: In addition to enabling this setting, you must - // also set Adaptive quantization (adaptiveQuantization) to a value other than - // Off (OFF) or Auto (AUTO). Use Adaptive quantization to adjust the degree - // of smoothing that Flicker adaptive quantization provides. + // Auto, for the setting Adaptive quantization. When you do so, MediaConvert + // automatically applies the best types of quantization for your video content. + // Include this setting in your JSON job specification only when you choose + // to change the default value for Adaptive quantization. Enable this setting + // to have the encoder reduce I-frame pop. I-frame pop appears as a visual flicker + // that can arise when the encoder saves bits by copying some macroblocks many + // times from frame to frame, and then refreshes them at the I-frame. When you + // enable this setting, the encoder updates these macroblocks slightly more + // often to smooth out the flicker. 
This setting is disabled by default. Related + // setting: In addition to enabling this setting, you must also set Adaptive + // quantization to a value other than Off or Auto. Use Adaptive quantization + // to adjust the degree of smoothing that Flicker adaptive quantization provides. FlickerAdaptiveQuantization *string `locationName:"flickerAdaptiveQuantization" type:"string" enum:"XavcFlickerAdaptiveQuantization"` // Specify whether the encoder uses B-frames as reference frames for other pictures - // in the same GOP. Choose Allow (ENABLED) to allow the encoder to use B-frames - // as reference frames. Choose Don't allow (DISABLED) to prevent the encoder - // from using B-frames as reference frames. + // in the same GOP. Choose Allow to allow the encoder to use B-frames as reference + // frames. Choose Don't allow to prevent the encoder from using B-frames as + // reference frames. GopBReference *string `locationName:"gopBReference" type:"string" enum:"XavcGopBReference"` // Frequency of closed GOPs. In streaming applications, it is recommended that @@ -26848,22 +26473,21 @@ type XavcHdProfileSettings struct { HrdBufferSize *int64 `locationName:"hrdBufferSize" type:"integer"` // Choose the scan line type for the output. Keep the default value, Progressive - // (PROGRESSIVE) to create a progressive output, regardless of the scan type - // of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) - // to create an output that's interlaced with the same field polarity throughout. - // Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) - // to produce outputs with the same field polarity as the source. For jobs that - // have multiple inputs, the output field polarity might change over the course - // of the output. Follow behavior depends on the input scan type. If the source - // is interlaced, the output will be interlaced with the same polarity as the - // source. If the source is progressive, the output will be interlaced with - // top field bottom field first, depending on which of the Follow options you - // choose. + // to create a progressive output, regardless of the scan type of your input. + // Use Top field first or Bottom field first to create an output that's interlaced + // with the same field polarity throughout. Use Follow, default top or Follow, + // default bottom to produce outputs with the same field polarity as the source. + // For jobs that have multiple inputs, the output field polarity might change + // over the course of the output. Follow behavior depends on the input scan + // type. If the source is interlaced, the output will be interlaced with the + // same polarity as the source. If the source is progressive, the output will + // be interlaced with top field or bottom field first, depending on which of the + // Follow options you choose. InterlaceMode *string `locationName:"interlaceMode" type:"string" enum:"XavcInterlaceMode"` - // Optional. Use Quality tuning level (qualityTuningLevel) to choose how you - // want to trade off encoding speed for output video quality. The default behavior - // is faster, lower quality, single-pass encoding. + // Optional. Use Quality tuning level to choose how you want to trade off encoding + // speed for output video quality. The default behavior is faster, lower quality, + // single-pass encoding. QualityTuningLevel *string `locationName:"qualityTuningLevel" type:"string" enum:"XavcHdProfileQualityTuningLevel"` // Number of slices per picture. 
Must be less than or equal to the number of @@ -26873,8 +26497,7 @@ type XavcHdProfileSettings struct { // Ignore this setting unless you set Frame rate (framerateNumerator divided // by framerateDenominator) to 29.970. If your input framerate is 23.976, choose - // Hard (HARD). Otherwise, keep the default value None (NONE). For more information, - // see https://docs.aws.amazon.com/mediaconvert/latest/ug/working-with-telecine-and-inverse-telecine.html. + // Hard. Otherwise, keep the default value None. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/working-with-telecine-and-inverse-telecine.html. Telecine *string `locationName:"telecine" type:"string" enum:"XavcHdProfileTelecine"` } @@ -26963,21 +26586,19 @@ func (s *XavcHdProfileSettings) SetTelecine(v string) *XavcHdProfileSettings { return s } -// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to -// the value XAVC. +// Required when you set Codec to the value XAVC. type XavcSettings struct { _ struct{} `type:"structure"` - // Keep the default value, Auto (AUTO), for this setting to have MediaConvert - // automatically apply the best types of quantization for your video content. - // When you want to apply your quantization settings manually, you must set - // Adaptive quantization (adaptiveQuantization) to a value other than Auto (AUTO). - // Use this setting to specify the strength of any adaptive quantization filters - // that you enable. If you don't want MediaConvert to do any adaptive quantization - // in this transcode, set Adaptive quantization to Off (OFF). Related settings: - // The value that you choose here applies to the following settings: Flicker - // adaptive quantization (flickerAdaptiveQuantization), Spatial adaptive quantization - // (spatialAdaptiveQuantization), and Temporal adaptive quantization (temporalAdaptiveQuantization). + // Keep the default value, Auto, for this setting to have MediaConvert automatically + // apply the best types of quantization for your video content. When you want + // to apply your quantization settings manually, you must set Adaptive quantization + // to a value other than Auto. Use this setting to specify the strength of any + // adaptive quantization filters that you enable. If you don't want MediaConvert + // to do any adaptive quantization in this transcode, set Adaptive quantization + // to Off. Related settings: The value that you choose here applies to the following + // settings: Flicker adaptive quantization, Spatial adaptive quantization, and + // Temporal adaptive quantization. AdaptiveQuantization *string `locationName:"adaptiveQuantization" type:"string" enum:"XavcAdaptiveQuantization"` // Optional. Choose a specific entropy encoding mode only when you want to override @@ -26989,12 +26610,7 @@ type XavcSettings struct { // rate for this output. If you want to keep the same frame rate as the input // video, choose Follow source. If you want to do frame rate conversion, choose // a frame rate from the dropdown list. The framerates shown in the dropdown - // list are decimal approximations of fractions. If you are creating your transcoding - // job specification as a JSON file without the console, use FramerateControl - // to specify which value the service uses for the frame rate for this output. - // Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate - // from the input. 
Choose SPECIFIED if you want the service to use the frame - // rate that you specify in the settings FramerateNumerator and FramerateDenominator. + // list are decimal approximations of fractions. FramerateControl *string `locationName:"framerateControl" type:"string" enum:"XavcFramerateControl"` // Choose the method that you want MediaConvert to use when increasing or decreasing @@ -27036,83 +26652,74 @@ type XavcSettings struct { // second (fps). Enable slow PAL to create a 25 fps output by relabeling the // video frames and resampling your audio. Note that enabling this setting will // slightly reduce the duration of your video. Related settings: You must also - // set Frame rate to 25. In your JSON job specification, set (framerateControl) - // to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to - // 1. + // set Frame rate to 25. SlowPal *string `locationName:"slowPal" type:"string" enum:"XavcSlowPal"` // Ignore this setting unless your downstream workflow requires that you specify // it explicitly. Otherwise, we recommend that you adjust the softness of your - // output by using a lower value for the setting Sharpness (sharpness) or by - // enabling a noise reducer filter (noiseReducerFilter). The Softness (softness) - // setting specifies the quantization matrices that the encoder uses. Keep the - // default value, 0, for flat quantization. Choose the value 1 or 16 to use - // the default JVT softening quantization matricies from the H.264 specification. - // Choose a value from 17 to 128 to use planar interpolation. Increasing values - // from 17 to 128 result in increasing reduction of high-frequency data. The - // value 128 results in the softest video. + // output by using a lower value for the setting Sharpness or by enabling a + // noise reducer filter. The Softness setting specifies the quantization matrices + // that the encoder uses. Keep the default value, 0, for flat quantization. + // Choose the value 1 or 16 to use the default JVT softening quantization matrices + // from the H.264 specification. Choose a value from 17 to 128 to use planar + // interpolation. Increasing values from 17 to 128 result in increasing reduction + // of high-frequency data. The value 128 results in the softest video. Softness *int64 `locationName:"softness" type:"integer"` // The best way to set up adaptive quantization is to keep the default value, - // Auto (AUTO), for the setting Adaptive quantization (adaptiveQuantization). - // When you do so, MediaConvert automatically applies the best types of quantization - // for your video content. Include this setting in your JSON job specification - // only when you choose to change the default value for Adaptive quantization. - // For this setting, keep the default value, Enabled (ENABLED), to adjust quantization - // within each frame based on spatial variation of content complexity. When - // you enable this feature, the encoder uses fewer bits on areas that can sustain - // more distortion with no noticeable visual degradation and uses more bits - // on areas where any small distortion will be noticeable. For example, complex - // textured blocks are encoded with fewer bits and smooth textured blocks are - // encoded with more bits. Enabling this feature will almost always improve - // your video quality. Note, though, that this feature doesn't take into account - // where the viewer's attention is likely to be. 
If viewers are likely to be - // focusing their attention on a part of the screen with a lot of complex texture, - // you might choose to disable this feature. Related setting: When you enable - // spatial adaptive quantization, set the value for Adaptive quantization (adaptiveQuantization) - // depending on your content. For homogeneous content, such as cartoons and - // video games, set it to Low. For content with a wider variety of textures, - // set it to High or Higher. + // Auto, for the setting Adaptive quantization. When you do so, MediaConvert + // automatically applies the best types of quantization for your video content. + // Include this setting in your JSON job specification only when you choose + // to change the default value for Adaptive quantization. For this setting, + // keep the default value, Enabled, to adjust quantization within each frame + // based on spatial variation of content complexity. When you enable this feature, + // the encoder uses fewer bits on areas that can sustain more distortion with + // no noticeable visual degradation and uses more bits on areas where any small + // distortion will be noticeable. For example, complex textured blocks are encoded + // with fewer bits and smooth textured blocks are encoded with more bits. Enabling + // this feature will almost always improve your video quality. Note, though, + // that this feature doesn't take into account where the viewer's attention + // is likely to be. If viewers are likely to be focusing their attention on + // a part of the screen with a lot of complex texture, you might choose to disable + // this feature. Related setting: When you enable spatial adaptive quantization, + // set the value for Adaptive quantization depending on your content. For homogeneous + // content, such as cartoons and video games, set it to Low. For content with + // a wider variety of textures, set it to High or Higher. SpatialAdaptiveQuantization *string `locationName:"spatialAdaptiveQuantization" type:"string" enum:"XavcSpatialAdaptiveQuantization"` // The best way to set up adaptive quantization is to keep the default value, - // Auto (AUTO), for the setting Adaptive quantization (adaptiveQuantization). - // When you do so, MediaConvert automatically applies the best types of quantization - // for your video content. Include this setting in your JSON job specification - // only when you choose to change the default value for Adaptive quantization. - // For this setting, keep the default value, Enabled (ENABLED), to adjust quantization - // within each frame based on temporal variation of content complexity. When - // you enable this feature, the encoder uses fewer bits on areas of the frame - // that aren't moving and uses more bits on complex objects with sharp edges - // that move a lot. For example, this feature improves the readability of text - // tickers on newscasts and scoreboards on sports matches. Enabling this feature - // will almost always improve your video quality. Note, though, that this feature - // doesn't take into account where the viewer's attention is likely to be. If - // viewers are likely to be focusing their attention on a part of the screen - // that doesn't have moving objects with sharp edges, such as sports athletes' - // faces, you might choose to disable this feature. Related setting: When you - // enable temporal adaptive quantization, adjust the strength of the filter - // with the setting Adaptive quantization (adaptiveQuantization). 
+ // Auto, for the setting Adaptive quantization. When you do so, MediaConvert + // automatically applies the best types of quantization for your video content. + // Include this setting in your JSON job specification only when you choose + // to change the default value for Adaptive quantization. For this setting, + // keep the default value, Enabled, to adjust quantization within each frame + // based on temporal variation of content complexity. When you enable this feature, + // the encoder uses fewer bits on areas of the frame that aren't moving and + // uses more bits on complex objects with sharp edges that move a lot. For example, + // this feature improves the readability of text tickers on newscasts and scoreboards + // on sports matches. Enabling this feature will almost always improve your + // video quality. Note, though, that this feature doesn't take into account + // where the viewer's attention is likely to be. If viewers are likely to be + // focusing their attention on a part of the screen that doesn't have moving + // objects with sharp edges, such as sports athletes' faces, you might choose + // to disable this feature. Related setting: When you enable temporal adaptive + // quantization, adjust the strength of the filter with the setting Adaptive + // quantization. TemporalAdaptiveQuantization *string `locationName:"temporalAdaptiveQuantization" type:"string" enum:"XavcTemporalAdaptiveQuantization"` - // Required when you set (Profile) under (VideoDescription)>(CodecSettings)>(XavcSettings) - // to the value XAVC_4K_INTRA_CBG. + // Required when you set Profile to the value XAVC_4K_INTRA_CBG. Xavc4kIntraCbgProfileSettings *Xavc4kIntraCbgProfileSettings `locationName:"xavc4kIntraCbgProfileSettings" type:"structure"` - // Required when you set (Profile) under (VideoDescription)>(CodecSettings)>(XavcSettings) - // to the value XAVC_4K_INTRA_VBR. + // Required when you set Profile to the value XAVC_4K_INTRA_VBR. Xavc4kIntraVbrProfileSettings *Xavc4kIntraVbrProfileSettings `locationName:"xavc4kIntraVbrProfileSettings" type:"structure"` - // Required when you set (Profile) under (VideoDescription)>(CodecSettings)>(XavcSettings) - // to the value XAVC_4K. + // Required when you set Profile to the value XAVC_4K. Xavc4kProfileSettings *Xavc4kProfileSettings `locationName:"xavc4kProfileSettings" type:"structure"` - // Required when you set (Profile) under (VideoDescription)>(CodecSettings)>(XavcSettings) - // to the value XAVC_HD_INTRA_CBG. + // Required when you set Profile to the value XAVC_HD_INTRA_CBG. XavcHdIntraCbgProfileSettings *XavcHdIntraCbgProfileSettings `locationName:"xavcHdIntraCbgProfileSettings" type:"structure"` - // Required when you set (Profile) under (VideoDescription)>(CodecSettings)>(XavcSettings) - // to the value XAVC_HD. + // Required when you set Profile to the value XAVC_HD. XavcHdProfileSettings *XavcHdProfileSettings `locationName:"xavcHdProfileSettings" type:"structure"` } @@ -27487,9 +27094,9 @@ func Ac3CodingMode_Values() []string { // Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert // uses when encoding the metadata in the Dolby Digital stream for the line // operating mode. Related setting: When you use this setting, MediaConvert -// ignores any value you provide for Dynamic range compression profile (DynamicRangeCompressionProfile). 
-// For information about the Dolby Digital DRC operating modes and profiles, -// see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf. +// ignores any value you provide for Dynamic range compression profile. For +// information about the Dolby Digital DRC operating modes and profiles, see +// the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf. const ( // Ac3DynamicRangeCompressionLineFilmStandard is a Ac3DynamicRangeCompressionLine enum value Ac3DynamicRangeCompressionLineFilmStandard = "FILM_STANDARD" @@ -27524,14 +27131,13 @@ func Ac3DynamicRangeCompressionLine_Values() []string { // When you want to add Dolby dynamic range compression (DRC) signaling to your // output stream, we recommend that you use the mode-specific settings instead -// of Dynamic range compression profile (DynamicRangeCompressionProfile). The -// mode-specific settings are Dynamic range compression profile, line mode (dynamicRangeCompressionLine) -// and Dynamic range compression profile, RF mode (dynamicRangeCompressionRf). -// Note that when you specify values for all three settings, MediaConvert ignores -// the value of this setting in favor of the mode-specific settings. If you -// do use this setting instead of the mode-specific settings, choose None (NONE) -// to leave out DRC signaling. Keep the default Film standard (FILM_STANDARD) -// to set the profile to Dolby's film standard profile for all operating modes. +// of Dynamic range compression profile. The mode-specific settings are Dynamic +// range compression profile, line mode and Dynamic range compression profile, +// RF mode. Note that when you specify values for all three settings, MediaConvert +// ignores the value of this setting in favor of the mode-specific settings. +// If you do use this setting instead of the mode-specific settings, choose +// None to leave out DRC signaling. Keep the default Film standard to set the +// profile to Dolby's film standard profile for all operating modes. const ( // Ac3DynamicRangeCompressionProfileFilmStandard is a Ac3DynamicRangeCompressionProfile enum value Ac3DynamicRangeCompressionProfileFilmStandard = "FILM_STANDARD" @@ -27551,9 +27157,9 @@ func Ac3DynamicRangeCompressionProfile_Values() []string { // Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert // uses when encoding the metadata in the Dolby Digital stream for the RF operating // mode. Related setting: When you use this setting, MediaConvert ignores any -// value you provide for Dynamic range compression profile (DynamicRangeCompressionProfile). -// For information about the Dolby Digital DRC operating modes and profiles, -// see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf. +// value you provide for Dynamic range compression profile. For information +// about the Dolby Digital DRC operating modes and profiles, see the Dynamic +// Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf. 
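// As a minimal sketch of the guidance above, a job specification built with
// this package might set both mode-specific DRC profiles directly (assumes
// imports "github.com/aws/aws-sdk-go/aws" and
// "github.com/aws/aws-sdk-go/service/mediaconvert", and the
// DynamicRangeCompressionLine and DynamicRangeCompressionRf fields of
// Ac3Settings); MediaConvert then ignores any value you provide for Dynamic
// range compression profile:
//
//	ac3 := &mediaconvert.Ac3Settings{
//		// Line-mode and RF-mode profiles, per the enums defined here.
//		DynamicRangeCompressionLine: aws.String(mediaconvert.Ac3DynamicRangeCompressionLineFilmStandard),
//		DynamicRangeCompressionRf:   aws.String(mediaconvert.Ac3DynamicRangeCompressionRfFilmStandard),
//	}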
const ( // Ac3DynamicRangeCompressionRfFilmStandard is a Ac3DynamicRangeCompressionRf enum value Ac3DynamicRangeCompressionRfFilmStandard = "FILM_STANDARD" @@ -27760,11 +27366,11 @@ func AdvancedInputFilterSharpen_Values() []string { } // This setting only applies to H.264, H.265, and MPEG2 outputs. Use Insert -// AFD signaling (AfdSignaling) to specify whether the service includes AFD -// values in the output video data and what those values are. * Choose None -// to remove all AFD values from this output. * Choose Fixed to ignore input -// AFD values and instead encode the value specified in the job. * Choose Auto -// to calculate output AFD values based on the input AFD scaler data. +// AFD signaling to specify whether the service includes AFD values in the output +// video data and what those values are. * Choose None to remove all AFD values +// from this output. * Choose Fixed to ignore input AFD values and instead encode +// the value specified in the job. * Choose Auto to calculate output AFD values +// based on the input AFD scaler data. const ( // AfdSignalingNone is a AfdSignaling enum value AfdSignalingNone = "NONE" @@ -27808,10 +27414,10 @@ func AlphaBehavior_Values() []string { } // Specify whether this set of input captions appears in your outputs in both -// 608 and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes -// the captions data in two ways: it passes the 608 data through using the 608 -// compatibility bytes fields of the 708 wrapper, and it also translates the -// 608 data into 708. +// 608 and 708 format. If you choose Upconvert, MediaConvert includes the captions +// data in two ways: it passes the 608 data through using the 608 compatibility +// bytes fields of the 708 wrapper, and it also translates the 608 data into +// 708. const ( // AncillaryConvert608To708Upconvert is a AncillaryConvert608To708 enum value AncillaryConvert608To708Upconvert = "UPCONVERT" @@ -27938,9 +27544,9 @@ func AudioChannelTag_Values() []string { } // Choose the audio codec for this output. Note that the option Dolby Digital -// passthrough (PASSTHROUGH) applies only to Dolby Digital and Dolby Digital -// Plus audio inputs. Make sure that you choose a codec that's supported with -// your output container: https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers.html#reference-codecs-containers-output-audio +// passthrough applies only to Dolby Digital and Dolby Digital Plus audio inputs. +// Make sure that you choose a codec that's supported with your output container: +// https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers.html#reference-codecs-containers-output-audio // For audio-only outputs, make sure that both your input audio codec and your // output audio codec are supported for audio-only workflows. For more information, // see: https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers-input.html#reference-codecs-containers-input-audio-only @@ -28055,11 +27661,11 @@ func AudioDurationCorrection_Values() []string { } // Specify which source for language code takes precedence for this audio track. -// When you choose Follow input (FOLLOW_INPUT), the service uses the language -// code from the input track if it's present. If there's no languge code on -// the input track, the service uses the code that you specify in the setting -// Language code (languageCode or customLanguageCode). When you choose Use configured -// (USE_CONFIGURED), the service uses the language code that you specify. 
+// When you choose Follow input, the service uses the language code from the +// input track if it's present. If there's no language code on the input track, +// the service uses the code that you specify in the setting Language code. +// When you choose Use configured, the service uses the language code that you +// specify. const ( // AudioLanguageCodeControlFollowInput is a AudioLanguageCodeControl enum value AudioLanguageCodeControlFollowInput = "FOLLOW_INPUT" @@ -28210,7 +27816,7 @@ func AudioTypeControl_Values() []string { } // Specify the strength of any adaptive quantization filters that you enable. -// The value that you choose here applies to Spatial adaptive quantization (spatialAdaptiveQuantization). +// The value that you choose here applies to Spatial adaptive quantization. const ( // Av1AdaptiveQuantizationOff is a Av1AdaptiveQuantization enum value Av1AdaptiveQuantizationOff = "OFF" @@ -28243,8 +27849,7 @@ func Av1AdaptiveQuantization_Values() []string { } } -// Specify the Bit depth (Av1BitDepth). You can choose 8-bit (BIT_8) or 10-bit -// (BIT_10). +// Specify the Bit depth. You can choose 8-bit or 10-bit. const ( // Av1BitDepthBit8 is a Av1BitDepth enum value Av1BitDepthBit8 = "BIT_8" @@ -28261,17 +27866,12 @@ func Av1BitDepth_Values() []string { } } -// If you are using the console, use the Framerate setting to specify the frame -// rate for this output. If you want to keep the same frame rate as the input -// video, choose Follow source. If you want to do frame rate conversion, choose -// a frame rate from the dropdown list or choose Custom. The framerates shown -// in the dropdown list are decimal approximations of fractions. If you choose -// Custom, specify your frame rate as a fraction. If you are creating your transcoding -// job specification as a JSON file without the console, use FramerateControl -// to specify which value the service uses for the frame rate for this output. -// Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate -// from the input. Choose SPECIFIED if you want the service to use the frame -// rate you specify in the settings FramerateNumerator and FramerateDenominator. +// Use the Framerate setting to specify the frame rate for this output. If you +// want to keep the same frame rate as the input video, choose Follow source. +// If you want to do frame rate conversion, choose a frame rate from the dropdown +// list or choose Custom. The framerates shown in the dropdown list are decimal +// approximations of fractions. If you choose Custom, specify your frame rate +// as a fraction. const ( // Av1FramerateControlInitializeFromSource is a Av1FramerateControl enum value Av1FramerateControlInitializeFromSource = "INITIALIZE_FROM_SOURCE" @@ -28333,21 +27933,20 @@ func Av1RateControlMode_Values() []string { } } -// Keep the default value, Enabled (ENABLED), to adjust quantization within -// each frame based on spatial variation of content complexity. When you enable -// this feature, the encoder uses fewer bits on areas that can sustain more -// distortion with no noticeable visual degradation and uses more bits on areas -// where any small distortion will be noticeable. For example, complex textured -// blocks are encoded with fewer bits and smooth textured blocks are encoded -// with more bits. Enabling this feature will almost always improve your video -// quality. Note, though, that this feature doesn't take into account where -// the viewer's attention is likely to be.
If viewers are likely to be focusing -// their attention on a part of the screen with a lot of complex texture, you -// might choose to disable this feature. Related setting: When you enable spatial -// adaptive quantization, set the value for Adaptive quantization (adaptiveQuantization) -// depending on your content. For homogeneous content, such as cartoons and -// video games, set it to Low. For content with a wider variety of textures, -// set it to High or Higher. +// Keep the default value, Enabled, to adjust quantization within each frame +// based on spatial variation of content complexity. When you enable this feature, +// the encoder uses fewer bits on areas that can sustain more distortion with +// no noticeable visual degradation and uses more bits on areas where any small +// distortion will be noticeable. For example, complex textured blocks are encoded +// with fewer bits and smooth textured blocks are encoded with more bits. Enabling +// this feature will almost always improve your video quality. Note, though, +// that this feature doesn't take into account where the viewer's attention +// is likely to be. If viewers are likely to be focusing their attention on +// a part of the screen with a lot of complex texture, you might choose to disable +// this feature. Related setting: When you enable spatial adaptive quantization, +// set the value for Adaptive quantization depending on your content. For homogeneous +// content, such as cartoons and video games, set it to Low. For content with +// a wider variety of textures, set it to High or Higher. const ( // Av1SpatialAdaptiveQuantizationDisabled is a Av1SpatialAdaptiveQuantization enum value Av1SpatialAdaptiveQuantizationDisabled = "DISABLED" @@ -28398,12 +27997,7 @@ func AvcIntraClass_Values() []string { // video, choose Follow source. If you want to do frame rate conversion, choose // a frame rate from the dropdown list or choose Custom. The framerates shown // in the dropdown list are decimal approximations of fractions. If you choose -// Custom, specify your frame rate as a fraction. If you are creating your transcoding -// job specification as a JSON file without the console, use FramerateControl -// to specify which value the service uses for the frame rate for this output. -// Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate -// from the input. Choose SPECIFIED if you want the service to use the frame -// rate you specify in the settings FramerateNumerator and FramerateDenominator. +// Custom, specify your frame rate as a fraction. const ( // AvcIntraFramerateControlInitializeFromSource is a AvcIntraFramerateControl enum value AvcIntraFramerateControlInitializeFromSource = "INITIALIZE_FROM_SOURCE" @@ -28452,17 +28046,16 @@ func AvcIntraFramerateConversionAlgorithm_Values() []string { } // Choose the scan line type for the output. Keep the default value, Progressive -// (PROGRESSIVE) to create a progressive output, regardless of the scan type -// of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) -// to create an output that's interlaced with the same field polarity throughout. -// Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) -// to produce outputs with the same field polarity as the source. For jobs that -// have multiple inputs, the output field polarity might change over the course -// of the output. Follow behavior depends on the input scan type. 
If the source -// is interlaced, the output will be interlaced with the same polarity as the -// source. If the source is progressive, the output will be interlaced with -// top field bottom field first, depending on which of the Follow options you -// choose. +// to create a progressive output, regardless of the scan type of your input. +// Use Top field first or Bottom field first to create an output that's interlaced +// with the same field polarity throughout. Use Follow, default top or Follow, +// default bottom to produce outputs with the same field polarity as the source. +// For jobs that have multiple inputs, the output field polarity might change +// over the course of the output. Follow behavior depends on the input scan +// type. If the source is interlaced, the output will be interlaced with the +// same polarity as the source. If the source is progressive, the output will +// be interlaced with top field or bottom field first, depending on which of +// the Follow options you choose. const ( // AvcIntraInterlaceModeProgressive is a AvcIntraInterlaceMode enum value AvcIntraInterlaceModeProgressive = "PROGRESSIVE" @@ -28493,17 +28086,16 @@ // Use this setting for interlaced outputs, when your output frame rate is half // of your input frame rate. In this situation, choose Optimized interlacing -// (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this -// case, each progressive frame from the input corresponds to an interlaced -// field in the output. Keep the default value, Basic interlacing (INTERLACED), -// for all other output frame rates. With basic interlacing, MediaConvert performs -// any frame rate conversion first and then interlaces the frames. When you -// choose Optimized interlacing and you set your output frame rate to a value -// that isn't suitable for optimized interlacing, MediaConvert automatically -// falls back to basic interlacing. Required settings: To use optimized interlacing, -// you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't -// use optimized interlacing for hard telecine outputs. You must also set Interlace -// mode (interlaceMode) to a value other than Progressive (PROGRESSIVE). +// to create a better quality interlaced output. In this case, each progressive +// frame from the input corresponds to an interlaced field in the output. Keep +// the default value, Basic interlacing, for all other output frame rates. With +// basic interlacing, MediaConvert performs any frame rate conversion first +// and then interlaces the frames. When you choose Optimized interlacing and +// you set your output frame rate to a value that isn't suitable for optimized +// interlacing, MediaConvert automatically falls back to basic interlacing. +// Required settings: To use optimized interlacing, you must set Telecine to +// None or Soft. You can't use optimized interlacing for hard telecine outputs. +// You must also set Interlace mode to a value other than Progressive. const ( // AvcIntraScanTypeConversionModeInterlaced is a AvcIntraScanTypeConversionMode enum value AvcIntraScanTypeConversionModeInterlaced = "INTERLACED" @@ -28525,9 +28117,7 @@ func AvcIntraScanTypeConversionMode_Values() []string { // slow PAL, MediaConvert relabels the video frames to 25 fps and resamples // your audio to keep it synchronized with the video. Note that enabling this // setting will slightly reduce the duration of your video. Required settings: -// You must also set Framerate to 25.
In your JSON job specification, set (framerateControl) -// to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to -// 1. +// You must also set Framerate to 25. const ( // AvcIntraSlowPalDisabled is a AvcIntraSlowPal enum value AvcIntraSlowPalDisabled = "DISABLED" @@ -28546,10 +28136,9 @@ func AvcIntraSlowPal_Values() []string { // When you do frame rate conversion from 23.976 frames per second (fps) to // 29.97 fps, and your output scan type is interlaced, you can optionally enable -// hard telecine (HARD) to create a smoother picture. When you keep the default -// value, None (NONE), MediaConvert does a standard frame rate conversion to -// 29.97 without doing anything with the field polarity to create a smoother -// picture. +// hard telecine to create a smoother picture. When you keep the default value, +// None, MediaConvert does a standard frame rate conversion to 29.97 without +// doing anything with the field polarity to create a smoother picture. const ( // AvcIntraTelecineNone is a AvcIntraTelecine enum value AvcIntraTelecineNone = "NONE" @@ -28566,12 +28155,12 @@ func AvcIntraTelecine_Values() []string { } } -// Optional. Use Quality tuning level (qualityTuningLevel) to choose how many -// transcoding passes MediaConvert does with your video. When you choose Multi-pass -// (MULTI_PASS), your video quality is better and your output bitrate is more -// accurate. That is, the actual bitrate of your output is closer to the target -// bitrate defined in the specification. When you choose Single-pass (SINGLE_PASS), -// your encoding time is faster. The default behavior is Single-pass (SINGLE_PASS). +// Optional. Use Quality tuning level to choose how many transcoding passes +// MediaConvert does with your video. When you choose Multi-pass, your video +// quality is better and your output bitrate is more accurate. That is, the +// actual bitrate of your output is closer to the target bitrate defined in +// the specification. When you choose Single-pass, your encoding time is faster. +// The default behavior is Single-pass. const ( // AvcIntraUhdQualityTuningLevelSinglePass is a AvcIntraUhdQualityTuningLevel enum value AvcIntraUhdQualityTuningLevelSinglePass = "SINGLE_PASS" @@ -28677,15 +28266,14 @@ func BillingTagsSource_Values() []string { } } -// Set Style passthrough (StylePassthrough) to ENABLED to use the available -// style, color, and position information from your input captions. MediaConvert -// uses default settings for any missing style and position information in your -// input captions. Set Style passthrough to DISABLED, or leave blank, to ignore -// the style and position information from your input captions and use default -// settings: white text with black outlining, bottom-center positioning, and -// automatic sizing. Whether you set Style passthrough to enabled or not, you -// can also choose to manually override any of the individual style and position -// settings. +// Set Style passthrough to ENABLED to use the available style, color, and position +// information from your input captions. MediaConvert uses default settings +// for any missing style and position information in your input captions. Set +// Style passthrough to DISABLED, or leave blank, to ignore the style and position +// information from your input captions and use default settings: white text +// with black outlining, bottom-center positioning, and automatic sizing. 
Whether +// you set Style passthrough to enabled or not, you can also choose to manually +// override any of the individual style and position settings. const ( // BurnInSubtitleStylePassthroughEnabled is a BurnInSubtitleStylePassthrough enum value BurnInSubtitleStylePassthroughEnabled = "ENABLED" @@ -28728,15 +28316,14 @@ func BurninSubtitleAlignment_Values() []string { } } -// Ignore this setting unless Style passthrough (StylePassthrough) is set to -// Enabled and Font color (FontColor) set to Black, Yellow, Red, Green, Blue, -// or Hex. Use Apply font color (ApplyFontColor) for additional font color controls. -// When you choose White text only (WHITE_TEXT_ONLY), or leave blank, your font -// color setting only applies to white text in your input captions. For example, -// if your font color setting is Yellow, and your input captions have red and -// white text, your output captions will have red and yellow text. When you -// choose ALL_TEXT, your font color setting applies to all of your output captions -// text. +// Ignore this setting unless Style passthrough is set to Enabled and Font color +// set to Black, Yellow, Red, Green, Blue, or Hex. Use Apply font color for +// additional font color controls. When you choose White text only, or leave +// blank, your font color setting only applies to white text in your input captions. +// For example, if your font color setting is Yellow, and your input captions +// have red and white text, your output captions will have red and yellow text. +// When you choose ALL_TEXT, your font color setting applies to all of your +// output captions text. const ( // BurninSubtitleApplyFontColorWhiteTextOnly is a BurninSubtitleApplyFontColor enum value BurninSubtitleApplyFontColorWhiteTextOnly = "WHITE_TEXT_ONLY" @@ -28754,9 +28341,8 @@ func BurninSubtitleApplyFontColor_Values() []string { } // Specify the color of the rectangle behind the captions. Leave background -// color (BackgroundColor) blank and set Style passthrough (StylePassthrough) -// to enabled to use the background color data from your input captions, if -// present. +// color blank and set Style passthrough to enabled to use the background color +// data from your input captions, if present. const ( // BurninSubtitleBackgroundColorNone is a BurninSubtitleBackgroundColor enum value BurninSubtitleBackgroundColorNone = "NONE" @@ -28783,12 +28369,12 @@ func BurninSubtitleBackgroundColor_Values() []string { // Specify the font that you want the service to use for your burn in captions // when your input captions specify a font that MediaConvert doesn't support. -// When you set Fallback font (FallbackFont) to best match (BEST_MATCH), or -// leave blank, MediaConvert uses a supported font that most closely matches -// the font that your input captions specify. When there are multiple unsupported -// fonts in your input captions, MediaConvert matches each font with the supported -// font that matches best. When you explicitly choose a replacement font, MediaConvert -// uses that font to replace all unsupported fonts from your input. +// When you set Fallback font to best match, or leave blank, MediaConvert uses +// a supported font that most closely matches the font that your input captions +// specify. When there are multiple unsupported fonts in your input captions, +// MediaConvert matches each font with the supported font that matches best. +// When you explicitly choose a replacement font, MediaConvert uses that font +// to replace all unsupported fonts from your input. 
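// A minimal, illustrative sketch of these burn-in caption settings in SDK
// code (assumes imports "github.com/aws/aws-sdk-go/aws" and
// "github.com/aws/aws-sdk-go/service/mediaconvert"; both enum constants
// appear in this file):
//
//	burnIn := &mediaconvert.BurninDestinationSettings{
//		// Keep style and position data from the input captions.
//		StylePassthrough: aws.String(mediaconvert.BurnInSubtitleStylePassthroughEnabled),
//		// Substitute the closest supported font for any unsupported input font.
//		FallbackFont: aws.String(mediaconvert.BurninSubtitleFallbackFontBestMatch),
//	}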
const ( // BurninSubtitleFallbackFontBestMatch is a BurninSubtitleFallbackFont enum value BurninSubtitleFallbackFontBestMatch = "BEST_MATCH" @@ -28817,9 +28403,9 @@ func BurninSubtitleFallbackFont_Values() []string { } } -// Specify the color of the burned-in captions text. Leave Font color (FontColor) -// blank and set Style passthrough (StylePassthrough) to enabled to use the -// font color data from your input captions, if present. +// Specify the color of the burned-in captions text. Leave Font color blank +// and set Style passthrough to enabled to use the font color data from your +// input captions, if present. const ( // BurninSubtitleFontColorWhite is a BurninSubtitleFontColor enum value BurninSubtitleFontColorWhite = "WHITE" @@ -28860,9 +28446,9 @@ func BurninSubtitleFontColor_Values() []string { } } -// Specify font outline color. Leave Outline color (OutlineColor) blank and -// set Style passthrough (StylePassthrough) to enabled to use the font outline -// color data from your input captions, if present. +// Specify font outline color. Leave Outline color blank and set Style passthrough +// to enabled to use the font outline color data from your input captions, if +// present. const ( // BurninSubtitleOutlineColorBlack is a BurninSubtitleOutlineColor enum value BurninSubtitleOutlineColorBlack = "BLACK" @@ -28900,8 +28486,8 @@ func BurninSubtitleOutlineColor_Values() []string { } // Specify the color of the shadow cast by the captions. Leave Shadow color -// (ShadowColor) blank and set Style passthrough (StylePassthrough) to enabled -// to use the shadow color data from your input captions, if present. +// blank and set Style passthrough to enabled to use the shadow color data from +// your input captions, if present. const ( // BurninSubtitleShadowColorNone is a BurninSubtitleShadowColor enum value BurninSubtitleShadowColorNone = "NONE" @@ -28926,11 +28512,10 @@ func BurninSubtitleShadowColor_Values() []string { } } -// Specify whether the text spacing (TeletextSpacing) in your captions is set -// by the captions grid, or varies depending on letter width. Choose fixed grid -// (FIXED_GRID) to conform to the spacing specified in the captions file more -// accurately. Choose proportional (PROPORTIONAL) to make the text easier to -// read for closed captions. +// Specify whether the text spacing in your captions is set by the captions +// grid, or varies depending on letter width. Choose fixed grid to conform to +// the spacing specified in the captions file more accurately. Choose proportional +// to make the text easier to read for closed captions. const ( // BurninSubtitleTeletextSpacingFixedGrid is a BurninSubtitleTeletextSpacing enum value BurninSubtitleTeletextSpacingFixedGrid = "FIXED_GRID" @@ -28956,9 +28541,8 @@ func BurninSubtitleTeletextSpacing_Values() []string { // constrains your choice of output captions format. For more information, see // https://docs.aws.amazon.com/mediaconvert/latest/ug/captions-support-tables.html. // If you are using SCTE-20 and you want to create an output that complies with -// the SCTE-43 spec, choose SCTE-20 plus embedded (SCTE20_PLUS_EMBEDDED). To -// create a non-compliant output where the embedded captions come first, choose -// Embedded plus SCTE-20 (EMBEDDED_PLUS_SCTE20). +// the SCTE-43 spec, choose SCTE-20 plus embedded. To create a non-compliant +// output where the embedded captions come first, choose Embedded plus SCTE-20. 
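// For example, a caption output that needs the SCTE-43-compliant ordering
// described above might be sketched as follows (assumes imports
// "github.com/aws/aws-sdk-go/aws" and
// "github.com/aws/aws-sdk-go/service/mediaconvert";
// CaptionDestinationTypeScte20PlusEmbedded is the SCTE20_PLUS_EMBEDDED member
// of the enum that begins below):
//
//	dest := &mediaconvert.CaptionDestinationSettings{
//		DestinationType: aws.String(mediaconvert.CaptionDestinationTypeScte20PlusEmbedded),
//	}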
const ( // CaptionDestinationTypeBurnIn is a CaptionDestinationType enum value CaptionDestinationTypeBurnIn = "BURN_IN" @@ -29035,8 +28619,8 @@ func CaptionSourceConvertPaintOnToPopOn_Values() []string { } } -// Use Source (SourceType) to identify the format of your input captions. The -// service cannot auto-detect caption format. +// Use Source to identify the format of your input captions. The service cannot +// auto-detect caption format. const ( // CaptionSourceTypeAncillary is a CaptionSourceType enum value CaptionSourceTypeAncillary = "ANCILLARY" @@ -29102,9 +28686,8 @@ func CaptionSourceType_Values() []string { } // Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no -// tag. Otherwise, keep the default value Enabled (ENABLED) and control caching -// in your video distribution set up. For example, use the Cache-Control http -// header. +// tag. Otherwise, keep the default value Enabled and control caching in your +// video distribution setup. For example, use the Cache-Control HTTP header. const ( // CmafClientCacheDisabled is a CmafClientCache enum value CmafClientCacheDisabled = "DISABLED" @@ -29140,7 +28723,7 @@ func CmafCodecSpecification_Values() []string { } // Specify the encryption scheme that you want the service to use when encrypting -// your CMAF segments. Choose AES-CBC subsample (SAMPLE-AES) or AES_CTR (AES-CTR). +// your CMAF segments. Choose AES-CBC subsample or AES_CTR. const ( // CmafEncryptionTypeSampleAes is a CmafEncryptionType enum value CmafEncryptionTypeSampleAes = "SAMPLE_AES" @@ -29158,16 +28741,15 @@ func CmafEncryptionType_Values() []string { } // Specify whether MediaConvert generates images for trick play. Keep the default -// value, None (NONE), to not generate any images. Choose Thumbnail (THUMBNAIL) -// to generate tiled thumbnails. Choose Thumbnail and full frame (THUMBNAIL_AND_FULLFRAME) -// to generate tiled thumbnails and full-resolution images of single frames. -// When you enable Write HLS manifest (WriteHlsManifest), MediaConvert creates -// a child manifest for each set of images that you generate and adds corresponding -// entries to the parent manifest. When you enable Write DASH manifest (WriteDashManifest), -// MediaConvert adds an entry in the .mpd manifest for each set of images that -// you generate. A common application for these images is Roku trick mode. The -// thumbnails and full-frame images that MediaConvert creates with this feature -// are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md +// value, None, to not generate any images. Choose Thumbnail to generate tiled +// thumbnails. Choose Thumbnail and full frame to generate tiled thumbnails +// and full-resolution images of single frames. When you enable Write HLS manifest, +// MediaConvert creates a child manifest for each set of images that you generate +// and adds corresponding entries to the parent manifest. When you enable Write +// DASH manifest, MediaConvert adds an entry in the .mpd manifest for each set +// of images that you generate. A common application for these images is Roku +// trick mode.
The thumbnails and full-frame images that MediaConvert creates +// with this feature are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md const ( // CmafImageBasedTrickPlayNone is a CmafImageBasedTrickPlay enum value CmafImageBasedTrickPlayNone = "NONE" @@ -29306,11 +28888,10 @@ func CmafMpdManifestBandwidthType_Values() []string { } // Specify whether your DASH profile is on-demand or main. When you choose Main -// profile (MAIN_PROFILE), the service signals urn:mpeg:dash:profile:isoff-main:2011 -// in your .mpd DASH manifest. When you choose On-demand (ON_DEMAND_PROFILE), -// the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd. -// When you choose On-demand, you must also set the output group setting Segment -// control (SegmentControl) to Single file (SINGLE_FILE). +// profile, the service signals urn:mpeg:dash:profile:isoff-main:2011 in your +// .mpd DASH manifest. When you choose On-demand, the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 +// in your .mpd. When you choose On-demand, you must also set the output group +// setting Segment control to Single file. const ( // CmafMpdProfileMainProfile is a CmafMpdProfile enum value CmafMpdProfileMainProfile = "MAIN_PROFILE" @@ -29330,12 +28911,12 @@ func CmafMpdProfile_Values() []string { // Use this setting only when your output video stream has B-frames, which causes // the initial presentation time stamp (PTS) to be offset from the initial decode // time stamp (DTS). Specify how MediaConvert handles PTS when writing time -// stamps in output DASH manifests. Choose Match initial PTS (MATCH_INITIAL_PTS) -// when you want MediaConvert to use the initial PTS as the first time stamp -// in the manifest. Choose Zero-based (ZERO_BASED) to have MediaConvert ignore -// the initial PTS in the video stream and instead write the initial time stamp -// as zero in the manifest. For outputs that don't have B-frames, the time stamps -// in your DASH manifests start at zero regardless of your choice here. +// stamps in output DASH manifests. Choose Match initial PTS when you want MediaConvert +// to use the initial PTS as the first time stamp in the manifest. Choose Zero-based +// to have MediaConvert ignore the initial PTS in the video stream and instead +// write the initial time stamp as zero in the manifest. For outputs that don't +// have B-frames, the time stamps in your DASH manifests start at zero regardless +// of your choice here. const ( // CmafPtsOffsetHandlingForBFramesZeroBased is a CmafPtsOffsetHandlingForBFrames enum value CmafPtsOffsetHandlingForBFramesZeroBased = "ZERO_BASED" @@ -29372,10 +28953,10 @@ func CmafSegmentControl_Values() []string { } // Specify how you want MediaConvert to determine the segment length. Choose -// Exact (EXACT) to have the encoder use the exact length that you specify with -// the setting Segment length (SegmentLength). This might result in extra I-frames. -// Choose Multiple of GOP (GOP_MULTIPLE) to have the encoder round up the segment -// lengths to match the next GOP boundary. +// Exact to have the encoder use the exact length that you specify with the +// setting Segment length. This might result in extra I-frames. Choose Multiple +// of GOP to have the encoder round up the segment lengths to match the next +// GOP boundary. 
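// As an illustration of the trade-off above, a CMAF output group that prefers
// GOP-aligned segments over exact lengths might look like this (a sketch;
// assumes imports "github.com/aws/aws-sdk-go/aws" and
// "github.com/aws/aws-sdk-go/service/mediaconvert";
// CmafSegmentLengthControlGopMultiple is the GOP_MULTIPLE member of the enum
// below):
//
//	cmaf := &mediaconvert.CmafGroupSettings{
//		// Nominal segment length, in seconds; each segment is rounded up
//		// to the next GOP boundary rather than forcing extra I-frames.
//		SegmentLength:        aws.Int64(6),
//		SegmentLengthControl: aws.String(mediaconvert.CmafSegmentLengthControlGopMultiple),
//	}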
const ( // CmafSegmentLengthControlExact is a CmafSegmentLengthControl enum value CmafSegmentLengthControlExact = "EXACT" @@ -29491,12 +29072,12 @@ func CmafWriteHLSManifest_Values() []string { } } -// When you enable Precise segment duration in DASH manifests (writeSegmentTimelineInRepresentation), -// your DASH manifest shows precise segment durations. The segment duration -// information appears inside the SegmentTimeline element, inside SegmentTemplate -// at the Representation level. When this feature isn't enabled, the segment -// durations in your DASH manifest are approximate. The segment duration information -// appears in the duration attribute of the SegmentTemplate element. +// When you enable Precise segment duration in DASH manifests, your DASH manifest +// shows precise segment durations. The segment duration information appears +// inside the SegmentTimeline element, inside SegmentTemplate at the Representation +// level. When this feature isn't enabled, the segment durations in your DASH +// manifest are approximate. The segment duration information appears in the +// duration attribute of the SegmentTemplate element. const ( // CmafWriteSegmentTimelineInRepresentationEnabled is a CmafWriteSegmentTimelineInRepresentation enum value CmafWriteSegmentTimelineInRepresentationEnabled = "ENABLED" @@ -29515,17 +29096,17 @@ func CmafWriteSegmentTimelineInRepresentation_Values() []string { // Specify this setting only when your output will be consumed by a downstream // repackaging workflow that is sensitive to very small duration differences -// between video and audio. For this situation, choose Match video duration -// (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default -// codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, -// MediaConvert pads the output audio streams with silence or trims them to -// ensure that the total duration of each audio stream is at least as long as -// the total duration of the video stream. After padding or trimming, the audio -// stream duration is no more than one frame longer than the video stream. MediaConvert -// applies audio padding or trimming only to the end of the last segment of -// the output. For unsegmented outputs, MediaConvert adds padding only to the -// end of the file. When you keep the default value, any minor discrepancies -// between audio and video duration will depend on your output audio codec. +// between video and audio. For this situation, choose Match video duration. +// In all other cases, keep the default value, Default codec duration. When +// you choose Match video duration, MediaConvert pads the output audio streams +// with silence or trims them to ensure that the total duration of each audio +// stream is at least as long as the total duration of the video stream. After +// padding or trimming, the audio stream duration is no more than one frame +// longer than the video stream. MediaConvert applies audio padding or trimming +// only to the end of the last segment of the output. For unsegmented outputs, +// MediaConvert adds padding only to the end of the file. When you keep the +// default value, any minor discrepancies between audio and video duration will +// depend on your output audio codec. 
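// For the duration-sensitive repackaging case above, the CMFC container
// settings would opt in to audio padding or trimming like this (a sketch;
// assumes imports "github.com/aws/aws-sdk-go/aws" and
// "github.com/aws/aws-sdk-go/service/mediaconvert";
// CmfcAudioDurationMatchVideoDuration is the MATCH_VIDEO_DURATION member of
// the enum below):
//
//	cmfc := &mediaconvert.CmfcSettings{
//		AudioDuration: aws.String(mediaconvert.CmfcAudioDurationMatchVideoDuration),
//	}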
const ( // CmfcAudioDurationDefaultCodecDuration is a CmfcAudioDuration enum value CmfcAudioDurationDefaultCodecDuration = "DEFAULT_CODEC_DURATION" @@ -29548,14 +29129,13 @@ func CmfcAudioDuration_Values() []string { // writes for the DEFAULT and AUTOSELECT attributes of the EXT-X-MEDIA entry // for the audio variant. For more information about these attributes, see the // Apple documentation article https://developer.apple.com/documentation/http_live_streaming/example_playlists_for_http_live_streaming/adding_alternate_media_to_a_playlist. -// Choose Alternate audio, auto select, default (ALTERNATE_AUDIO_AUTO_SELECT_DEFAULT) -// to set DEFAULT=YES and AUTOSELECT=YES. Choose this value for only one variant -// in your output group. Choose Alternate audio, auto select, not default (ALTERNATE_AUDIO_AUTO_SELECT) -// to set DEFAULT=NO and AUTOSELECT=YES. Choose Alternate Audio, Not Auto Select -// to set DEFAULT=NO and AUTOSELECT=NO. When you don't specify a value for this -// setting, MediaConvert defaults to Alternate audio, auto select, default. -// When there is more than one variant in your output group, you must explicitly -// choose a value for this setting. +// Choose Alternate audio, auto select, default to set DEFAULT=YES and AUTOSELECT=YES. +// Choose this value for only one variant in your output group. Choose Alternate +// audio, auto select, not default to set DEFAULT=NO and AUTOSELECT=YES. Choose +// Alternate Audio, Not Auto Select to set DEFAULT=NO and AUTOSELECT=NO. When +// you don't specify a value for this setting, MediaConvert defaults to Alternate +// audio, auto select, default. When there is more than one variant in your +// output group, you must explicitly choose a value for this setting. const ( // CmfcAudioTrackTypeAlternateAudioAutoSelectDefault is a CmfcAudioTrackType enum value CmfcAudioTrackTypeAlternateAudioAutoSelectDefault = "ALTERNATE_AUDIO_AUTO_SELECT_DEFAULT" @@ -29577,12 +29157,11 @@ func CmfcAudioTrackType_Values() []string { } // Specify whether to flag this audio track as descriptive video service (DVS) -// in your HLS parent manifest. When you choose Flag (FLAG), MediaConvert includes +// in your HLS parent manifest. When you choose Flag, MediaConvert includes // the parameter CHARACTERISTICS="public.accessibility.describes-video" in the // EXT-X-MEDIA entry for this track. When you keep the default choice, Don't -// flag (DONT_FLAG), MediaConvert leaves this parameter out. The DVS flag can -// help with accessibility on Apple devices. For more information, see the Apple -// documentation. +// flag, MediaConvert leaves this parameter out. The DVS flag can help with +// accessibility on Apple devices. For more information, see the Apple documentation. const ( // CmfcDescriptiveVideoServiceFlagDontFlag is a CmfcDescriptiveVideoServiceFlag enum value CmfcDescriptiveVideoServiceFlagDontFlag = "DONT_FLAG" @@ -29599,13 +29178,13 @@ func CmfcDescriptiveVideoServiceFlag_Values() []string { } } -// Choose Include (INCLUDE) to have MediaConvert generate an HLS child manifest -// that lists only the I-frames for this rendition, in addition to your regular -// manifest for this rendition. You might use this manifest as part of a workflow -// that creates preview functions for your video. MediaConvert adds both the -// I-frame only child manifest and the regular child manifest to the parent -// manifest. When you don't need the I-frame only child manifest, keep the default -// value Exclude (EXCLUDE). 
+// Choose Include to have MediaConvert generate an HLS child manifest that lists +// only the I-frames for this rendition, in addition to your regular manifest +// for this rendition. You might use this manifest as part of a workflow that +// creates preview functions for your video. MediaConvert adds both the I-frame +// only child manifest and the regular child manifest to the parent manifest. +// When you don't need the I-frame only child manifest, keep the default value +// Exclude. const ( // CmfcIFrameOnlyManifestInclude is a CmfcIFrameOnlyManifest enum value CmfcIFrameOnlyManifestInclude = "INCLUDE" @@ -29651,7 +29230,7 @@ func CmfcKlvMetadata_Values() []string { // To leave these elements out of your output MPD manifest, set Manifest metadata // signaling to Disabled. To enable Manifest metadata signaling, you must also // set SCTE-35 source to Passthrough, ESAM SCTE-35 to insert, or ID3 metadata -// (TimedMetadata) to Passthrough. +// to Passthrough. const ( // CmfcManifestMetadataSignalingEnabled is a CmfcManifestMetadataSignaling enum value CmfcManifestMetadataSignalingEnabled = "ENABLED" @@ -29671,7 +29250,7 @@ func CmfcManifestMetadataSignaling_Values() []string { // Use this setting only when you specify SCTE-35 markers from ESAM. Choose // INSERT to put SCTE-35 markers in this output at the insertion points that // you specify in an ESAM XML document. Provide the document in the setting -// SCC XML (sccXml). +// SCC XML. const ( // CmfcScte35EsamInsert is a CmfcScte35Esam enum value CmfcScte35EsamInsert = "INSERT" @@ -29689,9 +29268,9 @@ func CmfcScte35Esam_Values() []string { } // Ignore this setting unless you have SCTE-35 markers in your input video file. -// Choose Passthrough (PASSTHROUGH) if you want SCTE-35 markers that appear -// in your input to also appear in this output. Choose None (NONE) if you don't -// want those SCTE-35 markers in this output. +// Choose Passthrough if you want SCTE-35 markers that appear in your input +// to also appear in this output. Choose None if you don't want those SCTE-35 +// markers in this output. const ( // CmfcScte35SourcePassthrough is a CmfcScte35Source enum value CmfcScte35SourcePassthrough = "PASSTHROUGH" @@ -29708,11 +29287,10 @@ func CmfcScte35Source_Values() []string { } } -// To include ID3 metadata in this output: Set ID3 metadata (timedMetadata) -// to Passthrough (PASSTHROUGH). Specify this ID3 metadata in Custom ID3 metadata -// inserter (timedMetadataInsertion). MediaConvert writes each instance of ID3 -// metadata in a separate Event Message (eMSG) box. To exclude this ID3 metadata: -// Set ID3 metadata to None (NONE) or leave blank. +// To include ID3 metadata in this output: Set ID3 metadata to Passthrough. +// Specify this ID3 metadata in Custom ID3 metadata inserter. MediaConvert writes +// each instance of ID3 metadata in a separate Event Message (eMSG) box. To +// exclude this ID3 metadata: Set ID3 metadata to None or leave blank. const ( // CmfcTimedMetadataPassthrough is a CmfcTimedMetadata enum value CmfcTimedMetadataPassthrough = "PASSTHROUGH" @@ -29732,7 +29310,7 @@ func CmfcTimedMetadata_Values() []string { // Specify the event message box (eMSG) version for ID3 timed metadata in your // output. For more information, see ISO/IEC 23009-1:2022 section 5.10.3.3.3 // Syntax. Leave blank to use the default value Version 0. When you specify Version -// 1, you must also set ID3 metadata (timedMetadata) to Passthrough. +// 1, you must also set ID3 metadata to Passthrough.
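// Following the note above, choosing eMSG box Version 1 only works together
// with ID3 metadata passthrough; a minimal sketch (assumes imports
// "github.com/aws/aws-sdk-go/aws" and
// "github.com/aws/aws-sdk-go/service/mediaconvert", and the
// TimedMetadataBoxVersion field of CmfcSettings;
// CmfcTimedMetadataBoxVersionVersion1 is the VERSION_1 member of the enum
// below):
//
//	cmfc := &mediaconvert.CmfcSettings{
//		TimedMetadata:           aws.String(mediaconvert.CmfcTimedMetadataPassthrough),
//		TimedMetadataBoxVersion: aws.String(mediaconvert.CmfcTimedMetadataBoxVersionVersion1),
//	}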
const ( // CmfcTimedMetadataBoxVersionVersion0 is a CmfcTimedMetadataBoxVersion enum value CmfcTimedMetadataBoxVersionVersion0 = "VERSION_0" @@ -29749,9 +29327,9 @@ func CmfcTimedMetadataBoxVersion_Values() []string { } } -// Choose Insert (INSERT) for this setting to include color metadata in this -// output. Choose Ignore (IGNORE) to exclude color metadata from this output. -// If you don't specify a value, the service sets this to Insert by default. +// Choose Insert for this setting to include color metadata in this output. +// Choose Ignore to exclude color metadata from this output. If you don't specify +// a value, the service sets this to Insert by default. const ( // ColorMetadataIgnore is a ColorMetadata enum value ColorMetadataIgnore = "IGNORE" @@ -29875,13 +29453,13 @@ func ColorSpaceConversion_Values() []string { } // There are two sources for color metadata, the input file and the job input -// settings Color space (ColorSpace) and HDR master display information settings(Hdr10Metadata). -// The Color space usage setting determines which takes precedence. Choose Force -// (FORCE) to use color metadata from the input job settings. If you don't specify -// values for those settings, the service defaults to using metadata from your -// input. FALLBACK - Choose Fallback (FALLBACK) to use color metadata from the -// source when it is present. If there's no color metadata in your input file, -// the service defaults to using values you specify in the input settings. +// settings Color space and HDR master display information settings. The Color +// space usage setting determines which takes precedence. Choose Force to use +// color metadata from the input job settings. If you don't specify values for +// those settings, the service defaults to using metadata from your input. +// Choose Fallback to use color metadata from the source when it is present. +// If there's no color metadata in your input file, the service defaults to +// using values you specify in the input settings. const ( // ColorSpaceUsageForce is a ColorSpaceUsage enum value ColorSpaceUsageForce = "FORCE" @@ -29989,10 +29567,9 @@ func CopyProtectionAction_Values() []string { // the Dolby channel configuration tag, rather than the MPEG one. For example, // you might need to use this to make dynamic ad insertion work. Specify which // audio channel configuration scheme ID URI MediaConvert writes in your DASH -// manifest. Keep the default value, MPEG channel configuration (MPEG_CHANNEL_CONFIGURATION), -// to have MediaConvert write this: urn:mpeg:mpegB:cicp:ChannelConfiguration. -// Choose Dolby channel configuration (DOLBY_CHANNEL_CONFIGURATION) to have -// MediaConvert write this instead: tag:dolby.com,2014:dash:audio_channel_configuration:2011. +// manifest. Keep the default value, MPEG channel configuration, to have MediaConvert +// write this: urn:mpeg:mpegB:cicp:ChannelConfiguration. Choose Dolby channel +// configuration to have MediaConvert write this instead: tag:dolby.com,2014:dash:audio_channel_configuration:2011. const ( // DashIsoGroupAudioChannelConfigSchemeIdUriMpegChannelConfiguration is a DashIsoGroupAudioChannelConfigSchemeIdUri enum value DashIsoGroupAudioChannelConfigSchemeIdUriMpegChannelConfiguration = "MPEG_CHANNEL_CONFIGURATION" @@ -30027,13 +29604,13 @@ func DashIsoHbbtvCompliance_Values() []string { } // Specify whether MediaConvert generates images for trick play. Keep the default -// value, None (NONE), to not generate any images.
Choose Thumbnail (THUMBNAIL) -// to generate tiled thumbnails. Choose Thumbnail and full frame (THUMBNAIL_AND_FULLFRAME) -// to generate tiled thumbnails and full-resolution images of single frames. -// MediaConvert adds an entry in the .mpd manifest for each set of images that -// you generate. A common application for these images is Roku trick mode. The -// thumbnails and full-frame images that MediaConvert creates with this feature -// are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md +// value, None, to not generate any images. Choose Thumbnail to generate tiled +// thumbnails. Choose Thumbnail and full frame to generate tiled thumbnails +// and full-resolution images of single frames. MediaConvert adds an entry in +// the .mpd manifest for each set of images that you generate. A common application +// for these images is Roku trick mode. The thumbnails and full-frame images +// that MediaConvert creates with this feature are compatible with this Roku +// specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md const ( // DashIsoImageBasedTrickPlayNone is a DashIsoImageBasedTrickPlay enum value DashIsoImageBasedTrickPlayNone = "NONE" @@ -30101,11 +29678,10 @@ func DashIsoMpdManifestBandwidthType_Values() []string { } // Specify whether your DASH profile is on-demand or main. When you choose Main -// profile (MAIN_PROFILE), the service signals urn:mpeg:dash:profile:isoff-main:2011 -// in your .mpd DASH manifest. When you choose On-demand (ON_DEMAND_PROFILE), -// the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd. -// When you choose On-demand, you must also set the output group setting Segment -// control (SegmentControl) to Single file (SINGLE_FILE). +// profile, the service signals urn:mpeg:dash:profile:isoff-main:2011 in your +// .mpd DASH manifest. When you choose On-demand, the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 +// in your .mpd. When you choose On-demand, you must also set the output group +// setting Segment control to Single file. const ( // DashIsoMpdProfileMainProfile is a DashIsoMpdProfile enum value DashIsoMpdProfileMainProfile = "MAIN_PROFILE" @@ -30124,10 +29700,10 @@ func DashIsoMpdProfile_Values() []string { // This setting can improve the compatibility of your output with video players // on obsolete devices. It applies only to DASH H.264 outputs with DRM encryption. -// Choose Unencrypted SEI (UNENCRYPTED_SEI) only to correct problems with playback -// on older devices. Otherwise, keep the default setting CENC v1 (CENC_V1). -// If you choose Unencrypted SEI, for that output, the service will exclude -// the access unit delimiter and will leave the SEI NAL units unencrypted. +// Choose Unencrypted SEI only to correct problems with playback on older devices. +// Otherwise, keep the default setting CENC v1. If you choose Unencrypted SEI, +// for that output, the service will exclude the access unit delimiter and will +// leave the SEI NAL units unencrypted. const ( // DashIsoPlaybackDeviceCompatibilityCencV1 is a DashIsoPlaybackDeviceCompatibility enum value DashIsoPlaybackDeviceCompatibilityCencV1 = "CENC_V1" @@ -30147,12 +29723,12 @@ func DashIsoPlaybackDeviceCompatibility_Values() []string { // Use this setting only when your output video stream has B-frames, which causes // the initial presentation time stamp (PTS) to be offset from the initial decode // time stamp (DTS). 
Specify how MediaConvert handles PTS when writing time -// stamps in output DASH manifests. Choose Match initial PTS (MATCH_INITIAL_PTS) -// when you want MediaConvert to use the initial PTS as the first time stamp -// in the manifest. Choose Zero-based (ZERO_BASED) to have MediaConvert ignore -// the initial PTS in the video stream and instead write the initial time stamp -// as zero in the manifest. For outputs that don't have B-frames, the time stamps -// in your DASH manifests start at zero regardless of your choice here. +// stamps in output DASH manifests. Choose Match initial PTS when you want MediaConvert +// to use the initial PTS as the first time stamp in the manifest. Choose Zero-based +// to have MediaConvert ignore the initial PTS in the video stream and instead +// write the initial time stamp as zero in the manifest. For outputs that don't +// have B-frames, the time stamps in your DASH manifests start at zero regardless +// of your choice here. const ( // DashIsoPtsOffsetHandlingForBFramesZeroBased is a DashIsoPtsOffsetHandlingForBFrames enum value DashIsoPtsOffsetHandlingForBFramesZeroBased = "ZERO_BASED" @@ -30189,10 +29765,10 @@ func DashIsoSegmentControl_Values() []string { } // Specify how you want MediaConvert to determine the segment length. Choose -// Exact (EXACT) to have the encoder use the exact length that you specify with -// the setting Segment length (SegmentLength). This might result in extra I-frames. -// Choose Multiple of GOP (GOP_MULTIPLE) to have the encoder round up the segment -// lengths to match the next GOP boundary. +// Exact to have the encoder use the exact length that you specify with the +// setting Segment length. This might result in extra I-frames. Choose Multiple +// of GOP to have the encoder round up the segment lengths to match the next +// GOP boundary. const ( // DashIsoSegmentLengthControlExact is a DashIsoSegmentLengthControl enum value DashIsoSegmentLengthControlExact = "EXACT" @@ -30232,12 +29808,12 @@ func DashIsoVideoCompositionOffsets_Values() []string { } } -// When you enable Precise segment duration in manifests (writeSegmentTimelineInRepresentation), -// your DASH manifest shows precise segment durations. The segment duration -// information appears inside the SegmentTimeline element, inside SegmentTemplate -// at the Representation level. When this feature isn't enabled, the segment -// durations in your DASH manifest are approximate. The segment duration information -// appears in the duration attribute of the SegmentTemplate element. +// When you enable Precise segment duration in manifests, your DASH manifest +// shows precise segment durations. The segment duration information appears +// inside the SegmentTimeline element, inside SegmentTemplate at the Representation +// level. When this feature isn't enabled, the segment durations in your DASH +// manifest are approximate. The segment duration information appears in the +// duration attribute of the SegmentTemplate element. const ( // DashIsoWriteSegmentTimelineInRepresentationEnabled is a DashIsoWriteSegmentTimelineInRepresentation enum value DashIsoWriteSegmentTimelineInRepresentationEnabled = "ENABLED" @@ -30360,10 +29936,10 @@ func DeinterlacerControl_Values() []string { } } -// Use Deinterlacer (DeinterlaceMode) to choose how the service will do deinterlacing. -// Default is Deinterlace. - Deinterlace converts interlaced to progressive. -// - Inverse telecine converts Hard Telecine 29.97i to progressive 23.976p. 
-// - Adaptive auto-detects and converts to progressive. +// Use Deinterlacer to choose how the service will do deinterlacing. Default +// is Deinterlace. - Deinterlace converts interlaced to progressive. - Inverse +// telecine converts Hard Telecine 29.97i to progressive 23.976p. - Adaptive +// auto-detects and converts to progressive. const ( // DeinterlacerModeDeinterlace is a DeinterlacerMode enum value DeinterlacerModeDeinterlace = "DEINTERLACE" @@ -30475,7 +30051,7 @@ func DolbyVisionProfile_Values() []string { // Applies only to 29.97 fps outputs. When this feature is enabled, the service // will use drop-frame timecode on outputs. If it is not possible to use drop-frame // timecode, the system will fall back to non-drop-frame. This setting is enabled -// by default when Timecode insertion (TimecodeInsertion) is enabled. +// by default when Timecode insertion is enabled. const ( // DropFrameTimecodeDisabled is a DropFrameTimecode enum value DropFrameTimecodeDisabled = "DISABLED" @@ -30494,12 +30070,12 @@ func DropFrameTimecode_Values() []string { // Specify the font that you want the service to use for your burn in captions // when your input captions specify a font that MediaConvert doesn't support. -// When you set Fallback font (FallbackFont) to best match (BEST_MATCH), or -// leave blank, MediaConvert uses a supported font that most closely matches -// the font that your input captions specify. When there are multiple unsupported -// fonts in your input captions, MediaConvert matches each font with the supported -// font that matches best. When you explicitly choose a replacement font, MediaConvert -// uses that font to replace all unsupported fonts from your input. +// When you set Fallback font to best match, or leave blank, MediaConvert uses +// a supported font that most closely matches the font that your input captions +// specify. When there are multiple unsupported fonts in your input captions, +// MediaConvert matches each font with the supported font that matches best. +// When you explicitly choose a replacement font, MediaConvert uses that font +// to replace all unsupported fonts from your input. const ( // DvbSubSubtitleFallbackFontBestMatch is a DvbSubSubtitleFallbackFont enum value DvbSubSubtitleFallbackFontBestMatch = "BEST_MATCH" @@ -30555,15 +30131,14 @@ func DvbSubtitleAlignment_Values() []string { } } -// Ignore this setting unless Style Passthrough (StylePassthrough) is set to -// Enabled and Font color (FontColor) set to Black, Yellow, Red, Green, Blue, -// or Hex. Use Apply font color (ApplyFontColor) for additional font color controls. -// When you choose White text only (WHITE_TEXT_ONLY), or leave blank, your font -// color setting only applies to white text in your input captions. For example, -// if your font color setting is Yellow, and your input captions have red and -// white text, your output captions will have red and yellow text. When you -// choose ALL_TEXT, your font color setting applies to all of your output captions -// text. +// Ignore this setting unless Style Passthrough is set to Enabled and Font color +// is set to Black, Yellow, Red, Green, Blue, or Hex. Use Apply font color for +// additional font color controls. When you choose White text only, or leave +// blank, your font color setting only applies to white text in your input captions. +// For example, if your font color setting is Yellow, and your input captions +// have red and white text, your output captions will have red and yellow text.
+// When you choose ALL_TEXT, your font color setting applies to all of your +// output captions text. const ( // DvbSubtitleApplyFontColorWhiteTextOnly is a DvbSubtitleApplyFontColor enum value DvbSubtitleApplyFontColorWhiteTextOnly = "WHITE_TEXT_ONLY" @@ -30581,9 +30156,8 @@ func DvbSubtitleApplyFontColor_Values() []string { } // Specify the color of the rectangle behind the captions. Leave background -// color (BackgroundColor) blank and set Style passthrough (StylePassthrough) -// to enabled to use the background color data from your input captions, if -// present. +// color blank and set Style passthrough to enabled to use the background color +// data from your input captions, if present. const ( // DvbSubtitleBackgroundColorNone is a DvbSubtitleBackgroundColor enum value DvbSubtitleBackgroundColorNone = "NONE" @@ -30608,10 +30182,10 @@ func DvbSubtitleBackgroundColor_Values() []string { } } -// Specify the color of the captions text. Leave Font color (FontColor) blank -// and set Style passthrough (StylePassthrough) to enabled to use the font color -// data from your input captions, if present. Within your job settings, all -// of your DVB-Sub settings must be identical. +// Specify the color of the captions text. Leave Font color blank and set Style +// passthrough to enabled to use the font color data from your input captions, +// if present. Within your job settings, all of your DVB-Sub settings must be +// identical. const ( // DvbSubtitleFontColorWhite is a DvbSubtitleFontColor enum value DvbSubtitleFontColorWhite = "WHITE" @@ -30652,10 +30226,9 @@ func DvbSubtitleFontColor_Values() []string { } } -// Specify font outline color. Leave Outline color (OutlineColor) blank and -// set Style passthrough (StylePassthrough) to enabled to use the font outline -// color data from your input captions, if present. Within your job settings, -// all of your DVB-Sub settings must be identical. +// Specify font outline color. Leave Outline color blank and set Style passthrough +// to enabled to use the font outline color data from your input captions, if +// present. Within your job settings, all of your DVB-Sub settings must be identical. const ( // DvbSubtitleOutlineColorBlack is a DvbSubtitleOutlineColor enum value DvbSubtitleOutlineColorBlack = "BLACK" @@ -30693,9 +30266,9 @@ func DvbSubtitleOutlineColor_Values() []string { } // Specify the color of the shadow cast by the captions. Leave Shadow color -// (ShadowColor) blank and set Style passthrough (StylePassthrough) to enabled -// to use the shadow color data from your input captions, if present. Within -// your job settings, all of your DVB-Sub settings must be identical. +// blank and set Style passthrough to enabled to use the shadow color data from +// your input captions, if present. Within your job settings, all of your DVB-Sub +// settings must be identical. const ( // DvbSubtitleShadowColorNone is a DvbSubtitleShadowColor enum value DvbSubtitleShadowColorNone = "NONE" @@ -30720,15 +30293,14 @@ func DvbSubtitleShadowColor_Values() []string { } } -// Set Style passthrough (StylePassthrough) to ENABLED to use the available -// style, color, and position information from your input captions. MediaConvert -// uses default settings for any missing style and position information in your -// input captions. 
Set Style passthrough to DISABLED, or leave blank, to ignore -// the style and position information from your input captions and use default -// settings: white text with black outlining, bottom-center positioning, and -// automatic sizing. Whether you set Style passthrough to enabled or not, you -// can also choose to manually override any of the individual style and position -// settings. +// Set Style passthrough to ENABLED to use the available style, color, and position +// information from your input captions. MediaConvert uses default settings +// for any missing style and position information in your input captions. Set +// Style passthrough to DISABLED, or leave blank, to ignore the style and position +// information from your input captions and use default settings: white text +// with black outlining, bottom-center positioning, and automatic sizing. Whether +// you set Style passthrough to enabled or not, you can also choose to manually +// override any of the individual style and position settings. const ( // DvbSubtitleStylePassthroughEnabled is a DvbSubtitleStylePassthrough enum value DvbSubtitleStylePassthroughEnabled = "ENABLED" @@ -30745,12 +30317,11 @@ func DvbSubtitleStylePassthrough_Values() []string { } } -// Specify whether the Text spacing (TeletextSpacing) in your captions is set -// by the captions grid, or varies depending on letter width. Choose fixed grid -// (FIXED_GRID) to conform to the spacing specified in the captions file more -// accurately. Choose proportional (PROPORTIONAL) to make the text easier to -// read for closed captions. Within your job settings, all of your DVB-Sub settings -// must be identical. +// Specify whether the Text spacing in your captions is set by the captions +// grid, or varies depending on letter width. Choose fixed grid to conform to +// the spacing specified in the captions file more accurately. Choose proportional +// to make the text easier to read for closed captions. Within your job settings, +// all of your DVB-Sub settings must be identical. const ( // DvbSubtitleTeletextSpacingFixedGrid is a DvbSubtitleTeletextSpacing enum value DvbSubtitleTeletextSpacingFixedGrid = "FIXED_GRID" @@ -30878,15 +30449,13 @@ func Eac3AtmosDialogueIntelligence_Values() []string { } // Specify whether MediaConvert should use any downmix metadata from your input -// file. Keep the default value, Custom (SPECIFIED) to provide downmix values -// in your job settings. Choose Follow source (INITIALIZE_FROM_SOURCE) to use -// the metadata from your input. Related settings--Use these settings to specify -// your downmix values: Left only/Right only surround (LoRoSurroundMixLevel), -// Left total/Right total surround (LtRtSurroundMixLevel), Left total/Right -// total center (LtRtCenterMixLevel), Left only/Right only center (LoRoCenterMixLevel), -// and Stereo downmix (StereoDownmix). When you keep Custom (SPECIFIED) for -// Downmix control (DownmixControl) and you don't specify values for the related -// settings, MediaConvert uses default values for those settings. +// file. Keep the default value, Custom, to provide downmix values in your job +// settings. Choose Follow source to use the metadata from your input. Related +// settings--Use these settings to specify your downmix values: Left only/Right +// only surround, Left total/Right total surround, Left total/Right total center, +// Left only/Right only center, and Stereo downmix.
When you keep Custom for +// Downmix control and you don't specify values for the related settings, MediaConvert +// uses default values for those settings. const ( // Eac3AtmosDownmixControlSpecified is a Eac3AtmosDownmixControl enum value Eac3AtmosDownmixControlSpecified = "SPECIFIED" @@ -30905,12 +30474,11 @@ func Eac3AtmosDownmixControl_Values() []string { // Choose the Dolby dynamic range control (DRC) profile that MediaConvert uses // when encoding the metadata in the Dolby stream for the line operating mode. -// Default value: Film light (ATMOS_STORAGE_DDP_COMPR_FILM_LIGHT) Related setting: -// To have MediaConvert use the value you specify here, keep the default value, -// Custom (SPECIFIED) for the setting Dynamic range control (DynamicRangeControl). -// Otherwise, MediaConvert ignores Dynamic range compression line (DynamicRangeCompressionLine). -// For information about the Dolby DRC operating modes and profiles, see the -// Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf. +// Default value: Film light. Related setting: To have MediaConvert use the value +// you specify here, keep the default value, Custom, for the setting Dynamic +// range control. Otherwise, MediaConvert ignores Dynamic range compression +// line. For information about the Dolby DRC operating modes and profiles, see +// the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf. const ( // Eac3AtmosDynamicRangeCompressionLineNone is a Eac3AtmosDynamicRangeCompressionLine enum value Eac3AtmosDynamicRangeCompressionLineNone = "NONE" @@ -30945,12 +30513,11 @@ func Eac3AtmosDynamicRangeCompressionLine_Values() []string { // Choose the Dolby dynamic range control (DRC) profile that MediaConvert uses // when encoding the metadata in the Dolby stream for the RF operating mode. -// Default value: Film light (ATMOS_STORAGE_DDP_COMPR_FILM_LIGHT) Related setting: -// To have MediaConvert use the value you specify here, keep the default value, -// Custom (SPECIFIED) for the setting Dynamic range control (DynamicRangeControl). -// Otherwise, MediaConvert ignores Dynamic range compression RF (DynamicRangeCompressionRf). -// For information about the Dolby DRC operating modes and profiles, see the -// Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf. +// Default value: Film light. Related setting: To have MediaConvert use the value +// you specify here, keep the default value, Custom, for the setting Dynamic +// range control. Otherwise, MediaConvert ignores Dynamic range compression +// RF. For information about the Dolby DRC operating modes and profiles, see +// the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf. const ( // Eac3AtmosDynamicRangeCompressionRfNone is a Eac3AtmosDynamicRangeCompressionRf enum value Eac3AtmosDynamicRangeCompressionRfNone = "NONE" @@ -30984,12 +30551,11 @@ func Eac3AtmosDynamicRangeCompressionRf_Values() []string { } // Specify whether MediaConvert should use any dynamic range control metadata -// from your input file. Keep the default value, Custom (SPECIFIED), to provide -// dynamic range control values in your job settings.
Choose Follow source (INITIALIZE_FROM_SOURCE) -// to use the metadata from your input. Related settings--Use these settings -// to specify your dynamic range control values: Dynamic range compression line -// (DynamicRangeCompressionLine) and Dynamic range compression RF (DynamicRangeCompressionRf). -// When you keep the value Custom (SPECIFIED) for Dynamic range control (DynamicRangeControl) +// from your input file. Keep the default value, Custom, to provide dynamic +// range control values in your job settings. Choose Follow source to use the +// metadata from your input. Related settings--Use these settings to specify +// your dynamic range control values: Dynamic range compression line and Dynamic +// range compression RF. When you keep the value Custom for Dynamic range control // and you don't specify values for the related settings, MediaConvert uses // default values for those settings. const ( @@ -31038,10 +30604,9 @@ func Eac3AtmosMeteringMode_Values() []string { } // Choose how the service does stereo downmixing. Default value: Not indicated -// (ATMOS_STORAGE_DDP_DMIXMOD_NOT_INDICATED) Related setting: To have MediaConvert -// use this value, keep the default value, Custom (SPECIFIED) for the setting -// Downmix control (DownmixControl). Otherwise, MediaConvert ignores Stereo -// downmix (StereoDownmix). +// Related setting: To have MediaConvert use this value, keep the default value, +// Custom for the setting Downmix control. Otherwise, MediaConvert ignores Stereo +// downmix. const ( // Eac3AtmosStereoDownmixNotIndicated is a Eac3AtmosStereoDownmix enum value Eac3AtmosStereoDownmixNotIndicated = "NOT_INDICATED" @@ -31178,9 +30743,9 @@ func Eac3DcFilter_Values() []string { // Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert // uses when encoding the metadata in the Dolby Digital stream for the line // operating mode. Related setting: When you use this setting, MediaConvert -// ignores any value you provide for Dynamic range compression profile (DynamicRangeCompressionProfile). -// For information about the Dolby Digital DRC operating modes and profiles, -// see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf. +// ignores any value you provide for Dynamic range compression profile. For +// information about the Dolby Digital DRC operating modes and profiles, see +// the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf. const ( // Eac3DynamicRangeCompressionLineNone is a Eac3DynamicRangeCompressionLine enum value Eac3DynamicRangeCompressionLineNone = "NONE" @@ -31216,9 +30781,9 @@ func Eac3DynamicRangeCompressionLine_Values() []string { // Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert // uses when encoding the metadata in the Dolby Digital stream for the RF operating // mode. Related setting: When you use this setting, MediaConvert ignores any -// value you provide for Dynamic range compression profile (DynamicRangeCompressionProfile). -// For information about the Dolby Digital DRC operating modes and profiles, -// see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf. +// value you provide for Dynamic range compression profile. 
For information +// about the Dolby Digital DRC operating modes and profiles, see the Dynamic +// Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf. const ( // Eac3DynamicRangeCompressionRfNone is a Eac3DynamicRangeCompressionRf enum value Eac3DynamicRangeCompressionRfNone = "NONE" @@ -31344,9 +30909,9 @@ func Eac3PhaseControl_Values() []string { } // Choose how the service does stereo downmixing. This setting only applies -// if you keep the default value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2) -// for the setting Coding mode (Eac3CodingMode). If you choose a different value -// for Coding mode, the service ignores Stereo downmix (Eac3StereoDownmix). +// if you keep the default value of 3/2 - L, R, C, Ls, Rs for the setting Coding +// mode. If you choose a different value for Coding mode, the service ignores +// Stereo downmix. const ( // Eac3StereoDownmixNotIndicated is a Eac3StereoDownmix enum value Eac3StereoDownmixNotIndicated = "NOT_INDICATED" @@ -31416,10 +30981,10 @@ func Eac3SurroundMode_Values() []string { } // Specify whether this set of input captions appears in your outputs in both -// 608 and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes -// the captions data in two ways: it passes the 608 data through using the 608 -// compatibility bytes fields of the 708 wrapper, and it also translates the -// 608 data into 708. +// 608 and 708 format. If you choose Upconvert, MediaConvert includes the captions +// data in two ways: it passes the 608 data through using the 608 compatibility +// bytes fields of the 708 wrapper, and it also translates the 608 data into +// 708. const ( // EmbeddedConvert608To708Upconvert is a EmbeddedConvert608To708 enum value EmbeddedConvert608To708Upconvert = "UPCONVERT" @@ -31455,12 +31020,10 @@ func EmbeddedTerminateCaptions_Values() []string { } } -// Set Embedded timecode override (embeddedTimecodeOverride) to Use MDPM (USE_MDPM) -// when your AVCHD input contains timecode tag data in the Modified Digital -// Video Pack Metadata (MDPM). When you do, we recommend you also set Timecode -// source (inputTimecodeSource) to Embedded (EMBEDDED). Leave Embedded timecode -// override blank, or set to None (NONE), when your input does not contain MDPM -// timecode. +// Set Embedded timecode override to Use MDPM when your AVCHD input contains +// timecode tag data in the Modified Digital Video Pack Metadata. When you do, +// we recommend you also set Timecode source to Embedded. Leave Embedded timecode +// override blank, or set to None, when your input does not contain MDPM timecode. const ( // EmbeddedTimecodeOverrideNone is a EmbeddedTimecodeOverride enum value EmbeddedTimecodeOverrideNone = "NONE" @@ -31497,10 +31060,10 @@ func F4vMoovPlacement_Values() []string { } // Specify whether this set of input captions appears in your outputs in both -// 608 and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes -// the captions data in two ways: it passes the 608 data through using the 608 -// compatibility bytes fields of the 708 wrapper, and it also translates the -// 608 data into 708. +// 608 and 708 format. If you choose Upconvert, MediaConvert includes the captions +// data in two ways: it passes the 608 data through using the 608 compatibility +// bytes fields of the 708 wrapper, and it also translates the 608 data into +// 708. 
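A minimal sketch of wiring the upconvert behavior above into a caption selector with this package; the SCC source type and the S3 path are illustrative placeholders, not prescribed by the API docs:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	// Hypothetical sidecar caption source; the file path is a placeholder.
	src := &mediaconvert.CaptionSourceSettings{
		SourceType: aws.String(mediaconvert.CaptionSourceTypeScc),
		FileSourceSettings: &mediaconvert.FileSourceSettings{
			SourceFile: aws.String("s3://example-bucket/captions.scc"),
			// Pass the 608 data through the 708 compatibility bytes and
			// also translate it to 708, as described above.
			Convert608To708: aws.String(mediaconvert.FileSourceConvert608To708Upconvert),
		},
	}
	fmt.Println(src)
}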
const ( // FileSourceConvert608To708Upconvert is a FileSourceConvert608To708 enum value FileSourceConvert608To708Upconvert = "UPCONVERT" @@ -31517,10 +31080,10 @@ func FileSourceConvert608To708_Values() []string { } } -// When you use the setting Time delta (TimeDelta) to adjust the sync between -// your sidecar captions and your video, use this setting to specify the units -// for the delta that you specify. When you don't specify a value for Time delta -// units (TimeDeltaUnits), MediaConvert uses seconds by default. +// When you use the setting Time delta to adjust the sync between your sidecar +// captions and your video, use this setting to specify the units for the delta +// that you specify. When you don't specify a value for Time delta units, MediaConvert +// uses seconds by default. const ( // FileSourceTimeDeltaUnitsSeconds is a FileSourceTimeDeltaUnits enum value FileSourceTimeDeltaUnitsSeconds = "SECONDS" @@ -31560,15 +31123,15 @@ func FontScript_Values() []string { } } -// Keep the default value, Auto (AUTO), for this setting to have MediaConvert -// automatically apply the best types of quantization for your video content. -// When you want to apply your quantization settings manually, you must set -// H264AdaptiveQuantization to a value other than Auto (AUTO). Use this setting -// to specify the strength of any adaptive quantization filters that you enable. -// If you don't want MediaConvert to do any adaptive quantization in this transcode, -// set Adaptive quantization (H264AdaptiveQuantization) to Off (OFF). Related -// settings: The value that you choose here applies to the following settings: -// H264FlickerAdaptiveQuantization, H264SpatialAdaptiveQuantization, and H264TemporalAdaptiveQuantization. +// Keep the default value, Auto, for this setting to have MediaConvert automatically +// apply the best types of quantization for your video content. When you want +// to apply your quantization settings manually, you must set H264AdaptiveQuantization +// to a value other than Auto. Use this setting to specify the strength of any +// adaptive quantization filters that you enable. If you don't want MediaConvert +// to do any adaptive quantization in this transcode, set Adaptive quantization +// to Off. Related settings: The value that you choose here applies to the following +// settings: H264FlickerAdaptiveQuantization, H264SpatialAdaptiveQuantization, +// and H264TemporalAdaptiveQuantization. const ( // H264AdaptiveQuantizationOff is a H264AdaptiveQuantization enum value H264AdaptiveQuantizationOff = "OFF" @@ -31606,7 +31169,7 @@ func H264AdaptiveQuantization_Values() []string { } // Specify an H.264 level that is consistent with your output video settings. -// If you aren't sure what level to specify, choose Auto (AUTO). +// If you aren't sure what level to specify, choose Auto. const ( // H264CodecLevelAuto is a H264CodecLevel enum value H264CodecLevelAuto = "AUTO" @@ -31721,7 +31284,7 @@ func H264CodecProfile_Values() []string { // This will cause the service to use fewer B-frames (which infer information // based on other frames) for high-motion portions of the video and more B-frames // for low-motion portions. The maximum number of B-frames is limited by the -// value you provide for the setting B frames between reference frames (numberBFramesBetweenReferenceFrames). +// value you provide for the setting B frames between reference frames. 
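A minimal sketch of pairing Dynamic sub-GOP with the B-frame cap it depends on; the B-frame count here is an illustrative assumption:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	// Let the encoder vary sub-GOP size with content; the B-frame count
	// below is the ceiling referred to by the doc comment above.
	h264 := &mediaconvert.H264Settings{
		DynamicSubGop:                       aws.String(mediaconvert.H264DynamicSubGopAdaptive),
		NumberBFramesBetweenReferenceFrames: aws.Int64(3),
	}
	fmt.Println(h264)
}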
const ( // H264DynamicSubGopAdaptive is a H264DynamicSubGop enum value H264DynamicSubGopAdaptive = "ADAPTIVE" @@ -31757,9 +31320,9 @@ func H264EntropyEncoding_Values() []string { // The video encoding method for your MPEG-4 AVC output. Keep the default value, // PAFF, to have MediaConvert use PAFF encoding for interlaced outputs. Choose -// Force field (FORCE_FIELD) to disable PAFF encoding and create separate interlaced -// fields. Choose MBAFF to disable PAFF and have MediaConvert use MBAFF encoding -// for interlaced outputs. +// Force field to disable PAFF encoding and create separate interlaced fields. +// Choose MBAFF to disable PAFF and have MediaConvert use MBAFF encoding for +// interlaced outputs. const ( // H264FieldEncodingPaff is a H264FieldEncoding enum value H264FieldEncodingPaff = "PAFF" @@ -31785,14 +31348,13 @@ func H264FieldEncoding_Values() []string { // and all other adaptive quantization from your JSON job specification, MediaConvert // automatically applies the best types of quantization for your video content. // When you set H264AdaptiveQuantization to a value other than AUTO, the default -// value for H264FlickerAdaptiveQuantization is Disabled (DISABLED). Change -// this value to Enabled (ENABLED) to reduce I-frame pop. I-frame pop appears -// as a visual flicker that can arise when the encoder saves bits by copying -// some macroblocks many times from frame to frame, and then refreshes them -// at the I-frame. When you enable this setting, the encoder updates these macroblocks -// slightly more often to smooth out the flicker. To manually enable or disable -// H264FlickerAdaptiveQuantization, you must set Adaptive quantization (H264AdaptiveQuantization) -// to a value other than AUTO. +// value for H264FlickerAdaptiveQuantization is Disabled. Change this value +// to Enabled to reduce I-frame pop. I-frame pop appears as a visual flicker +// that can arise when the encoder saves bits by copying some macroblocks many +// times from frame to frame, and then refreshes them at the I-frame. When you +// enable this setting, the encoder updates these macroblocks slightly more +// often to smooth out the flicker. To manually enable or disable H264FlickerAdaptiveQuantization, +// you must set Adaptive quantization to a value other than AUTO. const ( // H264FlickerAdaptiveQuantizationDisabled is a H264FlickerAdaptiveQuantization enum value H264FlickerAdaptiveQuantizationDisabled = "DISABLED" @@ -31814,12 +31376,7 @@ func H264FlickerAdaptiveQuantization_Values() []string { // video, choose Follow source. If you want to do frame rate conversion, choose // a frame rate from the dropdown list or choose Custom. The framerates shown // in the dropdown list are decimal approximations of fractions. If you choose -// Custom, specify your frame rate as a fraction. If you are creating your transcoding -// job specification as a JSON file without the console, use FramerateControl -// to specify which value the service uses for the frame rate for this output. -// Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate -// from the input. Choose SPECIFIED if you want the service to use the frame -// rate you specify in the settings FramerateNumerator and FramerateDenominator. +// Custom, specify your frame rate as a fraction. 
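A minimal sketch of specifying a frame rate as a fraction, as described above; choosing Custom in the console corresponds to SPECIFIED in the API, and 23.976 fps is expressed as 24000/1001:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	// 23.976 fps written as the exact fraction 24000/1001.
	h264 := &mediaconvert.H264Settings{
		FramerateControl:     aws.String(mediaconvert.H264FramerateControlSpecified),
		FramerateNumerator:   aws.Int64(24000),
		FramerateDenominator: aws.Int64(1001),
	}
	fmt.Println(h264)
}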
const ( // H264FramerateControlInitializeFromSource is a H264FramerateControl enum value H264FramerateControlInitializeFromSource = "INITIALIZE_FROM_SOURCE" @@ -31891,13 +31448,12 @@ func H264GopBReference_Values() []string { // Specify how the transcoder determines GOP size for this output. We recommend // that you have the transcoder automatically choose this value for you based // on characteristics of your input video. To enable this automatic behavior, -// choose Auto (AUTO) and and leave GOP size (GopSize) blank. By default, if -// you don't specify GOP mode control (GopSizeUnits), MediaConvert will use -// automatic behavior. If your output group specifies HLS, DASH, or CMAF, set -// GOP mode control to Auto and leave GOP size blank in each output in your -// output group. To explicitly specify the GOP length, choose Specified, frames -// (FRAMES) or Specified, seconds (SECONDS) and then provide the GOP length -// in the related setting GOP size (GopSize). +// choose Auto and leave GOP size blank. By default, if you don't specify +// GOP mode control, MediaConvert will use automatic behavior. If your output +// group specifies HLS, DASH, or CMAF, set GOP mode control to Auto and leave +// GOP size blank in each output in your output group. To explicitly specify +// the GOP length, choose Specified, frames or Specified, seconds and then provide +// the GOP length in the related setting GOP size. const ( // H264GopSizeUnitsFrames is a H264GopSizeUnits enum value H264GopSizeUnitsFrames = "FRAMES" @@ -31919,17 +31475,16 @@ func H264GopSizeUnits_Values() []string { } // Choose the scan line type for the output. Keep the default value, Progressive -// (PROGRESSIVE) to create a progressive output, regardless of the scan type -// of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) -// to create an output that's interlaced with the same field polarity throughout. -// Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) -// to produce outputs with the same field polarity as the source. For jobs that -// have multiple inputs, the output field polarity might change over the course -// of the output. Follow behavior depends on the input scan type. If the source -// is interlaced, the output will be interlaced with the same polarity as the -// source. If the source is progressive, the output will be interlaced with -// top field bottom field first, depending on which of the Follow options you -// choose. +// to create a progressive output, regardless of the scan type of your input. +// Use Top field first or Bottom field first to create an output that's interlaced +// with the same field polarity throughout. Use Follow, default top or Follow, +// default bottom to produce outputs with the same field polarity as the source. +// For jobs that have multiple inputs, the output field polarity might change +// over the course of the output. Follow behavior depends on the input scan +// type. If the source is interlaced, the output will be interlaced with the +// same polarity as the source. If the source is progressive, the output will +// be interlaced with top field first or bottom field first, depending on which +// of the Follow options you choose. const ( // H264InterlaceModeProgressive is a H264InterlaceMode enum value H264InterlaceModeProgressive = "PROGRESSIVE" @@ -31959,12 +31514,10 @@ func H264InterlaceMode_Values() []string { } // Optional. Specify how the service determines the pixel aspect ratio (PAR) -// for this output.
The default behavior, Follow source (INITIALIZE_FROM_SOURCE), -// uses the PAR from your input video for your output. To specify a different -// PAR in the console, choose any value other than Follow source. To specify -// a different PAR by editing the JSON job specification, choose SPECIFIED. -// When you choose SPECIFIED for this setting, you must also specify values -// for the parNumerator and parDenominator settings. +// for this output. The default behavior, Follow source, uses the PAR from your +// input video for your output. To specify a different PAR in the console, choose +// any value other than Follow source. When you choose SPECIFIED for this setting, +// you must also specify values for the parNumerator and parDenominator settings. const ( // H264ParControlInitializeFromSource is a H264ParControl enum value H264ParControlInitializeFromSource = "INITIALIZE_FROM_SOURCE" @@ -32050,17 +31603,16 @@ func H264RepeatPps_Values() []string { // Use this setting for interlaced outputs, when your output frame rate is half // of your input frame rate. In this situation, choose Optimized interlacing -// (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this -// case, each progressive frame from the input corresponds to an interlaced -// field in the output. Keep the default value, Basic interlacing (INTERLACED), -// for all other output frame rates. With basic interlacing, MediaConvert performs -// any frame rate conversion first and then interlaces the frames. When you -// choose Optimized interlacing and you set your output frame rate to a value -// that isn't suitable for optimized interlacing, MediaConvert automatically -// falls back to basic interlacing. Required settings: To use optimized interlacing, -// you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't -// use optimized interlacing for hard telecine outputs. You must also set Interlace -// mode (interlaceMode) to a value other than Progressive (PROGRESSIVE). +// to create a better quality interlaced output. In this case, each progressive +// frame from the input corresponds to an interlaced field in the output. Keep +// the default value, Basic interlacing, for all other output frame rates. With +// basic interlacing, MediaConvert performs any frame rate conversion first +// and then interlaces the frames. When you choose Optimized interlacing and +// you set your output frame rate to a value that isn't suitable for optimized +// interlacing, MediaConvert automatically falls back to basic interlacing. +// Required settings: To use optimized interlacing, you must set Telecine to +// None or Soft. You can't use optimized interlacing for hard telecine outputs. +// You must also set Interlace mode to a value other than Progressive. const ( // H264ScanTypeConversionModeInterlaced is a H264ScanTypeConversionMode enum value H264ScanTypeConversionModeInterlaced = "INTERLACED" @@ -32079,9 +31631,8 @@ func H264ScanTypeConversionMode_Values() []string { // Enable this setting to insert I-frames at scene changes that the service // automatically detects. This improves video quality and is enabled by default. -// If this output uses QVBR, choose Transition detection (TRANSITION_DETECTION) -// for further video quality improvement. For more information about QVBR, see -// https://docs.aws.amazon.com/console/mediaconvert/cbr-vbr-qvbr. +// If this output uses QVBR, choose Transition detection for further video quality +// improvement. 
For more information about QVBR, see https://docs.aws.amazon.com/console/mediaconvert/cbr-vbr-qvbr. const ( // H264SceneChangeDetectDisabled is a H264SceneChangeDetect enum value H264SceneChangeDetectDisabled = "DISABLED" @@ -32107,9 +31658,7 @@ func H264SceneChangeDetect_Values() []string { // slow PAL, MediaConvert relabels the video frames to 25 fps and resamples // your audio to keep it synchronized with the video. Note that enabling this // setting will slightly reduce the duration of your video. Required settings: -// You must also set Framerate to 25. In your JSON job specification, set (framerateControl) -// to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to -// 1. +// You must also set Framerate to 25. const ( // H264SlowPalDisabled is a H264SlowPal enum value H264SlowPalDisabled = "DISABLED" @@ -32126,29 +31675,28 @@ func H264SlowPal_Values() []string { } } -// Only use this setting when you change the default value, Auto (AUTO), for -// the setting H264AdaptiveQuantization. When you keep all defaults, excluding -// H264AdaptiveQuantization and all other adaptive quantization from your JSON -// job specification, MediaConvert automatically applies the best types of quantization -// for your video content. When you set H264AdaptiveQuantization to a value -// other than AUTO, the default value for H264SpatialAdaptiveQuantization is -// Enabled (ENABLED). Keep this default value to adjust quantization within -// each frame based on spatial variation of content complexity. When you enable -// this feature, the encoder uses fewer bits on areas that can sustain more -// distortion with no noticeable visual degradation and uses more bits on areas -// where any small distortion will be noticeable. For example, complex textured -// blocks are encoded with fewer bits and smooth textured blocks are encoded -// with more bits. Enabling this feature will almost always improve your video -// quality. Note, though, that this feature doesn't take into account where -// the viewer's attention is likely to be. If viewers are likely to be focusing -// their attention on a part of the screen with a lot of complex texture, you -// might choose to set H264SpatialAdaptiveQuantization to Disabled (DISABLED). -// Related setting: When you enable spatial adaptive quantization, set the value -// for Adaptive quantization (H264AdaptiveQuantization) depending on your content. -// For homogeneous content, such as cartoons and video games, set it to Low. -// For content with a wider variety of textures, set it to High or Higher. To -// manually enable or disable H264SpatialAdaptiveQuantization, you must set -// Adaptive quantization (H264AdaptiveQuantization) to a value other than AUTO. +// Only use this setting when you change the default value, Auto, for the setting +// H264AdaptiveQuantization. When you keep all defaults, excluding H264AdaptiveQuantization +// and all other adaptive quantization from your JSON job specification, MediaConvert +// automatically applies the best types of quantization for your video content. +// When you set H264AdaptiveQuantization to a value other than AUTO, the default +// value for H264SpatialAdaptiveQuantization is Enabled. Keep this default value +// to adjust quantization within each frame based on spatial variation of content +// complexity. 
When you enable this feature, the encoder uses fewer bits on +// areas that can sustain more distortion with no noticeable visual degradation +// and uses more bits on areas where any small distortion will be noticeable. +// For example, complex textured blocks are encoded with fewer bits and smooth +// textured blocks are encoded with more bits. Enabling this feature will almost +// always improve your video quality. Note, though, that this feature doesn't +// take into account where the viewer's attention is likely to be. If viewers +// are likely to be focusing their attention on a part of the screen with a +// lot of complex texture, you might choose to set H264SpatialAdaptiveQuantization +// to Disabled. Related setting: When you enable spatial adaptive quantization, +// set the value for Adaptive quantization depending on your content. For homogeneous +// content, such as cartoons and video games, set it to Low. For content with +// a wider variety of textures, set it to High or Higher. To manually enable +// or disable H264SpatialAdaptiveQuantization, you must set Adaptive quantization +// to a value other than AUTO. const ( // H264SpatialAdaptiveQuantizationDisabled is a H264SpatialAdaptiveQuantization enum value H264SpatialAdaptiveQuantizationDisabled = "DISABLED" @@ -32184,12 +31732,12 @@ func H264Syntax_Values() []string { // When you do frame rate conversion from 23.976 frames per second (fps) to // 29.97 fps, and your output scan type is interlaced, you can optionally enable -// hard or soft telecine to create a smoother picture. Hard telecine (HARD) -// produces a 29.97i output. Soft telecine (SOFT) produces an output with a -// 23.976 output that signals to the video player device to do the conversion -// during play back. When you keep the default value, None (NONE), MediaConvert -// does a standard frame rate conversion to 29.97 without doing anything with -// the field polarity to create a smoother picture. +// hard or soft telecine to create a smoother picture. Hard telecine produces +// a 29.97i output. Soft telecine produces a 23.976 output that signals to the +// video player device to do the conversion during playback. +// When you keep the default value, None, MediaConvert does a standard frame +// rate conversion to 29.97 without doing anything with the field polarity to +// create a smoother picture. const ( // H264TelecineNone is a H264Telecine enum value H264TelecineNone = "NONE" @@ -32215,22 +31763,21 @@ func H264Telecine_Values() []string { // and all other adaptive quantization from your JSON job specification, MediaConvert // automatically applies the best types of quantization for your video content. // When you set H264AdaptiveQuantization to a value other than AUTO, the default -// value for H264TemporalAdaptiveQuantization is Enabled (ENABLED). Keep this -// default value to adjust quantization within each frame based on temporal -// variation of content complexity. When you enable this feature, the encoder -// uses fewer bits on areas of the frame that aren't moving and uses more bits -// on complex objects with sharp edges that move a lot. For example, this feature -// improves the readability of text tickers on newscasts and scoreboards on -// sports matches. Enabling this feature will almost always improve your video -// quality. Note, though, that this feature doesn't take into account where -// the viewer's attention is likely to be.
If viewers are likely to be focusing -// their attention on a part of the screen that doesn't have moving objects -// with sharp edges, such as sports athletes' faces, you might choose to set -// H264TemporalAdaptiveQuantization to Disabled (DISABLED). Related setting: -// When you enable temporal quantization, adjust the strength of the filter -// with the setting Adaptive quantization (adaptiveQuantization). To manually +// value for H264TemporalAdaptiveQuantization is Enabled. Keep this default +// value to adjust quantization within each frame based on temporal variation +// of content complexity. When you enable this feature, the encoder uses fewer +// bits on areas of the frame that aren't moving and uses more bits on complex +// objects with sharp edges that move a lot. For example, this feature improves +// the readability of text tickers on newscasts and scoreboards on sports matches. +// Enabling this feature will almost always improve your video quality. Note, +// though, that this feature doesn't take into account where the viewer's attention +// is likely to be. If viewers are likely to be focusing their attention on +// a part of the screen that doesn't have moving objects with sharp edges, such +// as sports athletes' faces, you might choose to set H264TemporalAdaptiveQuantization +// to Disabled. Related setting: When you enable temporal quantization, adjust +// the strength of the filter with the setting Adaptive quantization. To manually // enable or disable H264TemporalAdaptiveQuantization, you must set Adaptive -// quantization (H264AdaptiveQuantization) to a value other than AUTO. +// quantization to a value other than AUTO. const ( // H264TemporalAdaptiveQuantizationDisabled is a H264TemporalAdaptiveQuantization enum value H264TemporalAdaptiveQuantizationDisabled = "DISABLED" @@ -32264,16 +31811,13 @@ func H264UnregisteredSeiTimecode_Values() []string { } } -// When you set Adaptive Quantization (H265AdaptiveQuantization) to Auto (AUTO), -// or leave blank, MediaConvert automatically applies quantization to improve -// the video quality of your output. Set Adaptive Quantization to Low (LOW), -// Medium (MEDIUM), High (HIGH), Higher (HIGHER), or Max (MAX) to manually control -// the strength of the quantization filter. When you do, you can specify a value -// for Spatial Adaptive Quantization (H265SpatialAdaptiveQuantization), Temporal -// Adaptive Quantization (H265TemporalAdaptiveQuantization), and Flicker Adaptive -// Quantization (H265FlickerAdaptiveQuantization), to further control the quantization -// filter. Set Adaptive Quantization to Off (OFF) to apply no quantization to -// your output. +// When you set Adaptive Quantization to Auto, or leave blank, MediaConvert +// automatically applies quantization to improve the video quality of your output. +// Set Adaptive Quantization to Low, Medium, High, Higher, or Max to manually +// control the strength of the quantization filter. When you do, you can specify +// a value for Spatial Adaptive Quantization, Temporal Adaptive Quantization, +// and Flicker Adaptive Quantization, to further control the quantization filter. +// Set Adaptive Quantization to Off to apply no quantization to your output. 
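A minimal sketch of manual quantization control as described above; the overall strength and the per-filter choices are illustrative assumptions:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	// Pick an overall strength, then toggle the individual quantization
	// filters that the strength setting governs.
	h265 := &mediaconvert.H265Settings{
		AdaptiveQuantization:         aws.String(mediaconvert.H265AdaptiveQuantizationHigh),
		SpatialAdaptiveQuantization:  aws.String(mediaconvert.H265SpatialAdaptiveQuantizationEnabled),
		TemporalAdaptiveQuantization: aws.String(mediaconvert.H265TemporalAdaptiveQuantizationEnabled),
		FlickerAdaptiveQuantization:  aws.String(mediaconvert.H265FlickerAdaptiveQuantizationDisabled),
	}
	fmt.Println(h265)
}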
const ( // H265AdaptiveQuantizationOff is a H265AdaptiveQuantization enum value H265AdaptiveQuantizationOff = "OFF" @@ -32440,7 +31984,7 @@ func H265CodecProfile_Values() []string { // This will cause the service to use fewer B-frames (which infer information // based on other frames) for high-motion portions of the video and more B-frames // for low-motion portions. The maximum number of B-frames is limited by the -// value you provide for the setting B frames between reference frames (numberBFramesBetweenReferenceFrames). +// value you provide for the setting B frames between reference frames. const ( // H265DynamicSubGopAdaptive is a H265DynamicSubGop enum value H265DynamicSubGopAdaptive = "ADAPTIVE" @@ -32463,7 +32007,7 @@ func H265DynamicSubGop_Values() []string { // at the I-frame. When you enable this setting, the encoder updates these macroblocks // slightly more often to smooth out the flicker. This setting is disabled by // default. Related setting: In addition to enabling this setting, you must -// also set adaptiveQuantization to a value other than Off (OFF). +// also set adaptiveQuantization to a value other than Off. const ( // H265FlickerAdaptiveQuantizationDisabled is a H265FlickerAdaptiveQuantization enum value H265FlickerAdaptiveQuantizationDisabled = "DISABLED" @@ -32480,17 +32024,12 @@ func H265FlickerAdaptiveQuantization_Values() []string { } } -// If you are using the console, use the Framerate setting to specify the frame -// rate for this output. If you want to keep the same frame rate as the input -// video, choose Follow source. If you want to do frame rate conversion, choose -// a frame rate from the dropdown list or choose Custom. The framerates shown -// in the dropdown list are decimal approximations of fractions. If you choose -// Custom, specify your frame rate as a fraction. If you are creating your transcoding -// job specification as a JSON file without the console, use FramerateControl -// to specify which value the service uses for the frame rate for this output. -// Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate -// from the input. Choose SPECIFIED if you want the service to use the frame -// rate you specify in the settings FramerateNumerator and FramerateDenominator. +// Use the Framerate setting to specify the frame rate for this output. If you +// want to keep the same frame rate as the input video, choose Follow source. +// If you want to do frame rate conversion, choose a frame rate from the dropdown +// list or choose Custom. The framerates shown in the dropdown list are decimal +// approximations of fractions. If you choose Custom, specify your frame rate +// as a fraction. const ( // H265FramerateControlInitializeFromSource is a H265FramerateControl enum value H265FramerateControlInitializeFromSource = "INITIALIZE_FROM_SOURCE" @@ -32562,13 +32101,12 @@ func H265GopBReference_Values() []string { // Specify how the transcoder determines GOP size for this output. We recommend // that you have the transcoder automatically choose this value for you based // on characteristics of your input video. To enable this automatic behavior, -// choose Auto (AUTO) and and leave GOP size (GopSize) blank. By default, if -// you don't specify GOP mode control (GopSizeUnits), MediaConvert will use -// automatic behavior. If your output group specifies HLS, DASH, or CMAF, set -// GOP mode control to Auto and leave GOP size blank in each output in your -// output group. 
To explicitly specify the GOP length, choose Specified, frames -// (FRAMES) or Specified, seconds (SECONDS) and then provide the GOP length -// in the related setting GOP size (GopSize). +// choose Auto and leave GOP size blank. By default, if you don't specify +// GOP mode control, MediaConvert will use automatic behavior. If your output +// group specifies HLS, DASH, or CMAF, set GOP mode control to Auto and leave +// GOP size blank in each output in your output group. To explicitly specify +// the GOP length, choose Specified, frames or Specified, seconds and then provide +// the GOP length in the related setting GOP size. const ( // H265GopSizeUnitsFrames is a H265GopSizeUnits enum value H265GopSizeUnitsFrames = "FRAMES" @@ -32590,17 +32128,16 @@ func H265GopSizeUnits_Values() []string { } // Choose the scan line type for the output. Keep the default value, Progressive -// (PROGRESSIVE) to create a progressive output, regardless of the scan type -// of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) -// to create an output that's interlaced with the same field polarity throughout. -// Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) -// to produce outputs with the same field polarity as the source. For jobs that -// have multiple inputs, the output field polarity might change over the course -// of the output. Follow behavior depends on the input scan type. If the source -// is interlaced, the output will be interlaced with the same polarity as the -// source. If the source is progressive, the output will be interlaced with -// top field bottom field first, depending on which of the Follow options you -// choose. +// to create a progressive output, regardless of the scan type of your input. +// Use Top field first or Bottom field first to create an output that's interlaced +// with the same field polarity throughout. Use Follow, default top or Follow, +// default bottom to produce outputs with the same field polarity as the source. +// For jobs that have multiple inputs, the output field polarity might change +// over the course of the output. Follow behavior depends on the input scan +// type. If the source is interlaced, the output will be interlaced with the +// same polarity as the source. If the source is progressive, the output will +// be interlaced with top field first or bottom field first, depending on which +// of the Follow options you choose. const ( // H265InterlaceModeProgressive is a H265InterlaceMode enum value H265InterlaceModeProgressive = "PROGRESSIVE" @@ -32630,12 +32167,10 @@ func H265InterlaceMode_Values() []string { } // Optional. Specify how the service determines the pixel aspect ratio (PAR) -// for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), -// uses the PAR from your input video for your output. To specify a different -// PAR in the console, choose any value other than Follow source. To specify -// a different PAR by editing the JSON job specification, choose SPECIFIED. -// When you choose SPECIFIED for this setting, you must also specify values -// for the parNumerator and parDenominator settings. +// for this output. The default behavior, Follow source, uses the PAR from your +// input video for your output. To specify a different PAR, choose any value +// other than Follow source. When you choose SPECIFIED for this setting, you +// must also specify values for the parNumerator and parDenominator settings.
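A minimal sketch of specifying an explicit PAR as described above; the 40:33 ratio is an illustrative anamorphic value, not a recommendation:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	// SPECIFIED requires both parNumerator and parDenominator.
	h265 := &mediaconvert.H265Settings{
		ParControl:     aws.String(mediaconvert.H265ParControlSpecified),
		ParNumerator:   aws.Int64(40),
		ParDenominator: aws.Int64(33),
	}
	fmt.Println(h265)
}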
const ( // H265ParControlInitializeFromSource is a H265ParControl enum value H265ParControlInitializeFromSource = "INITIALIZE_FROM_SOURCE" @@ -32652,9 +32187,9 @@ func H265ParControl_Values() []string { } } -// Optional. Use Quality tuning level (qualityTuningLevel) to choose how you -// want to trade off encoding speed for output video quality. The default behavior -// is faster, lower quality, single-pass encoding. +// Optional. Use Quality tuning level to choose how you want to trade off encoding +// speed for output video quality. The default behavior is faster, lower quality, +// single-pass encoding. const ( // H265QualityTuningLevelSinglePass is a H265QualityTuningLevel enum value H265QualityTuningLevelSinglePass = "SINGLE_PASS" @@ -32721,17 +32256,16 @@ func H265SampleAdaptiveOffsetFilterMode_Values() []string { // Use this setting for interlaced outputs, when your output frame rate is half // of your input frame rate. In this situation, choose Optimized interlacing -// (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this -// case, each progressive frame from the input corresponds to an interlaced -// field in the output. Keep the default value, Basic interlacing (INTERLACED), -// for all other output frame rates. With basic interlacing, MediaConvert performs -// any frame rate conversion first and then interlaces the frames. When you -// choose Optimized interlacing and you set your output frame rate to a value -// that isn't suitable for optimized interlacing, MediaConvert automatically -// falls back to basic interlacing. Required settings: To use optimized interlacing, -// you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't -// use optimized interlacing for hard telecine outputs. You must also set Interlace -// mode (interlaceMode) to a value other than Progressive (PROGRESSIVE). +// to create a better quality interlaced output. In this case, each progressive +// frame from the input corresponds to an interlaced field in the output. Keep +// the default value, Basic interlacing, for all other output frame rates. With +// basic interlacing, MediaConvert performs any frame rate conversion first +// and then interlaces the frames. When you choose Optimized interlacing and +// you set your output frame rate to a value that isn't suitable for optimized +// interlacing, MediaConvert automatically falls back to basic interlacing. +// Required settings: To use optimized interlacing, you must set Telecine to +// None or Soft. You can't use optimized interlacing for hard telecine outputs. +// You must also set Interlace mode to a value other than Progressive. const ( // H265ScanTypeConversionModeInterlaced is a H265ScanTypeConversionMode enum value H265ScanTypeConversionModeInterlaced = "INTERLACED" @@ -32750,9 +32284,8 @@ func H265ScanTypeConversionMode_Values() []string { // Enable this setting to insert I-frames at scene changes that the service // automatically detects. This improves video quality and is enabled by default. -// If this output uses QVBR, choose Transition detection (TRANSITION_DETECTION) -// for further video quality improvement. For more information about QVBR, see -// https://docs.aws.amazon.com/console/mediaconvert/cbr-vbr-qvbr. +// If this output uses QVBR, choose Transition detection for further video quality +// improvement. For more information about QVBR, see https://docs.aws.amazon.com/console/mediaconvert/cbr-vbr-qvbr. 
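A minimal sketch of pairing Transition detection with QVBR, the combination the paragraph above suggests; the quality level and bitrate ceiling are illustrative assumptions:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	// Scene-change I-frames with transition detection, under QVBR rate control.
	h265 := &mediaconvert.H265Settings{
		SceneChangeDetect: aws.String(mediaconvert.H265SceneChangeDetectTransitionDetection),
		RateControlMode:   aws.String(mediaconvert.H265RateControlModeQvbr),
		QvbrSettings: &mediaconvert.H265QvbrSettings{
			QvbrQualityLevel: aws.Int64(7),
		},
		MaxBitrate: aws.Int64(5000000),
	}
	fmt.Println(h265)
}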
const ( // H265SceneChangeDetectDisabled is a H265SceneChangeDetect enum value H265SceneChangeDetectDisabled = "DISABLED" @@ -32778,9 +32311,7 @@ func H265SceneChangeDetect_Values() []string { // slow PAL, MediaConvert relabels the video frames to 25 fps and resamples // your audio to keep it synchronized with the video. Note that enabling this // setting will slightly reduce the duration of your video. Required settings: -// You must also set Framerate to 25. In your JSON job specification, set (framerateControl) -// to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to -// 1. +// You must also set Framerate to 25. const ( // H265SlowPalDisabled is a H265SlowPal enum value H265SlowPalDisabled = "DISABLED" @@ -32797,21 +32328,20 @@ func H265SlowPal_Values() []string { } } -// Keep the default value, Enabled (ENABLED), to adjust quantization within -// each frame based on spatial variation of content complexity. When you enable -// this feature, the encoder uses fewer bits on areas that can sustain more -// distortion with no noticeable visual degradation and uses more bits on areas -// where any small distortion will be noticeable. For example, complex textured -// blocks are encoded with fewer bits and smooth textured blocks are encoded -// with more bits. Enabling this feature will almost always improve your video -// quality. Note, though, that this feature doesn't take into account where -// the viewer's attention is likely to be. If viewers are likely to be focusing -// their attention on a part of the screen with a lot of complex texture, you -// might choose to disable this feature. Related setting: When you enable spatial -// adaptive quantization, set the value for Adaptive quantization (adaptiveQuantization) -// depending on your content. For homogeneous content, such as cartoons and -// video games, set it to Low. For content with a wider variety of textures, -// set it to High or Higher. +// Keep the default value, Enabled, to adjust quantization within each frame +// based on spatial variation of content complexity. When you enable this feature, +// the encoder uses fewer bits on areas that can sustain more distortion with +// no noticeable visual degradation and uses more bits on areas where any small +// distortion will be noticeable. For example, complex textured blocks are encoded +// with fewer bits and smooth textured blocks are encoded with more bits. Enabling +// this feature will almost always improve your video quality. Note, though, +// that this feature doesn't take into account where the viewer's attention +// is likely to be. If viewers are likely to be focusing their attention on +// a part of the screen with a lot of complex texture, you might choose to disable +// this feature. Related setting: When you enable spatial adaptive quantization, +// set the value for Adaptive quantization depending on your content. For homogeneous +// content, such as cartoons and video games, set it to Low. For content with +// a wider variety of textures, set it to High or Higher. const ( // H265SpatialAdaptiveQuantizationDisabled is a H265SpatialAdaptiveQuantization enum value H265SpatialAdaptiveQuantizationDisabled = "DISABLED" @@ -32828,13 +32358,12 @@ func H265SpatialAdaptiveQuantization_Values() []string { } } -// This field applies only if the Streams > Advanced > Framerate (framerate) -// field is set to 29.970. 
This field works with the Streams > Advanced > Preprocessors -// > Deinterlacer field (deinterlace_mode) and the Streams > Advanced > Interlaced -// Mode field (interlace_mode) to identify the scan type for the output: Progressive, -// Interlaced, Hard Telecine or Soft Telecine. - Hard: produces 29.97i output -// from 23.976 input. - Soft: produces 23.976; the player converts this output -// to 29.97i. +// This field applies only if the Streams > Advanced > Framerate field is set +// to 29.970. This field works with the Streams > Advanced > Preprocessors > +// Deinterlacer field and the Streams > Advanced > Interlaced Mode field to +// identify the scan type for the output: Progressive, Interlaced, Hard Telecine +// or Soft Telecine. - Hard: produces 29.97i output from 23.976 input. - Soft: +// produces 23.976; the player converts this output to 29.97i. const ( // H265TelecineNone is a H265Telecine enum value H265TelecineNone = "NONE" @@ -32855,19 +32384,18 @@ func H265Telecine_Values() []string { } } -// Keep the default value, Enabled (ENABLED), to adjust quantization within -// each frame based on temporal variation of content complexity. When you enable -// this feature, the encoder uses fewer bits on areas of the frame that aren't -// moving and uses more bits on complex objects with sharp edges that move a -// lot. For example, this feature improves the readability of text tickers on -// newscasts and scoreboards on sports matches. Enabling this feature will almost -// always improve your video quality. Note, though, that this feature doesn't -// take into account where the viewer's attention is likely to be. If viewers -// are likely to be focusing their attention on a part of the screen that doesn't -// have moving objects with sharp edges, such as sports athletes' faces, you -// might choose to disable this feature. Related setting: When you enable temporal -// quantization, adjust the strength of the filter with the setting Adaptive -// quantization (adaptiveQuantization). +// Keep the default value, Enabled, to adjust quantization within each frame +// based on temporal variation of content complexity. When you enable this feature, +// the encoder uses fewer bits on areas of the frame that aren't moving and +// uses more bits on complex objects with sharp edges that move a lot. For example, +// this feature improves the readability of text tickers on newscasts and scoreboards +// on sports matches. Enabling this feature will almost always improve your +// video quality. Note, though, that this feature doesn't take into account +// where the viewer's attention is likely to be. If viewers are likely to be +// focusing their attention on a part of the screen that doesn't have moving +// objects with sharp edges, such as sports athletes' faces, you might choose +// to disable this feature. Related setting: When you enable temporal quantization, +// adjust the strength of the filter with the setting Adaptive quantization. const ( // H265TemporalAdaptiveQuantizationDisabled is a H265TemporalAdaptiveQuantization enum value H265TemporalAdaptiveQuantizationDisabled = "DISABLED" @@ -33017,9 +32545,9 @@ func HlsAdMarkers_Values() []string { // Use this setting only in audio-only outputs. Choose MPEG-2 Transport Stream // (M2TS) to create a file in an MPEG2-TS container. Keep the default value -// Automatic (AUTOMATIC) to create a raw audio-only file with no container. 
-// Regardless of the value that you specify here, if this output has video, -// the service will place outputs into an MPEG2-TS container. +// Automatic to create a raw audio-only file with no container. Regardless of +// the value that you specify here, if this output has video, the service will +// place outputs into an MPEG2-TS container. const ( // HlsAudioOnlyContainerAutomatic is a HlsAudioOnlyContainer enum value HlsAudioOnlyContainerAutomatic = "AUTOMATIC" @@ -33037,9 +32565,9 @@ func HlsAudioOnlyContainer_Values() []string { } // Ignore this setting unless you are using FairPlay DRM with Verimatrix and -// you encounter playback issues. Keep the default value, Include (INCLUDE), -// to output audio-only headers. Choose Exclude (EXCLUDE) to remove the audio-only -// headers from your audio segments. +// you encounter playback issues. Keep the default value, Include, to output +// audio-only headers. Choose Exclude to remove the audio-only headers from +// your audio segments. const ( // HlsAudioOnlyHeaderInclude is a HlsAudioOnlyHeader enum value HlsAudioOnlyHeaderInclude = "INCLUDE" @@ -33120,11 +32648,10 @@ func HlsCaptionLanguageSetting_Values() []string { } } -// Set Caption segment length control (CaptionSegmentLengthControl) to Match -// video (MATCH_VIDEO) to create caption segments that align with the video -// segments from the first video output in this output group. For example, if -// the video segments are 2 seconds long, your WebVTT segments will also be -// 2 seconds long. Keep the default setting, Large segments (LARGE_SEGMENTS) +// Set Caption segment length control to Match video to create caption segments +// that align with the video segments from the first video output in this output +// group. For example, if the video segments are 2 seconds long, your WebVTT +// segments will also be 2 seconds long. Keep the default setting, Large segments, // to create caption segments that are 300 seconds long. const ( // HlsCaptionSegmentLengthControlLargeSegments is a HlsCaptionSegmentLengthControl enum value @@ -33143,9 +32670,8 @@ func HlsCaptionSegmentLengthControl_Values() []string { } // Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no -// tag. Otherwise, keep the default value Enabled (ENABLED) and control caching -// in your video distribution set up. For example, use the Cache-Control http -// header. +// tag. Otherwise, keep the default value Enabled and control caching in your +// video distribution setup. For example, use the Cache-Control HTTP header. const ( // HlsClientCacheDisabled is a HlsClientCache enum value HlsClientCacheDisabled = "DISABLED" @@ -33181,12 +32707,11 @@ func HlsCodecSpecification_Values() []string { } // Specify whether to flag this audio track as descriptive video service (DVS) -// in your HLS parent manifest. When you choose Flag (FLAG), MediaConvert includes +// in your HLS parent manifest. When you choose Flag, MediaConvert includes // the parameter CHARACTERISTICS="public.accessibility.describes-video" in the // EXT-X-MEDIA entry for this track. When you keep the default choice, Don't -// flag (DONT_FLAG), MediaConvert leaves this parameter out. The DVS flag can -// help with accessibility on Apple devices. For more information, see the Apple -// documentation. +// flag, MediaConvert leaves this parameter out. The DVS flag can help with +// accessibility on Apple devices. For more information, see the Apple documentation.
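A short sketch of flagging an HLS audio rendition as DVS, as the last comment describes (same imports as the first sketch; the structure around the flag is illustrative):

    // Marks this audio output as descriptive video service in the parent manifest.
    out := &mediaconvert.Output{
        OutputSettings: &mediaconvert.OutputSettings{
            HlsSettings: &mediaconvert.HlsSettings{
                DescriptiveVideoServiceFlag: aws.String(mediaconvert.HlsDescriptiveVideoServiceFlagFlag),
            },
        },
    }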
const ( // HlsDescriptiveVideoServiceFlagDontFlag is a HlsDescriptiveVideoServiceFlag enum value HlsDescriptiveVideoServiceFlagDontFlag = "DONT_FLAG" @@ -33238,13 +32763,13 @@ func HlsEncryptionType_Values() []string { } } -// Choose Include (INCLUDE) to have MediaConvert generate a child manifest that -// lists only the I-frames for this rendition, in addition to your regular manifest +// Choose Include to have MediaConvert generate a child manifest that lists +// only the I-frames for this rendition, in addition to your regular manifest // for this rendition. You might use this manifest as part of a workflow that // creates preview functions for your video. MediaConvert adds both the I-frame // only child manifest and the regular child manifest to the parent manifest. // When you don't need the I-frame only child manifest, keep the default value -// Exclude (EXCLUDE). +// Exclude. const ( // HlsIFrameOnlyManifestInclude is a HlsIFrameOnlyManifest enum value HlsIFrameOnlyManifestInclude = "INCLUDE" @@ -33262,14 +32787,13 @@ func HlsIFrameOnlyManifest_Values() []string { } // Specify whether MediaConvert generates images for trick play. Keep the default -// value, None (NONE), to not generate any images. Choose Thumbnail (THUMBNAIL) -// to generate tiled thumbnails. Choose Thumbnail and full frame (THUMBNAIL_AND_FULLFRAME) -// to generate tiled thumbnails and full-resolution images of single frames. -// MediaConvert creates a child manifest for each set of images that you generate -// and adds corresponding entries to the parent manifest. A common application -// for these images is Roku trick mode. The thumbnails and full-frame images -// that MediaConvert creates with this feature are compatible with this Roku -// specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md +// value, None, to not generate any images. Choose Thumbnail to generate tiled +// thumbnails. Choose Thumbnail and full frame to generate tiled thumbnails +// and full-resolution images of single frames. MediaConvert creates a child +// manifest for each set of images that you generate and adds corresponding +// entries to the parent manifest. A common application for these images is +// Roku trick mode. The thumbnails and full-frame images that MediaConvert creates +// with this feature are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md const ( // HlsImageBasedTrickPlayNone is a HlsImageBasedTrickPlay enum value HlsImageBasedTrickPlayNone = "NONE" @@ -33487,10 +33011,10 @@ func HlsSegmentControl_Values() []string { } // Specify how you want MediaConvert to determine the segment length. Choose -// Exact (EXACT) to have the encoder use the exact length that you specify with -// the setting Segment length (SegmentLength). This might result in extra I-frames. -// Choose Multiple of GOP (GOP_MULTIPLE) to have the encoder round up the segment -// lengths to match the next GOP boundary. +// Exact to have the encoder use the exact length that you specify with the +// setting Segment length. This might result in extra I-frames. Choose Multiple +// of GOP to have the encoder round up the segment lengths to match the next +// GOP boundary. 
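A sketch of the segment-length rounding behavior just described (same imports; the 6-second target is illustrative):

    // Round each ~6 s HLS segment up to the next GOP boundary rather than
    // forcing extra I-frames with EXACT.
    hls := &mediaconvert.HlsGroupSettings{
        SegmentLength:        aws.Int64(6),
        SegmentLengthControl: aws.String(mediaconvert.HlsSegmentLengthControlGopMultiple),
    }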
const ( // HlsSegmentLengthControlExact is a HlsSegmentLengthControl enum value HlsSegmentLengthControlExact = "EXACT" @@ -33549,11 +33073,9 @@ func HlsTargetDurationCompatibilityMode_Values() []string { } } -// Specify the type of the ID3 frame (timedMetadataId3Frame) to use for ID3 -// timestamps (timedMetadataId3Period) in your output. To include ID3 timestamps: -// Specify PRIV (PRIV) or TDRL (TDRL) and set ID3 metadata (timedMetadata) to -// Passthrough (PASSTHROUGH). To exclude ID3 timestamps: Set ID3 timestamp frame -// type to None (NONE). +// Specify the type of the ID3 frame to use for ID3 timestamps in your output. +// To include ID3 timestamps: Specify PRIV or TDRL and set ID3 metadata to Passthrough. +// To exclude ID3 timestamps: Set ID3 timestamp frame type to None. const ( // HlsTimedMetadataId3FrameNone is a HlsTimedMetadataId3Frame enum value HlsTimedMetadataId3FrameNone = "NONE" @@ -33620,9 +33142,8 @@ func ImscStylePassthrough_Values() []string { } } -// Enable Deblock (InputDeblockFilter) to produce smoother motion in the output. -// Default is disabled. Only manually controllable for MPEG2 and uncompressed -// video inputs. +// Enable Deblock to produce smoother motion in the output. Default is disabled. +// Only manually controllable for MPEG2 and uncompressed video inputs. const ( // InputDeblockFilterEnabled is a InputDeblockFilter enum value InputDeblockFilterEnabled = "ENABLED" @@ -33639,9 +33160,8 @@ func InputDeblockFilter_Values() []string { } } -// Enable Denoise (InputDenoiseFilter) to filter noise from the input. Default -// is disabled. Only applicable to MPEG2, H.264, H.265, and uncompressed video -// inputs. +// Enable Denoise to filter noise from the input. Default is disabled. Only +// applicable to MPEG2, H.264, H.265, and uncompressed video inputs. const ( // InputDenoiseFilterEnabled is a InputDenoiseFilter enum value InputDenoiseFilterEnabled = "ENABLED" @@ -33701,9 +33221,9 @@ func InputPolicy_Values() []string { } } -// Set PSI control (InputPsiControl) for transport stream inputs to specify -// which data the demux process to scans. * Ignore PSI - Scan all PIDs for audio -// and video. * Use PSI - Scan only PSI data. +// Set PSI control for transport stream inputs to specify which data the demux +// process scans. * Ignore PSI - Scan all PIDs for audio and video. * Use PSI +// - Scan only PSI data. const ( // InputPsiControlIgnorePsi is a InputPsiControl enum value InputPsiControlIgnorePsi = "IGNORE_PSI" @@ -33720,16 +33240,15 @@ func InputPsiControl_Values() []string { } } -// Use Rotate (InputRotate) to specify how the service rotates your video. You -// can choose automatic rotation or specify a rotation. You can specify a clockwise -// rotation of 0, 90, 180, or 270 degrees. If your input video container is -// .mov or .mp4 and your input has rotation metadata, you can choose Automatic -// to have the service rotate your video according to the rotation specified -// in the metadata. The rotation must be within one degree of 90, 180, or 270 -// degrees. If the rotation metadata specifies any other rotation, the service -// will default to no rotation. By default, the service does no rotation, even -// if your input video has rotation metadata. The service doesn't pass through -// rotation metadata. +// Use Rotate to specify how the service rotates your video. You can choose +// automatic rotation or specify a rotation. You can specify a clockwise rotation +// of 0, 90, 180, or 270 degrees.
If your input video container is .mov or .mp4 +// and your input has rotation metadata, you can choose Automatic to have the +// service rotate your video according to the rotation specified in the metadata. +// The rotation must be within one degree of 90, 180, or 270 degrees. If the +// rotation metadata specifies any other rotation, the service will default +// to no rotation. By default, the service does no rotation, even if your input +// video has rotation metadata. The service doesn't pass through rotation metadata. const ( // InputRotateDegree0 is a InputRotate enum value InputRotateDegree0 = "DEGREE_0" @@ -33759,14 +33278,13 @@ func InputRotate_Values() []string { } // If the sample range metadata in your input video is accurate, or if you don't -// know about sample range, keep the default value, Follow (FOLLOW), for this -// setting. When you do, the service automatically detects your input sample -// range. If your input video has metadata indicating the wrong sample range, -// specify the accurate sample range here. When you do, MediaConvert ignores -// any sample range information in the input metadata. Regardless of whether -// MediaConvert uses the input sample range or the sample range that you specify, -// MediaConvert uses the sample range for transcoding and also writes it to -// the output metadata. +// know about sample range, keep the default value, Follow, for this setting. +// When you do, the service automatically detects your input sample range. If +// your input video has metadata indicating the wrong sample range, specify +// the accurate sample range here. When you do, MediaConvert ignores any sample +// range information in the input metadata. Regardless of whether MediaConvert +// uses the input sample range or the sample range that you specify, MediaConvert +// uses the sample range for transcoding and also writes it to the output metadata. const ( // InputSampleRangeFollow is a InputSampleRange enum value InputSampleRangeFollow = "FOLLOW" @@ -33791,9 +33309,9 @@ func InputSampleRange_Values() []string { // to flag the input as PsF. MediaConvert doesn't automatically detect PsF. // Therefore, flagging your input as PsF results in better preservation of video // quality when you do deinterlacing and frame rate conversion. If you don't -// specify, the default value is Auto (AUTO). Auto is the correct setting for -// all inputs that are not PsF. Don't set this value to PsF when your input -// is interlaced. Doing so creates horizontal interlacing artifacts. +// specify, the default value is Auto. Auto is the correct setting for all inputs +// that are not PsF. Don't set this value to PsF when your input is interlaced. +// Doing so creates horizontal interlacing artifacts. const ( // InputScanTypeAuto is a InputScanType enum value InputScanTypeAuto = "AUTO" @@ -33810,16 +33328,15 @@ func InputScanType_Values() []string { } } -// Use this Timecode source setting, located under the input settings (InputTimecodeSource), -// to specify how the service counts input video frames. This input frame count -// affects only the behavior of features that apply to a single input at a time, -// such as input clipping and synchronizing some captions formats. Choose Embedded -// (EMBEDDED) to use the timecodes in your input video. Choose Start at zero -// (ZEROBASED) to start the first frame at zero. Choose Specified start (SPECIFIEDSTART) -// to start the first frame at the timecode that you specify in the setting -// Start timecode (timecodeStart). 
If you don't specify a value for Timecode -// source, the service will use Embedded by default. For more information about -// timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode. +// Use this Timecode source setting, located under the input settings, to specify +// how the service counts input video frames. This input frame count affects +// only the behavior of features that apply to a single input at a time, such +// as input clipping and synchronizing some captions formats. Choose Embedded +// to use the timecodes in your input video. Choose Start at zero to start the +// first frame at zero. Choose Specified start to start the first frame at the +// timecode that you specify in the setting Start timecode. If you don't specify +// a value for Timecode source, the service will use Embedded by default. For +// more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode. const ( // InputTimecodeSourceEmbedded is a InputTimecodeSource enum value InputTimecodeSourceEmbedded = "EMBEDDED" @@ -34709,17 +34226,17 @@ func M2tsAudioBufferModel_Values() []string { // Specify this setting only when your output will be consumed by a downstream // repackaging workflow that is sensitive to very small duration differences -// between video and audio. For this situation, choose Match video duration -// (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default -// codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, -// MediaConvert pads the output audio streams with silence or trims them to -// ensure that the total duration of each audio stream is at least as long as -// the total duration of the video stream. After padding or trimming, the audio -// stream duration is no more than one frame longer than the video stream. MediaConvert -// applies audio padding or trimming only to the end of the last segment of -// the output. For unsegmented outputs, MediaConvert adds padding only to the -// end of the file. When you keep the default value, any minor discrepancies -// between audio and video duration will depend on your output audio codec. +// between video and audio. For this situation, choose Match video duration. +// In all other cases, keep the default value, Default codec duration. When +// you choose Match video duration, MediaConvert pads the output audio streams +// with silence or trims them to ensure that the total duration of each audio +// stream is at least as long as the total duration of the video stream. After +// padding or trimming, the audio stream duration is no more than one frame +// longer than the video stream. MediaConvert applies audio padding or trimming +// only to the end of the last segment of the output. For unsegmented outputs, +// MediaConvert adds padding only to the end of the file. When you keep the +// default value, any minor discrepancies between audio and video duration will +// depend on your output audio codec. const ( // M2tsAudioDurationDefaultCodecDuration is a M2tsAudioDuration enum value M2tsAudioDurationDefaultCodecDuration = "DEFAULT_CODEC_DURATION" @@ -34758,7 +34275,7 @@ func M2tsBufferModel_Values() []string { // If you select ALIGN_TO_VIDEO, MediaConvert writes captions and data packets // with Presentation Timestamp (PTS) values greater than or equal to the first // video packet PTS (MediaConvert drops captions and data packets with lesser -// PTS values). Keep the default value (AUTO) to allow all PTS values. +// PTS values). 
Keep the default value to allow all PTS values. const ( // M2tsDataPtsControlAuto is a M2tsDataPtsControl enum value M2tsDataPtsControlAuto = "AUTO" @@ -34834,9 +34351,9 @@ func M2tsEsRateInPes_Values() []string { } } -// Keep the default value (DEFAULT) unless you know that your audio EBP markers -// are incorrectly appearing before your video EBP markers. To correct this -// problem, set this value to Force (FORCE). +// Keep the default value unless you know that your audio EBP markers are incorrectly +// appearing before your video EBP markers. To correct this problem, set this +// value to Force. const ( // M2tsForceTsVideoEbpOrderForce is a M2tsForceTsVideoEbpOrder enum value M2tsForceTsVideoEbpOrderForce = "FORCE" @@ -34929,12 +34446,12 @@ func M2tsRateMode_Values() []string { } } -// For SCTE-35 markers from your input-- Choose Passthrough (PASSTHROUGH) if -// you want SCTE-35 markers that appear in your input to also appear in this -// output. Choose None (NONE) if you don't want SCTE-35 markers in this output. -// For SCTE-35 markers from an ESAM XML document-- Choose None (NONE). Also -// provide the ESAM XML as a string in the setting Signal processing notification -// XML (sccXml). Also enable ESAM SCTE-35 (include the property scte35Esam). +// For SCTE-35 markers from your input-- Choose Passthrough if you want SCTE-35 +// markers that appear in your input to also appear in this output. Choose None +// if you don't want SCTE-35 markers in this output. For SCTE-35 markers from +// an ESAM XML document-- Choose None. Also provide the ESAM XML as a string +// in the setting Signal processing notification XML. Also enable ESAM SCTE-35 +// (include the property scte35Esam). const ( // M2tsScte35SourcePassthrough is a M2tsScte35Source enum value M2tsScte35SourcePassthrough = "PASSTHROUGH" @@ -35019,17 +34536,17 @@ func M2tsSegmentationStyle_Values() []string { // Specify this setting only when your output will be consumed by a downstream // repackaging workflow that is sensitive to very small duration differences -// between video and audio. For this situation, choose Match video duration -// (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default -// codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, -// MediaConvert pads the output audio streams with silence or trims them to -// ensure that the total duration of each audio stream is at least as long as -// the total duration of the video stream. After padding or trimming, the audio -// stream duration is no more than one frame longer than the video stream. MediaConvert -// applies audio padding or trimming only to the end of the last segment of -// the output. For unsegmented outputs, MediaConvert adds padding only to the -// end of the file. When you keep the default value, any minor discrepancies -// between audio and video duration will depend on your output audio codec. +// between video and audio. For this situation, choose Match video duration. +// In all other cases, keep the default value, Default codec duration. When +// you choose Match video duration, MediaConvert pads the output audio streams +// with silence or trims them to ensure that the total duration of each audio +// stream is at least as long as the total duration of the video stream. After +// padding or trimming, the audio stream duration is no more than one frame +// longer than the video stream. MediaConvert applies audio padding or trimming +// only to the end of the last segment of the output. 
For unsegmented outputs, +// MediaConvert adds padding only to the end of the file. When you keep the +// default value, any minor discrepancies between audio and video duration will +// depend on your output audio codec. const ( // M3u8AudioDurationDefaultCodecDuration is a M3u8AudioDuration enum value M3u8AudioDurationDefaultCodecDuration = "DEFAULT_CODEC_DURATION" @@ -35049,7 +34566,7 @@ func M3u8AudioDuration_Values() []string { // If you select ALIGN_TO_VIDEO, MediaConvert writes captions and data packets // with Presentation Timestamp (PTS) values greater than or equal to the first // video packet PTS (MediaConvert drops captions and data packets with lesser -// PTS values). Keep the default value (AUTO) to allow all PTS values. +// PTS values). Keep the default value AUTO to allow all PTS values. const ( // M3u8DataPtsControlAuto is a M3u8DataPtsControl enum value M3u8DataPtsControlAuto = "AUTO" @@ -35103,14 +34620,13 @@ func M3u8PcrControl_Values() []string { } } -// For SCTE-35 markers from your input-- Choose Passthrough (PASSTHROUGH) if -// you want SCTE-35 markers that appear in your input to also appear in this -// output. Choose None (NONE) if you don't want SCTE-35 markers in this output. -// For SCTE-35 markers from an ESAM XML document-- Choose None (NONE) if you -// don't want manifest conditioning. Choose Passthrough (PASSTHROUGH) and choose -// Ad markers (adMarkers) if you do want manifest conditioning. In both cases, -// also provide the ESAM XML as a string in the setting Signal processing notification -// XML (sccXml). +// For SCTE-35 markers from your input-- Choose Passthrough if you want SCTE-35 +// markers that appear in your input to also appear in this output. Choose None +// if you don't want SCTE-35 markers in this output. For SCTE-35 markers from +// an ESAM XML document-- Choose None if you don't want manifest conditioning. +// Choose Passthrough and choose Ad markers if you do want manifest conditioning. +// In both cases, also provide the ESAM XML as a string in the setting Signal +// processing notification XML. const ( // M3u8Scte35SourcePassthrough is a M3u8Scte35Source enum value M3u8Scte35SourcePassthrough = "PASSTHROUGH" @@ -35332,12 +34848,11 @@ func Mp4MoovPlacement_Values() []string { } } -// Optional. Choose Include (INCLUDE) to have MediaConvert mark up your DASH -// manifest with elements for embedded 608 captions. This markup isn't generally -// required, but some video players require it to discover and play embedded -// 608 captions. Keep the default value, Exclude (EXCLUDE), to leave these elements -// out. When you enable this setting, this is the markup that MediaConvert includes -// in your manifest: +// Optional. Choose Include to have MediaConvert mark up your DASH manifest +// with elements for embedded 608 captions. This markup isn't generally required, +// but some video players require it to discover and play embedded 608 captions. +// Keep the default value, Exclude, to leave these elements out. When you enable +// this setting, this is the markup that MediaConvert includes in your manifest: const ( // MpdAccessibilityCaptionHintsInclude is a MpdAccessibilityCaptionHints enum value MpdAccessibilityCaptionHintsInclude = "INCLUDE" @@ -35356,17 +34871,17 @@ func MpdAccessibilityCaptionHints_Values() []string { // Specify this setting only when your output will be consumed by a downstream // repackaging workflow that is sensitive to very small duration differences -// between video and audio. 
For this situation, choose Match video duration -// (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default -// codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, -// MediaConvert pads the output audio streams with silence or trims them to -// ensure that the total duration of each audio stream is at least as long as -// the total duration of the video stream. After padding or trimming, the audio -// stream duration is no more than one frame longer than the video stream. MediaConvert -// applies audio padding or trimming only to the end of the last segment of -// the output. For unsegmented outputs, MediaConvert adds padding only to the -// end of the file. When you keep the default value, any minor discrepancies -// between audio and video duration will depend on your output audio codec. +// between video and audio. For this situation, choose Match video duration. +// In all other cases, keep the default value, Default codec duration. When +// you choose Match video duration, MediaConvert pads the output audio streams +// with silence or trims them to ensure that the total duration of each audio +// stream is at least as long as the total duration of the video stream. After +// padding or trimming, the audio stream duration is no more than one frame +// longer than the video stream. MediaConvert applies audio padding or trimming +// only to the end of the last segment of the output. For unsegmented outputs, +// MediaConvert adds padding only to the end of the file. When you keep the +// default value, any minor discrepancies between audio and video duration will +// depend on your output audio codec. const ( // MpdAudioDurationDefaultCodecDuration is a MpdAudioDuration enum value MpdAudioDurationDefaultCodecDuration = "DEFAULT_CODEC_DURATION" @@ -35385,10 +34900,10 @@ func MpdAudioDuration_Values() []string { // Use this setting only in DASH output groups that include sidecar TTML or // IMSC captions. You specify sidecar captions in a separate output from your -// audio and video. Choose Raw (RAW) for captions in a single XML file in a -// raw container. Choose Fragmented MPEG-4 (FRAGMENTED_MP4) for captions in -// XML format contained within fragmented MP4 files. This set of fragmented -// MP4 files is separate from your video and audio fragmented MP4 files. +// audio and video. Choose Raw for captions in a single XML file in a raw container. +// Choose Fragmented MPEG-4 for captions in XML format contained within fragmented +// MP4 files. This set of fragmented MP4 files is separate from your video and +// audio fragmented MP4 files. const ( // MpdCaptionContainerTypeRaw is a MpdCaptionContainerType enum value MpdCaptionContainerTypeRaw = "RAW" @@ -35434,7 +34949,7 @@ func MpdKlvMetadata_Values() []string { // To leave these elements out of your output MPD manifest, set Manifest metadata // signaling to Disabled. To enable Manifest metadata signaling, you must also // set SCTE-35 source to Passthrough, ESAM SCTE-35 to insert, or ID3 metadata -// (TimedMetadata) to Passthrough. +// to Passthrough. const ( // MpdManifestMetadataSignalingEnabled is a MpdManifestMetadataSignaling enum value MpdManifestMetadataSignalingEnabled = "ENABLED" @@ -35454,7 +34969,7 @@ func MpdManifestMetadataSignaling_Values() []string { // Use this setting only when you specify SCTE-35 markers from ESAM. Choose // INSERT to put SCTE-35 markers in this output at the insertion points that // you specify in an ESAM XML document. 
Provide the document in the setting -// SCC XML (sccXml). +// SCC XML. const ( // MpdScte35EsamInsert is a MpdScte35Esam enum value MpdScte35EsamInsert = "INSERT" @@ -35472,9 +34987,9 @@ func MpdScte35Esam_Values() []string { } // Ignore this setting unless you have SCTE-35 markers in your input video file. -// Choose Passthrough (PASSTHROUGH) if you want SCTE-35 markers that appear -// in your input to also appear in this output. Choose None (NONE) if you don't -// want those SCTE-35 markers in this output. +// Choose Passthrough if you want SCTE-35 markers that appear in your input +// to also appear in this output. Choose None if you don't want those SCTE-35 +// markers in this output. const ( // MpdScte35SourcePassthrough is a MpdScte35Source enum value MpdScte35SourcePassthrough = "PASSTHROUGH" @@ -35491,11 +35006,10 @@ func MpdScte35Source_Values() []string { } } -// To include ID3 metadata in this output: Set ID3 metadata (timedMetadata) -// to Passthrough (PASSTHROUGH). Specify this ID3 metadata in Custom ID3 metadata -// inserter (timedMetadataInsertion). MediaConvert writes each instance of ID3 -// metadata in a separate Event Message (eMSG) box. To exclude this ID3 metadata: -// Set ID3 metadata to None (NONE) or leave blank. +// To include ID3 metadata in this output: Set ID3 metadata to Passthrough. +// Specify this ID3 metadata in Custom ID3 metadata inserter. MediaConvert writes +// each instance of ID3 metadata in a separate Event Message (eMSG) box. To +// exclude this ID3 metadata: Set ID3 metadata to None or leave blank. const ( // MpdTimedMetadataPassthrough is a MpdTimedMetadata enum value MpdTimedMetadataPassthrough = "PASSTHROUGH" @@ -35515,7 +35029,7 @@ func MpdTimedMetadata_Values() []string { // Specify the event message box (eMSG) version for ID3 timed metadata in your // output. For more information, see ISO/IEC 23009-1:2022 section 5.10.3.3.3 // Syntax. Leave blank to use the default value Version 0. When you specify Version -// 1, you must also set ID3 metadata (timedMetadata) to Passthrough. +// 1, you must also set ID3 metadata to Passthrough. const ( // MpdTimedMetadataBoxVersionVersion0 is a MpdTimedMetadataBoxVersion enum value MpdTimedMetadataBoxVersionVersion0 = "VERSION_0" @@ -35534,8 +35048,7 @@ func MpdTimedMetadataBoxVersion_Values() []string { // Specify the strength of any adaptive quantization filters that you enable. // The value that you choose here applies to the following settings: Spatial -// adaptive quantization (spatialAdaptiveQuantization), and Temporal adaptive -// quantization (temporalAdaptiveQuantization). +// adaptive quantization and Temporal adaptive quantization. const ( // Mpeg2AdaptiveQuantizationOff is a Mpeg2AdaptiveQuantization enum value Mpeg2AdaptiveQuantizationOff = "OFF" @@ -35560,7 +35073,7 @@ func Mpeg2AdaptiveQuantization_Values() []string { } } -// Use Level (Mpeg2CodecLevel) to set the MPEG-2 level for the video output. +// Use Level to set the MPEG-2 level for the video output. const ( // Mpeg2CodecLevelAuto is a Mpeg2CodecLevel enum value Mpeg2CodecLevelAuto = "AUTO" @@ -35589,7 +35102,7 @@ func Mpeg2CodecLevel_Values() []string { } } -// Use Profile (Mpeg2CodecProfile) to set the MPEG-2 profile for the video output. +// Use Profile to set the MPEG-2 profile for the video output.
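A sketch of an MPEG-2 profile and level selection (same imports; the 4:2:2 profile, High level, and CBR bitrate are illustrative choices, not values from this diff):

    mpeg2 := &mediaconvert.Mpeg2Settings{
        CodecProfile:    aws.String(mediaconvert.Mpeg2CodecProfileProfile422),
        CodecLevel:      aws.String(mediaconvert.Mpeg2CodecLevelHigh),
        RateControlMode: aws.String(mediaconvert.Mpeg2RateControlModeCbr),
        Bitrate:         aws.Int64(30000000), // constant bitrate, in bits/s
    }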
const ( // Mpeg2CodecProfileMain is a Mpeg2CodecProfile enum value Mpeg2CodecProfileMain = "MAIN" @@ -35610,7 +35123,7 @@ func Mpeg2CodecProfile_Values() []string { // This will cause the service to use fewer B-frames (which infer information // based on other frames) for high-motion portions of the video and more B-frames // for low-motion portions. The maximum number of B-frames is limited by the -// value you provide for the setting B frames between reference frames (numberBFramesBetweenReferenceFrames). +// value you provide for the setting B frames between reference frames. const ( // Mpeg2DynamicSubGopAdaptive is a Mpeg2DynamicSubGop enum value Mpeg2DynamicSubGopAdaptive = "ADAPTIVE" @@ -35632,12 +35145,7 @@ func Mpeg2DynamicSubGop_Values() []string { // video, choose Follow source. If you want to do frame rate conversion, choose // a frame rate from the dropdown list or choose Custom. The framerates shown // in the dropdown list are decimal approximations of fractions. If you choose -// Custom, specify your frame rate as a fraction. If you are creating your transcoding -// job specification as a JSON file without the console, use FramerateControl -// to specify which value the service uses for the frame rate for this output. -// Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate -// from the input. Choose SPECIFIED if you want the service to use the frame -// rate you specify in the settings FramerateNumerator and FramerateDenominator. +// Custom, specify your frame rate as a fraction. const ( // Mpeg2FramerateControlInitializeFromSource is a Mpeg2FramerateControl enum value Mpeg2FramerateControlInitializeFromSource = "INITIALIZE_FROM_SOURCE" @@ -35685,8 +35193,8 @@ func Mpeg2FramerateConversionAlgorithm_Values() []string { } } -// Specify the units for GOP size (GopSize). If you don't specify a value here, -// by default the encoder measures GOP size in frames. +// Specify the units for GOP size. If you don't specify a value here, by default +// the encoder measures GOP size in frames. const ( // Mpeg2GopSizeUnitsFrames is a Mpeg2GopSizeUnits enum value Mpeg2GopSizeUnitsFrames = "FRAMES" @@ -35704,17 +35212,16 @@ func Mpeg2GopSizeUnits_Values() []string { } // Choose the scan line type for the output. Keep the default value, Progressive -// (PROGRESSIVE) to create a progressive output, regardless of the scan type -// of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) -// to create an output that's interlaced with the same field polarity throughout. -// Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) -// to produce outputs with the same field polarity as the source. For jobs that -// have multiple inputs, the output field polarity might change over the course -// of the output. Follow behavior depends on the input scan type. If the source -// is interlaced, the output will be interlaced with the same polarity as the -// source. If the source is progressive, the output will be interlaced with -// top field bottom field first, depending on which of the Follow options you -// choose. +// to create a progressive output, regardless of the scan type of your input. +// Use Top field first or Bottom field first to create an output that's interlaced +// with the same field polarity throughout. Use Follow, default top or Follow, +// default bottom to produce outputs with the same field polarity as the source. 
+// For jobs that have multiple inputs, the output field polarity might change +// over the course of the output. Follow behavior depends on the input scan +// type. If the source is interlaced, the output will be interlaced with the +// same polarity as the source. If the source is progressive, the output will +// be interlaced with top field or bottom field first, depending on which of +// the Follow options you choose. const ( // Mpeg2InterlaceModeProgressive is a Mpeg2InterlaceMode enum value Mpeg2InterlaceModeProgressive = "PROGRESSIVE" @@ -35743,10 +35250,9 @@ func Mpeg2InterlaceMode_Values() []string { } } -// Use Intra DC precision (Mpeg2IntraDcPrecision) to set quantization precision -// for intra-block DC coefficients. If you choose the value auto, the service -// will automatically select the precision based on the per-frame compression -// ratio. +// Use Intra DC precision to set quantization precision for intra-block DC coefficients. +// If you choose the value auto, the service will automatically select the precision +// based on the per-frame compression ratio. const ( // Mpeg2IntraDcPrecisionAuto is a Mpeg2IntraDcPrecision enum value Mpeg2IntraDcPrecisionAuto = "AUTO" @@ -35776,12 +35282,10 @@ func Mpeg2IntraDcPrecision_Values() []string { } // Optional. Specify how the service determines the pixel aspect ratio (PAR) -// for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), -// uses the PAR from your input video for your output. To specify a different -// PAR in the console, choose any value other than Follow source. To specify -// a different PAR by editing the JSON job specification, choose SPECIFIED. -// When you choose SPECIFIED for this setting, you must also specify values -// for the parNumerator and parDenominator settings. +// for this output. The default behavior, Follow source, uses the PAR from your +// input video for your output. To specify a different PAR in the console, choose +// any value other than Follow source. When you choose SPECIFIED for this setting, +// you must also specify values for the parNumerator and parDenominator settings. const ( // Mpeg2ParControlInitializeFromSource is a Mpeg2ParControl enum value Mpeg2ParControlInitializeFromSource = "INITIALIZE_FROM_SOURCE" @@ -35798,9 +35302,9 @@ func Mpeg2ParControl_Values() []string { } } -// Optional. Use Quality tuning level (qualityTuningLevel) to choose how you -// want to trade off encoding speed for output video quality. The default behavior -// is faster, lower quality, single-pass encoding. +// Optional. Use Quality tuning level to choose how you want to trade off encoding +// speed for output video quality. The default behavior is faster, lower quality, +// single-pass encoding. const ( // Mpeg2QualityTuningLevelSinglePass is a Mpeg2QualityTuningLevel enum value Mpeg2QualityTuningLevelSinglePass = "SINGLE_PASS" @@ -35817,8 +35321,8 @@ func Mpeg2QualityTuningLevel_Values() []string { } } -// Use Rate control mode (Mpeg2RateControlMode) to specify whether the bitrate -// is variable (vbr) or constant (cbr). +// Use Rate control mode to specify whether the bitrate is variable (vbr) or +// constant (cbr). const ( // Mpeg2RateControlModeVbr is a Mpeg2RateControlMode enum value Mpeg2RateControlModeVbr = "VBR" @@ -35837,17 +35341,16 @@ func Mpeg2RateControlMode_Values() []string { // Use this setting for interlaced outputs, when your output frame rate is half -// of your input frame rate.
In this situation, choose Optimized interlacing -// (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this -// case, each progressive frame from the input corresponds to an interlaced -// field in the output. Keep the default value, Basic interlacing (INTERLACED), -// for all other output frame rates. With basic interlacing, MediaConvert performs -// any frame rate conversion first and then interlaces the frames. When you -// choose Optimized interlacing and you set your output frame rate to a value -// that isn't suitable for optimized interlacing, MediaConvert automatically -// falls back to basic interlacing. Required settings: To use optimized interlacing, -// you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't -// use optimized interlacing for hard telecine outputs. You must also set Interlace -// mode (interlaceMode) to a value other than Progressive (PROGRESSIVE). +// to create a better quality interlaced output. In this case, each progressive +// frame from the input corresponds to an interlaced field in the output. Keep +// the default value, Basic interlacing, for all other output frame rates. With +// basic interlacing, MediaConvert performs any frame rate conversion first +// and then interlaces the frames. When you choose Optimized interlacing and +// you set your output frame rate to a value that isn't suitable for optimized +// interlacing, MediaConvert automatically falls back to basic interlacing. +// Required settings: To use optimized interlacing, you must set Telecine to +// None or Soft. You can't use optimized interlacing for hard telecine outputs. +// You must also set Interlace mode to a value other than Progressive. const ( // Mpeg2ScanTypeConversionModeInterlaced is a Mpeg2ScanTypeConversionMode enum value Mpeg2ScanTypeConversionModeInterlaced = "INTERLACED" @@ -35887,9 +35390,7 @@ func Mpeg2SceneChangeDetect_Values() []string { // slow PAL, MediaConvert relabels the video frames to 25 fps and resamples // your audio to keep it synchronized with the video. Note that enabling this // setting will slightly reduce the duration of your video. Required settings: -// You must also set Framerate to 25. In your JSON job specification, set (framerateControl) -// to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to -// 1. +// You must also set Framerate to 25. const ( // Mpeg2SlowPalDisabled is a Mpeg2SlowPal enum value Mpeg2SlowPalDisabled = "DISABLED" @@ -35906,21 +35407,20 @@ func Mpeg2SlowPal_Values() []string { } } -// Keep the default value, Enabled (ENABLED), to adjust quantization within -// each frame based on spatial variation of content complexity. When you enable -// this feature, the encoder uses fewer bits on areas that can sustain more -// distortion with no noticeable visual degradation and uses more bits on areas -// where any small distortion will be noticeable. For example, complex textured -// blocks are encoded with fewer bits and smooth textured blocks are encoded -// with more bits. Enabling this feature will almost always improve your video -// quality. Note, though, that this feature doesn't take into account where -// the viewer's attention is likely to be. If viewers are likely to be focusing -// their attention on a part of the screen with a lot of complex texture, you -// might choose to disable this feature. Related setting: When you enable spatial -// adaptive quantization, set the value for Adaptive quantization (adaptiveQuantization) -// depending on your content. 
For homogeneous content, such as cartoons and -// video games, set it to Low. For content with a wider variety of textures, -// set it to High or Higher. +// Keep the default value, Enabled, to adjust quantization within each frame +// based on spatial variation of content complexity. When you enable this feature, +// the encoder uses fewer bits on areas that can sustain more distortion with +// no noticeable visual degradation and uses more bits on areas where any small +// distortion will be noticeable. For example, complex textured blocks are encoded +// with fewer bits and smooth textured blocks are encoded with more bits. Enabling +// this feature will almost always improve your video quality. Note, though, +// that this feature doesn't take into account where the viewer's attention +// is likely to be. If viewers are likely to be focusing their attention on +// a part of the screen with a lot of complex texture, you might choose to disable +// this feature. Related setting: When you enable spatial adaptive quantization, +// set the value for Adaptive quantization depending on your content. For homogeneous +// content, such as cartoons and video games, set it to Low. For content with +// a wider variety of textures, set it to High or Higher. const ( // Mpeg2SpatialAdaptiveQuantizationDisabled is a Mpeg2SpatialAdaptiveQuantization enum value Mpeg2SpatialAdaptiveQuantizationDisabled = "DISABLED" @@ -35938,8 +35438,8 @@ func Mpeg2SpatialAdaptiveQuantization_Values() []string { } // Specify whether this output's video uses the D10 syntax. Keep the default -// value to not use the syntax. Related settings: When you choose D10 (D_10) -// for your MXF profile (profile), you must also set this value to D10 (D_10). +// value to not use the syntax. Related settings: When you choose D10 for your +// MXF profile, you must also set this value to D10. const ( // Mpeg2SyntaxDefault is a Mpeg2Syntax enum value Mpeg2SyntaxDefault = "DEFAULT" @@ -35958,12 +35458,12 @@ func Mpeg2Syntax_Values() []string { // When you do frame rate conversion from 23.976 frames per second (fps) to // 29.97 fps, and your output scan type is interlaced, you can optionally enable -// hard or soft telecine to create a smoother picture. Hard telecine (HARD) -// produces a 29.97i output. Soft telecine (SOFT) produces an output with a -// 23.976 output that signals to the video player device to do the conversion -// during play back. When you keep the default value, None (NONE), MediaConvert -// does a standard frame rate conversion to 29.97 without doing anything with -// the field polarity to create a smoother picture. +// hard or soft telecine to create a smoother picture. Hard telecine produces +// a 29.97i output. Soft telecine produces a 23.976 output that +// signals to the video player device to do the conversion during playback. +// When you keep the default value, None, MediaConvert does a standard frame +// rate conversion to 29.97 without doing anything with the field polarity to +// create a smoother picture. const ( // Mpeg2TelecineNone is a Mpeg2Telecine enum value Mpeg2TelecineNone = "NONE" @@ -35984,19 +35484,18 @@ func Mpeg2Telecine_Values() []string { } } -// Keep the default value, Enabled (ENABLED), to adjust quantization within -// each frame based on temporal variation of content complexity. When you enable -// this feature, the encoder uses fewer bits on areas of the frame that aren't -// moving and uses more bits on complex objects with sharp edges that move a -// lot.
For example, this feature improves the readability of text tickers on -// newscasts and scoreboards on sports matches. Enabling this feature will almost -// always improve your video quality. Note, though, that this feature doesn't -// take into account where the viewer's attention is likely to be. If viewers -// are likely to be focusing their attention on a part of the screen that doesn't -// have moving objects with sharp edges, such as sports athletes' faces, you -// might choose to disable this feature. Related setting: When you enable temporal -// quantization, adjust the strength of the filter with the setting Adaptive -// quantization (adaptiveQuantization). +// Keep the default value, Enabled, to adjust quantization within each frame +// based on temporal variation of content complexity. When you enable this feature, +// the encoder uses fewer bits on areas of the frame that aren't moving and +// uses more bits on complex objects with sharp edges that move a lot. For example, +// this feature improves the readability of text tickers on newscasts and scoreboards +// on sports matches. Enabling this feature will almost always improve your +// video quality. Note, though, that this feature doesn't take into account +// where the viewer's attention is likely to be. If viewers are likely to be +// focusing their attention on a part of the screen that doesn't have moving +// objects with sharp edges, such as sports athletes' faces, you might choose +// to disable this feature. Related setting: When you enable temporal quantization, +// adjust the strength of the filter with the setting Adaptive quantization. const ( // Mpeg2TemporalAdaptiveQuantizationDisabled is a Mpeg2TemporalAdaptiveQuantization enum value Mpeg2TemporalAdaptiveQuantizationDisabled = "DISABLED" @@ -36032,10 +35531,10 @@ func MsSmoothAudioDeduplication_Values() []string { } // Specify how you want MediaConvert to determine the fragment length. Choose -// Exact (EXACT) to have the encoder use the exact length that you specify with -// the setting Fragment length (FragmentLength). This might result in extra -// I-frames. Choose Multiple of GOP (GOP_MULTIPLE) to have the encoder round -// up the segment lengths to match the next GOP boundary. +// Exact to have the encoder use the exact length that you specify with the +// setting Fragment length. This might result in extra I-frames. Choose Multiple +// of GOP to have the encoder round up the segment lengths to match the next +// GOP boundary. const ( // MsSmoothFragmentLengthControlExact is a MsSmoothFragmentLengthControl enum value MsSmoothFragmentLengthControlExact = "EXACT" @@ -36052,8 +35551,8 @@ func MsSmoothFragmentLengthControl_Values() []string { } } -// Use Manifest encoding (MsSmoothManifestEncoding) to specify the encoding -// format for the server and client manifest. Valid options are utf8 and utf16. +// Use Manifest encoding to specify the encoding format for the server and client +// manifest. Valid options are utf8 and utf16. const ( // MsSmoothManifestEncodingUtf8 is a MsSmoothManifestEncoding enum value MsSmoothManifestEncodingUtf8 = "UTF8" @@ -36072,13 +35571,12 @@ func MsSmoothManifestEncoding_Values() []string { // Optional. When you have AFD signaling set up in your output video stream, // use this setting to choose whether to also include it in the MXF wrapper. -// Choose Don't copy (NO_COPY) to exclude AFD signaling from the MXF wrapper. 
-// Choose Copy from video stream (COPY_FROM_VIDEO) to copy the AFD values from -// the video stream for this output to the MXF wrapper. Regardless of which -// option you choose, the AFD values remain in the video stream. Related settings: -// To set up your output to include or exclude AFD values, see AfdSignaling, -// under VideoDescription. On the console, find AFD signaling under the output's -// video encoding settings. +// Choose Don't copy to exclude AFD signaling from the MXF wrapper. Choose Copy +// from video stream to copy the AFD values from the video stream for this output +// to the MXF wrapper. Regardless of which option you choose, the AFD values +// remain in the video stream. Related settings: To set up your output to include +// or exclude AFD values, see AfdSignaling, under VideoDescription. On the console, +// find AFD signaling under the output's video encoding settings. const ( // MxfAfdSignalingNoCopy is a MxfAfdSignaling enum value MxfAfdSignalingNoCopy = "NO_COPY" @@ -36128,11 +35626,11 @@ func MxfProfile_Values() []string { } // To create an output that complies with the XAVC file format guidelines for -// interoperability, keep the default value, Drop frames for compliance (DROP_FRAMES_FOR_COMPLIANCE). -// To include all frames from your input in this output, keep the default setting, -// Allow any duration (ALLOW_ANY_DURATION). The number of frames that MediaConvert -// excludes when you set this to Drop frames for compliance depends on the output -// frame rate and duration. +// interoperability, keep the default value, Drop frames for compliance. To +// include all frames from your input in this output, choose Allow any duration. +// The number of frames that MediaConvert excludes when +// you set this to Drop frames for compliance depends on the output frame rate +// and duration. const ( // MxfXavcDurationModeAllowAnyDuration is a MxfXavcDurationMode enum value MxfXavcDurationModeAllowAnyDuration = "ALLOW_ANY_DURATION" @@ -36150,10 +35648,9 @@ func MxfXavcDurationMode_Values() []string { } // Choose the type of Nielsen watermarks that you want in your outputs. When -// you choose NAES 2 and NW (NAES2_AND_NW), you must provide a value for the -// setting SID (sourceId). When you choose CBET (CBET), you must provide a value -// for the setting CSID (cbetSourceId). When you choose NAES 2, NW, and CBET -// (NAES2_AND_NW_AND_CBET), you must provide values for both of these settings. +// you choose NAES 2 and NW, you must provide a value for the setting SID. When +// you choose CBET, you must provide a value for the setting CSID. When you +// choose NAES 2, NW, and CBET, you must provide values for both of these settings. const ( // NielsenActiveWatermarkProcessTypeNaes2AndNw is a NielsenActiveWatermarkProcessType enum value NielsenActiveWatermarkProcessTypeNaes2AndNw = "NAES2_AND_NW" @@ -36175,9 +35672,9 @@ func NielsenActiveWatermarkProcessType_Values() []string { } // Required. Specify whether your source content already contains Nielsen non-linear -// watermarks. When you set this value to Watermarked (WATERMARKED), the service -// fails the job. Nielsen requires that you add non-linear watermarking to only -// clean content that doesn't already have non-linear Nielsen watermarks. +// watermarks. When you set this value to Watermarked, the service fails the +// job. Nielsen requires that you add non-linear watermarking to only clean +// content that doesn't already have non-linear Nielsen watermarks.
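A sketch of the Nielsen non-linear watermarking settings this run of comments covers (same imports; the SID is a placeholder that Nielsen would assign, and the field shapes are our best reading of this SDK, not taken from this diff):

    nielsen := &mediaconvert.NielsenNonLinearWatermarkSettings{
        ActiveWatermarkProcess: aws.String(mediaconvert.NielsenActiveWatermarkProcessTypeNaes2AndNw),
        SourceId:               aws.Int64(1234), // placeholder SID; required for NAES 2 and NW
        SourceWatermarkStatus:  aws.String(mediaconvert.NielsenSourceWatermarkStatusTypeClean),
    }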
const ( // NielsenSourceWatermarkStatusTypeClean is a NielsenSourceWatermarkStatusType enum value NielsenSourceWatermarkStatusTypeClean = "CLEAN" @@ -36195,8 +35692,8 @@ func NielsenSourceWatermarkStatusType_Values() []string { } // To create assets that have the same TIC values in each audio track, keep -// the default value Share TICs (SAME_TICS_PER_TRACK). To create assets that -// have unique TIC values for each audio track, choose Use unique TICs (RESERVE_UNIQUE_TICS_PER_TRACK). +// the default value Share TICs. To create assets that have unique TIC values +// for each audio track, choose Use unique TICs. const ( // NielsenUniqueTicPerAudioTrackTypeReserveUniqueTicsPerTrack is a NielsenUniqueTicPerAudioTrackType enum value NielsenUniqueTicPerAudioTrackTypeReserveUniqueTicsPerTrack = "RESERVE_UNIQUE_TICS_PER_TRACK" @@ -36213,16 +35710,15 @@ func NielsenUniqueTicPerAudioTrackType_Values() []string { } } -// When you set Noise reducer (noiseReducer) to Temporal (TEMPORAL), the bandwidth -// and sharpness of your output is reduced. You can optionally use Post temporal -// sharpening (postTemporalSharpening) to apply sharpening to the edges of your -// output. Note that Post temporal sharpening will also make the bandwidth reduction -// from the Noise reducer smaller. The default behavior, Auto (AUTO), allows -// the transcoder to determine whether to apply sharpening, depending on your -// input type and quality. When you set Post temporal sharpening to Enabled -// (ENABLED), specify how much sharpening is applied using Post temporal sharpening -// strength (postTemporalSharpeningStrength). Set Post temporal sharpening to -// Disabled (DISABLED) to not apply sharpening. +// When you set Noise reducer to Temporal, the bandwidth and sharpness of your +// output are reduced. You can optionally use Post temporal sharpening to apply +// sharpening to the edges of your output. Note that Post temporal sharpening +// will also make the bandwidth reduction from the Noise reducer smaller. The +// default behavior, Auto, allows the transcoder to determine whether to apply +// sharpening, depending on your input type and quality. When you set Post temporal +// sharpening to Enabled, specify how much sharpening is applied using Post +// temporal sharpening strength. Set Post temporal sharpening to Disabled to +// not apply sharpening. const ( // NoiseFilterPostTemporalSharpeningDisabled is a NoiseFilterPostTemporalSharpening enum value NoiseFilterPostTemporalSharpeningDisabled = "DISABLED" @@ -36243,10 +35739,9 @@ func NoiseFilterPostTemporalSharpening_Values() []string { } } -// Use Post temporal sharpening strength (postTemporalSharpeningStrength) to -// define the amount of sharpening the transcoder applies to your output. Set -// Post temporal sharpening strength to Low (LOW), Medium (MEDIUM), or High -// (HIGH) to indicate the amount of sharpening. +// Use Post temporal sharpening strength to define the amount of sharpening +// the transcoder applies to your output. Set Post temporal sharpening strength +// to Low, Medium, or High to indicate the amount of sharpening. const ( // NoiseFilterPostTemporalSharpeningStrengthLow is a NoiseFilterPostTemporalSharpeningStrength enum value NoiseFilterPostTemporalSharpeningStrengthLow = "LOW" @@ -36267,13 +35762,12 @@ func NoiseFilterPostTemporalSharpeningStrength_Values() []string { } } -// Use Noise reducer filter (NoiseReducerFilter) to select one of the following -// spatial image filtering functions.
To use this setting, you must also enable -// Noise reducer (NoiseReducer). * Bilateral preserves edges while reducing -// noise. * Mean (softest), Gaussian, Lanczos, and Sharpen (sharpest) do convolution -// filtering. * Conserve does min/max noise reduction. * Spatial does frequency-domain -// filtering based on JND principles. * Temporal optimizes video quality for -// complex motion. +// Use Noise reducer filter to select one of the following spatial image filtering +// functions. To use this setting, you must also enable Noise reducer. * Bilateral +// preserves edges while reducing noise. * Mean (softest), Gaussian, Lanczos, +// and Sharpen (sharpest) do convolution filtering. * Conserve does min/max +// noise reduction. * Spatial does frequency-domain filtering based on JND principles. +// * Temporal optimizes video quality for complex motion. const ( // NoiseReducerFilterBilateral is a NoiseReducerFilter enum value NoiseReducerFilterBilateral = "BILATERAL" @@ -36395,11 +35889,10 @@ func OutputSdt_Values() []string { // Use this setting if your input has video and audio durations that don't align, // and your output or player has strict alignment requirements. Examples: Input // audio track has a delayed start. Input video track ends before audio ends. -// When you set Pad video (padVideo) to Black (BLACK), MediaConvert generates -// black video frames so that output video and audio durations match. Black -// video frames are added at the beginning or end, depending on your input. -// To keep the default behavior and not generate black video, set Pad video -// to Disabled (DISABLED) or leave blank. +// When you set Pad video to Black, MediaConvert generates black video frames +// so that output video and audio durations match. Black video frames are added +// at the beginning or end, depending on your input. To keep the default behavior +// and not generate black video, set Pad video to Disabled or leave blank. const ( // PadVideoDisabled is a PadVideo enum value PadVideoDisabled = "DISABLED" @@ -36462,14 +35955,13 @@ func PricingPlan_Values() []string { // This setting applies only to ProRes 4444 and ProRes 4444 XQ outputs that // you create from inputs that use 4:4:4 chroma sampling. Set Preserve 4:4:4 -// sampling (PRESERVE_444_SAMPLING) to allow outputs to also use 4:4:4 chroma -// sampling. You must specify a value for this setting when your output codec -// profile supports 4:4:4 chroma sampling. Related Settings: For Apple ProRes -// outputs with 4:4:4 chroma sampling: Choose Preserve 4:4:4 sampling. Use when -// your input has 4:4:4 chroma sampling and your output codec Profile is Apple -// ProRes 4444 or 4444 XQ. Note that when you choose Preserve 4:4:4 sampling, -// you cannot include any of the following Preprocessors: Dolby Vision, HDR10+, -// or Noise reducer. +// sampling to allow outputs to also use 4:4:4 chroma sampling. You must specify +// a value for this setting when your output codec profile supports 4:4:4 chroma +// sampling. Related Settings: For Apple ProRes outputs with 4:4:4 chroma sampling: +// Choose Preserve 4:4:4 sampling. Use when your input has 4:4:4 chroma sampling +// and your output codec Profile is Apple ProRes 4444 or 4444 XQ. Note that +// when you choose Preserve 4:4:4 sampling, you cannot include any of the following +// Preprocessors: Dolby Vision, HDR10+, or Noise reducer. 
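A minimal Go sketch of the Preserve 4:4:4 sampling setup described above; the ProresCodecProfileAppleProres4444 constant is assumed from the generated enum set (only the 422 constant appears in the hunk below), and the ChromaSampling field name is likewise an assumption.

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

// prores4444 sketches an Apple ProRes 4444 output that preserves the
// input's 4:4:4 chroma sampling, per the constraints described above.
func prores4444() *mediaconvert.ProresSettings {
	return &mediaconvert.ProresSettings{
		CodecProfile:   aws.String(mediaconvert.ProresCodecProfileAppleProres4444), // assumed constant
		ChromaSampling: aws.String(mediaconvert.ProresChromaSamplingPreserve444Sampling),
	}
}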
const ( // ProresChromaSamplingPreserve444Sampling is a ProresChromaSampling enum value ProresChromaSamplingPreserve444Sampling = "PRESERVE_444_SAMPLING" @@ -36486,8 +35978,7 @@ func ProresChromaSampling_Values() []string { } } -// Use Profile (ProResCodecProfile) to specify the type of Apple ProRes codec -// to use for this output. +// Use Profile to specify the type of Apple ProRes codec to use for this output. const ( // ProresCodecProfileAppleProres422 is a ProresCodecProfile enum value ProresCodecProfileAppleProres422 = "APPLE_PRORES_422" @@ -36525,12 +36016,7 @@ func ProresCodecProfile_Values() []string { // video, choose Follow source. If you want to do frame rate conversion, choose // a frame rate from the dropdown list or choose Custom. The framerates shown // in the dropdown list are decimal approximations of fractions. If you choose -// Custom, specify your frame rate as a fraction. If you are creating your transcoding -// job specification as a JSON file without the console, use FramerateControl -// to specify which value the service uses for the frame rate for this output. -// Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate -// from the input. Choose SPECIFIED if you want the service to use the frame -// rate you specify in the settings FramerateNumerator and FramerateDenominator. +// Custom, specify your frame rate as a fraction. const ( // ProresFramerateControlInitializeFromSource is a ProresFramerateControl enum value ProresFramerateControlInitializeFromSource = "INITIALIZE_FROM_SOURCE" @@ -36579,17 +36065,16 @@ func ProresFramerateConversionAlgorithm_Values() []string { } // Choose the scan line type for the output. Keep the default value, Progressive -// (PROGRESSIVE) to create a progressive output, regardless of the scan type -// of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) -// to create an output that's interlaced with the same field polarity throughout. -// Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) -// to produce outputs with the same field polarity as the source. For jobs that -// have multiple inputs, the output field polarity might change over the course -// of the output. Follow behavior depends on the input scan type. If the source -// is interlaced, the output will be interlaced with the same polarity as the -// source. If the source is progressive, the output will be interlaced with -// top field bottom field first, depending on which of the Follow options you -// choose. +// to create a progressive output, regardless of the scan type of your input. +// Use Top field first or Bottom field first to create an output that's interlaced +// with the same field polarity throughout. Use Follow, default top or Follow, +// default bottom to produce outputs with the same field polarity as the source. +// For jobs that have multiple inputs, the output field polarity might change +// over the course of the output. Follow behavior depends on the input scan +// type. If the source is interlaced, the output will be interlaced with the +// same polarity as the source. If the source is progressive, the output will +// be interlaced with top field first or bottom field first, depending on which +// of the Follow options you choose. const ( // ProresInterlaceModeProgressive is a ProresInterlaceMode enum value ProresInterlaceModeProgressive = "PROGRESSIVE" @@ -36619,12 +36104,10 @@ func ProresInterlaceMode_Values() []string { } // Optional.
Specify how the service determines the pixel aspect ratio (PAR) -// for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), -// uses the PAR from your input video for your output. To specify a different -// PAR in the console, choose any value other than Follow source. To specify -// a different PAR by editing the JSON job specification, choose SPECIFIED. -// When you choose SPECIFIED for this setting, you must also specify values -// for the parNumerator and parDenominator settings. +// for this output. The default behavior, Follow source, uses the PAR from your +// input video for your output. To specify a different PAR, choose any value +// other than Follow source. When you choose SPECIFIED for this setting, you +// must also specify values for the parNumerator and parDenominator settings. const ( // ProresParControlInitializeFromSource is a ProresParControl enum value ProresParControlInitializeFromSource = "INITIALIZE_FROM_SOURCE" @@ -36643,17 +36126,16 @@ func ProresParControl_Values() []string { // Use this setting for interlaced outputs, when your output frame rate is half // of your input frame rate. In this situation, choose Optimized interlacing -// (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this -// case, each progressive frame from the input corresponds to an interlaced -// field in the output. Keep the default value, Basic interlacing (INTERLACED), -// for all other output frame rates. With basic interlacing, MediaConvert performs -// any frame rate conversion first and then interlaces the frames. When you -// choose Optimized interlacing and you set your output frame rate to a value -// that isn't suitable for optimized interlacing, MediaConvert automatically -// falls back to basic interlacing. Required settings: To use optimized interlacing, -// you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't -// use optimized interlacing for hard telecine outputs. You must also set Interlace -// mode (interlaceMode) to a value other than Progressive (PROGRESSIVE). +// to create a better quality interlaced output. In this case, each progressive +// frame from the input corresponds to an interlaced field in the output. Keep +// the default value, Basic interlacing, for all other output frame rates. With +// basic interlacing, MediaConvert performs any frame rate conversion first +// and then interlaces the frames. When you choose Optimized interlacing and +// you set your output frame rate to a value that isn't suitable for optimized +// interlacing, MediaConvert automatically falls back to basic interlacing. +// Required settings: To use optimized interlacing, you must set Telecine to +// None or Soft. You can't use optimized interlacing for hard telecine outputs. +// You must also set Interlace mode to a value other than Progressive. const ( // ProresScanTypeConversionModeInterlaced is a ProresScanTypeConversionMode enum value ProresScanTypeConversionModeInterlaced = "INTERLACED" @@ -36675,9 +36157,7 @@ func ProresScanTypeConversionMode_Values() []string { // slow PAL, MediaConvert relabels the video frames to 25 fps and resamples // your audio to keep it synchronized with the video. Note that enabling this // setting will slightly reduce the duration of your video. Required settings: -// You must also set Framerate to 25. In your JSON job specification, set (framerateControl) -// to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to -// 1. +// You must also set Framerate to 25. 
const ( // ProresSlowPalDisabled is a ProresSlowPal enum value ProresSlowPalDisabled = "DISABLED" @@ -36696,10 +36176,9 @@ func ProresSlowPal_Values() []string { // When you do frame rate conversion from 23.976 frames per second (fps) to // 29.97 fps, and your output scan type is interlaced, you can optionally enable -// hard telecine (HARD) to create a smoother picture. When you keep the default -// value, None (NONE), MediaConvert does a standard frame rate conversion to -// 29.97 without doing anything with the field polarity to create a smoother -// picture. +// hard telecine to create a smoother picture. When you keep the default value, +// None, MediaConvert does a standard frame rate conversion to 29.97 without +// doing anything with the field polarity to create a smoother picture. const ( // ProresTelecineNone is a ProresTelecine enum value ProresTelecineNone = "NONE" @@ -36806,14 +36285,13 @@ func ReservationPlanStatus_Values() []string { } } -// Use Respond to AFD (RespondToAfd) to specify how the service changes the -// video itself in response to AFD values in the input. * Choose Respond to -// clip the input video frame according to the AFD value, input display aspect -// ratio, and output display aspect ratio. * Choose Passthrough to include the -// input AFD values. Do not choose this when AfdSignaling is set to (NONE). -// A preferred implementation of this workflow is to set RespondToAfd to (NONE) -// and set AfdSignaling to (AUTO). * Choose None to remove all input AFD values -// from this output. +// Use Respond to AFD to specify how the service changes the video itself in +// response to AFD values in the input. * Choose Respond to clip the input video +// frame according to the AFD value, input display aspect ratio, and output +// display aspect ratio. * Choose Passthrough to include the input AFD values. +// Do not choose this when AfdSignaling is set to NONE. A preferred implementation +// of this workflow is to set RespondToAfd to NONE and set AfdSignaling to AUTO. +// * Choose None to remove all input AFD values from this output. const ( // RespondToAfdNone is a RespondToAfd enum value RespondToAfdNone = "NONE" @@ -36920,13 +36398,12 @@ func S3ObjectCannedAcl_Values() []string { // your content. AWS also encrypts the data keys themselves, using a customer // master key (CMK), and then stores the encrypted data keys alongside your // encrypted content. Use this setting to specify which AWS service manages -// the CMK. For simplest set up, choose Amazon S3 (SERVER_SIDE_ENCRYPTION_S3). -// If you want your master key to be managed by AWS Key Management Service (KMS), -// choose AWS KMS (SERVER_SIDE_ENCRYPTION_KMS). By default, when you choose -// AWS KMS, KMS uses the AWS managed customer master key (CMK) associated with -// Amazon S3 to encrypt your data keys. You can optionally choose to specify -// a different, customer managed CMK. Do so by specifying the Amazon Resource -// Name (ARN) of the key for the setting KMS ARN (kmsKeyArn). +// the CMK. For the simplest setup, choose Amazon S3. If you want your master key +// to be managed by AWS Key Management Service (KMS), choose AWS KMS. By default, +// when you choose AWS KMS, KMS uses the AWS managed customer master key (CMK) +// associated with Amazon S3 to encrypt your data keys. You can optionally choose +// to specify a different, customer managed CMK. Do so by specifying the Amazon +// Resource Name (ARN) of the key for the setting KMS ARN.
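A minimal Go sketch of the AWS KMS option described above, assuming the S3DestinationSettings/S3EncryptionSettings shape and the KMS enum constant from the generated package; the key ARN is a placeholder.

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

// kmsEncryptedDestination sketches an S3 destination whose data keys are
// protected by AWS KMS under a customer managed CMK.
func kmsEncryptedDestination() *mediaconvert.S3DestinationSettings {
	return &mediaconvert.S3DestinationSettings{
		Encryption: &mediaconvert.S3EncryptionSettings{
			EncryptionType: aws.String(mediaconvert.S3ServerSideEncryptionTypeServerSideEncryptionKms), // assumed constant
			// Placeholder ARN for a customer managed CMK.
			KmsKeyArn: aws.String("arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"),
		},
	}
}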
const ( // S3ServerSideEncryptionTypeServerSideEncryptionS3 is a S3ServerSideEncryptionType enum value S3ServerSideEncryptionTypeServerSideEncryptionS3 = "SERVER_SIDE_ENCRYPTION_S3" @@ -36978,11 +36455,10 @@ func SampleRangeConversion_Values() []string { } // Specify how the service handles outputs that have a different aspect ratio -// from the input aspect ratio. Choose Stretch to output (STRETCH_TO_OUTPUT) -// to have the service stretch your video image to fit. Keep the setting Default -// (DEFAULT) to have the service letterbox your video instead. This setting -// overrides any value that you specify for the setting Selection placement -// (position) in this output. +// from the input aspect ratio. Choose Stretch to output to have the service +// stretch your video image to fit. Keep the setting Default to have the service +// letterbox your video instead. This setting overrides any value that you specify +// for the setting Selection placement in this output. const ( // ScalingBehaviorDefault is a ScalingBehavior enum value ScalingBehaviorDefault = "DEFAULT" @@ -36999,12 +36475,11 @@ func ScalingBehavior_Values() []string { } } -// Set Framerate (SccDestinationFramerate) to make sure that the captions and -// the video are synchronized in the output. Specify a frame rate that matches -// the frame rate of the associated video. If the video frame rate is 29.97, -// choose 29.97 dropframe (FRAMERATE_29_97_DROPFRAME) only if the video has -// video_insertion=true and drop_frame_timecode=true; otherwise, choose 29.97 -// non-dropframe (FRAMERATE_29_97_NON_DROPFRAME). +// Set Framerate to make sure that the captions and the video are synchronized +// in the output. Specify a frame rate that matches the frame rate of the associated +// video. If the video frame rate is 29.97, choose 29.97 dropframe only if the +// video has video_insertion=true and drop_frame_timecode=true; otherwise, choose +// 29.97 non-dropframe. const ( // SccDestinationFramerateFramerate2397 is a SccDestinationFramerate enum value SccDestinationFramerateFramerate2397 = "FRAMERATE_23_97" @@ -37053,12 +36528,11 @@ func SimulateReservedQueue_Values() []string { } } -// Set Style passthrough (StylePassthrough) to ENABLED to use the available -// style, color, and position information from your input captions. MediaConvert -// uses default settings for any missing style and position information in your -// input captions. Set Style passthrough to DISABLED, or leave blank, to ignore -// the style and position information from your input captions and use simplified -// output captions. +// Set Style passthrough to ENABLED to use the available style, color, and position +// information from your input captions. MediaConvert uses default settings +// for any missing style and position information in your input captions. Set +// Style passthrough to DISABLED, or leave blank, to ignore the style and position +// information from your input captions and use simplified output captions. const ( // SrtStylePassthroughEnabled is a SrtStylePassthrough enum value SrtStylePassthroughEnabled = "ENABLED" @@ -37176,8 +36650,8 @@ func TeletextPageType_Values() []string { } } -// Use Position (Position) under under Timecode burn-in (TimecodeBurnIn) to -// specify the location the burned-in timecode on output video. +// Use Position under Timecode burn-in to specify the location of the burned-in +// timecode on output video.
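A minimal Go sketch of the Position setting described above, using the TimecodeBurnin preprocessor from this package; the font size and prefix values are placeholders.

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

// burnInTimecode sketches a timecode burned into the top center of the
// output video; it attaches under the output's video preprocessors.
func burnInTimecode() *mediaconvert.TimecodeBurnin {
	return &mediaconvert.TimecodeBurnin{
		Position: aws.String(mediaconvert.TimecodeBurninPositionTopCenter),
		FontSize: aws.Int64(32),         // placeholder point size
		Prefix:   aws.String("REEL-1 "), // placeholder label
	}
}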
const ( // TimecodeBurninPositionTopCenter is a TimecodeBurninPosition enum value TimecodeBurninPositionTopCenter = "TOP_CENTER" @@ -37222,17 +36696,15 @@ func TimecodeBurninPosition_Values() []string { } } -// Use Source (TimecodeSource) to set how timecodes are handled within this -// job. To make sure that your video, audio, captions, and markers are synchronized -// and that time-based features, such as image inserter, work correctly, choose -// the Timecode source option that matches your assets. All timecodes are in -// a 24-hour format with frame number (HH:MM:SS:FF). * Embedded (EMBEDDED) - -// Use the timecode that is in the input video. If no embedded timecode is in -// the source, the service will use Start at 0 (ZEROBASED) instead. * Start -// at 0 (ZEROBASED) - Set the timecode of the initial frame to 00:00:00:00. -// * Specified Start (SPECIFIEDSTART) - Set the timecode of the initial frame -// to a value other than zero. You use Start timecode (Start) to provide this -// value. +// Use Source to set how timecodes are handled within this job. To make sure +// that your video, audio, captions, and markers are synchronized and that time-based +// features, such as image inserter, work correctly, choose the Timecode source +// option that matches your assets. All timecodes are in a 24-hour format with +// frame number (HH:MM:SS:FF). * Embedded - Use the timecode that is in the +// input video. If no embedded timecode is in the source, the service will use +// Start at 0 instead. * Start at 0 - Set the timecode of the initial frame +// to 00:00:00:00. * Specified Start - Set the timecode of the initial frame +// to a value other than zero. You use Start timecode to provide this value. const ( // TimecodeSourceEmbedded is a TimecodeSource enum value TimecodeSourceEmbedded = "EMBEDDED" @@ -37253,11 +36725,10 @@ func TimecodeSource_Values() []string { } } -// Set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH) to include -// ID3 metadata in this output. This includes ID3 metadata from the following -// features: ID3 timestamp period (timedMetadataId3Period), and Custom ID3 metadata -// inserter (timedMetadataInsertion). To exclude this ID3 metadata in this output: -// set ID3 metadata to None (NONE) or leave blank. +// Set ID3 metadata to Passthrough to include ID3 metadata in this output. This +// includes ID3 metadata from the following features: ID3 timestamp period +// and Custom ID3 metadata inserter. To exclude this ID3 metadata from this output, +// set ID3 metadata to None or leave blank. const ( // TimedMetadataPassthrough is a TimedMetadata enum value TimedMetadataPassthrough = "PASSTHROUGH" @@ -37312,10 +36783,9 @@ func Type_Values() []string { // VC3 class, together with the settings Framerate (framerateNumerator and framerateDenominator) // and Resolution (height and width), determine your output bitrate. For example, // say that your video resolution is 1920x1080 and your framerate is 29.97. -// Then Class 145 (CLASS_145) gives you an output with a bitrate of approximately -// 145 Mbps and Class 220 (CLASS_220) gives you and output with a bitrate of -// approximately 220 Mbps. VC3 class also specifies the color bit depth of your -// output. +// Then Class 145 gives you an output with a bitrate of approximately 145 Mbps +// and Class 220 gives you an output with a bitrate of approximately 220 Mbps. +// VC3 class also specifies the color bit depth of your output.
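Because VC3 class, frame rate, and resolution jointly determine the output bitrate, a short Go sketch follows; the Vc3FramerateControlSpecified constant is assumed by analogy with the INITIALIZE_FROM_SOURCE constant shown below.

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

// vc3Class145 sketches a Class 145 VC3 output pinned to 29.97 fps; with a
// 1920x1080 video description this lands at roughly 145 Mbps.
func vc3Class145() *mediaconvert.Vc3Settings {
	return &mediaconvert.Vc3Settings{
		Vc3Class:             aws.String(mediaconvert.Vc3ClassClass1458bit),
		FramerateControl:     aws.String(mediaconvert.Vc3FramerateControlSpecified), // assumed constant
		FramerateNumerator:   aws.Int64(30000), // 30000/1001 = 29.97 fps
		FramerateDenominator: aws.Int64(1001),
	}
}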
const ( // Vc3ClassClass1458bit is a Vc3Class enum value Vc3ClassClass1458bit = "CLASS_145_8BIT" @@ -37341,12 +36811,7 @@ func Vc3Class_Values() []string { // video, choose Follow source. If you want to do frame rate conversion, choose // a frame rate from the dropdown list or choose Custom. The framerates shown // in the dropdown list are decimal approximations of fractions. If you choose -// Custom, specify your frame rate as a fraction. If you are creating your transcoding -// job specification as a JSON file without the console, use FramerateControl -// to specify which value the service uses for the frame rate for this output. -// Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate -// from the input. Choose SPECIFIED if you want the service to use the frame -// rate you specify in the settings FramerateNumerator and FramerateDenominator. +// Custom, specify your frame rate as a fraction. const ( // Vc3FramerateControlInitializeFromSource is a Vc3FramerateControl enum value Vc3FramerateControlInitializeFromSource = "INITIALIZE_FROM_SOURCE" @@ -37414,17 +36879,16 @@ func Vc3InterlaceMode_Values() []string { // Use this setting for interlaced outputs, when your output frame rate is half // of your input frame rate. In this situation, choose Optimized interlacing -// (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this -// case, each progressive frame from the input corresponds to an interlaced -// field in the output. Keep the default value, Basic interlacing (INTERLACED), -// for all other output frame rates. With basic interlacing, MediaConvert performs -// any frame rate conversion first and then interlaces the frames. When you -// choose Optimized interlacing and you set your output frame rate to a value -// that isn't suitable for optimized interlacing, MediaConvert automatically -// falls back to basic interlacing. Required settings: To use optimized interlacing, -// you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't -// use optimized interlacing for hard telecine outputs. You must also set Interlace -// mode (interlaceMode) to a value other than Progressive (PROGRESSIVE). +// to create a better quality interlaced output. In this case, each progressive +// frame from the input corresponds to an interlaced field in the output. Keep +// the default value, Basic interlacing, for all other output frame rates. With +// basic interlacing, MediaConvert performs any frame rate conversion first +// and then interlaces the frames. When you choose Optimized interlacing and +// you set your output frame rate to a value that isn't suitable for optimized +// interlacing, MediaConvert automatically falls back to basic interlacing. +// Required settings: To use optimized interlacing, you must set Telecine to +// None or Soft. You can't use optimized interlacing for hard telecine outputs. +// You must also set Interlace mode to a value other than Progressive. const ( // Vc3ScanTypeConversionModeInterlaced is a Vc3ScanTypeConversionMode enum value Vc3ScanTypeConversionModeInterlaced = "INTERLACED" @@ -37445,9 +36909,7 @@ func Vc3ScanTypeConversionMode_Values() []string { // second (fps). Enable slow PAL to create a 25 fps output by relabeling the // video frames and resampling your audio. Note that enabling this setting will // slightly reduce the duration of your video. Related settings: You must also -// set Framerate to 25. 
In your JSON job specification, set (framerateControl) -// to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to -// 1. +// set Framerate to 25. const ( // Vc3SlowPalDisabled is a Vc3SlowPal enum value Vc3SlowPalDisabled = "DISABLED" @@ -37466,10 +36928,9 @@ func Vc3SlowPal_Values() []string { // When you do frame rate conversion from 23.976 frames per second (fps) to // 29.97 fps, and your output scan type is interlaced, you can optionally enable -// hard telecine (HARD) to create a smoother picture. When you keep the default -// value, None (NONE), MediaConvert does a standard frame rate conversion to -// 29.97 without doing anything with the field polarity to create a smoother -// picture. +// hard telecine to create a smoother picture. When you keep the default value, +// None, MediaConvert does a standard frame rate conversion to 29.97 without +// doing anything with the field polarity to create a smoother picture. const ( // Vc3TelecineNone is a Vc3Telecine enum value Vc3TelecineNone = "NONE" @@ -37564,16 +37025,15 @@ func VideoCodec_Values() []string { // Applies only to H.264, H.265, MPEG2, and ProRes outputs. Only enable Timecode // insertion when the input frame rate is identical to the output frame rate. -// To include timecodes in this output, set Timecode insertion (VideoTimecodeInsertion) -// to PIC_TIMING_SEI. To leave them out, set it to DISABLED. Default is DISABLED. -// When the service inserts timecodes in an output, by default, it uses any -// embedded timecodes from the input. If none are present, the service will -// set the timecode for the first output frame to zero. To change this default -// behavior, adjust the settings under Timecode configuration (TimecodeConfig). -// In the console, these settings are located under Job > Job settings > Timecode -// configuration. Note - Timecode source under input settings (InputTimecodeSource) -// does not affect the timecodes that are inserted in the output. Source under -// Job settings > Timecode configuration (TimecodeSource) does. +// To include timecodes in this output, set Timecode insertion to PIC_TIMING_SEI. +// To leave them out, set it to DISABLED. Default is DISABLED. When the service +// inserts timecodes in an output, by default, it uses any embedded timecodes +// from the input. If none are present, the service will set the timecode for +// the first output frame to zero. To change this default behavior, adjust the +// settings under Timecode configuration. In the console, these settings are +// located under Job > Job settings > Timecode configuration. Note - Timecode +// source under input settings does not affect the timecodes that are inserted +// in the output. Source under Job settings > Timecode configuration does. const ( // VideoTimecodeInsertionDisabled is a VideoTimecodeInsertion enum value VideoTimecodeInsertionDisabled = "DISABLED" @@ -37595,12 +37055,7 @@ func VideoTimecodeInsertion_Values() []string { // video, choose Follow source. If you want to do frame rate conversion, choose // a frame rate from the dropdown list or choose Custom. The framerates shown // in the dropdown list are decimal approximations of fractions. If you choose -// Custom, specify your frame rate as a fraction. If you are creating your transcoding -// job specification as a JSON file without the console, use FramerateControl -// to specify which value the service uses for the frame rate for this output. -// Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate -// from the input. 
Choose SPECIFIED if you want the service to use the frame -// rate you specify in the settings FramerateNumerator and FramerateDenominator. +// Custom, specify your frame rate as a fraction. const ( // Vp8FramerateControlInitializeFromSource is a Vp8FramerateControl enum value Vp8FramerateControlInitializeFromSource = "INITIALIZE_FROM_SOURCE" @@ -37649,12 +37104,10 @@ func Vp8FramerateConversionAlgorithm_Values() []string { } // Optional. Specify how the service determines the pixel aspect ratio (PAR) -// for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), -// uses the PAR from your input video for your output. To specify a different -// PAR in the console, choose any value other than Follow source. To specify -// a different PAR by editing the JSON job specification, choose SPECIFIED. -// When you choose SPECIFIED for this setting, you must also specify values -// for the parNumerator and parDenominator settings. +// for this output. The default behavior, Follow source, uses the PAR from your +// input video for your output. To specify a different PAR in the console, choose +// any value other than Follow source. When you choose SPECIFIED for this setting, +// you must also specify values for the parNumerator and parDenominator settings. const ( // Vp8ParControlInitializeFromSource is a Vp8ParControl enum value Vp8ParControlInitializeFromSource = "INITIALIZE_FROM_SOURCE" @@ -37671,9 +37124,9 @@ func Vp8ParControl_Values() []string { } } -// Optional. Use Quality tuning level (qualityTuningLevel) to choose how you -// want to trade off encoding speed for output video quality. The default behavior -// is faster, lower quality, multi-pass encoding. +// Optional. Use Quality tuning level to choose how you want to trade off encoding +// speed for output video quality. The default behavior is faster, lower quality, +// multi-pass encoding. const ( // Vp8QualityTuningLevelMultiPass is a Vp8QualityTuningLevel enum value Vp8QualityTuningLevelMultiPass = "MULTI_PASS" @@ -37709,12 +37162,7 @@ func Vp8RateControlMode_Values() []string { // video, choose Follow source. If you want to do frame rate conversion, choose // a frame rate from the dropdown list or choose Custom. The framerates shown // in the dropdown list are decimal approximations of fractions. If you choose -// Custom, specify your frame rate as a fraction. If you are creating your transcoding -// job specification as a JSON file without the console, use FramerateControl -// to specify which value the service uses for the frame rate for this output. -// Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate -// from the input. Choose SPECIFIED if you want the service to use the frame -// rate you specify in the settings FramerateNumerator and FramerateDenominator. +// Custom, specify your frame rate as a fraction. const ( // Vp9FramerateControlInitializeFromSource is a Vp9FramerateControl enum value Vp9FramerateControlInitializeFromSource = "INITIALIZE_FROM_SOURCE" @@ -37763,12 +37211,10 @@ func Vp9FramerateConversionAlgorithm_Values() []string { } // Optional. Specify how the service determines the pixel aspect ratio (PAR) -// for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), -// uses the PAR from your input video for your output. To specify a different -// PAR in the console, choose any value other than Follow source. To specify -// a different PAR by editing the JSON job specification, choose SPECIFIED. 
-// When you choose SPECIFIED for this setting, you must also specify values -// for the parNumerator and parDenominator settings. +// for this output. The default behavior, Follow source, uses the PAR from your +// input video for your output. To specify a different PAR in the console, choose +// any value other than Follow source. When you choose SPECIFIED for this setting, +// you must also specify values for the parNumerator and parDenominator settings. const ( // Vp9ParControlInitializeFromSource is a Vp9ParControl enum value Vp9ParControlInitializeFromSource = "INITIALIZE_FROM_SOURCE" @@ -37785,9 +37231,9 @@ func Vp9ParControl_Values() []string { } } -// Optional. Use Quality tuning level (qualityTuningLevel) to choose how you -// want to trade off encoding speed for output video quality. The default behavior -// is faster, lower quality, multi-pass encoding. +// Optional. Use Quality tuning level to choose how you want to trade off encoding +// speed for output video quality. The default behavior is faster, lower quality, +// multi-pass encoding. const ( // Vp9QualityTuningLevelMultiPass is a Vp9QualityTuningLevel enum value Vp9QualityTuningLevelMultiPass = "MULTI_PASS" @@ -37895,14 +37341,14 @@ func WebvttAccessibilitySubs_Values() []string { } // To use the available style, color, and position information from your input -// captions: Set Style passthrough (stylePassthrough) to Enabled (ENABLED). -// MediaConvert uses default settings when style and position information is -// missing from your input captions. To recreate the input captions exactly: -// Set Style passthrough to Strict (STRICT). MediaConvert automatically applies -// timing adjustments, including adjustments for frame rate conversion, ad avails, -// and input clipping. Your input captions format must be WebVTT. To ignore -// the style and position information from your input captions and use simplified -// output captions: Set Style passthrough to Disabled (DISABLED), or leave blank. +// captions: Set Style passthrough to Enabled. MediaConvert uses default settings +// when style and position information is missing from your input captions. +// To recreate the input captions exactly: Set Style passthrough to Strict. +// MediaConvert automatically applies timing adjustments, including adjustments +// for frame rate conversion, ad avails, and input clipping. Your input captions +// format must be WebVTT. To ignore the style and position information from +// your input captions and use simplified output captions: Set Style passthrough +// to Disabled, or leave blank. const ( // WebvttStylePassthroughEnabled is a WebvttStylePassthrough enum value WebvttStylePassthroughEnabled = "ENABLED" @@ -38011,9 +37457,9 @@ func Xavc4kProfileCodecProfile_Values() []string { } } -// Optional. Use Quality tuning level (qualityTuningLevel) to choose how you -// want to trade off encoding speed for output video quality. The default behavior -// is faster, lower quality, single-pass encoding. +// Optional. Use Quality tuning level to choose how you want to trade off encoding +// speed for output video quality. The default behavior is faster, lower quality, +// single-pass encoding. 
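A Go sketch of the quality-tuning trade-off described in this and the VP8/VP9 passages above, assuming the Vp9QualityTuningLevelMultiPassHq constant from the generated enum set; the bitrate is a placeholder.

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

// vp9HighQuality sketches trading encoding speed for output quality by
// choosing multi-pass HQ instead of the faster default behavior.
func vp9HighQuality() *mediaconvert.Vp9Settings {
	return &mediaconvert.Vp9Settings{
		Bitrate:            aws.Int64(5000000), // placeholder target bitrate
		RateControlMode:    aws.String(mediaconvert.Vp9RateControlModeVbr),
		QualityTuningLevel: aws.String(mediaconvert.Vp9QualityTuningLevelMultiPassHq), // assumed constant
	}
}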
const ( // Xavc4kProfileQualityTuningLevelSinglePass is a Xavc4kProfileQualityTuningLevel enum value Xavc4kProfileQualityTuningLevelSinglePass = "SINGLE_PASS" @@ -38034,16 +37480,15 @@ func Xavc4kProfileQualityTuningLevel_Values() []string { } } -// Keep the default value, Auto (AUTO), for this setting to have MediaConvert -// automatically apply the best types of quantization for your video content. -// When you want to apply your quantization settings manually, you must set -// Adaptive quantization (adaptiveQuantization) to a value other than Auto (AUTO). -// Use this setting to specify the strength of any adaptive quantization filters -// that you enable. If you don't want MediaConvert to do any adaptive quantization -// in this transcode, set Adaptive quantization to Off (OFF). Related settings: -// The value that you choose here applies to the following settings: Flicker -// adaptive quantization (flickerAdaptiveQuantization), Spatial adaptive quantization -// (spatialAdaptiveQuantization), and Temporal adaptive quantization (temporalAdaptiveQuantization). +// Keep the default value, Auto, for this setting to have MediaConvert automatically +// apply the best types of quantization for your video content. When you want +// to apply your quantization settings manually, you must set Adaptive quantization +// to a value other than Auto. Use this setting to specify the strength of any +// adaptive quantization filters that you enable. If you don't want MediaConvert +// to do any adaptive quantization in this transcode, set Adaptive quantization +// to Off. Related settings: The value that you choose here applies to the following +// settings: Flicker adaptive quantization, Spatial adaptive quantization, and +// Temporal adaptive quantization. const ( // XavcAdaptiveQuantizationOff is a XavcAdaptiveQuantization enum value XavcAdaptiveQuantizationOff = "OFF" @@ -38104,19 +37549,18 @@ func XavcEntropyEncoding_Values() []string { } // The best way to set up adaptive quantization is to keep the default value, -// Auto (AUTO), for the setting Adaptive quantization (XavcAdaptiveQuantization). -// When you do so, MediaConvert automatically applies the best types of quantization -// for your video content. Include this setting in your JSON job specification -// only when you choose to change the default value for Adaptive quantization. -// Enable this setting to have the encoder reduce I-frame pop. I-frame pop appears -// as a visual flicker that can arise when the encoder saves bits by copying -// some macroblocks many times from frame to frame, and then refreshes them -// at the I-frame. When you enable this setting, the encoder updates these macroblocks -// slightly more often to smooth out the flicker. This setting is disabled by -// default. Related setting: In addition to enabling this setting, you must -// also set Adaptive quantization (adaptiveQuantization) to a value other than -// Off (OFF) or Auto (AUTO). Use Adaptive quantization to adjust the degree -// of smoothing that Flicker adaptive quantization provides. +// Auto, for the setting Adaptive quantization. When you do so, MediaConvert +// automatically applies the best types of quantization for your video content. +// Include this setting in your JSON job specification only when you choose +// to change the default value for Adaptive quantization. Enable this setting +// to have the encoder reduce I-frame pop.
I-frame pop appears as a visual flicker +// that can arise when the encoder saves bits by copying some macroblocks many +// times from frame to frame, and then refreshes them at the I-frame. When you +// enable this setting, the encoder updates these macroblocks slightly more +// often to smooth out the flicker. This setting is disabled by default. Related +// setting: In addition to enabling this setting, you must also set Adaptive +// quantization to a value other than Off or Auto. Use Adaptive quantization +// to adjust the degree of smoothing that Flicker adaptive quantization provides. const ( // XavcFlickerAdaptiveQuantizationDisabled is a XavcFlickerAdaptiveQuantization enum value XavcFlickerAdaptiveQuantizationDisabled = "DISABLED" @@ -38137,12 +37581,7 @@ func XavcFlickerAdaptiveQuantization_Values() []string { // rate for this output. If you want to keep the same frame rate as the input // video, choose Follow source. If you want to do frame rate conversion, choose // a frame rate from the dropdown list. The framerates shown in the dropdown -// list are decimal approximations of fractions. If you are creating your transcoding -// job specification as a JSON file without the console, use FramerateControl -// to specify which value the service uses for the frame rate for this output. -// Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate -// from the input. Choose SPECIFIED if you want the service to use the frame -// rate that you specify in the settings FramerateNumerator and FramerateDenominator. +// list are decimal approximations of fractions. const ( // XavcFramerateControlInitializeFromSource is a XavcFramerateControl enum value XavcFramerateControlInitializeFromSource = "INITIALIZE_FROM_SOURCE" @@ -38191,9 +37630,9 @@ func XavcFramerateConversionAlgorithm_Values() []string { } // Specify whether the encoder uses B-frames as reference frames for other pictures -// in the same GOP. Choose Allow (ENABLED) to allow the encoder to use B-frames -// as reference frames. Choose Don't allow (DISABLED) to prevent the encoder -// from using B-frames as reference frames. +// in the same GOP. Choose Allow to allow the encoder to use B-frames as reference +// frames. Choose Don't allow to prevent the encoder from using B-frames as +// reference frames. const ( // XavcGopBReferenceDisabled is a XavcGopBReference enum value XavcGopBReferenceDisabled = "DISABLED" @@ -38256,9 +37695,9 @@ func XavcHdProfileBitrateClass_Values() []string { } } -// Optional. Use Quality tuning level (qualityTuningLevel) to choose how you -// want to trade off encoding speed for output video quality. The default behavior -// is faster, lower quality, single-pass encoding. +// Optional. Use Quality tuning level to choose how you want to trade off encoding +// speed for output video quality. The default behavior is faster, lower quality, +// single-pass encoding. const ( // XavcHdProfileQualityTuningLevelSinglePass is a XavcHdProfileQualityTuningLevel enum value XavcHdProfileQualityTuningLevelSinglePass = "SINGLE_PASS" @@ -38281,8 +37720,7 @@ func XavcHdProfileQualityTuningLevel_Values() []string { // Ignore this setting unless you set Frame rate (framerateNumerator divided // by framerateDenominator) to 29.970. If your input framerate is 23.976, choose -// Hard (HARD). Otherwise, keep the default value None (NONE). For more information, -// see https://docs.aws.amazon.com/mediaconvert/latest/ug/working-with-telecine-and-inverse-telecine.html. +// Hard. 
Otherwise, keep the default value None. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/working-with-telecine-and-inverse-telecine.html. const ( // XavcHdProfileTelecineNone is a XavcHdProfileTelecine enum value XavcHdProfileTelecineNone = "NONE" @@ -38300,17 +37738,16 @@ func XavcHdProfileTelecine_Values() []string { } // Choose the scan line type for the output. Keep the default value, Progressive -// (PROGRESSIVE) to create a progressive output, regardless of the scan type -// of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) -// to create an output that's interlaced with the same field polarity throughout. -// Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) -// to produce outputs with the same field polarity as the source. For jobs that -// have multiple inputs, the output field polarity might change over the course -// of the output. Follow behavior depends on the input scan type. If the source -// is interlaced, the output will be interlaced with the same polarity as the -// source. If the source is progressive, the output will be interlaced with -// top field bottom field first, depending on which of the Follow options you -// choose. +// to create a progressive output, regardless of the scan type of your input. +// Use Top field first or Bottom field first to create an output that's interlaced +// with the same field polarity throughout. Use Follow, default top or Follow, +// default bottom to produce outputs with the same field polarity as the source. +// For jobs that have multiple inputs, the output field polarity might change +// over the course of the output. Follow behavior depends on the input scan +// type. If the source is interlaced, the output will be interlaced with the +// same polarity as the source. If the source is progressive, the output will +// be interlaced with top field first or bottom field first, depending on which +// of the Follow options you choose. const ( // XavcInterlaceModeProgressive is a XavcInterlaceMode enum value XavcInterlaceModeProgressive = "PROGRESSIVE" @@ -38375,9 +37812,7 @@ func XavcProfile_Values() []string { // second (fps). Enable slow PAL to create a 25 fps output by relabeling the // video frames and resampling your audio. Note that enabling this setting will // slightly reduce the duration of your video. Related settings: You must also -// set Frame rate to 25. In your JSON job specification, set (framerateControl) -// to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to -// 1. +// set Frame rate to 25.
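The removed JSON guidance above (framerateControl set to SPECIFIED, framerateNumerator to 25, framerateDenominator to 1) corresponds to the following Go sketch; the SPECIFIED and ENABLED constants are assumed by analogy with the constants shown nearby.

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

// xavcSlowPal sketches the slow PAL requirement described above: the
// output frame rate must be pinned to exactly 25 fps.
func xavcSlowPal() *mediaconvert.XavcSettings {
	return &mediaconvert.XavcSettings{
		SlowPal:              aws.String(mediaconvert.XavcSlowPalEnabled),            // assumed constant
		FramerateControl:     aws.String(mediaconvert.XavcFramerateControlSpecified), // assumed constant
		FramerateNumerator:   aws.Int64(25),
		FramerateDenominator: aws.Int64(1),
	}
}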
When -// you enable this feature, the encoder uses fewer bits on areas that can sustain -// more distortion with no noticeable visual degradation and uses more bits -// on areas where any small distortion will be noticeable. For example, complex -// textured blocks are encoded with fewer bits and smooth textured blocks are -// encoded with more bits. Enabling this feature will almost always improve -// your video quality. Note, though, that this feature doesn't take into account -// where the viewer's attention is likely to be. If viewers are likely to be -// focusing their attention on a part of the screen with a lot of complex texture, -// you might choose to disable this feature. Related setting: When you enable -// spatial adaptive quantization, set the value for Adaptive quantization (adaptiveQuantization) -// depending on your content. For homogeneous content, such as cartoons and -// video games, set it to Low. For content with a wider variety of textures, -// set it to High or Higher. +// Auto, for the setting Adaptive quantization. When you do so, MediaConvert +// automatically applies the best types of quantization for your video content. +// Include this setting in your JSON job specification only when you choose +// to change the default value for Adaptive quantization. For this setting, +// keep the default value, Enabled, to adjust quantization within each frame +// based on spatial variation of content complexity. When you enable this feature, +// the encoder uses fewer bits on areas that can sustain more distortion with +// no noticeable visual degradation and uses more bits on areas where any small +// distortion will be noticeable. For example, complex textured blocks are encoded +// with fewer bits and smooth textured blocks are encoded with more bits. Enabling +// this feature will almost always improve your video quality. Note, though, +// that this feature doesn't take into account where the viewer's attention +// is likely to be. If viewers are likely to be focusing their attention on +// a part of the screen with a lot of complex texture, you might choose to disable +// this feature. Related setting: When you enable spatial adaptive quantization, +// set the value for Adaptive quantization depending on your content. For homogeneous +// content, such as cartoons and video games, set it to Low. For content with +// a wider variety of textures, set it to High or Higher. const ( // XavcSpatialAdaptiveQuantizationDisabled is a XavcSpatialAdaptiveQuantization enum value XavcSpatialAdaptiveQuantizationDisabled = "DISABLED" @@ -38431,23 +37865,23 @@ func XavcSpatialAdaptiveQuantization_Values() []string { } // The best way to set up adaptive quantization is to keep the default value, -// Auto (AUTO), for the setting Adaptive quantization (adaptiveQuantization). -// When you do so, MediaConvert automatically applies the best types of quantization -// for your video content. Include this setting in your JSON job specification -// only when you choose to change the default value for Adaptive quantization. -// For this setting, keep the default value, Enabled (ENABLED), to adjust quantization -// within each frame based on temporal variation of content complexity. When -// you enable this feature, the encoder uses fewer bits on areas of the frame -// that aren't moving and uses more bits on complex objects with sharp edges -// that move a lot. For example, this feature improves the readability of text -// tickers on newscasts and scoreboards on sports matches. 
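A Go sketch pairing spatial adaptive quantization with a stronger Adaptive quantization level for highly textured content, per the guidance above; the HIGH and ENABLED constants and the XavcSettings field names are assumptions from the generated package.

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

// xavcSpatialAq sketches spatial adaptive quantization tuned for content
// with a wide variety of textures, per the guidance above.
func xavcSpatialAq() *mediaconvert.XavcSettings {
	return &mediaconvert.XavcSettings{
		// Assumed field and constant names from the generated package.
		AdaptiveQuantization:        aws.String(mediaconvert.XavcAdaptiveQuantizationHigh),
		SpatialAdaptiveQuantization: aws.String(mediaconvert.XavcSpatialAdaptiveQuantizationEnabled),
	}
}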
Enabling this feature -// will almost always improve your video quality. Note, though, that this feature -// doesn't take into account where the viewer's attention is likely to be. If -// viewers are likely to be focusing their attention on a part of the screen -// that doesn't have moving objects with sharp edges, such as sports athletes' -// faces, you might choose to disable this feature. Related setting: When you -// enable temporal adaptive quantization, adjust the strength of the filter -// with the setting Adaptive quantization (adaptiveQuantization). +// Auto, for the setting Adaptive quantization. When you do so, MediaConvert +// automatically applies the best types of quantization for your video content. +// Include this setting in your JSON job specification only when you choose +// to change the default value for Adaptive quantization. For this setting, +// keep the default value, Enabled, to adjust quantization within each frame +// based on temporal variation of content complexity. When you enable this feature, +// the encoder uses fewer bits on areas of the frame that aren't moving and +// uses more bits on complex objects with sharp edges that move a lot. For example, +// this feature improves the readability of text tickers on newscasts and scoreboards +// on sports matches. Enabling this feature will almost always improve your +// video quality. Note, though, that this feature doesn't take into account +// where the viewer's attention is likely to be. If viewers are likely to be +// focusing their attention on a part of the screen that doesn't have moving +// objects with sharp edges, such as sports athletes' faces, you might choose +// to disable this feature. Related setting: When you enable temporal adaptive +// quantization, adjust the strength of the filter with the setting Adaptive +// quantization. const ( // XavcTemporalAdaptiveQuantizationDisabled is a XavcTemporalAdaptiveQuantization enum value XavcTemporalAdaptiveQuantizationDisabled = "DISABLED" diff --git a/service/omics/doc.go b/service/omics/doc.go index 09eec2ac402..7a614208407 100644 --- a/service/omics/doc.go +++ b/service/omics/doc.go @@ -3,9 +3,9 @@ // Package omics provides the client and types for making API // requests to Amazon Omics. // -// This is the Amazon Omics API Reference. For an introduction to the service, -// see What is Amazon Omics? (https://docs.aws.amazon.com/omics/latest/dev/) -// in the Amazon Omics User Guide. +// This is the AWS HealthOmics API Reference. For an introduction to the service, +// see What is AWS HealthOmics? (https://docs.aws.amazon.com/omics/latest/dev/) +// in the AWS HealthOmics User Guide. // // See https://docs.aws.amazon.com/goto/WebAPI/omics-2022-11-28 for more information on this service. // diff --git a/service/opensearchserverless/api.go b/service/opensearchserverless/api.go index 46cf3f36197..d302e3b846d 100644 --- a/service/opensearchserverless/api.go +++ b/service/opensearchserverless/api.go @@ -334,7 +334,8 @@ func (c *OpenSearchServerless) CreateCollectionRequest(input *CreateCollectionIn // Returned Error Types: // // - OcuLimitExceededException -// OCU Limit Exceeded for service limits +// Thrown when the collection you're attempting to create results in a number +// of search or indexing OCUs that exceeds the account limit. // // - InternalServerException // Thrown when an error internal to the service occurs while processing a request. 
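The reworded OcuLimitExceededException documentation describes a capacity condition that callers can detect by error code, using the runtime type assertion on awserr.Error that these doc comments reference. A minimal Go sketch, with a placeholder collection name:

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/opensearchserverless"
)

// createCollection sketches detecting the OCU limit error by its code,
// as the "Returns awserr.Error" guidance above suggests.
func createCollection(sess *session.Session) error {
	svc := opensearchserverless.New(sess)
	_, err := svc.CreateCollection(&opensearchserverless.CreateCollectionInput{
		Name: aws.String("example-collection"), // placeholder name
		Type: aws.String(opensearchserverless.CollectionTypeSearch),
	})
	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == opensearchserverless.ErrCodeOcuLimitExceededException {
		// The requested search or indexing OCUs would exceed the account
		// limit; free capacity or request a limit increase, then retry.
	}
	return err
}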
@@ -6427,7 +6428,8 @@ func (s *ListVpcEndpointsOutput) SetVpcEndpointSummaries(v []*VpcEndpointSummary return s } -// OCU Limit Exceeded for service limits +// Thrown when the collection you're attempting to create results in a number +// of search or indexing OCUs that exceeds the account limit. type OcuLimitExceededException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` @@ -8614,6 +8616,9 @@ const ( // CollectionTypeTimeseries is a CollectionType enum value CollectionTypeTimeseries = "TIMESERIES" + + // CollectionTypeVectorsearch is a CollectionType enum value + CollectionTypeVectorsearch = "VECTORSEARCH" ) // CollectionType_Values returns all elements of the CollectionType enum @@ -8621,6 +8626,7 @@ func CollectionType_Values() []string { return []string{ CollectionTypeSearch, CollectionTypeTimeseries, + CollectionTypeVectorsearch, } } diff --git a/service/opensearchserverless/errors.go b/service/opensearchserverless/errors.go index 8c85ad9cc2d..bfdc7830f99 100644 --- a/service/opensearchserverless/errors.go +++ b/service/opensearchserverless/errors.go @@ -25,7 +25,8 @@ const ( // ErrCodeOcuLimitExceededException for service response error code // "OcuLimitExceededException". // - // OCU Limit Exceeded for service limits + // Thrown when the collection you're attempting to create results in a number + // of search or indexing OCUs that exceeds the account limit. ErrCodeOcuLimitExceededException = "OcuLimitExceededException" // ErrCodeResourceNotFoundException for service response error code diff --git a/service/polly/api.go b/service/polly/api.go index 8670fcf44a3..22780960e85 100644 --- a/service/polly/api.go +++ b/service/polly/api.go @@ -3995,6 +3995,9 @@ const ( // LanguageCodeEnIe is a LanguageCode enum value LanguageCodeEnIe = "en-IE" + + // LanguageCodeNlBe is a LanguageCode enum value + LanguageCodeNlBe = "nl-BE" ) // LanguageCode_Values returns all elements of the LanguageCode enum @@ -4037,6 +4040,7 @@ func LanguageCode_Values() []string { LanguageCodeArAe, LanguageCodeFiFi, LanguageCodeEnIe, + LanguageCodeNlBe, } } @@ -4398,6 +4402,9 @@ const ( // VoiceIdSofie is a VoiceId enum value VoiceIdSofie = "Sofie" + + // VoiceIdLisa is a VoiceId enum value + VoiceIdLisa = "Lisa" ) // VoiceId_Values returns all elements of the VoiceId enum @@ -4493,5 +4500,6 @@ func VoiceId_Values() []string { VoiceIdTomoko, VoiceIdNiamh, VoiceIdSofie, + VoiceIdLisa, } } diff --git a/service/route53/api.go b/service/route53/api.go index 046397c6116..ed55d8224d2 100644 --- a/service/route53/api.go +++ b/service/route53/api.go @@ -492,11 +492,11 @@ func (c *Route53) ChangeResourceRecordSetsRequest(input *ChangeResourceRecordSet // # Change Propagation to Route 53 DNS Servers // // When you submit a ChangeResourceRecordSets request, Route 53 propagates your -// changes to all of the Route 53 authoritative DNS servers. While your changes -// are propagating, GetChange returns a status of PENDING. When propagation -// is complete, GetChange returns a status of INSYNC. Changes generally propagate -// to all Route 53 name servers within 60 seconds. For more information, see -// GetChange (https://docs.aws.amazon.com/Route53/latest/APIReference/API_GetChange.html). +// changes to all of the Route 53 authoritative DNS servers managing the hosted +// zone. While your changes are propagating, GetChange returns a status of PENDING. +// When propagation is complete, GetChange returns a status of INSYNC. 
diff --git a/service/route53/api.go b/service/route53/api.go
index 046397c6116..ed55d8224d2 100644
--- a/service/route53/api.go
+++ b/service/route53/api.go
@@ -492,11 +492,11 @@ func (c *Route53) ChangeResourceRecordSetsRequest(input *ChangeResourceRecordSet
 // # Change Propagation to Route 53 DNS Servers
 //
 // When you submit a ChangeResourceRecordSets request, Route 53 propagates your
-// changes to all of the Route 53 authoritative DNS servers. While your changes
-// are propagating, GetChange returns a status of PENDING. When propagation
-// is complete, GetChange returns a status of INSYNC. Changes generally propagate
-// to all Route 53 name servers within 60 seconds. For more information, see
-// GetChange (https://docs.aws.amazon.com/Route53/latest/APIReference/API_GetChange.html).
+// changes to all of the Route 53 authoritative DNS servers managing the hosted
+// zone. While your changes are propagating, GetChange returns a status of PENDING.
+// When propagation is complete, GetChange returns a status of INSYNC. Changes
+// generally propagate to all Route 53 name servers managing the hosted zone
+// within 60 seconds. For more information, see GetChange (https://docs.aws.amazon.com/Route53/latest/APIReference/API_GetChange.html).
 //
 // # Limits on ChangeResourceRecordSets Requests
 //
@@ -1728,6 +1728,13 @@ func (c *Route53) CreateTrafficPolicyInstanceRequest(input *CreateTrafficPolicyI
 // responds to DNS queries for the domain or subdomain name by using the resource
 // record sets that CreateTrafficPolicyInstance created.
 //
+// After you submit a CreateTrafficPolicyInstance request, there's a brief
+// delay while Amazon Route 53 creates the resource record sets that are specified
+// in the traffic policy definition. Use GetTrafficPolicyInstance with the id
+// of the new traffic policy instance to confirm that the CreateTrafficPolicyInstance
+// request completed successfully. For more information, see the State response
+// element.
+//
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
 // the error.
@@ -3500,11 +3507,11 @@ func (c *Route53) GetChangeRequest(input *GetChangeInput) (req *request.Request,
 // the following values:
 //
 //   - PENDING indicates that the changes in this request have not propagated
-//     to all Amazon Route 53 DNS servers. This is the initial status of all
-//     change batch requests.
+//     to all Amazon Route 53 DNS servers managing the hosted zone. This is the
+//     initial status of all change batch requests.
 //
 //   - INSYNC indicates that the changes have propagated to all Route 53 DNS
-//     servers.
+//     servers managing the hosted zone.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
@@ -4780,10 +4787,10 @@ func (c *Route53) GetTrafficPolicyInstanceRequest(input *GetTrafficPolicyInstanc
 //
 // Gets information about a specified traffic policy instance.
 //
-// After you submit a CreateTrafficPolicyInstance or an UpdateTrafficPolicyInstance
-// request, there's a brief delay while Amazon Route 53 creates the resource
-// record sets that are specified in the traffic policy definition. For more
-// information, see the State response element.
+// Use GetTrafficPolicyInstance with the id of the new traffic policy instance
+// to confirm that the CreateTrafficPolicyInstance or UpdateTrafficPolicyInstance
+// request completed successfully. For more information, see the State response
+// element.
 //
 // In the Route 53 console, traffic policy instances are known as policy records.
 //
@@ -7155,6 +7162,11 @@ func (c *Route53) TestDNSAnswerRequest(input *TestDNSAnswerInput) (req *request.
 //
 // This call only supports querying public hosted zones.
 //
+// TestDNSAnswer returns information similar to what you would expect from
+// the answer section of the dig command. Therefore, if you query for the name
+// servers of a subdomain that point to the parent name servers, those will
+// not be returned.
+//
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
 // the error.
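The propagation wording above suggests the standard pattern: submit a change, then check GetChange until the status moves from PENDING to INSYNC, either by hand or with the SDK's built-in waiter (which polls GetChange). A minimal sketch with a hypothetical change ID:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/route53"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := route53.New(sess)

	// Hypothetical ID returned in ChangeInfo.Id by ChangeResourceRecordSets.
	changeID := aws.String("C2682N5HXP0BZ4")

	// Option 1: a one-shot status check. GetChange reports PENDING until the
	// change has reached all Route 53 name servers managing the hosted zone.
	out, err := svc.GetChange(&route53.GetChangeInput{Id: changeID})
	if err != nil {
		log.Fatalf("GetChange failed: %v", err)
	}
	fmt.Println("status:", aws.StringValue(out.ChangeInfo.Status)) // PENDING or INSYNC

	// Option 2: let the SDK poll until the status is INSYNC.
	if err := svc.WaitUntilResourceRecordSetsChanged(&route53.GetChangeInput{Id: changeID}); err != nil {
		log.Fatalf("change did not reach INSYNC: %v", err)
	}
	fmt.Println("change is INSYNC")
}
```

The same wait-then-confirm shape applies to the traffic policy notes in the surrounding hunks: after CreateTrafficPolicyInstance or UpdateTrafficPolicyInstance, call GetTrafficPolicyInstance with the instance's id and inspect the State response element.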
@@ -7500,6 +7512,12 @@ func (c *Route53) UpdateTrafficPolicyInstanceRequest(input *UpdateTrafficPolicyI
 // UpdateTrafficPolicyInstance API operation for Amazon Route 53.
 //
+// After you submit an UpdateTrafficPolicyInstance request, there's a brief delay
+// while Route 53 creates the resource record sets that are specified in the
+// traffic policy definition. Use GetTrafficPolicyInstance with the id of the
+// updated traffic policy instance to confirm that the UpdateTrafficPolicyInstance
+// request completed successfully. For more information, see the State response element.
+//
 // Updates the resource record sets in a specified hosted zone that were created
 // based on the settings in a specified traffic policy version.
 //
@@ -9574,6 +9592,11 @@ type CreateHostedZoneInput struct {
 	// the ID that Amazon Route 53 assigned to the reusable delegation set when
 	// you created it. For more information about reusable delegation sets, see
 	// CreateReusableDelegationSet (https://docs.aws.amazon.com/Route53/latest/APIReference/API_CreateReusableDelegationSet.html).
+	//
+	// If you are using a reusable delegation set to create a public hosted zone
+	// for a subdomain, make sure that the parent hosted zone doesn't use one or
+	// more of the same name servers. If you have overlapping name servers, the
+	// operation will cause a ConflictingDomainsExist error.
 	DelegationSetId *string `type:"string"`

 	// (Optional) A complex type that contains the following optional values: