From f3f7488c2060cb64f88b2c1ce49d683b609b06c9 Mon Sep 17 00:00:00 2001 From: aws-sdk-go-automation <43143561+aws-sdk-go-automation@users.noreply.github.com> Date: Mon, 22 Feb 2021 11:19:44 -0800 Subject: [PATCH] Release v1.37.16 (2021-02-22) (#3795) Release v1.37.16 (2021-02-22) === ### Service Client Updates * `service/runtime.sagemaker`: Updates service API and documentation * `service/sagemaker`: Updates service API and documentation * Amazon SageMaker now supports core dump for SageMaker Endpoints and direct invocation of a single container in a SageMaker Endpoint that hosts multiple containers. --- CHANGELOG.md | 8 + aws/version.go | 2 +- .../runtime.sagemaker/2017-05-13/api-2.json | 10 + .../runtime.sagemaker/2017-05-13/docs-2.json | 6 + models/apis/sagemaker/2017-07-24/api-2.json | 27 ++- models/apis/sagemaker/2017-07-24/docs-2.json | 29 ++- service/sagemaker/api.go | 181 +++++++++++++++++- service/sagemakerruntime/api.go | 10 + 8 files changed, 263 insertions(+), 10 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4891592a184..158296130fb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,11 @@ +Release v1.37.16 (2021-02-22) +=== + +### Service Client Updates +* `service/runtime.sagemaker`: Updates service API and documentation +* `service/sagemaker`: Updates service API and documentation + * Amazon SageMaker now supports core dump for SageMaker Endpoints and direct invocation of a single container in a SageMaker Endpoint that hosts multiple containers. 
+ Release v1.37.15 (2021-02-19) === diff --git a/aws/version.go b/aws/version.go index fe73428eb24..d00ac003418 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.37.15" +const SDKVersion = "1.37.16" diff --git a/models/apis/runtime.sagemaker/2017-05-13/api-2.json b/models/apis/runtime.sagemaker/2017-05-13/api-2.json index 82c0208afa2..a15bb58e01b 100644 --- a/models/apis/runtime.sagemaker/2017-05-13/api-2.json +++ b/models/apis/runtime.sagemaker/2017-05-13/api-2.json @@ -104,6 +104,11 @@ "location":"header", "locationName":"X-Amzn-SageMaker-Target-Variant" }, + "TargetContainerHostname":{ + "shape":"TargetContainerHostnameHeader", + "location":"header", + "locationName":"X-Amzn-SageMaker-Target-Container-Hostname" + }, "InferenceId":{ "shape":"InferenceId", "location":"header", @@ -162,6 +167,11 @@ "synthetic":true }, "StatusCode":{"type":"integer"}, + "TargetContainerHostnameHeader":{ + "type":"string", + "max":63, + "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9])*" + }, "TargetModelHeader":{ "type":"string", "max":1024, diff --git a/models/apis/runtime.sagemaker/2017-05-13/docs-2.json b/models/apis/runtime.sagemaker/2017-05-13/docs-2.json index 9b482015823..8683ba57fd6 100644 --- a/models/apis/runtime.sagemaker/2017-05-13/docs-2.json +++ b/models/apis/runtime.sagemaker/2017-05-13/docs-2.json @@ -87,6 +87,12 @@ "ModelError$OriginalStatusCode": "

Original status code.

" } }, + "TargetContainerHostnameHeader": { + "base": null, + "refs": { + "InvokeEndpointInput$TargetContainerHostname": "

If the endpoint hosts multiple containers and is configured to use direct invocation, this parameter specifies the host name of the container to invoke.

" + } + }, "TargetModelHeader": { "base": null, "refs": { diff --git a/models/apis/sagemaker/2017-07-24/api-2.json b/models/apis/sagemaker/2017-07-24/api-2.json index 99edd4969e1..a204038be21 100644 --- a/models/apis/sagemaker/2017-07-24/api-2.json +++ b/models/apis/sagemaker/2017-07-24/api-2.json @@ -4624,6 +4624,7 @@ "ModelName":{"shape":"ModelName"}, "PrimaryContainer":{"shape":"ContainerDefinition"}, "Containers":{"shape":"ContainerDefinitionList"}, + "InferenceExecutionConfig":{"shape":"InferenceExecutionConfig"}, "ExecutionRoleArn":{"shape":"RoleArn"}, "Tags":{"shape":"TagList"}, "VpcConfig":{"shape":"VpcConfig"}, @@ -6460,6 +6461,7 @@ "ModelName":{"shape":"ModelName"}, "PrimaryContainer":{"shape":"ContainerDefinition"}, "Containers":{"shape":"ContainerDefinitionList"}, + "InferenceExecutionConfig":{"shape":"InferenceExecutionConfig"}, "ExecutionRoleArn":{"shape":"RoleArn"}, "VpcConfig":{"shape":"VpcConfig"}, "CreationTime":{"shape":"Timestamp"}, @@ -8569,6 +8571,20 @@ "type":"list", "member":{"shape":"Image"} }, + "InferenceExecutionConfig":{ + "type":"structure", + "required":["Mode"], + "members":{ + "Mode":{"shape":"InferenceExecutionMode"} + } + }, + "InferenceExecutionMode":{ + "type":"string", + "enum":[ + "Serial", + "Direct" + ] + }, "InferenceSpecification":{ "type":"structure", "required":[ @@ -11999,7 +12015,8 @@ "InitialInstanceCount":{"shape":"TaskCount"}, "InstanceType":{"shape":"ProductionVariantInstanceType"}, "InitialVariantWeight":{"shape":"VariantWeight"}, - "AcceleratorType":{"shape":"ProductionVariantAcceleratorType"} + "AcceleratorType":{"shape":"ProductionVariantAcceleratorType"}, + "CoreDumpConfig":{"shape":"ProductionVariantCoreDumpConfig"} } }, "ProductionVariantAcceleratorType":{ @@ -12013,6 +12030,14 @@ "ml.eia2.xlarge" ] }, + "ProductionVariantCoreDumpConfig":{ + "type":"structure", + "required":["DestinationS3Uri"], + "members":{ + "DestinationS3Uri":{"shape":"DestinationS3Uri"}, + "KmsKeyId":{"shape":"KmsKeyId"} + } + }, 
"ProductionVariantInstanceType":{ "type":"string", "enum":[ diff --git a/models/apis/sagemaker/2017-07-24/docs-2.json b/models/apis/sagemaker/2017-07-24/docs-2.json index 2a559b8c995..a9bacd0ab5c 100644 --- a/models/apis/sagemaker/2017-07-24/docs-2.json +++ b/models/apis/sagemaker/2017-07-24/docs-2.json @@ -2995,7 +2995,8 @@ "base": null, "refs": { "DataCaptureConfig$DestinationS3Uri": "

", - "DataCaptureConfigSummary$DestinationS3Uri": "

" + "DataCaptureConfigSummary$DestinationS3Uri": "

", + "ProductionVariantCoreDumpConfig$DestinationS3Uri": "

The Amazon S3 bucket to send the core dump to.

" } }, "DetailedAlgorithmStatus": { @@ -4618,6 +4619,19 @@ "ListImagesResponse$Images": "

A list of images and their properties.

" } }, + "InferenceExecutionConfig": { + "base": "

Specifies details about how containers in a multi-container are run.

", + "refs": { + "CreateModelInput$InferenceExecutionConfig": "

Specifies details of how containers in a multi-container endpoint are called.

", + "DescribeModelOutput$InferenceExecutionConfig": "

Specifies details of how containers in a multi-container endpoint are called.

" + } + }, + "InferenceExecutionMode": { + "base": null, + "refs": { + "InferenceExecutionConfig$Mode": "

How containers in a multi-container are run. The following values are valid.

" + } + }, "InferenceSpecification": { "base": "

Defines how to perform inference generation after a training job is run.

", "refs": { @@ -4817,9 +4831,10 @@ "MonitoringOutputConfig$KmsKeyId": "

The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption.

", "OnlineStoreSecurityConfig$KmsKeyId": "

The ID of the AWS Key Management Service (AWS KMS) key that SageMaker Feature Store uses to encrypt the Amazon S3 objects at rest using Amazon S3 server-side encryption.

The caller (either IAM user or IAM role) of CreateFeatureGroup must have below permissions to the OnlineStore KmsKeyId:

The caller (either IAM user or IAM role) to all DataPlane operations (PutRecord, GetRecord, DeleteRecord) must have the following permissions to the KmsKeyId:

", "OutputConfig$KmsKeyId": "

The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt data on the storage volume after compilation job. If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account

The KmsKeyId can be any of the following formats:

", - "OutputDataConfig$KmsKeyId": "

The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption. The KmsKeyId can be any of the following formats:

If you use a KMS key ID or an alias of your master key, the Amazon SageMaker execution role must include permissions to call kms:Encrypt. If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. Amazon SageMaker uses server-side encryption with KMS-managed keys for OutputDataConfig. If you use a bucket policy with an s3:PutObject permission that only allows objects with server-side encryption, set the condition key of s3:x-amz-server-side-encryption to \"aws:kms\". For more information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer Guide.

The KMS key policy must grant permission to the IAM role that you specify in your CreateTrainingJob, CreateTransformJob, or CreateHyperParameterTuningJob requests. For more information, see Using Key Policies in AWS KMS in the AWS Key Management Service Developer Guide.

", + "OutputDataConfig$KmsKeyId": "

The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption. The KmsKeyId can be any of the following formats:

If you use a KMS key ID or an alias of your master key, the Amazon SageMaker execution role must include permissions to call kms:Encrypt. If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. Amazon SageMaker uses server-side encryption with KMS-managed keys for OutputDataConfig. If you use a bucket policy with an s3:PutObject permission that only allows objects with server-side encryption, set the condition key of s3:x-amz-server-side-encryption to \"aws:kms\". For more information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer Guide.

The KMS key policy must grant permission to the IAM role that you specify in your CreateTrainingJob, CreateTransformJob, or CreateHyperParameterTuningJob requests. For more information, see Using Key Policies in AWS KMS in the AWS Key Management Service Developer Guide.

", "ProcessingClusterConfig$VolumeKmsKeyId": "

The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the processing job.

", "ProcessingOutputConfig$KmsKeyId": "

The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt the processing job output. KmsKeyId can be an ID of a KMS key, ARN of a KMS key, alias of a KMS key, or alias of a KMS key. The KmsKeyId is applied to all outputs.

", + "ProductionVariantCoreDumpConfig$KmsKeyId": "

The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt the core dump data at rest using Amazon S3 server-side encryption. The KmsKeyId can be any of the following formats:

If you use a KMS key ID or an alias of your master key, the Amazon SageMaker execution role must include permissions to call kms:Encrypt. If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. Amazon SageMaker uses server-side encryption with KMS-managed keys for OutputDataConfig. If you use a bucket policy with an s3:PutObject permission that only allows objects with server-side encryption, set the condition key of s3:x-amz-server-side-encryption to \"aws:kms\". For more information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer Guide.

The KMS key policy must grant permission to the IAM role that you specify in your CreateEndpoint and UpdateEndpoint requests. For more information, see Using Key Policies in AWS KMS in the AWS Key Management Service Developer Guide.

", "RedshiftDatasetDefinition$KmsKeyId": "

The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt data from a Redshift execution.

", "ResourceConfig$VolumeKmsKeyId": "

The AWS KMS key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the training job.

Certain Nitro-based instances include local storage, dependent on the instance type. Local storage volumes are encrypted using a hardware module on the instance. You can't request a VolumeKmsKeyId when using an instance type with local storage.

For a list of instance types that support local instance storage, see Instance Store Volumes.

For more information about local instance storage encryption, see SSD Instance Store Volumes.

The VolumeKmsKeyId can be in any of the following formats:

", "S3StorageConfig$KmsKeyId": "

The AWS Key Management Service (KMS) key ID of the key used to encrypt any objects written into the OfflineStore S3 location.

The IAM roleARN that is passed as a parameter to CreateFeatureGroup must have below permissions to the KmsKeyId:

", @@ -7611,6 +7626,12 @@ "ProductionVariant$AcceleratorType": "

The size of the Elastic Inference (EI) instance to use for the production variant. EI instances provide on-demand GPU computing for inference. For more information, see Using Elastic Inference in Amazon SageMaker.

" } }, + "ProductionVariantCoreDumpConfig": { + "base": "

Specifies configuration for a core dump from the model container when the process crashes.

", + "refs": { + "ProductionVariant$CoreDumpConfig": "

Specifies configuration for a core dump from the model container when the process crashes.

" + } + }, "ProductionVariantInstanceType": { "base": null, "refs": { @@ -8917,7 +8938,7 @@ "TaskAvailabilityLifetimeInSeconds": { "base": null, "refs": { - "HumanTaskConfig$TaskAvailabilityLifetimeInSeconds": "

The length of time that a task remains available for labeling by human workers. The default and maximum values for this parameter depend on the type of workforce you use.

" + "HumanTaskConfig$TaskAvailabilityLifetimeInSeconds": "

The length of time that a task remains available for labeling by human workers. The default and maximum values for this parameter depend on the type of workforce you use.

" } }, "TaskCount": { @@ -8956,7 +8977,7 @@ "TaskTimeLimitInSeconds": { "base": null, "refs": { - "HumanTaskConfig$TaskTimeLimitInSeconds": "

The amount of time that a worker has to complete a task.

If you create a custom labeling job, the maximum value for this parameter is 8 hours (28,800 seconds).

If you create a labeling job using a built-in task type the maximum for this parameter depends on the task type you use:

" + "HumanTaskConfig$TaskTimeLimitInSeconds": "

The amount of time that a worker has to complete a task.

If you create a custom labeling job, the maximum value for this parameter is 8 hours (28,800 seconds).

If you create a labeling job using a built-in task type the maximum for this parameter depends on the task type you use:

" } }, "TaskTitle": { diff --git a/service/sagemaker/api.go b/service/sagemaker/api.go index 5f20f39c17e..85d03ed86a1 100644 --- a/service/sagemaker/api.go +++ b/service/sagemaker/api.go @@ -30941,6 +30941,9 @@ type CreateModelInput struct { // ExecutionRoleArn is a required field ExecutionRoleArn *string `min:"20" type:"string" required:"true"` + // Specifies details of how containers in a multi-container endpoint are called. + InferenceExecutionConfig *InferenceExecutionConfig `type:"structure"` + // The name of the new model. // // ModelName is a required field @@ -30997,6 +31000,11 @@ func (s *CreateModelInput) Validate() error { } } } + if s.InferenceExecutionConfig != nil { + if err := s.InferenceExecutionConfig.Validate(); err != nil { + invalidParams.AddNested("InferenceExecutionConfig", err.(request.ErrInvalidParams)) + } + } if s.PrimaryContainer != nil { if err := s.PrimaryContainer.Validate(); err != nil { invalidParams.AddNested("PrimaryContainer", err.(request.ErrInvalidParams)) @@ -31042,6 +31050,12 @@ func (s *CreateModelInput) SetExecutionRoleArn(v string) *CreateModelInput { return s } +// SetInferenceExecutionConfig sets the InferenceExecutionConfig field's value. +func (s *CreateModelInput) SetInferenceExecutionConfig(v *InferenceExecutionConfig) *CreateModelInput { + s.InferenceExecutionConfig = v + return s +} + // SetModelName sets the ModelName field's value. func (s *CreateModelInput) SetModelName(v string) *CreateModelInput { s.ModelName = &v @@ -42138,6 +42152,9 @@ type DescribeModelOutput struct { // ExecutionRoleArn is a required field ExecutionRoleArn *string `min:"20" type:"string" required:"true"` + // Specifies details of how containers in a multi-container endpoint are called. + InferenceExecutionConfig *InferenceExecutionConfig `type:"structure"` + // The Amazon Resource Name (ARN) of the model. 
// // ModelArn is a required field @@ -42192,6 +42209,12 @@ func (s *DescribeModelOutput) SetExecutionRoleArn(v string) *DescribeModelOutput return s } +// SetInferenceExecutionConfig sets the InferenceExecutionConfig field's value. +func (s *DescribeModelOutput) SetInferenceExecutionConfig(v *InferenceExecutionConfig) *DescribeModelOutput { + s.InferenceExecutionConfig = v + return s +} + // SetModelArn sets the ModelArn field's value. func (s *DescribeModelOutput) SetModelArn(v string) *DescribeModelOutput { s.ModelArn = &v @@ -49609,7 +49632,8 @@ type HumanTaskConfig struct { // hours (43,200 seconds). The default is 6 hours (21,600 seconds). // // * If you choose a private or vendor workforce, the default value is 10 - // days (864,000 seconds). For most users, the maximum is also 10 days. + // days (864,000 seconds). For most users, the maximum is also 10 days. If + // you want to change this limit, contact AWS Support. TaskAvailabilityLifetimeInSeconds *int64 `min:"60" type:"integer"` // A description of the task for your human workers. @@ -49635,7 +49659,8 @@ type HumanTaskConfig struct { // // * For 3D point cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/sms-point-cloud.html) // and video frame (https://docs.aws.amazon.com/sagemaker/latest/dg/sms-video.html) - // labeling jobs, the maximum is 7 days (604,800 seconds). + // labeling jobs, the maximum is 7 days (604,800 seconds). If you want to + // change these limits, contact AWS Support. // // TaskTimeLimitInSeconds is a required field TaskTimeLimitInSeconds *int64 `min:"30" type:"integer" required:"true"` @@ -51144,6 +51169,49 @@ func (s *ImageVersion) SetVersion(v int64) *ImageVersion { return s } +// Specifies details about how containers in a multi-container are run. +type InferenceExecutionConfig struct { + _ struct{} `type:"structure"` + + // How containers in a multi-container are run. The following values are valid. + // + // * SERIAL - Containers run as a serial pipeline. 
+ // + // * DIRECT - Only the individual container that you specify is run. + // + // Mode is a required field + Mode *string `type:"string" required:"true" enum:"InferenceExecutionMode"` +} + +// String returns the string representation +func (s InferenceExecutionConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InferenceExecutionConfig) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InferenceExecutionConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InferenceExecutionConfig"} + if s.Mode == nil { + invalidParams.Add(request.NewErrParamRequired("Mode")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMode sets the Mode field's value. +func (s *InferenceExecutionConfig) SetMode(v string) *InferenceExecutionConfig { + s.Mode = &v + return s +} + // Defines how to perform inference generation after a training job is run. type InferenceSpecification struct { _ struct{} `type:"structure"` @@ -64357,12 +64425,12 @@ type OutputDataConfig struct { // with KMS-managed keys for OutputDataConfig. If you use a bucket policy with // an s3:PutObject permission that only allows objects with server-side encryption, // set the condition key of s3:x-amz-server-side-encryption to "aws:kms". For - // more information, see KMS-Managed Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html) // in the Amazon Simple Storage Service Developer Guide. // // The KMS key policy must grant permission to the IAM role that you specify // in your CreateTrainingJob, CreateTransformJob, or CreateHyperParameterTuningJob - // requests. 
For more information, see Using Key Policies in AWS KMS (http://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html) + // requests. For more information, see Using Key Policies in AWS KMS (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html) // in the AWS Key Management Service Developer Guide. KmsKeyId *string `type:"string"` @@ -66223,6 +66291,10 @@ type ProductionVariant struct { // more information, see Using Elastic Inference in Amazon SageMaker (https://docs.aws.amazon.com/sagemaker/latest/dg/ei.html). AcceleratorType *string `type:"string" enum:"ProductionVariantAcceleratorType"` + // Specifies configuration for a core dump from the model container when the + // process crashes. + CoreDumpConfig *ProductionVariantCoreDumpConfig `type:"structure"` + // Number of instances to launch initially. // // InitialInstanceCount is a required field @@ -66279,6 +66351,11 @@ func (s *ProductionVariant) Validate() error { if s.VariantName == nil { invalidParams.Add(request.NewErrParamRequired("VariantName")) } + if s.CoreDumpConfig != nil { + if err := s.CoreDumpConfig.Validate(); err != nil { + invalidParams.AddNested("CoreDumpConfig", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -66292,6 +66369,12 @@ func (s *ProductionVariant) SetAcceleratorType(v string) *ProductionVariant { return s } +// SetCoreDumpConfig sets the CoreDumpConfig field's value. +func (s *ProductionVariant) SetCoreDumpConfig(v *ProductionVariantCoreDumpConfig) *ProductionVariant { + s.CoreDumpConfig = v + return s +} + // SetInitialInstanceCount sets the InitialInstanceCount field's value. func (s *ProductionVariant) SetInitialInstanceCount(v int64) *ProductionVariant { s.InitialInstanceCount = &v @@ -66322,6 +66405,80 @@ func (s *ProductionVariant) SetVariantName(v string) *ProductionVariant { return s } +// Specifies configuration for a core dump from the model container when the +// process crashes. 
+type ProductionVariantCoreDumpConfig struct { + _ struct{} `type:"structure"` + + // The Amazon S3 bucket to send the core dump to. + // + // DestinationS3Uri is a required field + DestinationS3Uri *string `type:"string" required:"true"` + + // The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to + // encrypt the core dump data at rest using Amazon S3 server-side encryption. + // The KmsKeyId can be any of the following formats: + // + // * // KMS Key ID "1234abcd-12ab-34cd-56ef-1234567890ab" + // + // * // Amazon Resource Name (ARN) of a KMS Key "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab" + // + // * // KMS Key Alias "alias/ExampleAlias" + // + // * // Amazon Resource Name (ARN) of a KMS Key Alias "arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias" + // + // If you use a KMS key ID or an alias of your master key, the Amazon SageMaker + // execution role must include permissions to call kms:Encrypt. If you don't + // provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon + // S3 for your role's account. Amazon SageMaker uses server-side encryption + // with KMS-managed keys for OutputDataConfig. If you use a bucket policy with + // an s3:PutObject permission that only allows objects with server-side encryption, + // set the condition key of s3:x-amz-server-side-encryption to "aws:kms". For + // more information, see KMS-Managed Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // The KMS key policy must grant permission to the IAM role that you specify + // in your CreateEndpoint and UpdateEndpoint requests. For more information, + // see Using Key Policies in AWS KMS (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html) + // in the AWS Key Management Service Developer Guide. 
+ KmsKeyId *string `type:"string"` +} + +// String returns the string representation +func (s ProductionVariantCoreDumpConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ProductionVariantCoreDumpConfig) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ProductionVariantCoreDumpConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ProductionVariantCoreDumpConfig"} + if s.DestinationS3Uri == nil { + invalidParams.Add(request.NewErrParamRequired("DestinationS3Uri")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDestinationS3Uri sets the DestinationS3Uri field's value. +func (s *ProductionVariantCoreDumpConfig) SetDestinationS3Uri(v string) *ProductionVariantCoreDumpConfig { + s.DestinationS3Uri = &v + return s +} + +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *ProductionVariantCoreDumpConfig) SetKmsKeyId(v string) *ProductionVariantCoreDumpConfig { + s.KmsKeyId = &v + return s +} + // Describes weight and capacities for a production variant associated with // an endpoint. 
If you sent a request to the UpdateEndpointWeightsAndCapacities // API and the endpoint status is Updating, you get different desired and current @@ -78198,6 +78355,22 @@ func ImageVersionStatus_Values() []string { } } +const ( + // InferenceExecutionModeSerial is a InferenceExecutionMode enum value + InferenceExecutionModeSerial = "Serial" + + // InferenceExecutionModeDirect is a InferenceExecutionMode enum value + InferenceExecutionModeDirect = "Direct" +) + +// InferenceExecutionMode_Values returns all elements of the InferenceExecutionMode enum +func InferenceExecutionMode_Values() []string { + return []string{ + InferenceExecutionModeSerial, + InferenceExecutionModeDirect, + } +} + const ( // InputModePipe is a InputMode enum value InputModePipe = "Pipe" diff --git a/service/sagemakerruntime/api.go b/service/sagemakerruntime/api.go index c2124f7a954..a7b5fa7430f 100644 --- a/service/sagemakerruntime/api.go +++ b/service/sagemakerruntime/api.go @@ -227,6 +227,10 @@ type InvokeEndpointInput struct { // Data (https://docs.aws.amazon.com/sagemaker/latest/dg/model-monitor-data-capture.html). InferenceId *string `location:"header" locationName:"X-Amzn-SageMaker-Inference-Id" min:"1" type:"string"` + // If the endpoint hosts multiple containers and is configured to use direct + // invocation, this parameter specifies the host name of the container to invoke. + TargetContainerHostname *string `location:"header" locationName:"X-Amzn-SageMaker-Target-Container-Hostname" type:"string"` + // The model to request for inference when invoking a multi-model endpoint. TargetModel *string `location:"header" locationName:"X-Amzn-SageMaker-Target-Model" min:"1" type:"string"` @@ -311,6 +315,12 @@ func (s *InvokeEndpointInput) SetInferenceId(v string) *InvokeEndpointInput { return s } +// SetTargetContainerHostname sets the TargetContainerHostname field's value. 
+func (s *InvokeEndpointInput) SetTargetContainerHostname(v string) *InvokeEndpointInput { + s.TargetContainerHostname = &v + return s +} + // SetTargetModel sets the TargetModel field's value. func (s *InvokeEndpointInput) SetTargetModel(v string) *InvokeEndpointInput { s.TargetModel = &v