-
Notifications
You must be signed in to change notification settings - Fork 81
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Managed Node Group Launch Template Improvements #1225
Changes from all commits
b749708
94fd33c
9c05537
1723d4a
e5561eb
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,3 @@ | ||
name: managed-ng-ami-options
description: Tests that various AMI-related options can be set on a managed node group.
runtime: nodejs
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,6 @@ | ||
# tests/managed-ng-ami-options

Tests that various AMI-related options can be set on a managed node group.

Includes:
- amiId
- gpu
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,53 @@ | ||
import * as awsx from '@pulumi/awsx'; | ||
import * as eks from "@pulumi/eks"; | ||
import * as pulumi from "@pulumi/pulumi"; | ||
import * as iam from "../iam"; | ||
import {GetParameterCommand, SSMClient} from "@aws-sdk/client-ssm"; | ||
|
||
const eksVpc = new awsx.ec2.Vpc("eks-vpc", { | ||
enableDnsHostnames: true, | ||
cidrBlock: "10.0.0.0/16", | ||
}); | ||
|
||
// IAM roles for the node groups. | ||
const role = iam.createRole("example-role"); | ||
|
||
const projectName = pulumi.getProject(); | ||
|
||
const cluster = new eks.Cluster(`${projectName}`, { | ||
skipDefaultNodeGroup: true, | ||
deployDashboard: false, | ||
vpcId: eksVpc.vpcId, | ||
// Public subnets will be used for load balancers | ||
publicSubnetIds: eksVpc.publicSubnetIds, | ||
// Private subnets will be used for cluster nodes | ||
privateSubnetIds: eksVpc.privateSubnetIds, | ||
instanceRoles: [role], | ||
}); | ||
|
||
// Export the cluster's kubeconfig. | ||
export const kubeconfig = cluster.kubeconfig; | ||
|
||
// Find the recommended AMI for the EKS node group. | ||
// See https://docs.aws.amazon.com/eks/latest/userguide/retrieve-ami-id.html | ||
async function getEksAmiId(k8sVersion: string): Promise<string> { | ||
const client = new SSMClient(); | ||
const parameterName = `/aws/service/eks/optimized-ami/${k8sVersion}/amazon-linux/recommended/image_id`; | ||
const command = new GetParameterCommand({ Name: parameterName }); | ||
const response = await client.send(command); | ||
|
||
if (!response.Parameter || !response.Parameter.Value) { | ||
throw new Error(`Could not find EKS optimized AMI for Kubernetes version ${k8sVersion}`); | ||
} | ||
|
||
return response.Parameter.Value; | ||
} | ||
|
||
const amiId = cluster.eksCluster.version.apply(version => pulumi.output(getEksAmiId(version))); | ||
|
||
// Create a managed node group using a cluster as input. | ||
eks.createManagedNodeGroup(`${projectName}-managed-ng`, { | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. What do you think about combining the ami and gpu tests into one? We need to balance the number of eks clusters we're creating and here we're interested in the node group. |
||
cluster: cluster, | ||
nodeRole: role, | ||
amiId: amiId, | ||
}); |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,35 @@ | ||
import * as awsx from '@pulumi/awsx'; | ||
import * as eks from "@pulumi/eks"; | ||
import * as pulumi from "@pulumi/pulumi"; | ||
import * as iam from "../iam"; | ||
|
||
const eksVpc = new awsx.ec2.Vpc("eks-vpc", { | ||
enableDnsHostnames: true, | ||
cidrBlock: "10.0.0.0/16", | ||
}); | ||
|
||
// IAM roles for the node groups. | ||
const role = iam.createRole("example-role"); | ||
|
||
const projectName = pulumi.getProject(); | ||
|
||
const cluster = new eks.Cluster(`${projectName}`, { | ||
skipDefaultNodeGroup: true, | ||
deployDashboard: false, | ||
vpcId: eksVpc.vpcId, | ||
// Public subnets will be used for load balancers | ||
publicSubnetIds: eksVpc.publicSubnetIds, | ||
// Private subnets will be used for cluster nodes | ||
privateSubnetIds: eksVpc.privateSubnetIds, | ||
instanceRoles: [role], | ||
}); | ||
|
||
// Export the cluster's kubeconfig. | ||
export const kubeconfig = cluster.kubeconfig; | ||
|
||
// Create a managed node group using a cluster as input. | ||
eks.createManagedNodeGroup(`${projectName}-managed-ng`, { | ||
cluster: cluster, | ||
nodeRole: role, | ||
gpu: true | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Does this have any special instance type requirements or can we launch a GPU optimized AMI on a basic t3 instance for example? |
||
}); |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,39 @@ | ||
import * as aws from "@pulumi/aws"; | ||
import * as pulumi from "@pulumi/pulumi"; | ||
import * as iam from "./iam"; | ||
|
||
const managedPolicyArns: string[] = [ | ||
"arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy", | ||
"arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy", | ||
"arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly", | ||
]; | ||
|
||
// Creates a role and attaches the EKS worker node IAM managed policies | ||
export function createRole(name: string): aws.iam.Role { | ||
const role = new aws.iam.Role(name, { | ||
assumeRolePolicy: aws.iam.assumeRolePolicyForPrincipal({ | ||
Service: "ec2.amazonaws.com", | ||
}), | ||
}); | ||
|
||
let counter = 0; | ||
for (const policy of managedPolicyArns) { | ||
// Create RolePolicyAttachment without returning it. | ||
const rpa = new aws.iam.RolePolicyAttachment(`${name}-policy-${counter++}`, | ||
{ policyArn: policy, role: role }, | ||
); | ||
} | ||
|
||
return role; | ||
} | ||
|
||
// Creates a collection of IAM roles. | ||
export function createRoles(name: string, quantity: number): aws.iam.Role[] { | ||
const roles: aws.iam.Role[] = []; | ||
|
||
for (let i = 0; i < quantity; i++) { | ||
roles.push(iam.createRole(`${name}-role-${i}`)); | ||
} | ||
|
||
return roles; | ||
} |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,14 @@ | ||
{
    "name": "managed-ng-ami-options",
    "devDependencies": {
        "typescript": "^4.0.0",
        "@types/node": "latest"
    },
    "dependencies": {
        "@pulumi/pulumi": "^3.0.0",
        "@pulumi/aws": "^6.0.0",
        "@pulumi/eks": "latest",
        "@pulumi/awsx": "^2.0.0",
        "@aws-sdk/client-ssm": "^3.637.0"
    }
}
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,25 @@ | ||
{
    "compilerOptions": {
        "outDir": "bin",
        "target": "es6",
        "lib": [
            "es6"
        ],
        "module": "commonjs",
        "moduleResolution": "node",
        "declaration": true,
        "sourceMap": true,
        "stripInternal": true,
        "experimentalDecorators": true,
        "pretty": true,
        "noFallthroughCasesInSwitch": true,
        "noImplicitAny": true,
        "noImplicitReturns": true,
        "forceConsistentCasingInFileNames": true,
        "strictNullChecks": true
    },
    "files": [
        "ami-id/index.ts",
        "gpu/index.ts"
    ]
}
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -1544,6 +1544,33 @@ export type ManagedNodeGroupOptions = Omit< | |
* - maxSize: 2 | ||
*/ | ||
scalingConfig?: pulumi.Input<awsInputs.eks.NodeGroupScalingConfig>; | ||
|
||
/** | ||
* The AMI ID to use for the worker nodes. | ||
* | ||
* Defaults to the latest recommended EKS Optimized Linux AMI from the | ||
* AWS Systems Manager Parameter Store. | ||
* | ||
* Note: `amiId` and `gpu` are mutually exclusive. | ||
* | ||
* See for more details: | ||
* - https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html. | ||
*/ | ||
amiId?: pulumi.Input<string>; | ||
JustASquid marked this conversation as resolved.
Show resolved
Hide resolved
|
||
|
||
/** | ||
* Use the latest recommended EKS Optimized Linux AMI with GPU support for | ||
* the worker nodes from the AWS Systems Manager Parameter Store. | ||
* | ||
* Defaults to false. | ||
* | ||
* Note: `gpu` and `amiId` are mutually exclusive. | ||
* | ||
* See for more details: | ||
* - https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html. | ||
* - https://docs.aws.amazon.com/eks/latest/userguide/retrieve-ami-id.html | ||
*/ | ||
gpu?: pulumi.Input<boolean>; | ||
}; | ||
|
||
/** | ||
|
@@ -1668,6 +1695,14 @@ function createManagedNodeGroupInternal( | |
); | ||
} | ||
|
||
if (args.amiId && args.gpu) { | ||
throw new pulumi.ResourceError("amiId and gpu are mutually exclusive.", parent); | ||
} | ||
|
||
if (args.amiType && args.gpu) { | ||
throw new pulumi.ResourceError("amiType and gpu are mutually exclusive.", parent); | ||
} | ||
|
||
let roleArn: pulumi.Input<string>; | ||
if (args.nodeRoleArn) { | ||
roleArn = args.nodeRoleArn; | ||
|
@@ -1740,7 +1775,7 @@ function createManagedNodeGroupInternal( | |
} | ||
|
||
let launchTemplate: aws.ec2.LaunchTemplate | undefined; | ||
if (args.kubeletExtraArgs || args.bootstrapExtraArgs || args.enableIMDSv2) { | ||
if (args.kubeletExtraArgs || args.bootstrapExtraArgs || args.enableIMDSv2 || args.gpu || args.amiId || args.amiType) { | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Why do we need to create a custom launch template in this case [inline code reference lost in extraction]? Aside from that, the condition list here gets a bit long now. What do you think about creating a method for deciding whether to create a custom launch template? |
||
launchTemplate = createMNGCustomLaunchTemplate(name, args, core, parent, provider); | ||
|
||
// Disk size is specified in the launch template. | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -735,6 +735,21 @@ func generateSchema() schema.PackageSpec { | |
"(https://docs.aws.amazon.com/eks/latest/APIReference/API_Nodegroup.html#AmazonEKS-Type-Nodegroup-amiType) " + | ||
"for valid AMI Types. This provider will only perform drift detection if a configuration value is provided.", | ||
}, | ||
"amiId": { | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. You'll need to run the SDK/schema generation [command reference lost in extraction]. Because of the disparity between the nodejs SDK and the rest of the SDKs it would be good to add a test in another language as well (e.g. Golang). Let me know if you need help with converting the test you've created into another language. |
||
TypeSpec: schema.TypeSpec{Type: "string"}, | ||
Description: "The AMI ID to use for the worker nodes.\n\nDefaults to the latest recommended " + | ||
"EKS Optimized Linux AMI from the AWS Systems Manager Parameter Store.\n\nNote: " + | ||
"`amiId` and `gpu` are mutually exclusive.\n\nSee for more details:\n" + | ||
"- https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html.", | ||
}, | ||
"gpu": { | ||
TypeSpec: schema.TypeSpec{Type: "boolean"}, | ||
Description: "Use the latest recommended EKS Optimized Linux AMI with GPU support for the " + | ||
"worker nodes from the AWS Systems Manager Parameter Store.\n\nDefaults to false.\n\n" + | ||
"Note: `gpu` and `amiId` are mutually exclusive.\n\nSee for more details:\n" + | ||
"- https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html\n" + | ||
"- https://docs.aws.amazon.com/eks/latest/userguide/retrieve-ami-id.html", | ||
}, | ||
"capacityType": { | ||
TypeSpec: schema.TypeSpec{Type: "string"}, | ||
Description: "Type of capacity associated with the EKS Node Group. " + | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
You can do that in pulumi natively. See here for an example:
pulumi-eks/examples/custom-managed-nodegroup/index.ts
Lines 38 to 40 in 9d80b5d