#!/bin/bash
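#
# manage-cluster: provision a PKS cluster, expose its Kubernetes masters through
# an AWS classic ELB with a Route 53 alias record, and clean those resources up.
#
# Usage: manage-cluster {provision|access|cleanup}
#
# Requires the pks, aws, jq, kubectl, and dig CLIs, a configured AWS region,
# and the ENV_NAME and ROUTE_53_ZONE_ID environment variables.
#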
set -e
shopt -s expand_aliases
red=$(tput setaf 1)
green=$(tput setaf 2)
reset=$(tput sgr0)
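# Verify the required CLIs, the PKS API login, the AWS region, and the
# ENV_NAME / ROUTE_53_ZONE_ID environment variables before any operation.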
function check_dep {
  if ! [ -x "$(command -v jq)" ] || ! [ -x "$(command -v aws)" ]; then
    echo -e "\n${red}Error: required CLIs (jq, aws) are not installed; see the instructions${reset}" >&2
    exit 1
  fi
  if ! [ -f "${HOME}/.pks/creds.yml" ]; then
    echo -e "\n${red}Error: log in to the PKS API before proceeding${reset}" >&2
    exit 1
  fi
  CLUSTER_REGION=$(aws configure get region)
  if [[ "$CLUSTER_REGION" == "" ]]; then
    echo -e "\n${red}Error: configure an AWS region; see step 3 of the instructions${reset}" >&2
    exit 1
  fi
  if [ -z "$ENV_NAME" ] || [ -z "$ROUTE_53_ZONE_ID" ]; then
    echo -e "\n${red}Error: configure the ENV_NAME and ROUTE_53_ZONE_ID variables; see step 4${reset}" >&2
    exit 1
  fi
}
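# Return the status (PENDING or INSYNC) of a Route 53 change request.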
function get_change_status() {
  aws route53 get-change --id "$1" | jq -r '.ChangeInfo.Status'
}
# Create a pks cluster whose external hostname resolves through Route 53.
function provision_cluster {
  check_dep # Check CLIs, login, and env variables
  echo -e "\n${green}Enter the pks cluster name to be created and press [ENTER]:${reset}"
  read -r CLUSTER_NAME
  echo -e "\n${green}Listing pks plans...${reset}"
  pks list-plans
  echo -e "\n${green}Enter the pks plan to provision (choose from the list above) and press [ENTER]:${reset}"
  read -r CLUSTER_PLAN
  echo -e "\n${green}Fetching domain from the provided hosted zone${reset}"
  DOMAIN=$(aws route53 get-hosted-zone --id "$ROUTE_53_ZONE_ID" | jq -r .HostedZone.Name)
  DNS_ROUTE="$CLUSTER_NAME-k8s.$DOMAIN"
  echo -e "\n${green}Creating PKS cluster $CLUSTER_NAME with external hostname $DNS_ROUTE ${reset}"
  pks create-cluster "$CLUSTER_NAME" --external-hostname "$DNS_ROUTE" -p "$CLUSTER_PLAN"
}
# Enable access to a pks cluster on AWS: front the master nodes with a
# classic ELB and publish a Route 53 alias record for it.
function enable_access {
  check_dep # Check CLIs, login, and env variables
  echo -e "\n${green}Listing available clusters...${reset}"
  pks clusters
  echo -e "\n${green}Enter a pks cluster name from the list above and press [ENTER]:${reset}"
  read -r CLUSTER_NAME
  DOMAIN=$(aws route53 get-hosted-zone --id "$ROUTE_53_ZONE_ID" | jq -r .HostedZone.Name)
  DNS_ROUTE="$CLUSTER_NAME-k8s.$DOMAIN"
  echo -e "\n${green}Getting master nodes for pks cluster $CLUSTER_NAME ${reset}"
  CLUSTER_UUID=$(pks cluster "$CLUSTER_NAME" --json | jq -r .uuid)
  export BOSH_DEPLOYMENT_NAME=service-instance_$CLUSTER_UUID
  MASTERS=$(mktemp "/tmp/master-$CLUSTER_UUID.XXXX")
  echo -e "\n${green}Fetching master vms from pks cluster $CLUSTER_NAME ${reset}"
  aws ec2 describe-instances --filters "Name=tag:deployment,Values=service-instance_$CLUSTER_UUID" \
    "Name=tag:instance_group,Values=master" --query "Reservations[].Instances[].[VpcId,InstanceId]" --output text > "$MASTERS"
  # --output text is tab-delimited, so cut on its default tab delimiter
  CLUSTER_VPC=$(head -1 "$MASTERS" | cut -f1)
  SECURITY_GROUP=$CLUSTER_NAME-master-access-sg
  CLUSTER_LB_NAME=$CLUSTER_NAME-k8s-lb
  SEC_GROUP=$(aws ec2 describe-security-groups \
    --filters Name=ip-permission.from-port,Values=8443 Name=ip-permission.to-port,Values=8443 Name=ip-permission.cidr,Values='0.0.0.0/0' Name=group-name,Values="$SECURITY_GROUP" \
    --query "SecurityGroups[*].{ID:GroupId}" --output text)
  if [[ $SEC_GROUP == "" ]]; then
    echo -e "\n${green}Creating security group for k8s master access${reset}"
    SECURITY_GROUP_ID=$(aws ec2 create-security-group \
      --group-name "$SECURITY_GROUP" \
      --description "$CLUSTER_NAME master lb security group" \
      --vpc-id "$CLUSTER_VPC" | jq -r .GroupId)
    echo -e "\n${green}Adding rules to the security group $SECURITY_GROUP ${reset}"
    aws ec2 authorize-security-group-ingress --group-id "$SECURITY_GROUP_ID" --protocol tcp --port 8443 --cidr 0.0.0.0/0
  else
    # Reuse the existing group so the load balancer step below gets a valid id
    SECURITY_GROUP_ID=$SEC_GROUP
  fi
  echo -e "\n${green}Fetching public subnets from vpc $CLUSTER_VPC ${reset}"
  subnets=( $(aws ec2 describe-subnets --filters "Name=vpc-id,Values=$CLUSTER_VPC" \
    "Name=tag:Name,Values=$ENV_NAME-public-subnet*" --query 'Subnets[*].[SubnetId]' --output text) )
  echo -e "\n${green}Adding kubernetes tags to public subnets for LoadBalancer type services${reset}"
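  # The kubernetes.io/cluster/<deployment-name> tag marks these subnets as
  # usable by the Kubernetes AWS cloud provider when it provisions
  # LoadBalancer-type services.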
  for subnet in "${subnets[@]}"; do
    aws ec2 create-tags --resources "$subnet" --tags Key="kubernetes.io/cluster/service-instance_$CLUSTER_UUID",Value="shared"
  done
  LB_CHECK=$(aws elb describe-load-balancers --output json \
    | jq -r --arg CLUSTER_LB "$CLUSTER_LB_NAME" '.LoadBalancerDescriptions[] | select(.LoadBalancerName==$CLUSTER_LB) | .LoadBalancerName')
  if [[ $LB_CHECK == "" ]]; then
    echo -e "\n${green}Creating load balancer for pks cluster $CLUSTER_NAME ${reset}"
    # Pass every public subnet found above instead of assuming exactly three
    ELB_DNS=$(aws elb create-load-balancer --load-balancer-name "$CLUSTER_LB_NAME" \
      --listeners "Protocol=TCP,LoadBalancerPort=8443,InstanceProtocol=TCP,InstancePort=8443" \
      --subnets "${subnets[@]}" --security-groups "$SECURITY_GROUP_ID" | jq -r .DNSName)
  else
    echo -e "\n${green}Load balancer $CLUSTER_LB_NAME already exists${reset}"
    # Look up the DNS name of the existing LB so the Route 53 step still works
    ELB_DNS=$(aws elb describe-load-balancers --load-balancer-names "$CLUSTER_LB_NAME" \
      | jq -r .LoadBalancerDescriptions[].DNSName)
  fi
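  # Register each master VM from the describe-instances output as an ELB backend.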
  # Scope the tab IFS to read so it does not leak into the rest of the script
  while IFS=$'\t' read -r vpc instance; do
    echo -e "\n${green}Adding $instance to load balancer $CLUSTER_LB_NAME ${reset}"
    aws elb register-instances-with-load-balancer --load-balancer-name "$CLUSTER_LB_NAME" --instances "$instance"
  done < "$MASTERS"
  ROUTE_CHECK=$(aws route53 list-resource-record-sets --hosted-zone-id "$ROUTE_53_ZONE_ID" \
    --query "ResourceRecordSets[?Name == '$DNS_ROUTE']" --output json | jq -r .[].Name)
  if [[ $ROUTE_CHECK == "" ]]; then
    LB_HOSTED_ZONE=$(aws elb describe-load-balancers --load-balancer-names "$CLUSTER_LB_NAME" \
      | jq -r .LoadBalancerDescriptions[].CanonicalHostedZoneNameID)
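    # Alias the record to the ELB's "dualstack." endpoint, which answers both
    # IPv4 and IPv6 resolvers.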
    # Build the change batch with jq --arg so shell values are safely quoted
    CHANGE_BATCH=$(jq -c -n --arg name "$DNS_ROUTE" --arg zone "$LB_HOSTED_ZONE" --arg dns "dualstack.$ELB_DNS" \
      '{Changes: [{Action: "CREATE", ResourceRecordSet: {Name: $name, Type: "A", AliasTarget: {EvaluateTargetHealth: false, HostedZoneId: $zone, DNSName: $dns}}}]}')
    echo -e "\n${green}Creating ELB A record in the hosted zone${reset}"
    CHANGE_ID=$(aws route53 change-resource-record-sets --hosted-zone-id "$ROUTE_53_ZONE_ID" \
      --change-batch "$CHANGE_BATCH" | jq -r '.ChangeInfo.Id' | cut -d'/' -f3)
    echo -e "\n${green}Record change submitted! Change Id: $CHANGE_ID ${reset}"
    echo -e "\n${green}Waiting for the Route 53 change to propagate${reset}"
    until [[ $(get_change_status "$CHANGE_ID") == "INSYNC" ]]; do
      echo -n "."
      sleep 5
    done
    echo -e "\nYour record change has now propagated."
    echo -e "\n${green}Waiting for DNS resolution to go through${reset}"
    until [[ $(dig @8.8.8.8 "$DNS_ROUTE" A +short) != "" ]]; do
      echo -n "."
      sleep 5
    done
  else
    echo -e "\n${green}Route already exists in the hosted zone${reset}"
  fi
  rm "$MASTERS"
  echo -e "\n${green}Configuring kubectl credentials using pks get-credentials${reset}"
  pks get-credentials "$CLUSTER_NAME"
  echo -e "\n${green}PKS cluster $CLUSTER_NAME information${reset}"
  kubectl cluster-info
  echo -e "\n${green}Getting all pods across all namespaces...${reset}"
  kubectl get pods --all-namespaces
}
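# Tear down the AWS resources created by "access": the Route 53 alias record,
# the classic ELB, and the master-access security group.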
function aws_cleanup {
  check_dep # Check CLIs, login, and env variables
  echo -e "\n${green}Listing available clusters...${reset}"
  pks clusters
  echo -e "\n${green}Enter a pks cluster name from the list above and press [ENTER]:${reset}"
  read -r CLUSTER_NAME
  CLUSTER_LB_NAME=$CLUSTER_NAME-k8s-lb
  DOMAIN=$(aws route53 get-hosted-zone --id "$ROUTE_53_ZONE_ID" | jq -r .HostedZone.Name)
  DNS_ROUTE="$CLUSTER_NAME-k8s.$DOMAIN"
  SECURITY_GROUP=$CLUSTER_NAME-master-access-sg
  LB_HOSTED_ZONE=$(aws elb describe-load-balancers --load-balancer-names "$CLUSTER_LB_NAME" \
    | jq -r .LoadBalancerDescriptions[].CanonicalHostedZoneNameID)
  ELB_DNS=$(aws elb describe-load-balancers --load-balancer-names "$CLUSTER_LB_NAME" \
    | jq -r .LoadBalancerDescriptions[].CanonicalHostedZoneName)
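  # A Route 53 DELETE change must match the existing record set exactly, so
  # rebuild the same alias target that the "access" step created.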
  CHANGE_BATCH=$(jq -c -n --arg name "$DNS_ROUTE" --arg zone "$LB_HOSTED_ZONE" --arg dns "dualstack.$ELB_DNS" \
    '{Changes: [{Action: "DELETE", ResourceRecordSet: {Name: $name, Type: "A", AliasTarget: {EvaluateTargetHealth: false, HostedZoneId: $zone, DNSName: $dns}}}]}')
  echo -e "\n${green}Deleting ELB A record from the hosted zone${reset}"
  aws route53 change-resource-record-sets --hosted-zone-id "$ROUTE_53_ZONE_ID" --change-batch "$CHANGE_BATCH"
  echo -e "\n${green}Deleting load balancer $CLUSTER_LB_NAME and waiting for its network interfaces to be deleted${reset}"
  aws elb delete-load-balancer --load-balancer-name "$CLUSTER_LB_NAME" && sleep 60
  echo -e "\n${green}Deleting security group${reset}"
  SECURITY_GROUP_ID=$(aws ec2 describe-security-groups \
    --filters Name=ip-permission.from-port,Values=8443 Name=ip-permission.to-port,Values=8443 Name=ip-permission.cidr,Values='0.0.0.0/0' Name=group-name,Values="$SECURITY_GROUP" \
    --query "SecurityGroups[*].{ID:GroupId}" --output text)
  aws ec2 delete-security-group --group-id "$SECURITY_GROUP_ID"
  echo -e "\n${green}AWS cleanup complete; you can now delete the cluster with: ${reset}${red}pks delete-cluster $CLUSTER_NAME ${reset}"
}
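# Entry point: dispatch on the first positional argument.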
operation=$1
case $operation in
  provision)
    provision_cluster
    ;;
  access)
    enable_access
    ;;
  cleanup)
    aws_cleanup
    ;;
  *)
    echo "Usage: $0 {provision|access|cleanup}"
    exit 1
    ;;
esac