diff --git a/aws/README.md b/aws/README.md
index eb0fa258..3e8c9ece 100644
--- a/aws/README.md
+++ b/aws/README.md
@@ -42,13 +42,14 @@ The terraform code is loosely based on [this EKS managed Node Group TF example](
 1. export your AWS credentials (`export AWS_PROFILE=awsuser`)
 2. check whether you have the right profile by doing `aws sts get-caller-identity`. Make sure you have the right account and have the rights to do this.
-3. Do `terraform init` (if required, use tfenv to select TF 0.14.0 or higher )
-4. The bucket ARN will be asked in the next 2 steps. Take the one provided to you in the output earlier (e.g., `arn:aws:s3:::terraform-20230102231352749300000001`).
-5. Do `terraform plan`
-6. Do `terraform apply`. Note: the apply will take 10 to 20 minutes depending on the speed of the AWS backplane.
-7. When creation is done, do `aws eks update-kubeconfig --region eu-west-1 --name wrongsecrets-exercise-cluster --kubeconfig ~/.kube/wrongsecrets`
-8. Do `export KUBECONFIG=~/.kube/wrongsecrets`
-9. Run `./build-and-deploy-aws.sh` to install all the required materials (helm for calico, secrets management, autoscaling, etc.)
+3. Ensure you have set all the right variables in `terraform.tfvars`. If you want to use a custom domain with TLS, also fill in your domain name(s) and Route53 hosted zone ID here. If your domain is not hosted on Route53, first delegate the (sub)domain to the Route53 nameservers [using the AWS docs](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/CreatingNewSubdomain.html).
+4. Do `terraform init` (if required, use tfenv to select TF 0.14.0 or higher)
+5. You will be asked for the bucket ARN in the next two steps. Use the one provided in the earlier output (e.g., `arn:aws:s3:::terraform-20230102231352749300000001`).
+6. Do `terraform plan`
+7. Do `terraform apply`. Note: the apply will take 10 to 20 minutes depending on the speed of the AWS backplane.
+8. When creation is done, do `aws eks update-kubeconfig --region eu-west-1 --name wrongsecrets-exercise-cluster --kubeconfig ~/.kube/wrongsecrets`
+9. Do `export KUBECONFIG=~/.kube/wrongsecrets`
+10. Run `./build-and-deploy-aws.sh` to install all the required materials (helm for calico, secrets management, autoscaling, etc.)
 
 Your EKS cluster should be visible in [eu-west-1](https://eu-west-1.console.aws.amazon.com/eks/home?region=eu-west-1#/clusters) by default. Want a different region? You can modify `terraform.tfvars` or input it directly using the `region` variable in plan/apply.
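For quick reference, the updated README steps condense to roughly this shell session (a sketch based only on the list above; the profile name, region, and kubeconfig path are the examples used in the README and may differ in your setup):

export AWS_PROFILE=awsuser
aws sts get-caller-identity   # confirm you are in the right account with sufficient rights
# edit terraform.tfvars: region, and optionally hosted_zone_id / balancer_domain_name / ctfd_domain_name for a custom domain with TLS
terraform init
terraform plan                # supply the state bucket ARN from the earlier output when prompted
terraform apply               # same bucket ARN again; takes 10 to 20 minutes
aws eks update-kubeconfig --region eu-west-1 --name wrongsecrets-exercise-cluster --kubeconfig ~/.kube/wrongsecrets
export KUBECONFIG=~/.kube/wrongsecrets
./build-and-deploy-aws.sh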
diff --git a/aws/acm.tf b/aws/acm.tf
new file mode 100644
index 00000000..1513cd1d
--- /dev/null
+++ b/aws/acm.tf
@@ -0,0 +1,34 @@
+# ACM certificates for TLS; these are only created when the corresponding domain name variable is set
+module "acm_balancer" {
+  source = "terraform-aws-modules/acm/aws"
+
+  count = var.balancer_domain_name != "" ? 1 : 0
+
+  validation_method = "DNS"
+
+  domain_name = var.balancer_domain_name
+  zone_id     = var.hosted_zone_id
+
+  subject_alternative_names = [
+    "*.${var.balancer_domain_name}"
+  ]
+
+  wait_for_validation = true
+}
+
+module "acm_ctfd" {
+  source = "terraform-aws-modules/acm/aws"
+
+  count = var.ctfd_domain_name != "" ? 1 : 0
+
+  validation_method = "DNS"
+
+  domain_name = var.ctfd_domain_name
+  zone_id     = var.hosted_zone_id
+
+  subject_alternative_names = [
+    "*.${var.ctfd_domain_name}"
+  ]
+
+  wait_for_validation = true
+}
diff --git a/aws/k8s-aws-alb-script-cleanup.sh b/aws/k8s-aws-alb-script-cleanup.sh
index fc759447..c41a2ef1 100755
--- a/aws/k8s-aws-alb-script-cleanup.sh
+++ b/aws/k8s-aws-alb-script-cleanup.sh
@@ -37,7 +37,4 @@ echo "Cleanup helm chart"
 helm uninstall aws-load-balancer-controller \
   -n kube-system
 
-echo "Cleanup k8s ALB"
-kubectl delete -k "github.com/aws/eks-charts/stable/aws-load-balancer-controller//crds?ref=master"
-
 kubectl delete serviceaccount -n kube-system aws-load-balancer-controller
diff --git a/aws/k8s-aws-alb-script.sh b/aws/k8s-aws-alb-script.sh
index 14b70280..090b93bc 100755
--- a/aws/k8s-aws-alb-script.sh
+++ b/aws/k8s-aws-alb-script.sh
@@ -71,25 +71,39 @@ sleep 10
 
 EKS_CLUSTER_VERSION=$(aws eks describe-cluster --name $CLUSTERNAME --region $AWS_REGION --query cluster.version --output text)
 
-# echo "apply -f k8s/secret-challenge-vault-service.yml in 10 s"
-# sleep 10
-# kubectl apply -f k8s/secret-challenge-vault-service.yml
-echo "apply -f k8s/wrongsecrets-balancer-service.yml in 10 s"
+EXTERNAL_DNS_ROLE_ARN="$(terraform output -raw external_dns_role_arn)"
+kubectl create serviceaccount -n kube-system external-dns
+kubectl annotate serviceaccount -n kube-system --overwrite external-dns eks.amazonaws.com/role-arn=${EXTERNAL_DNS_ROLE_ARN}
+
+echo "apply -f k8s/external-dns.yaml in 10 s"
 sleep 10
+kubectl apply -f k8s/external-dns.yaml
+
+
+echo "apply -f k8s/wrongsecrets-balancer-service.yml"
 kubectl apply -f k8s/wrongsecrets-balancer-service.yml
-# echo "apply -f k8s/secret-challenge-vault-ingress.yml in 1 s"
-# sleep 1
-# kubectl apply -f k8s/secret-challenge-vault-ingress.yml
-echo "apply -f k8s/wrongsecrets-balancer-ingress.yml in 10 s"
-sleep 10
+
+export BALANCER_DOMAIN_NAME="$(terraform output -raw balancer_domain_name)"
+
+envsubst <./k8s/wrongsecrets-balancer-ingress.yml.tpl >./k8s/wrongsecrets-balancer-ingress.yml
+
+echo "apply -f k8s/wrongsecrets-balancer-ingress.yml"
 kubectl apply -f k8s/wrongsecrets-balancer-ingress.yml
+echo "apply -f k8s/ctfd-service.yaml"
 kubectl apply -f k8s/ctfd-service.yaml
+
+export CTFD_DOMAIN_NAME="$(terraform output -raw ctfd_domain_name)"
+envsubst <./k8s/ctfd-ingress.yaml.tpl >./k8s/ctfd-ingress.yaml
+
+echo "apply -f k8s/ctfd-ingress.yaml"
 kubectl apply -f k8s/ctfd-ingress.yaml
-echo "waiting 10 s for loadBalancer"
-sleep 10
+echo "waiting 20 s for load balancer"
+sleep 20
 echo "Wrongsecrets ingress: http://$(kubectl get ingress wrongsecrets-balancer -o jsonpath='{.status.loadBalancer.ingress[0].hostname}')"
+echo "Wrongsecrets host: http://$(kubectl get ingress wrongsecrets-balancer -o jsonpath='{.spec.rules[0].host}')"
 echo "ctfd ingress: http://$(kubectl get ingress -n ctfd ctfd -o jsonpath='{.status.loadBalancer.ingress[0].hostname}')"
+echo "ctfd host: http://$(kubectl get ingress -n ctfd ctfd -o jsonpath='{.spec.rules[0].host}')"
 echo "Do not forget to cleanup afterwards! Run k8s-aws-alb-script-cleanup.sh"
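Once k8s-aws-alb-script.sh has finished, the new external-dns and ingress wiring can be sanity-checked with a few commands along these lines (illustrative only; the resource names come from the manifests and Terraform outputs used above):

# external-dns should log that it created/updated records in the hosted zone
kubectl logs -n kube-system deployment/external-dns

# each ingress should report both the ALB hostname and the configured host rule
kubectl get ingress wrongsecrets-balancer
kubectl get ingress -n ctfd ctfd

# once DNS has propagated, the custom domain should resolve to the ALB
dig +short "$(terraform output -raw balancer_domain_name)"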
diff --git a/aws/k8s/ctfd-ingress.yaml b/aws/k8s/ctfd-ingress.yaml
deleted file mode 100644
index 49f21b97..00000000
--- a/aws/k8s/ctfd-ingress.yaml
+++ /dev/null
@@ -1,27 +0,0 @@
-apiVersion: networking.k8s.io/v1
-kind: Ingress
-metadata:
-  namespace: ctfd
-  name: ctfd
-  annotations:
-    alb.ingress.kubernetes.io/scheme: internet-facing
-    alb.ingress.kubernetes.io/target-type: instance
-    alb.ingress.kubernetes.io/success-codes: 200-399
-    acme.cert-manager.io/http01-edit-in-place: "true"
-    # cert-manager.io/issue-temporary-certificate: "true"
-    #uncomment and configure below if you want to use tls, don't forget to override the cookie to a secure value!
-    # alb.ingress.kubernetes.io/certificate-arn: arn:aws:acm:::certificate/xxxxxx
-    # alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}, {"HTTPS":443}]'
-    # alb.ingress.kubernetes.io/actions.ssl-redirect: '443'
-spec:
-  ingressClassName: alb
-  rules:
-    - http:
-        paths:
-          - path: /
-            pathType: Prefix
-            backend:
-              service:
-                name: ctfd
-                port:
-                  number: 80
diff --git a/aws/k8s/ctfd-ingress.yaml.tpl b/aws/k8s/ctfd-ingress.yaml.tpl
new file mode 100644
index 00000000..f7b7fdfc
--- /dev/null
+++ b/aws/k8s/ctfd-ingress.yaml.tpl
@@ -0,0 +1,29 @@
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  namespace: ctfd
+  name: ctfd
+  annotations:
+    alb.ingress.kubernetes.io/scheme: internet-facing
+    alb.ingress.kubernetes.io/target-type: instance
+    alb.ingress.kubernetes.io/success-codes: 200-399
+    #uncomment and configure below if you want to use tls, don't forget to override the cookie to a secure value!
+    alb.ingress.kubernetes.io/ssl-policy: ELBSecurityPolicy-TLS13-1-2-2021-06
+    alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}, {"HTTPS":443}]'
+    alb.ingress.kubernetes.io/ssl-redirect: "443"
+    external-dns.alpha.kubernetes.io/hostname: ${CTFD_DOMAIN_NAME}
+    # The certificate ARN can be discovered automatically by the ALB Ingress Controller based on the host value in the ingress, or you can specify it manually by uncommenting and customizing the line below
+    # alb.ingress.kubernetes.io/certificate-arn:
+spec:
+  ingressClassName: alb
+  rules:
+    - http:
+        paths:
+          - path: /
+            pathType: Prefix
+            backend:
+              service:
+                name: ctfd
+                port:
+                  number: 80
+      host: ${CTFD_DOMAIN_NAME} # Specify the hostname to route to the service
diff --git a/aws/k8s/external-dns.yaml b/aws/k8s/external-dns.yaml
new file mode 100644
index 00000000..e7d9cb01
--- /dev/null
+++ b/aws/k8s/external-dns.yaml
@@ -0,0 +1,62 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: external-dns
+  labels:
+    app.kubernetes.io/name: external-dns
+rules:
+  - apiGroups: [""]
+    resources: ["services", "endpoints", "pods", "nodes"]
+    verbs: ["get", "watch", "list"]
+  - apiGroups: ["extensions", "networking.k8s.io"]
+    resources: ["ingresses"]
+    verbs: ["get", "watch", "list"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: external-dns-viewer
+  labels:
+    app.kubernetes.io/name: external-dns
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: external-dns
+subjects:
+  - kind: ServiceAccount
+    name: external-dns
+    namespace: kube-system
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: external-dns
+  namespace: kube-system
+  labels:
+    app: external-dns
+spec:
+  selector:
+    matchLabels:
+      app: external-dns
+  strategy:
+    type: Recreate
+  template:
+    metadata:
+      labels:
+        app: external-dns
+    spec:
+      serviceAccountName: external-dns
+      securityContext:
+        fsGroup: 65534
+      containers:
+        - name: external-dns
+          image: bitnami/external-dns:0.15.0
+          resources:
+            limits:
+              memory: 256Mi
+              cpu: 500m
+          args:
+            - --source=ingress
+            - --provider=aws
+            - --aws-zone-type=public # only look at public hosted zones (valid values are public, private or no value for both)
+            - --txt-owner-id=external-dns
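Note that the new .tpl manifests (this CTFd one and the balancer one further down) are not applied directly: k8s-aws-alb-script.sh substitutes the ${...} placeholders with envsubst before running kubectl apply. Rendering one of them by hand looks roughly like this (a sketch; the example domain is simply the default from variables.tf, substitute your own):

export CTFD_DOMAIN_NAME="ctfd.wrongsecrets.com"   # the script takes this from `terraform output -raw ctfd_domain_name`
envsubst <./k8s/ctfd-ingress.yaml.tpl >./k8s/ctfd-ingress.yaml
grep -E 'hostname|host:' ./k8s/ctfd-ingress.yaml  # the external-dns annotation and the host rule should now show the real domain
kubectl apply -f ./k8s/ctfd-ingress.yaml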
diff --git a/aws/k8s/wrongsecrets-balancer-ingress.yml b/aws/k8s/wrongsecrets-balancer-ingress.yml.tpl
similarity index 50%
rename from aws/k8s/wrongsecrets-balancer-ingress.yml
rename to aws/k8s/wrongsecrets-balancer-ingress.yml.tpl
index 0e5094a2..2f747128 100644
--- a/aws/k8s/wrongsecrets-balancer-ingress.yml
+++ b/aws/k8s/wrongsecrets-balancer-ingress.yml.tpl
@@ -7,12 +7,13 @@ metadata:
     alb.ingress.kubernetes.io/scheme: internet-facing
     alb.ingress.kubernetes.io/target-type: instance
     alb.ingress.kubernetes.io/success-codes: 200-399
-    acme.cert-manager.io/http01-edit-in-place: "true"
-    # cert-manager.io/issue-temporary-certificate: "true"
     #uncomment and configure below if you want to use tls, don't forget to override the cookie to a secure value!
-    # alb.ingress.kubernetes.io/certificate-arn: arn:aws:acm:::certificate/xxxxxx
-    # alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}, {"HTTPS":443}]'
-    # alb.ingress.kubernetes.io/actions.ssl-redirect: '443'
+    alb.ingress.kubernetes.io/ssl-policy: ELBSecurityPolicy-TLS13-1-2-2021-06
+    alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}, {"HTTPS":443}]'
+    alb.ingress.kubernetes.io/ssl-redirect: "443"
+    external-dns.alpha.kubernetes.io/hostname: ${BALANCER_DOMAIN_NAME}
+    # The certificate ARN can be discovered automatically by the ALB Ingress Controller based on the host value in the ingress, or you can specify it manually by uncommenting and customizing the line below
+    # alb.ingress.kubernetes.io/certificate-arn:
 spec:
   ingressClassName: alb
   rules:
@@ -25,3 +26,4 @@ spec:
             name: wrongsecrets-balancer
             port:
               number: 80
+      host: ${BALANCER_DOMAIN_NAME} # Specify the hostname to route to the service
diff --git a/aws/main.tf b/aws/main.tf
index a152fe37..e0f909b7 100644
--- a/aws/main.tf
+++ b/aws/main.tf
@@ -208,3 +208,18 @@ module "load_balancer_controller_irsa_role" {
     }
   }
 }
+
+module "external_dns_irsa_role" {
+  source  = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks"
+  version = "~> 5.46"
+
+  role_name                  = "external-dns"
+  attach_external_dns_policy = true
+
+  oidc_providers = {
+    main = {
+      provider_arn               = module.eks.oidc_provider_arn
+      namespace_service_accounts = ["kube-system:external-dns"]
+    }
+  }
+}
diff --git a/aws/outputs.tf b/aws/outputs.tf
index 4c3e5eb8..b575a225 100644
--- a/aws/outputs.tf
+++ b/aws/outputs.tf
@@ -68,3 +68,28 @@ output "state_bucket_name" {
   description = "Terraform s3 state bucket name"
   value       = split(":", var.state_bucket_arn)[length(split(":", var.state_bucket_arn)) - 1]
 }
+
+output "balancer_acm_cert_arn" {
+  description = "Balancer ACM certificate ARN"
+  value       = var.balancer_domain_name == "" ? null : one(module.acm_balancer).acm_certificate_arn
+}
+
+output "balancer_domain_name" {
+  description = "Balancer domain name"
+  value       = var.balancer_domain_name
+}
+
+output "ctfd_acm_cert_arn" {
+  description = "CTFd ACM certificate ARN"
+  value       = var.ctfd_domain_name == "" ? null : one(module.acm_ctfd).acm_certificate_arn
+}
+
+output "ctfd_domain_name" {
+  description = "CTFd domain name"
+  value       = var.ctfd_domain_name
+}
+
+output "external_dns_role_arn" {
+  description = "External DNS IAM role ARN"
+  value       = module.external_dns_irsa_role.iam_role_arn
+}
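The custom-domain setup is driven entirely by the three new variables, so the stub added to terraform.tfvars in the next hunk can be filled in along these lines (the values shown are merely the defaults declared in aws/variables.tf further below; substitute your own hosted zone and domain names):

region               = "eu-west-1"
hosted_zone_id       = "Z0495090L8BJ8O5NYNKK"
balancer_domain_name = "ctf.wrongsecrets.com"
ctfd_domain_name     = "ctfd.wrongsecrets.com"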
diff --git a/aws/terraform.tfvars b/aws/terraform.tfvars
index c731d9d2..09fae324 100644
--- a/aws/terraform.tfvars
+++ b/aws/terraform.tfvars
@@ -1,2 +1,7 @@
 region = "eu-west-1"
 # state_bucket_arn = "..."
+
+## Use this section for a custom domain with TLS
+# hosted_zone_id = "..."
+# balancer_domain_name = "..."
+# ctfd_domain_name = "..."
diff --git a/aws/variables.tf b/aws/variables.tf
index 2ec5baa4..0e02f2c3 100644
--- a/aws/variables.tf
+++ b/aws/variables.tf
@@ -3,6 +3,23 @@ variable "region" {
   type        = string
   default     = "eu-west-1"
 }
+variable "balancer_domain_name" {
+  description = "The domain name to use for the WrongSecrets balancer"
+  type        = string
+  default     = "ctf.wrongsecrets.com"
+}
+
+variable "ctfd_domain_name" {
+  description = "The domain name to use for CTFd"
+  type        = string
+  default     = "ctfd.wrongsecrets.com"
+}
+
+variable "hosted_zone_id" {
+  description = "The ID of the Route53 Hosted Zone to use"
+  type        = string
+  default     = "Z0495090L8BJ8O5NYNKK"
+}
 variable "cluster_version" {
   description = "The EKS cluster version to use"