diff --git a/.circleci/config.yml b/.circleci/config.yml
index 9c84f112cc..05f6a209e9 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -6,7 +6,7 @@ orbs:
 executors:
   go:
     docker:
-      - image: docker.mirror.hashicorp.services/cimg/go:1.20.1
+      - image: docker.mirror.hashicorp.services/cimg/go:1.18.3
     environment:
       TEST_RESULTS: /tmp/test-results # path to where test results are saved
 
@@ -21,9 +21,8 @@ gke-terraform-path: &gke-terraform-path charts/consul/test/terraform/gke
 eks-terraform-path: &eks-terraform-path charts/consul/test/terraform/eks
 aks-terraform-path: &aks-terraform-path charts/consul/test/terraform/aks
 openshift-terraform-path: &openshift-terraform-path charts/consul/test/terraform/openshift
-# This image is built from test/docker/Test.dockerfile
-consul-helm-test-image: &consul-helm-test-image docker.mirror.hashicorp.services/hashicorpdev/consul-helm-test:0.15.0
-consul-test-image: &consul-test-image hashicorppreview/consul-enterprise:1.15-dev
+# This image is built from charts/consul/test/docker/Test.dockerfile and provides the necessary dependencies for running on our cloud targets.
+consul-helm-test-image: &consul-helm-test-image docker.mirror.hashicorp.services/hashicorpdev/consul-helm-test:0.12.3
 
 ########################
 # COMMANDS
@@ -35,16 +34,16 @@ commands:
       - run:
           name: Install go, gotestsum, kind, kubectl, and helm
           command: |
-            wget https://golang.org/dl/go1.20.1.linux-amd64.tar.gz
-            sudo rm -rf /usr/local/go && sudo tar -C /usr/local -xzf go1.20.1.linux-amd64.tar.gz
-            rm go1.20.1.linux-amd64.tar.gz
+            wget https://golang.org/dl/go1.18.3.linux-amd64.tar.gz
+            sudo rm -rf /usr/local/go && sudo tar -C /usr/local -xzf go1.18.3.linux-amd64.tar.gz
+            rm go1.18.3.linux-amd64.tar.gz
             echo 'export PATH=$PATH:/usr/local/go/bin' >> $BASH_ENV
 
-            wget https://github.com/gotestyourself/gotestsum/releases/download/v1.8.2/gotestsum_1.8.2_linux_amd64.tar.gz
-            sudo tar -C /usr/local/bin -xzf gotestsum_1.8.2_linux_amd64.tar.gz
-            rm gotestsum_1.8.2_linux_amd64.tar.gz
+            wget https://github.com/gotestyourself/gotestsum/releases/download/v1.6.4/gotestsum_1.6.4_linux_amd64.tar.gz
+            sudo tar -C /usr/local/bin -xzf gotestsum_1.6.4_linux_amd64.tar.gz
+            rm gotestsum_1.6.4_linux_amd64.tar.gz
 
-            curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.17.0/kind-linux-amd64
+            curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.15.0/kind-linux-amd64
             chmod +x ./kind
             sudo mv ./kind /usr/local/bin/kind
@@ -52,8 +51,8 @@ commands:
             chmod +x ./kubectl
             sudo mv ./kubectl /usr/local/bin/kubectl
 
-            wget https://get.helm.sh/helm-v3.9.4-linux-amd64.tar.gz
-            tar -zxvf helm-v3.9.4-linux-amd64.tar.gz
+            wget https://get.helm.sh/helm-v3.7.0-linux-amd64.tar.gz
+            tar -zxvf helm-v3.7.0-linux-amd64.tar.gz
             sudo mv linux-amd64/helm /usr/local/bin/helm
   custom-checkout:
     description: |
@@ -169,10 +168,10 @@ commands:
             echo $pkgs
             for pkg in $pkgs
             do
-              if ! gotestsum --format=testname --no-summary=all --jsonfile=jsonfile-${pkg////-} -- $pkg -p 1 -timeout 2h -failfast \
+              if ! gotestsum --no-summary=all --jsonfile=jsonfile-${pkg////-} -- $pkg -p 1 -timeout 2h -failfast \
                   << parameters.additional-flags >> \
-                  -enable-multi-cluster \
                   ${ENABLE_ENTERPRISE:+-enable-enterprise} \
+                  -enable-multi-cluster \
                   -debug-directory="$TEST_RESULTS/debug" \
                   -consul-k8s-image=<< parameters.consul-k8s-image >>
               then
@@ -181,7 +180,7 @@ commands:
                 break
               fi
             done
-            gotestsum --format=testname --raw-command --junitfile "$TEST_RESULTS/gotestsum-report.xml" -- cat jsonfile*
+            gotestsum --raw-command --junitfile "$TEST_RESULTS/gotestsum-report.xml" -- cat jsonfile*
             exit $exit_code
 
       - unless:
@@ -200,7 +199,7 @@ commands:
             pkgs=$(go list ./... | circleci tests split --split-by=timings --timings-type=classname)
             echo "Running $pkgs"
 
-            gotestsum --format testname --junitfile "$TEST_RESULTS/gotestsum-report.xml" -- $pkgs -p 1 -timeout 2h -failfast \
+            gotestsum --junitfile "$TEST_RESULTS/gotestsum-report.xml" -- $pkgs -p 1 -timeout 2h -failfast \
               << parameters.additional-flags >> \
              ${ENABLE_ENTERPRISE:+-enable-enterprise} \
              -enable-multi-cluster \
@@ -281,7 +280,7 @@ jobs:
            unzip consul_"${CONSUL_VERSION}"_linux_amd64.zip -d /home/circleci/bin && rm consul_"${CONSUL_VERSION}"_linux_amd64.zip
 
            PACKAGE_NAMES=$(go list ./...)
-           gotestsum --format testname --junitfile $TEST_RESULTS/gotestsum-report.xml -- -p 4 $PACKAGE_NAMES
+           gotestsum --junitfile $TEST_RESULTS/gotestsum-report.xml -- -p 4 $PACKAGE_NAMES
 
       - store_test_results:
           path: /tmp/test-results
@@ -312,7 +311,7 @@ jobs:
            unzip consul_"${CONSUL_ENT_VERSION}"_linux_amd64.zip -d /home/circleci/bin && rm consul_"${CONSUL_ENT_VERSION}"_linux_amd64.zip
 
            PACKAGE_NAMES=$(go list ./...)
-           gotestsum --format testname --junitfile $TEST_RESULTS/gotestsum-report.xml -- -tags=enterprise -p 4 $PACKAGE_NAMES
+           gotestsum --junitfile $TEST_RESULTS/gotestsum-report.xml -- -tags=enterprise -p 4 $PACKAGE_NAMES
 
       - store_test_results:
          path: /tmp/test-results
@@ -401,7 +400,7 @@ jobs:
          name: Run tests
          working_directory: *cli-path
          command: |
-           gotestsum --format testname --junitfile $TEST_RESULTS/gotestsum-report.xml ./... -- -p 4
+           gotestsum --junitfile $TEST_RESULTS/gotestsum-report.xml ./... -- -p 4
 
      - store_test_results:
          path: /tmp/test-results
@@ -500,7 +499,7 @@ jobs:
          name: Run tests
          working_directory: *acceptance-framework-path
          command: |
-           gotestsum --format testname --junitfile $TEST_RESULTS/gotestsum-report.xml ./... -- -p 4
+           gotestsum --junitfile $TEST_RESULTS/gotestsum-report.xml ./... -- -p 4
 
      - store_test_results:
          path: /tmp/test-results
@@ -523,7 +522,7 @@ jobs:
          name: Run tests
          working_directory: *helm-gen-path
          command: |
-           gotestsum --format testname --junitfile $TEST_RESULTS/gotestsum-report.xml ./... -- -p 4
+           gotestsum --junitfile $TEST_RESULTS/gotestsum-report.xml ./... -- -p 4
 
      - store_test_results:
          path: /tmp/test-results
@@ -566,7 +565,6 @@ jobs:
   acceptance:
     environment:
       - TEST_RESULTS: /tmp/test-results
-      - CONSUL_TEST_IMAGE: *consul-test-image
     machine:
       image: ubuntu-2004:202010-01
     resource_class: xlarge
@@ -575,7 +573,7 @@ jobs:
       - checkout
       - install-prereqs
       - create-kind-clusters:
-          version: "v1.26.0"
+          version: "v1.24.4"
       - restore_cache:
          keys:
            - consul-helm-modcache-v2-{{ checksum "acceptance/go.mod" }}
@@ -591,7 +589,7 @@ jobs:
      - run: mkdir -p $TEST_RESULTS
      - run-acceptance-tests:
          failfast: true
-         additional-flags: -use-kind -kubecontext="kind-dc1" -secondary-kubecontext="kind-dc2" -consul-image=$CONSUL_TEST_IMAGE
+         additional-flags: -use-kind -kubecontext="kind-dc1" -secondary-kubecontext="kind-dc2" -consul-image=hashicorppreview/consul-enterprise:1.13-dev
      - store_test_results:
          path: /tmp/test-results
      - store_artifacts:
@@ -600,7 +598,6 @@ jobs:
   acceptance-tproxy:
     environment:
       - TEST_RESULTS: /tmp/test-results
-      - CONSUL_TEST_IMAGE: *consul-test-image
     machine:
       image: ubuntu-2004:202010-01
     resource_class: xlarge
@@ -609,7 +606,7 @@ jobs:
       - checkout
       - install-prereqs
       - create-kind-clusters:
-          version: "v1.26.0"
+          version: "v1.24.4"
       - restore_cache:
          keys:
            - consul-helm-modcache-v2-{{ checksum "acceptance/go.mod" }}
@@ -625,7 +622,7 @@ jobs:
      - run: mkdir -p $TEST_RESULTS
      - run-acceptance-tests:
          failfast: true
-         additional-flags: -use-kind -kubecontext="kind-dc1" -secondary-kubecontext="kind-dc2" -enable-transparent-proxy -consul-image=$CONSUL_TEST_IMAGE
+         additional-flags: -use-kind -kubecontext="kind-dc1" -secondary-kubecontext="kind-dc2" -enable-transparent-proxy -consul-image=hashicorppreview/consul-enterprise:1.13-dev
      - store_test_results:
          path: /tmp/test-results
      - store_artifacts:
@@ -634,7 +631,6 @@ jobs:
   acceptance-tproxy-cni:
     environment:
       - TEST_RESULTS: /tmp/test-results
-      - CONSUL_TEST_IMAGE: *consul-test-image
     machine:
       image: ubuntu-2004:202010-01
     resource_class: xlarge
@@ -642,8 +638,8 @@ jobs:
     steps:
       - checkout
       - install-prereqs
-      - create-kind-cni-clusters:
-          version: "v1.25.3"
+      - create-kind-clusters:
+          version: "v1.24.4"
       - restore_cache:
          keys:
            - consul-helm-modcache-v2-{{ checksum "acceptance/go.mod" }}
@@ -659,7 +655,7 @@ jobs:
      - run: mkdir -p $TEST_RESULTS
      - run-acceptance-tests:
          failfast: true
-         additional-flags: -use-kind -kubecontext="kind-dc1" -secondary-kubecontext="kind-dc2" -enable-transparent-proxy -enable-cni -consul-image=$CONSUL_TEST_IMAGE
+         additional-flags: -use-kind -kubecontext="kind-dc1" -secondary-kubecontext="kind-dc2" -enable-transparent-proxy -enable-cni -consul-image=hashicorppreview/consul-enterprise:1.13-dev
      - store_test_results:
          path: /tmp/test-results
      - store_artifacts:
@@ -727,12 +723,11 @@ jobs:
   #############################
   # CLOUD ACCEPTANCE TEST JOBS
   #############################
-  acceptance-gke-1-25:
+  acceptance-gke-1-23:
     parallelism: 2
     environment:
       - TEST_RESULTS: /tmp/test-results
       - USE_GKE_GCLOUD_AUTH_PLUGIN: true
-      - CONSUL_TEST_IMAGE: *consul-test-image
 
     docker:
       - image: *consul-helm-test-image
@@ -778,7 +773,7 @@ jobs:
      - run: mkdir -p $TEST_RESULTS
 
      - run-acceptance-tests:
-         additional-flags: -kubeconfig="$primary_kubeconfig" -secondary-kubeconfig="$secondary_kubeconfig" -enable-transparent-proxy -consul-image=$CONSUL_TEST_IMAGE
+         additional-flags: -kubeconfig="$primary_kubeconfig" -secondary-kubeconfig="$secondary_kubeconfig" -enable-pod-security-policies -enable-transparent-proxy -consul-image=hashicorppreview/consul-enterprise:1.13-dev
 
      - store_test_results:
          path: /tmp/test-results
@@ -797,12 +792,11 @@ jobs:
          fail_only: true
          failure_message: "GKE acceptance tests failed. Check the logs at: ${CIRCLE_BUILD_URL}"
 
-  acceptance-gke-cni-1-25:
+  acceptance-gke-cni-1-23:
     parallelism: 2
     environment:
       - TEST_RESULTS: /tmp/test-results
       - USE_GKE_GCLOUD_AUTH_PLUGIN: true
-      - CONSUL_TEST_IMAGE: *consul-test-image
 
     docker:
       - image: *consul-helm-test-image
@@ -848,7 +842,7 @@ jobs:
      - run: mkdir -p $TEST_RESULTS
 
      - run-acceptance-tests:
-         additional-flags: -use-gke -kubeconfig="$primary_kubeconfig" -secondary-kubeconfig="$secondary_kubeconfig" -enable-transparent-proxy -enable-cni -consul-image=$CONSUL_TEST_IMAGE
+         additional-flags: -use-gke -kubeconfig="$primary_kubeconfig" -secondary-kubeconfig="$secondary_kubeconfig" -enable-pod-security-policies -enable-transparent-proxy -enable-cni -consul-image=hashicorppreview/consul-enterprise:1.13-dev
 
      - store_test_results:
          path: /tmp/test-results
@@ -867,11 +861,10 @@ jobs:
          fail_only: true
          failure_message: "GKE CNI acceptance tests failed. Check the logs at: ${CIRCLE_BUILD_URL}"
 
-  acceptance-aks-1-24:
+  acceptance-aks-1-22:
     parallelism: 3
     environment:
       - TEST_RESULTS: /tmp/test-results
-      - CONSUL_TEST_IMAGE: *consul-test-image
 
     docker:
       - image: *consul-helm-test-image
@@ -906,7 +899,7 @@ jobs:
      - run: mkdir -p $TEST_RESULTS
 
      - run-acceptance-tests:
-         additional-flags: -use-aks -kubeconfig="$primary_kubeconfig" -secondary-kubeconfig="$secondary_kubeconfig" -enable-transparent-proxy -consul-image=$CONSUL_TEST_IMAGE
+         additional-flags: -kubeconfig="$primary_kubeconfig" -secondary-kubeconfig="$secondary_kubeconfig" -enable-transparent-proxy -consul-image=hashicorppreview/consul-enterprise:1.13-dev
 
      - store_test_results:
          path: /tmp/test-results
@@ -925,11 +918,10 @@ jobs:
          fail_only: true
          failure_message: "AKS acceptance tests failed. Check the logs at: ${CIRCLE_BUILD_URL}"
 
-  acceptance-aks-cni-1-24:
+  acceptance-aks-cni-1-22:
     parallelism: 3
     environment:
       - TEST_RESULTS: /tmp/test-results
-      - CONSUL_TEST_IMAGE: *consul-test-image
 
     docker:
       - image: *consul-helm-test-image
@@ -964,7 +956,7 @@ jobs:
      - run: mkdir -p $TEST_RESULTS
 
      - run-acceptance-tests:
-         additional-flags: -use-aks -kubeconfig="$primary_kubeconfig" -secondary-kubeconfig="$secondary_kubeconfig" -enable-transparent-proxy -enable-cni -consul-image=$CONSUL_TEST_IMAGE
+         additional-flags: -kubeconfig="$primary_kubeconfig" -secondary-kubeconfig="$secondary_kubeconfig" -enable-transparent-proxy -enable-cni -consul-image=hashicorppreview/consul-enterprise:1.13-dev
 
      - store_test_results:
          path: /tmp/test-results
@@ -982,11 +974,10 @@ jobs:
          fail_only: true
          failure_message: "AKS CNI acceptance tests failed. Check the logs at: ${CIRCLE_BUILD_URL}"
 
-  acceptance-eks-1-23:
+  acceptance-eks-1-21:
     parallelism: 3
     environment:
       - TEST_RESULTS: /tmp/test-results
-      - CONSUL_TEST_IMAGE: *consul-test-image
 
     docker:
       - image: *consul-helm-test-image
@@ -1027,7 +1018,7 @@ jobs:
      - run: mkdir -p $TEST_RESULTS
 
      - run-acceptance-tests:
-         additional-flags: -kubeconfig="$primary_kubeconfig" -secondary-kubeconfig="$secondary_kubeconfig" -enable-transparent-proxy -consul-image=$CONSUL_TEST_IMAGE
+         additional-flags: -kubeconfig="$primary_kubeconfig" -secondary-kubeconfig="$secondary_kubeconfig" -enable-transparent-proxy -consul-image=hashicorppreview/consul-enterprise:1.13-dev
 
      - store_test_results:
          path: /tmp/test-results
@@ -1046,11 +1037,10 @@ jobs:
          fail_only: true
          failure_message: "EKS acceptance tests failed. Check the logs at: ${CIRCLE_BUILD_URL}"
 
-  acceptance-eks-cni-1-23:
+  acceptance-eks-cni-1-21:
     parallelism: 3
     environment:
       - TEST_RESULTS: /tmp/test-results
-      - CONSUL_TEST_IMAGE: *consul-test-image
 
     docker:
       - image: *consul-helm-test-image
@@ -1091,7 +1081,7 @@ jobs:
      - run: mkdir -p $TEST_RESULTS
 
      - run-acceptance-tests:
-         additional-flags: -kubeconfig="$primary_kubeconfig" -secondary-kubeconfig="$secondary_kubeconfig" -enable-transparent-proxy -enable-cni -consul-image=$CONSUL_TEST_IMAGE
+         additional-flags: -kubeconfig="$primary_kubeconfig" -secondary-kubeconfig="$secondary_kubeconfig" -enable-transparent-proxy -enable-cni -consul-image=hashicorppreview/consul-enterprise:1.13-dev
 
      - store_test_results:
          path: /tmp/test-results
@@ -1113,7 +1103,6 @@ jobs:
   acceptance-openshift:
     environment:
       TEST_RESULTS: /tmp/test-results
-      CONSUL_TEST_IMAGE: *consul-test-image
     parallelism: 1
     docker:
       - image: *consul-helm-test-image
@@ -1146,7 +1135,7 @@ jobs:
      - run: mkdir -p $TEST_RESULTS
 
      - run-acceptance-tests:
-         additional-flags: -kubeconfig="$primary_kubeconfig" -secondary-kubeconfig="$secondary_kubeconfig" -enable-openshift -enable-transparent-proxy -consul-image=$CONSUL_TEST_IMAGE
+         additional-flags: -kubeconfig="$primary_kubeconfig" -secondary-kubeconfig="$secondary_kubeconfig" -enable-openshift -enable-transparent-proxy -consul-image=hashicorppreview/consul-enterprise:1.13-dev
 
      - store_test_results:
          path: /tmp/test-results
@@ -1165,19 +1154,57 @@ jobs:
          fail_only: true
          failure_message: "OpenShift acceptance tests failed. Check the logs at: ${CIRCLE_BUILD_URL}"
 
+  acceptance-kind-1-23-consul-compat-nightly-1-11:
+    environment:
+      - TEST_RESULTS: /tmp/test-results
+      - CONSUL_IMAGE: "docker.mirror.hashicorp.services/hashicorppreview/consul-enterprise:1.11-dev"
+      - ENVOY_IMAGE: "envoyproxy/envoy:v1.20.2"
+      - CONSUL_K8S_IMAGE: "docker.mirror.hashicorp.services/hashicorp/consul-k8s-control-plane:0.48.0"
+      - HELM_CHART_VERSION: "0.48.0"
+    machine:
+      image: ubuntu-2004:202010-01
+    resource_class: xlarge
+    steps:
+      - checkout
+      - install-prereqs
+      - create-kind-clusters:
+          version: "v1.23.0"
+      - restore_cache:
+          keys:
+            - consul-helm-modcache-v2-{{ checksum "acceptance/go.mod" }}
+      - run:
+          name: go mod download
+          working_directory: *acceptance-mod-path
+          command: go mod download
+      - save_cache:
+          key: consul-helm-modcache-v2-{{ checksum "acceptance/go.mod" }}
+          paths:
+            - ~/.go_workspace/pkg/mod
+      - build-cli
+      - run: mkdir -p $TEST_RESULTS
+      - run-acceptance-tests:
+          additional-flags: -use-kind -kubecontext="kind-dc1" -secondary-kubecontext="kind-dc2" -enable-transparent-proxy -consul-k8s-image=$CONSUL_K8S_IMAGE -consul-image=$CONSUL_IMAGE -consul-version="1.11" -envoy-image=$ENVOY_IMAGE -helm-chart-version=$HELM_CHART_VERSION
+      - store_test_results:
+          path: /tmp/test-results
+      - store_artifacts:
+          path: /tmp/test-results
+      - slack/status:
+          channel: *slack-channel
+          fail_only: true
+          failure_message: "Acceptance tests against Kind with Kubernetes v1.23 with Consul 1.11 nightly failed. Check the logs at: ${CIRCLE_BUILD_URL}"
+
   acceptance-kind-1-23-consul-compat-nightly-1-12:
     environment:
       - TEST_RESULTS: /tmp/test-results
       - CONSUL_IMAGE: "docker.mirror.hashicorp.services/hashicorppreview/consul-enterprise:1.12-dev"
       - ENVOY_IMAGE: "envoyproxy/envoy:v1.22.2"
-      - HELM_CHART_VERSION: "0.49.0"
-      - CONSUL_K8S_IMAGE: "docker.mirror.hashicorp.services/hashicorp/consul-k8s-control-plane:0.49.0"
+      - HELM_CHART_VERSION: "0.48.0"
+      - CONSUL_K8S_IMAGE: "docker.mirror.hashicorp.services/hashicorp/consul-k8s-control-plane:0.48.0"
     machine:
       image: ubuntu-2004:202010-01
     resource_class: xlarge
     steps:
-      - custom-checkout:
-          git-ref: "v$HELM_CHART_VERSION"
+      - checkout
       - install-prereqs
       - create-kind-clusters:
          version: "v1.23.0"
@@ -1195,8 +1222,7 @@ jobs:
      - build-cli
      - run: mkdir -p $TEST_RESULTS
      - run-acceptance-tests:
-         consul-k8s-image: $CONSUL_K8S_IMAGE
-         additional-flags: -use-kind -kubecontext="kind-dc1" -secondary-kubecontext="kind-dc2" -consul-image=$CONSUL_IMAGE -consul-version="1.12" -envoy-image=$ENVOY_IMAGE -helm-chart-version=$HELM_CHART_VERSION -enable-transparent-proxy
+         additional-flags: -use-kind -kubecontext="kind-dc1" -secondary-kubecontext="kind-dc2" -enable-transparent-proxy -consul-k8s-image=$CONSUL_K8S_IMAGE -consul-image=$CONSUL_IMAGE -consul-version="1.12" -envoy-image=$ENVOY_IMAGE -helm-chart-version=$HELM_CHART_VERSION
      - store_test_results:
          path: /tmp/test-results
      - store_artifacts:
@@ -1204,21 +1230,20 @@ jobs:
      - slack/status:
          channel: *slack-channel
          fail_only: true
-         failure_message: "Acceptance tests against Kind with Kubernetes v1.25 with Consul 1.12 nightly failed. Check the logs at: ${CIRCLE_BUILD_URL}"
+         failure_message: "Acceptance tests against Kind with Kubernetes v1.23 with Consul 1.12 nightly failed. Check the logs at: ${CIRCLE_BUILD_URL}"
 
   acceptance-kind-1-23-consul-compat-nightly-1-13:
     environment:
       - TEST_RESULTS: /tmp/test-results
       - CONSUL_IMAGE: "docker.mirror.hashicorp.services/hashicorppreview/consul-enterprise:1.13-dev"
       - ENVOY_IMAGE: "envoyproxy/envoy:v1.23.1"
-      - CONSUL_K8S_IMAGE: "docker.mirror.hashicorp.services/hashicorp/consul-k8s-control-plane:0.49.0"
-      - HELM_CHART_VERSION: "0.49.0"
+      - CONSUL_K8S_IMAGE: "docker.mirror.hashicorp.services/hashicorp/consul-k8s-control-plane:0.48.0"
+      - HELM_CHART_VERSION: "0.48.0"
     machine:
       image: ubuntu-2004:202010-01
     resource_class: xlarge
     steps:
-      - custom-checkout:
-          git-ref: "v$HELM_CHART_VERSION"
+      - checkout
       - install-prereqs
       - create-kind-clusters:
          version: "v1.23.0"
@@ -1236,8 +1261,7 @@ jobs:
      - build-cli
      - run: mkdir -p $TEST_RESULTS
      - run-acceptance-tests:
-         consul-k8s-image: $CONSUL_K8S_IMAGE
-         additional-flags: -use-kind -kubecontext="kind-dc1" -secondary-kubecontext="kind-dc2" -consul-image=$CONSUL_IMAGE -consul-version="1.13" -envoy-image=$ENVOY_IMAGE -helm-chart-version=$HELM_CHART_VERSION -enable-transparent-proxy
+         additional-flags: -use-kind -kubecontext="kind-dc1" -secondary-kubecontext="kind-dc2" -enable-transparent-proxy -consul-k8s-image=$CONSUL_K8S_IMAGE -consul-image=$CONSUL_IMAGE -consul-version="1.13" -envoy-image=$ENVOY_IMAGE -helm-chart-version=$HELM_CHART_VERSION
      - store_test_results:
          path: /tmp/test-results
      - store_artifacts:
@@ -1245,7 +1269,7 @@ jobs:
      - slack/status:
          channel: *slack-channel
          fail_only: true
-         failure_message: "Acceptance tests against Kind with Kubernetes v1.25 with Consul 1.13 nightly failed. Check the logs at: ${CIRCLE_BUILD_URL}"
+         failure_message: "Acceptance tests against Kind with Kubernetes v1.23 with Consul 1.13 nightly failed. Check the logs at: ${CIRCLE_BUILD_URL}"
 
 ########################
 # WORKFLOWS
@@ -1285,7 +1309,6 @@ workflows:
         requires:
           - dev-upload-docker
-
   nightly-cleanup:
     triggers:
       - schedule:
@@ -1299,6 +1322,7 @@ workflows:
            - cleanup-azure-resources
            - cleanup-eks-resources
 
+
   nightly-acceptance-tests-release:
     description: |
       Tests which run on a release branch nightly. These exist separate from the main
@@ -1311,8 +1335,6 @@ workflows:
          branches:
            only:
              - release/0.49.x
-             - release/1.0.x
-             - release/1.1.x
    jobs:
      - build-distro:
          OS: "linux"
@@ -1323,12 +1345,12 @@ workflows:
          - build-distros-linux
      # Disable until we can use UBI images.
      # - acceptance-openshift
-      - acceptance-gke-1-25:
+      - acceptance-gke-1-23:
          requires:
            - dev-upload-docker
-      - acceptance-gke-cni-1-25:
+      - acceptance-gke-cni-1-23:
          requires:
-           - acceptance-gke-1-25
+           - acceptance-gke-1-23
      - acceptance-tproxy:
          requires:
            - dev-upload-docker
@@ -1355,24 +1377,41 @@ workflows:
          - build-distros-linux
      # Disable until we can use UBI images.
      # - acceptance-openshift
-      - acceptance-gke-1-25:
+      - acceptance-gke-1-23:
          requires:
            - dev-upload-docker
-      - acceptance-gke-cni-1-25:
+      - acceptance-gke-cni-1-23:
          requires:
-           - acceptance-gke-1-25
-      - acceptance-eks-1-23:
+           - acceptance-gke-1-23
+      - acceptance-eks-1-21:
          requires:
            - dev-upload-docker
-      - acceptance-eks-cni-1-23:
+      - acceptance-eks-cni-1-21:
          requires:
-           - acceptance-eks-1-23
-      - acceptance-aks-1-24:
+           - acceptance-eks-1-21
+      - acceptance-aks-1-22:
          requires:
            - dev-upload-docker
-      - acceptance-aks-cni-1-24:
+      - acceptance-aks-cni-1-22:
          requires:
-           - acceptance-aks-1-24
+           - acceptance-aks-1-22
      - acceptance-tproxy:
          requires:
            - dev-upload-docker
+
+  nightly-kind-acceptance-tests-consul-compatability:
+    description: |
+      Acceptance tests which run nightly to verify the compatibility between
+      a consul-k8s binary and its consul version pair. Tests will be conducted
+      for up to n-2 previous Consul-k8s releases.
+    triggers:
+      - schedule:
+          cron: "0 0 * * *" # Run at 12 am UTC (5 pm PST)
+          filters:
+            branches:
+              only:
+                - main
+    jobs:
+      - acceptance-kind-1-23-consul-compat-nightly-1-11
+      - acceptance-kind-1-23-consul-compat-nightly-1-12
+      - acceptance-kind-1-23-consul-compat-nightly-1-13
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
index e22e28a48a..8082308548 100644
--- a/.github/ISSUE_TEMPLATE/config.yml
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -1,8 +1,5 @@
 blank_issues_enabled: false
 contact_links:
-  - name: Consul Community Support
-    url: https://discuss.hashicorp.com/c/consul/29
-    about: If you have a question or are looking for advice on Consul K8s, please post on our Discuss forum! The community loves to chime in to help. Happy Coding!
   - name: Consul on Kubernetes Learn Tutorials
     url: https://learn.hashicorp.com/collections/consul/kubernetes
     about: Please check out our Learn Tutorials. These hands on tutorials deal with many of the tasks common to using Consul on Kubernetes.
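The run-acceptance-tests change above keeps the long-standing per-package loop: gotestsum runs once per package so the job can stop at the first failing package (`-failfast` only applies within a single package), and the per-package JSON logs are then replayed into a single JUnit report. A minimal standalone sketch of that pattern, assuming `$TEST_RESULTS` points at a writable results directory as in the config (the package list and file names are illustrative):

```bash
#!/usr/bin/env bash
# Sketch of the per-package gotestsum loop used in the CI config above.
# Running each package in its own invocation lets us abort at the first
# failing package; -failfast alone only stops tests within one package.
set -u
exit_code=0
pkgs=$(go list ./...)   # packages under test
for pkg in $pkgs; do
  # ${pkg////-} flattens the import path into a safe file name.
  if ! gotestsum --no-summary=all --jsonfile="jsonfile-${pkg////-}" -- "$pkg" -p 1 -timeout 2h -failfast; then
    echo "Tests in ${pkg} failed, aborting early"
    exit_code=1
    break
  fi
done
# Replay the collected JSON output into a single JUnit report.
gotestsum --raw-command --junitfile "$TEST_RESULTS/gotestsum-report.xml" -- cat jsonfile*
exit $exit_code
```

The `--raw-command -- cat jsonfile*` replay is what lets one report cover packages that ran in separate gotestsum invocations.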
diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml
index 9c241d9ada..c1548332e0 100644
--- a/.github/workflows/backport.yml
+++ b/.github/workflows/backport.yml
@@ -2,7 +2,7 @@ name: Backport Assistant Runner
 
 on:
-  pull_request_target:
+  pull_request:
    types:
      - closed
      - labeled
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 9ce46f1704..c6b107e141 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -19,7 +19,7 @@ jobs:
     outputs:
       go-version: ${{ steps.get-go-version.outputs.go-version }}
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v2
      - name: Determine Go version
        id: get-go-version
        # We use .go-version as our source of truth for current Go
@@ -33,7 +33,7 @@ jobs:
     outputs:
       product-version: ${{ steps.get-product-version.outputs.product-version }}
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v2
      - name: get product version
        id: get-product-version
        run: |
@@ -47,7 +47,7 @@ jobs:
       filepath: ${{ steps.generate-metadata-file.outputs.filepath }}
     steps:
       - name: "Checkout directory"
-        uses: actions/checkout@v3
+        uses: actions/checkout@v2
      - name: Generate metadata file
        id: generate-metadata-file
        uses: hashicorp/actions-generate-metadata@v1
@@ -55,7 +55,7 @@ jobs:
          version: ${{ needs.get-product-version.outputs.product-version }}
          product: ${{ env.PKG_NAME }}
          repositoryOwner: "hashicorp"
-      - uses: actions/upload-artifact@v3
+      - uses: actions/upload-artifact@v2
        with:
          name: metadata.json
          path: ${{ steps.generate-metadata-file.outputs.filepath }}
@@ -107,10 +107,10 @@ jobs:
     name: Go ${{ matrix.go }} ${{ matrix.goos }} ${{ matrix.goarch }} ${{ matrix.component }} build
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v2
 
      - name: Setup go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v2
        with:
          go-version: ${{ matrix.go }}
@@ -132,7 +132,7 @@ jobs:
          zip -r -j out/${{ matrix.pkg_name }}_${{ needs.get-product-version.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip dist/
 
      - name: Upload built binaries
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v2
        with:
          name: ${{ matrix.pkg_name }}_${{ needs.get-product-version.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip
          path: ${{ matrix.component}}/out/${{ matrix.pkg_name }}_${{ needs.get-product-version.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip
@@ -177,7 +177,7 @@ jobs:
          echo "Test PASSED, expected: ${VERSION}, got: ${CONSUL_K8S_VERSION}"
 
      - name: Upload rpm package
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v2
        if: ${{ matrix.goos == 'linux' && matrix.component == 'cli' && matrix.goarch == 'amd64'}}
        with:
          name: ${{ env.RPM_PACKAGE }}
@@ -202,7 +202,7 @@ jobs:
          echo "Test PASSED, expected: ${VERSION}, got: ${CONSUL_K8S_VERSION}"
 
      - name: Upload debian packages
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v2
        if: ${{ matrix.goos == 'linux' && matrix.component == 'cli' && matrix.goarch == 'amd64'}}
        with:
          name: ${{ env.DEB_PACKAGE }}
@@ -219,7 +219,7 @@ jobs:
      repo: ${{ github.event.repository.name }}
      version: ${{ needs.get-product-version.outputs.product-version }}
    steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v2
      - uses: actions/download-artifact@v3
        with:
          name: consul-cni_${{ needs.get-product-version.outputs.product-version }}_linux_${{ matrix.arch }}.zip
@@ -263,7 +263,7 @@ jobs:
      repo: ${{ github.event.repository.name }}
      version: ${{ needs.get-product-version.outputs.product-version }}
    steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v2
      - uses: actions/download-artifact@v3
        with:
          name: consul-cni_${{ needs.get-product-version.outputs.product-version }}_linux_${{ matrix.arch }}.zip
@@ -274,9 +274,9 @@ jobs:
        run: |
          cd "${ZIP_LOCATION}"
          unzip -j *.zip
-      - name: Copy LICENSE
+      - name: Copy LICENSE.md
        run:
-          cp LICENSE ./control-plane
+          cp LICENSE.md ./control-plane
      - uses: hashicorp/actions-docker-build@v1
        with:
          smoke_test: |
@@ -305,7 +305,7 @@ jobs:
      repo: ${{ github.event.repository.name }}
      version: ${{ needs.get-product-version.outputs.product-version }}
    steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v2
      - uses: actions/download-artifact@v3
        with:
          name: consul-cni_${{ needs.get-product-version.outputs.product-version }}_linux_${{ matrix.arch }}.zip
@@ -316,9 +316,9 @@ jobs:
        run: |
          cd ${ZIP_LOCATION}
          unzip -j *.zip
-      - name: Copy LICENSE
+      - name: Copy LICENSE.md
        run:
-          cp LICENSE ./control-plane
+          cp LICENSE.md ./control-plane
      - uses: hashicorp/actions-docker-build@v1
        with:
          smoke_test: |
diff --git a/.github/workflows/jira-issues.yaml b/.github/workflows/jira-issues.yaml
deleted file mode 100644
index 18705db8e8..0000000000
--- a/.github/workflows/jira-issues.yaml
+++ /dev/null
@@ -1,83 +0,0 @@
-on:
-  issues:
-    types: [opened, closed, deleted, reopened]
-  issue_comment:
-    types: [created]
-  workflow_dispatch:
-
-name: Jira Community Issue Sync
-
-jobs:
-  sync:
-    runs-on: ubuntu-latest
-    name: Jira Community Issue sync
-    steps:
-      - name: Login
-        uses: atlassian/gajira-login@v3.0.0
-        env:
-          JIRA_BASE_URL: ${{ secrets.JIRA_BASE_URL }}
-          JIRA_USER_EMAIL: ${{ secrets.JIRA_USER_EMAIL }}
-          JIRA_API_TOKEN: ${{ secrets.JIRA_API_TOKEN }}
-
-      - name: Set ticket type
-        id: set-ticket-type
-        run: |
-          echo "TYPE=GH Issue" >> $GITHUB_OUTPUT
-
-      - name: Set ticket labels
-        if: github.event.action == 'opened'
-        id: set-ticket-labels
-        run: |
-          LABELS="["
-          if [[ "${{ contains(github.event.issue.labels.*.name, 'type/bug') }}" == "true" ]]; then LABELS+="\"type/bug\", "; fi
-          if [[ "${{ contains(github.event.issue.labels.*.name, 'type/enhancement') }}" == "true" ]]; then LABELS+="\"type/enhancement\", "; fi
-          if [[ ${#LABELS} != 1 ]]; then LABELS=${LABELS::-2}"]"; else LABELS+="]"; fi
-          echo "LABELS=${LABELS}" >> $GITHUB_OUTPUT
-
-      - name: Create ticket if an issue is filed, or if PR not by a team member is opened
-        if: github.event.action == 'opened'
-        uses: tomhjp/gh-action-jira-create@v0.2.0
-        with:
-          project: NET
-          issuetype: "${{ steps.set-ticket-type.outputs.TYPE }}"
-          summary: "${{ github.event.repository.name }} [${{ steps.set-ticket-type.outputs.TYPE }} #${{ github.event.issue.number }}]: ${{ github.event.issue.title }}"
-          description: "${{ github.event.issue.body || github.event.pull_request.body }}\n\n_Created in GitHub by ${{ github.actor }}._"
-          # customfield_10089 is "Issue Link", customfield_10371 is "Source" (use JIRA API to retrieve)
-          extraFields: '{ "customfield_10089": "${{ github.event.issue.html_url || github.event.pull_request.html_url }}",
-            "customfield_10371": { "value": "GitHub" },
-            "customfield_10535": [{ "value": "Service Mesh" }],
-            "components": [{ "name": "${{ github.event.repository.name }}" }],
-            "labels": ${{ steps.set-ticket-labels.outputs.LABELS }} }'
-        env:
-          JIRA_BASE_URL: ${{ secrets.JIRA_BASE_URL }}
-          JIRA_USER_EMAIL: ${{ secrets.JIRA_USER_EMAIL }}
-          JIRA_API_TOKEN: ${{ secrets.JIRA_API_TOKEN }}
-
-      - name: Search
-        if: github.event.action != 'opened'
-        id: search
-        uses: tomhjp/gh-action-jira-search@v0.2.1
-        with:
-          # cf[10089] is Issue Link (use JIRA API to retrieve)
-          jql: 'issuetype = "${{ steps.set-ticket-type.outputs.TYPE }}" and cf[10089] = "${{ github.event.issue.html_url || github.event.pull_request.html_url }}"'
-
-      - name: Sync comment
-        if: github.event.action == 'created' && steps.search.outputs.issue
-        uses: tomhjp/gh-action-jira-comment@v0.1.0
-        with:
-          issue: ${{ steps.search.outputs.issue }}
-          comment: "${{ github.actor }} ${{ github.event.review.state || 'commented' }}:\n\n${{ github.event.comment.body || github.event.review.body }}\n\n${{ github.event.comment.html_url || github.event.review.html_url }}"
-
-      - name: Close ticket
-        if: ( github.event.action == 'closed' || github.event.action == 'deleted' ) && steps.search.outputs.issue
-        uses: atlassian/gajira-transition@v2.0.1
-        with:
-          issue: ${{ steps.search.outputs.issue }}
-          transition: "Closed"
-
-      - name: Reopen ticket
-        if: github.event.action == 'reopened' && steps.search.outputs.issue
-        uses: atlassian/gajira-transition@v2.0.1
-        with:
-          issue: ${{ steps.search.outputs.issue }}
-          transition: "To Do"
diff --git a/.github/workflows/jira-pr.yaml b/.github/workflows/jira-pr.yaml
deleted file mode 100644
index 5c0ba71cd2..0000000000
--- a/.github/workflows/jira-pr.yaml
+++ /dev/null
@@ -1,97 +0,0 @@
-on:
-  pull_request_target:
-    types: [opened, closed, reopened]
-  workflow_dispatch:
-
-name: Jira Community PR Sync
-
-jobs:
-  sync:
-    runs-on: ubuntu-latest
-    name: Jira sync
-    steps:
-      - name: Login
-        uses: atlassian/gajira-login@v3.0.0
-        env:
-          JIRA_BASE_URL: ${{ secrets.JIRA_BASE_URL }}
-          JIRA_USER_EMAIL: ${{ secrets.JIRA_USER_EMAIL }}
-          JIRA_API_TOKEN: ${{ secrets.JIRA_API_TOKEN }}
-
-      - name: Set ticket type
-        id: set-ticket-type
-        run: |
-          echo "TYPE=GH Issue" >> $GITHUB_OUTPUT
-
-      - name: Set ticket labels
-        if: github.event.action == 'opened'
-        id: set-ticket-labels
-        run: |
-          LABELS="["
-          if [[ "${{ contains(github.event.issue.labels.*.name, 'type/bug') }}" == "true" ]]; then LABELS+="\"type/bug\", "; fi
-          if [[ "${{ contains(github.event.issue.labels.*.name, 'type/enhancement') }}" == "true" ]]; then LABELS+="\"type/enhancement\", "; fi
-          if [[ ${#LABELS} != 1 ]]; then LABELS=${LABELS::-2}"]"; else LABELS+="]"; fi
-          echo "LABELS=${LABELS}" >> $GITHUB_OUTPUT
-
-      - name: Check if team member
-        if: github.event.action == 'opened'
-        id: is-team-member
-        run: |
-          TEAM=consul
-          ROLE="$(hub api orgs/hashicorp/teams/${TEAM}/memberships/${{ github.actor }} | jq -r '.role | select(.!=null)')"
-          if [[ -n ${ROLE} ]]; then
-            echo "Actor ${{ github.actor }} is a ${TEAM} team member"
-            echo "MESSAGE=true" >> $GITHUB_OUTPUT
-          else
-            echo "Actor ${{ github.actor }} is NOT a ${TEAM} team member"
-            echo "MESSAGE=false" >> $GITHUB_OUTPUT
-          fi
-        env:
-          GITHUB_TOKEN: ${{ secrets.JIRA_SYNC_GITHUB_TOKEN }}
-
-      - name: Create ticket if an issue is filed, or if PR not by a team member is opened
-        if: ( github.event.action == 'opened' && steps.is-team-member.outputs.MESSAGE == 'false' )
-        uses: tomhjp/gh-action-jira-create@v0.2.0
-        with:
-          project: NET
-          issuetype: "${{ steps.set-ticket-type.outputs.TYPE }}"
-          summary: "${{ github.event.repository.name }} [${{ steps.set-ticket-type.outputs.TYPE }} #${{ github.event.pull_request.number }}]: ${{ github.event.pull_request.title }}"
-          description: "${{ github.event.issue.body || github.event.pull_request.body }}\n\n_Created in GitHub by ${{ github.actor }}._"
-          # customfield_10089 is "Issue Link", customfield_10371 is "Source" (use JIRA API to retrieve)
-          extraFields: '{ "customfield_10089": "${{ github.event.pull_request.html_url }}",
-            "customfield_10371": { "value": "GitHub" },
-            "customfield_10535": [{ "value": "Service Mesh" }],
-            "components": [{ "name": "${{ github.event.repository.name }}" }],
-            "labels": ${{ steps.set-ticket-labels.outputs.LABELS }} }'
-        env:
-          JIRA_BASE_URL: ${{ secrets.JIRA_BASE_URL }}
-          JIRA_USER_EMAIL: ${{ secrets.JIRA_USER_EMAIL }}
-          JIRA_API_TOKEN: ${{ secrets.JIRA_API_TOKEN }}
-
-      - name: Search
-        if: github.event.action != 'opened'
-        id: search
-        uses: tomhjp/gh-action-jira-search@v0.2.1
-        with:
-          # cf[10089] is Issue Link (use JIRA API to retrieve)
-          jql: 'issuetype = "${{ steps.set-ticket-type.outputs.TYPE }}" and cf[10089] = "${{ github.event.issue.html_url || github.event.pull_request.html_url }}"'
-
-      - name: Sync comment
-        if: github.event.action == 'created' && steps.search.outputs.issue
-        uses: tomhjp/gh-action-jira-comment@v0.1.0
-        with:
-          issue: ${{ steps.search.outputs.issue }}
-          comment: "${{ github.actor }} ${{ github.event.review.state || 'commented' }}:\n\n${{ github.event.comment.body || github.event.review.body }}\n\n${{ github.event.comment.html_url || github.event.review.html_url }}"
-
-      - name: Close ticket
-        if: ( github.event.action == 'closed' || github.event.action == 'deleted' ) && steps.search.outputs.issue
-        uses: atlassian/gajira-transition@v2.0.1
-        with:
-          issue: ${{ steps.search.outputs.issue }}
-          transition: "Closed"
-
-      - name: Reopen ticket
-        if: github.event.action == 'reopened' && steps.search.outputs.issue
-        uses: atlassian/gajira-transition@v2.0.1
-        with:
-          issue: ${{ steps.search.outputs.issue }}
-          transition: "To Do"
diff --git a/.github/workflows/reusable-acceptance.yml b/.github/workflows/reusable-acceptance.yml
index e4401932d3..56389bb346 100644
--- a/.github/workflows/reusable-acceptance.yml
+++ b/.github/workflows/reusable-acceptance.yml
@@ -11,8 +11,9 @@ on:
        type: string
        default: ""
      consul-k8s-image:
-        required: false
+        required: false
        type: string
+        default: docker.mirror.hashicorp.services/hashicorpdev/consul-k8s-control-plane:latest
      directory:
        required: true
        type: string
@@ -25,53 +26,40 @@ on:
      kind-version:
        required: false
        type: string
-        default: "v1.24.6"
-      checkout-ref:
-        required: false
-        type: string
-        default: ${{ github.sha }}
+        default: "v1.22.4"
    secrets:
      CONSUL_ENT_LICENSE:
        required: true
-      VAULT_LICENSE:
-        required: true
 
 # Environment variables can only be used at the step level
 env:
   TEST_RESULTS: /tmp/test-results # path to where test results are saved
-  CONSUL_ENT_LICENSE: ${{ secrets.CONSUL_ENT_LICENSE }}
-  VAULT_LICENSE: ${{ secrets.VAULT_LICENSE }}
-  CONSUL_K8S_IMAGE: ${{ inputs.consul-k8s-image }}
+  CONSUL_ENT_LICENSE: ${{ secrets.CONSUL_ENT_LICENSE }}
 
 jobs:
   job:
-    runs-on: [custom, linux, xl]
+    runs-on: ubuntu-latest
    strategy:
      matrix:
-        include:
-          - {runner: "0", test-packages: "basic consul-dns metrics"}
-          - {runner: "1", test-packages: "connect"}
-          - {runner: "2", test-packages: "controller example"}
-          - {runner: "3", test-packages: "ingress-gateway"}
-          - {runner: "4", test-packages: "partitions"}
-          - {runner: "5", test-packages: "peering"}
-          - {runner: "6", test-packages: "snapshot-agent vault wan-federation"}
-          - {runner: "7", test-packages: "cli sync terminating-gateway"}
+        include: # I am really sorry for this but I could not find a way to automatically split our tests into several runners. For now, split manually.
+ - {runner: "0", test-packages: "basic connect consul-dns"} + - {runner: "1", test-packages: "controller example ingress-gateway"} + - {runner: "2", test-packages: "mesh-gateway metrics"} + - {runner: "3", test-packages: "partitions sync terminating-gateway"} + - {runner: "4", test-packages: "vault"} - fail-fast: false + fail-fast: true steps: - name: Checkout code - uses: actions/checkout@v3 - with: - ref: ${{ inputs.checkout-ref }} + uses: actions/checkout@v2 - name: Setup go - uses: actions/setup-go@v3 + uses: actions/setup-go@v2 with: go-version: ${{ inputs.go-version }} - name: Setup go mod cache - uses: actions/cache@v3 + uses: actions/cache@v2 with: path: | ~/.cache/go-build @@ -80,23 +68,11 @@ jobs: restore-keys: | ${{ runner.os }}-go- - - name: Install pre-requisites # Install gotestsum, kind, kubectl, and helm + - name: Install gotestsum run: | - wget https://github.com/gotestyourself/gotestsum/releases/download/v1.6.4/gotestsum_1.6.4_linux_amd64.tar.gz - sudo tar -C /usr/local/bin -xzf gotestsum_1.6.4_linux_amd64.tar.gz - rm gotestsum_1.6.4_linux_amd64.tar.gz - - curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.15.0/kind-linux-amd64 - chmod +x ./kind - sudo mv ./kind /usr/local/bin/kind - - curl -LO "https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl" - chmod +x ./kubectl - sudo mv ./kubectl /usr/local/bin/kubectl - - wget https://get.helm.sh/helm-v3.9.4-linux-amd64.tar.gz - tar -zxvf helm-v3.9.4-linux-amd64.tar.gz - sudo mv linux-amd64/helm /usr/local/bin/helm + wget https://github.com/gotestyourself/gotestsum/releases/download/v"${{ inputs.gotestsum-version }}"/gotestsum_"${{ inputs.gotestsum-version }}"_linux_amd64.tar.gz + sudo tar -C /usr/local/bin -xzf gotestsum_"${{ inputs.gotestsum-version }}"_linux_amd64.tar.gz + rm gotestsum_"${{ inputs.gotestsum-version }}"_linux_amd64.tar.gz - run: mkdir -p ${{ env.TEST_RESULTS }} @@ -109,11 +85,6 @@ jobs: kind create cluster --name dc1 --image kindest/node:${{ inputs.kind-version }} kind create cluster --name dc2 --image kindest/node:${{ inputs.kind-version }} - - name: Build CLI - run: | - sudo make cli-dev - consul-k8s version - # We have to run the tests for each package separately so that we can # exit early if any test fails (-failfast only works within a single # package). @@ -127,7 +98,7 @@ jobs: do fullpkg="github.com/hashicorp/consul-k8s/${{ inputs.directory }}/${pkg}" echo "Testing package: ${fullpkg}" - if ! gotestsum --format=testname --jsonfile=jsonfile-${pkg////-} -- ${fullpkg} -p 1 -timeout 2h -failfast \ + if ! 
gotestsum --jsonfile=jsonfile-${pkg////-} -- ${fullpkg} -p 1 -timeout 2h -failfast \ ${{ inputs.additional-flags }} \ -enable-enterprise \ -enable-multi-cluster \ @@ -139,19 +110,19 @@ jobs: break fi done - gotestsum --format=testname --raw-command --junitfile "${{ env.TEST_RESULTS }}/gotestsum-report.xml" -- cat jsonfile* + gotestsum --raw-command --junitfile "${{ env.TEST_RESULTS }}/gotestsum-report.xml" -- cat jsonfile* exit $exit_code - name: Upload tests if: always() - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v2 with: name: ${{ inputs.name }}-${{ matrix.test-packages }}-gotestsum-report.xml path: ${{ env.TEST_RESULTS }}/gotestsum-report.xml - - name: Upload debug (on failure) + - name: Upload debug (on failure) if: failure() - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v2 with: name: ${{ inputs.name }}-${{ matrix.test-packages }}-debug-info path: ${{ env.TEST_RESULTS }}/debug diff --git a/.github/workflows/reusable-golangci-lint.yml b/.github/workflows/reusable-golangci-lint.yml index 30b6a0a3b3..c8c3793b03 100644 --- a/.github/workflows/reusable-golangci-lint.yml +++ b/.github/workflows/reusable-golangci-lint.yml @@ -22,13 +22,13 @@ jobs: uses: actions/checkout@v2 - name: Setup go - uses: actions/setup-go@v3 + uses: actions/setup-go@v2 with: go-version: ${{ inputs.go-version }} - name: golangci-lint-${{inputs.directory}} - uses: golangci/golangci-lint-action@v3.4.0 + uses: golangci/golangci-lint-action@v3.2.0 with: - version: v1.51 + version: v1.46.2 working-directory: ${{inputs.directory}} args: ${{inputs.args}} diff --git a/.github/workflows/reusable-unit.yml b/.github/workflows/reusable-unit.yml index 8ba09377cc..9e563e0203 100644 --- a/.github/workflows/reusable-unit.yml +++ b/.github/workflows/reusable-unit.yml @@ -13,7 +13,7 @@ on: # Environment variables can only be used at the step level env: TEST_RESULTS: /tmp/test-results # path to where test results are saved - GOTESTSUM_VERSION: 1.8.2 + GOTESTSUM_VERSION: 1.8.1 jobs: job: @@ -21,15 +21,15 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@v2 - name: Setup go - uses: actions/setup-go@v3 + uses: actions/setup-go@v2 with: go-version: ${{inputs.go-version}} - name: Setup go mod cache - uses: actions/cache@v3 + uses: actions/cache@v2 with: path: | ~/.cache/go-build @@ -53,5 +53,5 @@ jobs: - name: Run tests working-directory: ${{inputs.directory}} run: | - gotestsum --format=testname --junitfile ${{env.TEST_RESULTS}}/gotestsum-report.xml ./... -- -p 4 + gotestsum --junitfile ${{env.TEST_RESULTS}}/gotestsum-report.xml ./... -- -p 4 diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 803aed69a4..1e5de4c98e 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -4,33 +4,18 @@ on: env: TEST_RESULTS: /tmp/test-results # path to where test results are saved - GOTESTSUM_VERSION: 1.8.2 # You cannot use environment variables with workflows. The gotestsum version is hardcoded in the reusable workflows too. - # We use docker images to copy the consul binary for unit tests. - CONSUL_OSS_DOCKER_IMAGE: hashicorppreview/consul:1.14-dev # Consul's OSS version to use in tests - CONSUL_ENT_DOCKER_IMAGE: hashicorppreview/consul-enterprise:1.14-dev # Consul's enterprise version to use in tests + CONSUL_VERSION: 1.13.3 # Consul's OSS version to use in tests + CONSUL_ENT_VERSION: 1.13.3+ent # Consul's enterprise version to use in tests + GOTESTSUM_VERSION: 1.8.1 # You cannot use environment variables with workflows. 
The gotestsum version is hardcoded in the reusable workflows too. jobs: - terraform-fmt-check: - name: "Terraform format check" - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - name: Setup Terraform - uses: hashicorp/setup-terraform@v2 - with: - terraform_version: TERRAFORM_VERSION - terraform_wrapper: false - - name: Run Terraform checks - run: | - make terraform-fmt-check TERRAFORM_DIR="${{ github.workspace }}" - get-go-version: name: "Determine Go toolchain version" runs-on: ubuntu-latest outputs: go-version: ${{ steps.get-go-version.outputs.go-version }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v2 - name: Determine Go version id: get-go-version # We use .go-version as our source of truth for current Go @@ -39,33 +24,21 @@ jobs: echo "Building with Go $(cat .go-version)" echo "::set-output name=go-version::$(cat .go-version)" - get-product-version: - runs-on: ubuntu-latest - outputs: - product-version: ${{ steps.get-product-version.outputs.product-version }} - steps: - - uses: actions/checkout@v3 - - name: get product version - id: get-product-version - run: | - make version - echo "::set-output name=product-version::$(make version)" - validate-helm-gen: needs: - get-go-version runs-on: ubuntu-latest steps: - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@v2 - name: Setup go - uses: actions/setup-go@v3 + uses: actions/setup-go@v2 with: go-version: ${{ needs.get-go-version.outputs.go-version }} - name: Setup go mod cache - uses: actions/cache@v3 + uses: actions/cache@v2 with: path: | ~/.cache/go-build @@ -101,11 +74,11 @@ jobs: - unit-helm-gen runs-on: ubuntu-latest container: - image: docker.mirror.hashicorp.services/hashicorpdev/consul-helm-test:0.15.0 + image: docker.mirror.hashicorp.services/hashicorpdev/consul-helm-test:0.12.0 options: --user 1001 steps: - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@v2 - name: Run Unit Tests working-directory: charts/consul @@ -117,10 +90,10 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@v2 - name: Setup go - uses: actions/setup-go@v3 + uses: actions/setup-go@v2 with: go-version: ${{ needs.get-go-version.outputs.go-version }} @@ -143,15 +116,15 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@v2 - name: Setup go - uses: actions/setup-go@v3 + uses: actions/setup-go@v2 with: go-version: ${{ needs.get-go-version.outputs.go-version }} - name: Setup go mod cache - uses: actions/cache@v3 + uses: actions/cache@v2 with: path: | ~/.cache/go-build @@ -173,14 +146,16 @@ jobs: working-directory: control-plane run: | mkdir -p $HOME/bin - container_id=$(docker create ${{env.CONSUL_OSS_DOCKER_IMAGE}}) - docker cp "$container_id:/bin/consul" $HOME/bin/consul - docker rm "$container_id" + wget https://releases.hashicorp.com/consul/${{env.CONSUL_VERSION}}/consul_${{env.CONSUL_VERSION}}_linux_amd64.zip && \ + unzip consul_${{env.CONSUL_VERSION}}_linux_amd64.zip -d $HOME/bin && \ + rm consul_${{env.CONSUL_VERSION}}_linux_amd64.zip + chmod +x $HOME/bin/consul + - name: Run go tests working-directory: control-plane run: | PACKAGE_NAMES=$(go list ./...) 
- gotestsum --format=testname --junitfile ${{env.TEST_RESULTS}}/gotestsum-report.xml -- -p 4 $PACKAGE_NAMES + gotestsum --junitfile ${{env.TEST_RESULTS}}/gotestsum-report.xml -- -p 4 $PACKAGE_NAMES test-enterprise-control-plane: if: github.repository_owner == 'hashicorp' # Do not run on forks as this requires secrets @@ -190,15 +165,15 @@ jobs: CONSUL_LICENSE: ${{secrets.CONSUL_LICENSE}} steps: - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@v2 - name: Setup go - uses: actions/setup-go@v3 + uses: actions/setup-go@v2 with: go-version: ${{ needs.get-go-version.outputs.go-version }} - name: Setup go mod cache - uses: actions/cache@v3 + uses: actions/cache@v2 with: path: | ~/.cache/go-build @@ -220,62 +195,52 @@ jobs: working-directory: control-plane run: | mkdir -p $HOME/bin - container_id=$(docker create ${{env.CONSUL_ENT_DOCKER_IMAGE}}) - docker cp "$container_id:/bin/consul" $HOME/bin/consul - docker rm "$container_id" + wget https://releases.hashicorp.com/consul/${{env.CONSUL_ENT_VERSION}}/consul_${{env.CONSUL_ENT_VERSION}}_linux_amd64.zip && \ + unzip consul_${{env.CONSUL_ENT_VERSION}}_linux_amd64.zip -d $HOME/bin && \ + rm consul_${{env.CONSUL_ENT_VERSION}}_linux_amd64.zip + chmod +x $HOME/bin/consul - name: Run go tests working-directory: control-plane run: | PACKAGE_NAMES=$(go list ./...) - gotestsum --format=testname --junitfile ${{env.TEST_RESULTS}}/gotestsum-report.xml -- -tags=enterprise -p 4 $PACKAGE_NAMES + gotestsum --junitfile ${{env.TEST_RESULTS}}/gotestsum-report.xml -- -tags=enterprise -p 4 $PACKAGE_NAMES build-distros: - needs: [get-go-version, get-product-version] + needs: [get-go-version, test-control-plane, test-enterprise-control-plane] runs-on: ubuntu-latest strategy: matrix: include: - # cli - - {go: "${{ needs.get-go-version.outputs.go-version }}", goos: "linux", goarch: "amd64", component: "cli", pkg_name: "consul-k8s", "bin_name": "consul-k8s" } - # control-plane - - {go: "${{ needs.get-go-version.outputs.go-version }}", goos: "linux", goarch: "amd64", component: "control-plane", pkg_name: "consul-k8s-control-plane", "bin_name": "consul-k8s-control-plane" } - # consul-cni - - {go: "${{ needs.get-go-version.outputs.go-version }}", goos: "linux", goarch: "amd64", component: "control-plane/cni", pkg_name: "consul-cni", "bin_name": "consul-cni" } - + - {go: "${{ needs.get-go-version.outputs.go-version }}", goos: "linux", goarch: "386"} + - {go: "${{ needs.get-go-version.outputs.go-version }}", goos: "linux", goarch: "amd64"} + - {go: "${{ needs.get-go-version.outputs.go-version }}", goos: "linux", goarch: "arm"} + - {go: "${{ needs.get-go-version.outputs.go-version }}", goos: "linux", goarch: "arm64"} fail-fast: true - name: Go ${{ matrix.go }} ${{ matrix.goos }} ${{ matrix.goarch }} ${{ matrix.component }} build + name: Go ${{ matrix.go }} ${{ matrix.goos }} ${{ matrix.goarch }} build steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v2 - name: Setup go - uses: actions/setup-go@v3 + uses: actions/setup-go@v2 with: go-version: ${{ matrix.go }} - name: Build + working-directory: control-plane env: GOOS: ${{ matrix.goos }} GOARCH: ${{ matrix.goarch }} CGO_ENABLED: 0 - working-directory: ${{ matrix.component }} run: | - mkdir -p dist out - - export GIT_COMMIT=$(git rev-parse --short HEAD) - export GIT_DIRTY=$(test -n "$(git status --porcelain)" && echo "+CHANGES") - export GIT_IMPORT=github.com/hashicorp/consul-k8s/${{ matrix.component }}/version - export GOLDFLAGS="-X ${GIT_IMPORT}.GitCommit=${GIT_COMMIT}${GIT_DIRTY} -X 
${GIT_IMPORT}.GitDescribe=${{ needs.get-product-version.outputs.product-version }}" + XC_OS=${{ matrix.goos }} XC_ARCH=${{ matrix.goarch }} ./build-support/scripts/build-local.sh + zip -r -j consul-k8s_${{ matrix.goos }}_${{ matrix.goarch }}.zip bin - CGO_ENABLED=0 go build -o dist/${{ matrix.bin_name }} -ldflags "${GOLDFLAGS}" . - zip -r -j out/${{ matrix.pkg_name }}_${{ needs.get-product-version.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip dist/ - - - name: Upload built binaries - uses: actions/upload-artifact@v3 + - uses: actions/upload-artifact@v2 with: - name: ${{ matrix.pkg_name }}_${{ needs.get-product-version.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip - path: ${{ matrix.component}}/out/${{ matrix.pkg_name }}_${{ needs.get-product-version.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip + name: consul-k8s_${{ matrix.goos }}_${{ matrix.goarch }}.zip + path: control-plane/consul-k8s_${{ matrix.goos }}_${{ matrix.goarch }}.zip golangci-lint-acceptance: needs: @@ -307,92 +272,56 @@ jobs: directory: cli go-version: ${{ needs.get-go-version.outputs.go-version }} - # upload dev docker image - dev-upload-docker: - if: github.repository_owner == 'hashicorp' # Do not run on forks as this requires secrets - needs: [ get-product-version, build-distros ] - runs-on: ubuntu-latest - strategy: - matrix: - arch: ["amd64"] - env: - repo: ${{ github.event.repository.name }} - version: ${{ needs.get-product-version.outputs.product-version }} - steps: - - uses: actions/checkout@v3 - - uses: actions/download-artifact@v3 - with: - name: consul-cni_${{ needs.get-product-version.outputs.product-version }}_linux_${{ matrix.arch }}.zip - path: control-plane/dist/cni/linux/${{ matrix.arch }} - - uses: actions/download-artifact@v3 - with: - name: consul-k8s-control-plane_${{ needs.get-product-version.outputs.product-version }}_linux_${{ matrix.arch }}.zip - path: control-plane/dist/linux/${{ matrix.arch }} - - name: extract consul-cni zip - env: - ZIP_LOCATION: control-plane/dist/cni/linux/${{ matrix.arch }} - run: | - cd "${ZIP_LOCATION}" - unzip -j *.zip - - name: extract control-plane zip - env: - ZIP_LOCATION: control-plane/dist/linux/${{ matrix.arch }} - run: | - cd "${ZIP_LOCATION}" - unzip -j *.zip - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 - - name: Login to Docker Hub - uses: docker/login-action@v2 - with: - username: ${{ secrets.DOCKER_USER }} - password: ${{ secrets.DOCKER_PASS }} - - name: Docker Build (Action) - uses: docker/build-push-action@v3 - with: - push: true - context: control-plane - platforms: ${{ matrix.arch }} - target: release-default - tags: docker.io/hashicorppreview/${{ env.repo }}-control-plane:${{ env.version }}-pr-${{ github.sha }} - -# Disable GHA acceptance tests until GHA formally supported -# acceptance: -# needs: [ get-product-version, dev-upload-docker, get-go-version ] -# uses: ./.github/workflows/reusable-acceptance.yml -# with: -# name: acceptance -# directory: acceptance/tests -# go-version: ${{ needs.get-go-version.outputs.go-version }} -# additional-flags: "-use-kind -kubecontext=kind-dc1 -secondary-kubecontext=kind-dc2 -consul-image=docker.mirror.hashicorp.services/hashicorppreview/consul-enterprise:1.14-dev" -# gotestsum-version: 1.8.2 -# consul-k8s-image: docker.io/hashicorppreview/${{ github.event.repository.name }}-control-plane:${{ needs.get-product-version.outputs.product-version }}-pr-${{ github.sha }} -# secrets: -# CONSUL_ENT_LICENSE: ${{ 
secrets.CONSUL_ENT_LICENSE }} +# Disabling for now until we get faster VMs to run acceptance tests. Faster VMs for Github Actions are supposed +# to be available in the summer of 2022. For now, run the dev-upload docker and acceptance tests in CircleCI +# dev-upload-docker: +# if: github.repository_owner == 'hashicorp' # Do not run on forks as this requires secrets +# needs: build-distros +# runs-on: ubuntu-latest +# +# env: +# GITHUB_PULL_REQUEST: ${{github.event.pull_request.number}} +# DOCKER_USER: ${{secrets.DOCKER_USER}} +# DOCKER_PASS: ${{secrets.DOCKER_PASS}} +# steps: +# - uses: actions/checkout@v2 +# +# - run: mkdir -p control-plane/pkg/bin/linux_amd64 +# +# - uses: actions/download-artifact@v3 +# with: +# name: consul-k8s_linux_amd64.zip +# path: control-plane +# +# - name: Docker build +# working-directory: control-plane +# run: | +# unzip consul-k8s_linux_amd64.zip -d ./pkg/bin/linux_amd64 +# make ci.dev-docker-github # # acceptance-tproxy: -# needs: [ get-product-version, dev-upload-docker, get-go-version ] +# needs: [get-go-version, unit-cli, dev-upload-docker, unit-acceptance-framework, unit-test-helm-templates] +# needs: dev-upload-docker # uses: ./.github/workflows/reusable-acceptance.yml # with: # name: acceptance-tproxy # directory: acceptance/tests # go-version: ${{ needs.get-go-version.outputs.go-version }} -# additional-flags: "-use-kind -kubecontext=kind-dc1 -secondary-kubecontext=kind-dc2 -enable-transparent-proxy -consul-image=docker.mirror.hashicorp.services/hashicorppreview/consul-enterprise:1.14-dev" -# gotestsum-version: 1.8.2 -# consul-k8s-image: docker.io/hashicorppreview/${{ github.event.repository.name }}-control-plane:${{ needs.get-product-version.outputs.product-version }}-pr-${{ github.sha }} +# additional-flags: "-use-kind -kubecontext=kind-dc1 -secondary-kubecontext=kind-dc2 -enable-transparent-proxy" +# gotestsum-version: 1.6.4 # secrets: # CONSUL_ENT_LICENSE: ${{ secrets.CONSUL_ENT_LICENSE }} # -# acceptance-cni: -# needs: [ get-product-version, dev-upload-docker, get-go-version ] +# acceptance: +# #needs: [get-go-version, unit-cli, dev-upload-docker, unit-acceptance-framework, unit-test-helm-templates] +# needs: dev-upload-docker # uses: ./.github/workflows/reusable-acceptance.yml # with: # name: acceptance # directory: acceptance/tests # go-version: ${{ needs.get-go-version.outputs.go-version }} -# additional-flags: "-use-kind -kubecontext=kind-dc1 -secondary-kubecontext=kind-dc2 -enable-transparent-proxy -enable-cni -consul-image=docker.mirror.hashicorp.services/hashicorppreview/consul-enterprise:1.14-dev" -# gotestsum-version: 1.8.2 -# consul-k8s-image: docker.io/hashicorppreview/${{ github.event.repository.name }}-control-plane:${{ needs.get-product-version.outputs.product-version }}-pr-${{ github.sha }} +# additional-flags: "-use-kind -kubecontext=kind-dc1 -secondary-kubecontext=kind-dc2" +# gotestsum-version: 1.6.4 # secrets: # CONSUL_ENT_LICENSE: ${{ secrets.CONSUL_ENT_LICENSE }} diff --git a/.gitignore b/.gitignore index ecc38e82e0..20ab4d997b 100644 --- a/.gitignore +++ b/.gitignore @@ -9,4 +9,3 @@ bin/ pkg/ .idea/ .vscode -.bob/ diff --git a/.go-version b/.go-version index 0044d6cb96..b9fb27ab4f 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.20.1 +1.18.3 diff --git a/.release/release-metadata.hcl b/.release/release-metadata.hcl index c053fdac2f..0695fea61f 100644 --- a/.release/release-metadata.hcl +++ b/.release/release-metadata.hcl @@ -1,4 +1,4 @@ url_docker_registry_dockerhub = 
"https://hub.docker.com/r/hashicorp/consul-k8s-control-plane" -url_license = "https://github.com/hashicorp/consul-k8s/blob/main/LICENSE" +url_license = "https://github.com/hashicorp/consul-k8s/blob/main/LICENSE.md" url_project_website = "https://www.consul.io/docs/k8s" url_source_repository = "https://github.com/hashicorp/consul-k8s" diff --git a/CHANGELOG.md b/CHANGELOG.md index dac76400c8..75beb62542 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,10 +1,12 @@ ## UNRELEASED +## 0.49.4 (February 7, 2023) + BREAKING CHANGES: * Helm: * Change defaults to exclude the `openebs` namespace from sidecar injection. If you previously had pods in that namespace that you wanted to be injected, you must now set `namespaceSelector` as follows: - + ```yaml connectInject: namespaceSelector: | @@ -16,42 +18,8 @@ BREAKING CHANGES: [[GH-1869](https://github.com/hashicorp/consul-k8s/pull/1869)] IMPROVEMENTS: -* Helm: - * Kubernetes v1.26 is now supported. Minimum tested version of Kubernetes is now v1.23. [[GH-1852](https://github.com/hashicorp/consul-k8s/pull/1852)] - * Add a `global.extraLabels` stanza to allow setting global Kubernetes labels for all components deployed by the `consul-k8s` Helm chart. [[GH-1778](https://github.com/hashicorp/consul-k8s/pull/1778)] - * Add the `accessLogs` field to the `ProxyDefaults` CRD. [[GH-1816](https://github.com/hashicorp/consul-k8s/pull/1816)] - * Add the `envoyExtensions` field to the `ProxyDefaults` and `ServiceDefaults` CRD. [[GH-1823]](https://github.com/hashicorp/consul-k8s/pull/1823) - * Add the `balanceInboundConnections` field to the `ServiceDefaults` CRD. [[GH-1823]](https://github.com/hashicorp/consul-k8s/pull/1823) - * Add the `upstreamConfig.overrides[].peer` field to the `ServiceDefaults` CRD. [[GH-1853]](https://github.com/hashicorp/consul-k8s/pull/1853) -* Control-Plane - * Update minimum go version for project to 1.20 [[GH-1908](https://github.com/hashicorp/consul-k8s/pull/1908)] - * Add support for the annotation `consul.hashicorp.com/use-proxy-health-check`. When this annotation is used by a service, it configures a readiness endpoint on Consul Dataplane and queries it instead of the proxy's inbound port which forwards requests to the application. [[GH-1824](https://github.com/hashicorp/consul-k8s/pull/1824)], [[GH-1841](https://github.com/hashicorp/consul-k8s/pull/1841)] - * Add health check for synced services based on the status of the Kubernetes readiness probe on synced pod. [[GH-1821](https://github.com/hashicorp/consul-k8s/pull/1821)] - * Remove extraneous `gnupg` dependency from `consul-k8s-control-plane` since it is no longer needed for validating binary artifacts prior to release. [[GH-1882](https://github.com/hashicorp/consul-k8s/pull/1882)] - * Server ACL Init always appends both, the secrets from the serviceAccount's secretRefs and the one created by the Helm chart, to support Openshift secret handling. [[GH-1770](https://github.com/hashicorp/consul-k8s/pull/1770)] -* CLI: - * Update minimum go version for project to 1.20 [[GH-1908](https://github.com/hashicorp/consul-k8s/pull/1908)] - * Add `consul-k8s proxy log podname` command for displaying and modifying Envoy log levels for a given Pod. 
[GH-1844](https://github.com/hashicorp/consul-k8s/pull/1844), [GH-1849](https://github.com/hashicorp/consul-k8s/pull/1849), [GH-1864](https://github.com/hashicorp/consul-k8s/pull/1864)
-
-BUG FIXES:
* Control Plane
- * Don't incorrectly diff intention config entries when upgrading from Consul pre-1.12 to 1.12+ [[GH-1804](https://github.com/hashicorp/consul-k8s/pull/1804)]
-* Security:
- * Upgrade to use Go 1.20.1. This resolves vulnerabilities [CVE-2022-41724](https://go.dev/issue/58001) in `crypto/tls` and [CVE-2022-41723](https://go.dev/issue/57855) in `net/http`. [[GH-1908](https://github.com/hashicorp/consul-k8s/pull/1908)]
-
-## 1.0.3 (January 30, 2023)
-
-IMPROVEMENTS:
-* Helm:
- * Kubernetes v1.26 is now supported. Minimum tested version of Kubernetes is now v1.23. [[GH-1852](https://github.com/hashicorp/consul-k8s/pull/1852)]
- * Add a `global.extraLabels` stanza to allow setting global Kubernetes labels for all components deployed by the `consul-k8s` Helm chart. [[GH-1778](https://github.com/hashicorp/consul-k8s/pull/1778)]
-* Control-Plane
- * Add support for the annotation `consul.hashicorp.com/use-proxy-health-check`. When this annotation is used by a service, it configures a readiness endpoint on Consul Dataplane and queries it instead of the proxy's inbound port which forwards requests to the application. [[GH-1824](https://github.com/hashicorp/consul-k8s/pull/1824)], [[GH-1841](https://github.com/hashicorp/consul-k8s/pull/1841)]
- * Add health check for synced services based on the status of the Kubernetes readiness probe on the synced pod. [[GH-1821](https://github.com/hashicorp/consul-k8s/pull/1821)]
-
-BUG FIXES:
-* Control Plane
- * Don't incorrectly diff intention config entries when upgrading from Consul pre-1.12 to 1.12+ [[GH-1804](https://github.com/hashicorp/consul-k8s/pull/1804)]
+ * Remove extraneous `gnupg` dependency from `consul-k8s-control-plane` since it is no longer needed for validating binary artifacts prior to release. [[GH-1882](https://github.com/hashicorp/consul-k8s/pull/1882)]

## 0.49.3 (January 30, 2023)

@@ -66,17 +34,6 @@ BUG FIXES:
* Control Plane
* Don't incorrectly diff intention config entries when upgrading from Consul pre-1.12 to 1.12+ [[GH-1804](https://github.com/hashicorp/consul-k8s/pull/1804)]
-## 1.0.2 (December 1, 2022)
-
-IMPROVEMENTS:
-* Helm:
- * CNI: Add `connectInject.cni.namespace` stanza which allows the CNI plugin resources to be deployed in a namespace other than the namespace that Consul is installed in. [[GH-1756](https://github.com/hashicorp/consul-k8s/pull/1756)]
-
-BUG FIXES:
-* Helm:
- * Use the correct autogenerated cert for the API Gateway Controller when connecting to servers versus clients. [[GH-1753](https://github.com/hashicorp/consul-k8s/pull/1753)]
- * Don't mount the CA cert when `externalServers.useSystemRoots` is `true`. [[GH-1753](https://github.com/hashicorp/consul-k8s/pull/1753)]
-
## 0.49.2 (December 1, 2022)

IMPROVEMENTS:
@@ -89,117 +46,6 @@ BUG FIXES:
* Helm:
* Disable PodSecurityPolicies templating for `gossip-encryption-autogenerate` and `partition-init` when `global.enablePodSecurityPolicies` is `false`. [[GH-1693](https://github.com/hashicorp/consul-k8s/pull/1693)]
-## 1.0.1 (November 21, 2022)
-
-BUG FIXES:
-* Control Plane
- * Add discover binary to control-plane image [[GH-1749](https://github.com/hashicorp/consul-k8s/pull/1749)]
-* Helm:
- * Don't pass in a CA file to the API Gateway controller when `externalServers.useSystemRoots` is `true`.
[[GH-1743](https://github.com/hashicorp/consul-k8s/pull/1743)] - -## 1.0.0 (November 17, 2022) - -BREAKING CHANGES: -* Admin Partitions **(Consul Enterprise only)**: Remove the partition service. When configuring Admin Partitions, the expose-servers service should be used instead. -* Consul Dataplane: - * Consul client agents are no longer deployed by default, and Consul service mesh no longer uses Consul clients to operate. This change affects several main areas listed below. [[GH-1552](https://github.com/hashicorp/consul-k8s/pull/1552)] - * A new component `consul-dataplane` is now injected as a sidecar-proxy instead of plain Envoy. `consul-dataplane` manages the Envoy proxy process and proxies xDS requests from Envoy to Consul servers. - * All services on the service mesh are now registered directly with the central catalog in Consul servers. - * All service-mesh consul-k8s components are configured to talk directly to Consul servers. - * Mesh, ingress, and terminating gateways are now registered centrally by the endpoints controller, similar to how service-mesh services are registered. -* CLI: - * Change default behavior of `consul-k8s install` to perform the installation when no answer is provided to the prompt. [[GH-1673](https://github.com/hashicorp/consul-k8s/pull/1673)] -* Helm: - * Kubernetes-1.25 is now supported with the caveat that `global.enablePodSecurityPolicies` is not supported since PodSecurityPolicies have been removed in favor of PodSecurityStandards in Kubernetes-1.25. Full support for PodSecurityStandards will be added in a follow-on commit. [[GH-1726](https://github.com/hashicorp/consul-k8s/pull/1726)] - * Support simplified default deployment values to allow for easier quick starts and testing: - * Set `connectInject.replicas` to 1 [[GH-1702](https://github.com/hashicorp/consul-k8s/pull/1702)] - * Set `meshGateway.affinity` to null and `meshGateway.replicas` to 1 [[GH-1702](https://github.com/hashicorp/consul-k8s/pull/1702)] - * Set `ingressGateways.defaults.affinity` to null and `ingressGateways.defaults.replicas` to 1 [[GH-1702](https://github.com/hashicorp/consul-k8s/pull/1702)] - * Set `terminatingGateways.defaults.affinity` to null and `terminatingGateways.defaults.replicas` to 1 [[GH-1702](https://github.com/hashicorp/consul-k8s/pull/1702)] - * Set `server.replicas` to `1`. Formerly, this defaulted to `3`. [[GH-1551](https://github.com/hashicorp/consul-k8s/pull/1551)] - * `client.enabled` now defaults to `false`. Setting it to `true` will deploy client agents, however, none of the consul-k8s components will use clients for their operation. - * `global.imageEnvoy` is no longer used for sidecar proxies, as well as mesh, terminating, and ingress gateways. - * `externalServers.grpcPort` default is now `8502` instead of `8503`. - * `meshGateway.service.enabled` value is removed. Mesh gateways now will always have a Kubernetes service as this is required to register them as a service with Consul. - * `meshGateway.initCopyConsulContainer`, `ingressGateways.initCopyConsulContainer`, `terminatingGateways.initCopyConsulContainer` values are removed. - * `connectInject.enabled` now defaults to `true`. [[GH-1551](https://github.com/hashicorp/consul-k8s/pull/1551)] - * `syncCatalog.consulNamespaces.mirroringK8S` now defaults to `true`. [[GH-1601](https://github.com/hashicorp/consul-k8s/pull/1601)] - * `connectInject.consulNamespaces.mirroringK8S` now defaults to `true`. 
[[GH-1601](https://github.com/hashicorp/consul-k8s/pull/1601)]
- * Remove `controller` section from the values file as the controller has now been merged into the connect-inject deployment. [[GH-1697](https://github.com/hashicorp/consul-k8s/pull/1697)]
- * Remove `global.consulSidecarContainer` from values file as there is no longer a consul sidecar. [[GH-1635](https://github.com/hashicorp/consul-k8s/pull/1635)]
- * Consul snapshot-agent now runs as a sidecar with Consul servers. [[GH-1620](https://github.com/hashicorp/consul-k8s/pull/1620)]
-
- This results in the following changes to Helm values:
- * Move `client.snapshotAgent` values to `server.snapshotAgent`, with the exception of the following values:
- * `client.snapshotAgent.replicas`
- * `client.snapshotAgent.serviceAccount`
- * Remove `global.secretsBackend.vault.consulSnapshotAgentRole` value. You should now use the `global.secretsBackend.vault.consulServerRole` for access to any Vault secrets.
- * Change `dns.enabled` and `dns.enableRedirection` to default to the value of `connectInject.transparentProxy.defaultEnabled`.
- Previously, `dns.enabled` defaulted to the value of `global.enabled` and `dns.enableRedirection`
- defaulted to `false`. [[GH-1688](https://github.com/hashicorp/consul-k8s/pull/1688)]
- * Remove `global.imageEnvoy` and replace with `global.imageConsulDataplane` for running the sidecar proxy.
- * Add `apiGateway.imageEnvoy` for configuring the version of Envoy that the API Gateway uses. [[GH-1698](https://github.com/hashicorp/consul-k8s/pull/1698)]
-* Peering:
- * Rename `PeerName` to `Peer` in ExportedServices CRD. [[GH-1596](https://github.com/hashicorp/consul-k8s/pull/1596)]
- * Remove support for customizing the server addresses in peering token generation. Instead, mesh gateways should be used
- to establish peering connections if the server pods are not directly reachable. [[GH-1610](https://github.com/hashicorp/consul-k8s/pull/1610)]
- * Require `global.tls.enabled` when peering is enabled. [[GH-1610](https://github.com/hashicorp/consul-k8s/pull/1610)]
- * Require `meshGateway.enabled` when peering is enabled. [[GH-1683](https://github.com/hashicorp/consul-k8s/pull/1683)]
-
-FEATURES:
-* CLI:
- * Add the ability to install HCP self-managed clusters. [[GH-1540](https://github.com/hashicorp/consul-k8s/pull/1540)]
- * Add the ability to install the HashiCups demo application via the -demo flag. [[GH-1540](https://github.com/hashicorp/consul-k8s/pull/1540)]
-* Consul Dataplane:
- * Support merged metrics with consul-dataplane. [[GH-1635](https://github.com/hashicorp/consul-k8s/pull/1635)]
- * Support transparent proxying when using consul-dataplane. [[GH-1625](https://github.com/hashicorp/consul-k8s/pull/1478),[GH-1632](https://github.com/hashicorp/consul-k8s/pull/1632)]
- * Enable sync-catalog to only talk to Consul servers. [[GH-1659](https://github.com/hashicorp/consul-k8s/pull/1659)]
-* Ingress Gateway:
- * Add support for MaxConnections, MaxConcurrentRequests, and MaxPendingRequests to Ingress Gateway CRD. [[GH-1691](https://github.com/hashicorp/consul-k8s/pull/1691)]
-* Peering:
- * Support peering over mesh gateways.
- * Add support for `PeerThroughMeshGateways` in Mesh CRD. [[GH-1478](https://github.com/hashicorp/consul-k8s/pull/1478)]
-
-IMPROVEMENTS:
-* CLI:
- * `consul-k8s status` command will only show status of servers if they are expected to be present in the Kubernetes cluster.
[[GH-1603](https://github.com/hashicorp/consul-k8s/pull/1603)]
- * Update demo charts and CLI command to not presume tproxy when using the HCP preset. Also, use the most recent version of HashiCups. [[GH-1657](https://github.com/hashicorp/consul-k8s/pull/1657)]
- * Update minimum go version for project to 1.19 [[GH-1633](https://github.com/hashicorp/consul-k8s/pull/1633)]
- * Enable `consul-k8s uninstall` to delete custom resources when uninstalling Consul. This is done by default. [[GH-1623](https://github.com/hashicorp/consul-k8s/pull/1623)]
-* Control Plane
- * Update minimum go version for project to 1.19 [[GH-1633](https://github.com/hashicorp/consul-k8s/pull/1633)]
- * Remove unneeded `agent:read` ACL permissions from mesh gateway policy. [[GH-1255](https://github.com/hashicorp/consul-k8s/pull/1255)]
- * Support updating health checks on consul clients during an upgrade to agentless. [[GH-1690](https://github.com/hashicorp/consul-k8s/pull/1690)]
- * Remove unused curl from Docker images. [[GH-1624](https://github.com/hashicorp/consul-k8s/pull/1624)]
- * Bump Dockerfile base image for RedHat UBI `consul-k8s-control-plane` image to `ubi-minimal:9.1`. [[GH-1725](https://github.com/hashicorp/consul-k8s/pull/1725)]
-* Helm:
- * Remove deprecated annotation `service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"` in the `server-service` template. [[GH-1619](https://github.com/hashicorp/consul-k8s/pull/1619)]
- * Support `minAvailable` on connect injector `PodDisruptionBudget`. [[GH-1557](https://github.com/hashicorp/consul-k8s/pull/1557)]
- * Add `tolerations` and `nodeSelector` to Server ACL init jobs and `nodeSelector` to Webhook cert manager. [[GH-1581](https://github.com/hashicorp/consul-k8s/pull/1581)]
- * API Gateway: Add `tolerations` to `apiGateway.managedGatewayClass` and `apiGateway.controller`. [[GH-1650](https://github.com/hashicorp/consul-k8s/pull/1650)]
- * API Gateway: Create PodSecurityPolicy for controller when `global.enablePodSecurityPolicies=true`. [[GH-1656](https://github.com/hashicorp/consul-k8s/pull/1656)]
- * API Gateway: Create PodSecurityPolicy and allow controller to bind it to ServiceAccounts that it creates for Gateway Deployments when `global.enablePodSecurityPolicies=true`. [[GH-1672](https://github.com/hashicorp/consul-k8s/pull/1672)]
- * Deploy `expose-servers` service only when Admin Partitions (ENT) is enabled. [[GH-1683](https://github.com/hashicorp/consul-k8s/pull/1683)]
- * Use a distroless image for `consul-dataplane`. [[GH-1676](https://github.com/hashicorp/consul-k8s/pull/1676)]
- * The Envoy version is now 1.24.0 for `consul-dataplane`. [[GH-1676](https://github.com/hashicorp/consul-k8s/pull/1676)]
- * Allow addition of extra labels to Connect Inject pods. [[GH-1678](https://github.com/hashicorp/consul-k8s/pull/1678)]
- * Add fields `localConnectTimeoutMs` and `localRequestTimeoutMs` to the `ServiceDefaults` CRD. [[GH-1647](https://github.com/hashicorp/consul-k8s/pull/1647)]
- * API Gateway: Enable API Gateways to directly connect to Consul servers when running in the agentless configuration. [[GH-1694](https://github.com/hashicorp/consul-k8s/pull/1694)]
- * Add `connectInject.consulNode.meta` to allow users to provide custom metadata to append to the NodeMeta. [[GH-1707](https://github.com/hashicorp/consul-k8s/pull/1707)]
- * Add `externalServers.skipServerWatch` which prevents consul-dataplane from consuming the server update stream. This is useful for situations where Consul servers are behind a load balancer.
[[GH-1686](https://github.com/hashicorp/consul-k8s/pull/1686)] - * API Gateway: Allow controller to read MeshServices for use as a route backend. [[GH-1574](https://github.com/hashicorp/consul-k8s/pull/1574)] - * API Gateway: Add support for using dynamic server discovery strings when running without agents. [[GH-1732](https://github.com/hashicorp/consul-k8s/pull/1732)] - -BUG FIXES: -* CLI - * Allow optional environment variables for use in the cloud preset to the CLI for cluster bootstrapping. [[GH-1608](https://github.com/hashicorp/consul-k8s/pull/1608)] - * Configure `-tls-server-name` when `global.cloud.enabled=true` so that it matches the server certificate created via HCP [[GH-1591](https://github.com/hashicorp/consul-k8s/pull/1591)] - * Do not query clients in the status command since clients no longer exist. [[GH-1573](https://github.com/hashicorp/consul-k8s/pull/1573)] -* Peering - * Add `peering:read` permissions to mesh gateway token to fix peering connections through the mesh gateways. [[GH-1685](https://github.com/hashicorp/consul-k8s/pull/1685)] -* Helm: - * Disable PodSecurityPolicies in all templates when `global.enablePodSecurityPolicies` is `false`. [[GH-1693](https://github.com/hashicorp/consul-k8s/pull/1693)] - ## 0.49.1 (November 14, 2022) BREAKING CHANGES: * Peering: @@ -217,7 +63,7 @@ IMPROVEMENTS: * API Gateway: Create PodSecurityPolicy for controller when `global.enablePodSecurityPolicies=true`. [[GH-1656](https://github.com/hashicorp/consul-k8s/pull/1656)] * API Gateway: Create PodSecurityPolicy and allow controller to bind it to ServiceAccounts that it creates for Gateway Deployments when `global.enablePodSecurityPolicies=true`. [[GH-1672](https://github.com/hashicorp/consul-k8s/pull/1672)] -## 0.49.0 (September 29, 2022) +## 0.49.0 (September 30, 2022) FEATURES: * CLI: @@ -251,9 +97,6 @@ FEATURES: * Kubernetes 1.24 Support * Add support for Kubernetes 1.24 where ServiceAccounts no longer have long-term JWT tokens. [[GH-1431](https://github.com/hashicorp/consul-k8s/pull/1431)] * Upgrade kubeVersion in helm chart to support Kubernetes 1.21+. -* Cluster Peering: - * Add support for setting failover `Targets` on the Service Resolver CRD. [[GH-1284](https://github.com/hashicorp/consul-k8s/pull/1284)] - * Add support for redirecting to cluster peers on the Service Resolver CRD. [[GH-1284](https://github.com/hashicorp/consul-k8s/pull/1284)] BREAKING CHANGES: * Kubernetes 1.24 Support @@ -1816,3 +1659,4 @@ Features: ## 0.1.0 (September 26, 2018) * Initial release + diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 510d4c3b3f..14875e1d55 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -22,9 +22,9 @@ 1. [Running the tests](#running-the-tests) 1. [Writing Unit tests](#writing-unit-tests) 1. [Writing Acceptance tests](#writing-acceptance-tests) -1. [Using the Acceptance Test Framework to Debug](#using-acceptance-test-framework-to-debug) 1. [Helm Reference Docs](#helm-reference-docs) + ## Contributing 101 ### Building and running `consul-k8s-control-plane` @@ -349,7 +349,7 @@ rebase the branch on main, fixing any conflicts along the way before the code ca ... IngressGateway string = "ingressgateway" ``` -1. Update `control-plane/subcommand/inject-connect/command.go` and add your controller: +1. 
Update `control-plane/subcommand/controller/command.go` and add your controller:
 ```go
 if err = (&controller.IngressGatewayController{
 ConfigEntryController: configEntryReconciler,
@@ -361,7 +361,7 @@ rebase the branch on main, fixing any conflicts along the way before the code ca
 return 1
 }
 ```
-1. Update `control-plane/subcommand/inject-connect/command.go` and add your webhook (the path should match the kubebuilder annotation):
+1. Update `control-plane/subcommand/controller/command.go` and add your webhook (the path should match the kubebuilder annotation):
 ```go
 mgr.GetWebhookServer().Register("/mutate-v1alpha1-ingressgateway",
 &webhook.Admission{Handler: &v1alpha1.IngressGatewayWebhook{
@@ -424,27 +424,20 @@ rebase the branch on main, fixing any conflicts along the way before the code ca
 manage your resource type.

### Testing A New CRD
-1. Build a Docker image for consul-k8s via `make control-plane-dev-docker` and push to a docker repository:
- ```
- docker tag consul-k8s-control-plane-dev /consul-k8s-control-plane-dev:
- docker push /consul-k8s-control-plane-dev:
- ```
+1. Build a Docker image for consul-k8s via `make dev-docker` and tag your image appropriately. Remember to `cd` into the `control-plane` directory first!
 1. Install using the updated Helm repository, with a values file like:
 ```yaml
 global:
- imageK8S: lkysow/consul-k8s-control-plane-dev:nov26
+ imageK8S: ghcr.io/lkysow/consul-k8s-dev:nov26
 name: consul
 server:
 replicas: 1
 bootstrapExpect: 1
- ui:
- enabled: true
- connectInject:
+ controller:
 enabled: true
 ```
-1. Create a sample CRD
-1. Run `kubectl apply -f ` to apply your sample CRD.
-1. Check its synced status (for example, a CRD called ingressgateway):
+1. `kubectl apply` your sample CRD.
+1. Check its synced status:
 ```bash
 kubectl get ingressgateway
 NAME SYNCED AGE
@@ -639,7 +632,7 @@ You can run other tests by enabling them by passing appropriate flags to `go tes
For example, to run mesh gateway tests, which require two Kubernetes clusters,
you may use the following command:

- go test ./... -p 1 -timeout 20m \
+ go test ./charts/consul/... -p 1 -timeout 20m \
 -enable-multi-cluster \
 -kubecontext= \
 -secondary-kubecontext=
@@ -886,7 +879,7 @@ func TestExample(t *testing.T) {
 }
 ```
-Please see [wan federation tests](acceptance/tests/wan-federation/wan_federation_test.go)
+Please see [mesh gateway tests](acceptance/tests/mesh-gateway/mesh_gateway_test.go)
for an example of how to write a test that uses multiple contexts.

#### Writing Assertions
@@ -947,198 +940,6 @@ Here are some things to consider before adding a test:

---

-## Using Acceptance Test Framework to Debug
-### Acceptance Tests
-
-The [consul-k8s](https://github.com/hashicorp/consul-k8s) repository has an extensive list of [acceptance](https://github.com/hashicorp/consul-k8s/tree/main/acceptance/tests)
-tests that are used by CI to run per-PR and nightly acceptance tests.
-It is built on its own framework that uses Helm and the consul-k8s CLI to deploy consul (and other tools) in various
-configurations, providing test coverage for most existing features, including more advanced deployments
-than are typically covered in guides.
-Importantly, it is **automated**, so you are able to rapidly deploy known working
-configurations in known working environments.
-It can be very helpful for bootstrapping complex environments such as when using Vault as a CA for Consul or for federating test clusters.
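To make that workflow concrete, here is a minimal sketch of a debugging-oriented acceptance test. `deployConsul` and `verifyMesh` are hypothetical stand-ins for the framework's real Helm-cluster and fixture helpers; the point is only the shape of the test and the park-the-environment trick expanded on in the steps below:

```go
package acceptance

import (
	"testing"
	"time"
)

// deployConsul and verifyMesh are hypothetical stand-ins for the framework's
// helpers (Helm install, fixture deployment, connection checks).
func deployConsul(t *testing.T) { t.Log("install the Helm chart into kind-dc1") }
func verifyMesh(t *testing.T)   { t.Log("deploy static-server/static-client and curl between them") }

func TestDebugEnvironment(t *testing.T) {
	deployConsul(t)
	verifyMesh(t)

	// Park the test so the clusters stay up for inspection with kubectl --
	// the same trick recommended in the steps below. Remove this once you
	// are done debugging.
	time.Sleep(1 * time.Hour)
}
```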
-
-The tests are organized like this:
-```shell
-demo $ tree -L 1 -d acceptance/tests
-acceptance/tests
-├── basic
-├── cli
-├── config-entries
-├── connect
-├── consul-dns
-├── example
-├── fixtures
-├── ingress-gateway
-├── metrics
-├── partitions
-├── peering
-├── snapshot-agent
-├── sync
-├── terminating-gateway
-├── vault
-└── wan-federation
-```
-
-### Basic Running of Tests
-Any given test can be run either through GoLand or another IDE, or via the command line using `go test -run`.
-
-To run all of the connect tests from the command line:
-```shell
-$ cd acceptance/tests
-$ go test ./connect/... -v -p 1 -timeout 2h -failfast -use-kind -no-cleanup-on-failure -kubecontext=kind-dc1 -secondary-kubecontext=kind-dc2 -enable-enterprise -enable-multi-cluster -debug-directory=/tmp/debug -consul-k8s-image=kyleschochenmaier/consul-k8s-acls
-```
-
-When running from the command line, a few things are important:
-* Some tests use Enterprise features, in which case you need to:
- * Set the environment variables `CONSUL_ENT_LICENSE` and possibly `VAULT_LICENSE`.
- * Use `-enable-enterprise` on the command line when running the test.
-* Multi-cluster tests require `-enable-multi-cluster -kubecontext=kind-dc1 -secondary-kubecontext=kind-dc2`
-* Using `.//...` is required as part of the command-line to pick up necessary environmental config.
-
-### Using the framework to debug in an environment
-=> NOTE: It is helpful to tune the Docker Desktop resource settings so that Docker has at least 4GB of memory, plenty of CPU cores, and 2GB of swap.
-
-* If using Kind, `-use-kind` should be added, and be sure your cluster is up and running:
-```shell
-$ kind create cluster --name=dc1 && kind create cluster --name=dc2
-```
-* Pick a test which replicates the environment you want to work with.
- Ex: pick a test from `partitions/` or `vault/` or `connect/`.
-* If you need the environment to persist, add a `time.Sleep(1*time.Hour)` to the end of the test in the test file.
-* Use the following flags if you need to use or test out a specific consul/k8s image:
- `-consul-k8s-image=` && `-consul-image=`
-* You can set custom Helm flags by modifying the test file directly in the respective directory.
-
-Finally, run the test as shown above:
-```shell
-$ cd acceptance/tests
-$ go test -run Vault_WANFederationViaGateways ./vault/... -p 1 -timeout 2h -failfast -use-kind -no-cleanup-on-failure -kubecontext=kind-dc1 -secondary-kubecontext=kind-dc2 -enable-multi-cluster -debug-directory=/tmp/debug
-```
-You can now interact with the running Kubernetes clusters using `kubectl [COMMAND] --context=`
-
-* `kind delete clusters --all` is helpful for cleanup!
-
-### Example Debugging session using the acceptance test framework to bootstrap and debug a Vault-backed federated Consul installation:
-This test utilizes the `consul-k8s` acceptance test framework, with a custom consul-k8s branch which:
-* Modifies the acceptance test to use custom consul+consul-k8s images and sleeps at the end of the test to allow analysis.
-* Modifies the Helm chart to pass in `connect_ca.intermediate_cert_ttl` and `connect_ca.leaf_cert_ttl` in the `server-configmap`.
-
-1. First clone the consul-k8s repo and then check out the branch locally: `git checkout origin/consul-vault-provider-wanfed-acceptance`.
-2. Start the kind clusters: `kind create cluster --name=dc1 && kind create cluster --name=dc2`
-3.
run the `TestVault_WANFederationViaGateways` acceptance test in `acceptance/tests/vault/vault_wan_fed_test.go` - I use GoLand, but this command should get you most of the way:
-```shell
-$ cd acceptance/tests
-$ go test -run Vault_WANFederationViaGateways ./vault/... -p 1 -timeout 2h -failfast -use-kind -no-cleanup-on-failure -kubecontext=kind-dc1 -secondary-kubecontext=kind-dc2 -enable-multi-cluster -debug-directory=/tmp/debug
-```
-NOTE: This specific acceptance test is considered FLAKY with Kind; if things don't come up, it's best to run against GKE/AKS/etc., in which case you just modify the `kubecontext` command parameters to point to your clusters. It is worth noting that you will need to set up any necessary networking for non-Kind clusters manually.
-
-NOTE: This test requires a VAULT_LICENSE set as an environment variable in the shell where you run `go test`.
-
-4. Wait 10-20 minutes to allow the first intermediate CA renewal. This test is particularly resource-intensive, so it can take time for everything to come online on a laptop; use `kubectl get pods` to validate that `static-server` and `static-client` have been deployed and are online.
-
-You can validate the ICA rotation by doing:
-```shell
-# Fetch the Vault root token:
-$ kubectl get secrets -root-token -o json //----> b64 decode the `data.token` field.
-$ kubectl exec -it -- sh
-$ export VAULT_TOKEN=
-$ export VAULT_ADDR=https://-vault:8200
-
-# Fetch the Consul bootstrap token
-$ vault kv get consul/secret/bootstrap
-
-# Examine the Vault issuers; there should be 2 by now if ICA renewal has occurred:
-# NOTE: for a federated setup the issuers URL for dc2 is `vault list dc2/connect_inter/issuers`!
-$ vault list dc1/connect_inter/issuers
-
-Keys
-----
-29bdffbd-87ec-cfe0-fd05-b78f99eba243
-344eea3c-f085-943a-c3ff-66721ef408f4
-
-# Now log in to the consul-server
-$ kubectl exec -it -- sh
-$ export CONSUL_HTTP_TOKEN=
-$ export CONSUL_HTTP_ADDR=https://localhost:8501
-$ export CONSUL_HTTP_SSL_VERIFY=false
-
-# Read the `connect/ca/roots` endpoint:
-# It should change + rotate with the expiration of the ICA (defined by `intermediate_cert_ttl`, which is `15m` in the branch for this gist).
-
-$ curl -k --header "X-Consul-Token: 1428da53-5e88-db1a-6ad5-e50212b011da" https://127.0.0.1:8501/v1/agent/connect/ca/roots | jq - .
- % Total % Received % Xferd Average Speed Time Time Time Current - Dload Upload Total Spent Left Speed -100 3113 100 3113 0 0 6222 0 --:--:-- --:--:-- --:--:-- 7705 -{ - "ActiveRootID": "36:be:19:0e:56:d1:c2:1a:d8:54:22:97:88:3c:91:17:1d:d2:d3:e0", - "TrustDomain": "34a76791-b9b2-b93e-b0e4-1989ed11a28e.consul", - "Roots": [ - { - "ID": "36:be:19:0e:56:d1:c2:1a:d8:54:22:97:88:3c:91:17:1d:d2:d3:e0", - "Name": "Vault CA Primary Cert", - "SerialNumber": 15998414315735550000, - "SigningKeyID": "fe:b9:d6:0b:c6:ce:2c:25:4f:d8:59:cb:11:ea:a5:42:5f:8e:41:4b", - "ExternalTrustDomain": "34a76791-b9b2-b93e-b0e4-1989ed11a28e", - "NotBefore": "2022-11-16T20:16:15Z", - "NotAfter": "2032-11-13T20:16:45Z", - "RootCert": "-----BEGIN CERTIFICATE-----\nMIICLDCCAdKgAwIBAgIUKQ9BPHF9mtC7yFPC3gXJDpLxCHIwCgYIKoZIzj0EAwIw\nLzEtMCsGA1UEAxMkcHJpLTEwOTJudTEudmF1bHQuY2EuMzRhNzY3OTEuY29uc3Vs\nMB4XDTIyMTExNjIwMTYxNVoXDTMyMTExMzIwMTY0NVowLzEtMCsGA1UEAxMkcHJp\nLTEwOTJudTEudmF1bHQuY2EuMzRhNzY3OTEuY29uc3VsMFkwEwYHKoZIzj0CAQYI\nKoZIzj0DAQcDQgAETnpGixC1kW8ep2JcGjRR2jbdESvjlEm9nSIWVAcilemUGFwi\nJ0YW0XUmJeEzRyfwLXnOw6voPzXRf1zXKjdTD6OByzCByDAOBgNVHQ8BAf8EBAMC\nAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUtb6EjDxyI+myIjDc+7KbiN8u\n8XowHwYDVR0jBBgwFoAUtb6EjDxyI+myIjDc+7KbiN8u8XowZQYDVR0RBF4wXIIk\ncHJpLTEwOTJudTEudmF1bHQuY2EuMzRhNzY3OTEuY29uc3VshjRzcGlmZmU6Ly8z\nNGE3Njc5MS1iOWIyLWI5M2UtYjBlNC0xOTg5ZWQxMWEyOGUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCIHBezFSQAK5Nolf0rs3ErvlDcA8Z9esldh6gHupuGsNkAiEA\n9qL+P9PJAW4CrbTL0iF2yZUyJC2nwSSa2K0nYG8bXWQ=\n-----END CERTIFICATE-----\n", - "IntermediateCerts": [ - "-----BEGIN CERTIFICATE-----\nMIICLzCCAdSgAwIBAgIUbILCP3ODM4ScNBOm0jw59Fxju0swCgYIKoZIzj0EAwIw\nLzEtMCsGA1UEAxMkcHJpLTEwOTJudTEudmF1bHQuY2EuMzRhNzY3OTEuY29uc3Vs\nMB4XDTIyMTExNjIwMzIxNloXDTIyMTExNjIwNDc0NlowMDEuMCwGA1UEAxMlcHJp\nLTE4MThxNWlnLnZhdWx0LmNhLjM0YTc2NzkxLmNvbnN1bDBZMBMGByqGSM49AgEG\nCCqGSM49AwEHA0IABI30ikgrwTjbPaGgfNYkushvrEUUpxLzxMMEBlE82ilog1RW\nqwuEU29Qsa+N4SrfOf37xNv/Ey8SXPs5l2HmXJWjgcwwgckwDgYDVR0PAQH/BAQD\nAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFCZpC/BTdaggL2kj6Dfyk3+a\nNqBvMB8GA1UdIwQYMBaAFLW+hIw8ciPpsiIw3Puym4jfLvF6MGYGA1UdEQRfMF2C\nJXByaS0xODE4cTVpZy52YXVsdC5jYS4zNGE3Njc5MS5jb25zdWyGNHNwaWZmZTov\nLzM0YTc2NzkxLWI5YjItYjkzZS1iMGU0LTE5ODllZDExYTI4ZS5jb25zdWwwCgYI\nKoZIzj0EAwIDSQAwRgIhAJ8RHgR5qkyW2q866vGYJy+7BJ4zUXs3OJ76QLmxxU3K\nAiEA70S7wBEm1ZduTAk1ZfZPJEUGxvAXAcgy7EWeO/6MJ5o=\n-----END CERTIFICATE-----\n", - "-----BEGIN CERTIFICATE-----\nMIICLTCCAdKgAwIBAgIUU3qwESuhh4PgW3/tnHDn3qnBMrAwCgYIKoZIzj0EAwIw\nLzEtMCsGA1UEAxMkcHJpLTEwOTJudTEudmF1bHQuY2EuMzRhNzY3OTEuY29uc3Vs\nMB4XDTIyMTExNjIwNDAxNloXDTIyMTExNjIwNTU0NlowLzEtMCsGA1UEAxMkcHJp\nLTFkY2hkbGkudmF1bHQuY2EuMzRhNzY3OTEuY29uc3VsMFkwEwYHKoZIzj0CAQYI\nKoZIzj0DAQcDQgAEpj0BWPkcH82su9XGOo9rN5Zr5+Jyp68LiHy+qlIgH3L+OAir\nYgmXmJfuNwI8S2BB8cu0Gk3w5cTF7O0p/qAghaOByzCByDAOBgNVHQ8BAf8EBAMC\nAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU/rnWC8bOLCVP2FnLEeqlQl+O\nQUswHwYDVR0jBBgwFoAUtb6EjDxyI+myIjDc+7KbiN8u8XowZQYDVR0RBF4wXIIk\ncHJpLTFkY2hkbGkudmF1bHQuY2EuMzRhNzY3OTEuY29uc3VshjRzcGlmZmU6Ly8z\nNGE3Njc5MS1iOWIyLWI5M2UtYjBlNC0xOTg5ZWQxMWEyOGUuY29uc3VsMAoGCCqG\nSM49BAMCA0kAMEYCIQCtq4LiZzkiIKUES9MrzUEflg7wcwQf7Km+8RcOGQbz9QIh\nANWHWt1fe8Hl1wQ55qxsV5lSfOpGAox5WHpgnsBC7cwU\n-----END CERTIFICATE-----\n" - ], - "Active": true, - "PrivateKeyType": "ec", - "PrivateKeyBits": 256, - "CreateIndex": 11, - "ModifyIndex": 797 - } - ] -} - -# You can x509 decode the ICA certs to verify they have been updated and have correct expiry: -$ openssl x509 -in cert.crt -text -noout -Certificate: - Data: - Version: 3 (0x2) - 
Serial Number: - 53:7a:b0:11:2b:a1:87:83:e0:5b:7f:ed:9c:70:e7:de:a9:c1:32:b0 - Signature Algorithm: ecdsa-with-SHA256 - Issuer: CN=pri-1092nu1.vault.ca.34a76791.consul - Validity - Not Before: Nov 16 20:40:16 2022 GMT - Not After : Nov 16 20:55:46 2022 GMT - Subject: CN=pri-1dchdli.vault.ca.34a76791.consul - Subject Public Key Info: - Public Key Algorithm: id-ecPublicKey - Public-Key: (256 bit) - pub: - 04:a6:3d:01:58:f9:1c:1f:cd:ac:bb:d5:c6:3a:8f: - 6b:37:96:6b:e7:e2:72:a7:af:0b:88:7c:be:aa:52: - 20:1f:72:fe:38:08:ab:62:09:97:98:97:ee:37:02: - 3c:4b:60:41:f1:cb:b4:1a:4d:f0:e5:c4:c5:ec:ed: - 29:fe:a0:20:85 - ASN1 OID: prime256v1 - NIST CURVE: P-256 - X509v3 extensions: - X509v3 Key Usage: critical - Certificate Sign, CRL Sign - X509v3 Basic Constraints: critical - CA:TRUE - X509v3 Subject Key Identifier: - FE:B9:D6:0B:C6:CE:2C:25:4F:D8:59:CB:11:EA:A5:42:5F:8E:41:4B - X509v3 Authority Key Identifier: - keyid:B5:BE:84:8C:3C:72:23:E9:B2:22:30:DC:FB:B2:9B:88:DF:2E:F1:7A - - X509v3 Subject Alternative Name: - DNS:pri-1dchdli.vault.ca.34a76791.consul, URI:spiffe://34a76791-b9b2-b93e-b0e4-1989ed11a28e.consul - -``` - ---- - ## Helm Reference Docs The Helm reference docs (https://www.consul.io/docs/k8s/helm) are automatically diff --git a/LICENSE b/LICENSE.md similarity index 99% rename from LICENSE rename to LICENSE.md index 74f38c0103..82b4de97c7 100644 --- a/LICENSE +++ b/LICENSE.md @@ -1,5 +1,3 @@ -Copyright (c) 2018 HashiCorp, Inc. - Mozilla Public License, version 2.0 1. Definitions diff --git a/Makefile b/Makefile index 6ad59a2a91..43a8e16b0d 100644 --- a/Makefile +++ b/Makefile @@ -75,16 +75,6 @@ kind-cni: kind create cluster --config=$(CURDIR)/acceptance/framework/environment/cni-kind/kind.config --name dc2 --image kindest/node:v1.23.6 make kind-cni-calico -# Perform a terraform fmt check but don't change anything -terraform-fmt-check: - @$(CURDIR)/control-plane/build-support/scripts/terraformfmtcheck.sh $(TERRAFORM_DIR) -.PHONY: terraform-fmt-check - -# Format all terraform files according to terraform fmt -terraform-fmt: - @terraform fmt -recursive -.PHONY: terraform-fmt - # ===========> CLI Targets diff --git a/README.md b/README.md index 1d3a3733ab..35b3c4b762 100644 --- a/README.md +++ b/README.md @@ -4,6 +4,9 @@ **We're looking for feedback on how folks are using Consul on Kubernetes. Please fill out our brief [survey](https://hashicorp.sjc1.qualtrics.com/jfe/form/SV_4MANbw1BUku7YhL)!** + +> **Warning** +> Please read the following issue to learn more about upcoming breaking changes that will be implemented by Q4 2022 for the default deployment of Consul on Kubernetes: [Enabling of service mesh by default and disabling of node-level client agents from Consul Service Mesh on Kubernetes and Catalog Sync](https://github.com/hashicorp/consul-k8s/issues/1438) ## Overview @@ -49,47 +52,28 @@ by contacting us at [security@hashicorp.com](mailto:security@hashicorp.com). * Consul K8s CLI, distributed as `consul-k8s`, can be used to install and uninstall Consul Kubernetes. See the [Consul K8s CLI Reference](https://www.consul.io/docs/k8s/k8s-cli) for more details on usage. -### Prerequisites - -The following pre-requisites must be met before installing Consul on Kubernetes. + * Raw binaries are available in the [HashiCorp releases directory](https://releases.hashicorp.com/consul-k8s/). + These can be used to run `consul-k8s` directly or build custom packages. - * **Kubernetes 1.23.x - 1.26.x** - This represents the earliest versions of Kubernetes tested. 
- It is possible that this chart works with earlier versions, but it is - untested. - * Helm install - * **Helm 3.6+** for Helm based installs. - * Consul K8s CLI based install - * `kubectl` configured to authenticate to a Kubernetes cluster with a valid `kubeconfig` file. - * `brew`, `yum`, or `apt` package manager on your local machine +## Helm -### CLI +Within the ['charts/consul'](charts/consul) directory is the official HashiCorp Helm chart for installing +and configuring Consul on Kubernetes. This chart supports multiple use +cases of Consul on Kubernetes, depending on the values provided. -The Consul K8s CLI is the easiest way to get up and running with Consul on Kubernetes. See [Install Consul on K8s CLI](https://developer.hashicorp.com/consul/docs/k8s/installation/install-cli#install-the-cli) for more details on installation, and refer to -[Consul on Kubernetes CLI Reference](https://developer.hashicorp.com/consul/docs/k8s/k8s-cli) for more details on subcommands and a list of all available flags -for each subcommand. +For full documentation on this Helm chart along with all the ways you can +use Consul with Kubernetes, please see the +[Consul and Kubernetes documentation](https://www.consul.io/docs/platform/k8s/index.html). +### Prerequisites + * **Helm 3.2+** (Helm 2 is not supported) + * **Kubernetes 1.21-1.24** - This is the earliest version of Kubernetes tested. + It is possible that this chart works with earlier versions, but it is + untested. - 1. Install the HashiCorp tap, which is a repository of all Homebrew packages for HashiCorp: - - ``` bash - brew tap hashicorp/tap - ``` - -2. Install the Consul K8s CLI with hashicorp/tap/consul formula. - - ``` bash - brew install hashicorp/tap/consul-k8s - ``` - -3. Issue the install subcommand to install Consul on Kubernetes: - - ``` bash - consul-k8s install - ``` - -### Helm +### Usage -The Helm chart is ideal for those who prefer to use Helm for automation for either the installation or upgrade of Consul on Kubernetes. The chart supports multiple use cases of Consul on Kubernetes, depending on the values provided. Detailed installation instructions for Consul on Kubernetes are found [here](https://www.consul.io/docs/k8s/installation/overview). +Detailed installation instructions for Consul on Kubernetes are found [here](https://www.consul.io/docs/k8s/installation/overview). 1. Add the HashiCorp Helm repository: @@ -114,7 +98,7 @@ Please see the many options supported in the `values.yaml` file. These are also fully documented directly on the [Consul website](https://www.consul.io/docs/platform/k8s/helm.html). -## Tutorials +# Tutorials You can find examples and complete tutorials on how to deploy Consul on Kubernetes using Helm on the [HashiCorp Learn website](https://learn.hashicorp.com/collections/consul/kubernetes). 
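As an aside on the `entImage` change in the acceptance config diff that follows: the framework derives the enterprise image from the OSS image tag by splitting off any pre-release suffix and re-appending it after the `-ent` marker. A simplified, standalone sketch of that rewrite (the real function also parses the image name and returns errors, which are omitted here):

```go
package main

import (
	"fmt"
	"strings"
)

// entImageFromOSS rewrites an OSS consul image reference into the matching
// enterprise image: the pre-release suffix (e.g. "-rc1") is split off the
// tag and re-appended after "-ent".
func entImageFromOSS(consulImage string) string {
	tag := consulImage[strings.LastIndex(consulImage, ":")+1:]

	version, preRelease := tag, ""
	if idx := strings.Index(tag, "-"); idx >= 0 {
		version, preRelease = tag[:idx], tag[idx:]
	}
	return fmt.Sprintf("hashicorp/consul-enterprise:%s-ent%s", version, preRelease)
}

func main() {
	// Matches the updated test expectations below:
	// hashicorp/consul:1.8.5-rc1   -> hashicorp/consul-enterprise:1.8.5-ent-rc1
	// hashicorp/consul:1.7.0-beta3 -> hashicorp/consul-enterprise:1.7.0-ent-beta3
	fmt.Println(entImageFromOSS("hashicorp/consul:1.8.5-rc1"))
	fmt.Println(entImageFromOSS("hashicorp/consul:1.7.0-beta3"))
}
```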
diff --git a/acceptance/framework/config/config.go b/acceptance/framework/config/config.go index 8e131c9c59..11a04f1622 100644 --- a/acceptance/framework/config/config.go +++ b/acceptance/framework/config/config.go @@ -52,9 +52,8 @@ type TestConfig struct { NoCleanupOnFailure bool DebugDirectory string - UseAKS bool - UseGKE bool UseKind bool + UseGKE bool helmChartPath string } @@ -153,7 +152,7 @@ func (t *TestConfig) entImage() (string, error) { preRelease = fmt.Sprintf("-%s", split[1]) } - return fmt.Sprintf("hashicorp/consul-enterprise:%s%s-ent", consulImageVersion, preRelease), nil + return fmt.Sprintf("hashicorp/consul-enterprise:%s-ent%s", consulImageVersion, preRelease), nil } // setIfNotEmpty sets key to val in map m if value is not empty. diff --git a/acceptance/framework/config/config_test.go b/acceptance/framework/config/config_test.go index 7733d815db..28fc48b810 100644 --- a/acceptance/framework/config/config_test.go +++ b/acceptance/framework/config/config_test.go @@ -138,11 +138,11 @@ func TestConfig_HelmValuesFromConfig_EntImage(t *testing.T) { }, { consulImage: "hashicorp/consul:1.8.5-rc1", - expImage: "hashicorp/consul-enterprise:1.8.5-rc1-ent", + expImage: "hashicorp/consul-enterprise:1.8.5-ent-rc1", }, { consulImage: "hashicorp/consul:1.7.0-beta3", - expImage: "hashicorp/consul-enterprise:1.7.0-beta3-ent", + expImage: "hashicorp/consul-enterprise:1.7.0-ent-beta3", }, { consulImage: "invalid", @@ -173,7 +173,7 @@ func TestConfig_HelmValuesFromConfig_EntImage(t *testing.T) { require.EqualError(t, err, tt.expErr) } else { require.NoError(t, err) - require.Equal(t, tt.expImage, values["global.image"]) + require.Contains(t, values["global.image"], tt.expImage) } }) } diff --git a/acceptance/framework/consul/helm_cluster.go b/acceptance/framework/consul/helm_cluster.go index eab1ba2904..6ae2016eaf 100644 --- a/acceptance/framework/consul/helm_cluster.go +++ b/acceptance/framework/consul/helm_cluster.go @@ -3,18 +3,19 @@ package consul import ( "context" "fmt" + "net" "strings" "testing" "time" "github.com/gruntwork-io/terratest/modules/helm" + terratestk8s "github.com/gruntwork-io/terratest/modules/k8s" terratestLogger "github.com/gruntwork-io/terratest/modules/logger" "github.com/hashicorp/consul-k8s/acceptance/framework/config" "github.com/hashicorp/consul-k8s/acceptance/framework/environment" "github.com/hashicorp/consul-k8s/acceptance/framework/helpers" "github.com/hashicorp/consul-k8s/acceptance/framework/k8s" "github.com/hashicorp/consul-k8s/acceptance/framework/logger" - "github.com/hashicorp/consul-k8s/acceptance/framework/portforward" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/stretchr/testify/require" @@ -34,10 +35,6 @@ type HelmCluster struct { // a bootstrap token from a Kubernetes secret stored in the cluster. ACLToken string - // SkipCheckForPreviousInstallations is a toggle for skipping the check - // if there are any previous installations of this Helm chart in the cluster. - SkipCheckForPreviousInstallations bool - ctx environment.TestContext helmOptions *helm.Options releaseName string @@ -112,9 +109,7 @@ func (h *HelmCluster) Create(t *testing.T) { }) // Fail if there are any existing installations of the Helm chart. 
- if !h.SkipCheckForPreviousInstallations { - helpers.CheckForPriorInstallations(t, h.kubernetesClient, h.helmOptions, "consul-helm", "chart=consul-helm") - } + helpers.CheckForPriorInstallations(t, h.kubernetesClient, h.helmOptions, "consul-helm", "chart=consul-helm") chartName := config.HelmChartPath if h.helmOptions.Version != config.HelmChartPath { @@ -139,11 +134,7 @@ func (h *HelmCluster) Destroy(t *testing.T) { // Ignore the error returned by the helm delete here so that we can // always idempotently clean up resources in the cluster. - h.helmOptions.ExtraArgs = map[string][]string{ - "--wait": nil, - } - err := helm.DeleteE(t, h.helmOptions, h.releaseName, false) - require.NoError(t, err) + _ = helm.DeleteE(t, h.helmOptions, h.releaseName, false) // Retry because sometimes certain resources (like PVC) take time to delete // in cloud providers. @@ -302,7 +293,72 @@ func (h *HelmCluster) Upgrade(t *testing.T, helmValues map[string]string) { func (h *HelmCluster) CreatePortForwardTunnel(t *testing.T, remotePort int) string { serverPod := fmt.Sprintf("%s-consul-server-0", h.releaseName) - return portforward.CreateTunnelToResourcePort(t, serverPod, remotePort, h.helmOptions.KubectlOptions, h.logger) + return h.CreatePortForwardTunnelToResourcePort(t, serverPod, remotePort) +} + +func (h *HelmCluster) CreatePortForwardTunnelToResourcePort(t *testing.T, resourceName string, remotePort int) string { + localPort := terratestk8s.GetAvailablePort(t) + tunnel := terratestk8s.NewTunnelWithLogger( + h.helmOptions.KubectlOptions, + terratestk8s.ResourceTypePod, + resourceName, + localPort, + remotePort, + h.logger) + + // Retry creating the port forward since it can fail occasionally. + retry.RunWith(&retry.Counter{Wait: 1 * time.Second, Count: 3}, t, func(r *retry.R) { + // NOTE: It's okay to pass in `t` to ForwardPortE despite being in a retry + // because we're using ForwardPortE (not ForwardPort) so the `t` won't + // get used to fail the test, just for logging. + require.NoError(r, tunnel.ForwardPortE(t)) + }) + + doneChan := make(chan bool) + + t.Cleanup(func() { + close(doneChan) + }) + + go h.monitorPortForwardedServer(t, localPort, tunnel, doneChan, resourceName, remotePort) + + return fmt.Sprintf("127.0.0.1:%d", localPort) +} + +func (h *HelmCluster) monitorPortForwardedServer(t *testing.T, port int, tunnel *terratestk8s.Tunnel, doneChan chan bool, resourceName string, remotePort int) { + ticker := time.NewTicker(1 * time.Second) + defer ticker.Stop() + + for { + select { + case <-doneChan: + logger.Log(t, "stopping monitor of the port-forwarded server") + tunnel.Close() + return + case <-ticker.C: + conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", port)) + if err != nil { + logger.Log(t, "lost connection to port-forwarded server; restarting port-forwarding", "port", port) + tunnel.Close() + tunnel = terratestk8s.NewTunnelWithLogger( + h.helmOptions.KubectlOptions, + terratestk8s.ResourceTypePod, + resourceName, + port, + remotePort, + h.logger) + err = tunnel.ForwardPortE(t) + if err != nil { + // If we couldn't establish a port forwarding channel, continue, so we can try again. + continue + } + } + if conn != nil { + // Ignore error because we don't care if connection is closed successfully or not. 
+ _ = conn.Close() + } + } + } } func (h *HelmCluster) SetupConsulClient(t *testing.T, secure bool) (client *api.Client, configAddress string) { @@ -509,8 +565,9 @@ func configureSCCs(t *testing.T, client kubernetes.Interface, cfg *config.TestCo func defaultValues() map[string]string { values := map[string]string{ - "global.logLevel": "debug", - "server.replicas": "1", + "server.replicas": "1", + "connectInject.envoyExtraArgs": "--log-level debug", + "connectInject.logLevel": "debug", // Disable DNS since enabling it changes the policy for the anonymous token, // which could result in tests passing due to that token having privileges to read services // (false positive). diff --git a/acceptance/framework/consul/helm_cluster_test.go b/acceptance/framework/consul/helm_cluster_test.go index af70812f9a..552aed065c 100644 --- a/acceptance/framework/consul/helm_cluster_test.go +++ b/acceptance/framework/consul/helm_cluster_test.go @@ -23,38 +23,41 @@ func TestNewHelmCluster(t *testing.T) { name: "defaults are used when no helmValues are set", helmValues: map[string]string{}, want: map[string]string{ - "global.image": "test-config-image", - "global.logLevel": "debug", - "server.replicas": "1", + "global.image": "test-config-image", + "server.replicas": "1", + "connectInject.envoyExtraArgs": "--log-level debug", + "connectInject.logLevel": "debug", "connectInject.transparentProxy.defaultEnabled": "false", - "dns.enabled": "false", - "server.extraConfig": `"{\"log_level\": \"TRACE\"}"`, - "client.extraConfig": `"{\"log_level\": \"TRACE\"}"`, + "dns.enabled": "false", + "server.extraConfig": `"{\"log_level\": \"TRACE\"}"`, + "client.extraConfig": `"{\"log_level\": \"TRACE\"}"`, }, }, { name: "when using helmValues, defaults are overridden", helmValues: map[string]string{ - "global.image": "test-image", - "global.logLevel": "debug", - "server.bootstrapExpect": "3", - "server.replicas": "3", + "global.image": "test-image", + "server.bootstrapExpect": "3", + "server.replicas": "3", + "connectInject.envoyExtraArgs": "--foo", + "connectInject.logLevel": "debug", "connectInject.transparentProxy.defaultEnabled": "true", - "dns.enabled": "true", - "server.extraConfig": `"{\"foo\": \"bar\"}"`, - "client.extraConfig": `"{\"foo\": \"bar\"}"`, - "feature.enabled": "true", + "dns.enabled": "true", + "server.extraConfig": `"{\"foo\": \"bar\"}"`, + "client.extraConfig": `"{\"foo\": \"bar\"}"`, + "feature.enabled": "true", }, want: map[string]string{ - "global.image": "test-image", - "global.logLevel": "debug", - "server.bootstrapExpect": "3", - "server.replicas": "3", + "global.image": "test-image", + "server.bootstrapExpect": "3", + "server.replicas": "3", + "connectInject.envoyExtraArgs": "--foo", + "connectInject.logLevel": "debug", "connectInject.transparentProxy.defaultEnabled": "true", - "dns.enabled": "true", - "server.extraConfig": `"{\"foo\": \"bar\"}"`, - "client.extraConfig": `"{\"foo\": \"bar\"}"`, - "feature.enabled": "true", + "dns.enabled": "true", + "server.extraConfig": `"{\"foo\": \"bar\"}"`, + "client.extraConfig": `"{\"foo\": \"bar\"}"`, + "feature.enabled": "true", }, }, } diff --git a/acceptance/framework/flags/flags.go b/acceptance/framework/flags/flags.go index 5d90d74f9e..81f8131efb 100644 --- a/acceptance/framework/flags/flags.go +++ b/acceptance/framework/flags/flags.go @@ -41,9 +41,8 @@ type TestFlags struct { flagDebugDirectory string - flagUseAKS bool - flagUseGKE bool flagUseKind bool + flagUseGKE bool flagDisablePeering bool @@ -106,12 +105,10 @@ func (t *TestFlags) init() { 
flag.StringVar(&t.flagDebugDirectory, "debug-directory", "", "The directory where to write debug information about failed test runs, "+ "such as logs and pod definitions. If not provided, a temporary directory will be created by the tests.") - flag.BoolVar(&t.flagUseAKS, "use-aks", false, - "If true, the tests will assume they are running against an AKS cluster(s).") - flag.BoolVar(&t.flagUseGKE, "use-gke", false, - "If true, the tests will assume they are running against a GKE cluster(s).") flag.BoolVar(&t.flagUseKind, "use-kind", false, "If true, the tests will assume they are running against a local kind cluster(s).") + flag.BoolVar(&t.flagUseGKE, "use-gke", false, + "If true, the tests will assume they are running against a GKE cluster(s).") flag.BoolVar(&t.flagDisablePeering, "disable-peering", false, "If true, the peering tests will not run.") @@ -171,8 +168,7 @@ func (t *TestFlags) TestConfigFromFlags() *config.TestConfig { NoCleanupOnFailure: t.flagNoCleanupOnFailure, DebugDirectory: tempDir, - UseAKS: t.flagUseAKS, - UseGKE: t.flagUseGKE, UseKind: t.flagUseKind, + UseGKE: t.flagUseGKE, } } diff --git a/acceptance/framework/k8s/debug.go b/acceptance/framework/k8s/debug.go index 5bf588f959..c1f0c5d7d6 100644 --- a/acceptance/framework/k8s/debug.go +++ b/acceptance/framework/k8s/debug.go @@ -3,8 +3,6 @@ package k8s import ( "context" "fmt" - "io" - "net/http" "os" "path/filepath" "regexp" @@ -14,8 +12,6 @@ import ( terratestLogger "github.com/gruntwork-io/terratest/modules/logger" "github.com/hashicorp/consul-k8s/acceptance/framework/environment" "github.com/hashicorp/consul-k8s/acceptance/framework/logger" - "github.com/hashicorp/consul-k8s/acceptance/framework/portforward" - "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" "github.com/stretchr/testify/require" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -60,39 +56,30 @@ func WritePodsDebugInfoIfFailed(t *testing.T, kubectlOptions *k8s.KubectlOptions // Describe pod and write it to a file. writeResourceInfoToFile(t, pod.Name, "pod", testDebugDirectory, kubectlOptions) + } + + // Get envoy configuration from the mesh gateways, if there are any. + meshGatewayPods, err := client.CoreV1().Pods(kubectlOptions.Namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "component=mesh-gateway"}) + require.NoError(t, err) - // Check if the pod is connect-injected, and if so, dump envoy config information. 
- _, isServiceMeshPod := pod.Annotations[constants.KeyInjectStatus] - _, isGatewayPod := pod.Annotations[constants.AnnotationGatewayKind] - if isServiceMeshPod || isGatewayPod { - localPort := portforward.CreateTunnelToResourcePort(t, pod.Name, 19000, kubectlOptions, terratestLogger.Discard) - - configDumpResp, err := http.DefaultClient.Get(fmt.Sprintf("http://%s/config_dump?format=json", localPort)) - var configDump string - if err != nil { - configDump = fmt.Sprintf("Error getting config_dump: %s: %s", err, configDump) - } else { - configDumpRespBytes, err := io.ReadAll(configDumpResp.Body) - require.NoError(t, err) - configDump = string(configDumpRespBytes) - } - - clustersResp, err := http.DefaultClient.Get(fmt.Sprintf("http://%s/clusters?format=json", localPort)) - var clusters string - if err != nil { - clusters = fmt.Sprintf("Error getting clusters: %s: %s", err, clusters) - } else { - clustersRespBytes, err := io.ReadAll(clustersResp.Body) - require.NoError(t, err) - clusters = string(clustersRespBytes) - } - - // Write config/clusters or err to file name -envoy-[configdump/clusters].json - configDumpFilename := filepath.Join(testDebugDirectory, fmt.Sprintf("%s-envoy-configdump.json", pod.Name)) - clustersFilename := filepath.Join(testDebugDirectory, fmt.Sprintf("%s-envoy-clusters.json", pod.Name)) - require.NoError(t, os.WriteFile(configDumpFilename, []byte(configDump), 0600)) - require.NoError(t, os.WriteFile(clustersFilename, []byte(clusters), 0600)) + for _, mpod := range meshGatewayPods.Items { + // Get configdump from mesh gateway, passing the discard logger since we only need these logs written to the file (below). + configDump, err := RunKubectlAndGetOutputWithLoggerE(t, kubectlOptions, terratestLogger.Discard, "exec", mpod.Name, "-c", "consul-sidecar", "--", "curl", "-s", "localhost:19000/config_dump?format=json") + if err != nil { + configDump = fmt.Sprintf("Error getting config_dump: %s: %s", err, configDump) } + // Get cluster config from mesh gateway, passing the discard logger since we only need these logs written to the file (below). + clusters, err := RunKubectlAndGetOutputWithLoggerE(t, kubectlOptions, terratestLogger.Discard, "exec", mpod.Name, "-c", "consul-sidecar", "--", "curl", "-s", "localhost:19000/clusters?format=json") + if err != nil { + clusters = fmt.Sprintf("Error getting clusters: %s: %s", err, clusters) + } + + // Write config/clusters or err to file name -envoy-[configdump/clusters].json + configDumpFilename := filepath.Join(testDebugDirectory, fmt.Sprintf("%s-envoy-configdump.json", mpod.Name)) + clustersFilename := filepath.Join(testDebugDirectory, fmt.Sprintf("%s-envoy-clusters.json", mpod.Name)) + require.NoError(t, os.WriteFile(configDumpFilename, []byte(configDump), 0600)) + require.NoError(t, os.WriteFile(clustersFilename, []byte(clusters), 0600)) + } // Describe any stateful sets. diff --git a/acceptance/framework/k8s/deploy.go b/acceptance/framework/k8s/deploy.go index 869ebdd804..2a258dcd96 100644 --- a/acceptance/framework/k8s/deploy.go +++ b/acceptance/framework/k8s/deploy.go @@ -96,7 +96,7 @@ func CheckStaticServerConnectionMultipleFailureMessages(t *testing.T, options *k expectedOutput = expectedSuccessOutput } - retrier := &retry.Timer{Timeout: 320 * time.Second, Wait: 2 * time.Second} + retrier := &retry.Timer{Timeout: 160 * time.Second, Wait: 2 * time.Second} args := []string{"exec", "deploy/" + sourceApp, "-c", sourceApp, "--", "curl", "-vvvsSf"} args = append(args, curlArgs...) 
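For context on the 160-second retrier in the hunk above: `retry.Timer` from the Consul SDK's `testutil/retry` package re-runs the wrapped assertion every `Wait` interval until it stops reporting errors or `Timeout` elapses. A minimal sketch of the pattern, with a hypothetical `checkOnce` standing in for a single `kubectl exec ... curl` attempt:

```go
package k8s

import (
	"testing"
	"time"

	"github.com/hashicorp/consul/sdk/testutil/retry"
)

// checkOnce is a hypothetical stand-in for one curl attempt against
// the static-server.
func checkOnce() error { return nil }

// TestRetryPattern retries checkOnce every 2s for up to 160s, mirroring
// the retrier configured in CheckStaticServerConnectionMultipleFailureMessages.
func TestRetryPattern(t *testing.T) {
	retrier := &retry.Timer{Timeout: 160 * time.Second, Wait: 2 * time.Second}
	retry.RunWith(retrier, t, func(r *retry.R) {
		if err := checkOnce(); err != nil {
			r.Errorf("static-server not reachable yet: %v", err)
		}
	})
}
```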
diff --git a/acceptance/framework/portforward/port_forward.go b/acceptance/framework/portforward/port_forward.go deleted file mode 100644 index 97bf3f7856..0000000000 --- a/acceptance/framework/portforward/port_forward.go +++ /dev/null @@ -1,79 +0,0 @@ -package portforward - -import ( - "fmt" - "net" - "testing" - "time" - - terratestk8s "github.com/gruntwork-io/terratest/modules/k8s" - terratestLogger "github.com/gruntwork-io/terratest/modules/logger" - "github.com/hashicorp/consul-k8s/acceptance/framework/logger" - "github.com/hashicorp/consul/sdk/testutil/retry" - "github.com/stretchr/testify/require" -) - -func CreateTunnelToResourcePort(t *testing.T, resourceName string, remotePort int, options *terratestk8s.KubectlOptions, logger terratestLogger.TestLogger) string { - localPort := terratestk8s.GetAvailablePort(t) - tunnel := terratestk8s.NewTunnelWithLogger( - options, - terratestk8s.ResourceTypePod, - resourceName, - localPort, - remotePort, - logger) - - // Retry creating the port forward since it can fail occasionally. - retry.RunWith(&retry.Counter{Wait: 1 * time.Second, Count: 3}, t, func(r *retry.R) { - // NOTE: It's okay to pass in `t` to ForwardPortE despite being in a retry - // because we're using ForwardPortE (not ForwardPort) so the `t` won't - // get used to fail the test, just for logging. - require.NoError(r, tunnel.ForwardPortE(t)) - }) - - doneChan := make(chan bool) - - t.Cleanup(func() { - close(doneChan) - }) - - go monitorPortForwardedServer(t, localPort, tunnel, doneChan, resourceName, remotePort, options, logger) - - return fmt.Sprintf("127.0.0.1:%d", localPort) -} - -func monitorPortForwardedServer(t *testing.T, port int, tunnel *terratestk8s.Tunnel, doneChan chan bool, resourceName string, remotePort int, options *terratestk8s.KubectlOptions, log terratestLogger.TestLogger) { - ticker := time.NewTicker(1 * time.Second) - defer ticker.Stop() - - for { - select { - case <-doneChan: - logger.Log(t, "stopping monitor of the port-forwarded server") - tunnel.Close() - return - case <-ticker.C: - conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", port)) - if err != nil { - logger.Log(t, "lost connection to port-forwarded server; restarting port-forwarding", "port", port) - tunnel.Close() - tunnel = terratestk8s.NewTunnelWithLogger( - options, - terratestk8s.ResourceTypePod, - resourceName, - port, - remotePort, - log) - err = tunnel.ForwardPortE(t) - if err != nil { - // If we couldn't establish a port forwarding channel, continue, so we can try again. - continue - } - } - if conn != nil { - // Ignore error because we don't care if connection is closed successfully or not. 
- _ = conn.Close() - } - } - } -} diff --git a/acceptance/go.mod b/acceptance/go.mod index f81eff4066..b7b11bc27c 100644 --- a/acceptance/go.mod +++ b/acceptance/go.mod @@ -1,16 +1,16 @@ module github.com/hashicorp/consul-k8s/acceptance -go 1.20 +go 1.18 require ( github.com/gruntwork-io/terratest v0.31.2 - github.com/hashicorp/consul-k8s/control-plane v0.0.0-20221117191905-0b1cc2b631e3 - github.com/hashicorp/consul/api v1.16.0 - github.com/hashicorp/consul/sdk v0.12.0 + github.com/hashicorp/consul-k8s/control-plane v0.0.0-20211207212234-aea9efea5638 + github.com/hashicorp/consul/api v1.14.0 + github.com/hashicorp/consul/sdk v0.11.0 github.com/hashicorp/go-uuid v1.0.3 - github.com/hashicorp/go-version v1.6.0 + github.com/hashicorp/go-version v1.2.0 github.com/hashicorp/vault/api v1.2.0 - github.com/stretchr/testify v1.7.2 + github.com/stretchr/testify v1.7.0 gopkg.in/yaml.v2 v2.4.0 k8s.io/api v0.22.2 k8s.io/apimachinery v0.22.2 @@ -18,16 +18,16 @@ require ( ) require ( - cloud.google.com/go v0.81.0 // indirect - github.com/armon/go-metrics v0.4.1 // indirect + cloud.google.com/go v0.54.0 // indirect + github.com/armon/go-metrics v0.3.10 // indirect github.com/armon/go-radix v1.0.0 // indirect github.com/aws/aws-sdk-go v1.30.27 // indirect github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc // indirect github.com/cenkalti/backoff/v3 v3.0.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/evanphx/json-patch v5.6.0+incompatible // indirect - github.com/fatih/color v1.13.0 // indirect + github.com/evanphx/json-patch v4.11.0+incompatible // indirect + github.com/fatih/color v1.12.0 // indirect github.com/ghodss/yaml v1.0.0 // indirect github.com/go-errors/errors v1.0.2-0.20180813162953-d98b870cc4e0 // indirect github.com/go-logr/logr v0.4.0 // indirect @@ -42,7 +42,7 @@ require ( github.com/gruntwork-io/gruntwork-cli v0.7.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - github.com/hashicorp/go-hclog v1.2.2 // indirect + github.com/hashicorp/go-hclog v0.16.2 // indirect github.com/hashicorp/go-immutable-radix v1.3.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-plugin v1.0.1 // indirect @@ -53,14 +53,14 @@ require ( github.com/hashicorp/go-sockaddr v1.0.2 // indirect github.com/hashicorp/golang-lru v0.5.4 // indirect github.com/hashicorp/hcl v1.0.0 // indirect - github.com/hashicorp/serf v0.10.1 // indirect + github.com/hashicorp/serf v0.9.7 // indirect github.com/hashicorp/vault/sdk v0.2.1 // indirect github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb // indirect github.com/imdario/mergo v0.3.12 // indirect github.com/jmespath/go-jmespath v0.3.0 // indirect - github.com/json-iterator/go v1.1.12 // indirect - github.com/mattn/go-colorable v0.1.12 // indirect - github.com/mattn/go-isatty v0.0.14 // indirect + github.com/json-iterator/go v1.1.11 // indirect + github.com/mattn/go-colorable v0.1.8 // indirect + github.com/mattn/go-isatty v0.0.13 // indirect github.com/mitchellh/copystructure v1.0.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/go-testing-interface v1.14.0 // indirect @@ -68,7 +68,7 @@ require ( github.com/mitchellh/reflectwalk v1.0.0 // indirect github.com/moby/spdystream v0.2.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect + 
github.com/modern-go/reflect2 v1.0.1 // indirect github.com/oklog/run v1.0.0 // indirect github.com/onsi/ginkgo v1.16.4 // indirect github.com/onsi/gomega v1.15.0 // indirect @@ -82,23 +82,23 @@ require ( github.com/spf13/pflag v1.0.5 // indirect github.com/urfave/cli v1.22.2 // indirect go.uber.org/atomic v1.7.0 // indirect - golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 // indirect - golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect - golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect - golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 // indirect - golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 // indirect - golang.org/x/text v0.3.7 // indirect - golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect + golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83 // indirect + golang.org/x/net v0.0.0-20211216030914-fe4d6282115f // indirect + golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d // indirect + golang.org/x/sys v0.0.0-20220412211240-33da011f77ad // indirect + golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d // indirect + golang.org/x/text v0.3.6 // indirect + golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c // indirect - google.golang.org/grpc v1.48.0 // indirect - google.golang.org/protobuf v1.28.1 // indirect + google.golang.org/grpc v1.38.0 // indirect + google.golang.org/protobuf v1.26.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/square/go-jose.v2 v2.5.1 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect + gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect k8s.io/klog/v2 v2.9.0 // indirect k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e // indirect - k8s.io/utils v0.0.0-20220812165043-ad590609e2e5 // indirect + k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a // indirect sigs.k8s.io/structured-merge-diff/v4 v4.1.2 // indirect sigs.k8s.io/yaml v1.2.0 // indirect ) diff --git a/acceptance/go.sum b/acceptance/go.sum index 2b154c10f7..8bb13efef3 100644 --- a/acceptance/go.sum +++ b/acceptance/go.sum @@ -10,34 +10,19 @@ cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6T cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0 h1:3ithwDMr7/3vpAMXiH+ZQnYbuIsh+OPhUPMFC9enmn0= cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0 h1:at8Tk2zUz63cLPR0JPWm5vp77pEZmzxEQBEfRKn1VV8= -cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= cloud.google.com/go/bigquery v1.0.1/go.mod 
h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/azure-sdk-for-go v35.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v38.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= @@ -92,14 +77,13 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs= github.com/armon/go-metrics v0.3.3/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= -github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= -github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= +github.com/armon/go-metrics v0.3.10 h1:FR+drcQStOe+32sYyJYyZ7FIdgoGGBnwLl+flodp8Uo= +github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod 
h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= @@ -128,12 +112,7 @@ github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6D github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= @@ -192,20 +171,16 @@ github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.11.0+incompatible h1:glyUF9yIYtMHzn8xaKw5rMhdWcwsYV8dZHIq5567/xs= github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= -github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= -github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fatih/color v1.12.0 
h1:mRhaKNwANqRgUBGKmnI5ZxEk7QXmjQeCcuYFMX2bfcc= +github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= @@ -214,8 +189,8 @@ github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq github.com/frankban/quicktest v1.13.0 h1:yNZif1OkDfNoDfb9zZa9aXIpejNR4F23Wely0c+Qdqk= github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -273,16 +248,12 @@ github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -290,9 +261,7 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= 
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= @@ -305,14 +274,8 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-containerregistry v0.0.0-20200110202235-f4fb41bf00a3/go.mod h1:2wIuQute9+hhWqvL3vEI7YB0EKluF4WcPzI1eAliazk= @@ -321,19 +284,11 @@ github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/ github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -357,17 +312,17 @@ github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:Fecb github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/gruntwork-io/gruntwork-cli v0.7.0 h1:YgSAmfCj9c61H+zuvHwKfYUwlMhu5arnQQLM4RH+CYs= github.com/gruntwork-io/gruntwork-cli v0.7.0/go.mod h1:jp6Z7NcLF2avpY8v71fBx6hds9eOFPELSuD/VPv7w00= github.com/gruntwork-io/terratest v0.31.2 h1:xvYHA80MUq5kx670dM18HInewOrrQrAN+XbVVtytUHg= github.com/gruntwork-io/terratest v0.31.2/go.mod h1:EEgJie28gX/4AD71IFqgMj6e99KP5mi81hEtzmDjxTo= -github.com/hashicorp/consul-k8s/control-plane v0.0.0-20221117191905-0b1cc2b631e3 h1:4wROIZB8Y4cN/wPILChc2zQ/q00z1VyJitdgyLbITdU= -github.com/hashicorp/consul-k8s/control-plane v0.0.0-20221117191905-0b1cc2b631e3/go.mod h1:j9Db/whkzvNC+KP2GftY0HxxleLm9swxXjlu3tYaOAw= -github.com/hashicorp/consul/api v1.16.0 h1:Vf/QVFIwz+PdHR4T4lSwYzLULtbHVq0BheXCUAKP50M= -github.com/hashicorp/consul/api v1.16.0/go.mod h1:GJI1Sif0Wc/iYyqg7EXHJV37IPush6eJTewvYdF9uO8= -github.com/hashicorp/consul/sdk v0.12.0 h1:qsNQToBEs9v5MUWOv/JhiOu4wPeq9VdK7Jcgf7shOrU= -github.com/hashicorp/consul/sdk v0.12.0/go.mod h1:yPkX5Q6CsxTFMjQQDJwzeNmUUF5NUGGbrDsv9wTb8cw= +github.com/hashicorp/consul-k8s/control-plane v0.0.0-20211207212234-aea9efea5638 h1:z68s6H6O3RjxDmNvou/2/3UBrsJkrMcNzI0IQN5scAM= +github.com/hashicorp/consul-k8s/control-plane v0.0.0-20211207212234-aea9efea5638/go.mod h1:7ZeaiADGbvJDuoWAT8UKj6KCcLsFUk+34OkUGMVtdXg= +github.com/hashicorp/consul/api v1.14.0 h1:Y64GIJ8hYTu+tuGekwO4G4ardXoiCivX9wv1iP/kihk= +github.com/hashicorp/consul/api v1.14.0/go.mod h1:bcaw5CSZ7NE9qfOfKCI1xb7ZKjzu/MyvQkCLTfqLqxQ= +github.com/hashicorp/consul/sdk v0.10.0/go.mod h1:yPkX5Q6CsxTFMjQQDJwzeNmUUF5NUGGbrDsv9wTb8cw= +github.com/hashicorp/consul/sdk v0.11.0 h1:HRzj8YSCln2yGgCumN5CL8lYlD3gBurnervJRJAZyC4= +github.com/hashicorp/consul/sdk v0.11.0/go.mod h1:yPkX5Q6CsxTFMjQQDJwzeNmUUF5NUGGbrDsv9wTb8cw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -379,16 +334,16 @@ github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9 github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs= github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-hclog v1.2.2 h1:ihRI7YFwcZdiSD7SIenIhHfQH3OuDvWerAUBZbeQS3M= -github.com/hashicorp/go-hclog v1.2.2/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= 
github.com/hashicorp/go-immutable-radix v1.1.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.0 h1:8exGP7ego3OmkfksihtSouGMZ+hQrhxx+FVELeXpVPE= github.com/hashicorp/go-immutable-radix v1.3.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-kms-wrapping/entropy v0.1.0/go.mod h1:d1g9WGtAunDNpek8jUIEJnBlbgKS1N2Q61QkHiZyR1g= -github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= +github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= @@ -416,9 +371,8 @@ github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= -github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= @@ -428,10 +382,11 @@ github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= -github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM= -github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0= -github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= -github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= +github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/memberlist v0.3.1 h1:MXgUXLqva1QvpVEDQW1IQLG0wivQAtmFlHRQ+1vWZfM= +github.com/hashicorp/memberlist v0.3.1/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/serf v0.9.7 h1:hkdgbqizGQHuU5IPqYM1JdSMV8nKfpuOnZYXssk9muY= +github.com/hashicorp/serf v0.9.7/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= github.com/hashicorp/vault/api v1.0.5-0.20200519221902-385fac77e20f/go.mod h1:euTFbi2YJgwcju3imEt919lhJKF68nN1cQPq3aA+kBE= github.com/hashicorp/vault/api v1.2.0 h1:ysGFc6XRGbv05NsWPzuO5VTv68Lj8jtwATxRLFOpP9s= github.com/hashicorp/vault/api v1.2.0/go.mod h1:dAjw0T5shMnrfH7Q/Mst+LrcTKvStZBVs1PICEDpUqY= @@ 
-442,7 +397,6 @@ github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= @@ -460,9 +414,8 @@ github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/u github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= @@ -490,17 +443,16 @@ github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7 github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= -github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8= +github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= -github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.13 h1:qdl+GuBjcsKKDco5BsxPJlId98mSWNKqYA+Co0SC1yA= +github.com/mattn/go-isatty v0.0.13/go.mod 
h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-zglob v0.0.1/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo= github.com/mattn/go-zglob v0.0.2-0.20190814121620-e3c945676326/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo= @@ -536,9 +488,8 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= @@ -615,7 +566,6 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4/go.mod h1:qgYeAmZ5ZIpBWTGllZSQnw97Dj+woV0toclVaRGI8pc= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rubiojr/go-vhd v0.0.0-20160810183302-0bfd3b39853c/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto= @@ -661,10 +611,8 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s= -github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= @@ -677,9 +625,7 @@ 
github.com/vdemeester/k8s-pkg-credentialprovider v0.0.0-20200107171650-7c61ffa44 github.com/vmware/govmomi v0.20.3/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= @@ -688,10 +634,6 @@ go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= @@ -714,9 +656,8 @@ golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83 h1:/ZScEX8SfEmUGRHs0gxpqteO5nfNW6axyZbBdw9A12g= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 h1:kUhD7nTDoI3fVd9G4ORWrbV5NY0liEs/Jg2pv5f+bBA= -golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -741,7 +682,6 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod 
h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= @@ -751,8 +691,6 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -783,52 +721,30 @@ golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod 
h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211216030914-fe4d6282115f h1:hEYJvxw1lSnWIl8X9ofsYMklzaDs90JI2az5YMd4fPM= golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys 
v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -875,61 +791,39 @@ golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad h1:ntjMns5wyP/fN65tdBD4g8J5w8n015+iIIs9rtjXkY0= golang.org/x/sys 
v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 h1:WIoqL4EROvwiPdUtaip4VcDdpZ4kha7wBWZrbVKCIZg= -golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 h1:CBpWXWQpIRjzmkkA+M7q9Fqnwd2mZr3AFqexg8YTfoM= -golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs= golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44= -golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -972,26 +866,10 @@ golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1011,24 +889,12 @@ google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsb google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api 
v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= @@ -1048,30 +914,9 @@ google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto 
v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c h1:wtujag7C+4D6KMoulW9YauvK2lgdvCMS260jsqqBXr0= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= @@ -1086,20 +931,9 @@ google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQ google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.38.0 h1:/9BgsAsa5nWe26HqOlvlgJnqBuktYOLCgjCPqsa56W0= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.48.0 h1:rQOsyJ/8+ufEDJd/Gdsz7HG220Mh9HAhFHRGnIjda0w= -google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1111,10 +945,8 @@ google.golang.org/protobuf 
v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1141,7 +973,6 @@ gopkg.in/warnings.v0 v0.1.1/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRN gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -1150,9 +981,8 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -1161,7 +991,6 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.17.0/go.mod h1:npsyOePkeP0CPwyGfXDHxvypiYMJxBWAMpQxCaJ4ZxI= k8s.io/api v0.19.3/go.mod h1:VF+5FT1B74Pw3KxMdKyinLo+zynBaMBiAfGMuldcNDs= k8s.io/api v0.22.2 h1:M8ZzAD0V6725Fjg53fKeTJxGsJvRbk4TEm/fexHMtfw= @@ -1196,9 +1025,8 @@ k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod 
h1:vHXdDvt9+2spS2R k8s.io/legacy-cloud-providers v0.17.0/go.mod h1:DdzaepJ3RtRy+e5YhNtrCYwlgyK87j/5+Yfp0L9Syp8= k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a h1:8dYfu/Fc9Gz2rNJKB9IQRGgQOh2clmRzNIPPY1xLY5g= k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20220812165043-ad590609e2e5 h1:XmRqFcQlCy/lKRZ39j+RVpokYNroHPqV3mcBRfnhT5o= -k8s.io/utils v0.0.0-20220812165043-ad590609e2e5/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= diff --git a/acceptance/tests/basic/basic_test.go b/acceptance/tests/basic/basic_test.go index 91047711a1..0edab960d9 100644 --- a/acceptance/tests/basic/basic_test.go +++ b/acceptance/tests/basic/basic_test.go @@ -50,7 +50,6 @@ func TestBasicInstallation(t *testing.T) { "global.tls.enabled": strconv.FormatBool(c.secure), "global.gossipEncryption.autoGenerate": strconv.FormatBool(c.secure), "global.tls.enableAutoEncrypt": strconv.FormatBool(c.autoEncrypt), - "client.enabled": "true", } consulCluster := consul.NewHelmCluster(t, helmValues, suite.Environment().DefaultContext(t), suite.Config(), releaseName) diff --git a/acceptance/tests/cli/cli_install_test.go b/acceptance/tests/cli/cli_install_test.go deleted file mode 100644 index b6c52e9fc4..0000000000 --- a/acceptance/tests/cli/cli_install_test.go +++ /dev/null @@ -1,147 +0,0 @@ -package cli - -import ( - "context" - "fmt" - "strings" - "testing" - "time" - - "github.com/hashicorp/consul-k8s/acceptance/framework/cli" - "github.com/hashicorp/consul-k8s/acceptance/framework/connhelper" - "github.com/hashicorp/consul-k8s/acceptance/framework/consul" - "github.com/hashicorp/consul-k8s/acceptance/framework/logger" - "github.com/hashicorp/consul/sdk/testutil/retry" - "github.com/stretchr/testify/require" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ipv4RegEx = "(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)" - -// TestInstall tests that we can install consul service mesh with the CLI -// and see that services can connect. -func TestInstall(t *testing.T) { - cases := map[string]struct { - secure bool - tproxy bool - }{ - "not-secure": {secure: false, tproxy: false}, - "secure": {secure: true, tproxy: false}, - "not-secure-tproxy": {secure: false, tproxy: true}, - } - - for name, c := range cases { - t.Run(name, func(t *testing.T) { - cli, err := cli.NewCLI() - require.NoError(t, err) - - cfg := suite.Config() - cfg.EnableTransparentProxy = c.tproxy - ctx := suite.Environment().DefaultContext(t) - - connHelper := connhelper.ConnectHelper{ - ClusterKind: consul.CLI, - Secure: c.secure, - ReleaseName: consul.CLIReleaseName, - Ctx: ctx, - Cfg: cfg, - } - - connHelper.Setup(t) - - connHelper.Install(t) - connHelper.DeployClientAndServer(t) - if c.secure { - connHelper.TestConnectionFailureWithoutIntention(t) - connHelper.CreateIntention(t) - } - - // Run proxy list and get the two results. 
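- // ("Two results" means one Sidecar entry per deployed pod: the
- // static-client pod and the static-server pod created above.)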
- listOut, err := cli.Run(t, ctx.KubectlOptions(t), "proxy", "list") - require.NoError(t, err) - logger.Log(t, string(listOut)) - list := translateListOutput(listOut) - require.Equal(t, 2, len(list)) - for _, proxyType := range list { - require.Equal(t, "Sidecar", proxyType) - } - - // Run proxy read and check that the connection is present in the output. - retrier := &retry.Timer{Timeout: 160 * time.Second, Wait: 2 * time.Second} - retry.RunWith(retrier, t, func(r *retry.R) { - for podName := range list { - out, err := cli.Run(t, ctx.KubectlOptions(t), "proxy", "read", podName) - require.NoError(t, err) - - output := string(out) - logger.Log(t, output) - - // Both proxies must see their own local agent and app as clusters. - require.Regexp(r, "consul-dataplane.*STATIC", output) - require.Regexp(r, "local_app.*STATIC", output) - - // Static Client must have Static Server as a cluster and endpoint. - if strings.Contains(podName, "static-client") { - require.Regexp(r, "static-server.*static-server\\.default\\.dc1\\.internal.*EDS", output) - require.Regexp(r, ipv4RegEx+".*static-server", output) - } - } - }) - - // Troubleshoot: Get the client pod so we can portForward to it and get the 'troubleshoot upstreams' output - clientPod, err := connHelper.Ctx.KubernetesClient(t).CoreV1().Pods(connHelper.Ctx.KubectlOptions(t).Namespace).List(context.Background(), metav1.ListOptions{ - LabelSelector: "app=static-client", - }) - require.NoError(t, err) - - clientPodName := clientPod.Items[0].Name - upstreamsOut, err := cli.Run(t, ctx.KubectlOptions(t), "troubleshoot", "upstreams", "-pod", clientPodName) - logger.Log(t, string(upstreamsOut)) - require.NoError(t, err) - - if c.tproxy { - // If tproxy is enabled we are looking for the upstream ip which is the ClusterIP of the Kubernetes Service - serverService, err := connHelper.Ctx.KubernetesClient(t).CoreV1().Services(connHelper.Ctx.KubectlOptions(t).Namespace).List(context.Background(), metav1.ListOptions{ - FieldSelector: "metadata.name=static-server", - }) - require.NoError(t, err) - serverIP := serverService.Items[0].Spec.ClusterIP - - proxyOut, err := cli.Run(t, ctx.KubectlOptions(t), "troubleshoot", "proxy", "-pod", clientPodName, "-upstream-ip", serverIP) - require.NoError(t, err) - require.Regexp(t, "Upstream resources are valid", string(proxyOut)) - logger.Log(t, string(proxyOut)) - } else { - // With tproxy disabled and explicit upstreams we need the envoy-id of the server - require.Regexp(t, "static-server", string(upstreamsOut)) - - proxyOut, err := cli.Run(t, ctx.KubectlOptions(t), "troubleshoot", "proxy", "-pod", clientPodName, "-upstream-envoy-id", "static-server") - require.NoError(t, err) - require.Regexp(t, "Upstream resources are valid", string(proxyOut)) - logger.Log(t, string(proxyOut)) - } - - connHelper.TestConnectionSuccess(t) - connHelper.TestConnectionFailureWhenUnhealthy(t) - }) - } -} - -// translateListOutput takes the raw output from the proxy list command and -// translates the table into a map. 
-func translateListOutput(raw []byte) map[string]string { - formatted := make(map[string]string) - for _, pod := range strings.Split(strings.TrimSpace(string(raw)), "\n")[3:] { - row := strings.Split(strings.TrimSpace(pod), "\t") - - var name string - if len(row) == 3 { // Handle the case where namespace is present - name = fmt.Sprintf("%s/%s", strings.TrimSpace(row[0]), strings.TrimSpace(row[1])) - } else if len(row) == 2 { - name = strings.TrimSpace(row[0]) - } - formatted[name] = row[len(row)-1] - } - - return formatted -} diff --git a/acceptance/tests/cli/cli_upgrade_test.go b/acceptance/tests/cli/cli_upgrade_test.go deleted file mode 100644 index 6fcf82f738..0000000000 --- a/acceptance/tests/cli/cli_upgrade_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package cli - -import ( - "context" - "testing" - - "github.com/hashicorp/consul-k8s/acceptance/framework/connhelper" - "github.com/hashicorp/consul-k8s/acceptance/framework/consul" - "github.com/stretchr/testify/require" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// TestConnectInjectOnUpgrade tests that Connect works before and after an -// upgrade is performed on the cluster. -func TestUpgrade(t *testing.T) { - cfg := suite.Config() - ctx := suite.Environment().DefaultContext(t) - - connHelper := connhelper.ConnectHelper{ - ClusterKind: consul.CLI, - ReleaseName: consul.CLIReleaseName, - Ctx: ctx, - Cfg: cfg, - } - - connHelper.Setup(t) - - connHelper.Install(t) - - // Change a value on the connect-injector to force an update. - connHelper.HelmValues = map[string]string{ - "ingressGateways.enabled": "true", - "ingressGateways.defaults.replicas": "1", - } - - connHelper.Upgrade(t) - - t.Log("checking that the ingress gateway was install as a result of the upgrade") - k8sClient := ctx.KubernetesClient(t) - igwPods, err := k8sClient.CoreV1().Pods("").List(context.Background(), metav1.ListOptions{LabelSelector: "component=ingress-gateway"}) - require.NoError(t, err) - require.Len(t, igwPods.Items, 1) -} diff --git a/acceptance/tests/cli/main_test.go b/acceptance/tests/cli/main_test.go deleted file mode 100644 index 85cef25abe..0000000000 --- a/acceptance/tests/cli/main_test.go +++ /dev/null @@ -1,15 +0,0 @@ -package cli - -import ( - "os" - "testing" - - testsuite "github.com/hashicorp/consul-k8s/acceptance/framework/suite" -) - -var suite testsuite.Suite - -func TestMain(m *testing.M) { - suite = testsuite.NewSuite(m) - os.Exit(suite.Run()) -} diff --git a/acceptance/tests/connect/connect_external_servers_test.go b/acceptance/tests/connect/connect_external_servers_test.go deleted file mode 100644 index 5132b66e7a..0000000000 --- a/acceptance/tests/connect/connect_external_servers_test.go +++ /dev/null @@ -1,141 +0,0 @@ -package connect - -import ( - "context" - "fmt" - "strconv" - "testing" - - "github.com/hashicorp/consul-k8s/acceptance/framework/connhelper" - "github.com/hashicorp/consul-k8s/acceptance/framework/consul" - "github.com/hashicorp/consul-k8s/acceptance/framework/helpers" - "github.com/hashicorp/consul-k8s/acceptance/framework/k8s" - "github.com/hashicorp/consul-k8s/acceptance/framework/logger" - "github.com/hashicorp/consul/api" - "github.com/stretchr/testify/require" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// TestConnectInject_ExternalServers tests that connect works when using external servers. -// It sets up an external Consul server in the same cluster but a different Helm installation -// and then treats this server as external. 
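- // Topology sketch: the first release runs the Consul servers; the second
- // release disables its own servers (server.enabled=false) and reaches the
- // first through externalServers.hosts[0], the name of the Kubernetes
- // Service fronting the server pods, so those servers are treated as if
- // they lived outside the cluster.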
-func TestConnectInject_ExternalServers(t *testing.T) { - for _, secure := range []bool{ - false, - true, - } { - caseName := fmt.Sprintf("secure: %t", secure) - t.Run(caseName, func(t *testing.T) { - cfg := suite.Config() - ctx := suite.Environment().DefaultContext(t) - - serverHelmValues := map[string]string{ - "global.acls.manageSystemACLs": strconv.FormatBool(secure), - "global.tls.enabled": strconv.FormatBool(secure), - - // Don't install injector, controller and cni on this cluster so that it's not installed twice. - "connectInject.enabled": "false", - "connectInject.cni.enabled": "false", - } - serverReleaseName := helpers.RandomName() - consulServerCluster := consul.NewHelmCluster(t, serverHelmValues, ctx, cfg, serverReleaseName) - - consulServerCluster.Create(t) - - helmValues := map[string]string{ - "server.enabled": "false", - "global.acls.manageSystemACLs": strconv.FormatBool(secure), - - "global.tls.enabled": strconv.FormatBool(secure), - - "connectInject.enabled": "true", - - "externalServers.enabled": "true", - "externalServers.hosts[0]": fmt.Sprintf("%s-consul-server", serverReleaseName), - "externalServers.httpsPort": "8500", - } - - if secure { - helmValues["global.tls.caCert.secretName"] = fmt.Sprintf("%s-consul-ca-cert", serverReleaseName) - helmValues["global.tls.caCert.secretKey"] = "tls.crt" - helmValues["global.acls.bootstrapToken.secretName"] = fmt.Sprintf("%s-consul-bootstrap-acl-token", serverReleaseName) - helmValues["global.acls.bootstrapToken.secretKey"] = "token" - helmValues["externalServers.httpsPort"] = "8501" - } - - releaseName := helpers.RandomName() - consulCluster := consul.NewHelmCluster(t, helmValues, ctx, cfg, releaseName) - consulCluster.SkipCheckForPreviousInstallations = true - - consulCluster.Create(t) - - logger.Log(t, "creating static-server and static-client deployments") - k8s.DeployKustomize(t, ctx.KubectlOptions(t), cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-server-inject") - if cfg.EnableTransparentProxy { - k8s.DeployKustomize(t, ctx.KubectlOptions(t), cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-tproxy") - } else { - k8s.DeployKustomize(t, ctx.KubectlOptions(t), cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-inject") - } - - // Check that both static-server and static-client have been injected and now have 2 containers. 
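- // (The two containers are the application container plus the injected
- // Envoy sidecar proxy.)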
- for _, labelSelector := range []string{"app=static-server", "app=static-client"} { - podList, err := ctx.KubernetesClient(t).CoreV1().Pods(metav1.NamespaceAll).List(context.Background(), metav1.ListOptions{ - LabelSelector: labelSelector, - }) - require.NoError(t, err) - require.Len(t, podList.Items, 1) - require.Len(t, podList.Items[0].Spec.Containers, 2) - } - - if secure { - consulClient, _ := consulServerCluster.SetupConsulClient(t, true) - - logger.Log(t, "checking that the connection is not successful because there's no intention") - if cfg.EnableTransparentProxy { - k8s.CheckStaticServerConnectionFailing(t, ctx.KubectlOptions(t), connhelper.StaticClientName, "http://static-server") - } else { - k8s.CheckStaticServerConnectionFailing(t, ctx.KubectlOptions(t), connhelper.StaticClientName, "http://localhost:1234") - } - - intention := &api.ServiceIntentionsConfigEntry{ - Kind: api.ServiceIntentions, - Name: connhelper.StaticServerName, - Sources: []*api.SourceIntention{ - { - Name: connhelper.StaticClientName, - Action: api.IntentionActionAllow, - }, - }, - } - - logger.Log(t, "creating intention") - _, _, err := consulClient.ConfigEntries().Set(intention, nil) - require.NoError(t, err) - } - - logger.Log(t, "checking that connection is successful") - if cfg.EnableTransparentProxy { - k8s.CheckStaticServerConnectionSuccessful(t, ctx.KubectlOptions(t), connhelper.StaticClientName, "http://static-server") - } else { - k8s.CheckStaticServerConnectionSuccessful(t, ctx.KubectlOptions(t), connhelper.StaticClientName, "http://localhost:1234") - } - - // Test that kubernetes readiness status is synced to Consul. - // Create the file so that the readiness probe of the static-server pod fails. - logger.Log(t, "testing k8s -> consul health checks sync by making the static-server unhealthy") - k8s.RunKubectl(t, ctx.KubectlOptions(t), "exec", "deploy/"+connhelper.StaticServerName, "--", "touch", "/tmp/unhealthy") - - // The readiness probe should take a moment to be reflected in Consul, CheckStaticServerConnection will retry - // until Consul marks the service instance unavailable for mesh traffic, causing the connection to fail. - // We are expecting a "connection reset by peer" error because in a case of health checks, - // there will be no healthy proxy host to connect to. That's why we can't assert that we receive an empty reply - // from server, which is the case when a connection is unsuccessful due to intentions in other tests. 
- logger.Log(t, "checking that connection is unsuccessful") - if cfg.EnableTransparentProxy { - k8s.CheckStaticServerConnectionMultipleFailureMessages(t, ctx.KubectlOptions(t), connhelper.StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server", "curl: (7) Failed to connect to static-server port 80: Connection refused"}, "", "http://static-server") - } else { - k8s.CheckStaticServerConnectionMultipleFailureMessages(t, ctx.KubectlOptions(t), connhelper.StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server"}, "", "http://localhost:1234") - } - }) - } -} diff --git a/acceptance/framework/connhelper/connect_helper.go b/acceptance/tests/connect/connect_helper.go similarity index 92% rename from acceptance/framework/connhelper/connect_helper.go rename to acceptance/tests/connect/connect_helper.go index c5d677ba6f..473c2ab562 100644 --- a/acceptance/framework/connhelper/connect_helper.go +++ b/acceptance/tests/connect/connect_helper.go @@ -1,10 +1,9 @@ -package connhelper +package connect import ( "context" "strconv" "testing" - "time" "github.com/hashicorp/consul-k8s/acceptance/framework/config" "github.com/hashicorp/consul-k8s/acceptance/framework/consul" @@ -20,7 +19,7 @@ import ( const ( StaticClientName = "static-client" - StaticServerName = "static-server" + staticServerName = "static-server" ) // ConnectHelper configures a Consul cluster for connect injection tests. @@ -32,13 +31,16 @@ type ConnectHelper struct { // Secure configures the Helm chart for the test to use ACL tokens. Secure bool + // AutoEncrypt configures the Helm chart for the test to use AutoEncrypt. + AutoEncrypt bool + // HelmValues are the additional helm values to use when installing or // upgrading the cluster beyond connectInject.enabled, global.tls.enabled, - // global.tls.enableAutoEncrypt, global.acls.manageSystemACLs which are + // global.tls.enableAutoEncrypt, global.acls.mangageSystemACLs which are // set by the Secure and AutoEncrypt fields. HelmValues map[string]string - // ReleaseName is the name of the Consul cluster. + // RelaseName is the name of the Consul cluster. ReleaseName string Ctx environment.TestContext @@ -91,12 +93,11 @@ func (c *ConnectHelper) DeployClientAndServer(t *testing.T) { // deployments because golang will execute them in reverse order // (i.e. the last registered cleanup function will be executed first). 
t.Cleanup(func() { - retrier := &retry.Timer{Timeout: 30 * time.Second, Wait: 100 * time.Millisecond} - retry.RunWith(retrier, t, func(r *retry.R) { + retry.Run(t, func(r *retry.R) { tokens, _, err := c.consulClient.ACL().TokenList(nil) require.NoError(r, err) for _, token := range tokens { - require.NotContains(r, token.Description, StaticServerName) + require.NotContains(r, token.Description, staticServerName) require.NotContains(r, token.Description, StaticClientName) } }) @@ -141,7 +142,7 @@ func (c *ConnectHelper) CreateIntention(t *testing.T) { logger.Log(t, "creating intention") _, _, err := c.consulClient.ConfigEntries().Set(&api.ServiceIntentionsConfigEntry{ Kind: api.ServiceIntentions, - Name: StaticServerName, + Name: staticServerName, Sources: []*api.SourceIntention{ { Name: StaticClientName, @@ -152,7 +153,7 @@ require.NoError(t, err) } -// TestConnectionSuccess ensures the static-server pod can connect to the +// TestConnectionSuccess ensures the static-client pod can connect to the // static-server pod once the intention is set. func (c *ConnectHelper) TestConnectionSuccess(t *testing.T) { logger.Log(t, "checking that connection is successful") @@ -172,7 +173,7 @@ func (c *ConnectHelper) TestConnectionFailureWhenUnhealthy(t *testing.T) { // Create a file called "unhealthy" at "/tmp/" so that the readiness probe // of the static-server pod fails. logger.Log(t, "testing k8s -> consul health checks sync by making the static-server unhealthy") - k8s.RunKubectl(t, c.Ctx.KubectlOptions(t), "exec", "deploy/"+StaticServerName, "--", "touch", "/tmp/unhealthy") + k8s.RunKubectl(t, c.Ctx.KubectlOptions(t), "exec", "deploy/"+staticServerName, "--", "touch", "/tmp/unhealthy") // The readiness probe should take a moment to be reflected in Consul, // CheckStaticServerConnection will retry until Consul marks the service @@ -197,7 +198,7 @@ } // Return the static-server to a "healthy state".
- k8s.RunKubectl(t, c.Ctx.KubectlOptions(t), "exec", "deploy/"+StaticServerName, "--", "rm", "/tmp/unhealthy") + k8s.RunKubectl(t, c.Ctx.KubectlOptions(t), "exec", "deploy/"+staticServerName, "--", "rm", "/tmp/unhealthy") } // helmValues uses the Secure and AutoEncrypt fields to set values for the Helm @@ -207,9 +208,8 @@ func (c *ConnectHelper) helmValues() map[string]string { helmValues := map[string]string{ "connectInject.enabled": "true", "global.tls.enabled": strconv.FormatBool(c.Secure), + "global.tls.enableAutoEncrypt": strconv.FormatBool(c.AutoEncrypt), "global.acls.manageSystemACLs": strconv.FormatBool(c.Secure), - "dns.enabled": "true", - "dns.enableRedirection": "true", } helpers.MergeMaps(helmValues, c.HelmValues) diff --git a/acceptance/tests/connect/connect_inject_namespaces_test.go b/acceptance/tests/connect/connect_inject_namespaces_test.go index db48465bda..22fcc19c7a 100644 --- a/acceptance/tests/connect/connect_inject_namespaces_test.go +++ b/acceptance/tests/connect/connect_inject_namespaces_test.go @@ -8,7 +8,6 @@ import ( "testing" terratestk8s "github.com/gruntwork-io/terratest/modules/k8s" - "github.com/hashicorp/consul-k8s/acceptance/framework/connhelper" "github.com/hashicorp/consul-k8s/acceptance/framework/consul" "github.com/hashicorp/consul-k8s/acceptance/framework/helpers" "github.com/hashicorp/consul-k8s/acceptance/framework/k8s" @@ -67,6 +66,7 @@ func TestConnectInjectNamespaces(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { ctx := suite.Environment().DefaultContext(t) + cfg := suite.Config() helmValues := map[string]string{ "global.enableConsulNamespaces": "true", @@ -129,13 +129,13 @@ func TestConnectInjectNamespaces(t *testing.T) { tokens, _, err := consulClient.ACL().TokenList(serverQueryOpts) require.NoError(r, err) for _, token := range tokens { - require.NotContains(r, token.Description, connhelper.StaticServerName) + require.NotContains(r, token.Description, staticServerName) } tokens, _, err = consulClient.ACL().TokenList(clientQueryOpts) require.NoError(r, err) for _, token := range tokens { - require.NotContains(r, token.Description, connhelper.StaticClientName) + require.NotContains(r, token.Description, StaticClientName) } }) } @@ -166,29 +166,29 @@ func TestConnectInjectNamespaces(t *testing.T) { // Kubernetes namespace. // If a single destination namespace is set, we expect all services // to be registered in that destination Consul namespace. 
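// For example, with mirroringK8S=true a service in Kubernetes namespace
// "ns1" is registered into Consul namespace "ns1", while a single
// destination namespace of "foo" collects every service into Consul
// namespace "foo" regardless of its Kubernetes namespace.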
- services, _, err := consulClient.Catalog().Service(connhelper.StaticServerName, "", serverQueryOpts) + services, _, err := consulClient.Catalog().Service(staticServerName, "", serverQueryOpts) require.NoError(t, err) require.Len(t, services, 1) - services, _, err = consulClient.Catalog().Service(connhelper.StaticClientName, "", clientQueryOpts) + services, _, err = consulClient.Catalog().Service(StaticClientName, "", clientQueryOpts) require.NoError(t, err) require.Len(t, services, 1) if c.secure { logger.Log(t, "checking that the connection is not successful because there's no intention") if cfg.EnableTransparentProxy { - k8s.CheckStaticServerConnectionFailing(t, staticClientOpts, connhelper.StaticClientName, fmt.Sprintf("http://static-server.%s", staticServerNamespace)) + k8s.CheckStaticServerConnectionFailing(t, staticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.%s", staticServerNamespace)) } else { - k8s.CheckStaticServerConnectionFailing(t, staticClientOpts, connhelper.StaticClientName, "http://localhost:1234") + k8s.CheckStaticServerConnectionFailing(t, staticClientOpts, StaticClientName, "http://localhost:1234") } intention := &api.ServiceIntentionsConfigEntry{ Kind: api.ServiceIntentions, - Name: connhelper.StaticServerName, + Name: staticServerName, Namespace: staticServerNamespace, Sources: []*api.SourceIntention{ { - Name: connhelper.StaticClientName, + Name: StaticClientName, Namespace: StaticClientNamespace, Action: api.IntentionActionAllow, }, @@ -209,15 +209,15 @@ func TestConnectInjectNamespaces(t *testing.T) { logger.Log(t, "checking that connection is successful") if cfg.EnableTransparentProxy { - k8s.CheckStaticServerConnectionSuccessful(t, staticClientOpts, connhelper.StaticClientName, fmt.Sprintf("http://static-server.%s", staticServerNamespace)) + k8s.CheckStaticServerConnectionSuccessful(t, staticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.%s", staticServerNamespace)) } else { - k8s.CheckStaticServerConnectionSuccessful(t, staticClientOpts, connhelper.StaticClientName, "http://localhost:1234") + k8s.CheckStaticServerConnectionSuccessful(t, staticClientOpts, StaticClientName, "http://localhost:1234") } // Test that kubernetes readiness status is synced to Consul. // Create the file so that the readiness probe of the static-server pod fails. logger.Log(t, "testing k8s -> consul health checks sync by making the static-server unhealthy") - k8s.RunKubectl(t, staticServerOpts, "exec", "deploy/"+connhelper.StaticServerName, "--", "touch", "/tmp/unhealthy") + k8s.RunKubectl(t, staticServerOpts, "exec", "deploy/"+staticServerName, "--", "touch", "/tmp/unhealthy") // The readiness probe should take a moment to be reflected in Consul, CheckStaticServerConnection will retry // until Consul marks the service instance unavailable for mesh traffic, causing the connection to fail. @@ -226,9 +226,9 @@ func TestConnectInjectNamespaces(t *testing.T) { // from server, which is the case when a connection is unsuccessful due to intentions in other tests. 
logger.Log(t, "checking that connection is unsuccessful") if cfg.EnableTransparentProxy { - k8s.CheckStaticServerConnectionMultipleFailureMessages(t, staticClientOpts, connhelper.StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server", "curl: (7) Failed to connect to static-server.ns1 port 80: Connection refused"}, "", fmt.Sprintf("http://static-server.%s", staticServerNamespace)) + k8s.CheckStaticServerConnectionMultipleFailureMessages(t, staticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server", "curl: (7) Failed to connect to static-server.ns1 port 80: Connection refused"}, "", fmt.Sprintf("http://static-server.%s", staticServerNamespace)) } else { - k8s.CheckStaticServerConnectionMultipleFailureMessages(t, staticClientOpts, connhelper.StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server"}, "", "http://localhost:1234") + k8s.CheckStaticServerConnectionMultipleFailureMessages(t, staticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server"}, "", "http://localhost:1234") } }) } diff --git a/acceptance/tests/connect/connect_inject_test.go b/acceptance/tests/connect/connect_inject_test.go index 2e75846884..ec694b8a95 100644 --- a/acceptance/tests/connect/connect_inject_test.go +++ b/acceptance/tests/connect/connect_inject_test.go @@ -8,7 +8,7 @@ import ( "testing" "time" - "github.com/hashicorp/consul-k8s/acceptance/framework/connhelper" + "github.com/hashicorp/consul-k8s/acceptance/framework/cli" "github.com/hashicorp/consul-k8s/acceptance/framework/consul" "github.com/hashicorp/consul-k8s/acceptance/framework/helpers" "github.com/hashicorp/consul-k8s/acceptance/framework/k8s" @@ -19,25 +19,61 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// TestConnectInject tests that Connect works in a default and a secure installation using Helm CLI. +const ipv4RegEx = "(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)" + +// TestConnectInject tests that Connect works in a default and a secure installation. 
func TestConnectInject(t *testing.T) { cases := map[string]struct { - secure bool + clusterKind consul.ClusterKind + releaseName string + secure bool + autoEncrypt bool }{ - "not-secure": {secure: false}, - "secure": {secure: true}, + "Helm install without secure or auto-encrypt": { + clusterKind: consul.Helm, + releaseName: helpers.RandomName(), + }, + "Helm install with secure": { + clusterKind: consul.Helm, + releaseName: helpers.RandomName(), + secure: true, + }, + "Helm install with secure and auto-encrypt": { + clusterKind: consul.Helm, + releaseName: helpers.RandomName(), + secure: true, + autoEncrypt: true, + }, + "CLI install without secure or auto-encrypt": { + clusterKind: consul.CLI, + releaseName: consul.CLIReleaseName, + }, + "CLI install with secure": { + clusterKind: consul.CLI, + releaseName: consul.CLIReleaseName, + secure: true, + }, + "CLI install with secure and auto-encrypt": { + clusterKind: consul.CLI, + releaseName: consul.CLIReleaseName, + secure: true, + autoEncrypt: true, + }, } for name, c := range cases { t.Run(name, func(t *testing.T) { + cli, err := cli.NewCLI() + require.NoError(t, err) + cfg := suite.Config() ctx := suite.Environment().DefaultContext(t) - releaseName := helpers.RandomName() - connHelper := connhelper.ConnectHelper{ - ClusterKind: consul.Helm, + connHelper := ConnectHelper{ + ClusterKind: c.clusterKind, Secure: c.secure, - ReleaseName: releaseName, + AutoEncrypt: c.autoEncrypt, + ReleaseName: c.releaseName, Ctx: ctx, Cfg: cfg, } @@ -51,6 +87,99 @@ func TestConnectInject(t *testing.T) { connHelper.CreateIntention(t) } + // Run proxy list and get the two results. + listOut, err := cli.Run(t, ctx.KubectlOptions(t), "proxy", "list") + require.NoError(t, err) + logger.Log(t, string(listOut)) + list := translateListOutput(listOut) + require.Equal(t, 2, len(list)) + for _, proxyType := range list { + require.Equal(t, "Sidecar", proxyType) + } + + // Run proxy read and check that the connection is present in the output. + retrier := &retry.Timer{Timeout: 160 * time.Second, Wait: 2 * time.Second} + retry.RunWith(retrier, t, func(r *retry.R) { + for podName := range list { + out, err := cli.Run(t, ctx.KubectlOptions(t), "proxy", "read", podName) + require.NoError(t, err) + + output := string(out) + logger.Log(t, output) + + // Both proxies must see their own local agent and app as clusters. + require.Regexp(r, "local_agent.*STATIC", output) + require.Regexp(r, "local_app.*STATIC", output) + + // Static Client must have Static Server as a cluster and endpoint. + if strings.Contains(podName, "static-client") { + require.Regexp(r, "static-server.*static-server\\.default\\.dc1\\.internal.*EDS", output) + require.Regexp(r, ipv4RegEx+".*static-server", output) + } + + } + }) + + connHelper.TestConnectionSuccess(t) + connHelper.TestConnectionFailureWhenUnhealthy(t) + }) + } +} + +// TestConnectInjectOnUpgrade tests that Connect works before and after an +// upgrade is performed on the cluster. 
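+// The flow is install, verify connectivity, apply the upgrade Helm values,
+// call Upgrade, then verify again, so regressions that only appear after an
+// upgrade rather than on a fresh install still fail the test.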
+func TestConnectInjectOnUpgrade(t *testing.T) { + cases := map[string]struct { + clusterKind consul.ClusterKind + releaseName string + initial, upgrade map[string]string + }{ + "CLI upgrade changes nothing": { + clusterKind: consul.CLI, + releaseName: consul.CLIReleaseName, + }, + "CLI upgrade to enable ingressGateway": { + clusterKind: consul.CLI, + releaseName: consul.CLIReleaseName, + initial: map[string]string{}, + upgrade: map[string]string{ + "ingressGateways.enabled": "true", + "ingressGateways.defaults.replicas": "1", + }, + }, + "CLI upgrade to enable UI": { + clusterKind: consul.CLI, + releaseName: consul.CLIReleaseName, + initial: map[string]string{}, + upgrade: map[string]string{ + "ui.enabled": "true", + }, + }, + } + + for name, c := range cases { + t.Run(name, func(t *testing.T) { + cfg := suite.Config() + ctx := suite.Environment().DefaultContext(t) + + connHelper := ConnectHelper{ + ClusterKind: c.clusterKind, + HelmValues: c.initial, + ReleaseName: c.releaseName, + Ctx: ctx, + Cfg: cfg, + } + + connHelper.Setup(t) + + connHelper.Install(t) + connHelper.DeployClientAndServer(t) + connHelper.TestConnectionSuccess(t) + connHelper.TestConnectionFailureWhenUnhealthy(t) + + connHelper.HelmValues = c.upgrade + + connHelper.Upgrade(t) connHelper.TestConnectionSuccess(t) connHelper.TestConnectionFailureWhenUnhealthy(t) }) @@ -59,16 +188,26 @@ func TestConnectInject(t *testing.T) { // Test the endpoints controller cleans up force-killed pods. func TestConnectInject_CleanupKilledPods(t *testing.T) { - for _, secure := range []bool{false, true} { - name := fmt.Sprintf("secure: %t", secure) + cases := []struct { + secure bool + autoEncrypt bool + }{ + {false, false}, + {true, false}, + {true, true}, + } + + for _, c := range cases { + name := fmt.Sprintf("secure: %t; auto-encrypt: %t", c.secure, c.autoEncrypt) t.Run(name, func(t *testing.T) { cfg := suite.Config() ctx := suite.Environment().DefaultContext(t) helmValues := map[string]string{ "connectInject.enabled": "true", - "global.tls.enabled": strconv.FormatBool(secure), - "global.acls.manageSystemACLs": strconv.FormatBool(secure), + "global.tls.enabled": strconv.FormatBool(c.secure), + "global.tls.enableAutoEncrypt": strconv.FormatBool(c.autoEncrypt), + "global.acls.manageSystemACLs": strconv.FormatBool(c.secure), } releaseName := helpers.RandomName() @@ -80,7 +219,7 @@ func TestConnectInject_CleanupKilledPods(t *testing.T) { k8s.DeployKustomize(t, ctx.KubectlOptions(t), cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-inject") logger.Log(t, "waiting for static-client to be registered with Consul") - consulClient, _ := consulCluster.SetupConsulClient(t, secure) + consulClient, _ := consulCluster.SetupConsulClient(t, c.secure) retry.Run(t, func(r *retry.R) { for _, name := range []string{"static-client", "static-client-sidecar-proxy"} { instances, _, err := consulClient.Catalog().Service(name, "", nil) @@ -120,6 +259,48 @@ func TestConnectInject_CleanupKilledPods(t *testing.T) { } } +// Test that when Consul clients are restarted and lose all their registrations, +// the services get re-registered and can continue to talk to each other. 
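+// The restart below uses `kubectl rollout restart` to trigger a rolling
+// replacement of the client daemonset and `kubectl rollout status` to block
+// until that rollout completes, so the second connectivity check runs
+// against freshly restarted client agents.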
+func TestConnectInject_RestartConsulClients(t *testing.T) { + cfg := suite.Config() + ctx := suite.Environment().DefaultContext(t) + + helmValues := map[string]string{ + "connectInject.enabled": "true", + } + + releaseName := helpers.RandomName() + consulCluster := consul.NewHelmCluster(t, helmValues, ctx, cfg, releaseName) + + consulCluster.Create(t) + + logger.Log(t, "creating static-server and static-client deployments") + k8s.DeployKustomize(t, ctx.KubectlOptions(t), cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-server-inject") + if cfg.EnableTransparentProxy { + k8s.DeployKustomize(t, ctx.KubectlOptions(t), cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-tproxy") + } else { + k8s.DeployKustomize(t, ctx.KubectlOptions(t), cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-inject") + } + + logger.Log(t, "checking that connection is successful") + if cfg.EnableTransparentProxy { + k8s.CheckStaticServerConnectionSuccessful(t, ctx.KubectlOptions(t), StaticClientName, "http://static-server") + } else { + k8s.CheckStaticServerConnectionSuccessful(t, ctx.KubectlOptions(t), StaticClientName, "http://localhost:1234") + } + + logger.Log(t, "restarting Consul client daemonset") + k8s.RunKubectl(t, ctx.KubectlOptions(t), "rollout", "restart", fmt.Sprintf("ds/%s-consul-client", releaseName)) + k8s.RunKubectl(t, ctx.KubectlOptions(t), "rollout", "status", fmt.Sprintf("ds/%s-consul-client", releaseName)) + + logger.Log(t, "checking that connection is still successful") + if cfg.EnableTransparentProxy { + k8s.CheckStaticServerConnectionSuccessful(t, ctx.KubectlOptions(t), StaticClientName, "http://static-server") + } else { + k8s.CheckStaticServerConnectionSuccessful(t, ctx.KubectlOptions(t), StaticClientName, "http://localhost:1234") + } +} + const multiport = "multiport" const multiportAdmin = "multiport-admin" @@ -127,8 +308,17 @@ const multiportAdmin = "multiport-admin" // two ports. This tests inbound connections to each port of the multiport app, and outbound connections from the // multiport app to static-server. func TestConnectInject_MultiportServices(t *testing.T) { - for _, secure := range []bool{false, true} { - name := fmt.Sprintf("secure: %t", secure) + cases := []struct { + secure bool + autoEncrypt bool + }{ + {false, false}, + {true, false}, + {true, true}, + } + + for _, c := range cases { + name := fmt.Sprintf("secure: %t; auto-encrypt: %t", c.secure, c.autoEncrypt) t.Run(name, func(t *testing.T) { cfg := suite.Config() ctx := suite.Environment().DefaultContext(t) @@ -141,8 +331,9 @@ func TestConnectInject_MultiportServices(t *testing.T) { helmValues := map[string]string{ "connectInject.enabled": "true", - "global.tls.enabled": strconv.FormatBool(secure), - "global.acls.manageSystemACLs": strconv.FormatBool(secure), + "global.tls.enabled": strconv.FormatBool(c.secure), + "global.tls.enableAutoEncrypt": strconv.FormatBool(c.autoEncrypt), + "global.acls.manageSystemACLs": strconv.FormatBool(c.secure), } releaseName := helpers.RandomName() @@ -150,10 +341,10 @@ func TestConnectInject_MultiportServices(t *testing.T) { consulCluster.Create(t) - consulClient, _ := consulCluster.SetupConsulClient(t, secure) + consulClient, _ := consulCluster.SetupConsulClient(t, c.secure) // Check that the ACL token is deleted. - if secure { + if c.secure { // We need to register the cleanup function before we create the deployments // because golang will execute them in reverse order i.e. 
the last registered // cleanup function will be executed first. @@ -165,8 +356,8 @@ func TestConnectInject_MultiportServices(t *testing.T) { for _, token := range tokens { require.NotContains(r, token.Description, multiport) require.NotContains(r, token.Description, multiportAdmin) - require.NotContains(r, token.Description, connhelper.StaticClientName) - require.NotContains(r, token.Description, connhelper.StaticServerName) + require.NotContains(r, token.Description, StaticClientName) + require.NotContains(r, token.Description, staticServerName) } }) }) @@ -192,10 +383,10 @@ func TestConnectInject_MultiportServices(t *testing.T) { require.Len(t, podList.Items, 1) require.Len(t, podList.Items[0].Spec.Containers, 4) - if secure { + if c.secure { logger.Log(t, "checking that the connection is not successful because there's no intention") - k8s.CheckStaticServerConnectionFailing(t, ctx.KubectlOptions(t), connhelper.StaticClientName, "http://localhost:1234") - k8s.CheckStaticServerConnectionFailing(t, ctx.KubectlOptions(t), connhelper.StaticClientName, "http://localhost:2234") + k8s.CheckStaticServerConnectionFailing(t, ctx.KubectlOptions(t), StaticClientName, "http://localhost:1234") + k8s.CheckStaticServerConnectionFailing(t, ctx.KubectlOptions(t), StaticClientName, "http://localhost:2234") logger.Log(t, fmt.Sprintf("creating intention for %s", multiport)) _, _, err := consulClient.ConfigEntries().Set(&api.ServiceIntentionsConfigEntry{ @@ -203,7 +394,7 @@ func TestConnectInject_MultiportServices(t *testing.T) { Name: multiport, Sources: []*api.SourceIntention{ { - Name: connhelper.StaticClientName, + Name: StaticClientName, Action: api.IntentionActionAllow, }, }, @@ -215,7 +406,7 @@ func TestConnectInject_MultiportServices(t *testing.T) { Name: multiportAdmin, Sources: []*api.SourceIntention{ { - Name: connhelper.StaticClientName, + Name: StaticClientName, Action: api.IntentionActionAllow, }, }, @@ -224,10 +415,10 @@ func TestConnectInject_MultiportServices(t *testing.T) { } // Check connection from static-client to multiport. - k8s.CheckStaticServerConnectionSuccessful(t, ctx.KubectlOptions(t), connhelper.StaticClientName, "http://localhost:1234") + k8s.CheckStaticServerConnectionSuccessful(t, ctx.KubectlOptions(t), StaticClientName, "http://localhost:1234") // Check connection from static-client to multiport-admin. - k8s.CheckStaticServerConnectionSuccessfulWithMessage(t, ctx.KubectlOptions(t), connhelper.StaticClientName, "hello world from 9090 admin", "http://localhost:2234") + k8s.CheckStaticServerConnectionSuccessfulWithMessage(t, ctx.KubectlOptions(t), StaticClientName, "hello world from 9090 admin", "http://localhost:2234") // Now that we've checked inbound connections to a multi port pod, check outbound connection from multi port // pod to static-server. @@ -237,15 +428,15 @@ func TestConnectInject_MultiportServices(t *testing.T) { // For outbound connections from the multi port pod, only intentions from the first service in the multiport // pod need to be created, since all upstream connections are made through the first service's envoy proxy. 
- if secure { + if c.secure { logger.Log(t, "checking that the connection is not successful because there's no intention") k8s.CheckStaticServerConnectionFailing(t, ctx.KubectlOptions(t), multiport, "http://localhost:3234") - logger.Log(t, fmt.Sprintf("creating intention for %s", connhelper.StaticServerName)) + logger.Log(t, fmt.Sprintf("creating intention for %s", staticServerName)) _, _, err := consulClient.ConfigEntries().Set(&api.ServiceIntentionsConfigEntry{ Kind: api.ServiceIntentions, - Name: connhelper.StaticServerName, + Name: staticServerName, Sources: []*api.SourceIntention{ { Name: multiport, @@ -272,8 +463,27 @@ func TestConnectInject_MultiportServices(t *testing.T) { // We are expecting a "connection reset by peer" error because in a case of health checks, // there will be no healthy proxy host to connect to. That's why we can't assert that we receive an empty reply // from server, which is the case when a connection is unsuccessful due to intentions in other tests. - k8s.CheckStaticServerConnectionMultipleFailureMessages(t, ctx.KubectlOptions(t), connhelper.StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server"}, "", "http://localhost:1234") - k8s.CheckStaticServerConnectionMultipleFailureMessages(t, ctx.KubectlOptions(t), connhelper.StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server"}, "", "http://localhost:2234") + k8s.CheckStaticServerConnectionMultipleFailureMessages(t, ctx.KubectlOptions(t), StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server"}, "", "http://localhost:1234") + k8s.CheckStaticServerConnectionMultipleFailureMessages(t, ctx.KubectlOptions(t), StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server"}, "", "http://localhost:2234") }) } } + +// translateListOutput takes the raw output from the proxy list command and +// translates the table into a map. 
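+//
+// Usage sketch with illustrative input (three header lines, then
+// tab-separated rows with the proxy type in the last column):
+//
+//	translateListOutput([]byte("proxies\n\nName\tType\nstatic-client-xxx\tSidecar"))
+//	// => map[static-client-xxx:Sidecar]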
+func translateListOutput(raw []byte) map[string]string { + formatted := make(map[string]string) + for _, pod := range strings.Split(strings.TrimSpace(string(raw)), "\n")[3:] { + row := strings.Split(strings.TrimSpace(pod), "\t") + + var name string + if len(row) == 3 { // Handle the case where namespace is present + name = fmt.Sprintf("%s/%s", strings.TrimSpace(row[0]), strings.TrimSpace(row[1])) + } else if len(row) == 2 { + name = strings.TrimSpace(row[0]) + } + formatted[name] = row[len(row)-1] + } + + return formatted +} diff --git a/acceptance/tests/consul-dns/consul_dns_test.go b/acceptance/tests/consul-dns/consul_dns_test.go index 47cfb4af07..e6973459cc 100644 --- a/acceptance/tests/consul-dns/consul_dns_test.go +++ b/acceptance/tests/consul-dns/consul_dns_test.go @@ -14,16 +14,13 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +const podName = "dns-pod" + func TestConsulDNS(t *testing.T) { cfg := suite.Config() if cfg.EnableCNI { t.Skipf("skipping because -enable-cni is set") } - - if cfg.UseAKS { - t.Skipf("skipping because -use-aks is set") - } - for _, secure := range []bool{false, true} { name := fmt.Sprintf("secure: %t", secure) t.Run(name, func(t *testing.T) { @@ -57,9 +54,8 @@ func TestConsulDNS(t *testing.T) { serverIPs = append(serverIPs, serverPod.Status.PodIP) } - dnsPodName := fmt.Sprintf("%s-dns-pod", releaseName) dnsTestPodArgs := []string{ - "run", "-i", dnsPodName, "--restart", "Never", "--image", "anubhavmishra/tiny-tools", "--", "dig", fmt.Sprintf("@%s-consul-dns", releaseName), "consul.service.consul", + "run", "-i", podName, "--restart", "Never", "--image", "anubhavmishra/tiny-tools", "--", "dig", fmt.Sprintf("@%s-consul-dns", releaseName), "consul.service.consul", } helpers.Cleanup(t, suite.Config().NoCleanupOnFailure, func() { @@ -67,7 +63,7 @@ func TestConsulDNS(t *testing.T) { // This shouldn't cause any test pollution because the underlying // objects are deployments, and so when other tests create these // they should have different pod names. - k8s.RunKubectl(t, ctx.KubectlOptions(t), "delete", "pod", dnsPodName) + k8s.RunKubectl(t, ctx.KubectlOptions(t), "delete", "pod", podName) }) retry.Run(t, func(r *retry.R) { diff --git a/acceptance/tests/config-entries/config_entries_namespaces_test.go b/acceptance/tests/controller/controller_namespaces_test.go similarity index 99% rename from acceptance/tests/config-entries/config_entries_namespaces_test.go rename to acceptance/tests/controller/controller_namespaces_test.go index a2580069a8..51135556b1 100644 --- a/acceptance/tests/config-entries/config_entries_namespaces_test.go +++ b/acceptance/tests/controller/controller_namespaces_test.go @@ -1,4 +1,4 @@ -package config_entries +package controller import ( "fmt" @@ -79,6 +79,7 @@ func TestControllerNamespaces(t *testing.T) { helmValues := map[string]string{ "global.enableConsulNamespaces": "true", "global.adminPartitions.enabled": "true", + "controller.enabled": "true", "connectInject.enabled": "true", // When mirroringK8S is set, this setting is ignored. 
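// (controller.enabled is required on this chart version because the CRD
// controller runs as its own deployment rather than being folded into
// connect-inject.)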
diff --git a/acceptance/tests/config-entries/config_entries_test.go b/acceptance/tests/controller/controller_test.go similarity index 98% rename from acceptance/tests/config-entries/config_entries_test.go rename to acceptance/tests/controller/controller_test.go index 035edf6a8f..9451140aa0 100644 --- a/acceptance/tests/config-entries/config_entries_test.go +++ b/acceptance/tests/controller/controller_test.go @@ -1,4 +1,4 @@ -package config_entries +package controller import ( "fmt" @@ -32,13 +32,18 @@ func TestController(t *testing.T) { t.Skipf("skipping because -enable-cni is set and controller is already tested with regular tproxy") } cases := []struct { - secure bool - useVault bool + secure bool + autoEncrypt bool + useVault bool }{ - {false, false}, - {true, false}, - {false, true}, - {true, true}, + {false, false, false}, + {true, false, false}, + {true, true, false}, + {true, true, true}, + {false, false, true}, + // Vault with TLS requires autoEncrypt set to true as well, so the below + // is not valid + // {true, false, true}, } // The name of a service intention in consul is @@ -47,13 +52,15 @@ const IntentionName = "svc1" for _, c := range cases { - name := fmt.Sprintf("secure: %t, vault: %t", c.secure, c.useVault) + name := fmt.Sprintf("secure: %t; auto-encrypt: %t; vault: %t", c.secure, c.autoEncrypt, c.useVault) t.Run(name, func(t *testing.T) { ctx := suite.Environment().DefaultContext(t) helmValues := map[string]string{ + "controller.enabled": "true", "connectInject.enabled": "true", "global.tls.enabled": strconv.FormatBool(c.secure), + "global.tls.enableAutoEncrypt": strconv.FormatBool(c.autoEncrypt), "global.acls.manageSystemACLs": strconv.FormatBool(c.secure), } diff --git a/acceptance/tests/config-entries/main_test.go b/acceptance/tests/controller/main_test.go similarity index 90% rename from acceptance/tests/config-entries/main_test.go rename to acceptance/tests/controller/main_test.go index 64034f7663..115627fb85 100644 --- a/acceptance/tests/config-entries/main_test.go +++ b/acceptance/tests/controller/main_test.go @@ -1,4 +1,4 @@ -package config_entries +package controller import ( "os" diff --git a/acceptance/tests/fixtures/bases/crds-oss/proxydefaults.yaml b/acceptance/tests/fixtures/bases/crds-oss/proxydefaults.yaml index 6a90f29506..040da247f8 100644 --- a/acceptance/tests/fixtures/bases/crds-oss/proxydefaults.yaml +++ b/acceptance/tests/fixtures/bases/crds-oss/proxydefaults.yaml @@ -19,14 +19,3 @@ spec: - path: /health listenerPort: 22000 localPathPort: 8080 - envoyExtensions: - - name: builtin/aws/lambda - required: false - arguments: - payloadPassthrough: false - arn: arn:aws:lambda:us-west-2:111111111111:function:lambda-1234 - - name: builtin/aws/lambda - required: false - arguments: - payloadPassthrough: false - arn: arn:aws:lambda:us-east-1:111111111111:function:lambda-1234 diff --git a/acceptance/tests/fixtures/bases/crds-oss/servicedefaults.yaml b/acceptance/tests/fixtures/bases/crds-oss/servicedefaults.yaml index 413eb68d22..7b5f23eff5 100644 --- a/acceptance/tests/fixtures/bases/crds-oss/servicedefaults.yaml +++ b/acceptance/tests/fixtures/bases/crds-oss/servicedefaults.yaml @@ -24,16 +24,4 @@ spec: maxConnections: 5 passiveHealthCheck: interval: 10s - maxFailures: 2 - balanceInboundConnections: "exact_balance" - envoyExtensions: - - name: builtin/aws/lambda - required: false - arguments:
- payloadPassthrough: false - arn: arn:aws:lambda:us-east-1:111111111111:function:lambda-1234 \ No newline at end of file + maxFailures: 2 \ No newline at end of file diff --git a/acceptance/tests/fixtures/bases/mesh-peering/kustomization.yaml b/acceptance/tests/fixtures/bases/mesh-peering/kustomization.yaml deleted file mode 100644 index b48237763e..0000000000 --- a/acceptance/tests/fixtures/bases/mesh-peering/kustomization.yaml +++ /dev/null @@ -1,2 +0,0 @@ -resources: - - meshpeering.yaml diff --git a/acceptance/tests/fixtures/bases/mesh-peering/meshpeering.yaml b/acceptance/tests/fixtures/bases/mesh-peering/meshpeering.yaml deleted file mode 100644 index de84382d3e..0000000000 --- a/acceptance/tests/fixtures/bases/mesh-peering/meshpeering.yaml +++ /dev/null @@ -1,7 +0,0 @@ -apiVersion: consul.hashicorp.com/v1alpha1 -kind: Mesh -metadata: - name: mesh -spec: - peering: - peerThroughMeshGateways: true diff --git a/acceptance/tests/fixtures/bases/static-client/deployment.yaml b/acceptance/tests/fixtures/bases/static-client/deployment.yaml index 66bb771f6f..620b975d2e 100644 --- a/acceptance/tests/fixtures/bases/static-client/deployment.yaml +++ b/acceptance/tests/fixtures/bases/static-client/deployment.yaml @@ -15,7 +15,7 @@ spec: spec: containers: - name: static-client - image: docker.mirror.hashicorp.services/buildpack-deps:jammy-curl + image: docker.mirror.hashicorp.services/curlimages/curl:latest command: [ "/bin/sh", "-c", "--" ] args: [ "while true; do sleep 30; done;" ] env: diff --git a/acceptance/tests/fixtures/bases/static-metrics-app/deployment.yaml b/acceptance/tests/fixtures/bases/static-metrics-app/deployment.yaml index a3020ddb47..9283a8aae6 100644 --- a/acceptance/tests/fixtures/bases/static-metrics-app/deployment.yaml +++ b/acceptance/tests/fixtures/bases/static-metrics-app/deployment.yaml @@ -24,4 +24,4 @@ spec: - name: METRICS_ENABLE_PROMETHEUS value: "true" ports: - - containerPort: 9090 + - containerPort: 9090 \ No newline at end of file diff --git a/acceptance/tests/ingress-gateway/ingress_gateway_namespaces_test.go b/acceptance/tests/ingress-gateway/ingress_gateway_namespaces_test.go index b713620f1e..d1e5757f91 100644 --- a/acceptance/tests/ingress-gateway/ingress_gateway_namespaces_test.go +++ b/acceptance/tests/ingress-gateway/ingress_gateway_namespaces_test.go @@ -31,10 +31,10 @@ func TestIngressGatewaySingleNamespace(t *testing.T) { secure bool }{ { - secure: false, + false, }, { - secure: true, + true, }, } for _, c := range cases { @@ -42,7 +42,8 @@ func TestIngressGatewaySingleNamespace(t *testing.T) { t.Run(name, func(t *testing.T) { ctx := suite.Environment().DefaultContext(t) - igName := "ingress-gateway" + // Install the Helm chart without the ingress gateway first + // so that we can create the namespace for it. 
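+ // The gateway itself is enabled later via consulCluster.Upgrade, once the
+ // destination Consul namespace exists for it to register into.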
helmValues := map[string]string{ "connectInject.enabled": "true", "connectInject.consulNamespaces.consulDestinationNamespace": testNamespace, @@ -50,11 +51,6 @@ func TestIngressGatewaySingleNamespace(t *testing.T) { "global.enableConsulNamespaces": "true", "global.acls.manageSystemACLs": strconv.FormatBool(c.secure), "global.tls.enabled": strconv.FormatBool(c.secure), - - "ingressGateways.enabled": "true", - "ingressGateways.gateways[0].name": igName, - "ingressGateways.gateways[0].replicas": "1", - "ingressGateways.gateways[0].consulNamespace": testNamespace, } releaseName := helpers.RandomName() @@ -64,6 +60,25 @@ func TestIngressGatewaySingleNamespace(t *testing.T) { consulClient, _ := consulCluster.SetupConsulClient(t, c.secure) + // Create the destination namespace in the non-secure case. + // In the secure installation, this namespace is created by the server-acl-init job. + if !c.secure { + logger.Logf(t, "creating the %s namespace in Consul", testNamespace) + _, _, err := consulClient.Namespaces().Create(&api.Namespace{ + Name: testNamespace, + }, nil) + require.NoError(t, err) + } + + igName := "ingress-gateway" + logger.Log(t, "upgrading with ingress gateways enabled") + consulCluster.Upgrade(t, map[string]string{ + "ingressGateways.enabled": "true", + "ingressGateways.gateways[0].name": igName, + "ingressGateways.gateways[0].replicas": "1", + "ingressGateways.gateways[0].consulNamespace": testNamespace, + }) + logger.Logf(t, "creating Kubernetes namespace %s", testNamespace) k8s.RunKubectl(t, ctx.KubectlOptions(t), "create", "ns", testNamespace) helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() { @@ -143,6 +158,9 @@ func TestIngressGatewaySingleNamespace(t *testing.T) { // Test we can connect through the ingress gateway when both // the ingress gateway and the connect service are in different namespaces. +// These tests currently only test non-secure and secure without auto-encrypt installations +// because in the case of namespaces there isn't a significant distinction in code between auto-encrypt +// and non-auto-encrypt secure installations, so testing just one is enough. func TestIngressGatewayNamespaceMirroring(t *testing.T) { cfg := suite.Config() if !cfg.EnableEnterprise { @@ -153,10 +171,10 @@ func TestIngressGatewayNamespaceMirroring(t *testing.T) { secure bool }{ { - secure: false, + false, }, { - secure: true, + true, }, } for _, c := range cases { @@ -165,6 +183,8 @@ func TestIngressGatewayNamespaceMirroring(t *testing.T) { ctx := suite.Environment().DefaultContext(t) igName := "ingress" + // Install the Helm chart without the ingress gateway first + // so that we can create the namespace for it. helmValues := map[string]string{ "connectInject.enabled": "true", "connectInject.consulNamespaces.mirroringK8S": "true", diff --git a/acceptance/tests/ingress-gateway/ingress_gateway_test.go b/acceptance/tests/ingress-gateway/ingress_gateway_test.go index b6535439c1..a913d2c024 100644 --- a/acceptance/tests/ingress-gateway/ingress_gateway_test.go +++ b/acceptance/tests/ingress-gateway/ingress_gateway_test.go @@ -18,17 +18,24 @@ const StaticClientName = "static-client" // Test that ingress gateways work in a default installation and a secure installation. 
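// "Secure" turns on ACLs and TLS together; "auto-encrypt" additionally has
// the client agents fetch their TLS certificates from the servers at startup
// instead of using pre-provisioned certificates.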
func TestIngressGateway(t *testing.T) { cases := []struct { - secure bool + secure bool + autoEncrypt bool }{ { - secure: false, + false, + false, }, { - secure: true, + true, + false, + }, + { + true, + true, }, } for _, c := range cases { - name := fmt.Sprintf("secure: %t", c.secure) + name := fmt.Sprintf("secure: %t; auto-encrypt: %t", c.secure, c.autoEncrypt) t.Run(name, func(t *testing.T) { ctx := suite.Environment().DefaultContext(t) cfg := suite.Config() @@ -41,6 +48,7 @@ func TestIngressGateway(t *testing.T) { "global.acls.manageSystemACLs": strconv.FormatBool(c.secure), "global.tls.enabled": strconv.FormatBool(c.secure), + "global.tls.autoEncrypt": strconv.FormatBool(c.autoEncrypt), } releaseName := helpers.RandomName() diff --git a/acceptance/tests/wan-federation/main_test.go b/acceptance/tests/mesh-gateway/main_test.go similarity index 72% rename from acceptance/tests/wan-federation/main_test.go rename to acceptance/tests/mesh-gateway/main_test.go index 197a3181e8..fb8935441e 100644 --- a/acceptance/tests/wan-federation/main_test.go +++ b/acceptance/tests/mesh-gateway/main_test.go @@ -1,4 +1,4 @@ -package wanfederation +package meshgateway import ( "fmt" @@ -16,7 +16,7 @@ func TestMain(m *testing.M) { if suite.Config().EnableMultiCluster { os.Exit(suite.Run()) } else { - fmt.Println("Skipping wan federation tests because -enable-multi-cluster is not set") + fmt.Println("Skipping mesh gateway tests because -enable-multi-cluster is not set") os.Exit(0) } } diff --git a/acceptance/tests/mesh-gateway/mesh_gateway_test.go b/acceptance/tests/mesh-gateway/mesh_gateway_test.go new file mode 100644 index 0000000000..557f5befc1 --- /dev/null +++ b/acceptance/tests/mesh-gateway/mesh_gateway_test.go @@ -0,0 +1,301 @@ +package meshgateway + +import ( + "context" + "fmt" + "testing" + + "github.com/hashicorp/consul-k8s/acceptance/framework/consul" + "github.com/hashicorp/consul-k8s/acceptance/framework/environment" + "github.com/hashicorp/consul-k8s/acceptance/framework/helpers" + "github.com/hashicorp/consul-k8s/acceptance/framework/k8s" + "github.com/hashicorp/consul-k8s/acceptance/framework/logger" + "github.com/hashicorp/consul/api" + "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const StaticClientName = "static-client" + +// Test that Connect and WAN federation over mesh gateways work in a default installation, +// i.e. without ACLs; TLS stays enabled because it is required for WAN federation over mesh gateways.
+func TestMeshGatewayDefault(t *testing.T) { + env := suite.Environment() + cfg := suite.Config() + + primaryContext := env.DefaultContext(t) + secondaryContext := env.Context(t, environment.SecondaryContextName) + + primaryHelmValues := map[string]string{ + "global.datacenter": "dc1", + "global.tls.enabled": "true", + "global.tls.httpsOnly": "false", + "global.federation.enabled": "true", + "global.federation.createFederationSecret": "true", + + "connectInject.enabled": "true", + "connectInject.replicas": "1", + "controller.enabled": "true", + + "meshGateway.enabled": "true", + "meshGateway.replicas": "1", + } + + if cfg.UseKind { + primaryHelmValues["meshGateway.service.type"] = "NodePort" + primaryHelmValues["meshGateway.service.nodePort"] = "30000" + } + + releaseName := helpers.RandomName() + + // Install the primary consul cluster in the default kubernetes context + primaryConsulCluster := consul.NewHelmCluster(t, primaryHelmValues, primaryContext, cfg, releaseName) + primaryConsulCluster.Create(t) + + // Get the federation secret from the primary cluster and apply it to secondary cluster + federationSecretName := fmt.Sprintf("%s-consul-federation", releaseName) + logger.Logf(t, "retrieving federation secret %s from the primary cluster and applying to the secondary", federationSecretName) + federationSecret, err := primaryContext.KubernetesClient(t).CoreV1().Secrets(primaryContext.KubectlOptions(t).Namespace).Get(context.Background(), federationSecretName, metav1.GetOptions{}) + require.NoError(t, err) + federationSecret.ResourceVersion = "" + _, err = secondaryContext.KubernetesClient(t).CoreV1().Secrets(secondaryContext.KubectlOptions(t).Namespace).Create(context.Background(), federationSecret, metav1.CreateOptions{}) + require.NoError(t, err) + + // Create secondary cluster + secondaryHelmValues := map[string]string{ + "global.datacenter": "dc2", + + "global.tls.enabled": "true", + "global.tls.httpsOnly": "false", + "global.tls.caCert.secretName": federationSecretName, + "global.tls.caCert.secretKey": "caCert", + "global.tls.caKey.secretName": federationSecretName, + "global.tls.caKey.secretKey": "caKey", + + "global.federation.enabled": "true", + + "server.extraVolumes[0].type": "secret", + "server.extraVolumes[0].name": federationSecretName, + "server.extraVolumes[0].load": "true", + "server.extraVolumes[0].items[0].key": "serverConfigJSON", + "server.extraVolumes[0].items[0].path": "config.json", + + "connectInject.enabled": "true", + "connectInject.replicas": "1", + "controller.enabled": "true", + + "meshGateway.enabled": "true", + "meshGateway.replicas": "1", + } + + if cfg.UseKind { + secondaryHelmValues["meshGateway.service.type"] = "NodePort" + secondaryHelmValues["meshGateway.service.nodePort"] = "30000" + } + + // Install the secondary consul cluster in the secondary kubernetes context + secondaryConsulCluster := consul.NewHelmCluster(t, secondaryHelmValues, secondaryContext, cfg, releaseName) + secondaryConsulCluster.Create(t) + + if cfg.UseKind { + // This is a temporary workaround that seems to fix mesh gateway tests on kind 1.22.x. + // TODO (ishustava): we need to investigate this further and remove once we've found the issue.
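Before the kind-specific restart below, it is worth spelling out the federation-secret copy this test (and its secure variant further down) performs: read the secret from the primary cluster, check the error, clear the ResourceVersion, and recreate the secret in the secondary cluster. A sketch of those steps as a standalone helper, assuming nothing beyond client-go; the function name is hypothetical:

package sketch

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// copySecret is a hypothetical helper mirroring the test logic above: it
// reads a secret from one cluster and recreates it in another.
func copySecret(ctx context.Context, src, dst kubernetes.Interface, namespace, name string) error {
	secret, err := src.CoreV1().Secrets(namespace).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return fmt.Errorf("reading secret %s: %w", name, err)
	}
	// ResourceVersion is cluster-specific, and the API server rejects a
	// Create that carries one, so it must be cleared before reuse.
	secret.ResourceVersion = ""
	_, err = dst.CoreV1().Secrets(namespace).Create(ctx, secret, metav1.CreateOptions{})
	return err
}

The kubectl rollout commands that the kind workaround runs continue below.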
+ k8s.RunKubectl(t, primaryContext.KubectlOptions(t), "rollout", "restart", fmt.Sprintf("sts/%s-consul-server", releaseName)) + k8s.RunKubectl(t, primaryContext.KubectlOptions(t), "rollout", "status", fmt.Sprintf("sts/%s-consul-server", releaseName)) + } + + primaryClient, _ := primaryConsulCluster.SetupConsulClient(t, false) + secondaryClient, _ := secondaryConsulCluster.SetupConsulClient(t, false) + + // Verify federation between servers + logger.Log(t, "verifying federation was successful") + helpers.VerifyFederation(t, primaryClient, secondaryClient, releaseName, false) + + // Create a ProxyDefaults resource to configure services to use the mesh + // gateways. + logger.Log(t, "creating proxy-defaults config") + kustomizeDir := "../fixtures/bases/mesh-gateway" + k8s.KubectlApplyK(t, primaryContext.KubectlOptions(t), kustomizeDir) + helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() { + k8s.KubectlDeleteK(t, primaryContext.KubectlOptions(t), kustomizeDir) + }) + + // Check that we can connect services over the mesh gateways + logger.Log(t, "creating static-server in dc2") + k8s.DeployKustomize(t, secondaryContext.KubectlOptions(t), cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-server-inject") + + logger.Log(t, "creating static-client in dc1") + k8s.DeployKustomize(t, primaryContext.KubectlOptions(t), cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-multi-dc") + + logger.Log(t, "checking that connection is successful") + k8s.CheckStaticServerConnectionSuccessful(t, primaryContext.KubectlOptions(t), StaticClientName, "http://localhost:1234") +} + +// Test that Connect and wan federation over mesh gateways work in a secure installation, +// with ACLs and TLS with and without auto-encrypt enabled. +func TestMeshGatewaySecure(t *testing.T) { + cases := []struct { + name string + enableAutoEncrypt string + }{ + { + "with ACLs and TLS without auto-encrypt", + "false", + }, + { + "with ACLs and auto-encrypt", + "true", + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + env := suite.Environment() + cfg := suite.Config() + + primaryContext := env.DefaultContext(t) + secondaryContext := env.Context(t, environment.SecondaryContextName) + + primaryHelmValues := map[string]string{ + "global.datacenter": "dc1", + "global.tls.enabled": "true", + "global.tls.enableAutoEncrypt": c.enableAutoEncrypt, + + "global.acls.manageSystemACLs": "true", + "global.acls.createReplicationToken": "true", + + "global.federation.enabled": "true", + "global.federation.createFederationSecret": "true", + + "connectInject.enabled": "true", + "connectInject.replicas": "1", + "controller.enabled": "true", + + "meshGateway.enabled": "true", + "meshGateway.replicas": "1", + } + + if cfg.UseKind { + primaryHelmValues["meshGateway.service.type"] = "NodePort" + primaryHelmValues["meshGateway.service.nodePort"] = "30000" + } + + releaseName := helpers.RandomName() + + // Install the primary consul cluster in the default kubernetes context + primaryConsulCluster := consul.NewHelmCluster(t, primaryHelmValues, primaryContext, cfg, releaseName) + primaryConsulCluster.Create(t) + + // Get the federation secret from the primary cluster and apply it to secondary cluster + federationSecretName := fmt.Sprintf("%s-consul-federation", releaseName) + logger.Logf(t, "retrieving federation secret %s from the primary cluster and applying to the secondary", federationSecretName) + federationSecret, err := 
primaryContext.KubernetesClient(t).CoreV1().Secrets(primaryContext.KubectlOptions(t).Namespace).Get(context.Background(), federationSecretName, metav1.GetOptions{}) + require.NoError(t, err) + federationSecret.ResourceVersion = "" + _, err = secondaryContext.KubernetesClient(t).CoreV1().Secrets(secondaryContext.KubectlOptions(t).Namespace).Create(context.Background(), federationSecret, metav1.CreateOptions{}) + require.NoError(t, err) + + var k8sAuthMethodHost string + // When running on kind, the kube API address in kubeconfig will have a localhost address + // which will not work from inside the container. That's why we need to use the endpoints address instead, + // which will point to the node IP. + if cfg.UseKind { + // The Kubernetes AuthMethod host is read from the endpoints for the Kubernetes service. + kubernetesEndpoint, err := secondaryContext.KubernetesClient(t).CoreV1().Endpoints("default").Get(context.Background(), "kubernetes", metav1.GetOptions{}) + require.NoError(t, err) + k8sAuthMethodHost = fmt.Sprintf("%s:%d", kubernetesEndpoint.Subsets[0].Addresses[0].IP, kubernetesEndpoint.Subsets[0].Ports[0].Port) + } else { + k8sAuthMethodHost = k8s.KubernetesAPIServerHostFromOptions(t, secondaryContext.KubectlOptions(t)) + } + + // Create secondary cluster + secondaryHelmValues := map[string]string{ + "global.datacenter": "dc2", + + "global.tls.enabled": "true", + "global.tls.httpsOnly": "false", + "global.tls.enableAutoEncrypt": c.enableAutoEncrypt, + "global.tls.caCert.secretName": federationSecretName, + "global.tls.caCert.secretKey": "caCert", + "global.tls.caKey.secretName": federationSecretName, + "global.tls.caKey.secretKey": "caKey", + + "global.acls.manageSystemACLs": "true", + "global.acls.replicationToken.secretName": federationSecretName, + "global.acls.replicationToken.secretKey": "replicationToken", + + "global.federation.enabled": "true", + "global.federation.k8sAuthMethodHost": k8sAuthMethodHost, + "global.federation.primaryDatacenter": "dc1", + + "server.extraVolumes[0].type": "secret", + "server.extraVolumes[0].name": federationSecretName, + "server.extraVolumes[0].load": "true", + "server.extraVolumes[0].items[0].key": "serverConfigJSON", + "server.extraVolumes[0].items[0].path": "config.json", + + "connectInject.enabled": "true", + "connectInject.replicas": "1", + "controller.enabled": "true", + + "meshGateway.enabled": "true", + "meshGateway.replicas": "1", + } + + if cfg.UseKind { + secondaryHelmValues["meshGateway.service.type"] = "NodePort" + secondaryHelmValues["meshGateway.service.nodePort"] = "30000" + } + + // Install the secondary consul cluster in the secondary kubernetes context + secondaryConsulCluster := consul.NewHelmCluster(t, secondaryHelmValues, secondaryContext, cfg, releaseName) + secondaryConsulCluster.Create(t) + + if cfg.UseKind { + // This is a temporary workaround that seems to fix mesh gateway tests on kind 1.22.x. + // TODO (ishustava): we need to investigate this further and remove once we've found the issue.
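The kind branch above resolves the auth-method host from the default/kubernetes Endpoints object because the kubeconfig URL points at localhost, which pods cannot reach. The same lookup as a hypothetical helper; like the test, it assumes the Endpoints object has at least one subset with an address and a port:

package sketch

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// apiServerHostFromEndpoints returns a host:port for the API server that
// is reachable from inside the cluster's node network, mirroring the kind
// branch in the test above.
func apiServerHostFromEndpoints(ctx context.Context, client kubernetes.Interface) (string, error) {
	ep, err := client.CoreV1().Endpoints("default").Get(ctx, "kubernetes", metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	if len(ep.Subsets) == 0 || len(ep.Subsets[0].Addresses) == 0 || len(ep.Subsets[0].Ports) == 0 {
		return "", fmt.Errorf("kubernetes endpoints has no reachable address")
	}
	return fmt.Sprintf("%s:%d", ep.Subsets[0].Addresses[0].IP, ep.Subsets[0].Ports[0].Port), nil
}

The rollout restart that the kind workaround runs continues below.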
+ k8s.RunKubectl(t, primaryContext.KubectlOptions(t), "rollout", "restart", fmt.Sprintf("sts/%s-consul-server", releaseName)) + k8s.RunKubectl(t, primaryContext.KubectlOptions(t), "rollout", "status", fmt.Sprintf("sts/%s-consul-server", releaseName)) + } + + primaryClient, _ := primaryConsulCluster.SetupConsulClient(t, true) + secondaryClient, _ := secondaryConsulCluster.SetupConsulClient(t, true) + + // Verify federation between servers + logger.Log(t, "verifying federation was successful") + helpers.VerifyFederation(t, primaryClient, secondaryClient, releaseName, true) + + // Create a ProxyDefaults resource to configure services to use the mesh + // gateways. + logger.Log(t, "creating proxy-defaults config") + kustomizeDir := "../fixtures/bases/mesh-gateway" + k8s.KubectlApplyK(t, secondaryContext.KubectlOptions(t), kustomizeDir) + helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() { + k8s.KubectlDeleteK(t, secondaryContext.KubectlOptions(t), kustomizeDir) + }) + + // Check that we can connect services over the mesh gateways + logger.Log(t, "creating static-server in dc2") + k8s.DeployKustomize(t, secondaryContext.KubectlOptions(t), cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-server-inject") + + logger.Log(t, "creating static-client in dc1") + k8s.DeployKustomize(t, primaryContext.KubectlOptions(t), cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-multi-dc") + + logger.Log(t, "creating intention") + _, _, err = primaryClient.ConfigEntries().Set(&api.ServiceIntentionsConfigEntry{ + Kind: api.ServiceIntentions, + Name: "static-server", + Sources: []*api.SourceIntention{ + { + Name: StaticClientName, + Action: api.IntentionActionAllow, + }, + }, + }, nil) + require.NoError(t, err) + + logger.Log(t, "checking that connection is successful") + k8s.CheckStaticServerConnectionSuccessful(t, primaryContext.KubectlOptions(t), StaticClientName, "http://localhost:1234") + }) + } +} diff --git a/acceptance/tests/metrics/metrics_test.go b/acceptance/tests/metrics/metrics_test.go index 5d05e45aa4..16bc101125 100644 --- a/acceptance/tests/metrics/metrics_test.go +++ b/acceptance/tests/metrics/metrics_test.go @@ -31,12 +31,9 @@ func TestComponentMetrics(t *testing.T) { "global.datacenter": "dc1", "global.metrics.enabled": "true", "global.metrics.enableAgentMetrics": "true", - // Agents have been removed but there could potentially be customers that are still running them. We - // are using client.enabled to cover that scenario and to make sure agent metrics still works with - // consul-dataplane. 
- "client.enabled": "true", "connectInject.enabled": "true", + "controller.enabled": "true", "meshGateway.enabled": "true", "meshGateway.replicas": "1", @@ -80,13 +77,13 @@ func TestComponentMetrics(t *testing.T) { require.NoError(t, err) require.Contains(t, metricsOutput, `consul_acl_ResolveToken{quantile="0.5"}`) - logger.Log(t, "ingress gateway metrics") + // Ingress Gateway Metrics assertGatewayMetricsEnabled(t, ctx, ns, "ingress-gateway", `envoy_cluster_assignment_stale{local_cluster="ingress-gateway",consul_source_service="ingress-gateway"`) - logger.Log(t, "terminating gateway metrics") + // Terminating Gateway Metrics assertGatewayMetricsEnabled(t, ctx, ns, "terminating-gateway", `envoy_cluster_assignment_stale{local_cluster="terminating-gateway",consul_source_service="terminating-gateway"`) - logger.Log(t, "mesh gateway metrics") + // Mesh Gateway Metrics assertGatewayMetricsEnabled(t, ctx, ns, "mesh-gateway", `envoy_cluster_assignment_stale{local_cluster="mesh-gateway",consul_source_service="mesh-gateway"`) } @@ -99,8 +96,9 @@ func TestAppMetrics(t *testing.T) { ns := ctx.KubectlOptions(t).Namespace helmValues := map[string]string{ - "global.datacenter": "dc1", - "global.metrics.enabled": "true", + "global.datacenter": "dc1", + "global.metrics.enabled": "true", + "connectInject.enabled": "true", "connectInject.metrics.defaultEnableMerging": "true", } diff --git a/acceptance/tests/partitions/partitions_connect_test.go b/acceptance/tests/partitions/partitions_connect_test.go index f205ac67e2..e1a43850d6 100644 --- a/acceptance/tests/partitions/partitions_connect_test.go +++ b/acceptance/tests/partitions/partitions_connect_test.go @@ -23,7 +23,7 @@ const staticServerName = "static-server" const staticServerNamespace = "ns1" const StaticClientNamespace = "ns2" -// Test that Connect works in a default and ACLsEnabled installations for X-Partition and in-partition networking. +// Test that Connect works in a default and ACLsAndAutoEncryptEnabled installations for X-Partition and in-partition networking. 
func TestPartitions_Connect(t *testing.T) { env := suite.Environment() cfg := suite.Config() @@ -36,10 +36,10 @@ func TestPartitions_Connect(t *testing.T) { const secondaryPartition = "secondary" const defaultNamespace = "default" cases := []struct { - name string - destinationNamespace string - mirrorK8S bool - ACLsEnabled bool + name string + destinationNamespace string + mirrorK8S bool + ACLsAndAutoEncryptEnabled bool }{ { "default destination namespace", @@ -48,7 +48,7 @@ func TestPartitions_Connect(t *testing.T) { false, }, { - "default destination namespace; ACLs enabled", + "default destination namespace; ACLs and auto-encrypt enabled", defaultNamespace, false, true, @@ -60,7 +60,7 @@ func TestPartitions_Connect(t *testing.T) { false, }, { - "single destination namespace; ACLs enabled", + "single destination namespace; ACLs and auto-encrypt enabled", staticServerNamespace, false, true, @@ -72,7 +72,7 @@ func TestPartitions_Connect(t *testing.T) { false, }, { - "mirror k8s namespaces; ACLs enabled", + "mirror k8s namespaces; ACLs and auto-encrypt enabled", staticServerNamespace, true, true, @@ -81,18 +81,21 @@ func TestPartitions_Connect(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { - defaultPartitionClusterContext := env.DefaultContext(t) - secondaryPartitionClusterContext := env.Context(t, environment.SecondaryContextName) + serverClusterContext := env.DefaultContext(t) + clientClusterContext := env.Context(t, environment.SecondaryContextName) + + ctx := context.Background() commonHelmValues := map[string]string{ "global.adminPartitions.enabled": "true", - "global.enableConsulNamespaces": "true", - "global.logLevel": "debug", - "global.tls.enabled": "true", - "global.tls.httpsOnly": strconv.FormatBool(c.ACLsEnabled), + "global.enableConsulNamespaces": "true", + + "global.tls.enabled": "true", + "global.tls.httpsOnly": strconv.FormatBool(c.ACLsAndAutoEncryptEnabled), + "global.tls.enableAutoEncrypt": strconv.FormatBool(c.ACLsAndAutoEncryptEnabled), - "global.acls.manageSystemACLs": strconv.FormatBool(c.ACLsEnabled), + "global.acls.manageSystemACLs": strconv.FormatBool(c.ACLsAndAutoEncryptEnabled), "connectInject.enabled": "true", // When mirroringK8S is set, this setting is ignored. @@ -102,50 +105,62 @@ func TestPartitions_Connect(t *testing.T) { "meshGateway.enabled": "true", "meshGateway.replicas": "1", + "controller.enabled": "true", + "dns.enabled": "true", "dns.enableRedirection": strconv.FormatBool(cfg.EnableTransparentProxy), } - defaultPartitionHelmValues := make(map[string]string) + serverHelmValues := map[string]string{ + "server.exposeGossipAndRPCPorts": "true", + } // On Kind, there are no load balancers but since all clusters // share the same node network (docker bridge), we can use // a NodePort service so that we can access node(s) in a different Kind cluster. if cfg.UseKind { - defaultPartitionHelmValues["meshGateway.service.type"] = "NodePort" - defaultPartitionHelmValues["meshGateway.service.nodePort"] = "30200" // todo: do we need to set this port? 
- defaultPartitionHelmValues["server.exposeService.type"] = "NodePort" - defaultPartitionHelmValues["server.exposeService.nodePort.https"] = "30000" - defaultPartitionHelmValues["server.exposeService.nodePort.grpc"] = "30100" + serverHelmValues["global.adminPartitions.service.type"] = "NodePort" + serverHelmValues["global.adminPartitions.service.nodePort.https"] = "30000" + serverHelmValues["meshGateway.service.type"] = "NodePort" + serverHelmValues["meshGateway.service.nodePort"] = "30100" + serverHelmValues["server.exposeService.type"] = "NodePort" } releaseName := helpers.RandomName() - helpers.MergeMaps(defaultPartitionHelmValues, commonHelmValues) + helpers.MergeMaps(serverHelmValues, commonHelmValues) // Install the consul cluster with servers in the default kubernetes context. - serverConsulCluster := consul.NewHelmCluster(t, defaultPartitionHelmValues, defaultPartitionClusterContext, cfg, releaseName) + serverConsulCluster := consul.NewHelmCluster(t, serverHelmValues, serverClusterContext, cfg, releaseName) serverConsulCluster.Create(t) // Get the TLS CA certificate and key secret from the server cluster and apply it to the client cluster. caCertSecretName := fmt.Sprintf("%s-consul-ca-cert", releaseName) + caKeySecretName := fmt.Sprintf("%s-consul-ca-key", releaseName) logger.Logf(t, "retrieving ca cert secret %s from the server cluster and applying to the client cluster", caCertSecretName) - k8s.CopySecret(t, defaultPartitionClusterContext, secondaryPartitionClusterContext, caCertSecretName) + k8s.CopySecret(t, serverClusterContext, clientClusterContext, caCertSecretName) + + if !c.ACLsAndAutoEncryptEnabled { + // When auto-encrypt is disabled, we need both + // the CA cert and CA key to be available in the clients cluster to generate client certificates and keys. + logger.Logf(t, "retrieving ca key secret %s from the server cluster and applying to the client cluster", caKeySecretName) + k8s.CopySecret(t, serverClusterContext, clientClusterContext, caKeySecretName) + } partitionToken := fmt.Sprintf("%s-consul-partitions-acl-token", releaseName) - if c.ACLsEnabled { + if c.ACLsAndAutoEncryptEnabled { logger.Logf(t, "retrieving partition token secret %s from the server cluster and applying to the client cluster", partitionToken) - k8s.CopySecret(t, defaultPartitionClusterContext, secondaryPartitionClusterContext, partitionToken) + k8s.CopySecret(t, serverClusterContext, clientClusterContext, partitionToken) } - partitionServiceName := fmt.Sprintf("%s-consul-expose-servers", releaseName) - partitionSvcAddress := k8s.ServiceHost(t, cfg, defaultPartitionClusterContext, partitionServiceName) + partitionServiceName := fmt.Sprintf("%s-consul-partition", releaseName) + partitionSvcAddress := k8s.ServiceHost(t, cfg, serverClusterContext, partitionServiceName) - k8sAuthMethodHost := k8s.KubernetesAPIServerHost(t, cfg, secondaryPartitionClusterContext) + k8sAuthMethodHost := k8s.KubernetesAPIServerHost(t, cfg, clientClusterContext) // Create client cluster. 
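helpers.MergeMaps above folds the shared commonHelmValues into the per-cluster map before install. Its exact semantics are not shown in this diff; the sketch below assumes the plausible behaviour that entries of the second map are copied into the first, overwriting duplicate keys. The client cluster's own Helm values follow the sketch.

package main

import "fmt"

// mergeMaps copies entries of b into a, overwriting duplicate keys. This
// is an assumption about helpers.MergeMaps' behaviour, not a copy of it.
func mergeMaps(a, b map[string]string) {
	for k, v := range b {
		a[k] = v
	}
}

func main() {
	server := map[string]string{"server.exposeGossipAndRPCPorts": "true"}
	common := map[string]string{"global.adminPartitions.enabled": "true"}
	mergeMaps(server, common)
	fmt.Println(server) // both keys present after the merge
}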
- secondaryPartitionHelmValues := map[string]string{ + clientHelmValues := map[string]string{ "global.enabled": "false", "global.adminPartitions.name": secondaryPartition, @@ -156,64 +171,80 @@ func TestPartitions_Connect(t *testing.T) { "externalServers.enabled": "true", "externalServers.hosts[0]": partitionSvcAddress, "externalServers.tlsServerName": "server.dc1.consul", + + "client.enabled": "true", + "client.exposeGossipPorts": "true", + "client.join[0]": partitionSvcAddress, } - if c.ACLsEnabled { + if c.ACLsAndAutoEncryptEnabled { // Setup partition token and auth method host if ACLs enabled. - secondaryPartitionHelmValues["global.acls.bootstrapToken.secretName"] = partitionToken - secondaryPartitionHelmValues["global.acls.bootstrapToken.secretKey"] = "token" - secondaryPartitionHelmValues["externalServers.k8sAuthMethodHost"] = k8sAuthMethodHost + clientHelmValues["global.acls.bootstrapToken.secretName"] = partitionToken + clientHelmValues["global.acls.bootstrapToken.secretKey"] = "token" + clientHelmValues["externalServers.k8sAuthMethodHost"] = k8sAuthMethodHost + } else { + // Provide CA key when auto-encrypt is disabled. + clientHelmValues["global.tls.caKey.secretName"] = caKeySecretName + clientHelmValues["global.tls.caKey.secretKey"] = "tls.key" } if cfg.UseKind { - secondaryPartitionHelmValues["externalServers.httpsPort"] = "30000" - secondaryPartitionHelmValues["externalServers.grpcPort"] = "30100" - secondaryPartitionHelmValues["meshGateway.service.type"] = "NodePort" - secondaryPartitionHelmValues["meshGateway.service.nodePort"] = "30200" + clientHelmValues["externalServers.httpsPort"] = "30000" + clientHelmValues["meshGateway.service.type"] = "NodePort" + clientHelmValues["meshGateway.service.nodePort"] = "30100" } - helpers.MergeMaps(secondaryPartitionHelmValues, commonHelmValues) + helpers.MergeMaps(clientHelmValues, commonHelmValues) // Install the consul cluster without servers in the client cluster kubernetes context. - clientConsulCluster := consul.NewHelmCluster(t, secondaryPartitionHelmValues, secondaryPartitionClusterContext, cfg, releaseName) + clientConsulCluster := consul.NewHelmCluster(t, clientHelmValues, clientClusterContext, cfg, releaseName) clientConsulCluster.Create(t) - defaultPartitionClusterStaticServerOpts := &terratestk8s.KubectlOptions{ - ContextName: defaultPartitionClusterContext.KubectlOptions(t).ContextName, - ConfigPath: defaultPartitionClusterContext.KubectlOptions(t).ConfigPath, + // Ensure consul clients are created. 
+ agentPodList, err := clientClusterContext.KubernetesClient(t).CoreV1().Pods(clientClusterContext.KubectlOptions(t).Namespace).List(ctx, metav1.ListOptions{LabelSelector: "app=consul,component=client"}) + require.NoError(t, err) + require.NotEmpty(t, agentPodList.Items) + + output, err := k8s.RunKubectlAndGetOutputE(t, clientClusterContext.KubectlOptions(t), "logs", agentPodList.Items[0].Name, "-n", clientClusterContext.KubectlOptions(t).Namespace) + require.NoError(t, err) + require.Contains(t, output, "Partition: 'secondary'") + + serverClusterStaticServerOpts := &terratestk8s.KubectlOptions{ + ContextName: serverClusterContext.KubectlOptions(t).ContextName, + ConfigPath: serverClusterContext.KubectlOptions(t).ConfigPath, Namespace: staticServerNamespace, } - defaultPartitionClusterStaticClientOpts := &terratestk8s.KubectlOptions{ - ContextName: defaultPartitionClusterContext.KubectlOptions(t).ContextName, - ConfigPath: defaultPartitionClusterContext.KubectlOptions(t).ConfigPath, + serverClusterStaticClientOpts := &terratestk8s.KubectlOptions{ + ContextName: serverClusterContext.KubectlOptions(t).ContextName, + ConfigPath: serverClusterContext.KubectlOptions(t).ConfigPath, Namespace: StaticClientNamespace, } - secondaryPartitionClusterStaticServerOpts := &terratestk8s.KubectlOptions{ - ContextName: secondaryPartitionClusterContext.KubectlOptions(t).ContextName, - ConfigPath: secondaryPartitionClusterContext.KubectlOptions(t).ConfigPath, + clientClusterStaticServerOpts := &terratestk8s.KubectlOptions{ + ContextName: clientClusterContext.KubectlOptions(t).ContextName, + ConfigPath: clientClusterContext.KubectlOptions(t).ConfigPath, Namespace: staticServerNamespace, } - secondaryPartitionClusterStaticClientOpts := &terratestk8s.KubectlOptions{ - ContextName: secondaryPartitionClusterContext.KubectlOptions(t).ContextName, - ConfigPath: secondaryPartitionClusterContext.KubectlOptions(t).ConfigPath, + clientClusterStaticClientOpts := &terratestk8s.KubectlOptions{ + ContextName: clientClusterContext.KubectlOptions(t).ContextName, + ConfigPath: clientClusterContext.KubectlOptions(t).ConfigPath, Namespace: StaticClientNamespace, } logger.Logf(t, "creating namespaces %s and %s in servers cluster", staticServerNamespace, StaticClientNamespace) - k8s.RunKubectl(t, defaultPartitionClusterContext.KubectlOptions(t), "create", "ns", staticServerNamespace) - k8s.RunKubectl(t, defaultPartitionClusterContext.KubectlOptions(t), "create", "ns", StaticClientNamespace) + k8s.RunKubectl(t, serverClusterContext.KubectlOptions(t), "create", "ns", staticServerNamespace) + k8s.RunKubectl(t, serverClusterContext.KubectlOptions(t), "create", "ns", StaticClientNamespace) helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() { - k8s.RunKubectl(t, defaultPartitionClusterContext.KubectlOptions(t), "delete", "ns", staticServerNamespace, StaticClientNamespace) + k8s.RunKubectl(t, serverClusterContext.KubectlOptions(t), "delete", "ns", staticServerNamespace, StaticClientNamespace) }) logger.Logf(t, "creating namespaces %s and %s in clients cluster", staticServerNamespace, StaticClientNamespace) - k8s.RunKubectl(t, secondaryPartitionClusterContext.KubectlOptions(t), "create", "ns", staticServerNamespace) - k8s.RunKubectl(t, secondaryPartitionClusterContext.KubectlOptions(t), "create", "ns", StaticClientNamespace) + k8s.RunKubectl(t, clientClusterContext.KubectlOptions(t), "create", "ns", staticServerNamespace) + k8s.RunKubectl(t, clientClusterContext.KubectlOptions(t), "create", "ns", StaticClientNamespace) helpers.Cleanup(t, 
cfg.NoCleanupOnFailure, func() { - k8s.RunKubectl(t, secondaryPartitionClusterContext.KubectlOptions(t), "delete", "ns", staticServerNamespace, StaticClientNamespace) + k8s.RunKubectl(t, clientClusterContext.KubectlOptions(t), "delete", "ns", staticServerNamespace, StaticClientNamespace) }) - consulClient, _ := serverConsulCluster.SetupConsulClient(t, c.ACLsEnabled) + consulClient, _ := serverConsulCluster.SetupConsulClient(t, c.ACLsAndAutoEncryptEnabled) serverQueryServerOpts := &api.QueryOptions{Namespace: staticServerNamespace, Partition: defaultPartition} clientQueryServerOpts := &api.QueryOptions{Namespace: StaticClientNamespace, Partition: defaultPartition} @@ -229,12 +260,12 @@ func TestPartitions_Connect(t *testing.T) { } // Check that the ACL token is deleted. - if c.ACLsEnabled { + if c.ACLsAndAutoEncryptEnabled { // We need to register the cleanup function before we create the deployments // because golang will execute them in reverse order i.e. the last registered // cleanup function will be executed first. t.Cleanup(func() { - if c.ACLsEnabled { + if c.ACLsAndAutoEncryptEnabled { retry.Run(t, func(r *retry.R) { tokens, _, err := consulClient.ACL().TokenList(serverQueryServerOpts) require.NoError(r, err) @@ -268,43 +299,43 @@ func TestPartitions_Connect(t *testing.T) { logger.Log(t, "creating proxy-defaults config") kustomizeDir := "../fixtures/bases/mesh-gateway" - k8s.KubectlApplyK(t, defaultPartitionClusterContext.KubectlOptions(t), kustomizeDir) + k8s.KubectlApplyK(t, serverClusterContext.KubectlOptions(t), kustomizeDir) helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() { - k8s.KubectlDeleteK(t, defaultPartitionClusterContext.KubectlOptions(t), kustomizeDir) + k8s.KubectlDeleteK(t, serverClusterContext.KubectlOptions(t), kustomizeDir) }) - k8s.KubectlApplyK(t, secondaryPartitionClusterContext.KubectlOptions(t), kustomizeDir) + k8s.KubectlApplyK(t, clientClusterContext.KubectlOptions(t), kustomizeDir) helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() { - k8s.KubectlDeleteK(t, secondaryPartitionClusterContext.KubectlOptions(t), kustomizeDir) + k8s.KubectlDeleteK(t, clientClusterContext.KubectlOptions(t), kustomizeDir) }) // This section of the tests runs the in-partition networking tests. 
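Both the in-partition run that follows and the cross-partition run after it gate traffic on a service-intentions config entry, created further down once the connection is confirmed to fail. A sketch of that write; the helper name is hypothetical, and it assumes the Partition field on api.SourceIntention that the cross-partition cases rely on:

package sketch

import "github.com/hashicorp/consul/api"

// allowClient writes a service-intentions config entry permitting
// static-client to reach static-server. An empty sourcePartition keeps the
// source in the local partition; a non-empty one models the
// cross-partition cases below.
func allowClient(client *api.Client, sourcePartition string) error {
	_, _, err := client.ConfigEntries().Set(&api.ServiceIntentionsConfigEntry{
		Kind: api.ServiceIntentions,
		Name: "static-server",
		Sources: []*api.SourceIntention{
			{
				Name:      "static-client",
				Partition: sourcePartition,
				Action:    api.IntentionActionAllow,
			},
		},
	}, nil)
	return err
}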
t.Run("in-partition", func(t *testing.T) { logger.Log(t, "test in-partition networking") logger.Log(t, "creating static-server and static-client deployments in server cluster") - k8s.DeployKustomize(t, defaultPartitionClusterStaticServerOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-server-inject") + k8s.DeployKustomize(t, serverClusterStaticServerOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-server-inject") if cfg.EnableTransparentProxy { - k8s.DeployKustomize(t, defaultPartitionClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-tproxy") + k8s.DeployKustomize(t, serverClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-tproxy") } else { if c.destinationNamespace == defaultNamespace { - k8s.DeployKustomize(t, defaultPartitionClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-inject") + k8s.DeployKustomize(t, serverClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-inject") } else { - k8s.DeployKustomize(t, defaultPartitionClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-namespaces") + k8s.DeployKustomize(t, serverClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-namespaces") } } logger.Log(t, "creating static-server and static-client deployments in client cluster") - k8s.DeployKustomize(t, secondaryPartitionClusterStaticServerOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-server-inject") + k8s.DeployKustomize(t, clientClusterStaticServerOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-server-inject") if cfg.EnableTransparentProxy { - k8s.DeployKustomize(t, secondaryPartitionClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-tproxy") + k8s.DeployKustomize(t, clientClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-tproxy") } else { if c.destinationNamespace == defaultNamespace { - k8s.DeployKustomize(t, secondaryPartitionClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-inject") + k8s.DeployKustomize(t, clientClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-inject") } else { - k8s.DeployKustomize(t, secondaryPartitionClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-namespaces") + k8s.DeployKustomize(t, clientClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-namespaces") } } // Check that both static-server and static-client have been injected and now have 2 containers in server cluster. 
for _, labelSelector := range []string{"app=static-server", "app=static-client"} { - podList, err := defaultPartitionClusterContext.KubernetesClient(t).CoreV1().Pods(metav1.NamespaceAll).List(context.Background(), metav1.ListOptions{ + podList, err := serverClusterContext.KubernetesClient(t).CoreV1().Pods(metav1.NamespaceAll).List(context.Background(), metav1.ListOptions{ LabelSelector: labelSelector, }) require.NoError(t, err) @@ -314,7 +345,7 @@ func TestPartitions_Connect(t *testing.T) { // Check that both static-server and static-client have been injected and now have 2 containers in client cluster. for _, labelSelector := range []string{"app=static-server", "app=static-client"} { - podList, err := secondaryPartitionClusterContext.KubernetesClient(t).CoreV1().Pods(metav1.NamespaceAll).List(context.Background(), metav1.ListOptions{ + podList, err := clientClusterContext.KubernetesClient(t).CoreV1().Pods(metav1.NamespaceAll).List(context.Background(), metav1.ListOptions{ LabelSelector: labelSelector, }) require.NoError(t, err) @@ -346,14 +377,14 @@ func TestPartitions_Connect(t *testing.T) { require.NoError(t, err) require.Len(t, services, 1) - if c.ACLsEnabled { + if c.ACLsAndAutoEncryptEnabled { logger.Log(t, "checking that the connection is not successful because there's no intention") if cfg.EnableTransparentProxy { - k8s.CheckStaticServerConnectionFailing(t, defaultPartitionClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.%s", staticServerNamespace)) - k8s.CheckStaticServerConnectionFailing(t, secondaryPartitionClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.%s", staticServerNamespace)) + k8s.CheckStaticServerConnectionFailing(t, serverClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.%s", staticServerNamespace)) + k8s.CheckStaticServerConnectionFailing(t, clientClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.%s", staticServerNamespace)) } else { - k8s.CheckStaticServerConnectionFailing(t, defaultPartitionClusterStaticClientOpts, StaticClientName, "http://localhost:1234") - k8s.CheckStaticServerConnectionFailing(t, secondaryPartitionClusterStaticClientOpts, StaticClientName, "http://localhost:1234") + k8s.CheckStaticServerConnectionFailing(t, serverClusterStaticClientOpts, StaticClientName, "http://localhost:1234") + k8s.CheckStaticServerConnectionFailing(t, clientClusterStaticClientOpts, StaticClientName, "http://localhost:1234") } intention := &api.ServiceIntentionsConfigEntry{ @@ -391,18 +422,18 @@ func TestPartitions_Connect(t *testing.T) { logger.Log(t, "checking that connection is successful") if cfg.EnableTransparentProxy { - k8s.CheckStaticServerConnectionSuccessful(t, defaultPartitionClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.%s", staticServerNamespace)) - k8s.CheckStaticServerConnectionSuccessful(t, secondaryPartitionClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.%s", staticServerNamespace)) + k8s.CheckStaticServerConnectionSuccessful(t, serverClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.%s", staticServerNamespace)) + k8s.CheckStaticServerConnectionSuccessful(t, clientClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.%s", staticServerNamespace)) } else { - k8s.CheckStaticServerConnectionSuccessful(t, defaultPartitionClusterStaticClientOpts, StaticClientName, "http://localhost:1234") - k8s.CheckStaticServerConnectionSuccessful(t, 
secondaryPartitionClusterStaticClientOpts, StaticClientName, "http://localhost:1234") + k8s.CheckStaticServerConnectionSuccessful(t, serverClusterStaticClientOpts, StaticClientName, "http://localhost:1234") + k8s.CheckStaticServerConnectionSuccessful(t, clientClusterStaticClientOpts, StaticClientName, "http://localhost:1234") } // Test that kubernetes readiness status is synced to Consul. // Create the file so that the readiness probe of the static-server pod fails. logger.Log(t, "testing k8s -> consul health checks sync by making the static-server unhealthy") - k8s.RunKubectl(t, defaultPartitionClusterStaticServerOpts, "exec", "deploy/"+staticServerName, "--", "touch", "/tmp/unhealthy") - k8s.RunKubectl(t, secondaryPartitionClusterStaticServerOpts, "exec", "deploy/"+staticServerName, "--", "touch", "/tmp/unhealthy") + k8s.RunKubectl(t, serverClusterStaticServerOpts, "exec", "deploy/"+staticServerName, "--", "touch", "/tmp/unhealthy") + k8s.RunKubectl(t, clientClusterStaticServerOpts, "exec", "deploy/"+staticServerName, "--", "touch", "/tmp/unhealthy") // The readiness probe should take a moment to be reflected in Consul, CheckStaticServerConnection will retry // until Consul marks the service instance unavailable for mesh traffic, causing the connection to fail. @@ -411,41 +442,41 @@ func TestPartitions_Connect(t *testing.T) { // from server, which is the case when a connection is unsuccessful due to intentions in other tests. logger.Log(t, "checking that connection is unsuccessful") if cfg.EnableTransparentProxy { - k8s.CheckStaticServerConnectionMultipleFailureMessages(t, defaultPartitionClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server", "curl: (7) Failed to connect to static-server.ns1 port 80: Connection refused"}, "", fmt.Sprintf("http://static-server.%s", staticServerNamespace)) - k8s.CheckStaticServerConnectionMultipleFailureMessages(t, secondaryPartitionClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server", "curl: (7) Failed to connect to static-server.ns1 port 80: Connection refused"}, "", fmt.Sprintf("http://static-server.%s", staticServerNamespace)) + k8s.CheckStaticServerConnectionMultipleFailureMessages(t, serverClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server", "curl: (7) Failed to connect to static-server.ns1 port 80: Connection refused"}, "", fmt.Sprintf("http://static-server.%s", staticServerNamespace)) + k8s.CheckStaticServerConnectionMultipleFailureMessages(t, clientClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server", "curl: (7) Failed to connect to static-server.ns1 port 80: Connection refused"}, "", fmt.Sprintf("http://static-server.%s", staticServerNamespace)) } else { - k8s.CheckStaticServerConnectionMultipleFailureMessages(t, defaultPartitionClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server"}, "", "http://localhost:1234") - k8s.CheckStaticServerConnectionMultipleFailureMessages(t, secondaryPartitionClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server"}, "", "http://localhost:1234") + 
k8s.CheckStaticServerConnectionMultipleFailureMessages(t, serverClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server"}, "", "http://localhost:1234") + k8s.CheckStaticServerConnectionMultipleFailureMessages(t, clientClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server"}, "", "http://localhost:1234") } }) // This section of the tests runs the cross-partition networking tests. t.Run("cross-partition", func(t *testing.T) { logger.Log(t, "test cross-partition networking") logger.Log(t, "creating static-server and static-client deployments in server cluster") - k8s.DeployKustomize(t, defaultPartitionClusterStaticServerOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-server-inject") + k8s.DeployKustomize(t, serverClusterStaticServerOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-server-inject") if cfg.EnableTransparentProxy { - k8s.DeployKustomize(t, defaultPartitionClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-tproxy") + k8s.DeployKustomize(t, serverClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-tproxy") } else { if c.destinationNamespace == defaultNamespace { - k8s.DeployKustomize(t, defaultPartitionClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-partitions/default-ns-partition") + k8s.DeployKustomize(t, serverClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-partitions/default-ns-partition") } else { - k8s.DeployKustomize(t, defaultPartitionClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-partitions/ns-partition") + k8s.DeployKustomize(t, serverClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-partitions/ns-partition") } } logger.Log(t, "creating static-server and static-client deployments in client cluster") - k8s.DeployKustomize(t, secondaryPartitionClusterStaticServerOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-server-inject") + k8s.DeployKustomize(t, clientClusterStaticServerOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-server-inject") if cfg.EnableTransparentProxy { - k8s.DeployKustomize(t, secondaryPartitionClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-tproxy") + k8s.DeployKustomize(t, clientClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-tproxy") } else { if c.destinationNamespace == defaultNamespace { - k8s.DeployKustomize(t, secondaryPartitionClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-partitions/default-ns-default-partition") + k8s.DeployKustomize(t, clientClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-partitions/default-ns-default-partition") } else { - k8s.DeployKustomize(t, secondaryPartitionClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-partitions/ns-default-partition") + k8s.DeployKustomize(t, clientClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, 
"../fixtures/cases/static-client-partitions/ns-default-partition") } } // Check that both static-server and static-client have been injected and now have 2 containers in server cluster. for _, labelSelector := range []string{"app=static-server", "app=static-client"} { - podList, err := defaultPartitionClusterContext.KubernetesClient(t).CoreV1().Pods(metav1.NamespaceAll).List(context.Background(), metav1.ListOptions{ + podList, err := serverClusterContext.KubernetesClient(t).CoreV1().Pods(metav1.NamespaceAll).List(context.Background(), metav1.ListOptions{ LabelSelector: labelSelector, }) require.NoError(t, err) @@ -455,7 +486,7 @@ func TestPartitions_Connect(t *testing.T) { // Check that both static-server and static-client have been injected and now have 2 containers in client cluster. for _, labelSelector := range []string{"app=static-server", "app=static-client"} { - podList, err := secondaryPartitionClusterContext.KubernetesClient(t).CoreV1().Pods(metav1.NamespaceAll).List(context.Background(), metav1.ListOptions{ + podList, err := clientClusterContext.KubernetesClient(t).CoreV1().Pods(metav1.NamespaceAll).List(context.Background(), metav1.ListOptions{ LabelSelector: labelSelector, }) require.NoError(t, err) @@ -492,34 +523,34 @@ func TestPartitions_Connect(t *testing.T) { logger.Log(t, "creating exported services") if c.destinationNamespace == defaultNamespace { - k8s.KubectlApplyK(t, defaultPartitionClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/default-partition-default") - k8s.KubectlApplyK(t, secondaryPartitionClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/secondary-partition-default") + k8s.KubectlApplyK(t, serverClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/default-partition-default") + k8s.KubectlApplyK(t, clientClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/secondary-partition-default") helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() { - k8s.KubectlDeleteK(t, defaultPartitionClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/default-partition-default") - k8s.KubectlDeleteK(t, secondaryPartitionClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/secondary-partition-default") + k8s.KubectlDeleteK(t, serverClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/default-partition-default") + k8s.KubectlDeleteK(t, clientClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/secondary-partition-default") }) } else { - k8s.KubectlApplyK(t, defaultPartitionClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/default-partition-ns1") - k8s.KubectlApplyK(t, secondaryPartitionClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/secondary-partition-ns1") + k8s.KubectlApplyK(t, serverClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/default-partition-ns1") + k8s.KubectlApplyK(t, clientClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/secondary-partition-ns1") helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() { - k8s.KubectlDeleteK(t, defaultPartitionClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/default-partition-ns1") - k8s.KubectlDeleteK(t, secondaryPartitionClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/secondary-partition-ns1") + k8s.KubectlDeleteK(t, serverClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/default-partition-ns1") + k8s.KubectlDeleteK(t, clientClusterContext.KubectlOptions(t), 
"../fixtures/cases/crd-partitions/secondary-partition-ns1") }) } - if c.ACLsEnabled { + if c.ACLsAndAutoEncryptEnabled { logger.Log(t, "checking that the connection is not successful because there's no intention") if cfg.EnableTransparentProxy { if !c.mirrorK8S { - k8s.CheckStaticServerConnectionFailing(t, defaultPartitionClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", c.destinationNamespace, secondaryPartition)) - k8s.CheckStaticServerConnectionFailing(t, secondaryPartitionClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", c.destinationNamespace, defaultPartition)) + k8s.CheckStaticServerConnectionFailing(t, serverClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", c.destinationNamespace, secondaryPartition)) + k8s.CheckStaticServerConnectionFailing(t, clientClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", c.destinationNamespace, defaultPartition)) } else { - k8s.CheckStaticServerConnectionFailing(t, defaultPartitionClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", staticServerNamespace, secondaryPartition)) - k8s.CheckStaticServerConnectionFailing(t, secondaryPartitionClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", staticServerNamespace, defaultPartition)) + k8s.CheckStaticServerConnectionFailing(t, serverClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", staticServerNamespace, secondaryPartition)) + k8s.CheckStaticServerConnectionFailing(t, clientClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", staticServerNamespace, defaultPartition)) } } else { - k8s.CheckStaticServerConnectionFailing(t, defaultPartitionClusterStaticClientOpts, StaticClientName, "http://localhost:1234") - k8s.CheckStaticServerConnectionFailing(t, secondaryPartitionClusterStaticClientOpts, StaticClientName, "http://localhost:1234") + k8s.CheckStaticServerConnectionFailing(t, serverClusterStaticClientOpts, StaticClientName, "http://localhost:1234") + k8s.CheckStaticServerConnectionFailing(t, clientClusterStaticClientOpts, StaticClientName, "http://localhost:1234") } intention := &api.ServiceIntentionsConfigEntry{ @@ -560,22 +591,22 @@ func TestPartitions_Connect(t *testing.T) { logger.Log(t, "checking that connection is successful") if cfg.EnableTransparentProxy { if !c.mirrorK8S { - k8s.CheckStaticServerConnectionSuccessful(t, defaultPartitionClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", c.destinationNamespace, secondaryPartition)) - k8s.CheckStaticServerConnectionSuccessful(t, secondaryPartitionClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", c.destinationNamespace, defaultPartition)) + k8s.CheckStaticServerConnectionSuccessful(t, serverClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", c.destinationNamespace, secondaryPartition)) + k8s.CheckStaticServerConnectionSuccessful(t, clientClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", c.destinationNamespace, 
defaultPartition)) } else { - k8s.CheckStaticServerConnectionSuccessful(t, defaultPartitionClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", staticServerNamespace, secondaryPartition)) - k8s.CheckStaticServerConnectionSuccessful(t, secondaryPartitionClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", staticServerNamespace, defaultPartition)) + k8s.CheckStaticServerConnectionSuccessful(t, serverClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", staticServerNamespace, secondaryPartition)) + k8s.CheckStaticServerConnectionSuccessful(t, clientClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", staticServerNamespace, defaultPartition)) } } else { - k8s.CheckStaticServerConnectionSuccessful(t, defaultPartitionClusterStaticClientOpts, StaticClientName, "http://localhost:1234") - k8s.CheckStaticServerConnectionSuccessful(t, secondaryPartitionClusterStaticClientOpts, StaticClientName, "http://localhost:1234") + k8s.CheckStaticServerConnectionSuccessful(t, serverClusterStaticClientOpts, StaticClientName, "http://localhost:1234") + k8s.CheckStaticServerConnectionSuccessful(t, clientClusterStaticClientOpts, StaticClientName, "http://localhost:1234") } // Test that kubernetes readiness status is synced to Consul. // Create the file so that the readiness probe of the static-server pod fails. logger.Log(t, "testing k8s -> consul health checks sync by making the static-server unhealthy") - k8s.RunKubectl(t, defaultPartitionClusterStaticServerOpts, "exec", "deploy/"+staticServerName, "--", "touch", "/tmp/unhealthy") - k8s.RunKubectl(t, secondaryPartitionClusterStaticServerOpts, "exec", "deploy/"+staticServerName, "--", "touch", "/tmp/unhealthy") + k8s.RunKubectl(t, serverClusterStaticServerOpts, "exec", "deploy/"+staticServerName, "--", "touch", "/tmp/unhealthy") + k8s.RunKubectl(t, clientClusterStaticServerOpts, "exec", "deploy/"+staticServerName, "--", "touch", "/tmp/unhealthy") // The readiness probe should take a moment to be reflected in Consul, CheckStaticServerConnection will retry // until Consul marks the service instance unavailable for mesh traffic, causing the connection to fail. 
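The transparent-proxy checks above and below address the upstream through Consul's virtual tproxy DNS scheme, <service>.virtual.<namespace>.ns.<partition>.ap.<datacenter>.dc.consul, as the repeated fmt.Sprintf calls show. Factored into a hypothetical helper:

package sketch

import "fmt"

// virtualAddress builds the tproxy virtual DNS name used in the
// connection checks above, e.g.
// static-server.virtual.ns1.ns.secondary.ap.dc1.dc.consul.
func virtualAddress(service, namespace, partition, datacenter string) string {
	return fmt.Sprintf("%s.virtual.%s.ns.%s.ap.%s.dc.consul", service, namespace, partition, datacenter)
}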
@@ -585,15 +616,15 @@ func TestPartitions_Connect(t *testing.T) { logger.Log(t, "checking that connection is unsuccessful") if cfg.EnableTransparentProxy { if !c.mirrorK8S { - k8s.CheckStaticServerConnectionMultipleFailureMessages(t, defaultPartitionClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server", "curl: (7) Failed to connect to static-server.ns1 port 80: Connection refused"}, "", fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", c.destinationNamespace, secondaryPartition)) - k8s.CheckStaticServerConnectionMultipleFailureMessages(t, secondaryPartitionClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server", "curl: (7) Failed to connect to static-server.ns1 port 80: Connection refused"}, "", fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", c.destinationNamespace, defaultPartition)) + k8s.CheckStaticServerConnectionMultipleFailureMessages(t, serverClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server", "curl: (7) Failed to connect to static-server.ns1 port 80: Connection refused"}, "", fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", c.destinationNamespace, secondaryPartition)) + k8s.CheckStaticServerConnectionMultipleFailureMessages(t, clientClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server", "curl: (7) Failed to connect to static-server.ns1 port 80: Connection refused"}, "", fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", c.destinationNamespace, defaultPartition)) } else { - k8s.CheckStaticServerConnectionMultipleFailureMessages(t, defaultPartitionClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server", "curl: (7) Failed to connect to static-server.ns1 port 80: Connection refused"}, "", fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", staticServerNamespace, secondaryPartition)) - k8s.CheckStaticServerConnectionMultipleFailureMessages(t, secondaryPartitionClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server", "curl: (7) Failed to connect to static-server.ns1 port 80: Connection refused"}, "", fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", staticServerNamespace, defaultPartition)) + k8s.CheckStaticServerConnectionMultipleFailureMessages(t, serverClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server", "curl: (7) Failed to connect to static-server.ns1 port 80: Connection refused"}, "", fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", staticServerNamespace, secondaryPartition)) + k8s.CheckStaticServerConnectionMultipleFailureMessages(t, clientClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server", "curl: (7) Failed to connect to static-server.ns1 port 80: Connection refused"}, "", fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", staticServerNamespace, defaultPartition)) } } else { 
- k8s.CheckStaticServerConnectionMultipleFailureMessages(t, defaultPartitionClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server"}, "", "http://localhost:1234") - k8s.CheckStaticServerConnectionMultipleFailureMessages(t, secondaryPartitionClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server"}, "", "http://localhost:1234") + k8s.CheckStaticServerConnectionMultipleFailureMessages(t, serverClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server"}, "", "http://localhost:1234") + k8s.CheckStaticServerConnectionMultipleFailureMessages(t, clientClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server"}, "", "http://localhost:1234") } }) }) diff --git a/acceptance/tests/partitions/partitions_sync_test.go b/acceptance/tests/partitions/partitions_sync_test.go index e29ef18c78..59108b2d3a 100644 --- a/acceptance/tests/partitions/partitions_sync_test.go +++ b/acceptance/tests/partitions/partitions_sync_test.go @@ -1,6 +1,7 @@ package partitions import ( + "context" "fmt" "strconv" "testing" @@ -15,9 +16,10 @@ import ( "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// Test that Sync Catalog works in a default and ACLsEnabled installations for partitions. +// Test that Sync Catalog works in default and ACLsAndAutoEncryptEnabled installations for partitions. func TestPartitions_Sync(t *testing.T) { env := suite.Environment() cfg := suite.Config() @@ -33,10 +35,10 @@ func TestPartitions_Sync(t *testing.T) { const secondaryPartition = "secondary" const defaultNamespace = "default" cases := []struct { - name string - destinationNamespace string - mirrorK8S bool - ACLsEnabled bool + name string + destinationNamespace string + mirrorK8S bool + ACLsAndAutoEncryptEnabled bool }{ { "default destination namespace", @@ -81,14 +83,18 @@ func TestPartitions_Sync(t *testing.T) { primaryClusterContext := env.DefaultContext(t) secondaryClusterContext := env.Context(t, environment.SecondaryContextName) + ctx := context.Background() + commonHelmValues := map[string]string{ "global.adminPartitions.enabled": "true", - "global.enableConsulNamespaces": "true", - "global.tls.enabled": "true", - "global.tls.httpsOnly": strconv.FormatBool(c.ACLsEnabled), + "global.enableConsulNamespaces": "true", + + "global.tls.enabled": "true", + "global.tls.httpsOnly": strconv.FormatBool(c.ACLsAndAutoEncryptEnabled), + "global.tls.enableAutoEncrypt": strconv.FormatBool(c.ACLsAndAutoEncryptEnabled), - "global.acls.manageSystemACLs": strconv.FormatBool(c.ACLsEnabled), + "global.acls.manageSystemACLs": strconv.FormatBool(c.ACLsAndAutoEncryptEnabled), "syncCatalog.enabled": "true", // When mirroringK8S is set, this setting is ignored. @@ -108,8 +114,8 @@ func TestPartitions_Sync(t *testing.T) { // share the same node network (docker bridge), we can use // a NodePort service so that we can access node(s) in a different Kind cluster.
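The Kind-specific override that follows recurs across several suites in this change; condensed as a sketch, it is the pattern below (the helper name is illustrative; the value keys are the ones these hunks switch to):

```go
// exposeForKind pins the admin-partition service to a fixed NodePort when
// running on Kind, where both clusters share the docker bridge network and
// can therefore reach each other by node IP. Helper name is illustrative.
func exposeForKind(useKind bool, serverHelmValues map[string]string) {
	if useKind {
		serverHelmValues["global.adminPartitions.service.type"] = "NodePort"
		serverHelmValues["global.adminPartitions.service.nodePort.https"] = "30000"
	}
}
```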
if cfg.UseKind { - serverHelmValues["server.exposeService.type"] = "NodePort" - serverHelmValues["server.exposeService.nodePort.https"] = "30000" + serverHelmValues["global.adminPartitions.service.type"] = "NodePort" + serverHelmValues["global.adminPartitions.service.nodePort.https"] = "30000" } releaseName := helpers.RandomName() @@ -127,7 +133,7 @@ func TestPartitions_Sync(t *testing.T) { logger.Logf(t, "retrieving ca cert secret %s from the server cluster and applying to the client cluster", caCertSecretName) k8s.CopySecret(t, primaryClusterContext, secondaryClusterContext, caCertSecretName) - if !c.ACLsEnabled { + if !c.ACLsAndAutoEncryptEnabled { // When auto-encrypt is disabled, we need both // the CA cert and CA key to be available in the clients cluster to generate client certificates and keys. logger.Logf(t, "retrieving ca key secret %s from the server cluster and applying to the client cluster", caKeySecretName) @@ -135,12 +141,12 @@ func TestPartitions_Sync(t *testing.T) { } partitionToken := fmt.Sprintf("%s-consul-partitions-acl-token", releaseName) - if c.ACLsEnabled { + if c.ACLsAndAutoEncryptEnabled { logger.Logf(t, "retrieving partition token secret %s from the server cluster and applying to the client cluster", partitionToken) k8s.CopySecret(t, primaryClusterContext, secondaryClusterContext, partitionToken) } - partitionServiceName := fmt.Sprintf("%s-consul-expose-servers", releaseName) + partitionServiceName := fmt.Sprintf("%s-consul-partition", releaseName) partitionSvcAddress := k8s.ServiceHost(t, cfg, primaryClusterContext, partitionServiceName) k8sAuthMethodHost := k8s.KubernetesAPIServerHost(t, cfg, secondaryClusterContext) @@ -157,9 +163,13 @@ func TestPartitions_Sync(t *testing.T) { "externalServers.enabled": "true", "externalServers.hosts[0]": partitionSvcAddress, "externalServers.tlsServerName": "server.dc1.consul", + + "client.enabled": "true", + "client.exposeGossipPorts": "true", + "client.join[0]": partitionSvcAddress, } - if c.ACLsEnabled { + if c.ACLsAndAutoEncryptEnabled { // Setup partition token and auth method host if ACLs enabled. clientHelmValues["global.acls.bootstrapToken.secretName"] = partitionToken clientHelmValues["global.acls.bootstrapToken.secretKey"] = "token" @@ -180,6 +190,15 @@ func TestPartitions_Sync(t *testing.T) { secondaryConsulCluster := consul.NewHelmCluster(t, clientHelmValues, secondaryClusterContext, cfg, releaseName) secondaryConsulCluster.Create(t) + // Ensure consul clients are created. 
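`k8s.CopySecret`, used above to move the CA cert between clusters, is a framework helper whose body is not part of this diff. A minimal sketch of what such a copy plausibly involves (the deleted WAN federation test later in this diff performs the same steps inline):

```go
package k8ssketch

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// copySecret reads a secret from one cluster and recreates it in another.
// ResourceVersion and UID are cluster-specific and must be cleared before
// the Create call in the destination cluster.
func copySecret(ctx context.Context, from, to kubernetes.Interface, namespace, name string) error {
	secret, err := from.CoreV1().Secrets(namespace).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	secret.ResourceVersion = ""
	secret.UID = ""
	_, err = to.CoreV1().Secrets(namespace).Create(ctx, secret, metav1.CreateOptions{})
	return err
}
```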
+ agentPodList, err := secondaryClusterContext.KubernetesClient(t).CoreV1().Pods(secondaryClusterContext.KubectlOptions(t).Namespace).List(ctx, metav1.ListOptions{LabelSelector: "app=consul,component=client"}) + require.NoError(t, err) + require.NotEmpty(t, agentPodList.Items) + + output, err := k8s.RunKubectlAndGetOutputE(t, secondaryClusterContext.KubectlOptions(t), "logs", agentPodList.Items[0].Name, "-n", secondaryClusterContext.KubectlOptions(t).Namespace) + require.NoError(t, err) + require.Contains(t, output, "Partition: 'secondary'") + primaryStaticServerOpts := &terratestk8s.KubectlOptions{ ContextName: primaryClusterContext.KubectlOptions(t).ContextName, ConfigPath: primaryClusterContext.KubectlOptions(t).ConfigPath, @@ -203,7 +222,7 @@ func TestPartitions_Sync(t *testing.T) { k8s.RunKubectl(t, secondaryClusterContext.KubectlOptions(t), "delete", "ns", staticServerNamespace) }) - consulClient, _ := primaryConsulCluster.SetupConsulClient(t, c.ACLsEnabled) + consulClient, _ := primaryConsulCluster.SetupConsulClient(t, c.ACLsAndAutoEncryptEnabled) defaultPartitionQueryOpts := &api.QueryOptions{Namespace: staticServerNamespace, Partition: defaultPartition} secondaryPartitionQueryOpts := &api.QueryOptions{Namespace: staticServerNamespace, Partition: secondaryPartition} @@ -214,12 +233,12 @@ func TestPartitions_Sync(t *testing.T) { } // Check that the ACL token is deleted. - if c.ACLsEnabled { + if c.ACLsAndAutoEncryptEnabled { // We need to register the cleanup function before we create the deployments // because golang will execute them in reverse order i.e. the last registered // cleanup function will be executed first. t.Cleanup(func() { - if c.ACLsEnabled { + if c.ACLsAndAutoEncryptEnabled { retry.Run(t, func(r *retry.R) { tokens, _, err := consulClient.ACL().TokenList(defaultPartitionQueryOpts) require.NoError(r, err) @@ -273,6 +292,7 @@ func TestPartitions_Sync(t *testing.T) { require.NoError(t, err) require.Equal(t, 1, len(service)) require.Equal(t, []string{"k8s"}, service[0].ServiceTags) + }) } } diff --git a/acceptance/tests/peering/main_test.go b/acceptance/tests/peering/main_test.go index 12bb35afd5..2a5bf4b448 100644 --- a/acceptance/tests/peering/main_test.go +++ b/acceptance/tests/peering/main_test.go @@ -10,13 +10,18 @@ import ( var suite testsuite.Suite +// TestMain for peering is DISABLED for 0.49. 
func TestMain(m *testing.M) { - suite = testsuite.NewSuite(m) - if suite.Config().EnableMultiCluster && !suite.Config().DisablePeering { - os.Exit(suite.Run()) - } else { - fmt.Println("Skipping peering tests because either -enable-multi-cluster is not set or -disable-peering is set") - os.Exit(0) - } + fmt.Println("Skipping peering tests because this is a beta feature and not fully supported") + os.Exit(0) + + //suite = testsuite.NewSuite(m) + // + //if suite.Config().EnableMultiCluster && !suite.Config().DisablePeering { + // os.Exit(suite.Run()) + //} else { + // fmt.Println("Skipping peering tests because either -enable-multi-cluster is not set or -disable-peering is set") + // os.Exit(0) + //} } diff --git a/acceptance/tests/peering/peering_connect_namespaces_test.go b/acceptance/tests/peering/peering_connect_namespaces_test.go index 3e243b4781..05915eb507 100644 --- a/acceptance/tests/peering/peering_connect_namespaces_test.go +++ b/acceptance/tests/peering/peering_connect_namespaces_test.go @@ -44,10 +44,10 @@ func TestPeering_ConnectNamespaces(t *testing.T) { const staticClientPeer = "client" const defaultNamespace = "default" cases := []struct { - name string - destinationNamespace string - mirrorK8S bool - ACLsEnabled bool + name string + destinationNamespace string + mirrorK8S bool + ACLsAndAutoEncryptEnabled bool }{ { "default destination namespace", @@ -96,10 +96,11 @@ func TestPeering_ConnectNamespaces(t *testing.T) { "global.peering.enabled": "true", "global.enableConsulNamespaces": "true", - "global.tls.enabled": "true", - "global.tls.httpsOnly": strconv.FormatBool(c.ACLsEnabled), + "global.tls.enabled": "true", + "global.tls.httpsOnly": strconv.FormatBool(c.ACLsAndAutoEncryptEnabled), + "global.tls.enableAutoEncrypt": strconv.FormatBool(c.ACLsAndAutoEncryptEnabled), - "global.acls.manageSystemACLs": strconv.FormatBool(c.ACLsEnabled), + "global.acls.manageSystemACLs": strconv.FormatBool(c.ACLsAndAutoEncryptEnabled), "connectInject.enabled": "true", @@ -110,6 +111,8 @@ func TestPeering_ConnectNamespaces(t *testing.T) { "meshGateway.enabled": "true", "meshGateway.replicas": "1", + "controller.enabled": "true", + "dns.enabled": "true", "dns.enableRedirection": strconv.FormatBool(cfg.EnableTransparentProxy), } @@ -129,6 +132,8 @@ func TestPeering_ConnectNamespaces(t *testing.T) { staticServerPeerHelmValues["server.exposeGossipAndRPCPorts"] = "true" staticServerPeerHelmValues["meshGateway.service.type"] = "NodePort" staticServerPeerHelmValues["meshGateway.service.nodePort"] = "30100" + staticServerPeerHelmValues["server.exposeService.type"] = "NodePort" + staticServerPeerHelmValues["server.exposeService.nodePort.grpc"] = "30200" } releaseName := helpers.RandomName() @@ -151,6 +156,8 @@ func TestPeering_ConnectNamespaces(t *testing.T) { staticClientPeerHelmValues["server.exposeGossipAndRPCPorts"] = "true" staticClientPeerHelmValues["meshGateway.service.type"] = "NodePort" staticClientPeerHelmValues["meshGateway.service.nodePort"] = "30100" + staticClientPeerHelmValues["server.exposeService.type"] = "NodePort" + staticClientPeerHelmValues["server.exposeService.nodePort.grpc"] = "30200" } helpers.MergeMaps(staticClientPeerHelmValues, commonHelmValues) @@ -159,41 +166,6 @@ func TestPeering_ConnectNamespaces(t *testing.T) { staticClientPeerCluster := consul.NewHelmCluster(t, staticClientPeerHelmValues, staticClientPeerClusterContext, cfg, releaseName) staticClientPeerCluster.Create(t) - // Create Mesh resource to use mesh gateways. 
- logger.Log(t, "creating mesh config") - kustomizeMeshDir := "../fixtures/bases/mesh-peering" - - k8s.KubectlApplyK(t, staticServerPeerClusterContext.KubectlOptions(t), kustomizeMeshDir) - helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() { - k8s.KubectlDeleteK(t, staticServerPeerClusterContext.KubectlOptions(t), kustomizeMeshDir) - }) - - k8s.KubectlApplyK(t, staticClientPeerClusterContext.KubectlOptions(t), kustomizeMeshDir) - helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() { - k8s.KubectlDeleteK(t, staticClientPeerClusterContext.KubectlOptions(t), kustomizeMeshDir) - }) - - staticServerPeerClient, _ := staticServerPeerCluster.SetupConsulClient(t, c.ACLsEnabled) - staticClientPeerClient, _ := staticClientPeerCluster.SetupConsulClient(t, c.ACLsEnabled) - - // Ensure mesh config entries are created in Consul. - timer := &retry.Timer{Timeout: 1 * time.Minute, Wait: 1 * time.Second} - retry.RunWith(timer, t, func(r *retry.R) { - ceServer, _, err := staticServerPeerClient.ConfigEntries().Get(api.MeshConfig, "mesh", &api.QueryOptions{}) - require.NoError(r, err) - configEntryServer, ok := ceServer.(*api.MeshConfigEntry) - require.True(r, ok) - require.Equal(r, configEntryServer.GetName(), "mesh") - require.NoError(r, err) - - ceClient, _, err := staticClientPeerClient.ConfigEntries().Get(api.MeshConfig, "mesh", &api.QueryOptions{}) - require.NoError(r, err) - configEntryClient, ok := ceClient.(*api.MeshConfigEntry) - require.True(r, ok) - require.Equal(r, configEntryClient.GetName(), "mesh") - require.NoError(r, err) - }) - // Create the peering acceptor on the client peer. k8s.KubectlApply(t, staticClientPeerClusterContext.KubectlOptions(t), "../fixtures/bases/peering/peering-acceptor.yaml") helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() { @@ -201,6 +173,7 @@ func TestPeering_ConnectNamespaces(t *testing.T) { }) // Ensure the secret is created. 
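The `retry.Timer` being shuffled around in this hunk is the suites' standard polling primitive. For reference, a minimal self-contained sketch of how the sdk helper is used (the condition body is left to the caller):

```go
package retrysketch

import (
	"testing"
	"time"

	"github.com/hashicorp/consul/sdk/testutil/retry"
)

// waitFor polls check once per second for up to a minute, failing the test
// if the condition never passes -- the same pattern used below to wait for
// the PeeringAcceptor's generated secret.
func waitFor(t *testing.T, check func(r *retry.R)) {
	timer := &retry.Timer{Timeout: 1 * time.Minute, Wait: 1 * time.Second}
	retry.RunWith(timer, t, check)
}
```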
+ timer := &retry.Timer{Timeout: 1 * time.Minute, Wait: 1 * time.Second} retry.RunWith(timer, t, func(r *retry.R) { acceptorSecretName, err := k8s.RunKubectlAndGetOutputE(t, staticClientPeerClusterContext.KubectlOptions(t), "get", "peeringacceptor", "server", "-o", "jsonpath={.status.secret.name}") require.NoError(r, err) @@ -240,6 +213,9 @@ func TestPeering_ConnectNamespaces(t *testing.T) { k8s.RunKubectl(t, staticClientPeerClusterContext.KubectlOptions(t), "delete", "ns", staticClientNamespace) }) + staticServerPeerClient, _ := staticServerPeerCluster.SetupConsulClient(t, c.ACLsAndAutoEncryptEnabled) + staticClientPeerClient, _ := staticClientPeerCluster.SetupConsulClient(t, c.ACLsAndAutoEncryptEnabled) + serverQueryOpts := &api.QueryOptions{Namespace: staticServerNamespace} clientQueryOpts := &api.QueryOptions{Namespace: staticClientNamespace} @@ -320,12 +296,10 @@ func TestPeering_ConnectNamespaces(t *testing.T) { }) } - if c.ACLsEnabled { + if c.ACLsAndAutoEncryptEnabled { logger.Log(t, "checking that the connection is not successful because there's no allow intention") if cfg.EnableTransparentProxy { - k8s.CheckStaticServerConnectionMultipleFailureMessages(t, staticClientOpts, staticClientName, false, - []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server", fmt.Sprintf("curl: (6) Could not resolve host: static-server.virtual.%s.%s.consul", c.destinationNamespace, staticServerPeer)}, - "", fmt.Sprintf("http://static-server.virtual.%s.%s.consul", c.destinationNamespace, staticServerPeer)) + k8s.CheckStaticServerConnectionMultipleFailureMessages(t, staticClientOpts, staticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server", fmt.Sprintf("curl: (7) Failed to connect to static-server.%s port 80: Connection refused", c.destinationNamespace)}, "", fmt.Sprintf("http://static-server.virtual.%s.%s.consul", c.destinationNamespace, staticServerPeer)) } else { k8s.CheckStaticServerConnectionFailing(t, staticClientOpts, staticClientName, "http://localhost:1234") } diff --git a/acceptance/tests/peering/peering_connect_test.go b/acceptance/tests/peering/peering_connect_test.go index 0761854497..6a884b2efa 100644 --- a/acceptance/tests/peering/peering_connect_test.go +++ b/acceptance/tests/peering/peering_connect_test.go @@ -34,8 +34,8 @@ func TestPeering_Connect(t *testing.T) { const staticServerPeer = "server" const staticClientPeer = "client" cases := []struct { - name string - ACLsEnabled bool + name string + ACLsAndAutoEncryptEnabled bool }{ { "default installation", @@ -55,16 +55,19 @@ func TestPeering_Connect(t *testing.T) { commonHelmValues := map[string]string{ "global.peering.enabled": "true", - "global.tls.enabled": "true", - "global.tls.httpsOnly": strconv.FormatBool(c.ACLsEnabled), + "global.tls.enabled": "true", + "global.tls.httpsOnly": strconv.FormatBool(c.ACLsAndAutoEncryptEnabled), + "global.tls.enableAutoEncrypt": strconv.FormatBool(c.ACLsAndAutoEncryptEnabled), - "global.acls.manageSystemACLs": strconv.FormatBool(c.ACLsEnabled), + "global.acls.manageSystemACLs": strconv.FormatBool(c.ACLsAndAutoEncryptEnabled), "connectInject.enabled": "true", "meshGateway.enabled": "true", "meshGateway.replicas": "1", + "controller.enabled": "true", + "dns.enabled": "true", "dns.enableRedirection": strconv.FormatBool(cfg.EnableTransparentProxy), } @@ -84,6 +87,8 @@ func TestPeering_Connect(t *testing.T) { staticServerPeerHelmValues["server.exposeGossipAndRPCPorts"] = "true" 
staticServerPeerHelmValues["meshGateway.service.type"] = "NodePort" staticServerPeerHelmValues["meshGateway.service.nodePort"] = "30100" + staticServerPeerHelmValues["server.exposeService.type"] = "NodePort" + staticServerPeerHelmValues["server.exposeService.nodePort.grpc"] = "30200" } releaseName := helpers.RandomName() @@ -99,13 +104,15 @@ func TestPeering_Connect(t *testing.T) { } if !cfg.UseKind { - staticClientPeerHelmValues["server.replicas"] = "3" + staticServerPeerHelmValues["server.replicas"] = "3" } if cfg.UseKind { staticClientPeerHelmValues["server.exposeGossipAndRPCPorts"] = "true" staticClientPeerHelmValues["meshGateway.service.type"] = "NodePort" staticClientPeerHelmValues["meshGateway.service.nodePort"] = "30100" + staticClientPeerHelmValues["server.exposeService.type"] = "NodePort" + staticClientPeerHelmValues["server.exposeService.nodePort.grpc"] = "30200" } helpers.MergeMaps(staticClientPeerHelmValues, commonHelmValues) @@ -114,41 +121,6 @@ func TestPeering_Connect(t *testing.T) { staticClientPeerCluster := consul.NewHelmCluster(t, staticClientPeerHelmValues, staticClientPeerClusterContext, cfg, releaseName) staticClientPeerCluster.Create(t) - // Create Mesh resource to use mesh gateways. - logger.Log(t, "creating mesh config") - kustomizeMeshDir := "../fixtures/bases/mesh-peering" - - k8s.KubectlApplyK(t, staticServerPeerClusterContext.KubectlOptions(t), kustomizeMeshDir) - helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() { - k8s.KubectlDeleteK(t, staticServerPeerClusterContext.KubectlOptions(t), kustomizeMeshDir) - }) - - k8s.KubectlApplyK(t, staticClientPeerClusterContext.KubectlOptions(t), kustomizeMeshDir) - helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() { - k8s.KubectlDeleteK(t, staticClientPeerClusterContext.KubectlOptions(t), kustomizeMeshDir) - }) - - staticServerPeerClient, _ := staticServerPeerCluster.SetupConsulClient(t, c.ACLsEnabled) - staticClientPeerClient, _ := staticClientPeerCluster.SetupConsulClient(t, c.ACLsEnabled) - - // Ensure mesh config entries are created in Consul. - timer := &retry.Timer{Timeout: 1 * time.Minute, Wait: 1 * time.Second} - retry.RunWith(timer, t, func(r *retry.R) { - ceServer, _, err := staticServerPeerClient.ConfigEntries().Get(api.MeshConfig, "mesh", &api.QueryOptions{}) - require.NoError(r, err) - configEntryServer, ok := ceServer.(*api.MeshConfigEntry) - require.True(r, ok) - require.Equal(r, configEntryServer.GetName(), "mesh") - require.NoError(r, err) - - ceClient, _, err := staticClientPeerClient.ConfigEntries().Get(api.MeshConfig, "mesh", &api.QueryOptions{}) - require.NoError(r, err) - configEntryClient, ok := ceClient.(*api.MeshConfigEntry) - require.True(r, ok) - require.Equal(r, configEntryClient.GetName(), "mesh") - require.NoError(r, err) - }) - // Create the peering acceptor on the client peer. k8s.KubectlApply(t, staticClientPeerClusterContext.KubectlOptions(t), "../fixtures/bases/peering/peering-acceptor.yaml") helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() { @@ -156,6 +128,7 @@ func TestPeering_Connect(t *testing.T) { }) // Ensure the secret is created. 
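A few hunks below, the test applies a proxy-defaults fixture ("creating proxy-defaults config") to route traffic through mesh gateways. The fixture's exact contents are not shown in this diff; its Consul-API equivalent is roughly the following sketch:

```go
package proxydefaultssketch

import "github.com/hashicorp/consul/api"

// meshGatewayProxyDefaults sets the global proxy-defaults entry so that
// cross-cluster traffic is routed through local mesh gateways, which is
// what the kustomize fixture configures declaratively.
func meshGatewayProxyDefaults(client *api.Client) error {
	entry := &api.ProxyConfigEntry{
		Kind: api.ProxyDefaults,
		Name: api.ProxyConfigGlobal,
		MeshGateway: api.MeshGatewayConfig{
			Mode: api.MeshGatewayModeLocal,
		},
	}
	_, _, err := client.ConfigEntries().Set(entry, nil)
	return err
}
```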
+ timer := &retry.Timer{Timeout: 1 * time.Minute, Wait: 1 * time.Second} retry.RunWith(timer, t, func(r *retry.R) { acceptorSecretName, err := k8s.RunKubectlAndGetOutputE(t, staticClientPeerClusterContext.KubectlOptions(t), "get", "peeringacceptor", "server", "-o", "jsonpath={.status.secret.name}") require.NoError(r, err) @@ -195,6 +168,9 @@ func TestPeering_Connect(t *testing.T) { k8s.RunKubectl(t, staticClientPeerClusterContext.KubectlOptions(t), "delete", "ns", staticClientNamespace) }) + staticServerPeerClient, _ := staticServerPeerCluster.SetupConsulClient(t, c.ACLsAndAutoEncryptEnabled) + staticClientPeerClient, _ := staticClientPeerCluster.SetupConsulClient(t, c.ACLsAndAutoEncryptEnabled) + // Create a ProxyDefaults resource to configure services to use the mesh // gateways. logger.Log(t, "creating proxy-defaults config") @@ -251,12 +227,10 @@ func TestPeering_Connect(t *testing.T) { k8s.KubectlDeleteK(t, staticServerPeerClusterContext.KubectlOptions(t), "../fixtures/cases/crd-peers/default") }) - if c.ACLsEnabled { + if c.ACLsAndAutoEncryptEnabled { logger.Log(t, "checking that the connection is not successful because there's no allow intention") if cfg.EnableTransparentProxy { - k8s.CheckStaticServerConnectionMultipleFailureMessages(t, staticClientOpts, staticClientName, false, - []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server", fmt.Sprintf("curl: (6) Could not resolve host: static-server.virtual.%s.consul", staticServerPeer)}, - "", fmt.Sprintf("http://static-server.virtual.%s.consul", staticServerPeer)) + k8s.CheckStaticServerConnectionMultipleFailureMessages(t, staticClientOpts, staticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server", "curl: (7) Failed to connect to static-server port 80: Connection refused"}, "", fmt.Sprintf("http://static-server.virtual.%s.consul", staticServerPeer)) } else { k8s.CheckStaticServerConnectionFailing(t, staticClientOpts, staticClientName, "http://localhost:1234") } @@ -284,7 +258,6 @@ func TestPeering_Connect(t *testing.T) { } else { k8s.CheckStaticServerConnectionSuccessful(t, staticClientOpts, staticClientName, "http://localhost:1234") } - }) } } diff --git a/acceptance/tests/snapshot-agent/snapshot_agent_k8s_secret_test.go b/acceptance/tests/snapshot-agent/snapshot_agent_k8s_secret_test.go index a4c1ef7494..531e7a5274 100644 --- a/acceptance/tests/snapshot-agent/snapshot_agent_k8s_secret_test.go +++ b/acceptance/tests/snapshot-agent/snapshot_agent_k8s_secret_test.go @@ -5,7 +5,7 @@ import ( "context" "encoding/json" "fmt" - "strconv" + "strings" "testing" "time" @@ -15,7 +15,7 @@ import ( "github.com/hashicorp/consul-k8s/acceptance/framework/helpers" "github.com/hashicorp/consul-k8s/acceptance/framework/k8s" "github.com/hashicorp/consul-k8s/acceptance/framework/logger" - "github.com/hashicorp/consul/sdk/testutil/retry" + "github.com/hashicorp/go-uuid" "github.com/stretchr/testify/require" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -31,75 +31,83 @@ func TestSnapshotAgent_K8sSecret(t *testing.T) { if cfg.EnableCNI { t.Skipf("skipping because -enable-cni is set and snapshot agent is already tested with regular tproxy") } + ctx := suite.Environment().DefaultContext(t) + kubectlOptions := ctx.KubectlOptions(t) + ns := kubectlOptions.Namespace + releaseName := helpers.RandomName() - cases := map[string]struct { - secure bool - }{ - "non-secure": {secure: false}, - "secure": {secure: true}, - } + // Generate a bootstrap token 
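The bootstrap-token flow below is two steps: generate a random UUID, then store it where the chart's `global.acls.bootstrapToken.secretName`/`secretKey` values point. `consul.CreateK8sSecret` is a framework helper; a sketch of what the combination amounts to in plain client-go:

```go
package tokensketch

import (
	"context"

	"github.com/hashicorp/go-uuid"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// createBootstrapTokenSecret generates a random ACL bootstrap token and
// stores it in a Kubernetes secret under the given name and key.
func createBootstrapTokenSecret(ctx context.Context, client kubernetes.Interface, ns, name, key string) (string, error) {
	token, err := uuid.GenerateUUID()
	if err != nil {
		return "", err
	}
	_, err = client.CoreV1().Secrets(ns).Create(ctx, &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		StringData: map[string]string{key: token},
	}, metav1.CreateOptions{})
	return token, err
}
```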
+ bootstrapToken, err := uuid.GenerateUUID() + require.NoError(t, err) - for name, c := range cases { - t.Run(name, func(t *testing.T) { - ctx := suite.Environment().DefaultContext(t) - kubectlOptions := ctx.KubectlOptions(t) - ns := kubectlOptions.Namespace - releaseName := helpers.RandomName() + bsSecretName := fmt.Sprintf("%s-acl-bootstrap-token", releaseName) + bsSecretKey := "token" + saSecretName := fmt.Sprintf("%s-snapshot-agent-config", releaseName) + saSecretKey := "token" - saSecretName := fmt.Sprintf("%s-snapshot-agent-config", releaseName) - saSecretKey := "config" + // Create cluster + helmValues := map[string]string{ + "global.tls.enabled": "true", + "global.gossipEncryption.autoGenerate": "true", + "global.acls.manageSystemACLs": "true", + "global.acls.bootstrapToken.secretName": bsSecretName, + "global.acls.bootstrapToken.secretKey": bsSecretKey, + "client.snapshotAgent.enabled": "true", + "client.snapshotAgent.configSecret.secretName": saSecretName, + "client.snapshotAgent.configSecret.secretKey": saSecretKey, + } - // Create cluster - helmValues := map[string]string{ - "global.tls.enabled": strconv.FormatBool(c.secure), - "global.gossipEncryption.autoGenerate": strconv.FormatBool(c.secure), - "global.acls.manageSystemACLs": strconv.FormatBool(c.secure), - "server.snapshotAgent.enabled": "true", - "server.snapshotAgent.configSecret.secretName": saSecretName, - "server.snapshotAgent.configSecret.secretKey": saSecretKey, - "connectInject.enabled": "false", - } + // Get new cluster + consulCluster := consul.NewHelmCluster(t, helmValues, suite.Environment().DefaultContext(t), cfg, releaseName) + client := environment.KubernetesClientFromOptions(t, kubectlOptions) - // Get new cluster - consulCluster := consul.NewHelmCluster(t, helmValues, suite.Environment().DefaultContext(t), cfg, releaseName) - client := environment.KubernetesClientFromOptions(t, kubectlOptions) + // Add bootstrap token secret + logger.Log(t, "Storing bootstrap token as a k8s secret") + consul.CreateK8sSecret(t, client, cfg, ns, bsSecretName, bsSecretKey, bootstrapToken) - // Add snapshot agent config secret - logger.Log(t, "Storing snapshot agent config as a k8s secret") - config := generateSnapshotAgentConfig(t) - logger.Logf(t, "Snapshot agent config: %s", config) - consul.CreateK8sSecret(t, client, cfg, ns, saSecretName, saSecretKey, config) + // Add snapshot agent config secret + logger.Log(t, "Storing snapshot agent config as a k8s secret") + config := generateSnapshotAgentConfig(t, bootstrapToken) + logger.Logf(t, "Snapshot agent config: %s", config) + consul.CreateK8sSecret(t, client, cfg, ns, saSecretName, saSecretKey, config) - // Create cluster - consulCluster.Create(t) - // ---------------------------------- + // Create cluster + consulCluster.Create(t) + // ---------------------------------- - // Validate that consul snapshot agent is running correctly and is generating snapshot files - logger.Log(t, "Confirming that Consul Snapshot Agent is generating snapshot files") - // Create k8s client from kubectl options. + // Validate that consul snapshot agent is running correctly and is generating snapshot files + logger.Log(t, "Confirming that Consul Snapshot Agent is generating snapshot files") + // Create k8s client from kubectl options. 
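For readers outside this codebase, "create k8s client from kubectl options" reduces to standard client-go plumbing; a sketch assuming only a kubeconfig path and a context name:

```go
package clientsketch

import (
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// newClientset loads the kubeconfig at path, selects the named context,
// and builds a typed clientset from the resulting rest.Config.
func newClientset(path, kubeContext string) (*kubernetes.Clientset, error) {
	cfg, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		&clientcmd.ClientConfigLoadingRules{ExplicitPath: path},
		&clientcmd.ConfigOverrides{CurrentContext: kubeContext},
	).ClientConfig()
	if err != nil {
		return nil, err
	}
	return kubernetes.NewForConfig(cfg)
}
```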
+ + podList, err := client.CoreV1().Pods(kubectlOptions.Namespace).List(context.Background(), + metav1.ListOptions{LabelSelector: fmt.Sprintf("app=consul,component=client-snapshot-agent,release=%s", releaseName)}) + require.NoError(t, err) + require.True(t, len(podList.Items) > 0) - podList, err := client.CoreV1().Pods(kubectlOptions.Namespace).List(context.Background(), - metav1.ListOptions{LabelSelector: fmt.Sprintf("app=consul,component=server,release=%s", releaseName)}) - require.NoError(t, err) - require.Len(t, podList.Items, 1, "expected to find only 1 consul server instance") + // Wait for 10 seconds to allow a snapshot to be written. + time.Sleep(10 * time.Second) - // We need to give some extra time for ACLs to finish bootstrapping and for servers to come up. - timer := &retry.Timer{Timeout: 1 * time.Minute, Wait: 1 * time.Second} - retry.RunWith(timer, t, func(r *retry.R) { - // Loop through snapshot agents. Only one will be the leader and have the snapshot files. - pod := podList.Items[0] - snapshotFileListOutput, err := k8s.RunKubectlAndGetOutputWithLoggerE(t, kubectlOptions, terratestLogger.Discard, "exec", pod.Name, "-c", "consul-snapshot-agent", "--", "ls", "/tmp") - require.NoError(r, err) - logger.Logf(t, "Snapshot: \n%s", snapshotFileListOutput) - require.Contains(r, snapshotFileListOutput, ".snap", "Agent pod does not contain snapshot files") - }) - }) + // Loop through snapshot agents. Only one will be the leader and have the snapshot files. + hasSnapshots := false + for _, pod := range podList.Items { + snapshotFileListOutput, err := k8s.RunKubectlAndGetOutputWithLoggerE(t, kubectlOptions, terratestLogger.Discard, "exec", pod.Name, "-c", "consul-snapshot-agent", "--", "ls", "/") + logger.Logf(t, "Snapshot: \n%s", snapshotFileListOutput) + require.NoError(t, err) + if strings.Contains(snapshotFileListOutput, ".snap") { + logger.Logf(t, "Agent pod contains snapshot files") + hasSnapshots = true + break + } else { + logger.Logf(t, "Agent pod does not contain snapshot files") + } } + require.True(t, hasSnapshots, "expected at least one snapshot agent pod to contain .snap files") } -func generateSnapshotAgentConfig(t *testing.T) string { +func generateSnapshotAgentConfig(t *testing.T, token string) string { config := map[string]interface{}{ "snapshot_agent": map[string]interface{}{ + "token": token, "log": map[string]interface{}{ "level": "INFO", "enable_syslog": false, @@ -116,7 +124,7 @@ func generateSnapshotAgentConfig(t *testing.T) string { "local_scratch_path": "", }, "local_storage": map[string]interface{}{ - "path": "/tmp", + "path": ".", }, }, } diff --git a/acceptance/tests/snapshot-agent/snapshot_agent_vault_test.go b/acceptance/tests/snapshot-agent/snapshot_agent_vault_test.go index ee20ffd9d7..a0f3539592 100644 --- a/acceptance/tests/snapshot-agent/snapshot_agent_vault_test.go +++ b/acceptance/tests/snapshot-agent/snapshot_agent_vault_test.go @@ -3,6 +3,7 @@ package snapshotagent import ( "context" "fmt" + "strings" "testing" "time" @@ -13,7 +14,6 @@ import ( "github.com/hashicorp/consul-k8s/acceptance/framework/k8s" "github.com/hashicorp/consul-k8s/acceptance/framework/logger" "github.com/hashicorp/consul-k8s/acceptance/framework/vault" - "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/hashicorp/go-uuid" "github.com/hashicorp/go-version" "github.com/stretchr/testify/require" @@ -54,6 +54,13 @@ func TestSnapshotAgent_Vault(t *testing.T) { // ------------------------- // PKI // ------------------------- + // Configure Service Mesh CA + connectCAPolicy := "connect-ca-dc1" + connectCARootPath := "connect_root" + 
connectCAIntermediatePath := "dc1/connect_inter" + // Configure Policy for Connect CA + vault.CreateConnectCARootAndIntermediatePKIPolicy(t, vaultClient, connectCAPolicy, connectCARootPath, connectCAIntermediatePath) + // Configure Server PKI serverPKIConfig := &vault.PKIAndAuthRoleConfiguration{ BaseURL: "pki", @@ -105,7 +112,7 @@ func TestSnapshotAgent_Vault(t *testing.T) { bootstrapTokenSecret.SaveSecretAndAddReadPolicy(t, vaultClient) // Snapshot Agent config - snapshotAgentConfig := generateSnapshotAgentConfig(t) + snapshotAgentConfig := generateSnapshotAgentConfig(t, bootstrapToken) require.NoError(t, err) snapshotAgentConfigSecret := &vault.KV2Secret{ Path: "consul/data/secret/snapshot-agent-config", @@ -118,7 +125,7 @@ func TestSnapshotAgent_Vault(t *testing.T) { // ------------------------- // Additional Auth Roles // ------------------------- - serverPolicies := fmt.Sprintf("%s,%s,%s,%s", gossipSecret.PolicyName, serverPKIConfig.PolicyName, bootstrapTokenSecret.PolicyName, snapshotAgentConfigSecret.PolicyName) + serverPolicies := fmt.Sprintf("%s,%s,%s,%s", gossipSecret.PolicyName, connectCAPolicy, serverPKIConfig.PolicyName, bootstrapTokenSecret.PolicyName) if cfg.EnableEnterprise { serverPolicies += fmt.Sprintf(",%s", licenseSecret.PolicyName) } @@ -134,6 +141,18 @@ func TestSnapshotAgent_Vault(t *testing.T) { } srvAuthRoleConfig.ConfigureK8SAuthRole(t, vaultClient) + // client + consulClientRole := "client" + consulClientServiceAccountName := fmt.Sprintf("%s-consul-%s", consulReleaseName, "client") + clientAuthRoleConfig := &vault.KubernetesAuthRoleConfiguration{ + ServiceAccountName: consulClientServiceAccountName, + KubernetesNamespace: ns, + AuthMethodPath: "kubernetes", + RoleName: consulClientRole, + PolicyNames: gossipSecret.PolicyName, + } + clientAuthRoleConfig.ConfigureK8SAuthRole(t, vaultClient) + // manageSystemACLs manageSystemACLsRole := "server-acl-init" manageSystemACLsServiceAccountName := fmt.Sprintf("%s-consul-%s", consulReleaseName, "server-acl-init") @@ -156,6 +175,18 @@ func TestSnapshotAgent_Vault(t *testing.T) { } srvCAAuthRoleConfig.ConfigureK8SAuthRole(t, vaultClient) + // snapshot agent config + snapAgentRole := "snapshot-agent" + snapAgentServiceAccountName := fmt.Sprintf("%s-consul-%s", consulReleaseName, "snapshot-agent") + saAuthRoleConfig := &vault.KubernetesAuthRoleConfiguration{ + ServiceAccountName: snapAgentServiceAccountName, + KubernetesNamespace: ns, + AuthMethodPath: "kubernetes", + RoleName: snapAgentRole, + PolicyNames: fmt.Sprintf("%s,%s", licenseSecret.PolicyName, snapshotAgentConfigSecret.PolicyName), + } + saAuthRoleConfig.ConfigureK8SAuthRole(t, vaultClient) + vaultCASecret := vault.CASecretName(vaultReleaseName) consulHelmValues := map[string]string{ @@ -163,16 +194,23 @@ func TestSnapshotAgent_Vault(t *testing.T) { "server.extraVolumes[0].name": vaultCASecret, "server.extraVolumes[0].load": "false", - "connectInject.enabled": "false", + "connectInject.enabled": "true", "connectInject.replicas": "1", + "controller.enabled": "true", "global.secretsBackend.vault.enabled": "true", "global.secretsBackend.vault.consulServerRole": consulServerRole, + "global.secretsBackend.vault.consulClientRole": consulClientRole, + "global.secretsBackend.vault.consulCARole": serverPKIConfig.RoleName, "global.secretsBackend.vault.manageSystemACLsRole": manageSystemACLsRole, "global.secretsBackend.vault.ca.secretName": vaultCASecret, "global.secretsBackend.vault.ca.secretKey": "tls.crt", + "global.secretsBackend.vault.connectCA.address": 
vaultCluster.Address(), + "global.secretsBackend.vault.connectCA.rootPKIPath": connectCARootPath, + "global.secretsBackend.vault.connectCA.intermediatePKIPath": connectCAIntermediatePath, + "global.acls.manageSystemACLs": "true", "global.acls.bootstrapToken.secretName": bootstrapTokenSecret.Path, "global.acls.bootstrapToken.secretKey": bootstrapTokenSecret.Key, @@ -182,9 +220,10 @@ func TestSnapshotAgent_Vault(t *testing.T) { "global.tls.caCert.secretName": serverPKIConfig.CAPath, "global.tls.enableAutoEncrypt": "true", - "server.snapshotAgent.enabled": "true", - "server.snapshotAgent.configSecret.secretName": snapshotAgentConfigSecret.Path, - "server.snapshotAgent.configSecret.secretKey": snapshotAgentConfigSecret.Key, + "client.snapshotAgent.enabled": "true", + "client.snapshotAgent.configSecret.secretName": snapshotAgentConfigSecret.Path, + "client.snapshotAgent.configSecret.secretKey": snapshotAgentConfigSecret.Key, + "global.secretsBackend.vault.consulSnapshotAgentRole": snapAgentRole, } if cfg.EnableEnterprise { @@ -201,18 +240,26 @@ func TestSnapshotAgent_Vault(t *testing.T) { // Create k8s client from kubectl options. client := environment.KubernetesClientFromOptions(t, kubectlOptions) podList, err := client.CoreV1().Pods(kubectlOptions.Namespace).List(context.Background(), - metav1.ListOptions{LabelSelector: fmt.Sprintf("app=consul,component=server,release=%s", consulReleaseName)}) + metav1.ListOptions{LabelSelector: fmt.Sprintf("app=consul,component=client-snapshot-agent,release=%s", consulReleaseName)}) require.NoError(t, err) - require.Len(t, podList.Items, 1, "expected to find only 1 consul server instance") - - // We need to give some extra time for ACLs to finish bootstrapping and for servers to come up. - timer := &retry.Timer{Timeout: 1 * time.Minute, Wait: 1 * time.Second} - retry.RunWith(timer, t, func(r *retry.R) { - // Loop through snapshot agents. Only one will be the leader and have the snapshot files. - pod := podList.Items[0] - snapshotFileListOutput, err := k8s.RunKubectlAndGetOutputWithLoggerE(t, kubectlOptions, terratestLogger.Discard, "exec", pod.Name, "-c", "consul-snapshot-agent", "--", "ls", "/tmp") - require.NoError(r, err) + require.True(t, len(podList.Items) > 0) + + // Wait for 10 seconds to allow snapshot to write. + time.Sleep(10 * time.Second) + + // Loop through snapshot agents. Only one will be the leader and have the snapshot files. 
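The replacement code above trades the deleted retry loop for a fixed 10-second sleep before inspecting pods. If the fixed sleep proves flaky, the two approaches combine naturally; a sketch reusing the sdk retry helper and the framework's kubectl wrapper, with signatures as used elsewhere in this diff:

```go
package snapshotsketch

import (
	"strings"
	"testing"
	"time"

	terratestk8s "github.com/gruntwork-io/terratest/modules/k8s"
	terratestLogger "github.com/gruntwork-io/terratest/modules/logger"
	"github.com/hashicorp/consul-k8s/acceptance/framework/k8s"
	"github.com/hashicorp/consul/sdk/testutil/retry"
	"github.com/stretchr/testify/require"
	corev1 "k8s.io/api/core/v1"
)

// waitForSnapshot polls every snapshot-agent pod for up to a minute and
// returns as soon as any pod (the leader) has written a .snap file.
func waitForSnapshot(t *testing.T, opts *terratestk8s.KubectlOptions, pods []corev1.Pod) {
	timer := &retry.Timer{Timeout: 1 * time.Minute, Wait: 1 * time.Second}
	retry.RunWith(timer, t, func(r *retry.R) {
		for _, pod := range pods {
			out, err := k8s.RunKubectlAndGetOutputWithLoggerE(t, opts, terratestLogger.Discard,
				"exec", pod.Name, "-c", "consul-snapshot-agent", "--", "ls", "/")
			require.NoError(r, err)
			if strings.Contains(out, ".snap") {
				return
			}
		}
		r.Errorf("no snapshot-agent pod contains .snap files yet")
	})
}
```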
+ hasSnapshots := false + for _, pod := range podList.Items { + snapshotFileListOutput, err := k8s.RunKubectlAndGetOutputWithLoggerE(t, kubectlOptions, terratestLogger.Discard, "exec", pod.Name, "-c", "consul-snapshot-agent", "--", "ls", "/") logger.Logf(t, "Snapshot: \n%s", snapshotFileListOutput) - require.Contains(r, snapshotFileListOutput, ".snap", "Agent pod does not contain snapshot files") - }) + require.NoError(t, err) + if strings.Contains(snapshotFileListOutput, ".snap") { + logger.Logf(t, "Agent pod contains snapshot files") + hasSnapshots = true + break + } else { + logger.Logf(t, "Agent pod does not contain snapshot files") + } + } + require.True(t, hasSnapshots) } diff --git a/acceptance/tests/sync/sync_catalog_test.go b/acceptance/tests/sync/sync_catalog_test.go index c4f873fcbd..b43ef66099 100644 --- a/acceptance/tests/sync/sync_catalog_test.go +++ b/acceptance/tests/sync/sync_catalog_test.go @@ -2,7 +2,6 @@ package sync import ( "fmt" - "strconv" "testing" "time" @@ -24,25 +23,45 @@ func TestSyncCatalog(t *testing.T) { if cfg.EnableCNI { t.Skipf("skipping because -enable-cni is set and sync catalog is already tested with regular tproxy") } - - cases := map[string]struct { - secure bool + cases := []struct { + name string + helmValues map[string]string + secure bool }{ - "non-secure": {secure: false}, - "secure": {secure: true}, + { + "Default installation", + map[string]string{ + "syncCatalog.enabled": "true", + }, + false, + }, + { + "Secure installation (with TLS and ACLs enabled)", + map[string]string{ + "syncCatalog.enabled": "true", + "global.tls.enabled": "true", + "global.acls.manageSystemACLs": "true", + }, + true, + }, + { + "Secure installation (with TLS with auto-encrypt and ACLs enabled)", + map[string]string{ + "syncCatalog.enabled": "true", + "global.tls.enabled": "true", + "global.tls.enableAutoEncrypt": "true", + "global.acls.manageSystemACLs": "true", + }, + true, + }, } - for name, c := range cases { - t.Run(name, func(t *testing.T) { + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { ctx := suite.Environment().DefaultContext(t) - helmValues := map[string]string{ - "syncCatalog.enabled": "true", - "global.tls.enabled": strconv.FormatBool(c.secure), - "global.acls.manageSystemACLs": strconv.FormatBool(c.secure), - } releaseName := helpers.RandomName() - consulCluster := consul.NewHelmCluster(t, helmValues, ctx, suite.Config(), releaseName) + consulCluster := consul.NewHelmCluster(t, c.helmValues, ctx, suite.Config(), releaseName) consulCluster.Create(t) diff --git a/acceptance/tests/terminating-gateway/terminating_gateway_destinations_test.go b/acceptance/tests/terminating-gateway/terminating_gateway_destinations_test.go index 4ff4ae7bd4..d18d971743 100644 --- a/acceptance/tests/terminating-gateway/terminating_gateway_destinations_test.go +++ b/acceptance/tests/terminating-gateway/terminating_gateway_destinations_test.go @@ -2,6 +2,9 @@ package terminatinggateway import ( "fmt" + "github.com/hashicorp/consul/api" + "github.com/hashicorp/go-version" + "github.com/stretchr/testify/require" "strconv" "testing" @@ -9,9 +12,6 @@ import ( "github.com/hashicorp/consul-k8s/acceptance/framework/helpers" "github.com/hashicorp/consul-k8s/acceptance/framework/k8s" "github.com/hashicorp/consul-k8s/acceptance/framework/logger" - "github.com/hashicorp/consul/api" - "github.com/hashicorp/go-version" - "github.com/stretchr/testify/require" ) // Test that egress Destinations route through terminating gateways. 
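Background for the Destinations tests that follow: an egress "destination" is a service-defaults config entry whose Destination block tells the terminating gateway where an external, non-mesh service lives (Consul 1.13+). A sketch with illustrative names; the tests' own fixtures define the actual entries:

```go
package destinationsketch

import "github.com/hashicorp/consul/api"

// registerDestination writes a service-defaults entry pointing the mesh at
// an external address that is reachable only through the terminating
// gateway. Service name, address, and port are illustrative.
func registerDestination(client *api.Client) error {
	entry := &api.ServiceConfigEntry{
		Kind:     api.ServiceDefaults,
		Name:     "external-static-server",
		Protocol: "tcp",
		Destination: &api.DestinationConfig{
			Addresses: []string{"example.com"},
			Port:      443,
		},
	}
	_, _, err := client.ConfigEntries().Set(entry, nil)
	return err
}
```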
@@ -60,6 +60,7 @@ func TestTerminatingGatewayDestinations(t *testing.T) { "global.acls.manageSystemACLs": strconv.FormatBool(c.secure), "global.tls.enabled": strconv.FormatBool(c.secure), + "global.tls.enableAutoEncrypt": strconv.FormatBool(c.secure), } logger.Log(t, "creating consul cluster") diff --git a/acceptance/tests/terminating-gateway/terminating_gateway_namespaces_test.go b/acceptance/tests/terminating-gateway/terminating_gateway_namespaces_test.go index 8c4435ae75..7168fdd4e8 100644 --- a/acceptance/tests/terminating-gateway/terminating_gateway_namespaces_test.go +++ b/acceptance/tests/terminating-gateway/terminating_gateway_namespaces_test.go @@ -10,12 +10,17 @@ import ( "github.com/hashicorp/consul-k8s/acceptance/framework/helpers" "github.com/hashicorp/consul-k8s/acceptance/framework/k8s" "github.com/hashicorp/consul-k8s/acceptance/framework/logger" + "github.com/hashicorp/consul/api" + "github.com/stretchr/testify/require" ) const testNamespace = "ns1" // Test we can connect through the terminating gateway when both // the terminating gateway and the connect service are in the same namespace. +// These tests currently only test non-secure and secure without auto-encrypt installations +// because in the case of namespaces there isn't a significant distinction in code between auto-encrypt +// and non-auto-encrypt secure installations, so testing just one is enough. func TestTerminatingGatewaySingleNamespace(t *testing.T) { cfg := suite.Config() if !cfg.EnableEnterprise { @@ -26,10 +31,10 @@ func TestTerminatingGatewaySingleNamespace(t *testing.T) { secure bool }{ { - secure: false, + false, }, { - secure: true, + true, }, } for _, c := range cases { @@ -37,6 +42,8 @@ t.Run(name, func(t *testing.T) { ctx := suite.Environment().DefaultContext(t) + // Install the Helm chart without the terminating gateway first + // so that we can create the namespace for it. helmValues := map[string]string{ "connectInject.enabled": "true", "connectInject.consulNamespaces.consulDestinationNamespace": testNamespace, @@ -44,19 +51,33 @@ "global.enableConsulNamespaces": "true", "global.acls.manageSystemACLs": strconv.FormatBool(c.secure), "global.tls.enabled": strconv.FormatBool(c.secure), - - "terminatingGateways.enabled": "true", - "terminatingGateways.gateways[0].name": "terminating-gateway", - "terminatingGateways.gateways[0].replicas": "1", - "terminatingGateways.gateways[0].consulNamespace": testNamespace, } releaseName := helpers.RandomName() consulCluster := consul.NewHelmCluster(t, helmValues, ctx, cfg, releaseName) + consulCluster.Create(t) consulClient, _ := consulCluster.SetupConsulClient(t, c.secure) + // Create the destination namespace in the non-secure case. + // In the secure installation, this namespace is created by the server-acl-init job. 
+ if !c.secure { + logger.Logf(t, "creating the %s namespace in Consul", testNamespace) + _, _, err := consulClient.Namespaces().Create(&api.Namespace{ + Name: testNamespace, + }, nil) + require.NoError(t, err) + } + + logger.Log(t, "upgrading with terminating gateways enabled") + consulCluster.Upgrade(t, map[string]string{ + "terminatingGateways.enabled": "true", + "terminatingGateways.gateways[0].name": "terminating-gateway", + "terminatingGateways.gateways[0].replicas": "1", + "terminatingGateways.gateways[0].consulNamespace": testNamespace, + }) + logger.Logf(t, "creating Kubernetes namespace %s", testNamespace) k8s.RunKubectl(t, ctx.KubectlOptions(t), "create", "ns", testNamespace) helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() { @@ -111,6 +132,9 @@ func TestTerminatingGatewaySingleNamespace(t *testing.T) { // Test we can connect through the terminating gateway when the terminating gateway, // the external service, and the connect service are in different namespace. +// These tests currently only test non-secure and secure without auto-encrypt installations +// because in the case of namespaces there isn't a significant distinction in code between auto-encrypt +// and non-auto-encrypt secure installations, so testing just one is enough. func TestTerminatingGatewayNamespaceMirroring(t *testing.T) { cfg := suite.Config() if !cfg.EnableEnterprise { @@ -121,10 +145,10 @@ func TestTerminatingGatewayNamespaceMirroring(t *testing.T) { secure bool }{ { - secure: false, + false, }, { - secure: true, + true, }, } for _, c := range cases { diff --git a/acceptance/tests/terminating-gateway/terminating_gateway_test.go b/acceptance/tests/terminating-gateway/terminating_gateway_test.go index 16809de5e2..8facd30f53 100644 --- a/acceptance/tests/terminating-gateway/terminating_gateway_test.go +++ b/acceptance/tests/terminating-gateway/terminating_gateway_test.go @@ -16,17 +16,24 @@ import ( // Test that terminating gateways work in a default and secure installations. 
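Related background for the gateway tests on both sides of this hunk: a terminating gateway only proxies traffic for services linked to it through a terminating-gateway config entry. A sketch with illustrative names (the tests apply their own fixtures for this):

```go
package gatewaysketch

import "github.com/hashicorp/consul/api"

// linkServiceToGateway registers an external service with the terminating
// gateway so the gateway will proxy traffic to it. Names are illustrative.
func linkServiceToGateway(client *api.Client) error {
	entry := &api.TerminatingGatewayConfigEntry{
		Kind: api.TerminatingGateway,
		Name: "terminating-gateway",
		Services: []api.LinkedService{
			{Name: "external-static-server"},
		},
	}
	_, _, err := client.ConfigEntries().Set(entry, nil)
	return err
}
```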
func TestTerminatingGateway(t *testing.T) { cases := []struct { - secure bool + secure bool + autoEncrypt bool }{ { - secure: false, + false, + false, }, { - secure: true, + true, + false, + }, + { + true, + true, }, } for _, c := range cases { - name := fmt.Sprintf("secure: %t", c.secure) + name := fmt.Sprintf("secure: %t, auto-encrypt: %t", c.secure, c.autoEncrypt) t.Run(name, func(t *testing.T) { ctx := suite.Environment().DefaultContext(t) cfg := suite.Config() @@ -39,6 +46,7 @@ func TestTerminatingGateway(t *testing.T) { "global.acls.manageSystemACLs": strconv.FormatBool(c.secure), "global.tls.enabled": strconv.FormatBool(c.secure), + "global.tls.enableAutoEncrypt": strconv.FormatBool(c.autoEncrypt), } logger.Log(t, "creating consul cluster") diff --git a/acceptance/tests/vault/vault_namespaces_test.go b/acceptance/tests/vault/vault_namespaces_test.go index 8d0beefdc0..82ed605a48 100644 --- a/acceptance/tests/vault/vault_namespaces_test.go +++ b/acceptance/tests/vault/vault_namespaces_test.go @@ -179,6 +179,7 @@ func TestVault_VaultNamespace(t *testing.T) { "connectInject.enabled": "true", "connectInject.replicas": "1", + "controller.enabled": "true", "global.secretsBackend.vault.enabled": "true", "global.secretsBackend.vault.consulServerRole": consulServerRole, diff --git a/acceptance/tests/vault/vault_partitions_test.go b/acceptance/tests/vault/vault_partitions_test.go index 0fff6726dc..5ff9ae7a6b 100644 --- a/acceptance/tests/vault/vault_partitions_test.go +++ b/acceptance/tests/vault/vault_partitions_test.go @@ -300,6 +300,7 @@ func TestVault_Partitions(t *testing.T) { "connectInject.enabled": "true", "connectInject.replicas": "1", + "controller.enabled": "true", "global.secretsBackend.vault.enabled": "true", "global.secretsBackend.vault.consulClientRole": consulClientRole, @@ -345,10 +346,11 @@ func TestVault_Partitions(t *testing.T) { // share the same node network (docker bridge), we can use // a NodePort service so that we can access node(s) in a different Kind cluster. 
if cfg.UseKind { + serverHelmValues["global.adminPartitions.service.type"] = "NodePort" + serverHelmValues["global.adminPartitions.service.nodePort.https"] = "30000" serverHelmValues["meshGateway.service.type"] = "NodePort" serverHelmValues["meshGateway.service.nodePort"] = "30100" serverHelmValues["server.exposeService.type"] = "NodePort" - serverHelmValues["server.exposeService.nodePort.https"] = "30000" } helpers.MergeMaps(serverHelmValues, commonHelmValues) @@ -357,7 +359,7 @@ func TestVault_Partitions(t *testing.T) { consulCluster := consul.NewHelmCluster(t, serverHelmValues, serverClusterCtx, cfg, consulReleaseName) consulCluster.Create(t) - partitionServiceName := fmt.Sprintf("%s-consul-expose-servers", consulReleaseName) + partitionServiceName := fmt.Sprintf("%s-consul-partition", consulReleaseName) partitionSvcAddress := k8s.ServiceHost(t, cfg, serverClusterCtx, partitionServiceName) k8sAuthMethodHost := k8s.KubernetesAPIServerHost(t, cfg, clientClusterCtx) diff --git a/acceptance/tests/vault/vault_test.go b/acceptance/tests/vault/vault_test.go index cf0c926b22..49b1b59bf8 100644 --- a/acceptance/tests/vault/vault_test.go +++ b/acceptance/tests/vault/vault_test.go @@ -12,7 +12,6 @@ import ( "github.com/hashicorp/consul-k8s/acceptance/framework/helpers" "github.com/hashicorp/consul-k8s/acceptance/framework/k8s" "github.com/hashicorp/consul-k8s/acceptance/framework/logger" - "github.com/hashicorp/consul-k8s/acceptance/framework/portforward" "github.com/hashicorp/consul-k8s/acceptance/framework/vault" "github.com/hashicorp/go-uuid" "github.com/hashicorp/go-version" @@ -84,6 +83,20 @@ func TestVault(t *testing.T) { } serverPKIConfig.ConfigurePKIAndAuthRole(t, vaultClient) + // Configure controller webhook PKI + controllerWebhookPKIConfig := &vault.PKIAndAuthRoleConfiguration{ + BaseURL: "controller", + PolicyName: "controller-ca-policy", + RoleName: "controller-ca-role", + KubernetesNamespace: ns, + DataCenter: "dc1", + ServiceAccountName: fmt.Sprintf("%s-consul-%s", consulReleaseName, "controller"), + AllowedSubdomain: fmt.Sprintf("%s-consul-%s", consulReleaseName, "controller-webhook"), + MaxTTL: fmt.Sprintf("%ds", expirationInSeconds), + AuthMethodPath: KubernetesAuthMethodPath, + } + controllerWebhookPKIConfig.ConfigurePKIAndAuthRole(t, vaultClient) + // Configure connect injector webhook PKI connectInjectorWebhookPKIConfig := &vault.PKIAndAuthRoleConfiguration{ BaseURL: "connect", @@ -196,14 +209,18 @@ func TestVault(t *testing.T) { "connectInject.enabled": "true", "connectInject.replicas": "1", + "controller.enabled": "true", "global.secretsBackend.vault.connectInject.tlsCert.secretName": connectInjectorWebhookPKIConfig.CertPath, "global.secretsBackend.vault.connectInject.caCert.secretName": connectInjectorWebhookPKIConfig.CAPath, + "global.secretsBackend.vault.controller.tlsCert.secretName": controllerWebhookPKIConfig.CertPath, + "global.secretsBackend.vault.controller.caCert.secretName": controllerWebhookPKIConfig.CAPath, "global.secretsBackend.vault.enabled": "true", "global.secretsBackend.vault.consulServerRole": consulServerRole, "global.secretsBackend.vault.consulClientRole": consulClientRole, "global.secretsBackend.vault.consulCARole": serverPKIConfig.RoleName, "global.secretsBackend.vault.connectInjectRole": connectInjectorWebhookPKIConfig.RoleName, + "global.secretsBackend.vault.controllerRole": controllerWebhookPKIConfig.RoleName, "global.secretsBackend.vault.manageSystemACLsRole": manageSystemACLsRole, "global.secretsBackend.vault.ca.secretName": vaultCASecret, @@ 
-236,9 +253,6 @@ func TestVault(t *testing.T) { "syncCatalog.enabled": "true", "syncCatalog.toConsul": "false", "syncCatalog.toK8S": "false", - - // Enable clients to make sure vault integration still works. - "client.enabled": "true", } if cfg.EnableEnterprise { @@ -257,7 +271,7 @@ func TestVault(t *testing.T) { require.NoError(t, err) require.NotEmpty(t, podList.Items) connectInjectorPodName := podList.Items[0].Name - connectInjectorPodAddress := portforward.CreateTunnelToResourcePort(t, connectInjectorPodName, 8080, kubectlOptions, terratestLogger.Discard) + connectInjectorPodAddress := consulCluster.CreatePortForwardTunnelToResourcePort(t, connectInjectorPodName, 8080) connectInjectorCert, err := getCertificate(t, connectInjectorPodAddress) require.NoError(t, err) logger.Logf(t, "Connect Inject Webhook Cert expiry: %s \n", connectInjectorCert.NotAfter.String()) diff --git a/acceptance/tests/vault/vault_tls_auto_reload_test.go b/acceptance/tests/vault/vault_tls_auto_reload_test.go index f079ee7492..6cbcb5d351 100644 --- a/acceptance/tests/vault/vault_tls_auto_reload_test.go +++ b/acceptance/tests/vault/vault_tls_auto_reload_test.go @@ -171,6 +171,7 @@ func TestVault_TLSAutoReload(t *testing.T) { "connectInject.enabled": "true", "connectInject.replicas": "1", + "controller.enabled": "true", "global.secretsBackend.vault.enabled": "true", "global.secretsBackend.vault.consulServerRole": consulServerRole, diff --git a/acceptance/tests/vault/vault_wan_fed_test.go b/acceptance/tests/vault/vault_wan_fed_test.go index 0a08810463..5e4a4acdc5 100644 --- a/acceptance/tests/vault/vault_wan_fed_test.go +++ b/acceptance/tests/vault/vault_wan_fed_test.go @@ -363,6 +363,7 @@ func TestVault_WANFederationViaGateways(t *testing.T) { // Mesh config. "connectInject.enabled": "true", + "controller.enabled": "true", "meshGateway.enabled": "true", "meshGateway.replicas": "1", diff --git a/acceptance/tests/wan-federation/wan_federation_test.go b/acceptance/tests/wan-federation/wan_federation_test.go deleted file mode 100644 index 1b23633077..0000000000 --- a/acceptance/tests/wan-federation/wan_federation_test.go +++ /dev/null @@ -1,188 +0,0 @@ -package wanfederation - -import ( - "context" - "fmt" - "strconv" - "testing" - - "github.com/hashicorp/consul-k8s/acceptance/framework/consul" - "github.com/hashicorp/consul-k8s/acceptance/framework/environment" - "github.com/hashicorp/consul-k8s/acceptance/framework/helpers" - "github.com/hashicorp/consul-k8s/acceptance/framework/k8s" - "github.com/hashicorp/consul-k8s/acceptance/framework/logger" - "github.com/hashicorp/consul/api" - "github.com/stretchr/testify/require" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const StaticClientName = "static-client" - -// Test that Connect and wan federation over mesh gateways work in a default installation -// i.e. without ACLs because TLS is required for WAN federation over mesh gateways. 
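The entire WAN federation suite is deleted below. Within it, `helpers.VerifyFederation` is a framework call whose source is not shown in this diff; a plausible reading of what it checks, sketched against the public Consul API (not the helper's actual implementation):

```go
package federationsketch

import (
	"testing"

	"github.com/hashicorp/consul/api"
	"github.com/stretchr/testify/require"
)

// verifyFederated asserts that servers from more than one datacenter appear
// in the WAN gossip pool, the basic signal that federation succeeded.
func verifyFederated(t *testing.T, client *api.Client) {
	members, err := client.Agent().Members(true) // true selects the WAN pool
	require.NoError(t, err)
	require.GreaterOrEqual(t, len(members), 2, "expected servers from both datacenters in the WAN pool")
}
```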
-func TestWANFederation(t *testing.T) { - cases := []struct { - name string - secure bool - }{ - { - name: "secure", - secure: true, - }, - { - name: "default", - secure: false, - }, - } - - for _, c := range cases { - t.Run(c.name, func(t *testing.T) { - - env := suite.Environment() - cfg := suite.Config() - - if cfg.UseKind { - t.Skipf("skipping wan federation tests as they currently fail on Kind even though they work on other clouds.") - } - - primaryContext := env.DefaultContext(t) - secondaryContext := env.Context(t, environment.SecondaryContextName) - - primaryHelmValues := map[string]string{ - "global.datacenter": "dc1", - - "global.tls.enabled": "true", - "global.tls.httpsOnly": strconv.FormatBool(c.secure), - - "global.federation.enabled": "true", - "global.federation.createFederationSecret": "true", - - "global.acls.manageSystemACLs": strconv.FormatBool(c.secure), - "global.acls.createReplicationToken": strconv.FormatBool(c.secure), - - "connectInject.enabled": "true", - "connectInject.replicas": "1", - - "meshGateway.enabled": "true", - "meshGateway.replicas": "1", - } - - if cfg.UseKind { - primaryHelmValues["meshGateway.service.type"] = "NodePort" - primaryHelmValues["meshGateway.service.nodePort"] = "30000" - } - - releaseName := helpers.RandomName() - - // Install the primary consul cluster in the default kubernetes context - primaryConsulCluster := consul.NewHelmCluster(t, primaryHelmValues, primaryContext, cfg, releaseName) - primaryConsulCluster.Create(t) - - // Get the federation secret from the primary cluster and apply it to secondary cluster - federationSecretName := fmt.Sprintf("%s-consul-federation", releaseName) - logger.Logf(t, "retrieving federation secret %s from the primary cluster and applying to the secondary", federationSecretName) - federationSecret, err := primaryContext.KubernetesClient(t).CoreV1().Secrets(primaryContext.KubectlOptions(t).Namespace).Get(context.Background(), federationSecretName, metav1.GetOptions{}) - require.NoError(t, err) - federationSecret.ResourceVersion = "" - _, err = secondaryContext.KubernetesClient(t).CoreV1().Secrets(secondaryContext.KubectlOptions(t).Namespace).Create(context.Background(), federationSecret, metav1.CreateOptions{}) - require.NoError(t, err) - - var k8sAuthMethodHost string - // When running on kind, the kube API address in kubeconfig will have a localhost address - // which will not work from inside the container. That's why we need to use the endpoints address instead - // which will point the node IP. - if cfg.UseKind { - // The Kubernetes AuthMethod host is read from the endpoints for the Kubernetes service. 
- kubernetesEndpoint, err := secondaryContext.KubernetesClient(t).CoreV1().Endpoints("default").Get(context.Background(), "kubernetes", metav1.GetOptions{}) - require.NoError(t, err) - k8sAuthMethodHost = fmt.Sprintf("%s:%d", kubernetesEndpoint.Subsets[0].Addresses[0].IP, kubernetesEndpoint.Subsets[0].Ports[0].Port) - } else { - k8sAuthMethodHost = k8s.KubernetesAPIServerHostFromOptions(t, secondaryContext.KubectlOptions(t)) - } - - // Create secondary cluster - secondaryHelmValues := map[string]string{ - "global.datacenter": "dc2", - - "global.tls.enabled": "true", - "global.tls.httpsOnly": "false", - "global.acls.manageSystemACLs": strconv.FormatBool(c.secure), - "global.tls.caCert.secretName": federationSecretName, - "global.tls.caCert.secretKey": "caCert", - "global.tls.caKey.secretName": federationSecretName, - "global.tls.caKey.secretKey": "caKey", - - "global.federation.enabled": "true", - - "server.extraVolumes[0].type": "secret", - "server.extraVolumes[0].name": federationSecretName, - "server.extraVolumes[0].load": "true", - "server.extraVolumes[0].items[0].key": "serverConfigJSON", - "server.extraVolumes[0].items[0].path": "config.json", - - "connectInject.enabled": "true", - "connectInject.replicas": "1", - - "meshGateway.enabled": "true", - "meshGateway.replicas": "1", - } - - if c.secure { - secondaryHelmValues["global.acls.replicationToken.secretName"] = federationSecretName - secondaryHelmValues["global.acls.replicationToken.secretKey"] = "replicationToken" - secondaryHelmValues["global.federation.k8sAuthMethodHost"] = k8sAuthMethodHost - secondaryHelmValues["global.federation.primaryDatacenter"] = "dc1" - } - - if cfg.UseKind { - secondaryHelmValues["meshGateway.service.type"] = "NodePort" - secondaryHelmValues["meshGateway.service.nodePort"] = "30000" - } - - // Install the secondary consul cluster in the secondary kubernetes context - secondaryConsulCluster := consul.NewHelmCluster(t, secondaryHelmValues, secondaryContext, cfg, releaseName) - secondaryConsulCluster.Create(t) - - primaryClient, _ := primaryConsulCluster.SetupConsulClient(t, c.secure) - secondaryClient, _ := secondaryConsulCluster.SetupConsulClient(t, c.secure) - - // Verify federation between servers - logger.Log(t, "verifying federation was successful") - helpers.VerifyFederation(t, primaryClient, secondaryClient, releaseName, c.secure) - - // Create a ProxyDefaults resource to configure services to use the mesh - // gateways. 
- logger.Log(t, "creating proxy-defaults config") - kustomizeDir := "../fixtures/bases/mesh-gateway" - k8s.KubectlApplyK(t, secondaryContext.KubectlOptions(t), kustomizeDir) - helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() { - k8s.KubectlDeleteK(t, secondaryContext.KubectlOptions(t), kustomizeDir) - }) - - // Check that we can connect services over the mesh gateways - logger.Log(t, "creating static-server in dc2") - k8s.DeployKustomize(t, secondaryContext.KubectlOptions(t), cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-server-inject") - - logger.Log(t, "creating static-client in dc1") - k8s.DeployKustomize(t, primaryContext.KubectlOptions(t), cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-multi-dc") - - if c.secure { - logger.Log(t, "creating intention") - _, _, err = primaryClient.ConfigEntries().Set(&api.ServiceIntentionsConfigEntry{ - Kind: api.ServiceIntentions, - Name: "static-server", - Sources: []*api.SourceIntention{ - { - Name: StaticClientName, - Action: api.IntentionActionAllow, - }, - }, - }, nil) - require.NoError(t, err) - } - - logger.Log(t, "checking that connection is successful") - k8s.CheckStaticServerConnectionSuccessful(t, primaryContext.KubectlOptions(t), StaticClientName, "http://localhost:1234") - }) - } -} diff --git a/charts/consul/Chart.yaml b/charts/consul/Chart.yaml index 41b95611e6..a6c19db870 100644 --- a/charts/consul/Chart.yaml +++ b/charts/consul/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 name: consul -version: 1.1.0-dev -appVersion: 1.14.4 +version: 0.49.5-dev +appVersion: 1.13.6 kubeVersion: ">=1.21.0-0" description: Official HashiCorp Consul Chart home: https://www.consul.io @@ -13,11 +13,9 @@ annotations: artifacthub.io/prerelease: true artifacthub.io/images: | - name: consul - image: hashicorp/consul:1.14.4 + image: hashicorp/consul:1.13.6 - name: consul-k8s-control-plane - image: docker.mirror.hashicorp.services/hashicorppreview/consul-k8s-control-plane:1.1.0-dev - - name: consul-dataplane - image: hashicorp/consul-dataplane:1.0.1 + image: hashicorp/consul-k8s-control-plane:0.49.5-dev - name: envoy image: envoyproxy/envoy:v1.23.1 artifacthub.io/license: MPL-2.0 diff --git a/charts/consul/README.md b/charts/consul/README.md index e7d7fd9285..ccc695151d 100644 --- a/charts/consul/README.md +++ b/charts/consul/README.md @@ -3,6 +3,10 @@ --- **We're looking for feedback on how folks are using Consul on Kubernetes. Please fill out our brief [survey](https://hashicorp.sjc1.qualtrics.com/jfe/form/SV_4MANbw1BUku7YhL)!** + + + > **Warning** + > Please read the following issue to learn more about upcoming breaking changes that will be implemented by Q4 2022 for the default deployment of Consul on Kubernetes: [Enabling of service mesh by default and disabling of node-level client agents from Consul Service Mesh on Kubernetes and Catalog Sync](https://github.com/hashicorp/consul-k8s/issues/1438) ## Overview @@ -25,85 +29,40 @@ by contacting us at [security@hashicorp.com](mailto:security@hashicorp.com). This enables Kubernetes to easily access external services and for non-Kubernetes nodes to easily discover and access Kubernetes services. -## Installation - -`consul-k8s` is distributed in multiple forms: - - * The recommended installation method is the official - [Consul Helm chart](https://github.com/hashicorp/consul-k8s/tree/main/charts/consul). This will - automatically configure the Consul and Kubernetes integration to run within - an existing Kubernetes cluster. 
- - * A [Docker image `hashicorp/consul-k8s-control-plane`](https://hub.docker.com/r/hashicorp/consul-k8s-control-plane) is available. This can be used to manually run `consul-k8s-control-plane` within a scheduled environment. - - * Consul K8s CLI, distributed as `consul-k8s`, can be used to install and uninstall Consul Kubernetes. See the [Consul K8s CLI Reference](https://www.consul.io/docs/k8s/k8s-cli) for more details on usage. - ### Prerequisites - -The following pre-requisites must be met before installing Consul on Kubernetes. - - * **Kubernetes 1.23.x - 1.26.x** - This represents the earliest versions of Kubernetes tested. - It is possible that this chart works with earlier versions, but it is + * **Helm 3.2+** (Helm 2 is not supported) + * **Kubernetes 1.21-1.24** - This is the earliest version of Kubernetes tested. + It is possible that this chart works with earlier versions but it is untested. - * Helm install - * **Helm 3.6+** for Helm based installs. - * Consul K8s CLI based install - * `kubectl` configured to authenticate to a Kubernetes cluster with a valid `kubeconfig` file. - * `brew`, `yum`, or `apt` package manager on your local machine - -### CLI - -The Consul K8s CLI is the easiest way to get up and running with Consul on Kubernetes. See [Install Consul on K8s CLI](https://developer.hashicorp.com/consul/docs/k8s/installation/install-cli#install-the-cli) for more details on installation, and refer to -[Consul on Kubernetes CLI Reference](https://developer.hashicorp.com/consul/docs/k8s/k8s-cli) for more details on subcommands and a list of all available flags -for each subcommand. - - 1. Install the HashiCorp tap, which is a repository of all Homebrew packages for HashiCorp: - - ``` bash - brew tap hashicorp/tap - ``` - -2. Install the Consul K8s CLI with hashicorp/tap/consul formula. - - ``` bash - brew install hashicorp/tap/consul-k8s - ``` - -3. Issue the install subcommand to install Consul on Kubernetes: - - ``` bash - consul-k8s install - ``` - -### Helm +### Usage -The Helm chart is ideal for those who prefer to use Helm for automation for either the installation or upgrade of Consul on Kubernetes. The chart supports multiple use cases of Consul on Kubernetes, depending on the values provided. Detailed installation instructions for Consul on Kubernetes are found [here](https://www.consul.io/docs/k8s/installation/overview). +Detailed installation instructions for Consul on Kubernetes are found [here](https://www.consul.io/docs/k8s/installation/overview). -1. Add the HashiCorp Helm repository: - +1. Add the HashiCorp Helm Repository: ``` bash - helm repo add hashicorp https://helm.releases.hashicorp.com + $ helm repo add hashicorp https://helm.releases.hashicorp.com ``` -2. Ensure you have access to the Consul Helm chart and you see the latest chart version listed. If you have previously added the - HashiCorp Helm repository, run `helm repo update`. +2. Ensure you have access to the Consul Helm chart and you see the latest chart version listed. + If you have previously added the HashiCorp Helm repository, run `helm repo update`. - ``` bash - helm search repo hashicorp/consul - ``` + ```bash + $ helm search repo hashicorp/consul + ``` -3. Now you're ready to install Consul! To install Consul with the default configuration using Helm 3.2 run the following command below. - This will create a `consul` Kubernetes namespace if not already present, and install Consul on the dedicated namespace. 
- - ``` bash - helm install consul hashicorp/consul --set global.name=consul --create-namespace -n consul +3. Now you're ready to install Consul! To install Consul with the default configuration using Helm 3.2 run the following command below. + This will create a `consul` Kubernetes namespace if not already present, and install Consul on the dedicated namespace. + + ```bash + $ helm install consul hashicorp/consul --set global.name=consul --create-namespace -n consul + ``` Please see the many options supported in the `values.yaml` file. These are also fully documented directly on the [Consul website](https://www.consul.io/docs/platform/k8s/helm.html). -## Tutorials +# Tutorials You can find examples and complete tutorials on how to deploy Consul on -Kubernetes using Helm on the [HashiCorp Learn website](https://learn.hashicorp.com/collections/consul/kubernetes). +Kubernetes using Helm on the [HashiCorp Learn website](https://learn.hashicorp.com/consul). diff --git a/charts/consul/templates/_helpers.tpl b/charts/consul/templates/_helpers.tpl index 3552c8c209..02b2adb39e 100644 --- a/charts/consul/templates/_helpers.tpl +++ b/charts/consul/templates/_helpers.tpl @@ -73,6 +73,22 @@ as well as the global.name setting. {{ "{{" }}- end -{{ "}}" }} {{- end -}} +{{- define "consul.controllerWebhookTLSCertTemplate" -}} + | + {{ "{{" }}- with secret "{{ .Values.global.secretsBackend.vault.controller.tlsCert.secretName }}" "{{- $name := include "consul.fullname" . -}}{{ printf "common_name=%s-controller-webhook" $name }}" + "alt_names={{ include "consul.controllerWebhookTLSAltNames" . }}" -{{ "}}" }} + {{ "{{" }}- .Data.certificate -{{ "}}" }} + {{ "{{" }}- end -{{ "}}" }} +{{- end -}} + +{{- define "consul.controllerWebhookTLSKeyTemplate" -}} + | + {{ "{{" }}- with secret "{{ .Values.global.secretsBackend.vault.controller.tlsCert.secretName }}" "{{- $name := include "consul.fullname" . -}}{{ printf "common_name=%s-controller-webhook" $name }}" + "alt_names={{ include "consul.controllerWebhookTLSAltNames" . }}" -{{ "}}" }} + {{ "{{" }}- .Data.private_key -{{ "}}" }} + {{ "{{" }}- end -{{ "}}" }} +{{- end -}} + {{- define "consul.serverTLSAltNames" -}} {{- $name := include "consul.fullname" . -}} {{- $ns := .Release.Namespace -}} @@ -93,6 +109,12 @@ as well as the global.name setting. {{ printf "%s-connect-injector,%s-connect-injector.%s,%s-connect-injector.%s.svc,%s-connect-injector.%s.svc.cluster.local" $name $name $ns $name $ns $name $ns}} {{- end -}} +{{- define "consul.controllerWebhookTLSAltNames" -}} +{{- $name := include "consul.fullname" . -}} +{{- $ns := .Release.Namespace -}} +{{ printf "%s-controller-webhook,%s-controller-webhook.%s,%s-controller-webhook.%s.svc,%s-controller-webhook.%s.svc.cluster.local" $name $name $ns $name $ns $name $ns}} +{{- end -}} + {{- define "consul.vaultReplicationTokenTemplate" -}} | {{ "{{" }}- with secret "{{ .Values.global.acls.replicationToken.secretName }}" -{{ "}}" }} @@ -126,6 +148,19 @@ is passed to consul as a -config-file param on command line. [ -n "${HOSTNAME}" ] && sed -Ei "s|HOSTNAME|${HOSTNAME?}|g" /consul/extra-config/extra-from-values.json {{- end -}} +{{/* +Sets up a list of recusor flags for Consul agents by iterating over the IPs of every nameserver +in /etc/resolv.conf and concatenating them into a string of arguments that can be passed directly +to the consul agent command. 
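As a concrete illustration: with nameservers `10.96.0.10` and `8.8.8.8` (hypothetical) listed in `/etc/resolv.conf`, the loop that follows leaves `recursor_flags=" -recursor=10.96.0.10 -recursor=8.8.8.8"`, which the client daemonset later expands via `$recursor_flags` on the agent command line. The helper is only rendered when DNS redirection is enabled, i.e. with values along these lines:

```yaml
# Values gating the recursor wiring added in this diff; both flags are
# checked together in client-daemonset.yaml below.
dns:
  enabled: true
  enableRedirection: true
```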
+*/}} +{{- define "consul.recursors" -}} + recursor_flags="" + for ip in $(cat /etc/resolv.conf | grep nameserver | cut -d' ' -f2) + do + recursor_flags="$recursor_flags -recursor=$ip" + done +{{- end -}} + {{/* Create chart name and version as used by the chart label. */}} @@ -201,9 +236,6 @@ This template is for an init container. consul-k8s-control-plane get-consul-client-ca \ -output-file=/consul/tls/client/ca/tls.crt \ -consul-api-timeout={{ .Values.global.consulAPITimeout }} \ - {{- if .Values.global.cloud.enabled }} - -tls-server-name=server.{{.Values.global.datacenter}}.{{.Values.global.domain}} \ - {{- end}} {{- if .Values.externalServers.enabled }} {{- if and .Values.externalServers.enabled (not .Values.externalServers.hosts) }}{{ fail "externalServers.hosts must be set if externalServers.enabled is true" }}{{ end -}} -server-addr={{ quote (first .Values.externalServers.hosts) }} \ @@ -263,127 +295,20 @@ Fails when at least one but not all of the following have been set: - global.secretsBackend.vault.connectInjectRole - global.secretsBackend.vault.connectInject.tlsCert.secretName - global.secretsBackend.vault.connectInject.caCert.secretName +- global.secretsBackend.vault.controllerRole +- global.secretsBackend.vault.controller.tlsCert.secretName +- global.secretsBackend.vault.controller.caCert.secretName The above values are needed in full to turn off web cert manager and allow -connect inject to manage its own webhook certs. +connect inject and controller to manage its own webhook certs. Usage: {{ template "consul.validateVaultWebhookCertConfiguration" . }} */}} {{- define "consul.validateVaultWebhookCertConfiguration" -}} -{{- if or .Values.global.secretsBackend.vault.connectInjectRole .Values.global.secretsBackend.vault.connectInject.tlsCert.secretName .Values.global.secretsBackend.vault.connectInject.caCert.secretName}} -{{- if or (not .Values.global.secretsBackend.vault.connectInjectRole) (not .Values.global.secretsBackend.vault.connectInject.tlsCert.secretName) (not .Values.global.secretsBackend.vault.connectInject.caCert.secretName) }} -{{fail "When one of the following has been set, all must be set: global.secretsBackend.vault.connectInjectRole, global.secretsBackend.vault.connectInject.tlsCert.secretName, global.secretsBackend.vault.connectInject.caCert.secretName"}} +{{- if or .Values.global.secretsBackend.vault.connectInjectRole .Values.global.secretsBackend.vault.connectInject.tlsCert.secretName .Values.global.secretsBackend.vault.connectInject.caCert.secretName .Values.global.secretsBackend.vault.controllerRole .Values.global.secretsBackend.vault.controller.tlsCert.secretName .Values.global.secretsBackend.vault.controller.caCert.secretName}} +{{- if or (not .Values.global.secretsBackend.vault.connectInjectRole) (not .Values.global.secretsBackend.vault.connectInject.tlsCert.secretName) (not .Values.global.secretsBackend.vault.connectInject.caCert.secretName) (not .Values.global.secretsBackend.vault.controllerRole) (not .Values.global.secretsBackend.vault.controller.tlsCert.secretName) (not .Values.global.secretsBackend.vault.controller.caCert.secretName) }} +{{fail "When one of the following has been set, all must be set: global.secretsBackend.vault.connectInjectRole, global.secretsBackend.vault.connectInject.tlsCert.secretName, global.secretsBackend.vault.connectInject.caCert.secretName, global.secretsBackend.vault.controllerRole, global.secretsBackend.vault.controller.tlsCert.secretName, and global.secretsBackend.vault.controller.caCert.secretName."}} {{ end }} {{ 
end }} {{- end -}} - -{{/* -Consul server environment variables for consul-k8s commands. -*/}} -{{- define "consul.consulK8sConsulServerEnvVars" -}} -- name: CONSUL_ADDRESSES - {{- if .Values.externalServers.enabled }} - value: {{ .Values.externalServers.hosts | first }} - {{- else }} - value: {{ template "consul.fullname" . }}-server.{{ .Release.Namespace }}.svc - {{- end }} -- name: CONSUL_GRPC_PORT - {{- if .Values.externalServers.enabled }} - value: "{{ .Values.externalServers.grpcPort }}" - {{- else }} - value: "8502" - {{- end }} -- name: CONSUL_HTTP_PORT - {{- if .Values.externalServers.enabled }} - value: "{{ .Values.externalServers.httpsPort }}" - {{- else if .Values.global.tls.enabled }} - value: "8501" - {{- else }} - value: "8500" - {{- end }} -- name: CONSUL_DATACENTER - value: {{ .Values.global.datacenter }} -- name: CONSUL_API_TIMEOUT - value: {{ .Values.global.consulAPITimeout }} -{{- if .Values.global.adminPartitions.enabled }} -- name: CONSUL_PARTITION - value: {{ .Values.global.adminPartitions.name }} -{{- if .Values.global.acls.manageSystemACLs }} -- name: CONSUL_LOGIN_PARTITION - value: {{ .Values.global.adminPartitions.name }} -{{- end }} -{{- end }} -{{- if .Values.global.tls.enabled }} -- name: CONSUL_USE_TLS - value: "true" -{{- if (not (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots)) }} -- name: CONSUL_CACERT_FILE - {{- if .Values.global.secretsBackend.vault.enabled }} - value: "/vault/secrets/serverca.crt" - {{- else }} - value: "/consul/tls/ca/tls.crt" - {{- end }} -{{- end }} -{{- if and .Values.externalServers.enabled .Values.externalServers.tlsServerName }} -- name: CONSUL_TLS_SERVER_NAME - value: {{ .Values.externalServers.tlsServerName }} -{{- else if .Values.global.cloud.enabled }} -- name: CONSUL_TLS_SERVER_NAME - value: server.{{ .Values.global.datacenter}}.{{ .Values.global.domain}} -{{- end }} -{{- end }} -{{- if and .Values.externalServers.enabled .Values.externalServers.skipServerWatch }} -- name: CONSUL_SKIP_SERVER_WATCH - value: "true" -{{- end }} -{{- end -}} - -{{/* -Fails global.cloud.enabled is true and one of the following secrets is nil or empty. -- global.cloud.resourceId.secretName -- global.cloud.clientId.secretName -- global.cloud.clientSecret.secretName - -Usage: {{ template "consul.validateRequiredCloudSecretsExist" . }} - -*/}} -{{- define "consul.validateRequiredCloudSecretsExist" -}} -{{- if (and .Values.global.cloud.enabled (or (not .Values.global.cloud.resourceId.secretName) (not .Values.global.cloud.clientId.secretName) (not .Values.global.cloud.clientSecret.secretName))) }} -{{fail "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set."}} -{{- end }} -{{- end -}} - -{{/* -Fails global.cloud.enabled is true and one of the following secrets has either an empty secretName or secretKey. -- global.cloud.resourceId.secretName / secretKey -- global.cloud.clientId.secretName / secretKey -- global.cloud.clientSecret.secretName / secretKey -- global.cloud.authUrl.secretName / secretKey -- global.cloud.apiHost.secretName / secretKey -- global.cloud.scadaAddress.secretName / secretKey -Usage: {{ template "consul.validateCloudSecretKeys" . 
}} - -*/}} -{{- define "consul.validateCloudSecretKeys" -}} -{{- if and .Values.global.cloud.enabled }} -{{- if or (and .Values.global.cloud.resourceId.secretName (not .Values.global.cloud.resourceId.secretKey)) (and .Values.global.cloud.resourceId.secretKey (not .Values.global.cloud.resourceId.secretName)) }} -{{fail "When either global.cloud.resourceId.secretName or global.cloud.resourceId.secretKey is defined, both must be set."}} -{{- end }} -{{- if or (and .Values.global.cloud.clientId.secretName (not .Values.global.cloud.clientId.secretKey)) (and .Values.global.cloud.clientId.secretKey (not .Values.global.cloud.clientId.secretName)) }} -{{fail "When either global.cloud.clientId.secretName or global.cloud.clientId.secretKey is defined, both must be set."}} -{{- end }} -{{- if or (and .Values.global.cloud.clientSecret.secretName (not .Values.global.cloud.clientSecret.secretKey)) (and .Values.global.cloud.clientSecret.secretKey (not .Values.global.cloud.clientSecret.secretName)) }} -{{fail "When either global.cloud.clientSecret.secretName or global.cloud.clientSecret.secretKey is defined, both must be set."}} -{{- end }} -{{- if or (and .Values.global.cloud.authUrl.secretName (not .Values.global.cloud.authUrl.secretKey)) (and .Values.global.cloud.authUrl.secretKey (not .Values.global.cloud.authUrl.secretName)) }} -{{fail "When either global.cloud.authUrl.secretName or global.cloud.authUrl.secretKey is defined, both must be set."}} -{{- end }} -{{- if or (and .Values.global.cloud.apiHost.secretName (not .Values.global.cloud.apiHost.secretKey)) (and .Values.global.cloud.apiHost.secretKey (not .Values.global.cloud.apiHost.secretName)) }} -{{fail "When either global.cloud.apiHost.secretName or global.cloud.apiHost.secretKey is defined, both must be set."}} -{{- end }} -{{- if or (and .Values.global.cloud.scadaAddress.secretName (not .Values.global.cloud.scadaAddress.secretKey)) (and .Values.global.cloud.scadaAddress.secretKey (not .Values.global.cloud.scadaAddress.secretName)) }} -{{fail "When either global.cloud.scadaAddress.secretName or global.cloud.scadaAddress.secretKey is defined, both must be set."}} -{{- end }} -{{- end }} -{{- end -}} diff --git a/charts/consul/templates/api-gateway-controller-deployment.yaml b/charts/consul/templates/api-gateway-controller-deployment.yaml index a9f1806cc8..3018c0294b 100644 --- a/charts/consul/templates/api-gateway-controller-deployment.yaml +++ b/charts/consul/templates/api-gateway-controller-deployment.yaml @@ -2,8 +2,6 @@ {{- if not .Values.client.grpc }}{{ fail "client.grpc must be true for api gateway" }}{{ end }} {{- if not .Values.apiGateway.image}}{{ fail "apiGateway.image must be set to enable api gateway" }}{{ end }} {{- if and .Values.global.adminPartitions.enabled (not .Values.global.enableConsulNamespaces) }}{{ fail "global.enableConsulNamespaces must be true if global.adminPartitions.enabled=true" }}{{ end }} -{{ template "consul.validateRequiredCloudSecretsExist" . }} -{{ template "consul.validateCloudSecretKeys" . 
}} apiVersion: apps/v1 kind: Deployment metadata: @@ -62,12 +60,10 @@ spec: name: sds protocol: TCP env: - {{- if or (not (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots)) .Values.client.enabled }} {{- if .Values.global.tls.enabled }} - name: CONSUL_CACERT value: /consul/tls/ca/tls.crt {{- end }} - {{- end }} - name: HOST_IP valueFrom: fieldRef: @@ -75,59 +71,13 @@ spec: {{- if .Values.global.acls.manageSystemACLs }} - name: CONSUL_HTTP_TOKEN_FILE value: "/consul/login/acl-token" - # CONSUL_LOGIN_DATACENTER is passed to the gateway that gets created. The controller does not use this to log in - - name: CONSUL_LOGIN_DATACENTER - value: {{ .Values.global.datacenter }} {{- end }} - name: CONSUL_HTTP_ADDR - {{- if .Values.client.enabled }} - {{/* - We use client agent nodes if we have them to support backwards compatibility for Consul API Gateway - v0.4 and older, which requires connectivity between the registered Consul agent node and a - deployment for health checking (originating from the Consul node). Always leveraging the agents in - the case that they're explicitly opted into allows us to support users with agent node + - "externalServers" configuration upgrading a Helm chart without upgrading API gateways. - */}} - {{- if .Values.global.tls.enabled }} - value: $(HOST_IP):8501 - {{- else }} - value: $(HOST_IP):8500 - {{- end }} - {{- else if .Values.externalServers.enabled }} - {{/* - "externalServers" specified and running in "agentless" mode, this will only work with - Consul API Gateway v0.5 or newer - */}} - value: {{ first .Values.externalServers.hosts }}:{{ .Values.externalServers.httpsPort }} - {{- else }} - {{/* - We have local network connectivity between deployments and the internal cluster, this - should be supported in all versions of Consul API Gateway - */}} {{- if .Values.global.tls.enabled }} - value: {{ template "consul.fullname" . }}-server:8501 + value: https://$(HOST_IP):8501 {{- else }} - value: {{ template "consul.fullname" . 
}}-server:8500 - {{- end }} + value: http://$(HOST_IP):8500 {{- end }} - - name: CONSUL_HTTP_SSL - value: "{{ .Values.global.tls.enabled }}" - {{- if and .Values.externalServers.enabled .Values.externalServers.tlsServerName }} - - name: CONSUL_TLS_SERVER_NAME - value: {{ .Values.externalServers.tlsServerName }} - {{- end }} - {{- if .Values.global.adminPartitions.enabled }} - - name: CONSUL_PARTITION - value: {{ .Values.global.adminPartitions.name }} - {{- if .Values.global.acls.manageSystemACLs }} - - name: CONSUL_LOGIN_PARTITION - value: {{ .Values.global.adminPartitions.name }} - {{- end }} - {{- end }} - {{- if not .Values.client.enabled }} - - name: CONSUL_DYNAMIC_SERVER_DISCOVERY - value: "true" - {{- end }} command: - "/bin/sh" - "-ec" @@ -156,9 +106,8 @@ spec: - name: consul-bin mountPath: /consul-bin {{- end }} - {{- if or (not (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots)) .Values.client.enabled }} {{- if .Values.global.tls.enabled }} - {{- if and .Values.client.enabled .Values.global.tls.enableAutoEncrypt }} + {{- if .Values.global.tls.enableAutoEncrypt }} - name: consul-auto-encrypt-ca-cert {{- else }} - name: consul-ca-cert @@ -166,7 +115,6 @@ spec: mountPath: /consul/tls/ca readOnly: true {{- end }} - {{- end }} - mountPath: /consul/login name: consul-data readOnly: true @@ -231,45 +179,50 @@ spec: {{- if .Values.global.acls.manageSystemACLs }} - name: api-gateway-controller-acl-init env: - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME + - name: HOST_IP valueFrom: fieldRef: - fieldPath: metadata.name - - name: CONSUL_LOGIN_META - value: "component=api-gateway-controller,pod=$(NAMESPACE)/$(POD_NAME)" - - name: CONSUL_LOGIN_DATACENTER - {{- if and .Values.global.federation.enabled .Values.global.federation.primaryDatacenter }} - value: {{ .Values.global.federation.primaryDatacenter }} - {{- else }} - value: {{ .Values.global.datacenter }} - {{- end}} - {{- include "consul.consulK8sConsulServerEnvVars" . | nindent 8 }} + fieldPath: status.hostIP + {{- if .Values.global.tls.enabled }} + - name: CONSUL_CACERT + value: /consul/tls/ca/tls.crt + {{- end }} + - name: CONSUL_HTTP_ADDR + {{- if .Values.global.tls.enabled }} + value: https://$(HOST_IP):8501 + {{- else }} + value: http://$(HOST_IP):8500 + {{- end }} image: {{ .Values.global.imageK8S }} volumeMounts: - mountPath: /consul/login name: consul-data readOnly: false - {{- if not (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots) }} {{- if .Values.global.tls.enabled }} + {{- if .Values.global.tls.enableAutoEncrypt }} + - name: consul-auto-encrypt-ca-cert + {{- else }} - name: consul-ca-cert + {{- end }} mountPath: /consul/tls/ca readOnly: true {{- end }} - {{- end }} command: - "/bin/sh" - "-ec" - | consul-k8s-control-plane acl-init \ + -component-name=api-gateway-controller \ {{- if and .Values.global.federation.enabled .Values.global.federation.primaryDatacenter }} - -auth-method-name={{ template "consul.fullname" . }}-k8s-component-auth-method-{{ .Values.global.datacenter }} \ + -acl-auth-method={{ template "consul.fullname" . }}-k8s-component-auth-method-{{ .Values.global.datacenter }} \ + -primary-datacenter={{ .Values.global.federation.primaryDatacenter }} \ {{- else }} - -auth-method-name={{ template "consul.fullname" . }}-k8s-component-auth-method \ + -acl-auth-method={{ template "consul.fullname" . 
}}-k8s-component-auth-method \ + {{- end }} + {{- if .Values.global.adminPartitions.enabled }} + -partition={{ .Values.global.adminPartitions.name }} \ {{- end }} + -consul-api-timeout={{ .Values.global.consulAPITimeout }} \ -log-level={{ default .Values.global.logLevel .Values.apiGateway.logLevel }} \ -log-json={{ .Values.global.logJSON }} resources: diff --git a/charts/consul/templates/api-gateway-gatewayclassconfig.yaml b/charts/consul/templates/api-gateway-gatewayclassconfig.yaml index ba0e6c63db..f2fecba47e 100644 --- a/charts/consul/templates/api-gateway-gatewayclassconfig.yaml +++ b/charts/consul/templates/api-gateway-gatewayclassconfig.yaml @@ -11,28 +11,6 @@ metadata: component: api-gateway spec: consul: - {{- if .Values.client.enabled }} - {{/* - We use client agent nodes if we have them to support backwards compatibility in <=0.4 releases which - require connectivity between the registered Consul agent node and a deployment for health checking - (originating from the Consul node). Always leveraging the agents in the case that they're explicitly - opted into allows us to support users with agent node + "externalServers" configuration upgrading a - helm chart without upgrading api gateways. Otherwise, using "externalServers" when provided - without local agents will break gateways <=0.4. - */}} - address: $(HOST_IP) - {{- else if .Values.externalServers.enabled }} - {{/* - "externalServers" specified and running in "agentless" mode, this will only work 0.5+ - */}} - address: {{ first .Values.externalServers.hosts }} - {{- else }} - {{/* - We have local network connectivity between deployments and the internal cluster, this - should be supported in all versions of api-gateway - */}} - address: {{ template "consul.fullname" . }}-server.{{ .Release.Namespace }}.svc - {{- end }} authentication: {{- if .Values.global.acls.manageSystemACLs }} managed: true @@ -47,24 +25,19 @@ spec: scheme: http {{- end }} ports: - {{- if .Values.externalServers.enabled }} - grpc: {{ .Values.externalServers.grpcPort }} - http: {{ .Values.externalServers.httpsPort }} - {{- else }} grpc: 8502 - {{- if .Values.global.tls.enabled }} + {{- if .Values.global.tls.enabled }} http: 8501 - {{- else }} + {{- else }} http: 8500 - {{- end }} - {{- end }} + {{- end }} {{- with .Values.apiGateway.managedGatewayClass.deployment }} deployment: {{- toYaml . | nindent 4 }} {{- end }} image: consulAPIGateway: {{ .Values.apiGateway.image }} - envoy: {{ .Values.apiGateway.imageEnvoy }} + envoy: {{ .Values.global.imageEnvoy }} {{- if .Values.apiGateway.managedGatewayClass.nodeSelector }} nodeSelector: {{ tpl .Values.apiGateway.managedGatewayClass.nodeSelector . | indent 4 | trim }} diff --git a/charts/consul/templates/client-daemonset.yaml b/charts/consul/templates/client-daemonset.yaml index 09a70b394e..95a990b4fb 100644 --- a/charts/consul/templates/client-daemonset.yaml +++ b/charts/consul/templates/client-daemonset.yaml @@ -10,8 +10,6 @@ {{- if (and .Values.global.enterpriseLicense.secretName (not .Values.global.enterpriseLicense.secretKey)) }}{{fail "enterpriseLicense.secretKey and secretName must both be specified." }}{{ end -}} {{- if (and (not .Values.global.enterpriseLicense.secretName) .Values.global.enterpriseLicense.secretKey) }}{{fail "enterpriseLicense.secretKey and secretName must both be specified." 
}}{{ end -}} {{- if and .Values.externalServers.enabled (not .Values.externalServers.hosts) }}{{ fail "externalServers.hosts must be set if externalServers.enabled is true" }}{{ end -}} -{{ template "consul.validateRequiredCloudSecretsExist" . }} -{{ template "consul.validateCloudSecretKeys" . }} # DaemonSet to run the Consul clients on every node. apiVersion: apps/v1 kind: DaemonSet @@ -277,6 +275,9 @@ spec: {{- if and .Values.global.secretsBackend.vault.enabled .Values.global.gossipEncryption.secretName }} GOSSIP_KEY=`cat /vault/secrets/gossip.txt` {{- end }} + {{- if (and .Values.dns.enabled .Values.dns.enableRedirection) }} + {{ template "consul.recursors" }} + {{- end }} {{ template "consul.extraconfig" }} @@ -342,11 +343,7 @@ spec: {{- end }} {{- end }} {{- if .Values.client.grpc }} - {{- if .Values.global.tls.enabled }} - -hcl='ports { grpc = -1, grpc_tls = 8502 }' \ - {{- else }} - -hcl='ports { grpc = 8502, grpc_tls = -1 }' \ - {{- end }} + -hcl='ports { grpc = 8502 }' \ {{- end }} {{- if (and .Values.global.metrics.enabled .Values.global.metrics.enableAgentMetrics) }} -hcl='telemetry { prometheus_retention_time = "{{ .Values.global.metrics.agentMetricsRetentionTime }}" }' \ @@ -386,6 +383,9 @@ spec: {{- range $value := .Values.global.recursors }} -recursor={{ quote $value }} \ {{- end }} + {{- if (and .Values.dns.enabled .Values.dns.enableRedirection) }} + $recursor_flags \ + {{- end }} -config-file=/consul/extra-config/extra-from-values.json \ -domain={{ .Values.global.domain }} volumeMounts: @@ -494,34 +494,45 @@ spec: - name: client-acl-init image: {{ .Values.global.imageK8S }} env: - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - {{- include "consul.consulK8sConsulServerEnvVars" . | nindent 8 }} - {{- if .Values.global.acls.manageSystemACLs }} - - name: CONSUL_LOGIN_AUTH_METHOD - value: {{ template "consul.fullname" . }}-k8s-component-auth-method - - name: CONSUL_LOGIN_META - value: "component=client,pod=$(NAMESPACE)/$(POD_NAME)" + - name: CONSUL_HTTP_ADDR + {{- if .Values.global.tls.enabled }} + value: https://{{ template "consul.fullname" . }}-server.{{ .Release.Namespace }}.svc:8501 + {{- else }} + value: http://{{ template "consul.fullname" . }}-server.{{ .Release.Namespace }}.svc:8500 + {{- end }} + {{- if (and .Values.global.tls.enabled (not .Values.externalServers.useSystemRoots)) }} + - name: CONSUL_CACERT + {{- if .Values.global.secretsBackend.vault.enabled }} + value: "/vault/secrets/serverca.crt" + {{- else }} + value: "/consul/tls/ca/tls.crt" + {{- end }} {{- end }} - - name: CONSUL_LOGIN_DATACENTER - {{- if and .Values.global.federation.enabled .Values.global.federation.primaryDatacenter }} - value: {{ .Values.global.federation.primaryDatacenter }} - {{- else }} - value: {{ .Values.global.datacenter }} - {{- end}} command: - "/bin/sh" - "-ec" - | consul-k8s-control-plane acl-init \ + -component-name=client \ + -acl-auth-method="{{ template "consul.fullname" . }}-k8s-component-auth-method" \ + {{- if .Values.global.adminPartitions.enabled }} + -partition={{ .Values.global.adminPartitions.name }} \ + {{- end }} -log-level={{ default .Values.global.logLevel .Values.client.logLevel }} \ -log-json={{ .Values.global.logJSON }} \ + {{- if .Values.externalServers.enabled }} + {{- if .Values.global.tls.enabled }} + -use-https \ + {{- end }} + {{- range .Values.externalServers.hosts }} + -server-address={{ quote . 
}} \ + {{- end }} + -server-port={{ .Values.externalServers.httpsPort }} \ + {{- if .Values.externalServers.tlsServerName }} + -tls-server-name={{ .Values.externalServers.tlsServerName }} \ + {{- end }} + {{- end }} + -consul-api-timeout={{ .Values.global.consulAPITimeout }} \ -init-type="client" volumeMounts: - name: aclconfig diff --git a/charts/consul/templates/client-snapshot-agent-deployment.yaml b/charts/consul/templates/client-snapshot-agent-deployment.yaml new file mode 100644 index 0000000000..19ffff23c0 --- /dev/null +++ b/charts/consul/templates/client-snapshot-agent-deployment.yaml @@ -0,0 +1,281 @@ +{{- if (or (and (ne (.Values.client.enabled | toString) "-") .Values.client.enabled) (and (eq (.Values.client.enabled | toString) "-") .Values.global.enabled)) }} +{{- if or (and .Values.client.snapshotAgent.configSecret.secretName (not .Values.client.snapshotAgent.configSecret.secretKey)) (and (not .Values.client.snapshotAgent.configSecret.secretName) .Values.client.snapshotAgent.configSecret.secretKey) }}{{fail "client.snapshotAgent.configSecret.secretKey and client.snapshotAgent.configSecret.secretName must both be specified." }}{{ end -}} +{{- if .Values.client.snapshotAgent.enabled }} +{{- if or (and .Values.client.snapshotAgent.configSecret.secretName (not .Values.client.snapshotAgent.configSecret.secretKey)) (and (not .Values.client.snapshotAgent.configSecret.secretName) .Values.client.snapshotAgent.configSecret.secretKey) }}{{fail "client.snapshotAgent.configSecret.secretKey and client.snapshotAgent.configSecret.secretName must both be specified." }}{{ end -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "consul.fullname" . }}-snapshot-agent + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: client-snapshot-agent +spec: + replicas: {{ .Values.client.snapshotAgent.replicas }} + selector: + matchLabels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + release: {{ .Release.Name }} + component: client-snapshot-agent + template: + metadata: + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + release: {{ .Release.Name }} + component: client-snapshot-agent + annotations: + "consul.hashicorp.com/connect-inject": "false" + {{- if .Values.global.secretsBackend.vault.enabled }} + {{- if .Values.client.snapshotAgent.configSecret.secretName }} + "vault.hashicorp.com/role": {{ .Values.global.secretsBackend.vault.consulSnapshotAgentRole }} + {{- else if and .Values.global.tls.enabled }} + "vault.hashicorp.com/role": {{ .Values.global.secretsBackend.vault.consulCARole }} + {{- end }} + {{- if .Values.global.tls.enabled }} + "vault.hashicorp.com/agent-init-first": "true" + "vault.hashicorp.com/agent-inject": "true" + "vault.hashicorp.com/agent-inject-secret-serverca.crt": {{ .Values.global.tls.caCert.secretName }} + "vault.hashicorp.com/agent-inject-template-serverca.crt": {{ template "consul.serverTLSCATemplate" . 
}} + {{- if and .Values.global.secretsBackend.vault.ca.secretName .Values.global.secretsBackend.vault.ca.secretKey }} + "vault.hashicorp.com/agent-extra-secret": "{{ .Values.global.secretsBackend.vault.ca.secretName }}" + "vault.hashicorp.com/ca-cert": "/vault/custom/{{ .Values.global.secretsBackend.vault.ca.secretKey }}" + {{- end }} + {{- if .Values.global.secretsBackend.vault.agentAnnotations }} + {{ tpl .Values.global.secretsBackend.vault.agentAnnotations . | nindent 8 | trim }} + {{- end }} + {{- end }} + {{- if .Values.global.enterpriseLicense.secretName }} + {{- with .Values.global.enterpriseLicense }} + "vault.hashicorp.com/agent-inject-secret-enterpriselicense.txt": "{{ .secretName }}" + "vault.hashicorp.com/agent-inject-template-enterpriselicense.txt": {{ template "consul.vaultSecretTemplate" . }} + {{- end }} + {{- end }} + {{- if .Values.client.snapshotAgent.configSecret.secretName }} + {{- with .Values.client.snapshotAgent.configSecret }} + "vault.hashicorp.com/agent-inject-secret-snapshot-agent-config.json": "{{ .secretName }}" + "vault.hashicorp.com/agent-inject-template-snapshot-agent-config.json": {{ template "consul.vaultSecretTemplate" . }} + {{- end }} + {{- end }} + {{- end }} + spec: + {{- if .Values.client.tolerations }} + tolerations: + {{ tpl .Values.client.tolerations . | nindent 8 | trim }} + {{- end }} + terminationGracePeriodSeconds: 10 + serviceAccountName: {{ template "consul.fullname" . }}-snapshot-agent + {{- if .Values.client.priorityClassName }} + priorityClassName: {{ .Values.client.priorityClassName | quote }} + {{- end }} + volumes: + {{- if .Values.client.snapshotAgent.caCert }} + - name: extra-ssl-certs + emptyDir: + medium: "Memory" + {{- end }} + {{- if (or .Values.global.acls.manageSystemACLs .Values.global.tls.enabled (and .Values.client.snapshotAgent.configSecret.secretName .Values.client.snapshotAgent.configSecret.secretKey) (and .Values.global.enterpriseLicense.secretName .Values.global.enterpriseLicense.secretKey .Values.global.enterpriseLicense.enableLicenseAutoload)) }} + - name: consul-data + emptyDir: + medium: "Memory" + {{- if (and .Values.client.snapshotAgent.configSecret.secretName .Values.client.snapshotAgent.configSecret.secretKey (not .Values.global.secretsBackend.vault.enabled)) }} + - name: snapshot-config + secret: + secretName: {{ .Values.client.snapshotAgent.configSecret.secretName }} + items: + - key: {{ .Values.client.snapshotAgent.configSecret.secretKey }} + path: snapshot-config.json + {{- end }} + {{- if (and .Values.global.enterpriseLicense.secretName .Values.global.enterpriseLicense.secretKey .Values.global.enterpriseLicense.enableLicenseAutoload (not .Values.global.secretsBackend.vault.enabled) (not .Values.global.acls.manageSystemACLs)) }} + - name: consul-license + secret: + secretName: {{ .Values.global.enterpriseLicense.secretName }} + {{- end }} + {{- if .Values.global.tls.enabled }} + {{- if not (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots) }} + - name: consul-ca-cert + secret: + {{- if .Values.global.tls.caCert.secretName }} + secretName: {{ .Values.global.tls.caCert.secretName }} + {{- else }} + secretName: {{ template "consul.fullname" . 
}}-ca-cert + {{- end }} + items: + - key: {{ default "tls.crt" .Values.global.tls.caCert.secretKey }} + path: tls.crt + {{- end }} + {{- if .Values.global.tls.enableAutoEncrypt }} + - name: consul-auto-encrypt-ca-cert + emptyDir: + medium: "Memory" + {{- end }} + {{- end }} + {{- end }} + containers: + - name: consul-snapshot-agent + image: "{{ default .Values.global.image .Values.client.image }}" + env: + {{- if .Values.client.snapshotAgent.caCert }} + - name: SSL_CERT_DIR + value: "/etc/ssl/certs:/extra-ssl-certs" + {{- end }} + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + {{- if .Values.global.tls.enabled }} + - name: CONSUL_HTTP_ADDR + value: https://$(HOST_IP):8501 + - name: CONSUL_CACERT + value: /consul/tls/ca/tls.crt + {{- else }} + - name: CONSUL_HTTP_ADDR + value: http://$(HOST_IP):8500 + {{- end }} + {{- if .Values.global.acls.manageSystemACLs }} + - name: CONSUL_HTTP_TOKEN_FILE + value: /consul/login/acl-token + {{- else }} + {{- if (and .Values.global.enterpriseLicense.secretName .Values.global.enterpriseLicense.secretKey .Values.global.enterpriseLicense.enableLicenseAutoload) }} + - name: CONSUL_LICENSE_PATH + {{- if .Values.global.secretsBackend.vault.enabled }} + value: /vault/secrets/enterpriselicense.txt + {{- else }} + value: /consul/license/{{ .Values.global.enterpriseLicense.secretKey }} + {{- end }} + {{- end }} + {{- end }} + command: + - "/bin/sh" + - "-ec" + - | + {{- if .Values.client.snapshotAgent.caCert }} + cat < /extra-ssl-certs/custom-ca.pem + {{- .Values.client.snapshotAgent.caCert | nindent 14 }} + EOF + {{- end }} + exec /bin/consul snapshot agent \ + -interval={{ .Values.client.snapshotAgent.interval }} \ + {{- if (and .Values.client.snapshotAgent.configSecret.secretName .Values.client.snapshotAgent.configSecret.secretKey) }} + {{- if .Values.global.secretsBackend.vault.enabled }} + -config-file=/vault/secrets/snapshot-agent-config.json \ + {{- else }} + -config-dir=/consul/config \ + {{- end }} + {{- end }} + {{- if .Values.global.acls.manageSystemACLs }} + -config-dir=/consul/login \ + {{- end }} + volumeMounts: + {{- if .Values.client.snapshotAgent.caCert }} + - name: extra-ssl-certs + mountPath: /extra-ssl-certs + readOnly: false + {{- end }} + {{- if (or .Values.global.acls.manageSystemACLs .Values.global.tls.enabled (and .Values.client.snapshotAgent.configSecret.secretName .Values.client.snapshotAgent.configSecret.secretKey) (and .Values.global.enterpriseLicense.secretName .Values.global.enterpriseLicense.secretKey .Values.global.enterpriseLicense.enableLicenseAutoload)) }} + {{- if (and .Values.client.snapshotAgent.configSecret.secretName .Values.client.snapshotAgent.configSecret.secretKey (not .Values.global.secretsBackend.vault.enabled)) }} + - name: snapshot-config + mountPath: /consul/config + readOnly: true + {{- end }} + - mountPath: /consul/login + name: consul-data + readOnly: true + {{- if (and .Values.global.enterpriseLicense.secretName .Values.global.enterpriseLicense.secretKey .Values.global.enterpriseLicense.enableLicenseAutoload (not .Values.global.secretsBackend.vault.enabled) (not .Values.global.acls.manageSystemACLs))}} + - name: consul-license + mountPath: /consul/license + readOnly: true + {{- end }} + {{- if .Values.global.tls.enabled }} + {{- if .Values.global.tls.enableAutoEncrypt}} + - name: consul-auto-encrypt-ca-cert + {{- else }} + - name: consul-ca-cert + {{- end }} + mountPath: /consul/tls/ca + readOnly: true + {{- end }} + {{- end }} + {{- if .Values.global.acls.manageSystemACLs }} + lifecycle: 
+ preStop: + exec: + command: + - "/bin/sh" + - "-ec" + - | + /bin/consul logout + {{- end }} + {{- with .Values.client.snapshotAgent.resources }} + resources: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- if (or .Values.global.acls.manageSystemACLs (and .Values.global.tls.enabled .Values.global.tls.enableAutoEncrypt)) }} + initContainers: + {{- if (and .Values.global.tls.enabled .Values.global.tls.enableAutoEncrypt) }} + {{- include "consul.getAutoEncryptClientCA" . | nindent 6 }} + {{- end }} + {{- if .Values.global.acls.manageSystemACLs }} + - name: snapshot-agent-acl-init + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + {{- if .Values.global.tls.enabled }} + - name: CONSUL_CACERT + value: /consul/tls/ca/tls.crt + {{- end }} + - name: CONSUL_HTTP_ADDR + {{- if .Values.global.tls.enabled }} + value: https://$(HOST_IP):8501 + {{- else }} + value: http://$(HOST_IP):8500 + {{- end }} + image: {{ .Values.global.imageK8S }} + volumeMounts: + - mountPath: /consul/login + name: consul-data + readOnly: false + {{- if .Values.global.tls.enabled }} + {{- if .Values.global.tls.enableAutoEncrypt }} + - name: consul-auto-encrypt-ca-cert + {{- else }} + - name: consul-ca-cert + {{- end }} + mountPath: /consul/tls/ca + readOnly: true + {{- end }} + command: + - "/bin/sh" + - "-ec" + - | + consul-k8s-control-plane acl-init \ + -component-name=snapshot-agent \ + -acl-auth-method={{ template "consul.fullname" . }}-k8s-component-auth-method \ + {{- if .Values.global.adminPartitions.enabled }} + -partition={{ .Values.global.adminPartitions.name }} \ + {{- end }} + -token-sink-file=/consul/login/acl-token \ + -consul-api-timeout={{ .Values.global.consulAPITimeout }} \ + -log-level={{ default .Values.global.logLevel }} \ + -log-json={{ .Values.global.logJSON }} + resources: + requests: + memory: "25Mi" + cpu: "50m" + limits: + memory: "25Mi" + cpu: "50m" + {{- end }} + {{- end }} + {{- if .Values.client.nodeSelector }} + nodeSelector: + {{ tpl .Values.client.nodeSelector . | indent 8 | trim }} + {{- end }} +{{- end }} +{{- end }} diff --git a/charts/consul/templates/client-snapshot-agent-podsecuritypolicy.yaml b/charts/consul/templates/client-snapshot-agent-podsecuritypolicy.yaml new file mode 100644 index 0000000000..dd324a3971 --- /dev/null +++ b/charts/consul/templates/client-snapshot-agent-podsecuritypolicy.yaml @@ -0,0 +1,42 @@ +{{- if (and .Values.global.enablePodSecurityPolicies (or (and (ne (.Values.client.enabled | toString) "-") .Values.client.enabled) (and (eq (.Values.client.enabled | toString) "-") .Values.global.enabled))) }} +{{- if .Values.client.snapshotAgent.enabled }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "consul.fullname" . }}-snapshot-agent + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: client-snapshot-agent +spec: + privileged: false + # Required to prevent escalations to root. + allowPrivilegeEscalation: false + # This is redundant with non-root + disallow privilege escalation, + # but we can provide it for defense in depth. + requiredDropCapabilities: + - ALL + # Allow core volume types. 
+ volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'RunAsAny' + fsGroup: + rule: 'RunAsAny' + readOnlyRootFilesystem: false +{{- end }} +{{- end }} diff --git a/charts/consul/templates/client-snapshot-agent-role.yaml b/charts/consul/templates/client-snapshot-agent-role.yaml new file mode 100644 index 0000000000..3077bc96f0 --- /dev/null +++ b/charts/consul/templates/client-snapshot-agent-role.yaml @@ -0,0 +1,26 @@ +{{- if (or (and (ne (.Values.client.enabled | toString) "-") .Values.client.enabled) (and (eq (.Values.client.enabled | toString) "-") .Values.global.enabled)) }} +{{- if .Values.client.snapshotAgent.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ template "consul.fullname" . }}-snapshot-agent + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: client-snapshot-agent +{{- if .Values.global.enablePodSecurityPolicies }} +rules: +- apiGroups: [ "policy" ] + resources: [ "podsecuritypolicies" ] + resourceNames: + - {{ template "consul.fullname" . }}-snapshot-agent + verbs: + - use +{{- else }} +rules: [ ] +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/consul/templates/client-snapshot-agent-rolebinding.yaml b/charts/consul/templates/client-snapshot-agent-rolebinding.yaml new file mode 100644 index 0000000000..e966c4e2a8 --- /dev/null +++ b/charts/consul/templates/client-snapshot-agent-rolebinding.yaml @@ -0,0 +1,22 @@ +{{- if (or (and (ne (.Values.client.enabled | toString) "-") .Values.client.enabled) (and (eq (.Values.client.enabled | toString) "-") .Values.global.enabled)) }} +{{- if .Values.client.snapshotAgent.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "consul.fullname" . }}-snapshot-agent + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: client-snapshot-agent +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "consul.fullname" . }}-snapshot-agent +subjects: + - kind: ServiceAccount + name: {{ template "consul.fullname" . }}-snapshot-agent +{{- end }} +{{- end }} diff --git a/charts/consul/templates/client-snapshot-agent-serviceaccount.yaml b/charts/consul/templates/client-snapshot-agent-serviceaccount.yaml new file mode 100644 index 0000000000..a485ff0a5c --- /dev/null +++ b/charts/consul/templates/client-snapshot-agent-serviceaccount.yaml @@ -0,0 +1,25 @@ +{{- if (or (and (ne (.Values.client.enabled | toString) "-") .Values.client.enabled) (and (eq (.Values.client.enabled | toString) "-") .Values.global.enabled)) }} +{{- if .Values.client.snapshotAgent.enabled }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "consul.fullname" . }}-snapshot-agent + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: client-snapshot-agent + {{- if .Values.client.snapshotAgent.serviceAccount.annotations }} + annotations: + {{ tpl .Values.client.snapshotAgent.serviceAccount.annotations . 
| nindent 4 | trim }} + {{- end }} +{{- with .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range . }} + - name: {{ .name }} +{{- end }} +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/consul/templates/cni-clusterrole.yaml b/charts/consul/templates/cni-clusterrole.yaml index 773942cca8..39dc5ead50 100644 --- a/charts/consul/templates/cni-clusterrole.yaml +++ b/charts/consul/templates/cni-clusterrole.yaml @@ -3,7 +3,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: {{ template "consul.fullname" . }}-cni - namespace: {{ default .Release.Namespace .Values.connectInject.cni.namespace }} + namespace: {{ .Release.Namespace }} labels: app: {{ template "consul.name" . }} chart: {{ template "consul.chart" . }} diff --git a/charts/consul/templates/cni-clusterrolebinding.yaml b/charts/consul/templates/cni-clusterrolebinding.yaml index 4b860388b6..86c19d86aa 100644 --- a/charts/consul/templates/cni-clusterrolebinding.yaml +++ b/charts/consul/templates/cni-clusterrolebinding.yaml @@ -16,5 +16,5 @@ roleRef: subjects: - kind: ServiceAccount name: {{ template "consul.fullname" . }}-cni - namespace: {{ default .Release.Namespace .Values.connectInject.cni.namespace }} + namespace: {{ .Release.Namespace }} {{- end }} diff --git a/charts/consul/templates/cni-daemonset.yaml b/charts/consul/templates/cni-daemonset.yaml index ae04d9e657..1fe2e6a08d 100644 --- a/charts/consul/templates/cni-daemonset.yaml +++ b/charts/consul/templates/cni-daemonset.yaml @@ -4,7 +4,7 @@ apiVersion: apps/v1 kind: DaemonSet metadata: name: {{ template "consul.fullname" . }}-cni - namespace: {{ default .Release.Namespace .Values.connectInject.cni.namespace }} + namespace: {{ .Release.Namespace }} labels: app: {{ template "consul.name" . }} chart: {{ template "consul.chart" . }} diff --git a/charts/consul/templates/cni-networkattachmentdefinition.yaml b/charts/consul/templates/cni-networkattachmentdefinition.yaml index 80ef50bac6..d0feaf5cb1 100644 --- a/charts/consul/templates/cni-networkattachmentdefinition.yaml +++ b/charts/consul/templates/cni-networkattachmentdefinition.yaml @@ -3,7 +3,7 @@ apiVersion: "k8s.cni.cncf.io/v1" kind: NetworkAttachmentDefinition metadata: name: {{ template "consul.fullname" . }}-cni - namespace: {{ default .Release.Namespace .Values.connectInject.cni.namespace }} + namespace: {{ .Release.Namespace }} labels: app: {{ template "consul.name" . }} chart: {{ template "consul.chart" . }} diff --git a/charts/consul/templates/cni-podsecuritypolicy.yaml b/charts/consul/templates/cni-podsecuritypolicy.yaml index b600ed1b4b..15b96bc230 100644 --- a/charts/consul/templates/cni-podsecuritypolicy.yaml +++ b/charts/consul/templates/cni-podsecuritypolicy.yaml @@ -3,7 +3,7 @@ apiVersion: policy/v1beta1 kind: PodSecurityPolicy metadata: name: {{ template "consul.fullname" . }}-cni - namespace: {{ default .Release.Namespace .Values.connectInject.cni.namespace }} + namespace: {{ .Release.Namespace }} labels: app: {{ template "consul.name" . }} chart: {{ template "consul.chart" . }} diff --git a/charts/consul/templates/cni-resourcequota.yaml b/charts/consul/templates/cni-resourcequota.yaml index 054c3061f5..abfe5a8876 100644 --- a/charts/consul/templates/cni-resourcequota.yaml +++ b/charts/consul/templates/cni-resourcequota.yaml @@ -3,7 +3,7 @@ apiVersion: v1 kind: ResourceQuota metadata: name: {{ template "consul.fullname" . 
}}-cni - namespace: {{ default .Release.Namespace .Values.connectInject.cni.namespace }} + namespace: {{ .Release.Namespace }} labels: app: {{ template "consul.name" . }} chart: {{ template "consul.chart" . }} diff --git a/charts/consul/templates/cni-securitycontextconstraints.yaml b/charts/consul/templates/cni-securitycontextconstraints.yaml index 2c09dba9b8..95cfc555e1 100644 --- a/charts/consul/templates/cni-securitycontextconstraints.yaml +++ b/charts/consul/templates/cni-securitycontextconstraints.yaml @@ -3,7 +3,7 @@ apiVersion: security.openshift.io/v1 kind: SecurityContextConstraints metadata: name: {{ template "consul.fullname" . }}-cni - namespace: {{ default .Release.Namespace .Values.connectInject.cni.namespace }} + namespace: {{ .Release.Namespace }} labels: app: {{ template "consul.name" . }} chart: {{ template "consul.chart" . }} diff --git a/charts/consul/templates/cni-serviceaccount.yaml b/charts/consul/templates/cni-serviceaccount.yaml index cf4250b696..6b2a7627f7 100644 --- a/charts/consul/templates/cni-serviceaccount.yaml +++ b/charts/consul/templates/cni-serviceaccount.yaml @@ -3,7 +3,7 @@ apiVersion: v1 kind: ServiceAccount metadata: name: {{ template "consul.fullname" . }}-cni - namespace: {{ default .Release.Namespace .Values.connectInject.cni.namespace }} + namespace: {{ .Release.Namespace }} labels: app: {{ template "consul.name" . }} chart: {{ template "consul.chart" . }} diff --git a/charts/consul/templates/connect-inject-clusterrole.yaml b/charts/consul/templates/connect-inject-clusterrole.yaml index f2e12f0ad9..a3f8822963 100644 --- a/charts/consul/templates/connect-inject-clusterrole.yaml +++ b/charts/consul/templates/connect-inject-clusterrole.yaml @@ -11,55 +11,9 @@ metadata: release: {{ .Release.Name }} component: connect-injector rules: -- apiGroups: - - consul.hashicorp.com - resources: - - servicedefaults - - serviceresolvers - - proxydefaults - - meshes - - exportedservices - - servicerouters - - servicesplitters - - serviceintentions - - ingressgateways - - terminatinggateways - {{- if .Values.global.peering.enabled }} - - peeringacceptors - - peeringdialers - {{- end }} - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - consul.hashicorp.com - resources: - - servicedefaults/status - - serviceresolvers/status - - proxydefaults/status - - meshes/status - - exportedservices/status - - servicerouters/status - - servicesplitters/status - - serviceintentions/status - - ingressgateways/status - - terminatinggateways/status - {{- if .Values.global.peering.enabled }} - - peeringacceptors/status - - peeringdialers/status - {{- end }} - verbs: - - get - - patch - - update {{- if .Values.global.acls.manageSystemACLs }} - apiGroups: [ "" ] - resources: [ "serviceaccounts", "secrets" ] + resources: ["serviceaccounts", "secrets"] verbs: - get {{- end }} @@ -99,7 +53,7 @@ rules: {{- end }} {{- if .Values.global.peering.enabled }} - apiGroups: [ "" ] - resources: [ "secrets" ] + resources: ["secrets"] verbs: - "get" - "list" @@ -107,6 +61,42 @@ rules: - "create" - "update" - "delete" +- apiGroups: ["consul.hashicorp.com"] + resources: ["peeringacceptors"] + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - consul.hashicorp.com + resources: + - peeringacceptors/status + verbs: + - get + - patch + - update +- apiGroups: ["consul.hashicorp.com"] + resources: ["peeringdialers"] + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - 
consul.hashicorp.com + resources: + - peeringdialers/status + verbs: + - get + - patch + - update {{- end }} {{- if .Values.global.enablePodSecurityPolicies }} - apiGroups: [ "policy" ] diff --git a/charts/consul/templates/connect-inject-deployment.yaml b/charts/consul/templates/connect-inject-deployment.yaml index 2b52c1b81c..6cb1b3f23a 100644 --- a/charts/consul/templates/connect-inject-deployment.yaml +++ b/charts/consul/templates/connect-inject-deployment.yaml @@ -1,16 +1,19 @@ {{- if and .Values.global.peering.enabled (not .Values.connectInject.enabled) }}{{ fail "setting global.peering.enabled to true requires connectInject.enabled to be true" }}{{ end }} -{{- if and .Values.global.peering.enabled (not .Values.global.tls.enabled) }}{{ fail "setting global.peering.enabled to true requires global.tls.enabled to be true" }}{{ end }} -{{- if and .Values.global.peering.enabled (not .Values.meshGateway.enabled) }}{{ fail "setting global.peering.enabled to true requires meshGateway.enabled to be true" }}{{ end }} {{- if (or (and (ne (.Values.connectInject.enabled | toString) "-") .Values.connectInject.enabled) (and (eq (.Values.connectInject.enabled | toString) "-") .Values.global.enabled)) }} +{{- if not (or (and (ne (.Values.client.enabled | toString) "-") .Values.client.enabled) (and (eq (.Values.client.enabled | toString) "-") .Values.global.enabled)) }}{{ fail "clients must be enabled for connect injection" }}{{ end }} +{{- if not .Values.client.grpc }}{{ fail "client.grpc must be true for connect injection" }}{{ end }} +{{- if and .Values.connectInject.consulNamespaces.mirroringK8S (not .Values.global.enableConsulNamespaces) }}{{ fail "global.enableConsulNamespaces must be true if mirroringK8S=true" }}{{ end }} {{- if and .Values.global.adminPartitions.enabled (not .Values.global.enableConsulNamespaces) }}{{ fail "global.enableConsulNamespaces must be true if global.adminPartitions.enabled=true" }}{{ end }} +{{- if .Values.connectInject.centralConfig }}{{- if eq (toString .Values.connectInject.centralConfig.enabled) "false" }}{{ fail "connectInject.centralConfig.enabled cannot be set to false; to disable, set enable_central_service_config to false in server.extraConfig and client.extraConfig" }}{{ end -}}{{ end -}} +{{- if .Values.connectInject.centralConfig }}{{- if .Values.connectInject.centralConfig.defaultProtocol }}{{ fail "connectInject.centralConfig.defaultProtocol is no longer supported; instead you must migrate to CRDs (see www.consul.io/docs/k8s/crds/upgrade-to-crds)" }}{{ end }}{{ end -}} +{{- if .Values.connectInject.centralConfig }}{{ if .Values.connectInject.centralConfig.proxyDefaults }}{{- if ne (trim .Values.connectInject.centralConfig.proxyDefaults) `{}` }}{{ fail "connectInject.centralConfig.proxyDefaults is no longer supported; instead you must migrate to CRDs (see www.consul.io/docs/k8s/crds/upgrade-to-crds)" }}{{ end }}{{ end }}{{ end -}} +{{- if .Values.connectInject.imageEnvoy }}{{ fail "connectInject.imageEnvoy must be specified in global.imageEnvoy" }}{{ end }} +{{- if .Values.global.lifecycleSidecarContainer }}{{ fail "global.lifecycleSidecarContainer has been renamed to global.consulSidecarContainer. Please set values using global.consulSidecarContainer." }}{{ end }} {{ template "consul.validateVaultWebhookCertConfiguration" . 
diff --git a/charts/consul/templates/connect-inject-deployment.yaml b/charts/consul/templates/connect-inject-deployment.yaml
index 2b52c1b81c..6cb1b3f23a 100644
--- a/charts/consul/templates/connect-inject-deployment.yaml
+++ b/charts/consul/templates/connect-inject-deployment.yaml
@@ -1,16 +1,19 @@
 {{- if and .Values.global.peering.enabled (not .Values.connectInject.enabled) }}{{ fail "setting global.peering.enabled to true requires connectInject.enabled to be true" }}{{ end }}
-{{- if and .Values.global.peering.enabled (not .Values.global.tls.enabled) }}{{ fail "setting global.peering.enabled to true requires global.tls.enabled to be true" }}{{ end }}
-{{- if and .Values.global.peering.enabled (not .Values.meshGateway.enabled) }}{{ fail "setting global.peering.enabled to true requires meshGateway.enabled to be true" }}{{ end }}
 {{- if (or (and (ne (.Values.connectInject.enabled | toString) "-") .Values.connectInject.enabled) (and (eq (.Values.connectInject.enabled | toString) "-") .Values.global.enabled)) }}
+{{- if not (or (and (ne (.Values.client.enabled | toString) "-") .Values.client.enabled) (and (eq (.Values.client.enabled | toString) "-") .Values.global.enabled)) }}{{ fail "clients must be enabled for connect injection" }}{{ end }}
+{{- if not .Values.client.grpc }}{{ fail "client.grpc must be true for connect injection" }}{{ end }}
+{{- if and .Values.connectInject.consulNamespaces.mirroringK8S (not .Values.global.enableConsulNamespaces) }}{{ fail "global.enableConsulNamespaces must be true if mirroringK8S=true" }}{{ end }}
 {{- if and .Values.global.adminPartitions.enabled (not .Values.global.enableConsulNamespaces) }}{{ fail "global.enableConsulNamespaces must be true if global.adminPartitions.enabled=true" }}{{ end }}
+{{- if .Values.connectInject.centralConfig }}{{- if eq (toString .Values.connectInject.centralConfig.enabled) "false" }}{{ fail "connectInject.centralConfig.enabled cannot be set to false; to disable, set enable_central_service_config to false in server.extraConfig and client.extraConfig" }}{{ end -}}{{ end -}}
+{{- if .Values.connectInject.centralConfig }}{{- if .Values.connectInject.centralConfig.defaultProtocol }}{{ fail "connectInject.centralConfig.defaultProtocol is no longer supported; instead you must migrate to CRDs (see www.consul.io/docs/k8s/crds/upgrade-to-crds)" }}{{ end }}{{ end -}}
+{{- if .Values.connectInject.centralConfig }}{{ if .Values.connectInject.centralConfig.proxyDefaults }}{{- if ne (trim .Values.connectInject.centralConfig.proxyDefaults) `{}` }}{{ fail "connectInject.centralConfig.proxyDefaults is no longer supported; instead you must migrate to CRDs (see www.consul.io/docs/k8s/crds/upgrade-to-crds)" }}{{ end }}{{ end }}{{ end -}}
+{{- if .Values.connectInject.imageEnvoy }}{{ fail "connectInject.imageEnvoy must be specified in global.imageEnvoy" }}{{ end }}
+{{- if .Values.global.lifecycleSidecarContainer }}{{ fail "global.lifecycleSidecarContainer has been renamed to global.consulSidecarContainer. Please set values using global.consulSidecarContainer." }}{{ end }}
 {{ template "consul.validateVaultWebhookCertConfiguration" . }}
 {{- template "consul.reservedNamesFailer" (list .Values.connectInject.consulNamespaces.consulDestinationNamespace "connectInject.consulNamespaces.consulDestinationNamespace") }}
-{{- if and .Values.externalServers.enabled (not .Values.externalServers.hosts) }}{{ fail "externalServers.hosts must be set if externalServers.enabled is true" }}{{ end -}}
-{{- if and .Values.externalServers.skipServerWatch (not .Values.externalServers.enabled) }}{{ fail "externalServers.enabled must be set if externalServers.skipServerWatch is true" }}{{ end -}}
-{{- $dnsEnabled := (or (and (ne (.Values.dns.enabled | toString) "-") .Values.dns.enabled) (and (eq (.Values.dns.enabled | toString) "-") .Values.connectInject.transparentProxy.defaultEnabled)) -}}
-{{- $dnsRedirectionEnabled := (or (and (ne (.Values.dns.enableRedirection | toString) "-") .Values.dns.enableRedirection) (and (eq (.Values.dns.enableRedirection | toString) "-") .Values.connectInject.transparentProxy.defaultEnabled)) -}}
-{{ template "consul.validateRequiredCloudSecretsExist" . }}
-{{ template "consul.validateCloudSecretKeys" . }}
+{{- $serverEnabled := (or (and (ne (.Values.server.enabled | toString) "-") .Values.server.enabled) (and (eq (.Values.server.enabled | toString) "-") .Values.global.enabled)) -}}
+{{- $serverExposeServiceEnabled := (or (and (ne (.Values.server.exposeService.enabled | toString) "-") .Values.server.exposeService.enabled) (and (eq (.Values.server.exposeService.enabled | toString) "-") (or .Values.global.peering.enabled .Values.global.adminPartitions.enabled))) -}}
+{{- if not (or (eq .Values.global.peering.tokenGeneration.serverAddresses.source "") (or (eq .Values.global.peering.tokenGeneration.serverAddresses.source "static") (eq .Values.global.peering.tokenGeneration.serverAddresses.source "consul"))) }}{{ fail "global.peering.tokenGeneration.serverAddresses.source must be one of empty string, 'consul' or 'static'" }}{{ end }}
 # The deployment for running the Connect sidecar injector
 apiVersion: apps/v1
 kind: Deployment
@@ -41,9 +44,6 @@ spec:
         chart: {{ template "consul.chart" . }}
         release: {{ .Release.Name }}
         component: connect-injector
-        {{- if .Values.connectInject.extraLabels }}
-        {{- toYaml .Values.connectInject.extraLabels | nindent 8 }}
-        {{- end }}
         {{- if .Values.global.extraLabels }}
         {{- toYaml .Values.global.extraLabels | nindent 8 }}
         {{- end }}
@@ -91,63 +91,55 @@ spec:
       - name: sidecar-injector
        image: "{{ default .Values.global.imageK8S .Values.connectInject.image }}"
        ports:
-       - containerPort: 8080
-         name: webhook-server
-         protocol: TCP
+         - containerPort: 8080
+           name: webhook-server
+           protocol: TCP
        env:
          - name: NAMESPACE
            valueFrom:
              fieldRef:
                fieldPath: metadata.namespace
-         - name: POD_NAME
-           valueFrom:
-             fieldRef:
-               fieldPath: metadata.name
-         {{- include "consul.consulK8sConsulServerEnvVars" . | nindent 12 }}
          {{- if .Values.global.acls.manageSystemACLs }}
-         - name: CONSUL_LOGIN_AUTH_METHOD
-           {{- if and .Values.global.federation.enabled .Values.global.federation.primaryDatacenter }}
-           value: {{ template "consul.fullname" . }}-k8s-component-auth-method-{{ .Values.global.datacenter }}
-           {{- else }}
-           value: {{ template "consul.fullname" . }}-k8s-component-auth-method
-           {{- end }}
-         - name: CONSUL_LOGIN_DATACENTER
-           {{- if and .Values.global.federation.enabled .Values.global.federation.primaryDatacenter }}
-           value: {{ .Values.global.federation.primaryDatacenter }}
-           {{- else }}
-           value: {{ .Values.global.datacenter }}
-           {{- end }}
-         - name: CONSUL_LOGIN_META
-           value: "component=connect-injector,pod=$(NAMESPACE)/$(POD_NAME)"
+         - name: CONSUL_HTTP_TOKEN_FILE
+           value: "/consul/login/acl-token"
+         {{- end }}
+         {{- if .Values.global.tls.enabled }}
+         - name: CONSUL_CACERT
+           value: /consul/tls/ca/tls.crt
          {{- end }}
+         - name: HOST_IP
+           valueFrom:
+             fieldRef:
+               fieldPath: status.hostIP
          {{- if (and .Values.connectInject.aclInjectToken.secretName .Values.connectInject.aclInjectToken.secretKey) }}
-         - name: CONSUL_ACL_TOKEN
+         - name: CONSUL_HTTP_TOKEN
            valueFrom:
              secretKeyRef:
                name: {{ .Values.connectInject.aclInjectToken.secretName }}
                key: {{ .Values.connectInject.aclInjectToken.secretKey }}
          {{- end }}
+         - name: CONSUL_HTTP_ADDR
+           {{- if .Values.global.tls.enabled }}
+           value: https://$(HOST_IP):8501
+           {{- else }}
+           value: http://$(HOST_IP):8500
+           {{- end }}
        command:
          - "/bin/sh"
          - "-ec"
          - |
            consul-k8s-control-plane inject-connect \
-             {{- if .Values.global.federation.enabled }}
-             -enable-federation \
-             {{- end }}
+             -consul-api-timeout={{ .Values.global.consulAPITimeout }} \
              -log-level={{ default .Values.global.logLevel .Values.connectInject.logLevel }} \
              -log-json={{ .Values.global.logJSON }} \
              -default-inject={{ .Values.connectInject.default }} \
              -consul-image="{{ default .Values.global.image .Values.connectInject.imageConsul }}" \
-             -consul-dataplane-image="{{ .Values.global.imageConsulDataplane }}" \
+             -envoy-image="{{ .Values.global.imageEnvoy }}" \
              -consul-k8s-image="{{ default .Values.global.imageK8S .Values.connectInject.image }}" \
              -release-name="{{ .Release.Name }}" \
              -release-namespace="{{ .Release.Namespace }}" \
             -resource-prefix={{ template "consul.fullname" . }} \
             -listen=:8080 \
-             {{- range $k, $v := .Values.connectInject.consulNode.meta }}
-             -node-meta={{ $k }}={{ $v }} \
-             {{- end }}
              {{- if .Values.connectInject.transparentProxy.defaultEnabled }}
              -default-enable-transparent-proxy=true \
              {{- else }}
@@ -156,6 +148,23 @@ spec:
              -enable-cni={{ .Values.connectInject.cni.enabled }} \
              {{- if .Values.global.peering.enabled }}
              -enable-peering=true \
+             {{- if (eq .Values.global.peering.tokenGeneration.serverAddresses.source "") }}
+             {{- if (and $serverEnabled $serverExposeServiceEnabled) }}
+             -read-server-expose-service=true \
+             {{- else }}
+             {{- if .Values.externalServers.enabled }}
+             {{- $port := .Values.externalServers.grpcPort }}
+             {{- range $h := .Values.externalServers.hosts }}
+             -token-server-address="{{ $h }}:{{ $port }}" \
+             {{- end }}
+             {{- end }}
+             {{- end }}
+             {{- end }}
+             {{- if (eq .Values.global.peering.tokenGeneration.serverAddresses.source "static") }}
+             {{- range $addr := .Values.global.peering.tokenGeneration.serverAddresses.static }}
+             -token-server-address="{{ $addr }}" \
+             {{- end }}
+             {{- end }}
              {{- end }}
              {{- if .Values.global.openshift.enabled }}
              -enable-openshift \
@@ -165,7 +174,7 @@ spec:
              {{- else }}
              -transparent-proxy-default-overwrite-probes=false \
              {{- end }}
-             {{- if (and $dnsEnabled $dnsRedirectionEnabled) }}
+             {{- if (and .Values.dns.enabled .Values.dns.enableRedirection) }}
              -enable-consul-dns=true \
              {{- end }}
              {{- if .Values.global.openshift.enabled }}
@@ -176,7 +185,6 @@ spec:
              {{- else }}
              -default-enable-metrics=false \
              {{- end }}
-             -enable-gateway-metrics={{ .Values.global.metrics.enableGatewayMetrics }} \
              -default-enable-metrics-merging={{ .Values.connectInject.metrics.defaultEnableMerging }} \
              -default-merged-metrics-port={{ .Values.connectInject.metrics.defaultMergedMetricsPort }} \
              -default-prometheus-scrape-port={{ .Values.connectInject.metrics.defaultPrometheusScrapePort }} \
@@ -197,13 +205,14 @@ spec:
              {{- end }}
              {{- if .Values.global.adminPartitions.enabled }}
              -enable-partitions=true \
+             -partition={{ .Values.global.adminPartitions.name }} \
              {{- end }}
              {{- if .Values.global.enableConsulNamespaces }}
              -enable-namespaces=true \
              {{- if .Values.connectInject.consulNamespaces.consulDestinationNamespace }}
              -consul-destination-namespace={{ .Values.connectInject.consulNamespaces.consulDestinationNamespace }} \
              {{- end }}
-             {{- if and .Values.global.enableConsulNamespaces .Values.connectInject.consulNamespaces.mirroringK8S }}
+             {{- if .Values.connectInject.consulNamespaces.mirroringK8S }}
              -enable-k8s-namespace-mirroring=true \
              {{- if .Values.connectInject.consulNamespaces.mirroringK8SPrefix }}
              -k8s-namespace-mirroring-prefix={{ .Values.connectInject.consulNamespaces.mirroringK8SPrefix }} \
@@ -251,30 +260,39 @@ spec:
              {{- end }}
              {{- end }}
-             {{- if .Values.global.cloud.enabled }}
-             -tls-server-name=server.{{ .Values.global.datacenter}}.{{ .Values.global.domain}} \
+             {{- if .Values.global.consulSidecarContainer }}
+             {{- $consulSidecarResources := .Values.global.consulSidecarContainer.resources }}
+             {{- if not (kindIs "invalid" $consulSidecarResources.limits.memory) }}
+             -default-consul-sidecar-memory-limit={{ $consulSidecarResources.limits.memory }} \
+             {{- end }}
+             {{- if not (kindIs "invalid" $consulSidecarResources.requests.memory) }}
+             -default-consul-sidecar-memory-request={{ $consulSidecarResources.requests.memory }} \
+             {{- end }}
+             {{- if not (kindIs "invalid" $consulSidecarResources.limits.cpu) }}
+             -default-consul-sidecar-cpu-limit={{ $consulSidecarResources.limits.cpu }} \
              {{- end }}
-             {{- if and .Values.global.tls.enabled .Values.global.tls.enableAutoEncrypt }}
-             -enable-auto-encrypt \
+             {{- if not (kindIs "invalid" $consulSidecarResources.requests.cpu) }}
+             -default-consul-sidecar-cpu-request={{ $consulSidecarResources.requests.cpu }} \
              {{- end }}
+             {{- end }}
+       {{- if .Values.global.acls.manageSystemACLs }}
+       lifecycle:
+         preStop:
+           exec:
+             command:
+               - "/bin/sh"
+               - "-ec"
+               - |
+                 consul-k8s-control-plane consul-logout -consul-api-timeout={{ .Values.global.consulAPITimeout }}
+       {{- end }}
        startupProbe:
          httpGet:
            path: /readyz/ready
            port: 9445
            scheme: HTTP
-         initialDelaySeconds: 30
          failureThreshold: 15
          periodSeconds: 2
          timeoutSeconds: 5
-       livenessProbe:
-         httpGet:
-           path: /readyz/ready
-           port: 9445
-           scheme: HTTP
-         failureThreshold: 2
-         initialDelaySeconds: 1
-         successThreshold: 1
-         timeoutSeconds: 5
        readinessProbe:
          httpGet:
            path: /readyz/ready
@@ -286,14 +304,21 @@
          timeoutSeconds: 5
        volumeMounts:
        {{- if not (and .Values.global.secretsBackend.vault.enabled .Values.global.secretsBackend.vault.connectInject.tlsCert.secretName) }}
-       - name: certs
-         mountPath: /etc/connect-injector/certs
-         readOnly: true
+         - name: certs
+           mountPath: /etc/connect-injector/certs
+           readOnly: true
        {{- end }}
-       {{- if and .Values.global.tls.enabled (not (or (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots) .Values.global.secretsBackend.vault.enabled))}}
-       - name: consul-ca-cert
-         mountPath: /consul/tls/ca
-         readOnly: true
+         - mountPath: /consul/login
+           name: consul-data
+           readOnly: true
+       {{- if .Values.global.tls.enabled }}
+       {{- if .Values.global.tls.enableAutoEncrypt }}
+         - name: consul-auto-encrypt-ca-cert
+       {{- else }}
+         - name: consul-ca-cert
+       {{- end }}
+           mountPath: /consul/tls/ca
+           readOnly: true
        {{- end }}
        {{- with .Values.connectInject.resources }}
        resources:
@@ -301,23 +326,94 @@
      volumes:
      {{- if not (and .Values.global.secretsBackend.vault.enabled .Values.global.secretsBackend.vault.connectInject.tlsCert.secretName) }}
-     - name: certs
-       secret:
-         defaultMode: 420
-         secretName: {{ template "consul.fullname" . }}-connect-inject-webhook-cert
+       - name: certs
+         secret:
+           defaultMode: 420
+           secretName: {{ template "consul.fullname" . }}-connect-inject-webhook-cert
      {{- end }}
+       - name: consul-data
+         emptyDir:
+           medium: "Memory"
      {{- if .Values.global.tls.enabled }}
-     {{- if not (or (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots) .Values.global.secretsBackend.vault.enabled) }}
+     {{- if not (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots) }}
+       - name: consul-ca-cert
+         secret:
+           {{- if .Values.global.tls.caCert.secretName }}
+           secretName: {{ .Values.global.tls.caCert.secretName }}
+           {{- else }}
+           secretName: {{ template "consul.fullname" . }}-ca-cert
+           {{- end }}
+           items:
+             - key: {{ default "tls.crt" .Values.global.tls.caCert.secretKey }}
+               path: tls.crt
+     {{- end }}
+     {{- if .Values.global.tls.enableAutoEncrypt }}
+       - name: consul-auto-encrypt-ca-cert
+         emptyDir:
+           medium: "Memory"
+     {{- end }}
+     {{- end }}
+     {{- if or (and .Values.global.acls.manageSystemACLs) (and .Values.global.tls.enabled .Values.global.tls.enableAutoEncrypt) }}
+     initContainers:
+     {{- if and .Values.global.tls.enabled .Values.global.tls.enableAutoEncrypt }}
+     {{- include "consul.getAutoEncryptClientCA" . | nindent 6 }}
+     {{- end }}
+     {{- if .Values.global.acls.manageSystemACLs }}
+     - name: connect-injector-acl-init
+       env:
+         - name: HOST_IP
+           valueFrom:
+             fieldRef:
+               fieldPath: status.hostIP
+         {{- if .Values.global.tls.enabled }}
+         - name: CONSUL_CACERT
+           value: /consul/tls/ca/tls.crt
+         {{- end }}
+         - name: CONSUL_HTTP_ADDR
+           {{- if .Values.global.tls.enabled }}
+           value: https://$(HOST_IP):8501
+           {{- else }}
+           value: http://$(HOST_IP):8500
+           {{- end }}
+       image: {{ .Values.global.imageK8S }}
+       volumeMounts:
+         - mountPath: /consul/login
+           name: consul-data
+           readOnly: false
+         {{- if .Values.global.tls.enabled }}
+         {{- if .Values.global.tls.enableAutoEncrypt }}
+         - name: consul-auto-encrypt-ca-cert
+         {{- else }}
          - name: consul-ca-cert
-       secret:
-         {{- if .Values.global.tls.caCert.secretName }}
-         secretName: {{ .Values.global.tls.caCert.secretName }}
-         {{- else }}
-         secretName: {{ template "consul.fullname" . }}-ca-cert
-         {{- end }}
-         items:
-           - key: {{ default "tls.crt" .Values.global.tls.caCert.secretKey }}
-             path: tls.crt
+         {{- end }}
+           mountPath: /consul/tls/ca
+           readOnly: true
+         {{- end }}
+       command:
+         - "/bin/sh"
+         - "-ec"
+         - |
+           consul-k8s-control-plane acl-init \
+             -component-name=connect-injector \
+             {{- if and .Values.global.federation.enabled .Values.global.federation.primaryDatacenter .Values.global.enableConsulNamespaces }}
+             -acl-auth-method={{ template "consul.fullname" . }}-k8s-component-auth-method-{{ .Values.global.datacenter }} \
+             -primary-datacenter={{ .Values.global.federation.primaryDatacenter }} \
+             {{- else }}
+             -acl-auth-method={{ template "consul.fullname" . }}-k8s-component-auth-method \
+             {{- end }}
+             {{- if .Values.global.adminPartitions.enabled }}
+             -partition={{ .Values.global.adminPartitions.name }} \
+             {{- end }}
+             -consul-api-timeout={{ .Values.global.consulAPITimeout }} \
+             -log-level={{ default .Values.global.logLevel .Values.connectInject.logLevel }} \
+             -log-json={{ .Values.global.logJSON }}
+       resources:
+         requests:
+           memory: "25Mi"
+           cpu: "50m"
+         limits:
+           memory: "25Mi"
+           cpu: "50m"
      {{- end }}
      {{- end }}
      {{- if .Values.connectInject.priorityClassName }}
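
An illustrative values sketch (not part of the patch): the fail-fast checks re-added at the top of this template encode the restored architecture's requirements — a node-local client agent with gRPC on, which the injector reaches via HOST_IP. A minimal configuration those checks accept, assuming otherwise default values:

    global:
      enabled: true
      consulAPITimeout: 5s
    client:
      enabled: true   # required: "clients must be enabled for connect injection"
      grpc: true      # required: "client.grpc must be true for connect injection"
    connectInject:
      enabled: true
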
diff --git a/charts/consul/templates/connect-inject-mutatingwebhookconfiguration.yaml b/charts/consul/templates/connect-inject-mutatingwebhookconfiguration.yaml
index afcfd3800f..cd5bc8c290 100644
--- a/charts/consul/templates/connect-inject-mutatingwebhookconfiguration.yaml
+++ b/charts/consul/templates/connect-inject-mutatingwebhookconfiguration.yaml
@@ -12,284 +12,74 @@ metadata:
     release: {{ .Release.Name }}
     component: connect-injector
 webhooks:
-- clientConfig:
-    service:
-      name: {{ template "consul.fullname" . }}-connect-injector
-      namespace: {{ .Release.Namespace }}
-      path: /mutate-v1alpha1-proxydefaults
-  failurePolicy: Fail
-  admissionReviewVersions:
-  - "v1beta1"
-  - "v1"
-  name: mutate-proxydefaults.consul.hashicorp.com
-  rules:
-  - apiGroups:
-    - consul.hashicorp.com
-    apiVersions:
-    - v1alpha1
-    operations:
-    - CREATE
-    - UPDATE
-    resources:
-    - proxydefaults
-  sideEffects: None
-- clientConfig:
-    service:
-      name: {{ template "consul.fullname" . }}-connect-injector
-      namespace: {{ .Release.Namespace }}
-      path: /mutate-v1alpha1-mesh
-  failurePolicy: Fail
-  admissionReviewVersions:
-  - "v1beta1"
-  - "v1"
-  name: mutate-mesh.consul.hashicorp.com
-  rules:
-  - apiGroups:
-    - consul.hashicorp.com
-    apiVersions:
-    - v1alpha1
-    operations:
-    - CREATE
-    - UPDATE
-    resources:
-    - meshes
-  sideEffects: None
-- clientConfig:
-    service:
-      name: {{ template "consul.fullname" . }}-connect-injector
-      namespace: {{ .Release.Namespace }}
-      path: /mutate-v1alpha1-servicedefaults
-  failurePolicy: Fail
-  admissionReviewVersions:
-  - "v1beta1"
-  - "v1"
-  name: mutate-servicedefaults.consul.hashicorp.com
-  rules:
-  - apiGroups:
-    - consul.hashicorp.com
-    apiVersions:
-    - v1alpha1
-    operations:
-    - CREATE
-    - UPDATE
-    resources:
-    - servicedefaults
-  sideEffects: None
-- clientConfig:
-    service:
-      name: {{ template "consul.fullname" . }}-connect-injector
-      namespace: {{ .Release.Namespace }}
-      path: /mutate-v1alpha1-serviceresolver
-  failurePolicy: Fail
-  admissionReviewVersions:
-  - "v1beta1"
-  - "v1"
-  name: mutate-serviceresolver.consul.hashicorp.com
-  rules:
-  - apiGroups:
-    - consul.hashicorp.com
-    apiVersions:
-    - v1alpha1
-    operations:
-    - CREATE
-    - UPDATE
-    resources:
-    - serviceresolvers
-  sideEffects: None
-- clientConfig:
-    service:
-      name: {{ template "consul.fullname" . }}-connect-injector
-      namespace: {{ .Release.Namespace }}
-      path: /mutate-v1alpha1-servicerouter
-  failurePolicy: Fail
-  admissionReviewVersions:
-  - "v1beta1"
-  - "v1"
-  name: mutate-servicerouter.consul.hashicorp.com
-  rules:
-  - apiGroups:
-    - consul.hashicorp.com
-    apiVersions:
-    - v1alpha1
-    operations:
-    - CREATE
-    - UPDATE
-    resources:
-    - servicerouters
-  sideEffects: None
-- clientConfig:
-    service:
-      name: {{ template "consul.fullname" . }}-connect-injector
-      namespace: {{ .Release.Namespace }}
-      path: /mutate-v1alpha1-servicesplitter
-  failurePolicy: Fail
-  admissionReviewVersions:
-  - "v1beta1"
-  - "v1"
-  name: mutate-servicesplitter.consul.hashicorp.com
-  rules:
-  - apiGroups:
-    - consul.hashicorp.com
-    apiVersions:
-    - v1alpha1
-    operations:
-    - CREATE
-    - UPDATE
-    resources:
-    - servicesplitters
-  sideEffects: None
-- clientConfig:
-    service:
-      name: {{ template "consul.fullname" . }}-connect-injector
-      namespace: {{ .Release.Namespace }}
-      path: /mutate-v1alpha1-serviceintentions
-  failurePolicy: Fail
-  admissionReviewVersions:
-  - "v1beta1"
-  - "v1"
-  name: mutate-serviceintentions.consul.hashicorp.com
-  rules:
-  - apiGroups:
-    - consul.hashicorp.com
-    apiVersions:
-    - v1alpha1
-    operations:
-    - CREATE
-    - UPDATE
-    resources:
-    - serviceintentions
-  sideEffects: None
-- clientConfig:
-    service:
-      name: {{ template "consul.fullname" . }}-connect-injector
-      namespace: {{ .Release.Namespace }}
-      path: /mutate-v1alpha1-ingressgateway
-  failurePolicy: Fail
-  admissionReviewVersions:
-  - "v1beta1"
-  - "v1"
-  name: mutate-ingressgateway.consul.hashicorp.com
-  rules:
-  - apiGroups:
-    - consul.hashicorp.com
-    apiVersions:
-    - v1alpha1
-    operations:
-    - CREATE
-    - UPDATE
-    resources:
-    - ingressgateways
-  sideEffects: None
-- clientConfig:
-    service:
-      name: {{ template "consul.fullname" . }}-connect-injector
-      namespace: {{ .Release.Namespace }}
-      path: /mutate-v1alpha1-terminatinggateway
-  failurePolicy: Fail
-  admissionReviewVersions:
-  - "v1beta1"
-  - "v1"
-  name: mutate-terminatinggateway.consul.hashicorp.com
-  rules:
-  - apiGroups:
-    - consul.hashicorp.com
-    apiVersions:
-    - v1alpha1
-    operations:
-    - CREATE
-    - UPDATE
-    resources:
-    - terminatinggateways
-  sideEffects: None
-- clientConfig:
-    service:
-      name: {{ template "consul.fullname" . }}-connect-injector
-      namespace: {{ .Release.Namespace }}
-      path: /mutate-v1alpha1-exportedservices
-  failurePolicy: Fail
-  admissionReviewVersions:
-  - "v1beta1"
-  - "v1"
-  name: mutate-exportedservices.consul.hashicorp.com
-  rules:
-  - apiGroups:
-    - consul.hashicorp.com
-    apiVersions:
-    - v1alpha1
-    operations:
-    - CREATE
-    - UPDATE
-    resources:
-    - exportedservices
-  sideEffects: None
-- name: {{ template "consul.fullname" . }}-connect-injector.consul.hashicorp.com
-  # The webhook will fail scheduling all pods that are not part of consul if all replicas of the webhook are unhealthy.
-  objectSelector:
-    matchExpressions:
-      - key: app
-        operator: NotIn
-        values: [ {{ template "consul.name" . }} ]
-  failurePolicy: {{ .Values.connectInject.failurePolicy }}
-  sideEffects: None
-  admissionReviewVersions:
-    - "v1beta1"
-    - "v1"
-  clientConfig:
-    service:
-      name: {{ template "consul.fullname" . }}-connect-injector
-      namespace: {{ .Release.Namespace }}
-      path: "/mutate"
-  rules:
-    - operations: [ "CREATE" ]
-      apiGroups: [ "" ]
-      apiVersions: [ "v1" ]
-      resources: [ "pods" ]
+  - name: {{ template "consul.fullname" . }}-connect-injector.consul.hashicorp.com
+    # The webhook will fail scheduling all pods that are not part of consul if all replicas of the webhook are unhealthy.
+    objectSelector:
+      matchExpressions:
+        - key: app
+          operator: NotIn
+          values: [ {{ template "consul.name" . }} ]
+    failurePolicy: {{ .Values.connectInject.failurePolicy }}
+    sideEffects: None
+    admissionReviewVersions:
+      - "v1beta1"
+      - "v1"
+    clientConfig:
+      service:
+        name: {{ template "consul.fullname" . }}-connect-injector
+        namespace: {{ .Release.Namespace }}
+        path: "/mutate"
+    rules:
+      - operations: [ "CREATE" ]
+        apiGroups: [""]
+        apiVersions: ["v1"]
+        resources: ["pods"]
 {{- if .Values.connectInject.namespaceSelector }}
-  namespaceSelector:
+    namespaceSelector:
 {{ tpl .Values.connectInject.namespaceSelector . | indent 6 }}
 {{- end }}
 {{- if .Values.global.peering.enabled }}
-- name: {{ template "consul.fullname" . }}-mutate-peeringacceptors.consul.hashicorp.com
-  clientConfig:
-    service:
-      name: {{ template "consul.fullname" . }}-connect-injector
-      namespace: {{ .Release.Namespace }}
-      path: "/mutate-v1alpha1-peeringacceptors"
-  rules:
-  - apiGroups:
-    - consul.hashicorp.com
-    apiVersions:
-    - v1alpha1
-    operations:
-    - CREATE
-    - UPDATE
-    resources:
-    - peeringacceptors
-  failurePolicy: Fail
-  sideEffects: None
-  admissionReviewVersions:
-  - "v1beta1"
-  - "v1"
-- name: {{ template "consul.fullname" . }}-mutate-peeringdialers.consul.hashicorp.com
-  clientConfig:
-    service:
-      name: {{ template "consul.fullname" . }}-connect-injector
-      namespace: {{ .Release.Namespace }}
-      path: "/mutate-v1alpha1-peeringdialers"
-  rules:
-  - apiGroups:
-    - consul.hashicorp.com
-    apiVersions:
-    - v1alpha1
-    operations:
-    - CREATE
-    - UPDATE
-    resources:
-    - peeringdialers
-  failurePolicy: Fail
-  sideEffects: None
-  admissionReviewVersions:
-  - "v1beta1"
-  - "v1"
+  - name: {{ template "consul.fullname" . }}-mutate-peeringacceptors.consul.hashicorp.com
+    clientConfig:
+      service:
+        name: {{ template "consul.fullname" . }}-connect-injector
+        namespace: {{ .Release.Namespace }}
+        path: "/mutate-v1alpha1-peeringacceptors"
+    rules:
+      - apiGroups:
+          - consul.hashicorp.com
+        apiVersions:
+          - v1alpha1
+        operations:
+          - CREATE
+          - UPDATE
+        resources:
+          - peeringacceptors
+    failurePolicy: Fail
+    sideEffects: None
+    admissionReviewVersions:
+      - "v1beta1"
+      - "v1"
+  - name: {{ template "consul.fullname" . }}-mutate-peeringdialers.consul.hashicorp.com
+    clientConfig:
+      service:
+        name: {{ template "consul.fullname" . }}-connect-injector
+        namespace: {{ .Release.Namespace }}
+        path: "/mutate-v1alpha1-peeringdialers"
+    rules:
+      - apiGroups:
+          - consul.hashicorp.com
+        apiVersions:
+          - v1alpha1
+        operations:
+          - CREATE
+          - UPDATE
+        resources:
+          - peeringdialers
+    failurePolicy: Fail
+    sideEffects: None
+    admissionReviewVersions:
+      - "v1beta1"
+      - "v1"
 {{- end }}
 {{- end }}
diff --git a/charts/consul/templates/connect-injector-disruptionbudget.yaml b/charts/consul/templates/connect-injector-disruptionbudget.yaml
index 9b9cf2e39e..08f1401fbe 100644
--- a/charts/consul/templates/connect-injector-disruptionbudget.yaml
+++ b/charts/consul/templates/connect-injector-disruptionbudget.yaml
@@ -17,11 +17,7 @@ metadata:
     release: {{ .Release.Name }}
     component: connect-injector
 spec:
-  {{- if .Values.connectInject.disruptionBudget.minAvailable }}
-  minAvailable: {{ .Values.connectInject.disruptionBudget.minAvailable }}
-  {{- else }}
   maxUnavailable: {{ template "consul.pdb.connectInject.maxUnavailable" . }}
-  {{- end }}
   selector:
     matchLabels:
       app: {{ template "consul.name" . }}
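
A hypothetical values sketch (illustrative only): with minAvailable support removed, the budget is always derived from the replica count through the consul.pdb.connectInject.maxUnavailable helper, so there is no per-budget knob left to set:

    connectInject:
      enabled: true
      replicas: 2
      disruptionBudget:
        enabled: true   # a minAvailable key here would now be ignored by the template
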
diff --git a/charts/consul/templates/controller-clusterrole.yaml b/charts/consul/templates/controller-clusterrole.yaml
new file mode 100644
index 0000000000..fc0753cc06
--- /dev/null
+++ b/charts/consul/templates/controller-clusterrole.yaml
@@ -0,0 +1,79 @@
+{{- if .Values.controller.enabled }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: {{ template "consul.fullname" . }}-controller
+  labels:
+    app: {{ template "consul.name" . }}
+    chart: {{ template "consul.chart" . }}
+    heritage: {{ .Release.Service }}
+    release: {{ .Release.Name }}
+    component: controller
+rules:
+- apiGroups:
+  - consul.hashicorp.com
+  resources:
+  - servicedefaults
+  - serviceresolvers
+  - proxydefaults
+  - meshes
+  - exportedservices
+  - servicerouters
+  - servicesplitters
+  - serviceintentions
+  - ingressgateways
+  - terminatinggateways
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - consul.hashicorp.com
+  resources:
+  - servicedefaults/status
+  - serviceresolvers/status
+  - proxydefaults/status
+  - meshes/status
+  - exportedservices/status
+  - servicerouters/status
+  - servicesplitters/status
+  - serviceintentions/status
+  - ingressgateways/status
+  - terminatinggateways/status
+  verbs:
+  - get
+  - patch
+  - update
+- apiGroups:
+  - coordination.k8s.io
+  resources:
+  - leases
+  verbs:
+  - create
+  - get
+  - list
+  - update
+{{- if (and .Values.global.secretsBackend.vault.enabled .Values.global.secretsBackend.vault.controllerRole .Values.global.secretsBackend.vault.controller.tlsCert.secretName .Values.global.secretsBackend.vault.controller.caCert.secretName)}}
+- apiGroups:
+  - admissionregistration.k8s.io
+  resources:
+  - mutatingwebhookconfigurations
+  verbs:
+  - get
+  - list
+  - watch
+  - patch
+{{- end }}
+{{- if .Values.global.enablePodSecurityPolicies }}
+- apiGroups: ["policy"]
+  resources: ["podsecuritypolicies"]
+  resourceNames:
+  - {{ template "consul.fullname" . }}-controller
+  verbs:
+  - use
+{{- end }}
+{{- end }}
diff --git a/charts/consul/templates/controller-clusterrolebinding.yaml b/charts/consul/templates/controller-clusterrolebinding.yaml
new file mode 100644
index 0000000000..71262fa476
--- /dev/null
+++ b/charts/consul/templates/controller-clusterrolebinding.yaml
@@ -0,0 +1,20 @@
+{{- if .Values.controller.enabled }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: {{ template "consul.fullname" . }}-controller
+  labels:
+    app: {{ template "consul.name" . }}
+    chart: {{ template "consul.chart" . }}
+    heritage: {{ .Release.Service }}
+    release: {{ .Release.Name }}
+    component: controller
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: {{ template "consul.fullname" . }}-controller
+subjects:
+- kind: ServiceAccount
+  name: {{ template "consul.fullname" . }}-controller
+  namespace: {{ .Release.Namespace }}
+{{- end }}
diff --git a/charts/consul/templates/controller-deployment.yaml b/charts/consul/templates/controller-deployment.yaml
new file mode 100644
index 0000000000..44b13553bc
--- /dev/null
+++ b/charts/consul/templates/controller-deployment.yaml
@@ -0,0 +1,277 @@
+{{- if .Values.controller.enabled }}
+{{- if and .Values.global.adminPartitions.enabled (not .Values.global.enableConsulNamespaces) }}{{ fail "global.enableConsulNamespaces must be true if global.adminPartitions.enabled=true" }}{{ end }}
+{{ template "consul.validateVaultWebhookCertConfiguration" . }}
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: {{ template "consul.fullname" . }}-controller
+  namespace: {{ .Release.Namespace }}
+  labels:
+    app: {{ template "consul.name" . }}
+    chart: {{ template "consul.chart" . }}
+    heritage: {{ .Release.Service }}
+    release: {{ .Release.Name }}
+    component: controller
+spec:
+  replicas: {{ .Values.controller.replicas }}
+  selector:
+    matchLabels:
+      app: {{ template "consul.name" . }}
+      chart: {{ template "consul.chart" . }}
+      heritage: {{ .Release.Service }}
+      release: {{ .Release.Name }}
+      component: controller
+  template:
+    metadata:
+      labels:
+        app: {{ template "consul.name" . }}
+        chart: {{ template "consul.chart" . }}
+        heritage: {{ .Release.Service }}
+        release: {{ .Release.Name }}
+        component: controller
+      annotations:
+        "consul.hashicorp.com/connect-inject": "false"
+        {{- if (and .Values.global.secretsBackend.vault.enabled .Values.global.tls.enabled) }}
+        "vault.hashicorp.com/agent-init-first": "true"
+        "vault.hashicorp.com/agent-inject": "true"
+        {{- if .Values.global.secretsBackend.vault.controllerRole }}
+        "vault.hashicorp.com/role": {{ .Values.global.secretsBackend.vault.controllerRole }}
+        {{ else }}
+        "vault.hashicorp.com/role": {{ .Values.global.secretsBackend.vault.consulCARole }}
+        {{ end }}
+        "vault.hashicorp.com/agent-inject-secret-serverca.crt": {{ .Values.global.tls.caCert.secretName }}
+        "vault.hashicorp.com/agent-inject-template-serverca.crt": {{ template "consul.serverTLSCATemplate" . }}
+        {{- if .Values.global.secretsBackend.vault.controller.caCert.secretName }}
+        {{- with .Values.global.secretsBackend.vault.controller.caCert }}
+        "vault.hashicorp.com/agent-inject-secret-ca.crt": {{ .secretName }}
}} + "vault.hashicorp.com/secret-volume-path-ca.crt": "/vault/secrets/controller-webhook/certs" + {{- end }} + {{- end }} + {{- if .Values.global.secretsBackend.vault.controller.tlsCert.secretName }} + "vault.hashicorp.com/agent-inject-secret-tls.crt": {{ .Values.global.secretsBackend.vault.controller.tlsCert.secretName }} + "vault.hashicorp.com/agent-inject-template-tls.crt": {{ include "consul.controllerWebhookTLSCertTemplate" . }} + "vault.hashicorp.com/secret-volume-path-tls.crt": "/vault/secrets/controller-webhook/certs" + "vault.hashicorp.com/agent-inject-secret-tls.key": {{ .Values.global.secretsBackend.vault.controller.tlsCert.secretName }} + "vault.hashicorp.com/agent-inject-template-tls.key": {{ include "consul.controllerWebhookTLSKeyTemplate" . }} + "vault.hashicorp.com/secret-volume-path-tls.key": "/vault/secrets/controller-webhook/certs" + {{- end }} + {{- if and .Values.global.secretsBackend.vault.ca.secretName .Values.global.secretsBackend.vault.ca.secretKey }} + "vault.hashicorp.com/agent-extra-secret": "{{ .Values.global.secretsBackend.vault.ca.secretName }}" + "vault.hashicorp.com/ca-cert": "/vault/custom/{{ .Values.global.secretsBackend.vault.ca.secretKey }}" + {{- end }} + {{- if .Values.global.secretsBackend.vault.agentAnnotations }} + {{ tpl .Values.global.secretsBackend.vault.agentAnnotations . | nindent 8 | trim }} + {{- end }} + {{- end }} + spec: + {{- if or .Values.global.acls.manageSystemACLs (and .Values.global.tls.enabled .Values.global.tls.enableAutoEncrypt) }} + initContainers: + {{- if and .Values.global.tls.enabled .Values.global.tls.enableAutoEncrypt }} + {{- include "consul.getAutoEncryptClientCA" . | nindent 6 }} + {{- end }} + {{- if .Values.global.acls.manageSystemACLs }} + - name: controller-acl-init + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + {{- if .Values.global.tls.enabled }} + - name: CONSUL_CACERT + value: /consul/tls/ca/tls.crt + {{- end }} + - name: CONSUL_HTTP_ADDR + {{- if .Values.global.tls.enabled }} + value: https://$(HOST_IP):8501 + {{- else }} + value: http://$(HOST_IP):8500 + {{- end }} + image: {{ .Values.global.imageK8S }} + volumeMounts: + - mountPath: /consul/login + name: consul-data + readOnly: false + {{- if .Values.global.tls.enabled }} + {{- if .Values.global.tls.enableAutoEncrypt }} + - name: consul-auto-encrypt-ca-cert + {{- else }} + - name: consul-ca-cert + {{- end }} + mountPath: /consul/tls/ca + readOnly: true + {{- end }} + command: + - "/bin/sh" + - "-ec" + - | + consul-k8s-control-plane acl-init \ + -component-name=controller \ + {{- if and .Values.global.federation.enabled .Values.global.federation.primaryDatacenter }} + -acl-auth-method={{ template "consul.fullname" . }}-k8s-component-auth-method-{{ .Values.global.datacenter }} \ + -primary-datacenter={{ .Values.global.federation.primaryDatacenter }} \ + {{- else }} + -acl-auth-method={{ template "consul.fullname" . 
+              -acl-auth-method={{ template "consul.fullname" . }}-k8s-component-auth-method \
+              {{- end }}
+              {{- if .Values.global.adminPartitions.enabled }}
+              -partition={{ .Values.global.adminPartitions.name }} \
+              {{- end }}
+              -consul-api-timeout={{ .Values.global.consulAPITimeout }} \
+              -log-level={{ default .Values.global.logLevel .Values.controller.logLevel }} \
+              -log-json={{ .Values.global.logJSON }}
+        resources:
+          requests:
+            memory: "25Mi"
+            cpu: "50m"
+          limits:
+            memory: "25Mi"
+            cpu: "50m"
+      {{- end }}
+      {{- end }}
+      containers:
+      - command:
+        - "/bin/sh"
+        - "-ec"
+        - |
+          consul-k8s-control-plane controller \
+            -consul-api-timeout={{ .Values.global.consulAPITimeout }} \
+            -log-level={{ default .Values.global.logLevel .Values.controller.logLevel }} \
+            -log-json={{ .Values.global.logJSON }} \
+            -resource-prefix={{ template "consul.fullname" . }} \
+            {{- if and .Values.global.secretsBackend.vault.enabled .Values.global.secretsBackend.vault.controller.tlsCert.secretName }}
+            -enable-webhook-ca-update \
+            -webhook-tls-cert-dir=/vault/secrets/controller-webhook/certs \
+            {{- else }}
+            -webhook-tls-cert-dir=/tmp/controller-webhook/certs \
+            {{- end }}
+            -datacenter={{ .Values.global.datacenter }} \
+            {{- if .Values.global.adminPartitions.enabled }}
+            -partition={{ .Values.global.adminPartitions.name }} \
+            {{- end }}
+            -enable-leader-election \
+            {{- if .Values.global.enableConsulNamespaces }}
+            -enable-namespaces=true \
+            {{- if .Values.connectInject.consulNamespaces.consulDestinationNamespace }}
+            -consul-destination-namespace={{ .Values.connectInject.consulNamespaces.consulDestinationNamespace }} \
+            {{- end }}
+            {{- if .Values.connectInject.consulNamespaces.mirroringK8S }}
+            -enable-k8s-namespace-mirroring=true \
+            {{- if .Values.connectInject.consulNamespaces.mirroringK8SPrefix }}
+            -k8s-namespace-mirroring-prefix={{ .Values.connectInject.consulNamespaces.mirroringK8SPrefix }} \
+            {{- end }}
+            {{- end }}
+            {{- if .Values.global.acls.manageSystemACLs }}
+            -consul-cross-namespace-acl-policy=cross-namespace-policy \
+            {{- end }}
+            {{- end }}
+        {{- if .Values.global.acls.manageSystemACLs }}
+        lifecycle:
+          preStop:
+            exec:
+              command:
+                - "/bin/sh"
+                - "-ec"
+                - |
+                  consul-k8s-control-plane consul-logout -consul-api-timeout={{ .Values.global.consulAPITimeout }}
+        {{- end }}
+        env:
+          {{- if .Values.global.acls.manageSystemACLs }}
+          - name: CONSUL_HTTP_TOKEN_FILE
+            value: "/consul/login/acl-token"
+          {{- end }}
+          - name: HOST_IP
+            valueFrom:
+              fieldRef:
+                fieldPath: status.hostIP
+          {{- if (and .Values.controller.aclToken.secretName .Values.controller.aclToken.secretKey) }}
+          - name: CONSUL_HTTP_TOKEN
+            valueFrom:
+              secretKeyRef:
+                name: {{ .Values.controller.aclToken.secretName }}
+                key: {{ .Values.controller.aclToken.secretKey }}
+          {{- end }}
+          {{- if .Values.global.tls.enabled }}
+          - name: CONSUL_CACERT
+            value: /consul/tls/ca/tls.crt
+          {{- end }}
+          - name: CONSUL_HTTP_ADDR
+            {{- if .Values.global.tls.enabled }}
+            value: https://$(HOST_IP):8501
+            {{- else }}
+            value: http://$(HOST_IP):8500
+            {{- end }}
+        image: {{ .Values.global.imageK8S }}
+        name: controller
+        ports:
+        - containerPort: 9443
+          name: webhook-server
+          protocol: TCP
+        {{- with .Values.controller.resources }}
+        resources:
+          {{- toYaml . | nindent 12 }}
+        {{- end }}
+        volumeMounts:
+        - mountPath: /consul/login
+          name: consul-data
+          readOnly: true
+        {{- if not (and .Values.global.secretsBackend.vault.enabled .Values.global.secretsBackend.vault.controller.tlsCert.secretName) }}
+        - mountPath: /tmp/controller-webhook/certs
+          name: cert
+          readOnly: true
+        {{- end }}
+        {{- if .Values.global.tls.enabled }}
+        {{- if .Values.global.tls.enableAutoEncrypt }}
+        - name: consul-auto-encrypt-ca-cert
+        {{- else }}
+        - name: consul-ca-cert
+        {{- end }}
+          mountPath: /consul/tls/ca
+          readOnly: true
+        {{- end }}
+      terminationGracePeriodSeconds: 10
+      volumes:
+      {{- if not (and .Values.global.secretsBackend.vault.enabled .Values.global.secretsBackend.vault.controller.tlsCert.secretName) }}
+      - name: cert
+        secret:
+          defaultMode: 420
+          secretName: {{ template "consul.fullname" . }}-controller-webhook-cert
+      {{- end }}
+      {{- if .Values.global.tls.enabled }}
+      {{- if not (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots) }}
+      - name: consul-ca-cert
+        secret:
+          {{- if .Values.global.tls.caCert.secretName }}
+          secretName: {{ .Values.global.tls.caCert.secretName }}
+          {{- else }}
+          secretName: {{ template "consul.fullname" . }}-ca-cert
+          {{- end }}
+          items:
+          - key: {{ default "tls.crt" .Values.global.tls.caCert.secretKey }}
+            path: tls.crt
+      {{- end }}
+      {{- if .Values.global.tls.enableAutoEncrypt }}
+      - name: consul-auto-encrypt-ca-cert
+        emptyDir:
+          medium: "Memory"
+      {{- end }}
+      {{- end }}
+      - name: consul-data
+        emptyDir:
+          medium: "Memory"
+      serviceAccountName: {{ template "consul.fullname" . }}-controller
+      {{- if .Values.controller.nodeSelector }}
+      nodeSelector:
+        {{ tpl .Values.controller.nodeSelector . | indent 8 | trim }}
+      {{- end }}
+      {{- if .Values.controller.affinity }}
+      affinity:
+        {{ tpl .Values.controller.affinity . | indent 8 | trim }}
+      {{- end }}
+      {{- if .Values.controller.tolerations }}
+      tolerations:
+        {{ tpl .Values.controller.tolerations . | indent 8 | trim }}
+      {{- end }}
+      {{- if .Values.controller.priorityClassName }}
+      priorityClassName: {{ .Values.controller.priorityClassName | quote }}
+      {{- end }}
+{{- end }}
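
An illustrative values sketch (not part of the patch): every controller-* template added in this diff — the Deployment above plus its RBAC, webhook Service, and webhook configuration below — is gated on a single switch. Assuming otherwise default values:

    controller:
      enabled: true   # renders the controller Deployment, ClusterRole/Binding, leader-election Role, webhook Service, and MutatingWebhookConfiguration
      replicas: 1
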
diff --git a/charts/consul/templates/controller-leader-election-role.yaml b/charts/consul/templates/controller-leader-election-role.yaml
new file mode 100644
index 0000000000..177a968954
--- /dev/null
+++ b/charts/consul/templates/controller-leader-election-role.yaml
@@ -0,0 +1,41 @@
+{{- if .Values.controller.enabled }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: {{ template "consul.fullname" . }}-controller-leader-election
+  namespace: {{ .Release.Namespace }}
+  labels:
+    app: {{ template "consul.name" . }}
+    chart: {{ template "consul.chart" . }}
+    heritage: {{ .Release.Service }}
+    release: {{ .Release.Name }}
+    component: controller
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - configmaps
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - patch
+  - delete
+- apiGroups:
+  - ""
+  resources:
+  - configmaps/status
+  verbs:
+  - get
+  - update
+  - patch
+- apiGroups:
+  - ""
+  resources:
+  - events
+  verbs:
+  - create
+  - patch
+{{- end }}
diff --git a/charts/consul/templates/controller-leader-election-rolebinding.yaml b/charts/consul/templates/controller-leader-election-rolebinding.yaml
new file mode 100644
index 0000000000..28fa84f8e2
--- /dev/null
+++ b/charts/consul/templates/controller-leader-election-rolebinding.yaml
@@ -0,0 +1,20 @@
+{{- if .Values.controller.enabled }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: {{ template "consul.fullname" . }}-controller-leader-election
+  namespace: {{ .Release.Namespace }}
+  labels:
+    app: {{ template "consul.name" . }}
+    chart: {{ template "consul.chart" . }}
+    heritage: {{ .Release.Service }}
+    release: {{ .Release.Name }}
+    component: controller
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: {{ template "consul.fullname" . }}-controller-leader-election
+subjects:
+- kind: ServiceAccount
+  name: {{ template "consul.fullname" . }}-controller
+{{- end }}
diff --git a/charts/consul/templates/controller-mutatingwebhookconfiguration.yaml b/charts/consul/templates/controller-mutatingwebhookconfiguration.yaml
new file mode 100644
index 0000000000..bf31ea862f
--- /dev/null
+++ b/charts/consul/templates/controller-mutatingwebhookconfiguration.yaml
@@ -0,0 +1,224 @@
+{{- if .Values.controller.enabled }}
+apiVersion: admissionregistration.k8s.io/v1
+kind: MutatingWebhookConfiguration
+metadata:
+  name: {{ template "consul.fullname" . }}-controller
+  namespace: {{ .Release.Namespace }}
+  labels:
+    app: {{ template "consul.name" . }}
+    chart: {{ template "consul.chart" . }}
+    heritage: {{ .Release.Service }}
+    release: {{ .Release.Name }}
+    component: controller
+webhooks:
+- clientConfig:
+    service:
+      name: {{ template "consul.fullname" . }}-controller-webhook
+      namespace: {{ .Release.Namespace }}
+      path: /mutate-v1alpha1-proxydefaults
+  failurePolicy: Fail
+  admissionReviewVersions:
+  - "v1beta1"
+  - "v1"
+  name: mutate-proxydefaults.consul.hashicorp.com
+  rules:
+  - apiGroups:
+    - consul.hashicorp.com
+    apiVersions:
+    - v1alpha1
+    operations:
+    - CREATE
+    - UPDATE
+    resources:
+    - proxydefaults
+  sideEffects: None
+- clientConfig:
+    service:
+      name: {{ template "consul.fullname" . }}-controller-webhook
+      namespace: {{ .Release.Namespace }}
+      path: /mutate-v1alpha1-mesh
+  failurePolicy: Fail
+  admissionReviewVersions:
+  - "v1beta1"
+  - "v1"
+  name: mutate-mesh.consul.hashicorp.com
+  rules:
+  - apiGroups:
+    - consul.hashicorp.com
+    apiVersions:
+    - v1alpha1
+    operations:
+    - CREATE
+    - UPDATE
+    resources:
+    - meshes
+  sideEffects: None
+- clientConfig:
+    service:
+      name: {{ template "consul.fullname" . }}-controller-webhook
+      namespace: {{ .Release.Namespace }}
+      path: /mutate-v1alpha1-servicedefaults
+  failurePolicy: Fail
+  admissionReviewVersions:
+  - "v1beta1"
+  - "v1"
+  name: mutate-servicedefaults.consul.hashicorp.com
+  rules:
+  - apiGroups:
+    - consul.hashicorp.com
+    apiVersions:
+    - v1alpha1
+    operations:
+    - CREATE
+    - UPDATE
+    resources:
+    - servicedefaults
+  sideEffects: None
+- clientConfig:
+    service:
+      name: {{ template "consul.fullname" . }}-controller-webhook
+      namespace: {{ .Release.Namespace }}
+      path: /mutate-v1alpha1-serviceresolver
+  failurePolicy: Fail
+  admissionReviewVersions:
+  - "v1beta1"
+  - "v1"
+  name: mutate-serviceresolver.consul.hashicorp.com
+  rules:
+  - apiGroups:
+    - consul.hashicorp.com
+    apiVersions:
+    - v1alpha1
+    operations:
+    - CREATE
+    - UPDATE
+    resources:
+    - serviceresolvers
+  sideEffects: None
+- clientConfig:
+    service:
+      name: {{ template "consul.fullname" . }}-controller-webhook
+      namespace: {{ .Release.Namespace }}
+      path: /mutate-v1alpha1-servicerouter
+  failurePolicy: Fail
+  admissionReviewVersions:
+  - "v1beta1"
+  - "v1"
+  name: mutate-servicerouter.consul.hashicorp.com
+  rules:
+  - apiGroups:
+    - consul.hashicorp.com
+    apiVersions:
+    - v1alpha1
+    operations:
+    - CREATE
+    - UPDATE
+    resources:
+    - servicerouters
+  sideEffects: None
+- clientConfig:
+    service:
+      name: {{ template "consul.fullname" . }}-controller-webhook
+      namespace: {{ .Release.Namespace }}
+      path: /mutate-v1alpha1-servicesplitter
+  failurePolicy: Fail
+  admissionReviewVersions:
+  - "v1beta1"
+  - "v1"
+  name: mutate-servicesplitter.consul.hashicorp.com
+  rules:
+  - apiGroups:
+    - consul.hashicorp.com
+    apiVersions:
+    - v1alpha1
+    operations:
+    - CREATE
+    - UPDATE
+    resources:
+    - servicesplitters
+  sideEffects: None
+- clientConfig:
+    service:
+      name: {{ template "consul.fullname" . }}-controller-webhook
+      namespace: {{ .Release.Namespace }}
+      path: /mutate-v1alpha1-serviceintentions
+  failurePolicy: Fail
+  admissionReviewVersions:
+  - "v1beta1"
+  - "v1"
+  name: mutate-serviceintentions.consul.hashicorp.com
+  rules:
+  - apiGroups:
+    - consul.hashicorp.com
+    apiVersions:
+    - v1alpha1
+    operations:
+    - CREATE
+    - UPDATE
+    resources:
+    - serviceintentions
+  sideEffects: None
+- clientConfig:
+    service:
+      name: {{ template "consul.fullname" . }}-controller-webhook
+      namespace: {{ .Release.Namespace }}
+      path: /mutate-v1alpha1-ingressgateway
+  failurePolicy: Fail
+  admissionReviewVersions:
+  - "v1beta1"
+  - "v1"
+  name: mutate-ingressgateway.consul.hashicorp.com
+  rules:
+  - apiGroups:
+    - consul.hashicorp.com
+    apiVersions:
+    - v1alpha1
+    operations:
+    - CREATE
+    - UPDATE
+    resources:
+    - ingressgateways
+  sideEffects: None
+- clientConfig:
+    service:
+      name: {{ template "consul.fullname" . }}-controller-webhook
+      namespace: {{ .Release.Namespace }}
+      path: /mutate-v1alpha1-terminatinggateway
+  failurePolicy: Fail
+  admissionReviewVersions:
+  - "v1beta1"
+  - "v1"
+  name: mutate-terminatinggateway.consul.hashicorp.com
+  rules:
+  - apiGroups:
+    - consul.hashicorp.com
+    apiVersions:
+    - v1alpha1
+    operations:
+    - CREATE
+    - UPDATE
+    resources:
+    - terminatinggateways
+  sideEffects: None
+- clientConfig:
+    service:
+      name: {{ template "consul.fullname" . }}-controller-webhook
+      namespace: {{ .Release.Namespace }}
+      path: /mutate-v1alpha1-exportedservices
+  failurePolicy: Fail
+  admissionReviewVersions:
+  - "v1beta1"
+  - "v1"
+  name: mutate-exportedservices.consul.hashicorp.com
+  rules:
+  - apiGroups:
+    - consul.hashicorp.com
+    apiVersions:
+    - v1alpha1
+    operations:
+    - CREATE
+    - UPDATE
+    resources:
+    - exportedservices
+  sideEffects: None
+{{- end }}
diff --git a/charts/consul/templates/controller-podsecuritypolicy.yaml b/charts/consul/templates/controller-podsecuritypolicy.yaml
new file mode 100644
index 0000000000..e774faaa95
--- /dev/null
+++ b/charts/consul/templates/controller-podsecuritypolicy.yaml
@@ -0,0 +1,40 @@
+{{- if and .Values.controller.enabled .Values.global.enablePodSecurityPolicies }}
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+  name: {{ template "consul.fullname" . }}-controller
+  namespace: {{ .Release.Namespace }}
+  labels:
+    app: {{ template "consul.name" . }}
+    chart: {{ template "consul.chart" . }}
+    heritage: {{ .Release.Service }}
+    release: {{ .Release.Name }}
+    component: controller
+spec:
+  privileged: false
+  # Required to prevent escalations to root.
+  allowPrivilegeEscalation: false
+  # This is redundant with non-root + disallow privilege escalation,
+  # but we can provide it for defense in depth.
+  requiredDropCapabilities:
+    - ALL
+  # Allow core volume types.
+  volumes:
+    - 'configMap'
+    - 'emptyDir'
+    - 'projected'
+    - 'secret'
+    - 'downwardAPI'
+  hostNetwork: false
+  hostIPC: false
+  hostPID: false
+  runAsUser:
+    rule: 'RunAsAny'
+  seLinux:
+    rule: 'RunAsAny'
+  supplementalGroups:
+    rule: 'RunAsAny'
+  fsGroup:
+    rule: 'RunAsAny'
+  readOnlyRootFilesystem: false
+{{- end }}
diff --git a/charts/consul/templates/controller-serviceaccount.yaml b/charts/consul/templates/controller-serviceaccount.yaml
new file mode 100644
index 0000000000..c590efe30f
--- /dev/null
+++ b/charts/consul/templates/controller-serviceaccount.yaml
@@ -0,0 +1,23 @@
+{{- if .Values.controller.enabled }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ template "consul.fullname" . }}-controller
+  namespace: {{ .Release.Namespace }}
+  labels:
+    app: {{ template "consul.name" . }}
+    chart: {{ template "consul.chart" . }}
+    heritage: {{ .Release.Service }}
+    release: {{ .Release.Name }}
+    component: controller
+  {{- if .Values.controller.serviceAccount.annotations }}
+  annotations:
+    {{ tpl .Values.controller.serviceAccount.annotations . | nindent 4 | trim }}
+  {{- end }}
+{{- with .Values.global.imagePullSecrets }}
+imagePullSecrets:
+  {{- range . }}
+- name: {{ .name }}
+  {{- end }}
+{{- end }}
+{{- end }}
diff --git a/charts/consul/templates/controller-webhook-service.yaml b/charts/consul/templates/controller-webhook-service.yaml
new file mode 100644
index 0000000000..c6463f8807
--- /dev/null
+++ b/charts/consul/templates/controller-webhook-service.yaml
@@ -0,0 +1,23 @@
+{{- if .Values.controller.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ template "consul.fullname" . }}-controller-webhook
+  namespace: {{ .Release.Namespace }}
+  labels:
+    app: {{ template "consul.name" . }}
+    chart: {{ template "consul.chart" . }}
+    heritage: {{ .Release.Service }}
+    release: {{ .Release.Name }}
+    component: controller
+spec:
+  ports:
+  - port: 443
+    targetPort: 9443
+  selector:
+    app: {{ template "consul.name" . }}
+    chart: {{ template "consul.chart" . }}
+    heritage: {{ .Release.Service }}
+    release: {{ .Release.Name }}
+    component: controller
+{{- end }}
diff --git a/charts/consul/templates/crd-exportedservices.yaml b/charts/consul/templates/crd-exportedservices.yaml
index 007990372c..27d03dbb06 100644
--- a/charts/consul/templates/crd-exportedservices.yaml
+++ b/charts/consul/templates/crd-exportedservices.yaml
@@ -1,4 +1,4 @@
-{{- if .Values.connectInject.enabled }}
+{{- if .Values.controller.enabled }}
 ---
 apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
diff --git a/charts/consul/templates/crd-ingressgateways.yaml b/charts/consul/templates/crd-ingressgateways.yaml
index a01fafd8dd..6aeca4a2f7 100644
--- a/charts/consul/templates/crd-ingressgateways.yaml
+++ b/charts/consul/templates/crd-ingressgateways.yaml
@@ -1,4 +1,4 @@
-{{- if .Values.connectInject.enabled }}
+{{- if .Values.controller.enabled }}
 ---
 apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
diff --git a/charts/consul/templates/crd-meshes.yaml b/charts/consul/templates/crd-meshes.yaml
index 2e33eb9653..b795e48d33 100644
--- a/charts/consul/templates/crd-meshes.yaml
+++ b/charts/consul/templates/crd-meshes.yaml
@@ -1,4 +1,4 @@
-{{- if .Values.connectInject.enabled }}
+{{- if .Values.controller.enabled }}
 ---
 apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
@@ -63,18 +63,6 @@ spec:
               required:
               - sanitizeXForwardedClientCert
              type: object
-            peering:
-              description: Peering defines the peering configuration for the service
-                mesh.
-              properties:
-                peerThroughMeshGateways:
-                  description: PeerThroughMeshGateways determines whether peering
-                    traffic between control planes should flow through mesh gateways.
-                    If enabled, Consul servers will advertise mesh gateway addresses
-                    as their own. Additionally, mesh gateways will configure themselves
-                    to expose the local servers using a peering-specific SNI.
-                  type: boolean
-              type: object
            tls:
              description: TLS defines the TLS configuration for the service mesh.
              properties:
diff --git a/charts/consul/templates/crd-proxydefaults.yaml b/charts/consul/templates/crd-proxydefaults.yaml
index 749f2e4257..947bdc8856 100644
--- a/charts/consul/templates/crd-proxydefaults.yaml
+++ b/charts/consul/templates/crd-proxydefaults.yaml
@@ -1,4 +1,4 @@
-{{- if .Values.connectInject.enabled }}
+{{- if .Values.controller.enabled }}
 ---
 apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
@@ -57,60 +57,12 @@ spec:
      spec:
        description: ProxyDefaultsSpec defines the desired state of ProxyDefaults.
        properties:
-          accessLogs:
-            description: AccessLogs controls all envoy instances' access logging
-              configuration.
-            properties:
-              disableListenerLogs:
-                description: DisableListenerLogs turns off just listener logs
-                  for connections rejected by Envoy because they don't have a
-                  matching listener filter.
-                type: boolean
-              enabled:
-                description: Enabled turns on all access logging
-                type: boolean
-              jsonFormat:
-                description: 'JSONFormat is a JSON-formatted string of an Envoy
-                  access log format dictionary. See for more info on formatting:
-                  https://www.envoyproxy.io/docs/envoy/latest/configuration/observability/access_log/usage#format-dictionaries
-                  Defining JSONFormat and TextFormat is invalid.'
-                type: string
-              path:
-                description: Path is the output file to write logs for file-type
-                  logging
-                type: string
-              textFormat:
-                description: 'TextFormat is a representation of Envoy access logs
-                  format. See for more info on formatting: https://www.envoyproxy.io/docs/envoy/latest/configuration/observability/access_log/usage#format-strings
-                  Defining JSONFormat and TextFormat is invalid.'
-                type: string
-              type:
-                description: Type selects the output for logs one of "file", "stderr".
-                  "stdout"
-                type: string
-            type: object
          config:
            description: Config is an arbitrary map of configuration values used
              by Connect proxies. Any values that your proxy allows can be configured
              globally here. Supports JSON config values. See https://www.consul.io/docs/connect/proxies/envoy#configuration-formatting
            type: object
            x-kubernetes-preserve-unknown-fields: true
-          envoyExtensions:
-            description: EnvoyExtensions are a list of extensions to modify Envoy
-              proxy configuration.
-            items:
-              description: EnvoyExtension has configuration for an extension that
-                patches Envoy resources.
-              properties:
-                arguments:
-                  type: object
-                  x-kubernetes-preserve-unknown-fields: true
-                name:
-                  type: string
-                required:
-                  type: boolean
-              type: object
-            type: array
          expose:
            description: Expose controls the default expose path configuration
              for Envoy.
diff --git a/charts/consul/templates/crd-servicedefaults.yaml b/charts/consul/templates/crd-servicedefaults.yaml
index 5c6ecc7476..1cf42673a8 100644
--- a/charts/consul/templates/crd-servicedefaults.yaml
+++ b/charts/consul/templates/crd-servicedefaults.yaml
@@ -1,4 +1,4 @@
-{{- if .Values.connectInject.enabled }}
+{{- if .Values.controller.enabled }}
 ---
 apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
@@ -57,12 +57,6 @@ spec:
        description: ServiceDefaultsSpec defines the desired state of ServiceDefaults.
        properties:
-          balanceInboundConnections:
-            description: BalanceInboundConnections sets the strategy for allocating
-              inbound connections to the service across proxy threads. The only
-              supported value is exact_balance. By default, no connection balancing
-              is used. Refer to the Envoy Connection Balance config for details.
-            type: string
          destination:
            description: Destination is an address(es)/port combination that represents
              an endpoint outside the mesh. This is only valid when the mesh is
@@ -82,22 +76,6 @@ spec:
                format: int32
                type: integer
            type: object
-          envoyExtensions:
-            description: EnvoyExtensions are a list of extensions to modify Envoy
-              proxy configuration.
-            items:
-              description: EnvoyExtension has configuration for an extension that
-                patches Envoy resources.
-              properties:
-                arguments:
-                  type: object
-                  x-kubernetes-preserve-unknown-fields: true
-                name:
-                  type: string
-                required:
-                  type: boolean
-              type: object
-            type: array
          expose:
            description: Expose controls the default expose path configuration
              for Envoy.
@@ -136,15 +114,15 @@ spec:
                with an external system.
              type: string
          localConnectTimeoutMs:
-            description: LocalConnectTimeoutMs is the number of milliseconds allowed
-              to make connections to the local application instance before timing
-              out. Defaults to 5000.
+            description: The number of milliseconds allowed to make connections
+              to the local application instance before timing out. Defaults to
+              5000.
            type: integer
          localRequestTimeoutMs:
-            description: LocalRequestTimeoutMs is the timeout for HTTP requests
-              to the local application instance in milliseconds. Applies to HTTP-based
-              protocols only. If not specified, inherits the Envoy default for
-              route timeouts (15s).
+            description: In milliseconds, the timeout for HTTP requests to the
+              local application instance. Applies to HTTP-based protocols only.
+              If not specified, inherits the Envoy default for route timeouts
+              (15s).
            type: integer
          maxInboundConnections:
            description: MaxInboundConnections is the maximum number of concurrent
@@ -258,15 +236,15 @@ spec:
                      type: string
                    type: object
                  name:
-                    description: Name is only accepted within service ServiceDefaultsSpec.UpstreamConfig.Overrides
+                    description: Name is only accepted within a service-defaults
                      config entry.
                    type: string
                  namespace:
-                    description: Namespace is only accepted within service ServiceDefaultsSpec.UpstreamConfig.Overrides
+                    description: Namespace is only accepted within a service-defaults
                      config entry.
                    type: string
                  partition:
-                    description: Partition is only accepted within service ServiceDefaultsSpec.UpstreamConfig.Overrides
+                    description: Partition is only accepted within a service-defaults
                      config entry.
                    type: string
                  passiveHealthCheck:
@@ -291,10 +269,6 @@ spec:
                        format: int32
                        type: integer
                    type: object
-                  peer:
-                    description: Peer is only accepted within service ServiceDefaultsSpec.UpstreamConfig.Overrides
-                      config entry.
-                    type: string
                  protocol:
                    description: Protocol describes the upstream's service protocol.
                      Valid values are "tcp", "http" and "grpc". Anything else
@@ -361,15 +335,15 @@ spec:
                      type: string
                    type: object
                  name:
-                    description: Name is only accepted within service ServiceDefaultsSpec.UpstreamConfig.Overrides
+                    description: Name is only accepted within a service-defaults
                      config entry.
                    type: string
                  namespace:
-                    description: Namespace is only accepted within service ServiceDefaultsSpec.UpstreamConfig.Overrides
+                    description: Namespace is only accepted within a service-defaults
                      config entry.
                    type: string
                  partition:
-                    description: Partition is only accepted within service ServiceDefaultsSpec.UpstreamConfig.Overrides
+                    description: Partition is only accepted within a service-defaults
                      config entry.
                    type: string
                  passiveHealthCheck:
@@ -396,10 +370,6 @@ spec:
                        format: int32
                        type: integer
                    type: object
-                  peer:
-                    description: Peer is only accepted within service ServiceDefaultsSpec.UpstreamConfig.Overrides
-                      config entry.
-                    type: string
                  protocol:
                    description: Protocol describes the upstream's service protocol.
                      Valid values are "tcp", "http" and "grpc". Anything else
diff --git a/charts/consul/templates/crd-serviceintentions.yaml b/charts/consul/templates/crd-serviceintentions.yaml
index cdbb5413b0..827fcfe2a9 100644
--- a/charts/consul/templates/crd-serviceintentions.yaml
+++ b/charts/consul/templates/crd-serviceintentions.yaml
@@ -1,4 +1,4 @@
-{{- if .Values.connectInject.enabled }}
+{{- if .Values.controller.enabled }}
 ---
 apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
diff --git a/charts/consul/templates/crd-serviceresolvers.yaml b/charts/consul/templates/crd-serviceresolvers.yaml
index e058052e97..c06063f318 100644
--- a/charts/consul/templates/crd-serviceresolvers.yaml
+++ b/charts/consul/templates/crd-serviceresolvers.yaml
@@ -1,4 +1,4 @@
-{{- if .Values.connectInject.enabled }}
+{{- if .Values.controller.enabled }}
 ---
 apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
@@ -88,37 +88,6 @@ spec:
                    service to resolve as the failover group of instances. If empty
                    the default subset for the requested service is used.
                  type: string
-                targets:
-                  description: Targets specifies a fixed list of failover targets
-                    to try during failover.
-                  items:
-                    properties:
-                      datacenter:
-                        description: Datacenter specifies the datacenter to try
-                          during failover.
- type: string - namespace: - description: Namespace specifies the namespace to try - during failover. - type: string - partition: - description: Partition specifies the partition to try - during failover. - type: string - peer: - description: Peer specifies the name of the cluster peer - to try during failover. - type: string - service: - description: Service specifies the name of the service - to try during failover. - type: string - serviceSubset: - description: ServiceSubset specifies the service subset - to try during failover. - type: string - type: object - type: array type: object description: Failover controls when and how to reroute traffic to an alternate pool of service instances. The map is keyed by the @@ -228,10 +197,6 @@ spec: service from instead of the current partition. If empty the current partition is assumed. type: string - peer: - description: Peer is the name of the cluster peer to resolve the - service from instead of the current one. - type: string service: description: Service is a service to resolve instead of the current service. diff --git a/charts/consul/templates/crd-servicerouters.yaml b/charts/consul/templates/crd-servicerouters.yaml index 5052facc06..3d6aa58dae 100644 --- a/charts/consul/templates/crd-servicerouters.yaml +++ b/charts/consul/templates/crd-servicerouters.yaml @@ -1,4 +1,4 @@ -{{- if .Values.connectInject.enabled }} +{{- if .Values.controller.enabled }} --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition diff --git a/charts/consul/templates/crd-servicesplitters.yaml b/charts/consul/templates/crd-servicesplitters.yaml index a2af050c3d..532ca209be 100644 --- a/charts/consul/templates/crd-servicesplitters.yaml +++ b/charts/consul/templates/crd-servicesplitters.yaml @@ -1,4 +1,4 @@ -{{- if .Values.connectInject.enabled }} +{{- if .Values.controller.enabled }} --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition diff --git a/charts/consul/templates/crd-terminatinggateways.yaml b/charts/consul/templates/crd-terminatinggateways.yaml index 583c218be8..50f3d8b673 100644 --- a/charts/consul/templates/crd-terminatinggateways.yaml +++ b/charts/consul/templates/crd-terminatinggateways.yaml @@ -1,4 +1,4 @@ -{{- if .Values.connectInject.enabled }} +{{- if .Values.controller.enabled }} --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition diff --git a/charts/consul/templates/create-federation-secret-job.yaml b/charts/consul/templates/create-federation-secret-job.yaml index 4f83a1f82a..daf97bb121 100644 --- a/charts/consul/templates/create-federation-secret-job.yaml +++ b/charts/consul/templates/create-federation-secret-job.yaml @@ -2,8 +2,6 @@ {{- if not .Values.global.federation.enabled }}{{ fail "global.federation.enabled must be true when global.federation.createFederationSecret is true" }}{{ end }} {{- if and (not .Values.global.acls.createReplicationToken) .Values.global.acls.manageSystemACLs }}{{ fail "global.acls.createReplicationToken must be true when global.acls.manageSystemACLs is true because the federation secret must include the replication token" }}{{ end }} {{- if eq (int .Values.server.updatePartition) 0 }} -{{ template "consul.validateRequiredCloudSecretsExist" . }} -{{ template "consul.validateCloudSecretKeys" . 
}} apiVersion: batch/v1 kind: Job metadata: @@ -74,6 +72,13 @@ spec: items: - key: {{ default "tls.key" .Values.global.tls.caKey.secretKey }} path: tls.key + {{- /* We must include both auto-encrypt and server CAs because we make API calls to the local + Consul client (requiring the auto-encrypt CA) but the secret generated must include the server CA */}} + {{- if .Values.global.tls.enableAutoEncrypt }} + - name: consul-auto-encrypt-ca-cert + emptyDir: + medium: "Memory" + {{- end }} {{- if (and .Values.global.gossipEncryption.secretName .Values.global.gossipEncryption.secretKey) }} - name: gossip-encryption-key secret: @@ -90,6 +95,11 @@ spec: path: gossip.key {{- end }} + {{- if .Values.global.tls.enableAutoEncrypt }} + initContainers: + {{- include "consul.getAutoEncryptClientCA" . | nindent 6 }} + {{- end }} + containers: - name: create-federation-secret image: "{{ .Values.global.imageK8S }}" @@ -98,10 +108,18 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP - name: CONSUL_HTTP_ADDR - value: "https://{{ template "consul.fullname" . }}-server.{{ .Release.Namespace }}.svc:8501" + value: https://$(HOST_IP):8501 - name: CONSUL_CACERT + {{- if .Values.global.tls.enableAutoEncrypt }} + value: /consul/tls/client/ca/tls.crt + {{- else }} value: /consul/tls/ca/tls.crt + {{- end }} volumeMounts: - name: consul-ca-cert mountPath: /consul/tls/ca @@ -109,6 +127,11 @@ spec: - name: consul-ca-key mountPath: /consul/tls/server/ca readOnly: true + {{- if .Values.global.tls.enableAutoEncrypt }} + - name: consul-auto-encrypt-ca-cert + mountPath: /consul/tls/client/ca + readOnly: true + {{- end }} {{- if (or .Values.global.gossipEncryption.autoGenerate (and .Values.global.gossipEncryption.secretName .Values.global.gossipEncryption.secretKey)) }} - name: gossip-encryption-key mountPath: /consul/gossip diff --git a/charts/consul/templates/dns-service.yaml b/charts/consul/templates/dns-service.yaml index 5bb446bc19..0fc66595e1 100644 --- a/charts/consul/templates/dns-service.yaml +++ b/charts/consul/templates/dns-service.yaml @@ -1,4 +1,4 @@ -{{- if (or (and (ne (.Values.dns.enabled | toString) "-") .Values.dns.enabled) (and (eq (.Values.dns.enabled | toString) "-") .Values.connectInject.transparentProxy.defaultEnabled)) }} +{{- if (or (and (ne (.Values.dns.enabled | toString) "-") .Values.dns.enabled) (and (eq (.Values.dns.enabled | toString) "-") .Values.global.enabled)) }} # Service for Consul DNS.
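For reference, the gating expression above renders the DNS Service when dns.enabled is set to true outright, or when it is left at the "-" sentinel and global.enabled is true. A minimal values.yaml sketch of the inherited case (values are illustrative, not verified chart defaults):

    # DNS Service is rendered because dns.enabled inherits from global.enabled.
    global:
      enabled: true
    dns:
      enabled: "-"   # "-" means: follow global.enabled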
apiVersion: v1 kind: Service diff --git a/charts/consul/templates/expose-servers-service.yaml b/charts/consul/templates/expose-servers-service.yaml index d86cec9042..fcfaf8038f 100644 --- a/charts/consul/templates/expose-servers-service.yaml +++ b/charts/consul/templates/expose-servers-service.yaml @@ -1,5 +1,5 @@ {{- $serverEnabled := (or (and (ne (.Values.server.enabled | toString) "-") .Values.server.enabled) (and (eq (.Values.server.enabled | toString) "-") .Values.global.enabled)) -}} -{{- $serverExposeServiceEnabled := (or (and (ne (.Values.server.exposeService.enabled | toString) "-") .Values.server.exposeService.enabled) (and (eq (.Values.server.exposeService.enabled | toString) "-") .Values.global.adminPartitions.enabled)) -}} +{{- $serverExposeServiceEnabled := (or (and (ne (.Values.server.exposeService.enabled | toString) "-") .Values.server.exposeService.enabled) (and (eq (.Values.server.exposeService.enabled | toString) "-") (or .Values.global.peering.enabled .Values.global.adminPartitions.enabled))) -}} {{- if (and $serverEnabled $serverExposeServiceEnabled) }} # Service with an external IP to reach Consul servers. @@ -52,7 +52,7 @@ spec: {{- end }} - name: grpc port: 8502 - targetPort: 8502 + targetPort: 8503 {{ if (and (eq .Values.server.exposeService.type "NodePort") .Values.server.exposeService.nodePort.grpc) }} nodePort: {{ .Values.server.exposeService.nodePort.grpc }} {{- end }} diff --git a/charts/consul/templates/ingress-gateways-deployment.yaml b/charts/consul/templates/ingress-gateways-deployment.yaml index 4f72031855..ed5724389e 100644 --- a/charts/consul/templates/ingress-gateways-deployment.yaml +++ b/charts/consul/templates/ingress-gateways-deployment.yaml @@ -1,9 +1,9 @@ {{- if .Values.ingressGateways.enabled }} {{- if not .Values.connectInject.enabled }}{{ fail "connectInject.enabled must be true" }}{{ end -}} +{{- if not .Values.client.grpc }}{{ fail "client.grpc must be true" }}{{ end -}} {{- if and .Values.global.adminPartitions.enabled (not .Values.global.enableConsulNamespaces) }}{{ fail "global.enableConsulNamespaces must be true if global.adminPartitions.enabled=true" }}{{ end }} +{{- if not (or (and (ne (.Values.client.enabled | toString) "-") .Values.client.enabled) (and (eq (.Values.client.enabled | toString) "-") .Values.global.enabled)) }}{{ fail "clients must be enabled" }}{{ end -}} {{- if .Values.global.lifecycleSidecarContainer }}{{ fail "global.lifecycleSidecarContainer has been renamed to global.consulSidecarContainer. Please set values using global.consulSidecarContainer." }}{{ end }} -{{ template "consul.validateRequiredCloudSecretsExist" . }} -{{ template "consul.validateCloudSecretKeys" . }} {{- $root := . 
}} {{- $defaults := .Values.ingressGateways.defaults }} @@ -68,50 +68,10 @@ spec: release: {{ $root.Release.Name }} component: ingress-gateway ingress-gateway-name: {{ template "consul.fullname" $root }}-{{ .name }} - consul.hashicorp.com/connect-inject-managed-by: consul-k8s-endpoints-controller {{- if $root.Values.global.extraLabels }} {{- toYaml $root.Values.global.extraLabels | nindent 8 }} {{- end }} annotations: - "consul.hashicorp.com/connect-inject": "false" - "consul.hashicorp.com/gateway-kind": "ingress-gateway" - "consul.hashicorp.com/gateway-consul-service-name": "{{ .name }}" - {{- if $root.Values.global.enableConsulNamespaces }} - "consul.hashicorp.com/gateway-namespace": {{ (default $defaults.consulNamespace .consulNamespace) }} - {{- end }} - "consul.hashicorp.com/gateway-wan-address-source": "Service" - {{- $serviceType := (default $defaults.service.type $service.type) }} - {{- if (eq $serviceType "NodePort") }} - {{- if $service.ports }} - {{- $firstPort := first $service.ports}} - {{- if $firstPort.nodePort }} - "consul.hashicorp.com/gateway-wan-port": "{{ $firstPort.nodePort }}" - {{- else }}{{ fail "if ingressGateways .service.type=NodePort and defining ingressGateways.gateways.service.ports, the first port entry must include a nodePort" }} - {{- end }} - {{- else if $defaults.service.ports }} - {{- $firstDefaultPort := first $defaults.service.ports}} - {{- if $firstDefaultPort.nodePort }} - "consul.hashicorp.com/gateway-wan-port": "{{ $firstDefaultPort.nodePort }}" - {{- else }}{{ fail "if ingressGateways .service.type=NodePort and using ingressGateways.defaults.service.ports, the first port entry must include a nodePort" }} - {{- end }} - {{- else }}{{ fail "if ingressGateways .service.type=NodePort, the first port entry in either the defaults or specific gateway must include a nodePort" }} - {{- end }} - {{- else }} - {{- if $service.ports }} - {{- $firstPort := first $service.ports}} - {{- if $firstPort.port }} - "consul.hashicorp.com/gateway-wan-port": "{{ $firstPort.port }}" - {{- else }}{{ fail "if ingressGateways .service.type is not NodePort and defining ingressGateways.gateways.service.ports, the first port entry must include a port" }} - {{- end }} - {{- else if $defaults.service.ports }} - {{- $firstDefaultPort := first $defaults.service.ports}} - {{- if $firstDefaultPort.port }} - "consul.hashicorp.com/gateway-wan-port": "{{ $firstDefaultPort.port }}" - {{- else }}{{ fail "if ingressGateways .service.type is not NodePort and using ingressGateways.defaults.service.ports, the first port entry must include a port" }} - {{- end }} - {{- else }}{{ fail "if ingressGateways .service.type is not NodePort, the first port entry in either the defaults or specific gateway must include a port" }} - {{- end }} - {{- end }} {{- if (and $root.Values.global.secretsBackend.vault.enabled $root.Values.global.tls.enabled) }} "vault.hashicorp.com/agent-init-first": "true" "vault.hashicorp.com/agent-inject": "true" @@ -126,6 +86,7 @@ spec: {{ tpl $root.Values.global.secretsBackend.vault.agentAnnotations $root | nindent 8 | trim }} {{- end }} {{- end }} + "consul.hashicorp.com/connect-inject": "false" {{- if (and $root.Values.global.metrics.enabled $root.Values.global.metrics.enableGatewayMetrics) }} "prometheus.io/scrape": "true" "prometheus.io/path": "/metrics" @@ -154,202 +115,374 @@ spec: terminationGracePeriodSeconds: {{ default $defaults.terminationGracePeriodSeconds .terminationGracePeriodSeconds }} serviceAccountName: {{ template "consul.fullname" $root }}-{{ .name }} 
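Whether enforced through the pod annotations removed above or the ingress-gateway-init script added further below, the NodePort rule stays the same: the first entry under service.ports must carry a nodePort, which becomes the gateway's WAN port. A hedged values.yaml sketch (port numbers are hypothetical):

    ingressGateways:
      enabled: true
      defaults:
        service:
          type: NodePort
          ports:
            - port: 8080        # first entry must also set nodePort,
              nodePort: 32000   # otherwise templating fails as above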
volumes: - - name: consul-service - emptyDir: - medium: "Memory" - {{- if $root.Values.global.tls.enabled }} - {{- if not (or (and $root.Values.externalServers.enabled $root.Values.externalServers.useSystemRoots) ($root.Values.global.secretsBackend.vault.enabled)) }} - - name: consul-ca-cert - secret: - {{- if $root.Values.global.tls.caCert.secretName }} - secretName: {{ $root.Values.global.tls.caCert.secretName }} - {{- else }} - secretName: {{ template "consul.fullname" $root }}-ca-cert - {{- end }} - items: - - key: {{ default "tls.crt" $root.Values.global.tls.caCert.secretKey }} - path: tls.crt - {{- end }} - {{- end }} - initContainers: - # ingress-gateway-init registers the ingress gateway service with Consul. - - name: ingress-gateway-init - image: {{ $root.Values.global.imageK8S }} - env: - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - {{- include "consul.consulK8sConsulServerEnvVars" $root | nindent 8 }} - {{- if $root.Values.global.enableConsulNamespaces }} - - name: CONSUL_NAMESPACE - value: {{ (default $defaults.consulNamespace .consulNamespace) }} - {{- end }} - {{- if $root.Values.global.acls.manageSystemACLs }} - - name: CONSUL_LOGIN_AUTH_METHOD - value: {{ template "consul.fullname" $root }}-k8s-component-auth-method - - name: CONSUL_LOGIN_DATACENTER - value: {{ $root.Values.global.datacenter }} - - name: CONSUL_LOGIN_META - value: "component=ingress-gateway,pod=$(NAMESPACE)/$(POD_NAME)" - {{- end }} - - name: CONSUL_NODE_NAME - value: $(NODE_NAME)-virtual - command: - - "/bin/sh" - - "-ec" - - | - consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${NAMESPACE} \ - -gateway-kind="ingress-gateway" \ - -proxy-id-file=/consul/service/proxy-id \ - -service-name={{ template "consul.fullname" $root }}-{{ .name }} \ - -log-level={{ default $root.Values.global.logLevel }} \ - -log-json={{ $root.Values.global.logJSON }} - volumeMounts: + - name: consul-bin + emptyDir: {} - name: consul-service - mountPath: /consul/service + emptyDir: + medium: "Memory" {{- if $root.Values.global.tls.enabled }} - {{- if not (or (and $root.Values.externalServers.enabled $root.Values.externalServers.useSystemRoots) ($root.Values.global.secretsBackend.vault.enabled)) }} + {{- if not (and $root.Values.externalServers.enabled $root.Values.externalServers.useSystemRoots) }} - name: consul-ca-cert - mountPath: /consul/tls/ca - readOnly: true + secret: + {{- if $root.Values.global.tls.caCert.secretName }} + secretName: {{ $root.Values.global.tls.caCert.secretName }} + {{- else }} + secretName: {{ template "consul.fullname" $root }}-ca-cert + {{- end }} + items: + - key: {{ default "tls.crt" $root.Values.global.tls.caCert.secretKey }} + path: tls.crt + {{- end }} + {{- if $root.Values.global.tls.enableAutoEncrypt }} + - name: consul-auto-encrypt-ca-cert + emptyDir: + medium: "Memory" {{- end }} {{- end }} - resources: - requests: - memory: "50Mi" - cpu: "50m" - limits: - memory: "50Mi" - cpu: "50m" + initContainers: + # We use the Envoy image as our base image so we use an init container to + # copy the Consul binary to a shared directory that can be used when + # starting Envoy. 
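The consul-bin emptyDir declared above is filled by the copy-consul-bin init container that follows, so the Envoy-based gateway image can run the Consul CLI without shipping it. That init container's resources are read from initCopyConsulContainer; a hedged values.yaml sketch (figures are illustrative):

    ingressGateways:
      defaults:
        initCopyConsulContainer:
          resources:
            requests:
              memory: "25Mi"
              cpu: "20m"
            limits:
              memory: "25Mi"
              cpu: "20m"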
+ - name: copy-consul-bin + image: {{ $root.Values.global.image | quote }} + command: + - cp + - /bin/consul + - /consul-bin/consul + volumeMounts: + - name: consul-bin + mountPath: /consul-bin + {{- $initContainer := .initCopyConsulContainer }} + {{- if (or $initContainer $defaults.initCopyConsulContainer) }} + {{- if (default $defaults.initCopyConsulContainer.resources $initContainer.resources) }} + resources: {{ toYaml (default $defaults.initCopyConsulContainer.resources $initContainer.resources) | nindent 12 }} + {{- end }} + {{- end }} + {{- if (and $root.Values.global.tls.enabled $root.Values.global.tls.enableAutoEncrypt) }} + {{- include "consul.getAutoEncryptClientCA" $root | nindent 8 }} + {{- end }} + # ingress-gateway-init registers the ingress gateway service with Consul. + - name: ingress-gateway-init + image: {{ $root.Values.global.imageK8S }} + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + {{- if $root.Values.global.tls.enabled }} + - name: CONSUL_HTTP_ADDR + value: https://$(HOST_IP):8501 + - name: CONSUL_CACERT + value: /consul/tls/ca/tls.crt + {{- else }} + - name: CONSUL_HTTP_ADDR + value: http://$(HOST_IP):8500 + {{- end }} + command: + - "/bin/sh" + - "-ec" + - | + {{- if $root.Values.global.acls.manageSystemACLs }} + consul-k8s-control-plane acl-init \ + -component-name=ingress-gateway/{{ template "consul.fullname" $root }}-{{ .name }} \ + -acl-auth-method={{ template "consul.fullname" $root }}-k8s-component-auth-method \ + {{- if $root.Values.global.adminPartitions.enabled }} + -partition={{ $root.Values.global.adminPartitions.name }} \ + {{- end }} + -token-sink-file=/consul/service/acl-token \ + -consul-api-timeout={{ $root.Values.global.consulAPITimeout }} \ + -log-level={{ default $root.Values.global.logLevel }} \ + -log-json={{ $root.Values.global.logJSON }} + {{ end }} + + {{- $serviceType := (default $defaults.service.type $service.type) }} + {{- if (eq $serviceType "NodePort") }} + WAN_ADDR="${HOST_IP}" + {{- else if (or (eq $serviceType "ClusterIP") (eq $serviceType "LoadBalancer")) }} + consul-k8s-control-plane service-address \ + -log-level={{ $root.Values.global.logLevel }} \ + -log-json={{ $root.Values.global.logJSON }} \ + -k8s-namespace={{ $root.Release.Namespace }} \ + -name={{ template "consul.fullname" $root }}-{{ .name }} \ + -output-file=/tmp/address.txt + WAN_ADDR="$(cat /tmp/address.txt)" + {{- else }} + {{- fail "currently set ingressGateway value service.type is not supported" }} + {{- end }} + + {{- if (eq $serviceType "NodePort") }} + {{- if $service.ports }} + {{- $firstPort := first $service.ports}} + {{- if $firstPort.nodePort }} + WAN_PORT={{ $firstPort.nodePort }} + {{- else }}{{ fail "if ingressGateways .service.type=NodePort and defining ingressGateways.gateways.service.ports, the first port entry must include a nodePort" }} + {{- end }} + {{- else if $defaults.service.ports }} + {{- $firstDefaultPort := first $defaults.service.ports}} + {{- if $firstDefaultPort.nodePort }} + WAN_PORT={{ $firstDefaultPort.nodePort }} + {{- else }}{{ fail "if ingressGateways .service.type=NodePort and using ingressGateways.defaults.service.ports, the first port entry must include a nodePort" }} + {{- end }} + {{- else }}{{ fail "if ingressGateways .service.type=NodePort, the first port entry in either the defaults or specific gateway must include a nodePort" }} + {{- end }} + + {{- 
else }} + {{- if $service.ports }} + {{- $firstPort := first $service.ports}} + {{- if $firstPort.port }} + WAN_PORT={{ $firstPort.port }} + {{- else }}{{ fail "if ingressGateways .service.type is not NodePort and defining ingressGateways.gateways.service.ports, the first port entry must include a port" }} + {{- end }} + {{- else if $defaults.service.ports }} + {{- $firstDefaultPort := first $defaults.service.ports}} + {{- if $firstDefaultPort.port }} + WAN_PORT={{ $firstDefaultPort.port }} + {{- else }}{{ fail "if ingressGateways .service.type is not NodePort and using ingressGateways.defaults.service.ports, the first port entry must include a port" }} + {{- end }} + {{- else }}{{ fail "if ingressGateways .service.type is not NodePort, the first port entry in either the defaults or specific gateway must include a port" }} + {{- end }} + {{- end }} + + cat > /consul/service/service.hcl << EOF + service { + kind = "ingress-gateway" + name = "{{ .name }}" + id = "${POD_NAME}" + {{- if $root.Values.global.enableConsulNamespaces }} + namespace = "{{ (default $defaults.consulNamespace .consulNamespace) }}" + {{- end }} + {{- if $root.Values.global.adminPartitions.enabled }} + partition = "{{ $root.Values.global.adminPartitions.name }}" + {{- end }} + port = ${WAN_PORT} + address = "${WAN_ADDR}" + tagged_addresses { + lan { + address = "${POD_IP}" + port = 21000 + } + wan { + address = "${WAN_ADDR}" + port = ${WAN_PORT} + } + } + proxy { + config { + {{- if (and $root.Values.global.metrics.enabled $root.Values.global.metrics.enableGatewayMetrics) }} + envoy_prometheus_bind_addr = "${POD_IP}:20200" + {{- end }} + envoy_gateway_no_default_bind = true + envoy_gateway_bind_addresses { + all-interfaces { + address = "0.0.0.0" + } + } + } + } + checks = [ + { + name = "Ingress Gateway Listening" + interval = "10s" + tcp = "${POD_IP}:21000" + deregister_critical_service_after = "6h" + } + ] + } + EOF + + /consul-bin/consul services register \ + {{- if $root.Values.global.acls.manageSystemACLs }} + -token-file=/consul/service/acl-token \ + {{- end }} + /consul/service/service.hcl + volumeMounts: + - name: consul-service + mountPath: /consul/service + - name: consul-bin + mountPath: /consul-bin + {{- if $root.Values.global.tls.enabled }} + {{- if $root.Values.global.tls.enableAutoEncrypt }} + - name: consul-auto-encrypt-ca-cert + {{- else }} + - name: consul-ca-cert + {{- end }} + mountPath: /consul/tls/ca + readOnly: true + {{- end }} + resources: + requests: + memory: "50Mi" + cpu: "50m" + limits: + memory: "50Mi" + cpu: "50m" containers: - - name: ingress-gateway - image: {{ $root.Values.global.imageConsulDataplane | quote }} - {{- if (default $defaults.resources .resources) }} - resources: {{ toYaml (default $defaults.resources .resources) | nindent 10 }} - {{- end }} - volumeMounts: - - name: consul-service - mountPath: /consul/service - readOnly: true - {{- if $root.Values.global.tls.enabled }} - {{- if not (or (and $root.Values.externalServers.enabled $root.Values.externalServers.useSystemRoots) ($root.Values.global.secretsBackend.vault.enabled)) }} - - name: consul-ca-cert - mountPath: /consul/tls/ca - readOnly: true - {{- end }} - {{- end }} - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: DP_ENVOY_READY_BIND_ADDRESS - valueFrom: - fieldRef: - 
fieldPath: status.podIP - - name: DP_CREDENTIAL_LOGIN_META1 - value: pod=$(NAMESPACE)/$(POD_NAME) - - name: DP_CREDENTIAL_LOGIN_META2 - value: component=ingress-gateway - - name: DP_SERVICE_NODE_NAME - value: $(NODE_NAME)-virtual - command: - - consul-dataplane - args: - - -envoy-ready-bind-port=21000 - {{- if $root.Values.externalServers.enabled }} - - -addresses={{ $root.Values.externalServers.hosts | first }} - {{- else }} - - -addresses={{ template "consul.fullname" $root }}-server.{{ $root.Release.Namespace }}.svc - {{- end }} - {{- if $root.Values.externalServers.enabled }} - - -grpc-port={{ $root.Values.externalServers.grpcPort }} - {{- else }} - - -grpc-port=8502 - {{- end }} - - -proxy-service-id-path=/consul/service/proxy-id - {{- if $root.Values.global.enableConsulNamespaces }} - - -service-namespace={{ (default $defaults.consulNamespace .consulNamespace) }} - {{- end }} - {{- if and $root.Values.global.tls.enabled }} - {{- if (not (and $root.Values.externalServers.enabled $root.Values.externalServers.useSystemRoots)) }} - {{- if $root.Values.global.secretsBackend.vault.enabled }} - - -ca-certs=/vault/secrets/serverca.crt - {{- else }} - - -ca-certs=/consul/tls/ca/tls.crt - {{- end }} - {{- end }} - {{- if and $root.Values.externalServers.enabled $root.Values.externalServers.tlsServerName }} - - -tls-server-name={{ $root.Values.externalServers.tlsServerName }} - {{- else if $root.Values.global.cloud.enabled }} - - -tls-server-name=server.{{ $root.Values.global.datacenter}}.{{ $root.Values.global.domain}} - {{- end }} - {{- else }} - - -tls-disabled - {{- end }} - {{- if $root.Values.global.acls.manageSystemACLs }} - - -credential-type=login - - -login-bearer-token-path=/var/run/secrets/kubernetes.io/serviceaccount/token - - -login-auth-method={{ template "consul.fullname" $root }}-k8s-component-auth-method - {{- if $root.Values.global.adminPartitions.enabled }} - - -login-partition={{ $root.Values.global.adminPartitions.name }} - {{- end }} - {{- end }} - {{- if $root.Values.global.adminPartitions.enabled }} - - -service-partition={{ $root.Values.global.adminPartitions.name }} - {{- end }} - - -log-level={{ default $root.Values.global.logLevel }} - - -log-json={{ $root.Values.global.logJSON }} - {{- if (and $root.Values.global.metrics.enabled $root.Values.global.metrics.enableGatewayMetrics) }} - - -telemetry-prom-scrape-path=/metrics - {{- end }} - {{- if and $root.Values.externalServers.enabled $root.Values.externalServers.skipServerWatch }} - - -server-watch-disabled=true - {{- end }} - livenessProbe: - tcpSocket: - port: 21000 - failureThreshold: 3 - initialDelaySeconds: 30 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 5 - readinessProbe: - tcpSocket: - port: 21000 - failureThreshold: 3 - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 5 - ports: - - name: gateway-health - containerPort: 21000 - {{- range $index, $allPorts := (default $defaults.service.ports $service.ports) }} - - name: gateway-{{ $index }} - containerPort: {{ $allPorts.port }} - {{- end }} + - name: ingress-gateway + image: {{ $root.Values.global.imageEnvoy | quote }} + {{- if (default $defaults.resources .resources) }} + resources: {{ toYaml (default $defaults.resources .resources) | nindent 12 }} + {{- end }} + volumeMounts: + - name: consul-bin + mountPath: /consul-bin + - name: consul-service + mountPath: /consul/service + readOnly: true + {{- if $root.Values.global.tls.enabled }} + {{- if $root.Values.global.tls.enableAutoEncrypt }} + - name: 
consul-auto-encrypt-ca-cert + {{- else }} + - name: consul-ca-cert + {{- end }} + mountPath: /consul/tls/ca + readOnly: true + {{- end }} + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + {{- if $root.Values.global.acls.manageSystemACLs }} + - name: CONSUL_HTTP_TOKEN_FILE + value: "/consul/service/acl-token" + {{- end}} + {{- if $root.Values.global.tls.enabled }} + - name: CONSUL_HTTP_ADDR + value: https://$(HOST_IP):8501 + - name: CONSUL_GRPC_ADDR + value: https://$(HOST_IP):8502 + - name: CONSUL_CACERT + value: /consul/tls/ca/tls.crt + {{- else }} + - name: CONSUL_HTTP_ADDR + value: http://$(HOST_IP):8500 + - name: CONSUL_GRPC_ADDR + value: $(HOST_IP):8502 + {{- end }} + command: + - /consul-bin/consul + - connect + - envoy + - -gateway=ingress + - -proxy-id=$(POD_NAME) + - -address=$(POD_IP):21000 + {{- if $root.Values.global.enableConsulNamespaces }} + - -namespace={{ default $defaults.consulNamespace .consulNamespace }} + {{- end }} + {{- if $root.Values.global.adminPartitions.enabled }} + - -partition={{ $root.Values.global.adminPartitions.name }} + {{- end }} + livenessProbe: + tcpSocket: + port: 21000 + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + readinessProbe: + tcpSocket: + port: 21000 + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + ports: + - name: gateway-health + containerPort: 21000 + {{- range $index, $allPorts := (default $defaults.service.ports $service.ports) }} + - name: gateway-{{ $index }} + containerPort: {{ $allPorts.port }} + {{- end }} + lifecycle: + preStop: + exec: + command: + - "/bin/sh" + - "-ec" + - | + /consul-bin/consul services deregister \ + {{- if $root.Values.global.enableConsulNamespaces }} + -namespace={{ default $defaults.consulNamespace .consulNamespace }} \ + {{- end }} + {{- if $root.Values.global.adminPartitions.enabled }} + -partition={{ $root.Values.global.adminPartitions.name }} \ + {{- end }} + -id="${POD_NAME}" + {{- if $root.Values.global.acls.manageSystemACLs }} + - "/consul-bin/consul logout" + {{- end}} + + # consul-sidecar ensures the ingress gateway is always registered with + # the local Consul agent, even if it loses the initial registration. 
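As the comment above notes, consul-sidecar keeps the gateway registered with the local agent from the service.hcl written by the init container, even if the initial registration is lost. Its resource settings come from global.consulSidecarContainer (the rename of global.lifecycleSidecarContainer that this template validates at the top). A hedged values.yaml sketch (figures are illustrative):

    global:
      consulSidecarContainer:
        resources:
          requests:
            memory: "25Mi"
            cpu: "20m"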
+ - name: consul-sidecar + image: {{ $root.Values.global.imageK8S }} + volumeMounts: + - name: consul-service + mountPath: /consul/service + readOnly: true + - name: consul-bin + mountPath: /consul-bin + {{- if $root.Values.global.tls.enabled }} + {{- if $root.Values.global.tls.enableAutoEncrypt }} + - name: consul-auto-encrypt-ca-cert + {{- else }} + - name: consul-ca-cert + {{- end }} + mountPath: /consul/tls/ca + readOnly: true + {{- end }} + {{- if $root.Values.global.consulSidecarContainer }} + {{- if $root.Values.global.consulSidecarContainer.resources }} + resources: {{ toYaml $root.Values.global.consulSidecarContainer.resources | nindent 12 }} + {{- end }} + {{- end }} + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + {{- if $root.Values.global.tls.enabled }} + - name: CONSUL_HTTP_ADDR + value: https://$(HOST_IP):8501 + - name: CONSUL_CACERT + value: /consul/tls/ca/tls.crt + {{- else }} + - name: CONSUL_HTTP_ADDR + value: http://$(HOST_IP):8500 + {{- end }} + command: + - consul-k8s-control-plane + - consul-sidecar + - -log-level={{ $root.Values.global.logLevel }} + - -log-json={{ $root.Values.global.logJSON }} + - -service-config=/consul/service/service.hcl + - -consul-binary=/consul-bin/consul + - -consul-api-timeout={{ $root.Values.global.consulAPITimeout }} + {{- if $root.Values.global.acls.manageSystemACLs }} + - -token-file=/consul/service/acl-token + {{- end }} {{- if (default $defaults.priorityClassName .priorityClassName) }} priorityClassName: {{ default $defaults.priorityClassName .priorityClassName | quote }} {{- end }} diff --git a/charts/consul/templates/mesh-gateway-deployment.yaml b/charts/consul/templates/mesh-gateway-deployment.yaml index 2b2bdc8c2a..a74abce318 100644 --- a/charts/consul/templates/mesh-gateway-deployment.yaml +++ b/charts/consul/templates/mesh-gateway-deployment.yaml @@ -1,13 +1,13 @@ {{- if .Values.meshGateway.enabled }} {{- if not .Values.connectInject.enabled }}{{ fail "connectInject.enabled must be true" }}{{ end -}} +{{- if not .Values.client.grpc }}{{ fail "client.grpc must be true" }}{{ end -}} {{- if and .Values.global.acls.manageSystemACLs (ne .Values.meshGateway.consulServiceName "") (ne .Values.meshGateway.consulServiceName "mesh-gateway") }}{{ fail "if global.acls.manageSystemACLs is true, meshGateway.consulServiceName cannot be set" }}{{ end -}} +{{- if .Values.meshGateway.imageEnvoy }}{{ fail "meshGateway.imageEnvoy must be specified in global.imageEnvoy" }}{{ end -}} {{- if .Values.meshGateway.globalMode }}{{ fail "meshGateway.globalMode is no longer supported; instead, you must migrate to CRDs (see www.consul.io/docs/k8s/crds/upgrade-to-crds)" }}{{ end -}} +{{- if .Values.global.lifecycleSidecarContainer }}{{ fail "global.lifecycleSidecarContainer has been renamed to global.consulSidecarContainer. Please set values using global.consulSidecarContainer." }}{{ end }} +{{- /* The below test checks if clients are disabled (and if so, fails). 
We use the conditional from other client files and prepend 'not' */ -}} +{{- if not (or (and (ne (.Values.client.enabled | toString) "-") .Values.client.enabled) (and (eq (.Values.client.enabled | toString) "-") .Values.global.enabled)) }}{{ fail "clients must be enabled" }}{{ end -}} {{- if and .Values.global.adminPartitions.enabled (not .Values.global.enableConsulNamespaces) }}{{ fail "global.enableConsulNamespaces must be true if global.adminPartitions.enabled=true" }}{{ end }} -{{- if and (eq .Values.meshGateway.wanAddress.source "Static") (eq .Values.meshGateway.wanAddress.static "") }}{{ fail "if meshGateway.wanAddress.source=Static then meshGateway.wanAddress.static cannot be empty" }}{{ end }} -{{- if and (eq .Values.meshGateway.wanAddress.source "Service") (eq .Values.meshGateway.service.type "NodePort") (not .Values.meshGateway.service.nodePort) }}{{ fail "if meshGateway.wanAddress.source=Service and meshGateway.service.type=NodePort, meshGateway.service.nodePort must be set" }}{{ end }} -{{ template "consul.validateRequiredCloudSecretsExist" . }} -{{ template "consul.validateCloudSecretKeys" . }} - apiVersion: apps/v1 kind: Deployment metadata: @@ -37,26 +37,11 @@ spec: chart: {{ template "consul.chart" . }} release: {{ .Release.Name }} component: mesh-gateway - consul.hashicorp.com/connect-inject-managed-by: consul-k8s-endpoints-controller {{- if .Values.global.extraLabels }} {{- toYaml .Values.global.extraLabels | nindent 8 }} {{- end }} annotations: "consul.hashicorp.com/connect-inject": "false" - "consul.hashicorp.com/gateway-kind": "mesh-gateway" - "consul.hashicorp.com/gateway-consul-service-name": "{{ .Values.meshGateway.consulServiceName }}" - "consul.hashicorp.com/mesh-gateway-container-port": "{{ .Values.meshGateway.containerPort }}" - "consul.hashicorp.com/gateway-wan-address-source": "{{ .Values.meshGateway.wanAddress.source }}" - "consul.hashicorp.com/gateway-wan-address-static": "{{ .Values.meshGateway.wanAddress.static }}" - {{- if eq .Values.meshGateway.wanAddress.source "Service" }} - {{- if eq .Values.meshGateway.service.type "NodePort" }} - "consul.hashicorp.com/gateway-wan-port": "{{ .Values.meshGateway.service.nodePort }}" - {{- else }} - "consul.hashicorp.com/gateway-wan-port": "{{ .Values.meshGateway.service.port }}" - {{- end }} - {{- else }} - "consul.hashicorp.com/gateway-wan-port": "{{ .Values.meshGateway.wanAddress.port }}" - {{- end }} {{- if (and .Values.global.secretsBackend.vault.enabled .Values.global.tls.enabled) }} "vault.hashicorp.com/agent-init-first": "true" "vault.hashicorp.com/agent-inject": "true" @@ -95,23 +80,30 @@ spec: terminationGracePeriodSeconds: 10 serviceAccountName: {{ template "consul.fullname" . }}-mesh-gateway volumes: - - name: consul-service - emptyDir: - medium: "Memory" - {{- if .Values.global.tls.enabled }} - {{- if not (or (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots) .Values.global.secretsBackend.vault.enabled) }} - - name: consul-ca-cert - secret: - {{- if .Values.global.tls.caCert.secretName }} - secretName: {{ .Values.global.tls.caCert.secretName }} - {{- else }} - secretName: {{ template "consul.fullname" . 
}}-ca-cert - {{- end }} - items: - - key: {{ default "tls.crt" .Values.global.tls.caCert.secretKey }} - path: tls.crt - {{- end }} - {{- end }} + - name: consul-bin + emptyDir: {} + - name: consul-service + emptyDir: + medium: "Memory" + {{- if .Values.global.tls.enabled }} + {{- if not (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots) }} + - name: consul-ca-cert + secret: + {{- if .Values.global.tls.caCert.secretName }} + secretName: {{ .Values.global.tls.caCert.secretName }} + {{- else }} + secretName: {{ template "consul.fullname" . }}-ca-cert + {{- end }} + items: + - key: {{ default "tls.crt" .Values.global.tls.caCert.secretKey }} + path: tls.crt + {{- end }} + {{- if .Values.global.tls.enableAutoEncrypt }} + - name: consul-auto-encrypt-ca-cert + emptyDir: + medium: "Memory" + {{- end }} + {{- end }} {{- if .Values.meshGateway.hostNetwork }} hostNetwork: {{ .Values.meshGateway.hostNetwork }} {{- end }} @@ -119,180 +111,314 @@ spec: dnsPolicy: {{ .Values.meshGateway.dnsPolicy }} {{- end }} initContainers: - - name: mesh-gateway-init - image: {{ .Values.global.imageK8S }} - env: - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - {{- include "consul.consulK8sConsulServerEnvVars" . | nindent 8 }} - {{- if .Values.global.acls.manageSystemACLs }} - - name: CONSUL_LOGIN_AUTH_METHOD - {{- if and .Values.global.federation.enabled .Values.global.federation.primaryDatacenter }} - value: {{ template "consul.fullname" . }}-k8s-component-auth-method-{{ .Values.global.datacenter }} - {{- else }} - value: {{ template "consul.fullname" . }}-k8s-component-auth-method + # We use the Envoy image as our base image so we use an init container to + # copy the Consul binary to a shared directory that can be used when + # starting Envoy. 
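The same shared-binary pattern recurs on the mesh gateway: the copy-consul-bin init container below fills the consul-bin emptyDir, with resources drawn from meshGateway.initCopyConsulContainer. A hedged values.yaml sketch (figures are illustrative):

    meshGateway:
      initCopyConsulContainer:
        resources:
          requests:
            memory: "25Mi"
            cpu: "20m"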
+ - name: copy-consul-bin + image: {{ .Values.global.image | quote }} + command: + - cp + - /bin/consul + - /consul-bin/consul + volumeMounts: + - name: consul-bin + mountPath: /consul-bin + {{- if .Values.meshGateway.initCopyConsulContainer }} + {{- if .Values.meshGateway.initCopyConsulContainer.resources }} + resources: {{ toYaml .Values.meshGateway.initCopyConsulContainer.resources | nindent 12 }} {{- end }} - - name: CONSUL_LOGIN_DATACENTER - {{- if and .Values.global.federation.enabled .Values.global.federation.primaryDatacenter }} - value: {{ .Values.global.federation.primaryDatacenter }} - {{- else }} - value: {{ .Values.global.datacenter }} {{- end }} - - name: CONSUL_LOGIN_META - value: "component=mesh-gateway,pod=$(NAMESPACE)/$(POD_NAME)" - {{- end }} - - name: CONSUL_NODE_NAME - value: $(NODE_NAME)-virtual - command: - - "/bin/sh" - - "-ec" - - | - consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${NAMESPACE} \ - -gateway-kind="mesh-gateway" \ - -proxy-id-file=/consul/service/proxy-id \ - -service-name={{ .Values.meshGateway.consulServiceName }} \ - -log-level={{ default .Values.global.logLevel }} \ - -log-json={{ .Values.global.logJSON }} - volumeMounts: - - name: consul-service - mountPath: /consul/service - {{- if .Values.global.tls.enabled }} - {{- if not (or (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots) .Values.global.secretsBackend.vault.enabled) }} - - name: consul-ca-cert - mountPath: /consul/tls/ca - readOnly: true - {{- end }} - {{- end }} - {{- if .Values.meshGateway.initServiceInitContainer.resources }} - resources: {{ toYaml .Values.meshGateway.initServiceInitContainer.resources | nindent 10 }} + {{- if (and .Values.global.tls.enabled .Values.global.tls.enableAutoEncrypt) }} + {{- include "consul.getAutoEncryptClientCA" . | nindent 8 }} {{- end }} + - name: mesh-gateway-init + image: {{ .Values.global.imageK8S }} + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + {{- if .Values.global.tls.enabled }} + - name: CONSUL_CACERT + value: /consul/tls/ca/tls.crt + {{- end }} + - name: CONSUL_HTTP_ADDR + {{- if .Values.global.tls.enabled }} + value: https://$(HOST_IP):8501 + {{- else }} + value: http://$(HOST_IP):8500 + {{- end }} + command: + - "/bin/sh" + - "-ec" + - | + {{- if .Values.global.acls.manageSystemACLs }} + consul-k8s-control-plane acl-init \ + -component-name=mesh-gateway \ + -token-sink-file=/consul/service/acl-token \ + {{- if and .Values.global.federation.enabled .Values.global.federation.primaryDatacenter }} + -acl-auth-method={{ template "consul.fullname" . }}-k8s-component-auth-method-{{ .Values.global.datacenter }} \ + -primary-datacenter={{ .Values.global.federation.primaryDatacenter }} \ + {{- else }} + -acl-auth-method={{ template "consul.fullname" . 
}}-k8s-component-auth-method \ + {{- end }} + {{- if .Values.global.adminPartitions.enabled }} + -partition={{ .Values.global.adminPartitions.name }} \ + {{- end }} + -consul-api-timeout={{ .Values.global.consulAPITimeout }} \ + -log-level={{ default .Values.global.logLevel }} \ + -log-json={{ .Values.global.logJSON }} + {{ end }} + + {{- $source := .Values.meshGateway.wanAddress.source }} + {{- $serviceType := .Values.meshGateway.service.type }} + {{- if and (eq $source "Service") (not .Values.meshGateway.service.enabled) }}{{ fail "if meshGateway.wanAddress.source=Service then meshGateway.service.enabled must be set to true" }}{{ end }} + {{- if or (eq $source "NodeIP") (and (eq $source "Service") (eq $serviceType "NodePort")) }} + WAN_ADDR="${HOST_IP}" + {{- else if eq $source "NodeName" }} + WAN_ADDR="${NODE_NAME}" + {{- else if and (eq $source "Service") (or (eq $serviceType "ClusterIP") (eq $serviceType "LoadBalancer")) }} + consul-k8s-control-plane service-address \ + -log-level={{ .Values.global.logLevel }} \ + -log-json={{ .Values.global.logJSON }} \ + -k8s-namespace={{ .Release.Namespace }} \ + -name={{ template "consul.fullname" . }}-mesh-gateway \ + -output-file=/tmp/address.txt + WAN_ADDR="$(cat /tmp/address.txt)" + {{- else if eq $source "Static" }} + {{- if eq .Values.meshGateway.wanAddress.static "" }}{{ fail "if meshGateway.wanAddress.source=Static then meshGateway.wanAddress.static cannot be empty" }}{{ end }} + WAN_ADDR="{{ .Values.meshGateway.wanAddress.static }}" + {{- else }} + {{- fail "currently set meshGateway values for wanAddress.source and service.type are not supported" }} + {{- end }} + + {{- if eq $source "Service" }} + {{- if eq $serviceType "NodePort" }} + {{- if not .Values.meshGateway.service.nodePort }}{{ fail "if meshGateway.wanAddress.source=Service and meshGateway.service.type=NodePort, meshGateway.service.nodePort must be set" }}{{ end }} + WAN_PORT="{{ .Values.meshGateway.service.nodePort }}" + {{- else }} + WAN_PORT="{{ .Values.meshGateway.service.port }}" + {{- end }} + {{- else }} + WAN_PORT="{{ .Values.meshGateway.wanAddress.port }}" + {{- end }} + + cat > /consul/service/service.hcl << EOF + service { + kind = "mesh-gateway" + name = "{{ .Values.meshGateway.consulServiceName }}" + {{- if .Values.global.federation.enabled }} + meta { + consul-wan-federation = "1" + } + {{- end }} + {{- if (and .Values.global.metrics.enabled .Values.global.metrics.enableGatewayMetrics) }} + proxy { config { envoy_prometheus_bind_addr = "${POD_IP}:20200" } } + {{- end }} + port = {{ .Values.meshGateway.containerPort }} + address = "${POD_IP}" + {{- if .Values.global.adminPartitions.enabled }} + partition = "{{ .Values.global.adminPartitions.name }}" + {{- end }} + tagged_addresses { + lan { + address = "${POD_IP}" + port = {{ .Values.meshGateway.containerPort }} + } + wan { + address = "${WAN_ADDR}" + port = ${WAN_PORT} + } + } + checks = [ + { + name = "Mesh Gateway Listening" + interval = "10s" + tcp = "${POD_IP}:{{ .Values.meshGateway.containerPort }}" + deregister_critical_service_after = "6h" + } + ] + } + EOF + + /consul-bin/consul services register \ + {{- if .Values.global.acls.manageSystemACLs }} + -token-file=/consul/service/acl-token \ + {{- end }} + /consul/service/service.hcl + volumeMounts: + - name: consul-service + mountPath: /consul/service + - name: consul-bin + mountPath: /consul-bin + {{- if .Values.global.tls.enabled }} + {{- if .Values.global.tls.enableAutoEncrypt }} + - name: consul-auto-encrypt-ca-cert + {{- else }} + - name: consul-ca-cert 
+ {{- end }} + mountPath: /consul/tls/ca + readOnly: true + {{- end }} + {{- if .Values.meshGateway.initServiceInitContainer.resources }} + resources: {{ toYaml .Values.meshGateway.initServiceInitContainer.resources | nindent 12 }} + {{- end }} containers: - - name: mesh-gateway - image: {{ .Values.global.imageConsulDataplane | quote }} - {{- if .Values.meshGateway.resources }} - resources: + - name: mesh-gateway + image: {{ .Values.global.imageEnvoy | quote }} + {{- if .Values.meshGateway.resources }} + resources: {{- if eq (typeOf .Values.meshGateway.resources) "string" }} {{ tpl .Values.meshGateway.resources . | nindent 12 | trim }} {{- else }} {{- toYaml .Values.meshGateway.resources | nindent 12 }} {{- end }} - {{- end }} - volumeMounts: - - mountPath: /consul/service - name: consul-service - readOnly: true - {{- if .Values.global.tls.enabled }} - {{- if not (or (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots) .Values.global.secretsBackend.vault.enabled) }} - - name: consul-ca-cert - mountPath: /consul/tls/ca - readOnly: true - {{- end }} - {{- end }} - env: - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: DP_CREDENTIAL_LOGIN_META1 - value: pod=$(NAMESPACE)/$(POD_NAME) - - name: DP_CREDENTIAL_LOGIN_META2 - value: component=mesh-gateway - - name: DP_SERVICE_NODE_NAME - value: $(NODE_NAME)-virtual - command: - - consul-dataplane - args: - {{- if .Values.externalServers.enabled }} - - -addresses={{ .Values.externalServers.hosts | first }} - {{- else }} - - -addresses={{ template "consul.fullname" . }}-server.{{ .Release.Namespace }}.svc - {{- end }} - {{- if .Values.externalServers.enabled }} - - -grpc-port={{ .Values.externalServers.grpcPort }} - {{- else }} - - -grpc-port=8502 - {{- end }} - - -proxy-service-id-path=/consul/service/proxy-id - {{- if .Values.global.tls.enabled }} - {{- if (not (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots)) }} - {{- if .Values.global.secretsBackend.vault.enabled }} - - -ca-certs=/vault/secrets/serverca.crt - {{- else }} - - -ca-certs=/consul/tls/ca/tls.crt - {{- end }} - {{- end }} - {{- if and .Values.externalServers.enabled .Values.externalServers.tlsServerName }} - - -tls-server-name={{.Values.externalServers.tlsServerName }} - {{- else if .Values.global.cloud.enabled }} - - -tls-server-name=server.{{ .Values.global.datacenter}}.{{ .Values.global.domain}} - {{- end }} - {{- else }} - - -tls-disabled - {{- end }} - {{- if .Values.global.acls.manageSystemACLs }} - - -credential-type=login - - -login-bearer-token-path=/var/run/secrets/kubernetes.io/serviceaccount/token - {{- if and .Values.global.federation.enabled .Values.global.federation.primaryDatacenter }} - - -login-auth-method={{ template "consul.fullname" . }}-k8s-component-auth-method-{{ .Values.global.datacenter }} - - -login-datacenter={{ .Values.global.federation.primaryDatacenter }} - {{- else }} - - -login-auth-method={{ template "consul.fullname" . 
}}-k8s-component-auth-method - {{- end }} - {{- if .Values.global.adminPartitions.enabled }} - - -login-partition={{ .Values.global.adminPartitions.name }} - {{- end }} - {{- end }} - {{- if .Values.global.adminPartitions.enabled }} - - -service-partition={{ .Values.global.adminPartitions.name }} - {{- end }} - - -log-level={{ default .Values.global.logLevel }} - - -log-json={{ .Values.global.logJSON }} - {{- if (and .Values.global.metrics.enabled .Values.global.metrics.enableGatewayMetrics) }} - - -telemetry-prom-scrape-path=/metrics - {{- end }} - {{- if and .Values.externalServers.enabled .Values.externalServers.skipServerWatch }} - - -server-watch-disabled=true - {{- end }} - livenessProbe: - tcpSocket: - port: {{ .Values.meshGateway.containerPort }} - failureThreshold: 3 - initialDelaySeconds: 30 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 5 - readinessProbe: - tcpSocket: - port: {{ .Values.meshGateway.containerPort }} - failureThreshold: 3 - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 5 - ports: - - name: gateway - containerPort: {{ .Values.meshGateway.containerPort }} - {{- if .Values.meshGateway.hostPort }} - hostPort: {{ .Values.meshGateway.hostPort }} {{- end }} + volumeMounts: + - mountPath: /consul/service + name: consul-service + readOnly: true + - name: consul-bin + mountPath: /consul-bin + {{- if .Values.global.tls.enabled }} + {{- if .Values.global.tls.enableAutoEncrypt }} + - name: consul-auto-encrypt-ca-cert + {{- else }} + - name: consul-ca-cert + {{- end }} + mountPath: /consul/tls/ca + readOnly: true + {{- end }} + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + {{- if eq .Values.meshGateway.wanAddress.source "NodeName" }} + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + {{- end }} + {{- if .Values.global.acls.manageSystemACLs }} + - name: CONSUL_HTTP_TOKEN_FILE + value: /consul/service/acl-token + {{- end }} + {{- if .Values.global.tls.enabled }} + - name: CONSUL_HTTP_ADDR + value: https://$(HOST_IP):8501 + - name: CONSUL_GRPC_ADDR + value: https://$(HOST_IP):8502 + - name: CONSUL_CACERT + value: /consul/tls/ca/tls.crt + {{- else }} + - name: CONSUL_HTTP_ADDR + value: http://$(HOST_IP):8500 + - name: CONSUL_GRPC_ADDR + value: $(HOST_IP):8502 + {{- end }} + command: + - /consul-bin/consul + - connect + - envoy + - -mesh-gateway + {{- if .Values.global.adminPartitions.enabled }} + - -partition={{ .Values.global.adminPartitions.name }} + {{- end }} + livenessProbe: + tcpSocket: + port: {{ .Values.meshGateway.containerPort }} + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + readinessProbe: + tcpSocket: + port: {{ .Values.meshGateway.containerPort }} + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + ports: + - name: gateway + containerPort: {{ .Values.meshGateway.containerPort }} + {{- if .Values.meshGateway.hostPort }} + hostPort: {{ .Values.meshGateway.hostPort }} + {{- end }} + lifecycle: + preStop: + exec: + command: + - "/bin/sh" + - "-ec" + - "/consul-bin/consul services deregister -id=\"{{ .Values.meshGateway.consulServiceName }}\"" + {{- if .Values.global.acls.manageSystemACLs }} + - "/consul-bin/consul logout" + {{- end}} + + # consul-sidecar ensures the mesh gateway is always registered with + # the local Consul agent, even if it loses the initial registration. 
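To summarize the WAN address logic in mesh-gateway-init above: WAN_ADDR and WAN_PORT are derived from meshGateway.wanAddress.source (Service, NodeIP, NodeName, or Static) combined with meshGateway.service. One valid combination, as a hedged values.yaml sketch (values are illustrative):

    meshGateway:
      wanAddress:
        source: Service    # Service requires meshGateway.service.enabled=true
        port: 443          # consulted only when source is not Service
      service:
        enabled: true
        type: LoadBalancer # ClusterIP and NodePort are also handled above
        port: 443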
+ - name: consul-sidecar + image: {{ .Values.global.imageK8S }} + volumeMounts: + - name: consul-service + mountPath: /consul/service + readOnly: true + - name: consul-bin + mountPath: /consul-bin + {{- if .Values.global.tls.enabled }} + {{- if .Values.global.tls.enableAutoEncrypt }} + - name: consul-auto-encrypt-ca-cert + {{- else }} + - name: consul-ca-cert + {{- end }} + mountPath: /consul/tls/ca + readOnly: true + {{- end }} + {{- if .Values.global.consulSidecarContainer }} + {{- if .Values.global.consulSidecarContainer.resources }} + resources: {{ toYaml .Values.global.consulSidecarContainer.resources | nindent 12 }} + {{- end }} + {{- end }} + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + {{- if .Values.global.tls.enabled }} + - name: CONSUL_HTTP_ADDR + value: https://$(HOST_IP):8501 + - name: CONSUL_CACERT + value: /consul/tls/ca/tls.crt + {{- else }} + - name: CONSUL_HTTP_ADDR + value: http://$(HOST_IP):8500 + {{- end }} + command: + - consul-k8s-control-plane + - consul-sidecar + - -log-level={{ .Values.global.logLevel }} + - -log-json={{ .Values.global.logJSON }} + - -service-config=/consul/service/service.hcl + - -consul-binary=/consul-bin/consul + - -consul-api-timeout={{ .Values.global.consulAPITimeout }} + {{- if .Values.global.acls.manageSystemACLs }} + - -token-file=/consul/service/acl-token + {{- end }} {{- if .Values.meshGateway.priorityClassName }} priorityClassName: {{ .Values.meshGateway.priorityClassName | quote }} {{- end }} diff --git a/charts/consul/templates/mesh-gateway-service.yaml b/charts/consul/templates/mesh-gateway-service.yaml index 5fdceca8df..7bd7ec2acc 100644 --- a/charts/consul/templates/mesh-gateway-service.yaml +++ b/charts/consul/templates/mesh-gateway-service.yaml @@ -1,4 +1,4 @@ -{{- if and .Values.meshGateway.enabled }} +{{- if and .Values.meshGateway.enabled .Values.meshGateway.service.enabled }} apiVersion: v1 kind: Service metadata: diff --git a/charts/consul/templates/partition-init-job.yaml b/charts/consul/templates/partition-init-job.yaml index db73ef783b..1b17e721be 100644 --- a/charts/consul/templates/partition-init-job.yaml +++ b/charts/consul/templates/partition-init-job.yaml @@ -3,7 +3,6 @@ {{- template "consul.reservedNamesFailer" (list .Values.global.adminPartitions.name "global.adminPartitions.name") }} {{- if and (not .Values.externalServers.enabled) (ne .Values.global.adminPartitions.name "default") }}{{ fail "externalServers.enabled needs to be true and configured to create a non-default partition." }}{{ end -}} {{- if and .Values.global.secretsBackend.vault.enabled .Values.global.acls.manageSystemACLs (not .Values.global.secretsBackend.vault.adminPartitionsRole) }}{{ fail "global.secretsBackend.vault.adminPartitionsRole is required when global.secretsBackend.vault.enabled and global.acls.manageSystemACLs are true." }}{{ end -}} -{{- if and .Values.externalServers.enabled (not .Values.externalServers.hosts) }}{{ fail "externalServers.hosts must be set if externalServers.enabled is true" }}{{ end -}} apiVersion: batch/v1 kind: Job metadata: @@ -82,19 +81,22 @@ spec: - name: partition-init-job image: {{ .Values.global.imageK8S }} env: - {{- include "consul.consulK8sConsulServerEnvVars" . 
| nindent 10 }} - {{- if (and .Values.global.acls.bootstrapToken.secretName .Values.global.acls.bootstrapToken.secretKey) }} - {{- if .Values.global.secretsBackend.vault.enabled }} - - name: CONSUL_ACL_TOKEN_FILE - value: /vault/secrets/bootstrap-token - {{- else }} - - name: CONSUL_ACL_TOKEN - valueFrom: - secretKeyRef: - name: {{ .Values.global.acls.bootstrapToken.secretName }} - key: {{ .Values.global.acls.bootstrapToken.secretKey }} - {{- end }} - {{- end }} + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if (and .Values.global.acls.bootstrapToken.secretName .Values.global.acls.bootstrapToken.secretKey) }} + {{- if .Values.global.secretsBackend.vault.enabled }} + - name: CONSUL_HTTP_TOKEN_FILE + value: /vault/secrets/bootstrap-token + {{- else }} + - name: CONSUL_HTTP_TOKEN + valueFrom: + secretKeyRef: + name: {{ .Values.global.acls.bootstrapToken.secretName }} + key: {{ .Values.global.acls.bootstrapToken.secretKey }} + {{- end }} + {{- end }} {{- if .Values.global.tls.enabled }} {{- if not (or .Values.externalServers.useSystemRoots .Values.global.secretsBackend.vault.enabled) }} volumeMounts: @@ -108,11 +110,30 @@ spec: - "-ec" - | consul-k8s-control-plane partition-init \ + -consul-api-timeout={{ .Values.global.consulAPITimeout }} \ -log-level={{ .Values.global.logLevel }} \ -log-json={{ .Values.global.logJSON }} \ - {{- if .Values.global.cloud.enabled }} - -tls-server-name=server.{{ .Values.global.datacenter}}.{{ .Values.global.domain}} \ + + {{- if and .Values.externalServers.enabled (not .Values.externalServers.hosts) }}{{ fail "externalServers.hosts must be set if externalServers.enabled is true" }}{{ end -}} + {{- range .Values.externalServers.hosts }} + -server-address={{ quote . }} \ + {{- end }} + -server-port={{ .Values.externalServers.httpsPort }} \ + + {{- if .Values.global.tls.enabled }} + -use-https \ + {{- if not .Values.externalServers.useSystemRoots }} + {{- if .Values.global.secretsBackend.vault.enabled }} + -ca-file=/vault/secrets/serverca.crt \ + {{- else }} + -ca-file=/consul/tls/ca/tls.crt \ + {{- end }} + {{- end }} + {{- if .Values.externalServers.tlsServerName }} + -tls-server-name={{ .Values.externalServers.tlsServerName }} \ + {{- end }} {{- end }} + -partition-name={{ .Values.global.adminPartitions.name }} resources: requests: memory: "50Mi" diff --git a/charts/consul/templates/partition-service.yaml b/charts/consul/templates/partition-service.yaml new file mode 100644 index 0000000000..b9266a11c7 --- /dev/null +++ b/charts/consul/templates/partition-service.yaml @@ -0,0 +1,45 @@ +{{- $serverEnabled := (or (and (ne (.Values.server.enabled | toString) "-") .Values.server.enabled) (and (eq (.Values.server.enabled | toString) "-") .Values.global.enabled)) -}} +{{- if (and .Values.global.adminPartitions.enabled $serverEnabled) }} +# Service with an external IP for clients in non-default Admin Partitions +# to discover Consul servers. This service should only point to Consul servers. +apiVersion: v1 +kind: Service +metadata: + name: {{ template "consul.fullname" . }}-partition + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "consul.name" . }} + chart: {{ template "consul.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: server + annotations: + {{- if .Values.global.adminPartitions.service.annotations }} + {{ tpl .Values.global.adminPartitions.service.annotations . 
| nindent 4 | trim }} + {{- end }} +spec: + type: "{{ .Values.global.adminPartitions.service.type }}" + ports: + - name: https + port: 8501 + targetPort: 8501 + {{ if (and (eq .Values.global.adminPartitions.service.type "NodePort") .Values.global.adminPartitions.service.nodePort.https) }} + nodePort: {{ .Values.global.adminPartitions.service.nodePort.https }} + {{- end }} + - name: serflan + port: 8301 + targetPort: 8301 + {{ if (and (eq .Values.global.adminPartitions.service.type "NodePort") .Values.global.adminPartitions.service.nodePort.serf) }} + nodePort: {{ .Values.global.adminPartitions.service.nodePort.serf }} + {{- end }} + - name: server + port: 8300 + targetPort: 8300 + {{ if (and (eq .Values.global.adminPartitions.service.type "NodePort") .Values.global.adminPartitions.service.nodePort.rpc) }} + nodePort: {{ .Values.global.adminPartitions.service.nodePort.rpc }} + {{- end }} + selector: + app: {{ template "consul.name" . }} + release: "{{ .Release.Name }}" + component: server +{{- end }} diff --git a/charts/consul/templates/server-acl-init-job.yaml b/charts/consul/templates/server-acl-init-job.yaml index 440ab8bee0..21fd018656 100644 --- a/charts/consul/templates/server-acl-init-job.yaml +++ b/charts/consul/templates/server-acl-init-job.yaml @@ -7,8 +7,6 @@ {{- if or (and .Values.global.acls.bootstrapToken.secretName (not .Values.global.acls.bootstrapToken.secretKey)) (and .Values.global.acls.bootstrapToken.secretKey (not .Values.global.acls.bootstrapToken.secretName))}}{{ fail "both global.acls.bootstrapToken.secretKey and global.acls.bootstrapToken.secretName must be set if one of them is provided" }}{{ end -}} {{- if or (and .Values.global.acls.replicationToken.secretName (not .Values.global.acls.replicationToken.secretKey)) (and .Values.global.acls.replicationToken.secretKey (not .Values.global.acls.replicationToken.secretName))}}{{ fail "both global.acls.replicationToken.secretKey and global.acls.replicationToken.secretName must be set if one of them is provided" }}{{ end -}} {{- if (and .Values.global.secretsBackend.vault.enabled (and (not .Values.global.acls.bootstrapToken.secretName) (not .Values.global.acls.replicationToken.secretName ))) }}{{fail "global.acls.bootstrapToken or global.acls.replicationToken must be provided when global.secretsBackend.vault.enabled and global.acls.manageSystemACLs are true" }}{{ end -}} -{{ template "consul.validateRequiredCloudSecretsExist" . }} -{{ template "consul.validateCloudSecretKeys" . }} {{- if (and .Values.global.secretsBackend.vault.enabled (not .Values.global.secretsBackend.vault.manageSystemACLsRole)) }}{{fail "global.secretsBackend.vault.manageSystemACLsRole is required when global.secretsBackend.vault.enabled and global.acls.manageSystemACLs are true" }}{{ end -}} {{- /* We don't render this job when server.updatePartition > 0 because that means a server rollout is in progress and this job won't complete unless @@ -76,7 +74,7 @@ spec: {{- end }} {{- if .Values.global.acls.replicationToken.secretName }} "vault.hashicorp.com/agent-inject-secret-replication-token": "{{ .Values.global.acls.replicationToken.secretName }}" - "vault.hashicorp.com/agent-inject-template-replication-token": {{ template "consul.vaultReplicationTokenTemplate" . }} + "vault.hashicorp.com/agent-inject-template-replication-token": {{ template "consul.vaultReplicationTokenTemplate" . }} {{- end }} {{- if .Values.global.secretsBackend.vault.agentAnnotations }} {{ tpl .Values.global.secretsBackend.vault.agentAnnotations . 
| nindent 8 | trim }} @@ -87,226 +85,255 @@ spec: serviceAccountName: {{ template "consul.fullname" . }}-server-acl-init {{- if (or .Values.global.tls.enabled .Values.global.acls.replicationToken.secretName .Values.global.acls.bootstrapToken.secretName) }} volumes: - {{- if and .Values.global.tls.enabled (not .Values.global.secretsBackend.vault.enabled) }} - {{- if not (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots) }} - - name: consul-ca-cert - secret: - {{- if .Values.global.tls.caCert.secretName }} - secretName: {{ .Values.global.tls.caCert.secretName }} - {{- else }} - secretName: {{ template "consul.fullname" . }}-ca-cert - {{- end }} - items: - - key: {{ default "tls.crt" .Values.global.tls.caCert.secretKey }} - path: tls.crt - {{- end }} - {{- end }} - {{- if (and .Values.global.acls.bootstrapToken.secretName (not .Values.global.secretsBackend.vault.enabled)) }} - - name: bootstrap-token - secret: - secretName: {{ .Values.global.acls.bootstrapToken.secretName }} - items: - - key: {{ .Values.global.acls.bootstrapToken.secretKey }} - path: bootstrap-token - {{- else if and .Values.global.acls.replicationToken.secretName (not .Values.global.secretsBackend.vault.enabled) }} - - name: acl-replication-token - secret: - secretName: {{ .Values.global.acls.replicationToken.secretName }} - items: - - key: {{ .Values.global.acls.replicationToken.secretKey }} - path: acl-replication-token - {{- end }} - {{- end }} - containers: - - name: server-acl-init-job - image: {{ .Values.global.imageK8S }} - env: - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - {{- include "consul.consulK8sConsulServerEnvVars" . | nindent 8 }} - {{- if (or .Values.global.tls.enabled .Values.global.acls.replicationToken.secretName .Values.global.acls.bootstrapToken.secretName) }} - volumeMounts: {{- if and .Values.global.tls.enabled (not .Values.global.secretsBackend.vault.enabled) }} - {{- if not (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots) }} - name: consul-ca-cert - mountPath: /consul/tls/ca - readOnly: true - {{- end }} + secret: + {{- if .Values.global.tls.caCert.secretName }} + secretName: {{ .Values.global.tls.caCert.secretName }} + {{- else }} + secretName: {{ template "consul.fullname" . }}-ca-cert + {{- end }} + items: + - key: {{ default "tls.crt" .Values.global.tls.caCert.secretKey }} + path: tls.crt {{- end }} {{- if (and .Values.global.acls.bootstrapToken.secretName (not .Values.global.secretsBackend.vault.enabled)) }} - name: bootstrap-token - mountPath: /consul/acl/tokens - readOnly: true + secret: + secretName: {{ .Values.global.acls.bootstrapToken.secretName }} + items: + - key: {{ .Values.global.acls.bootstrapToken.secretKey }} + path: bootstrap-token {{- else if and .Values.global.acls.replicationToken.secretName (not .Values.global.secretsBackend.vault.enabled) }} - name: acl-replication-token - mountPath: /consul/acl/tokens - readOnly: true + secret: + secretName: {{ .Values.global.acls.replicationToken.secretName }} + items: + - key: {{ .Values.global.acls.replicationToken.secretKey }} + path: acl-replication-token {{- end }} - {{- end }} - command: - - "/bin/sh" - - "-ec" - - | - CONSUL_FULLNAME="{{template "consul.fullname" . 
}}" + {{- end }} + containers: + - name: post-install-job + image: {{ .Values.global.imageK8S }} + env: + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if (or .Values.global.tls.enabled .Values.global.acls.replicationToken.secretName .Values.global.acls.bootstrapToken.secretName) }} + volumeMounts: + {{- if and .Values.global.tls.enabled (not .Values.global.secretsBackend.vault.enabled) }} + - name: consul-ca-cert + mountPath: /consul/tls/ca + readOnly: true + {{- end }} + {{- if (and .Values.global.acls.bootstrapToken.secretName (not .Values.global.secretsBackend.vault.enabled)) }} + - name: bootstrap-token + mountPath: /consul/acl/tokens + readOnly: true + {{- else if and .Values.global.acls.replicationToken.secretName (not .Values.global.secretsBackend.vault.enabled) }} + - name: acl-replication-token + mountPath: /consul/acl/tokens + readOnly: true + {{- end }} + {{- end }} + command: + - "/bin/sh" + - "-ec" + - | + CONSUL_FULLNAME="{{template "consul.fullname" . }}" - consul-k8s-control-plane server-acl-init \ - -log-level={{ .Values.global.logLevel }} \ - -log-json={{ .Values.global.logJSON }} \ - -resource-prefix=${CONSUL_FULLNAME} \ - -k8s-namespace={{ .Release.Namespace }} \ - -set-server-tokens={{ $serverEnabled }} \ + consul-k8s-control-plane server-acl-init \ + -log-level={{ .Values.global.logLevel }} \ + -log-json={{ .Values.global.logJSON }} \ + -resource-prefix=${CONSUL_FULLNAME} \ + -k8s-namespace={{ .Release.Namespace }} \ + -set-server-tokens={{ $serverEnabled }} \ + -consul-api-timeout={{ .Values.global.consulAPITimeout }} \ - {{- if .Values.global.acls.bootstrapToken.secretName }} - {{- if .Values.global.secretsBackend.vault.enabled }} - -bootstrap-token-file=/vault/secrets/bootstrap-token \ - {{- else }} - -bootstrap-token-file=/consul/acl/tokens/bootstrap-token \ - {{- end }} - {{- end }} + {{- if .Values.externalServers.enabled }} + {{- if and .Values.externalServers.enabled (not .Values.externalServers.hosts) }}{{ fail "externalServers.hosts must be set if externalServers.enabled is true" }}{{ end -}} + {{- range .Values.externalServers.hosts }} + -server-address={{ quote . 
}} \ + {{- end }} + -server-port={{ .Values.externalServers.httpsPort }} \ + {{- else }} + {{- range $index := until (.Values.server.replicas | int) }} + -server-address="${CONSUL_FULLNAME}-server-{{ $index }}.${CONSUL_FULLNAME}-server.${NAMESPACE}.svc" \ + {{- end }} + {{- end }} - {{- if .Values.syncCatalog.enabled }} - -sync-catalog=true \ - {{- if .Values.syncCatalog.consulNodeName }} - -sync-consul-node-name={{ .Values.syncCatalog.consulNodeName }} \ - {{- end }} - {{- end }} + {{- if .Values.global.tls.enabled }} + -use-https \ + {{- if not (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots) }} + {{- if .Values.global.secretsBackend.vault.enabled }} + -consul-ca-cert=/vault/secrets/serverca.crt \ + {{- else }} + -consul-ca-cert=/consul/tls/ca/tls.crt \ + {{- end }} + {{- end }} + {{- if not .Values.externalServers.enabled }} + -server-port=8501 \ + {{- end }} + {{- if .Values.externalServers.tlsServerName }} + -consul-tls-server-name={{ .Values.externalServers.tlsServerName }} \ + {{- end }} + {{- end }} - {{- if .Values.global.peering.enabled }} - -enable-peering=true \ - {{- end }} - {{- if (or (and (ne (.Values.dns.enabled | toString) "-") .Values.dns.enabled) (and (eq (.Values.dns.enabled | toString) "-") .Values.connectInject.transparentProxy.defaultEnabled)) }} - -allow-dns=true \ - {{- end }} + {{- if .Values.syncCatalog.enabled }} + -sync-catalog=true \ + {{- if .Values.syncCatalog.consulNodeName }} + -sync-consul-node-name={{ .Values.syncCatalog.consulNodeName }} \ + {{- end }} + {{- end }} + {{- if .Values.global.adminPartitions.enabled }} + -enable-partitions=true \ + -partition={{ .Values.global.adminPartitions.name }} \ + {{- end }} + {{- if .Values.global.peering.enabled }} + -enable-peering=true \ + {{- end }} + {{- if (or (and (ne (.Values.dns.enabled | toString) "-") .Values.dns.enabled) (and (eq (.Values.dns.enabled | toString) "-") .Values.global.enabled)) }} + -allow-dns=true \ + {{- end }} - {{- if (or (and (ne (.Values.connectInject.enabled | toString) "-") .Values.connectInject.enabled) (and (eq (.Values.connectInject.enabled | toString) "-") .Values.global.enabled)) }} - -connect-inject=true \ - {{- end }} - {{- if and .Values.externalServers.enabled .Values.externalServers.k8sAuthMethodHost }} - -auth-method-host={{ .Values.externalServers.k8sAuthMethodHost }} \ - {{- end }} + {{- if (or (and (ne (.Values.connectInject.enabled | toString) "-") .Values.connectInject.enabled) (and (eq (.Values.connectInject.enabled | toString) "-") .Values.global.enabled)) }} + -connect-inject=true \ + {{- end }} + {{- if and .Values.externalServers.enabled .Values.externalServers.k8sAuthMethodHost }} + -auth-method-host={{ .Values.externalServers.k8sAuthMethodHost }} \ + {{- end }} - {{- if .Values.global.federation.k8sAuthMethodHost }} - -auth-method-host={{ .Values.global.federation.k8sAuthMethodHost }} \ - {{- end }} + {{- if .Values.global.federation.k8sAuthMethodHost }} + -auth-method-host={{ .Values.global.federation.k8sAuthMethodHost }} \ + {{- end }} - {{- if .Values.meshGateway.enabled }} - -mesh-gateway=true \ - {{- end }} + {{- if .Values.meshGateway.enabled }} + -mesh-gateway=true \ + {{- end }} - {{- if .Values.ingressGateways.enabled }} - {{- if .Values.global.enableConsulNamespaces }} - {{- $root := . 
}} - {{- range .Values.ingressGateways.gateways }} - {{- if (or $root.Values.ingressGateways.defaults.consulNamespace .consulNamespace) }} - -ingress-gateway-name="{{ .name }}.{{ (default $root.Values.ingressGateways.defaults.consulNamespace .consulNamespace) }}" \ - {{- else }} - -ingress-gateway-name="{{ .name }}" \ - {{- end }} - {{- end }} - {{- else }} - {{- range .Values.ingressGateways.gateways }} - -ingress-gateway-name="{{ .name }}" \ - {{- end }} - {{- end }} - {{- end }} + {{- if .Values.ingressGateways.enabled }} + {{- if .Values.global.enableConsulNamespaces }} + {{- $root := . }} + {{- range .Values.ingressGateways.gateways }} + {{- if (or $root.Values.ingressGateways.defaults.consulNamespace .consulNamespace) }} + -ingress-gateway-name="{{ .name }}.{{ (default $root.Values.ingressGateways.defaults.consulNamespace .consulNamespace) }}" \ + {{- else }} + -ingress-gateway-name="{{ .name }}" \ + {{- end }} + {{- end }} + {{- else }} + {{- range .Values.ingressGateways.gateways }} + -ingress-gateway-name="{{ .name }}" \ + {{- end }} + {{- end }} + {{- end }} - {{- if .Values.terminatingGateways.enabled }} - {{- if .Values.global.enableConsulNamespaces }} - {{- $root := . }} - {{- range .Values.terminatingGateways.gateways }} - {{- if (or $root.Values.terminatingGateways.defaults.consulNamespace .consulNamespace) }} - -terminating-gateway-name="{{ .name }}.{{ (default $root.Values.terminatingGateways.defaults.consulNamespace .consulNamespace) }}" \ - {{- else }} - -terminating-gateway-name="{{ .name }}" \ - {{- end }} - {{- end }} - {{- else }} - {{- range .Values.terminatingGateways.gateways }} - -terminating-gateway-name="{{ .name }}" \ - {{- end }} - {{- end }} - {{- end }} + {{- if .Values.terminatingGateways.enabled }} + {{- if .Values.global.enableConsulNamespaces }} + {{- $root := . 
}} + {{- range .Values.terminatingGateways.gateways }} + {{- if (or $root.Values.terminatingGateways.defaults.consulNamespace .consulNamespace) }} + -terminating-gateway-name="{{ .name }}.{{ (default $root.Values.terminatingGateways.defaults.consulNamespace .consulNamespace) }}" \ + {{- else }} + -terminating-gateway-name="{{ .name }}" \ + {{- end }} + {{- end }} + {{- else }} + {{- range .Values.terminatingGateways.gateways }} + -terminating-gateway-name="{{ .name }}" \ + {{- end }} + {{- end }} + {{- end }} - {{- if .Values.connectInject.aclBindingRuleSelector }} - -acl-binding-rule-selector={{ .Values.connectInject.aclBindingRuleSelector }} \ - {{- end }} + {{- if .Values.connectInject.aclBindingRuleSelector }} + -acl-binding-rule-selector={{ .Values.connectInject.aclBindingRuleSelector }} \ + {{- end }} - {{- if (and .Values.global.enterpriseLicense.secretName .Values.global.enterpriseLicense.secretKey) }} - -create-enterprise-license-token=true \ - {{- end }} + {{- if (and .Values.global.enterpriseLicense.secretName .Values.global.enterpriseLicense.secretKey) }} + -create-enterprise-license-token=true \ + {{- end }} - {{- if .Values.server.snapshotAgent.enabled }} - -snapshot-agent=true \ - {{- end }} + {{- if .Values.client.snapshotAgent.enabled }} + -snapshot-agent=true \ + {{- end }} - {{- if not (or (and (ne (.Values.client.enabled | toString) "-") .Values.client.enabled) (and (eq (.Values.client.enabled | toString) "-") .Values.global.enabled)) }} - -client=false \ - {{- end }} + {{- if not (or (and (ne (.Values.client.enabled | toString) "-") .Values.client.enabled) (and (eq (.Values.client.enabled | toString) "-") .Values.global.enabled)) }} + -client=false \ + {{- end }} - {{- if .Values.global.acls.createReplicationToken }} - -create-acl-replication-token=true \ - {{- end }} + {{- if .Values.global.acls.createReplicationToken }} + -create-acl-replication-token=true \ + {{- end }} - {{- if .Values.global.federation.enabled }} - -federation=true \ - {{- end }} + {{- if .Values.global.federation.enabled }} + -federation=true \ + {{- end }} - {{- if .Values.global.acls.replicationToken.secretName }} - {{- if .Values.global.secretsBackend.vault.enabled }} - -acl-replication-token-file=/vault/secrets/replication-token \ - {{- else }} - -acl-replication-token-file=/consul/acl/tokens/acl-replication-token \ - {{- end }} - {{- end }} - {{- if and .Values.global.secretsBackend.vault.enabled .Values.global.acls.partitionToken.secretName }} - -partition-token-file=/vault/secrets/partition-token \ - {{- end }} + {{- if .Values.global.acls.bootstrapToken.secretName }} + {{- if .Values.global.secretsBackend.vault.enabled }} + -bootstrap-token-file=/vault/secrets/bootstrap-token \ + {{- else }} + -bootstrap-token-file=/consul/acl/tokens/bootstrap-token \ + {{- end }} + {{- end }} + {{- if .Values.global.acls.replicationToken.secretName }} + {{- if .Values.global.secretsBackend.vault.enabled }} + -acl-replication-token-file=/vault/secrets/replication-token \ + {{- else }} + -acl-replication-token-file=/consul/acl/tokens/acl-replication-token \ + {{- end }} + {{- end }} + {{- if and .Values.global.secretsBackend.vault.enabled .Values.global.acls.partitionToken.secretName }} + -partition-token-file=/vault/secrets/partition-token \ + {{- end }} - {{- if .Values.apiGateway.enabled }} - -api-gateway-controller=true \ - {{- end }} + {{- if .Values.controller.enabled }} + -controller=true \ + {{- end }} - {{- if .Values.global.enableConsulNamespaces }} - -enable-namespaces=true \ - {{- /* 
syncCatalog must be enabled to set sync flags */}} - {{- if (or (and (ne (.Values.syncCatalog.enabled | toString) "-") .Values.syncCatalog.enabled) (and (eq (.Values.syncCatalog.enabled | toString) "-") .Values.global.enabled)) }} - {{- if .Values.syncCatalog.consulNamespaces.consulDestinationNamespace }} - -consul-sync-destination-namespace={{ .Values.syncCatalog.consulNamespaces.consulDestinationNamespace }} \ - {{- end }} - {{- if .Values.syncCatalog.consulNamespaces.mirroringK8S }} - -enable-sync-k8s-namespace-mirroring=true \ - {{- if .Values.syncCatalog.consulNamespaces.mirroringK8SPrefix }} - -sync-k8s-namespace-mirroring-prefix={{ .Values.syncCatalog.consulNamespaces.mirroringK8SPrefix }} \ - {{- end }} - {{- end }} - {{- end }} + {{- if .Values.apiGateway.enabled }} + -api-gateway-controller=true \ + {{- end }} - {{- /* connectInject must be enabled to set inject flags */}} - {{- if (or (and (ne (.Values.connectInject.enabled | toString) "-") .Values.connectInject.enabled) (and (eq (.Values.connectInject.enabled | toString) "-") .Values.global.enabled)) }} - {{- if .Values.connectInject.consulNamespaces.consulDestinationNamespace }} - -consul-inject-destination-namespace={{ .Values.connectInject.consulNamespaces.consulDestinationNamespace }} \ - {{- end }} - {{- if .Values.connectInject.consulNamespaces.mirroringK8S }} - -enable-inject-k8s-namespace-mirroring=true \ - {{- if .Values.connectInject.consulNamespaces.mirroringK8SPrefix }} - -inject-k8s-namespace-mirroring-prefix={{ .Values.connectInject.consulNamespaces.mirroringK8SPrefix }} \ - {{- end }} - {{- end }} - {{- end }} - {{- end }} - resources: - requests: - memory: "50Mi" - cpu: "50m" - limits: - memory: "50Mi" - cpu: "50m" + {{- if .Values.global.enableConsulNamespaces }} + -enable-namespaces=true \ + + {{- /* syncCatalog must be enabled to set sync flags */}} + {{- if (or (and (ne (.Values.syncCatalog.enabled | toString) "-") .Values.syncCatalog.enabled) (and (eq (.Values.syncCatalog.enabled | toString) "-") .Values.global.enabled)) }} + {{- if .Values.syncCatalog.consulNamespaces.consulDestinationNamespace }} + -consul-sync-destination-namespace={{ .Values.syncCatalog.consulNamespaces.consulDestinationNamespace }} \ + {{- end }} + {{- if .Values.syncCatalog.consulNamespaces.mirroringK8S }} + -enable-sync-k8s-namespace-mirroring=true \ + {{- if .Values.syncCatalog.consulNamespaces.mirroringK8SPrefix }} + -sync-k8s-namespace-mirroring-prefix={{ .Values.syncCatalog.consulNamespaces.mirroringK8SPrefix }} \ + {{- end }} + {{- end }} + {{- end }} + + {{- /* connectInject must be enabled to set inject flags */}} + {{- if (or (and (ne (.Values.connectInject.enabled | toString) "-") .Values.connectInject.enabled) (and (eq (.Values.connectInject.enabled | toString) "-") .Values.global.enabled)) }} + {{- if .Values.connectInject.consulNamespaces.consulDestinationNamespace }} + -consul-inject-destination-namespace={{ .Values.connectInject.consulNamespaces.consulDestinationNamespace }} \ + {{- end }} + {{- if .Values.connectInject.consulNamespaces.mirroringK8S }} + -enable-inject-k8s-namespace-mirroring=true \ + {{- if .Values.connectInject.consulNamespaces.mirroringK8SPrefix }} + -inject-k8s-namespace-mirroring-prefix={{ .Values.connectInject.consulNamespaces.mirroringK8SPrefix }} \ + {{- end }} + {{- end }} + {{- end }} + + {{- end }} + resources: + requests: + memory: "50Mi" + cpu: "50m" + limits: + memory: "50Mi" + cpu: "50m" {{- if .Values.global.acls.tolerations }} tolerations: {{ tpl .Values.global.acls.tolerations . 
| indent 8 | trim }} diff --git a/charts/consul/templates/server-config-configmap.yaml b/charts/consul/templates/server-config-configmap.yaml index f7dd85f166..e35311a9c7 100644 --- a/charts/consul/templates/server-config-configmap.yaml +++ b/charts/consul/templates/server-config-configmap.yaml @@ -27,15 +27,8 @@ data: "data_dir": "/consul/data", "domain": "{{ .Values.global.domain }}", "ports": { - {{- if not .Values.global.tls.enabled }} - "grpc": 8502, - "grpc_tls": -1, - {{- end }} - {{- if .Values.global.tls.enabled }} - "grpc": -1, - "grpc_tls": 8502, - {{- end }} - "serf_lan": {{ .Values.server.ports.serflan.port }} + "serf_lan": {{ .Values.server.ports.serflan.port }}, + "grpc": 8503 }, "recursors": {{ .Values.global.recursors | toJson }}, "retry_join": ["{{template "consul.fullname" . }}-server.{{ .Release.Namespace }}.svc:{{ .Values.server.ports.serflan.port }}"], @@ -99,6 +92,7 @@ data: {{- if .Values.global.tls.enabled }} tls-config.json: |- { + {{- if .Values.global.peering.enabled }} "tls": { {{- if .Values.global.tls.verify }} "internal_rpc": { @@ -135,6 +129,33 @@ data: {{- end }} "https": 8501 } + {{- else }} + {{- if .Values.global.secretsBackend.vault.enabled }} + "ca_file": "/vault/secrets/serverca.crt", + "cert_file": "/vault/secrets/servercert.crt", + "key_file": "/vault/secrets/servercert.key", + {{- else }} + "ca_file": "/consul/tls/ca/tls.crt", + "cert_file": "/consul/tls/server/tls.crt", + "key_file": "/consul/tls/server/tls.key", + {{- end }} + {{- if .Values.global.tls.enableAutoEncrypt }} + "auto_encrypt": { + "allow_tls": true + }, + {{- end }} + {{- if .Values.global.tls.verify }} + "verify_incoming_rpc": true, + "verify_outgoing": true, + "verify_server_hostname": true, + {{- end }} + "ports": { + {{- if .Values.global.tls.httpsOnly }} + "http": -1, + {{- end }} + "https": 8501 + } + {{- end }} } {{- end }} {{- if .Values.ui.enabled }} diff --git a/charts/consul/templates/server-podsecuritypolicy.yaml b/charts/consul/templates/server-podsecuritypolicy.yaml index 09e8d75bd1..507a07179f 100644 --- a/charts/consul/templates/server-podsecuritypolicy.yaml +++ b/charts/consul/templates/server-podsecuritypolicy.yaml @@ -35,8 +35,8 @@ spec: max: {{ .Values.server.ports.serflan.port }} - min: 8302 max: 8302 - - min: 8502 - max: 8502 + - min: 8503 + max: 8503 {{- end }} hostIPC: false hostPID: false diff --git a/charts/consul/templates/server-service.yaml b/charts/consul/templates/server-service.yaml index a392f0e76b..4b1c714c1b 100644 --- a/charts/consul/templates/server-service.yaml +++ b/charts/consul/templates/server-service.yaml @@ -19,6 +19,10 @@ metadata: {{- if .Values.server.service.annotations }} {{ tpl .Values.server.service.annotations . 
| nindent 4 | trim }} {{- end }} + # This must be set in addition to publishNotReadyAddresses due + # to an open issue where it may not work: + # https://github.com/kubernetes/kubernetes/issues/58662 + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" spec: clusterIP: None # We want the servers to become available even if they're not ready @@ -36,8 +40,8 @@ spec: targetPort: 8501 {{- end }} - name: grpc - port: 8502 - targetPort: 8502 + port: 8503 + targetPort: 8503 - name: serflan-tcp protocol: "TCP" port: 8301 diff --git a/charts/consul/templates/server-snapshot-agent-configmap.yaml b/charts/consul/templates/server-snapshot-agent-configmap.yaml deleted file mode 100644 index da68d1509c..0000000000 --- a/charts/consul/templates/server-snapshot-agent-configmap.yaml +++ /dev/null @@ -1,24 +0,0 @@ -{{- if .Values.server.snapshotAgent.enabled }} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ template "consul.fullname" . }}-snapshot-agent-config - namespace: {{ .Release.Namespace }} - labels: - app: {{ template "consul.name" . }} - chart: {{ template "consul.chart" . }} - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} - component: server -data: - snapshot-login.json: | - { - "snapshot_agent": { - "login": { - "auth_method": "{{ template "consul.fullname" . }}-k8s-component-auth-method", - "bearer_token_file": "/var/run/secrets/kubernetes.io/serviceaccount/token", - "meta": {"component": "snapshot-agent"} - } - } - } -{{- end }} diff --git a/charts/consul/templates/server-statefulset.yaml b/charts/consul/templates/server-statefulset.yaml index 8b73306fd7..fa775cd0bc 100644 --- a/charts/consul/templates/server-statefulset.yaml +++ b/charts/consul/templates/server-statefulset.yaml @@ -10,15 +10,12 @@ {{- if (and .Values.global.secretsBackend.vault.enabled (not .Values.global.secretsBackend.vault.consulServerRole)) }}{{ fail "global.secretsBackend.vault.consulServerRole must be provided if global.secretsBackend.vault.enabled=true." }}{{ end -}} {{- if (and .Values.server.serverCert.secretName (not .Values.global.tls.caCert.secretName)) }}{{ fail "If server.serverCert.secretName is provided, global.tls.caCert.secretName must also be provided" }}{{ end }} {{- if (and (and .Values.global.secretsBackend.vault.enabled .Values.global.tls.enabled) (not .Values.global.tls.caCert.secretName)) }}{{ fail "global.tls.caCert.secretName must be provided if global.tls.enabled=true and global.secretsBackend.vault.enabled=true." }}{{ end -}} +{{- if (and (and .Values.global.secretsBackend.vault.enabled .Values.global.tls.enabled) (not .Values.global.tls.enableAutoEncrypt)) }}{{ fail "global.tls.enableAutoEncrypt must be true if global.secretsBackend.vault.enabled=true and global.tls.enabled=true" }}{{ end -}} +{{- if (and (and .Values.global.secretsBackend.vault.enabled .Values.global.tls.enabled) (not .Values.global.secretsBackend.vault.consulCARole)) }}{{ fail "global.secretsBackend.vault.consulCARole must be provided if global.secretsBackend.vault.enabled=true and global.tls.enabled=true" }}{{ end -}} {{- if (and .Values.global.enterpriseLicense.secretName (not .Values.global.enterpriseLicense.secretKey)) }}{{fail "enterpriseLicense.secretKey and secretName must both be specified." }}{{ end -}} {{- if (and (not .Values.global.enterpriseLicense.secretName) .Values.global.enterpriseLicense.secretKey) }}{{fail "enterpriseLicense.secretKey and secretName must both be specified." 
}}{{ end -}} {{- if (and .Values.global.acls.bootstrapToken.secretName (not .Values.global.acls.bootstrapToken.secretKey)) }}{{fail "both global.acls.bootstrapToken.secretKey and global.acls.bootstrapToken.secretName must be set if one of them is provided." }}{{ end -}} {{- if (and (not .Values.global.acls.bootstrapToken.secretName) .Values.global.acls.bootstrapToken.secretKey) }}{{fail "both global.acls.bootstrapToken.secretKey and global.acls.bootstrapToken.secretName must be set if one of them is provided." }}{{ end -}} -{{- if .Values.server.snapshotAgent.enabled -}} -{{- if or (and .Values.server.snapshotAgent.configSecret.secretName (not .Values.server.snapshotAgent.configSecret.secretKey)) (and (not .Values.server.snapshotAgent.configSecret.secretName) .Values.server.snapshotAgent.configSecret.secretKey) }}{{fail "server.snapshotAgent.configSecret.secretKey and server.snapshotAgent.configSecret.secretName must both be specified." }}{{ end -}} -{{- end -}} -{{ template "consul.validateRequiredCloudSecretsExist" . }} -{{ template "consul.validateCloudSecretKeys" . }} # StatefulSet to run the actual Consul server cluster. apiVersion: apps/v1 kind: StatefulSet @@ -104,12 +101,6 @@ spec: "vault.hashicorp.com/agent-inject-template-enterpriselicense.txt": {{ template "consul.vaultSecretTemplate" . }} {{- end }} {{- end }} - {{- if .Values.server.snapshotAgent.configSecret.secretName }} - {{- with .Values.server.snapshotAgent.configSecret }} - "vault.hashicorp.com/agent-inject-secret-snapshot-agent-config.json": "{{ .secretName }}" - "vault.hashicorp.com/agent-inject-template-snapshot-agent-config.json": {{ template "consul.vaultSecretTemplate" . }} - {{- end }} - {{- end }} {{- end }} "consul.hashicorp.com/connect-inject": "false" "consul.hashicorp.com/config-checksum": {{ include (print $.Template.BasePath "/server-config-configmap.yaml") . | sha256sum }} @@ -178,26 +169,6 @@ spec: - key: {{ .Values.global.secretsBackend.vault.ca.secretKey }} path: tls.crt {{- end }} - {{- if .Values.server.snapshotAgent.enabled }} - {{- if .Values.global.acls.manageSystemACLs }} - - name: snapshot-agent-config - configMap: - name: {{ template "consul.fullname" . }}-snapshot-agent-config - {{- end }} - {{- if (and .Values.server.snapshotAgent.configSecret.secretName .Values.server.snapshotAgent.configSecret.secretKey (not .Values.global.secretsBackend.vault.enabled)) }} - - name: snapshot-agent-user-config - secret: - secretName: {{ .Values.server.snapshotAgent.configSecret.secretName }} - items: - - key: {{ .Values.server.snapshotAgent.configSecret.secretKey }} - path: snapshot-config.json - {{- end }} - {{- if .Values.server.snapshotAgent.caCert }} - - name: extra-ssl-certs - emptyDir: - medium: "Memory" - {{- end }} - {{- end }} {{- range .Values.server.extraVolumes }} - name: userconfig-{{ .name }} {{ .type }}: @@ -289,55 +260,6 @@ spec: name: {{ .Values.global.acls.replicationToken.secretName | quote }} key: {{ .Values.global.acls.replicationToken.secretKey | quote }} {{- end }} - {{- if .Values.global.cloud.enabled}} - # These are mounted as secrets so that the consul server agent can use them. - # - the hcp-go-sdk in consul agent will already look for HCP_CLIENT_ID, HCP_CLIENT_SECRET, HCP_AUTH_URL, - # HCP_SCADA_ADDRESS, and HCP_API_HOST. so nothing more needs to be done. - # - HCP_RESOURCE_ID is created for use in the - # `-hcl="cloud { resource_id = \"${HCP_RESOURCE_ID}\" }"` logic in the command below. 
- {{- if .Values.global.cloud.clientId.secretName }} - - name: HCP_CLIENT_ID - valueFrom: - secretKeyRef: - name: {{ .Values.global.cloud.clientId.secretName }} - key: {{ .Values.global.cloud.clientId.secretKey }} - {{- end }} - {{- if .Values.global.cloud.clientSecret.secretName }} - - name: HCP_CLIENT_SECRET - valueFrom: - secretKeyRef: - name: {{ .Values.global.cloud.clientSecret.secretName }} - key: {{ .Values.global.cloud.clientSecret.secretKey }} - {{- end}} - {{- if .Values.global.cloud.resourceId.secretName }} - - name: HCP_RESOURCE_ID - valueFrom: - secretKeyRef: - name: {{ .Values.global.cloud.resourceId.secretName }} - key: {{ .Values.global.cloud.resourceId.secretKey }} - {{- end }} - {{- if .Values.global.cloud.authUrl.secretName }} - - name: HCP_AUTH_URL - valueFrom: - secretKeyRef: - name: {{ .Values.global.cloud.authUrl.secretName }} - key: {{ .Values.global.cloud.authUrl.secretKey }} - {{- end}} - {{- if .Values.global.cloud.apiHost.secretName }} - - name: HCP_API_HOST - valueFrom: - secretKeyRef: - name: {{ .Values.global.cloud.apiHost.secretName }} - key: {{ .Values.global.cloud.apiHost.secretKey }} - {{- end}} - {{- if .Values.global.cloud.scadaAddress.secretName }} - - name: HCP_SCADA_ADDRESS - valueFrom: - secretKeyRef: - name: {{ .Values.global.cloud.scadaAddress.secretName }} - key: {{ .Values.global.cloud.scadaAddress.secretKey }} - {{- end}} - {{- end }} {{- include "consul.extraEnvironmentVars" .Values.server | nindent 12 }} command: - "/bin/sh" @@ -346,6 +268,10 @@ spec: {{- if and .Values.global.secretsBackend.vault.enabled .Values.global.gossipEncryption.secretName }} GOSSIP_KEY=`cat /vault/secrets/gossip.txt` {{- end }} + + {{- if (and .Values.dns.enabled .Values.dns.enableRedirection) }} + {{ template "consul.recursors" }} + {{- end }} {{ template "consul.extraconfig" }} @@ -362,6 +288,9 @@ spec: -hcl="acl { tokens { agent = \"${ACL_REPLICATION_TOKEN}\", replication = \"${ACL_REPLICATION_TOKEN}\" } }" \ {{- end }} {{- end }} + {{- if (and .Values.dns.enabled .Values.dns.enableRedirection) }} + $recursor_flags \ + {{- end }} {{- if and .Values.global.secretsBackend.vault.enabled .Values.global.acls.bootstrapToken.secretName }} -config-file=/vault/secrets/bootstrap-token-config.hcl \ {{- else if (and (not .Values.global.secretsBackend.vault.enabled) .Values.global.acls.bootstrapToken.secretName) }} @@ -376,9 +305,6 @@ spec: {{- end }} {{- end }} -config-file=/consul/extra-config/extra-from-values.json - {{- if and .Values.global.cloud.enabled .Values.global.cloud.resourceId.secretName }} - -hcl="cloud { resource_id = \"${HCP_RESOURCE_ID}\" }" - {{- end }} volumeMounts: - name: data-{{ .Release.Namespace | trunc 58 | trimSuffix "-" }} mountPath: /consul/data @@ -418,12 +344,11 @@ spec: - name: https containerPort: 8501 {{- end }} - - name: grpc - containerPort: 8502 + - containerPort: 8503 {{- if .Values.server.exposeGossipAndRPCPorts }} - hostPort: 8502 + hostPort: 8503 {{- end }} - protocol: "TCP" + name: grpc - name: serflan-tcp containerPort: {{ .Values.server.ports.serflan.port }} {{- if .Values.server.exposeGossipAndRPCPorts }} @@ -494,87 +419,6 @@ spec: {{- if .Values.server.extraContainers }} {{ toYaml .Values.server.extraContainers | nindent 8 }} {{- end }} - {{- if .Values.server.snapshotAgent.enabled }} - - name: consul-snapshot-agent - image: "{{ default .Values.global.image .Values.server.image }}" - env: - {{- if .Values.server.snapshotAgent.caCert }} - - name: SSL_CERT_DIR - value: "/etc/ssl/certs:/extra-ssl-certs" - {{- end }} - {{- if 
.Values.global.tls.enabled }}
-          - name: CONSUL_HTTP_ADDR
-            value: https://127.0.0.1:8501
-          - name: CONSUL_CACERT
-            {{- if .Values.global.secretsBackend.vault.enabled }}
-            value: /vault/secrets/serverca.crt
-            {{- else }}
-            value: /consul/tls/ca/tls.crt
-            {{- end }}
-          {{- else }}
-          - name: CONSUL_HTTP_ADDR
-            value: http://127.0.0.1:8500
-          {{- end }}
-          {{- if (and .Values.global.enterpriseLicense.secretName .Values.global.enterpriseLicense.secretKey .Values.global.enterpriseLicense.enableLicenseAutoload (not .Values.global.acls.manageSystemACLs)) }}
-          - name: CONSUL_LICENSE_PATH
-            {{- if .Values.global.secretsBackend.vault.enabled }}
-            value: /vault/secrets/enterpriselicense.txt
-            {{- else }}
-            value: /consul/license/{{ .Values.global.enterpriseLicense.secretKey }}
-            {{- end }}
-          {{- end }}
-          command:
-            - "/bin/sh"
-            - "-ec"
-            - |
-              {{- if .Values.server.snapshotAgent.caCert }}
-              cat <<EOF > /extra-ssl-certs/custom-ca.pem
-              {{- .Values.server.snapshotAgent.caCert | nindent 14 }}
-              EOF
-              {{- end }}
-              exec /bin/consul snapshot agent \
-                -interval={{ .Values.server.snapshotAgent.interval }} \
-                {{- if .Values.global.acls.manageSystemACLs }}
-                -config-file=/consul/config/snapshot-login.json \
-                {{- end }}
-                {{- if (and .Values.server.snapshotAgent.configSecret.secretName .Values.server.snapshotAgent.configSecret.secretKey) }}
-                {{- if .Values.global.secretsBackend.vault.enabled }}
-                -config-file=/vault/secrets/snapshot-agent-config.json \
-                {{- else }}
-                -config-dir=/consul/user-config \
-                {{- end }}
-                {{- end }}
-          volumeMounts:
-            {{- if .Values.global.acls.manageSystemACLs }}
-            - name: snapshot-agent-config
-              mountPath: /consul/config
-              readOnly: true
-            {{- end }}
-            {{- if .Values.server.snapshotAgent.caCert }}
-            - name: extra-ssl-certs
-              mountPath: /extra-ssl-certs
-              readOnly: false
-            {{- end }}
-            {{- if (and .Values.server.snapshotAgent.configSecret.secretName .Values.server.snapshotAgent.configSecret.secretKey (not .Values.global.secretsBackend.vault.enabled)) }}
-            - name: snapshot-agent-user-config
-              mountPath: /consul/user-config
-              readOnly: true
-            {{- end }}
-            {{- if (and .Values.global.enterpriseLicense.secretName .Values.global.enterpriseLicense.secretKey .Values.global.enterpriseLicense.enableLicenseAutoload (not .Values.global.secretsBackend.vault.enabled) (not .Values.global.acls.manageSystemACLs))}}
-            - name: consul-license
-              mountPath: /consul/license
-              readOnly: true
-            {{- end }}
-            {{- if and .Values.global.tls.enabled (not .Values.global.secretsBackend.vault.enabled) }}
-            - name: consul-ca-cert
-              mountPath: /consul/tls/ca
-              readOnly: true
-            {{- end }}
-          {{- with .Values.server.snapshotAgent.resources }}
-          resources:
-            {{- toYaml . | nindent 12 }}
-          {{- end }}
-        {{- end }}
       {{- if .Values.server.nodeSelector }}
       nodeSelector:
         {{ tpl .Values.server.nodeSelector . 
| indent 8 | trim }} diff --git a/charts/consul/templates/sync-catalog-deployment.yaml b/charts/consul/templates/sync-catalog-deployment.yaml index f2815d9627..ef793e1190 100644 --- a/charts/consul/templates/sync-catalog-deployment.yaml +++ b/charts/consul/templates/sync-catalog-deployment.yaml @@ -1,7 +1,6 @@ +{{- $clientEnabled := (or (and (ne (.Values.client.enabled | toString) "-") .Values.client.enabled) (and (eq (.Values.client.enabled | toString) "-") .Values.global.enabled)) }} {{- if (or (and (ne (.Values.syncCatalog.enabled | toString) "-") .Values.syncCatalog.enabled) (and (eq (.Values.syncCatalog.enabled | toString) "-") .Values.global.enabled)) }} {{- template "consul.reservedNamesFailer" (list .Values.syncCatalog.consulNamespaces.consulDestinationNamespace "syncCatalog.consulNamespaces.consulDestinationNamespace") }} -{{ template "consul.validateRequiredCloudSecretsExist" . }} -{{ template "consul.validateCloudSecretKeys" . }} # The deployment for running the sync-catalog pod apiVersion: apps/v1 kind: Deployment @@ -60,8 +59,11 @@ spec: spec: serviceAccountName: {{ template "consul.fullname" . }}-sync-catalog volumes: + - name: consul-data + emptyDir: + medium: "Memory" {{- if .Values.global.tls.enabled }} - {{- if not (or (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots) .Values.global.secretsBackend.vault.enabled) }} + {{- if not (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots) }} - name: consul-ca-cert secret: {{- if .Values.global.tls.caCert.secretName }} @@ -73,28 +75,24 @@ spec: - key: {{ default "tls.crt" .Values.global.tls.caCert.secretKey }} path: tls.crt {{- end }} + {{- if (and .Values.global.tls.enableAutoEncrypt $clientEnabled) }} + - name: consul-auto-encrypt-ca-cert + emptyDir: + medium: "Memory" + {{- end }} {{- end }} containers: - name: sync-catalog image: "{{ default .Values.global.imageK8S .Values.syncCatalog.image }}" env: - {{- include "consul.consulK8sConsulServerEnvVars" . | nindent 12 }} {{- if .Values.global.acls.manageSystemACLs }} - - name: CONSUL_LOGIN_AUTH_METHOD - {{- if and .Values.global.federation.enabled .Values.global.federation.primaryDatacenter .Values.global.enableConsulNamespaces }} - value: {{ template "consul.fullname" . }}-k8s-component-auth-method-{{ .Values.global.datacenter }} - {{- else }} - value: {{ template "consul.fullname" . }}-k8s-component-auth-method - {{- end }} - - name: CONSUL_LOGIN_DATACENTER - {{- if and .Values.global.federation.enabled .Values.global.federation.primaryDatacenter .Values.global.enableConsulNamespaces }} - value: {{ .Values.global.federation.primaryDatacenter }} - {{- else }} - value: {{ .Values.global.datacenter }} - {{- end }} - - name: CONSUL_LOGIN_META - value: "component=sync-catalog,pod=$(NAMESPACE)/$(POD_NAME)" + - name: CONSUL_HTTP_TOKEN_FILE + value: "/consul/login/acl-token" {{- end }} + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP - name: NAMESPACE valueFrom: fieldRef: @@ -106,19 +104,44 @@ spec: name: {{ .Values.syncCatalog.aclSyncToken.secretName }} key: {{ .Values.syncCatalog.aclSyncToken.secretKey }} {{- end }} + {{- if .Values.global.tls.enabled }} + {{- if .Values.client.enabled }} + - name: CONSUL_HTTP_ADDR + value: https://$(HOST_IP):8501 + {{- else }} + - name: CONSUL_HTTP_ADDR + value: https://{{ template "consul.fullname" . 
}}-server:8501 + {{- end }} + - name: CONSUL_CACERT + value: /consul/tls/ca/tls.crt + {{- else }} + {{- if .Values.client.enabled }} + - name: CONSUL_HTTP_ADDR + value: http://$(HOST_IP):8500 + {{- else }} + - name: CONSUL_HTTP_ADDR + value: http://{{ template "consul.fullname" . }}-server:8500 + {{- end }} + {{- end }} volumeMounts: + - mountPath: /consul/login + name: consul-data + readOnly: true {{- if .Values.global.tls.enabled }} - {{- if not (or (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots) .Values.global.secretsBackend.vault.enabled) }} + {{- if and .Values.global.tls.enableAutoEncrypt $clientEnabled }} + - name: consul-auto-encrypt-ca-cert + {{- else }} - name: consul-ca-cert + {{- end }} mountPath: /consul/tls/ca readOnly: true {{- end }} - {{- end }} command: - "/bin/sh" - "-ec" - | consul-k8s-control-plane sync-catalog \ + -consul-api-timeout={{ .Values.global.consulAPITimeout }} \ -log-level={{ default .Values.global.logLevel .Values.syncCatalog.logLevel }} \ -log-json={{ .Values.global.logJSON }} \ -k8s-default-sync={{ .Values.syncCatalog.default }} \ @@ -181,16 +204,16 @@ spec: -consul-cross-namespace-acl-policy=cross-namespace-policy \ {{- end }} {{- end }} - livenessProbe: - httpGet: - path: /health/ready - port: 8080 - scheme: HTTP - failureThreshold: 3 - initialDelaySeconds: 30 - periodSeconds: 5 - successThreshold: 1 - timeoutSeconds: 5 + {{- if .Values.global.acls.manageSystemACLs }} + lifecycle: + preStop: + exec: + command: + - "/bin/sh" + - "-ec" + - | + consul-k8s-control-plane consul-logout -consul-api-timeout={{ .Values.global.consulAPITimeout }} + {{- end }} readinessProbe: httpGet: path: /health/ready @@ -205,6 +228,69 @@ spec: resources: {{- toYaml . | nindent 12 }} {{- end }} + {{- if or .Values.global.acls.manageSystemACLs (and .Values.global.tls.enabled .Values.global.tls.enableAutoEncrypt $clientEnabled) }} + initContainers: + {{- if (and .Values.global.tls.enabled .Values.global.tls.enableAutoEncrypt $clientEnabled) }} + {{- include "consul.getAutoEncryptClientCA" . | nindent 6 }} + {{- end }} + {{- if .Values.global.acls.manageSystemACLs }} + - name: sync-catalog-acl-init + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + {{- if .Values.global.tls.enabled }} + - name: CONSUL_CACERT + value: /consul/tls/ca/tls.crt + {{- end }} + - name: CONSUL_HTTP_ADDR + {{- if .Values.global.tls.enabled }} + value: https://$(HOST_IP):8501 + {{- else }} + value: http://$(HOST_IP):8500 + {{- end }} + image: {{ .Values.global.imageK8S }} + volumeMounts: + - mountPath: /consul/login + name: consul-data + readOnly: false + {{- if .Values.global.tls.enabled }} + {{- if .Values.global.tls.enableAutoEncrypt }} + - name: consul-auto-encrypt-ca-cert + {{- else }} + - name: consul-ca-cert + {{- end }} + mountPath: /consul/tls/ca + readOnly: true + {{- end }} + command: + - "/bin/sh" + - "-ec" + - | + consul-k8s-control-plane acl-init \ + -component-name=sync-catalog \ + {{- if and .Values.global.federation.enabled .Values.global.federation.primaryDatacenter .Values.global.enableConsulNamespaces }} + -acl-auth-method={{ template "consul.fullname" . }}-k8s-component-auth-method-{{ .Values.global.datacenter }} \ + -primary-datacenter={{ .Values.global.federation.primaryDatacenter }} \ + {{- else }} + -acl-auth-method={{ template "consul.fullname" . 
}}-k8s-component-auth-method \ + {{- end }} + {{- if .Values.global.adminPartitions.enabled }} + -partition={{ .Values.global.adminPartitions.name }} \ + {{- end }} + -consul-api-timeout={{ .Values.global.consulAPITimeout }} \ + -log-level={{ default .Values.global.logLevel .Values.syncCatalog.logLevel }} \ + -log-json={{ .Values.global.logJSON }} + resources: + requests: + memory: "25Mi" + cpu: "50m" + limits: + memory: "25Mi" + cpu: "50m" + {{- end }} + {{- end }} {{- if .Values.syncCatalog.priorityClassName }} priorityClassName: {{ .Values.syncCatalog.priorityClassName | quote }} {{- end }} diff --git a/charts/consul/templates/terminating-gateways-deployment.yaml b/charts/consul/templates/terminating-gateways-deployment.yaml index 2f2cb9a921..540fded2e6 100644 --- a/charts/consul/templates/terminating-gateways-deployment.yaml +++ b/charts/consul/templates/terminating-gateways-deployment.yaml @@ -1,8 +1,9 @@ {{- if .Values.terminatingGateways.enabled }} {{- if not .Values.connectInject.enabled }}{{ fail "connectInject.enabled must be true" }}{{ end -}} +{{- if not .Values.client.grpc }}{{ fail "client.grpc must be true" }}{{ end -}} {{- if and .Values.global.adminPartitions.enabled (not .Values.global.enableConsulNamespaces) }}{{ fail "global.enableConsulNamespaces must be true if global.adminPartitions.enabled=true" }}{{ end }} -{{ template "consul.validateRequiredCloudSecretsExist" . }} -{{ template "consul.validateCloudSecretKeys" . }} +{{- if not (or (and (ne (.Values.client.enabled | toString) "-") .Values.client.enabled) (and (eq (.Values.client.enabled | toString) "-") .Values.global.enabled)) }}{{ fail "clients must be enabled" }}{{ end -}} +{{- if .Values.global.lifecycleSidecarContainer }}{{ fail "global.lifecycleSidecarContainer has been renamed to global.consulSidecarContainer. Please set values using global.consulSidecarContainer." }}{{ end }} {{- $root := . 
}} {{- $defaults := .Values.terminatingGateways.defaults }} @@ -70,17 +71,10 @@ spec: release: {{ $root.Release.Name }} component: terminating-gateway terminating-gateway-name: {{ template "consul.fullname" $root }}-{{ .name }} - consul.hashicorp.com/connect-inject-managed-by: consul-k8s-endpoints-controller {{- if $root.Values.global.extraLabels }} {{- toYaml $root.Values.global.extraLabels | nindent 8 }} {{- end }} annotations: - "consul.hashicorp.com/connect-inject": "false" - "consul.hashicorp.com/gateway-kind": "terminating-gateway" - "consul.hashicorp.com/gateway-consul-service-name": "{{ .name }}" - {{- if $root.Values.global.enableConsulNamespaces }} - "consul.hashicorp.com/gateway-namespace": {{ (default $defaults.consulNamespace .consulNamespace) }} - {{- end }} {{- if (and $root.Values.global.secretsBackend.vault.enabled $root.Values.global.tls.enabled) }} "vault.hashicorp.com/agent-init-first": "true" "vault.hashicorp.com/agent-inject": "true" @@ -95,6 +89,7 @@ spec: {{ tpl $root.Values.global.secretsBackend.vault.agentAnnotations $root | nindent 8 | trim }} {{- end }} {{- end }} + "consul.hashicorp.com/connect-inject": "false" {{- if (and $root.Values.global.metrics.enabled $root.Values.global.metrics.enableGatewayMetrics) }} "prometheus.io/scrape": "true" "prometheus.io/path": "/metrics" @@ -123,91 +118,156 @@ spec: terminationGracePeriodSeconds: 10 serviceAccountName: {{ template "consul.fullname" $root }}-{{ .name }} volumes: - - name: consul-service - emptyDir: - medium: "Memory" - {{- range (default $defaults.extraVolumes .extraVolumes) }} - - name: userconfig-{{ .name }} - {{ .type }}: - {{- if (eq .type "configMap") }} - name: {{ .name }} - {{- else if (eq .type "secret") }} - secretName: {{ .name }} - {{- end }} - {{- with .items }} - items: - {{- range . }} - - key: {{.key}} - path: {{.path}} - {{- end }} + - name: consul-bin + emptyDir: {} + - name: consul-service + emptyDir: + medium: "Memory" + {{- range (default $defaults.extraVolumes .extraVolumes) }} + - name: userconfig-{{ .name }} + {{ .type }}: + {{- if (eq .type "configMap") }} + name: {{ .name }} + {{- else if (eq .type "secret") }} + secretName: {{ .name }} + {{- end }} + {{- with .items }} + items: + {{- range . }} + - key: {{.key}} + path: {{.path}} + {{- end }} + {{- end }} + {{- end }} + {{- if $root.Values.global.tls.enabled }} + {{- if not (and $root.Values.externalServers.enabled $root.Values.externalServers.useSystemRoots) }} + - name: consul-ca-cert + secret: + {{- if $root.Values.global.tls.caCert.secretName }} + secretName: {{ $root.Values.global.tls.caCert.secretName }} + {{- else }} + secretName: {{ template "consul.fullname" $root }}-ca-cert + {{- end }} + items: + - key: {{ default "tls.crt" $root.Values.global.tls.caCert.secretKey }} + path: tls.crt + {{- end }} + {{- if $root.Values.global.tls.enableAutoEncrypt }} + - name: consul-auto-encrypt-ca-cert + emptyDir: + medium: "Memory" + {{- end }} + {{- end }} + initContainers: + # We use the Envoy image as our base image so we use an init container to + # copy the Consul binary to a shared directory that can be used when + # starting Envoy. 
+ - name: copy-consul-bin + image: {{ $root.Values.global.image | quote }} + command: + - cp + - /bin/consul + - /consul-bin/consul + volumeMounts: + - name: consul-bin + mountPath: /consul-bin + {{- $initContainer := .initCopyConsulContainer }} + {{- if (or $initContainer $defaults.initCopyConsulContainer) }} + {{- if (default $defaults.initCopyConsulContainer.resources $initContainer.resources) }} + resources: {{ toYaml (default $defaults.initCopyConsulContainer.resources $initContainer.resources) | nindent 12 }} {{- end }} - {{- end }} - {{- if $root.Values.global.tls.enabled }} - {{- if not (or (and $root.Values.externalServers.enabled $root.Values.externalServers.useSystemRoots) ($root.Values.global.secretsBackend.vault.enabled)) }} - - name: consul-ca-cert - secret: - {{- if $root.Values.global.tls.caCert.secretName }} - secretName: {{ $root.Values.global.tls.caCert.secretName }} - {{- else }} - secretName: {{ template "consul.fullname" $root }}-ca-cert {{- end }} - items: - - key: {{ default "tls.crt" $root.Values.global.tls.caCert.secretKey }} - path: tls.crt - {{- end }} - {{- end }} - initContainers: + {{- if (and $root.Values.global.tls.enabled $root.Values.global.tls.enableAutoEncrypt) }} + {{- include "consul.getAutoEncryptClientCA" $root | nindent 8 }} + {{- end }} # terminating-gateway-init registers the terminating gateway service with Consul. - name: terminating-gateway-init image: {{ $root.Values.global.imageK8S }} env: - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - {{- include "consul.consulK8sConsulServerEnvVars" $root | nindent 10 }} - {{- if $root.Values.global.enableConsulNamespaces }} - - name: CONSUL_NAMESPACE - value: {{ (default $defaults.consulNamespace .consulNamespace) }} - {{- end }} - {{- if $root.Values.global.acls.manageSystemACLs }} - - name: CONSUL_LOGIN_AUTH_METHOD - value: {{ template "consul.fullname" $root }}-k8s-component-auth-method - - name: CONSUL_LOGIN_DATACENTER - value: {{ $root.Values.global.datacenter }} - - name: CONSUL_LOGIN_META - value: "component=terminating-gateway,pod=$(NAMESPACE)/$(POD_NAME)" - {{- end }} - - name: CONSUL_NODE_NAME - value: $(NODE_NAME)-virtual + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + {{- if $root.Values.global.tls.enabled }} + - name: CONSUL_HTTP_ADDR + value: https://$(HOST_IP):8501 + - name: CONSUL_CACERT + value: /consul/tls/ca/tls.crt + {{- else }} + - name: CONSUL_HTTP_ADDR + value: http://$(HOST_IP):8500 + {{- end }} command: - "/bin/sh" - "-ec" - | - consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${NAMESPACE} \ - -gateway-kind="terminating-gateway" \ - -proxy-id-file=/consul/service/proxy-id \ - -service-name={{ .name }} \ + {{- if $root.Values.global.acls.manageSystemACLs }} + consul-k8s-control-plane acl-init \ + -component-name=terminating-gateway/{{ template "consul.fullname" $root }}-{{ .name }} \ + -acl-auth-method={{ template "consul.fullname" $root }}-k8s-component-auth-method \ + {{- if $root.Values.global.adminPartitions.enabled }} + -partition={{ $root.Values.global.adminPartitions.name }} \ + {{- end }} + -token-sink-file=/consul/service/acl-token \ + -consul-api-timeout={{ $root.Values.global.consulAPITimeout }} \ -log-level={{ 
default $root.Values.global.logLevel }} \ -log-json={{ $root.Values.global.logJSON }} + {{- end }} + + cat > /consul/service/service.hcl << EOF + service { + kind = "terminating-gateway" + name = "{{ .name }}" + id = "${POD_NAME}" + {{- if $root.Values.global.enableConsulNamespaces }} + namespace = "{{ (default $defaults.consulNamespace .consulNamespace) }}" + {{- end }} + {{- if $root.Values.global.adminPartitions.enabled }} + partition = "{{ $root.Values.global.adminPartitions.name }}" + {{- end }} + address = "${POD_IP}" + port = 8443 + {{- if (and $root.Values.global.metrics.enabled $root.Values.global.metrics.enableGatewayMetrics) }} + proxy { config { envoy_prometheus_bind_addr = "${POD_IP}:20200" } } + {{- end }} + checks = [ + { + name = "Terminating Gateway Listening" + interval = "10s" + tcp = "${POD_IP}:8443" + deregister_critical_service_after = "6h" + } + ] + } + EOF + + /consul-bin/consul services register \ + {{- if $root.Values.global.acls.manageSystemACLs }} + -token-file=/consul/service/acl-token \ + {{- end }} + /consul/service/service.hcl volumeMounts: - name: consul-service mountPath: /consul/service + - name: consul-bin + mountPath: /consul-bin {{- if $root.Values.global.tls.enabled }} - {{- if not (or (and $root.Values.externalServers.enabled $root.Values.externalServers.useSystemRoots) ($root.Values.global.secretsBackend.vault.enabled)) }} + {{- if $root.Values.global.tls.enableAutoEncrypt }} + - name: consul-auto-encrypt-ca-cert + {{- else }} - name: consul-ca-cert + {{- end }} mountPath: /consul/tls/ca readOnly: true {{- end }} - {{- end }} resources: requests: memory: "50Mi" @@ -217,97 +277,72 @@ spec: cpu: "50m" containers: - name: terminating-gateway - image: {{ $root.Values.global.imageConsulDataplane | quote }} - volumeMounts: - - name: consul-service - mountPath: /consul/service - readOnly: true - {{- if $root.Values.global.tls.enabled }} - {{- if not (or (and $root.Values.externalServers.enabled $root.Values.externalServers.useSystemRoots) ($root.Values.global.secretsBackend.vault.enabled)) }} - - name: consul-ca-cert - mountPath: /consul/tls/ca - readOnly: true - {{- end }} - {{- end }} - {{- range (default $defaults.extraVolumes .extraVolumes) }} - - name: userconfig-{{ .name }} - readOnly: true - mountPath: /consul/userconfig/{{ .name }} - {{- end }} + image: {{ $root.Values.global.imageEnvoy | quote }} {{- if (default $defaults.resources .resources) }} resources: {{ toYaml (default $defaults.resources .resources) | nindent 12 }} {{- end }} - env: - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: DP_CREDENTIAL_LOGIN_META1 - value: pod=$(NAMESPACE)/$(POD_NAME) - - name: DP_CREDENTIAL_LOGIN_META2 - value: component=terminating-gateway - - name: DP_SERVICE_NODE_NAME - value: $(NODE_NAME)-virtual - command: - - consul-dataplane - args: - {{- if $root.Values.externalServers.enabled }} - - -addresses={{ $root.Values.externalServers.hosts | first }} - {{- else }} - - -addresses={{ template "consul.fullname" $root }}-server.{{ $root.Release.Namespace }}.svc - {{- end }} - {{- if $root.Values.externalServers.enabled }} - - -grpc-port={{ $root.Values.externalServers.grpcPort }} - {{- else }} - - -grpc-port=8502 - {{- end }} - - -proxy-service-id-path=/consul/service/proxy-id - {{- if $root.Values.global.enableConsulNamespaces }} - - -service-namespace={{ (default 
$defaults.consulNamespace .consulNamespace) }}
-          {{- end }}
-          {{- if and $root.Values.global.tls.enabled }}
-          {{- if (not (and $root.Values.externalServers.enabled $root.Values.externalServers.useSystemRoots)) }}
-          {{- if $root.Values.global.secretsBackend.vault.enabled }}
-          - -ca-certs=/vault/secrets/serverca.crt
-          {{- else }}
-          - -ca-certs=/consul/tls/ca/tls.crt
-          {{- end }}
-          {{- end }}
-          {{- if and $root.Values.externalServers.enabled $root.Values.externalServers.tlsServerName }}
-          - -tls-server-name={{$root.Values.externalServers.tlsServerName }}
-          {{- else if $root.Values.global.cloud.enabled }}
-          - -tls-server-name=server.{{ $root.Values.global.datacenter}}.{{ $root.Values.global.domain}}
-          {{- end }}
+        volumeMounts:
+        - name: consul-bin
+          mountPath: /consul-bin
+        - mountPath: /consul/service
+          name: consul-service
+          readOnly: true
+        {{- if $root.Values.global.tls.enabled }}
+        {{- if $root.Values.global.tls.enableAutoEncrypt }}
+        - name: consul-auto-encrypt-ca-cert
         {{- else }}
-          - -tls-disabled
-          {{- end }}
-          {{- if $root.Values.global.acls.manageSystemACLs }}
-          - -credential-type=login
-          - -login-bearer-token-path=/var/run/secrets/kubernetes.io/serviceaccount/token
-          - -login-auth-method={{ template "consul.fullname" $root }}-k8s-component-auth-method
-          {{- if $root.Values.global.adminPartitions.enabled }}
-          - -login-partition={{ $root.Values.global.adminPartitions.name }}
-          {{- end }}
+        - name: consul-ca-cert
         {{- end }}
-          {{- if $root.Values.global.adminPartitions.enabled }}
-          - -service-partition={{ $root.Values.global.adminPartitions.name }}
+          mountPath: /consul/tls/ca
+          readOnly: true
         {{- end }}
-          - -log-level={{ default $root.Values.global.logLevel }}
-          - -log-json={{ $root.Values.global.logJSON }}
-          {{- if (and $root.Values.global.metrics.enabled $root.Values.global.metrics.enableGatewayMetrics) }}
-          - -telemetry-prom-scrape-path=/metrics
-          {{- end }}
-          {{- if and $root.Values.externalServers.enabled $root.Values.externalServers.skipServerWatch }}
-          - -server-watch-disabled=true
+        {{- range (default $defaults.extraVolumes .extraVolumes) }}
+        - name: userconfig-{{ .name }}
+          readOnly: true
+          mountPath: /consul/userconfig/{{ .name }}
         {{- end }}
+        env:
+        - name: HOST_IP
+          valueFrom:
+            fieldRef:
+              fieldPath: status.hostIP
+        - name: POD_IP
+          valueFrom:
+            fieldRef:
+              fieldPath: status.podIP
+        - name: POD_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        {{- if $root.Values.global.acls.manageSystemACLs }}
+        - name: CONSUL_HTTP_TOKEN_FILE
+          value: "/consul/service/acl-token"
+        {{- end }}
+        {{- if $root.Values.global.tls.enabled }}
+        - name: CONSUL_HTTP_ADDR
+          value: https://$(HOST_IP):8501
+        - name: CONSUL_GRPC_ADDR
+          value: https://$(HOST_IP):8502
+        - name: CONSUL_CACERT
+          value: /consul/tls/ca/tls.crt
+        {{- else }}
+        - name: CONSUL_HTTP_ADDR
+          value: http://$(HOST_IP):8500
+        - name: CONSUL_GRPC_ADDR
+          value: $(HOST_IP):8502
+        {{- end }}
+        command:
+        - /consul-bin/consul
+        - connect
+        - envoy
+        - -gateway=terminating
+        - -proxy-id=$(POD_NAME)
+        {{- if $root.Values.global.enableConsulNamespaces }}
+        - -namespace={{ default $defaults.consulNamespace .consulNamespace }}
+        {{- end }}
+        {{- if $root.Values.global.adminPartitions.enabled }}
+        - -partition={{ $root.Values.global.adminPartitions.name }}
+        {{- end }}
         livenessProbe:
           tcpSocket:
             port: 8443
@@ -327,6 +362,78 @@ spec:
         ports:
         - name: gateway
           containerPort: 8443
+        lifecycle:
+          preStop:
+            exec:
+              command:
+              - "/bin/sh"
+              - "-ec"
+              - |
+                /consul-bin/consul services deregister \
+                {{- if $root.Values.global.enableConsulNamespaces }}
+                  -namespace={{ default $defaults.consulNamespace .consulNamespace }} \
+                {{- end }}
+                {{- if $root.Values.global.adminPartitions.enabled }}
+                  -partition={{ $root.Values.global.adminPartitions.name }} \
+                {{- end }}
+                  -id="${POD_NAME}"
+              {{- if $root.Values.global.acls.manageSystemACLs }}
+              - "/consul-bin/consul logout"
+              {{- end}}
+
+      # consul-sidecar ensures the terminating gateway is always registered with
+      # the local Consul agent, even if it loses the initial registration.
+      - name: consul-sidecar
+        image: {{ $root.Values.global.imageK8S }}
+        volumeMounts:
+        - name: consul-service
+          mountPath: /consul/service
+          readOnly: true
+        - name: consul-bin
+          mountPath: /consul-bin
+        {{- if $root.Values.global.tls.enabled }}
+        {{- if $root.Values.global.tls.enableAutoEncrypt }}
+        - name: consul-auto-encrypt-ca-cert
+        {{- else }}
+        - name: consul-ca-cert
+        {{- end }}
+          mountPath: /consul/tls/ca
+          readOnly: true
+        {{- end }}
+        {{- if $root.Values.global.consulSidecarContainer }}
+        {{- if $root.Values.global.consulSidecarContainer.resources }}
+        resources: {{ toYaml $root.Values.global.consulSidecarContainer.resources | nindent 12 }}
+        {{- end }}
+        {{- end }}
+        env:
+        - name: HOST_IP
+          valueFrom:
+            fieldRef:
+              fieldPath: status.hostIP
+        - name: POD_IP
+          valueFrom:
+            fieldRef:
+              fieldPath: status.podIP
+        {{- if $root.Values.global.tls.enabled }}
+        - name: CONSUL_HTTP_ADDR
+          value: https://$(HOST_IP):8501
+        - name: CONSUL_CACERT
+          value: /consul/tls/ca/tls.crt
+        {{- else }}
+        - name: CONSUL_HTTP_ADDR
+          value: http://$(HOST_IP):8500
+        {{- end }}
+        command:
+        - consul-k8s-control-plane
+        - consul-sidecar
+        - -log-level={{ $root.Values.global.logLevel }}
+        - -log-json={{ $root.Values.global.logJSON }}
+        - -service-config=/consul/service/service.hcl
+        - -consul-binary=/consul-bin/consul
+        - -consul-api-timeout={{ $root.Values.global.consulAPITimeout }}
+        {{- if $root.Values.global.acls.manageSystemACLs }}
+        - -token-file=/consul/service/acl-token
+        {{- end }}
       {{- if (default $defaults.priorityClassName .priorityClassName) }}
       priorityClassName: {{ (default $defaults.priorityClassName .priorityClassName) | quote }}
       {{- end }}
diff --git a/charts/consul/templates/terminating-gateways-service.yaml b/charts/consul/templates/terminating-gateways-service.yaml
deleted file mode 100644
index 124900e727..0000000000
--- a/charts/consul/templates/terminating-gateways-service.yaml
+++ /dev/null
@@ -1,31 +0,0 @@
-{{- if .Values.terminatingGateways.enabled }}
-
-{{- $root := . }}
-{{- $defaults := .Values.terminatingGateways.defaults }}
-
-{{- range .Values.terminatingGateways.gateways }}
-
-{{- $service := .service }}
-apiVersion: v1
-kind: Service
-metadata:
-  name: {{ template "consul.fullname" $root }}-{{ .name }}
-  namespace: {{ $root.Release.Namespace }}
-  labels:
-    app: {{ template "consul.name" $root }}
-    chart: {{ template "consul.chart" $root }}
-    heritage: {{ $root.Release.Service }}
-    release: {{ $root.Release.Name }}
-    component: terminating-gateway
-spec:
-  selector:
-    app: {{ template "consul.name" $root }}
-    release: "{{ $root.Release.Name }}"
-    component: terminating-gateway
-  type: ClusterIP
-  ports:
-  - port: 80
-    targetPort: 8443
----
-{{- end }}
-{{- end }}
diff --git a/charts/consul/templates/webhook-cert-manager-clusterrole.yaml b/charts/consul/templates/webhook-cert-manager-clusterrole.yaml
index e13e2dc741..ce8dfb846c 100644
--- a/charts/consul/templates/webhook-cert-manager-clusterrole.yaml
+++ b/charts/consul/templates/webhook-cert-manager-clusterrole.yaml
@@ -1,5 +1,5 @@
-{{ $hasConfiguredWebhookCertsUsingVault := (and .Values.global.secretsBackend.vault.enabled .Values.global.secretsBackend.vault.connectInjectRole .Values.global.secretsBackend.vault.connectInject.tlsCert.secretName .Values.global.secretsBackend.vault.connectInject.caCert.secretName) -}}
-{{- if (and .Values.connectInject.enabled (not $hasConfiguredWebhookCertsUsingVault)) }}
+{{ $hasConfiguredWebhookCertsUsingVault := (and .Values.global.secretsBackend.vault.enabled .Values.global.secretsBackend.vault.connectInjectRole .Values.global.secretsBackend.vault.connectInject.tlsCert.secretName .Values.global.secretsBackend.vault.connectInject.caCert.secretName .Values.global.secretsBackend.vault.controllerRole .Values.global.secretsBackend.vault.controller.tlsCert.secretName .Values.global.secretsBackend.vault.controller.caCert.secretName) -}}
+{{- if (and (or .Values.connectInject.enabled .Values.controller.enabled) (not $hasConfiguredWebhookCertsUsingVault)) }}
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
 metadata:
diff --git a/charts/consul/templates/webhook-cert-manager-clusterrolebinding.yaml b/charts/consul/templates/webhook-cert-manager-clusterrolebinding.yaml
index 472ef4ee1d..90192d5966 100644
--- a/charts/consul/templates/webhook-cert-manager-clusterrolebinding.yaml
+++ b/charts/consul/templates/webhook-cert-manager-clusterrolebinding.yaml
@@ -1,5 +1,5 @@
-{{ $hasConfiguredWebhookCertsUsingVault := (and .Values.global.secretsBackend.vault.enabled .Values.global.secretsBackend.vault.connectInjectRole .Values.global.secretsBackend.vault.connectInject.tlsCert.secretName .Values.global.secretsBackend.vault.connectInject.caCert.secretName) -}}
-{{- if (and .Values.connectInject.enabled (not $hasConfiguredWebhookCertsUsingVault)) }}
+{{ $hasConfiguredWebhookCertsUsingVault := (and .Values.global.secretsBackend.vault.enabled .Values.global.secretsBackend.vault.connectInjectRole .Values.global.secretsBackend.vault.connectInject.tlsCert.secretName .Values.global.secretsBackend.vault.connectInject.caCert.secretName .Values.global.secretsBackend.vault.controllerRole .Values.global.secretsBackend.vault.controller.tlsCert.secretName .Values.global.secretsBackend.vault.controller.caCert.secretName) -}}
+{{- if (and (or .Values.connectInject.enabled .Values.controller.enabled) (not $hasConfiguredWebhookCertsUsingVault)) }}
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRoleBinding
 metadata:
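[Editor's note] Every webhook-cert-manager manifest in this chart is gated on the same `$hasConfiguredWebhookCertsUsingVault` expression, which after this change only short-circuits when the controller's Vault cert settings are configured as well, and the templates now render for either `connectInject.enabled` or `controller.enabled`. A quick, hedged way to confirm the new gate behaves as intended — values here are illustrative, in the style of the chart's own bats tests:

    cd charts/consul
    helm template -s templates/webhook-cert-manager-clusterrole.yaml \
        --set 'connectInject.enabled=false' \
        --set 'controller.enabled=true' \
        . | yq -r '.kind'    # should now print "ClusterRole" with the controller alone enabled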
diff --git a/charts/consul/templates/webhook-cert-manager-configmap.yaml b/charts/consul/templates/webhook-cert-manager-configmap.yaml
index 293dd32d9f..61520fe230 100644
--- a/charts/consul/templates/webhook-cert-manager-configmap.yaml
+++ b/charts/consul/templates/webhook-cert-manager-configmap.yaml
@@ -1,5 +1,5 @@
-{{ $hasConfiguredWebhookCertsUsingVault := (and .Values.global.secretsBackend.vault.enabled .Values.global.secretsBackend.vault.connectInjectRole .Values.global.secretsBackend.vault.connectInject.tlsCert.secretName .Values.global.secretsBackend.vault.connectInject.caCert.secretName) -}}
-{{- if (and .Values.connectInject.enabled (not $hasConfiguredWebhookCertsUsingVault)) }}
+{{ $hasConfiguredWebhookCertsUsingVault := (and .Values.global.secretsBackend.vault.enabled .Values.global.secretsBackend.vault.connectInjectRole .Values.global.secretsBackend.vault.connectInject.tlsCert.secretName .Values.global.secretsBackend.vault.connectInject.caCert.secretName .Values.global.secretsBackend.vault.controllerRole .Values.global.secretsBackend.vault.controller.tlsCert.secretName .Values.global.secretsBackend.vault.controller.caCert.secretName) -}}
+{{- if (and (or .Values.connectInject.enabled .Values.controller.enabled) (not $hasConfiguredWebhookCertsUsingVault)) }}
 apiVersion: v1
 kind: ConfigMap
 metadata:
@@ -14,6 +14,7 @@ metadata:
 data:
   webhook-config.json: |-
     [
+      {{- if .Values.connectInject.enabled }}
       {
        "name": "{{ template "consul.fullname" . }}-connect-injector",
        "tlsAutoHosts": [
@@ -24,6 +25,19 @@ data:
        ],
        "secretName": "{{ template "consul.fullname" . }}-connect-inject-webhook-cert",
        "secretNamespace": "{{ .Release.Namespace }}"
+      }{{- if and .Values.controller.enabled }},{{- end }}{{- end }}
+      {{- if and .Values.controller.enabled }}
+      {
+        "name": "{{ template "consul.fullname" . }}-controller",
+        "tlsAutoHosts": [
+          "{{ template "consul.fullname" . }}-controller-webhook",
+          "{{ template "consul.fullname" . }}-controller-webhook.{{ .Release.Namespace }}",
+          "{{ template "consul.fullname" . }}-controller-webhook.{{ .Release.Namespace }}.svc",
+          "{{ template "consul.fullname" . }}-controller-webhook.{{ .Release.Namespace }}.svc.cluster.local"
+        ],
+        "secretName": "{{ template "consul.fullname" . }}-controller-webhook-cert",
+        "secretNamespace": "{{ .Release.Namespace }}"
       }
+      {{- end }}
     ]
 {{- end }}
diff --git a/charts/consul/templates/webhook-cert-manager-deployment.yaml b/charts/consul/templates/webhook-cert-manager-deployment.yaml
index dd93c039d2..838f31b923 100644
--- a/charts/consul/templates/webhook-cert-manager-deployment.yaml
+++ b/charts/consul/templates/webhook-cert-manager-deployment.yaml
@@ -1,5 +1,5 @@
-{{ $hasConfiguredWebhookCertsUsingVault := (and .Values.global.secretsBackend.vault.enabled .Values.global.secretsBackend.vault.connectInjectRole .Values.global.secretsBackend.vault.connectInject.tlsCert.secretName .Values.global.secretsBackend.vault.connectInject.caCert.secretName) -}}
-{{- if (and .Values.connectInject.enabled (not $hasConfiguredWebhookCertsUsingVault)) }}
+{{ $hasConfiguredWebhookCertsUsingVault := (and .Values.global.secretsBackend.vault.enabled .Values.global.secretsBackend.vault.connectInjectRole .Values.global.secretsBackend.vault.connectInject.tlsCert.secretName .Values.global.secretsBackend.vault.connectInject.caCert.secretName .Values.global.secretsBackend.vault.controllerRole .Values.global.secretsBackend.vault.controller.tlsCert.secretName .Values.global.secretsBackend.vault.controller.caCert.secretName) -}}
+{{- if (and (or .Values.connectInject.enabled .Values.controller.enabled) (not $hasConfiguredWebhookCertsUsingVault)) }}
 apiVersion: apps/v1
 kind: Deployment
 metadata:
diff --git a/charts/consul/templates/webhook-cert-manager-podsecuritypolicy.yaml b/charts/consul/templates/webhook-cert-manager-podsecuritypolicy.yaml
index 4d685edc39..833d902343 100644
--- a/charts/consul/templates/webhook-cert-manager-podsecuritypolicy.yaml
+++ b/charts/consul/templates/webhook-cert-manager-podsecuritypolicy.yaml
@@ -1,6 +1,5 @@
-{{ $hasConfiguredWebhookCertsUsingVault := (and .Values.global.secretsBackend.vault.enabled .Values.global.secretsBackend.vault.connectInjectRole .Values.global.secretsBackend.vault.connectInject.tlsCert.secretName .Values.global.secretsBackend.vault.connectInject.caCert.secretName) -}}
-{{- if (and .Values.global.enablePodSecurityPolicies (or (and (ne (.Values.connectInject.enabled | toString) "-") .Values.connectInject.enabled) (and (eq (.Values.connectInject.enabled | toString) "-") .Values.global.enabled))) }}
-{{- if (and .Values.connectInject.enabled (not $hasConfiguredWebhookCertsUsingVault)) }}
+{{ $hasConfiguredWebhookCertsUsingVault := (and .Values.global.secretsBackend.vault.enabled .Values.global.secretsBackend.vault.connectInjectRole .Values.global.secretsBackend.vault.connectInject.tlsCert.secretName .Values.global.secretsBackend.vault.connectInject.caCert.secretName .Values.global.secretsBackend.vault.controllerRole .Values.global.secretsBackend.vault.controller.tlsCert.secretName .Values.global.secretsBackend.vault.controller.caCert.secretName) -}}
+{{- if (and (or .Values.connectInject.enabled .Values.controller.enabled) .Values.global.enablePodSecurityPolicies (not $hasConfiguredWebhookCertsUsingVault)) }}
 apiVersion: policy/v1beta1
 kind: PodSecurityPolicy
 metadata:
@@ -40,4 +39,3 @@ spec:
     rule: 'RunAsAny'
   readOnlyRootFilesystem: false
 {{- end }}
-{{- end }}
diff --git a/charts/consul/templates/webhook-cert-manager-serviceaccount.yaml b/charts/consul/templates/webhook-cert-manager-serviceaccount.yaml
index 68c54f3c27..e1680d6e50 100644
--- a/charts/consul/templates/webhook-cert-manager-serviceaccount.yaml
+++ b/charts/consul/templates/webhook-cert-manager-serviceaccount.yaml
@@ -1,5 +1,5 @@
-{{ $hasConfiguredWebhookCertsUsingVault := (and .Values.global.secretsBackend.vault.enabled .Values.global.secretsBackend.vault.connectInjectRole .Values.global.secretsBackend.vault.connectInject.tlsCert.secretName .Values.global.secretsBackend.vault.connectInject.caCert.secretName) -}}
-{{- if (and .Values.connectInject.enabled (not $hasConfiguredWebhookCertsUsingVault)) }}
+{{ $hasConfiguredWebhookCertsUsingVault := (and .Values.global.secretsBackend.vault.enabled .Values.global.secretsBackend.vault.connectInjectRole .Values.global.secretsBackend.vault.connectInject.tlsCert.secretName .Values.global.secretsBackend.vault.connectInject.caCert.secretName .Values.global.secretsBackend.vault.controllerRole .Values.global.secretsBackend.vault.controller.tlsCert.secretName .Values.global.secretsBackend.vault.controller.caCert.secretName) -}}
+{{- if (and (or .Values.connectInject.enabled .Values.controller.enabled) (not $hasConfiguredWebhookCertsUsingVault)) }}
 apiVersion: v1
 kind: ServiceAccount
 metadata:
diff --git a/charts/consul/test/docker/Test.dockerfile b/charts/consul/test/docker/Test.dockerfile
index 85f3a607e3..d60e8b0a24 100644
--- a/charts/consul/test/docker/Test.dockerfile
+++ b/charts/consul/test/docker/Test.dockerfile
@@ -6,7 +6,7 @@
 # a script to configure kubectl, potentially install Helm, and run the tests
 # manually. This image only has the dependencies pre-installed.
 
-FROM cimg/go:1.19
+FROM cimg/go:1.18
 
 # change the user to root so we can install stuff
 USER root
@@ -61,7 +61,7 @@ RUN curl -sSL https://mirror.openshift.com/pub/openshift-v4/clients/ocp/latest/o
   && mv /tmp/oc /usr/local/bin/oc
 
 # AWS CLI
-RUN curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" \
+RUN curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64-2.8.3.zip" -o "awscliv2.zip" \
   && unzip awscliv2.zip \
   && ./aws/install --bin-dir /usr/local/bin \
   && rm awscliv2.zip \
@@ -73,4 +73,4 @@ RUN curl -Lo aws-iam-authenticator https://github.com/kubernetes-sigs/aws-iam-au
   && mv ./aws-iam-authenticator /usr/local/bin/aws-iam-authenticator
 
 # change the user back to what circleci/golang image has
-USER circleci
+USER circleci
\ No newline at end of file
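[Editor's note] The test image now pins the AWS CLI bundle instead of tracking latest. Before rebuilding Test.dockerfile it may be worth confirming the versioned archive is still published; a HEAD request against the URL from the hunk above should be enough (the `-2.8.3` suffix follows Amazon's versioned-URL scheme):

    curl -fsSI https://awscli.amazonaws.com/awscli-exe-linux-x86_64-2.8.3.zip | head -n 1    # expect a 200 response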
"default" { client_secret = var.client_secret } + role_based_access_control { + enabled = true + } + tags = var.tags } diff --git a/charts/consul/test/terraform/aks/variables.tf b/charts/consul/test/terraform/aks/variables.tf index bb9dbef537..1651ce7b09 100644 --- a/charts/consul/test/terraform/aks/variables.tf +++ b/charts/consul/test/terraform/aks/variables.tf @@ -27,7 +27,7 @@ variable "cluster_count" { } variable "tags" { - type = map(any) + type = map default = {} description = "Tags to attach to the created resources." } diff --git a/charts/consul/test/terraform/eks/main.tf b/charts/consul/test/terraform/eks/main.tf index ca48a5a8fe..9ccc2cdd2b 100644 --- a/charts/consul/test/terraform/eks/main.tf +++ b/charts/consul/test/terraform/eks/main.tf @@ -3,8 +3,8 @@ provider "aws" { region = var.region assume_role { - role_arn = var.role_arn - duration = "2700s" + role_arn = var.role_arn + duration_seconds = 2700 } } @@ -58,9 +58,8 @@ module "eks" { kubeconfig_api_version = "client.authentication.k8s.io/v1beta1" cluster_name = "consul-k8s-${random_id.suffix[count.index].dec}" - cluster_version = "1.23" + cluster_version = "1.21" subnets = module.vpc[count.index].private_subnets - enable_irsa = true vpc_id = module.vpc[count.index].vpc_id @@ -81,47 +80,6 @@ module "eks" { tags = var.tags } -resource "aws_iam_role" "csi-driver-role" { - count = var.cluster_count - assume_role_policy = jsonencode({ - Version = "2012-10-17", - Statement = [ - { - Effect = "Allow", - Action = "sts:AssumeRoleWithWebIdentity", - Principal = { - Federated = module.eks[count.index].oidc_provider_arn - }, - Condition = { - StringEquals = { - join(":", [trimprefix(module.eks[count.index].cluster_oidc_issuer_url, "https://"), "aud"]) = ["sts.amazonaws.com"], - join(":", [trimprefix(module.eks[count.index].cluster_oidc_issuer_url, "https://"), "sub"]) = ["system:serviceaccount:kube-system:ebs-csi-controller-sa"], - } - } - } - ] - }) -} - -data "aws_iam_policy" "csi-driver-policy" { - name = "AmazonEBSCSIDriverPolicy" -} - -resource "aws_iam_role_policy_attachment" "csi" { - count = var.cluster_count - role = aws_iam_role.csi-driver-role[count.index].name - policy_arn = data.aws_iam_policy.csi-driver-policy.arn -} - -resource "aws_eks_addon" "csi-driver" { - count = var.cluster_count - cluster_name = module.eks[count.index].cluster_id - addon_name = "aws-ebs-csi-driver" - addon_version = "v1.15.0-eksbuild.1" - service_account_role_arn = aws_iam_role.csi-driver-role[count.index].arn - resolve_conflicts = "OVERWRITE" -} - data "aws_eks_cluster" "cluster" { count = var.cluster_count name = module.eks[count.index].cluster_id diff --git a/charts/consul/test/terraform/eks/variables.tf b/charts/consul/test/terraform/eks/variables.tf index 05f383168b..361a5f5c45 100644 --- a/charts/consul/test/terraform/eks/variables.tf +++ b/charts/consul/test/terraform/eks/variables.tf @@ -21,7 +21,7 @@ variable "role_arn" { } variable "tags" { - type = map(any) + type = map default = {} description = "Tags to attach to the created resources." } diff --git a/charts/consul/test/terraform/gke/main.tf b/charts/consul/test/terraform/gke/main.tf index 1bd574ce2c..1574df36b3 100644 --- a/charts/consul/test/terraform/gke/main.tf +++ b/charts/consul/test/terraform/gke/main.tf @@ -1,4 +1,4 @@ -provider "google" { +provider "google-beta" { project = var.project version = "~> 3.49.0" } @@ -10,12 +10,13 @@ resource "random_id" "suffix" { data "google_container_engine_versions" "main" { location = var.zone - version_prefix = "1.25." 
+ version_prefix = "1.23." } resource "google_container_cluster" "cluster" { - provider = "google" - count = var.cluster_count + provider = "google-beta" + + count = var.cluster_count name = "consul-k8s-${random_id.suffix[count.index].dec}" project = var.project @@ -27,6 +28,10 @@ resource "google_container_cluster" "cluster" { tags = ["consul-k8s-${random_id.suffix[count.index].dec}"] machine_type = "e2-standard-4" } + pod_security_policy_config { + enabled = true + } + resource_labels = var.labels } diff --git a/charts/consul/test/terraform/gke/variables.tf b/charts/consul/test/terraform/gke/variables.tf index ef4a429116..04d214cedb 100644 --- a/charts/consul/test/terraform/gke/variables.tf +++ b/charts/consul/test/terraform/gke/variables.tf @@ -30,7 +30,7 @@ variable "cluster_count" { } variable "labels" { - type = map(any) + type = map default = {} description = "Labels to attach to the created resources." } diff --git a/charts/consul/test/terraform/openshift/variables.tf b/charts/consul/test/terraform/openshift/variables.tf index 1df518f8ed..f2479e3229 100644 --- a/charts/consul/test/terraform/openshift/variables.tf +++ b/charts/consul/test/terraform/openshift/variables.tf @@ -9,7 +9,7 @@ variable "cluster_count" { } variable "tags" { - type = map(any) + type = map default = {} description = "Tags to attach to the created resources." } diff --git a/charts/consul/test/unit/api-gateway-controller-deployment.bats b/charts/consul/test/unit/api-gateway-controller-deployment.bats index 2dbcb9e0f1..543cdaecd7 100755 --- a/charts/consul/test/unit/api-gateway-controller-deployment.bats +++ b/charts/consul/test/unit/api-gateway-controller-deployment.bats @@ -15,6 +15,7 @@ load _helpers -s templates/api-gateway-controller-deployment.yaml \ --set 'apiGateway.enabled=true' \ . + [ "$status" -eq 1 ] [[ "$output" =~ "apiGateway.image must be set to enable api gateway" ]] } @@ -189,7 +190,7 @@ load _helpers [ "${actual}" = "true" ] } -@test "apiGateway/Deployment: consul-auto-encrypt-ca-cert volumeMount is added when TLS with auto-encrypt is enabled with clients" { +@test "apiGateway/Deployment: consul-auto-encrypt-ca-cert volumeMount is added when TLS with auto-encrypt is enabled" { cd `chart_dir` local actual=$(helm template \ -s templates/api-gateway-controller-deployment.yaml \ @@ -197,26 +198,11 @@ load _helpers --set 'apiGateway.image=foo' \ --set 'global.tls.enabled=true' \ --set 'global.tls.enableAutoEncrypt=true' \ - --set 'client.enabled=true' \ . | tee /dev/stderr | yq '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "consul-auto-encrypt-ca-cert") | length > 0' | tee /dev/stderr) [ "${actual}" = "true" ] } -@test "apiGateway/Deployment: consul-ca-cert volumeMount is added when TLS with auto-encrypt is enabled without clients" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/api-gateway-controller-deployment.yaml \ - --set 'apiGateway.enabled=true' \ - --set 'apiGateway.image=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'client.enabled=false' \ - . 
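[Editor's note] The `type = map(any)` to `type = map` changes above revert to pre-0.12 type syntax, which newer Terraform releases only accept with a deprecation warning, so this assumes the older Terraform these fixtures are pinned to. A hedged local sanity check that needs no cloud credentials:

    cd charts/consul/test/terraform/eks
    terraform init -backend=false && terraform validate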
diff --git a/charts/consul/test/unit/api-gateway-controller-deployment.bats b/charts/consul/test/unit/api-gateway-controller-deployment.bats
index 2dbcb9e0f1..543cdaecd7 100755
--- a/charts/consul/test/unit/api-gateway-controller-deployment.bats
+++ b/charts/consul/test/unit/api-gateway-controller-deployment.bats
@@ -15,6 +15,7 @@ load _helpers
       -s templates/api-gateway-controller-deployment.yaml \
       --set 'apiGateway.enabled=true' \
       .
+  [ "$status" -eq 1 ]
   [[ "$output" =~ "apiGateway.image must be set to enable api gateway" ]]
 }
@@ -189,7 +190,7 @@ load _helpers
   [ "${actual}" = "true" ]
 }
 
-@test "apiGateway/Deployment: consul-auto-encrypt-ca-cert volumeMount is added when TLS with auto-encrypt is enabled with clients" {
+@test "apiGateway/Deployment: consul-auto-encrypt-ca-cert volumeMount is added when TLS with auto-encrypt is enabled" {
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/api-gateway-controller-deployment.yaml \
@@ -197,26 +198,11 @@ load _helpers
       --set 'apiGateway.image=foo' \
       --set 'global.tls.enabled=true' \
       --set 'global.tls.enableAutoEncrypt=true' \
-      --set 'client.enabled=true' \
       . | tee /dev/stderr |
       yq '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "consul-auto-encrypt-ca-cert") | length > 0' | tee /dev/stderr)
   [ "${actual}" = "true" ]
 }
 
-@test "apiGateway/Deployment: consul-ca-cert volumeMount is added when TLS with auto-encrypt is enabled without clients" {
-  cd `chart_dir`
-  local actual=$(helm template \
-      -s templates/api-gateway-controller-deployment.yaml \
-      --set 'apiGateway.enabled=true' \
-      --set 'apiGateway.image=foo' \
-      --set 'global.tls.enabled=true' \
-      --set 'global.tls.enableAutoEncrypt=true' \
-      --set 'client.enabled=false' \
-      . | tee /dev/stderr |
-      yq '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "consul-ca-cert") | length > 0' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-}
-
 @test "apiGateway/Deployment: get-auto-encrypt-client-ca init container is created when TLS with auto-encrypt is enabled" {
   cd `chart_dir`
   local actual=$(helm template \
@@ -299,18 +285,6 @@ load _helpers
   [ "${actual}" = "true" ]
 }
 
-@test "apiGateway/Deployment: CONSUL_LOGIN_DATACENTER is set when acls are enabled" {
-  cd `chart_dir`
-  local actual=$(helm template \
-      -s templates/api-gateway-controller-deployment.yaml \
-      --set 'apiGateway.enabled=true' \
-      --set 'apiGateway.image=foo' \
-      --set 'global.acls.manageSystemACLs=true' \
-      . | tee /dev/stderr |
-      yq '[.spec.template.spec.containers[0].env[2].name] | any(contains("CONSUL_LOGIN_DATACENTER"))' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-}
-
 @test "apiGateway/Deployment: init container is created when global.acls.manageSystemACLs=true" {
   cd `chart_dir`
   local object=$(helm template \
@@ -330,27 +304,12 @@ load _helpers
   [ "${actual}" = "true" ]
 
   local actual=$(echo $object |
-      yq '[.env[0].name] | any(contains("NAMESPACE"))' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-
-  local actual=$(echo $object |
-      yq '[.env[1].name] | any(contains("POD_NAME"))' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-
-  local actual=$(echo $object |
-      yq '[.env[2].name] | any(contains("CONSUL_LOGIN_META"))' | tee /dev/stderr)
+      yq '[.env[1].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr)
   [ "${actual}" = "true" ]
 
   local actual=$(echo $object |
-      yq '[.env[2].value] | any(contains("component=api-gateway-controller,pod=$(NAMESPACE)/$(POD_NAME)"))' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-
-  local actual=$(echo $object |
-      yq '[.env[3].name] | any(contains("CONSUL_LOGIN_DATACENTER"))' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-
-  local actual=$(echo $object |
-      yq -r '[.env[8].value] | any(contains("5s"))' | tee /dev/stderr)
+      yq '[.env[1].value] | any(contains("http://$(HOST_IP):8500"))' | tee /dev/stderr)
+  echo $actual
   [ "${actual}" = "true" ]
 }
@@ -371,52 +330,26 @@ load _helpers
   [ "${actual}" = "true" ]
 
   local actual=$(echo $object |
-      yq '.env[] | select(.name == "NAMESPACE") | [.valueFrom.fieldRef.fieldPath] | any(contains("metadata.namespace"))' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-
-  local actual=$(echo $object |
-      yq '.env[] | select(.name == "POD_NAME") | [.valueFrom.fieldRef.fieldPath] | any(contains("metadata.name"))' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-
-  local actual=$(echo $object |
-      yq '.env[] | select(.name == "CONSUL_LOGIN_META") | [.value] | any(contains("component=api-gateway-controller,pod=$(NAMESPACE)/$(POD_NAME)"))' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-
-  local actual=$(echo $object |
-      yq '.env[] | select(.name == "CONSUL_ADDRESSES") | [.value] | any(contains("release-name-consul-server.default.svc"))' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-
-  local actual=$(echo $object |
-      yq '.env[] | select(.name == "CONSUL_GRPC_PORT") | [.value] | any(contains("8502"))' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-
-  local actual=$(echo $object |
-      yq '.env[] | select(.name == "CONSUL_HTTP_PORT") | [.value] | any(contains("8501"))' | tee /dev/stderr)
+      yq '[.env[1].name] | any(contains("CONSUL_CACERT"))' | tee /dev/stderr)
   [ "${actual}" = "true" ]
 
   local actual=$(echo $object |
-      yq '.env[] | select(.name == "CONSUL_DATACENTER") | [.value] | any(contains("dc1"))' | tee /dev/stderr)
+      yq '[.env[2].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr)
   [ "${actual}" = "true" ]
 
   local actual=$(echo $object |
-      yq '.env[] | select(.name == "CONSUL_API_TIMEOUT") | [.value] | any(contains("5s"))' | tee /dev/stderr)
+      yq '[.env[2].value] | any(contains("https://$(HOST_IP):8501"))' | tee /dev/stderr)
+  echo $actual
   [ "${actual}" = "true" ]
 
   local actual=$(echo $object |
-      yq '.env[] | select(.name == "CONSUL_USE_TLS") | [.value] | any(contains("true"))' | tee /dev/stderr)
+      yq '.volumeMounts[1] | any(contains("consul-ca-cert"))' | tee /dev/stderr)
   [ "${actual}" = "true" ]
 
   local actual=$(echo $object |
-      yq '.env[] | select(.name == "CONSUL_CACERT_FILE") | [.value] | any(contains("/consul/tls/ca/tls.crt"))' | tee /dev/stderr)
+      yq -r '.command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr)
   [ "${actual}" = "true" ]
-
-  local actual=$(echo $object |
-      yq '.volumeMounts[] | select(.name == "consul-ca-cert") | [.mountPath] | any(contains("/consul/tls/ca"))' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-
-  local actual=$(echo $object |
-      yq '.volumeMounts[] | select(.name == "consul-data") | [.mountPath] | any(contains("/consul/login"))' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
 }
 
 @test "apiGateway/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command with Partitions enabled" {
@@ -438,87 +371,32 @@ load _helpers
   [ "${actual}" = "true" ]
 
   local actual=$(echo $object |
-      yq -r '.command | any(contains("-auth-method-name=release-name-consul-k8s-component-auth-method"))' | tee /dev/stderr)
+      yq -r '.command | any(contains("-acl-auth-method=release-name-consul-k8s-component-auth-method"))' | tee /dev/stderr)
   [ "${actual}" = "true" ]
 
   local actual=$(echo $object |
-      yq '.env[] | select(.name == "NAMESPACE") | [.valueFrom.fieldRef.fieldPath] | any(contains("metadata.namespace"))' | tee /dev/stderr)
+      yq -r '.command | any(contains("-partition=default"))' | tee /dev/stderr)
   [ "${actual}" = "true" ]
 
   local actual=$(echo $object |
-      yq '.env[] | select(.name == "POD_NAME") | [.valueFrom.fieldRef.fieldPath] | any(contains("metadata.name"))' | tee /dev/stderr)
+      yq '[.env[1].name] | any(contains("CONSUL_CACERT"))' | tee /dev/stderr)
   [ "${actual}" = "true" ]
 
   local actual=$(echo $object |
-      yq '.env[] | select(.name == "CONSUL_LOGIN_META") | [.value] | any(contains("component=api-gateway-controller,pod=$(NAMESPACE)/$(POD_NAME)"))' | tee /dev/stderr)
+      yq '[.env[2].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr)
   [ "${actual}" = "true" ]
 
   local actual=$(echo $object |
-      yq '.env[] | select(.name == "CONSUL_ADDRESSES") | [.value] | any(contains("release-name-consul-server.default.svc"))' | tee /dev/stderr)
+      yq '[.env[2].value] | any(contains("https://$(HOST_IP):8501"))' | tee /dev/stderr)
+  echo $actual
   [ "${actual}" = "true" ]
 
   local actual=$(echo $object |
-      yq '.env[] | select(.name == "CONSUL_GRPC_PORT") | [.value] | any(contains("8502"))' | tee /dev/stderr)
+      yq '.volumeMounts[1] | any(contains("consul-ca-cert"))' | tee /dev/stderr)
   [ "${actual}" = "true" ]
 
   local actual=$(echo $object |
-      yq '.env[] | select(.name == "CONSUL_HTTP_PORT") | [.value] | any(contains("8501"))' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-
-  local actual=$(echo $object |
-      yq '.env[] | select(.name == "CONSUL_DATACENTER") | [.value] | any(contains("dc1"))' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-
-  local actual=$(echo $object |
-      yq '.env[] | select(.name == "CONSUL_API_TIMEOUT") | [.value] | any(contains("5s"))' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-
-  local actual=$(echo $object |
-      yq '.env[] | select(.name == "CONSUL_PARTITION") | [.value] | any(contains("default"))' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-
-  local actual=$(echo $object |
-      yq '.env[] | select(.name == "CONSUL_LOGIN_PARTITION") | [.value] | any(contains("default"))' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-
-  local actual=$(echo $object |
-      yq '.env[] | select(.name == "CONSUL_USE_TLS") | [.value] | any(contains("true"))' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-
-  local actual=$(echo $object |
-      yq '.env[] | select(.name == "CONSUL_CACERT_FILE") | [.value] | any(contains("/consul/tls/ca/tls.crt"))' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-
-  local actual=$(echo $object |
-      yq '.volumeMounts[] | select(.name == "consul-ca-cert") | [.mountPath] | any(contains("/consul/tls/ca"))' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-
-  local actual=$(echo $object |
-      yq '.volumeMounts[] | select(.name == "consul-data") | [.mountPath] | any(contains("/consul/login"))' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-}
-
-@test "apiGateway/Deployment: consul login datacenter is set to primary when when federation enabled in non-primary datacenter" {
-  cd `chart_dir`
-  local object=$(helm template \
-      -s templates/api-gateway-controller-deployment.yaml \
-      --set 'apiGateway.enabled=true' \
-      --set 'apiGateway.image=foo' \
-      --set 'meshGateway.enabled=true' \
-      --set 'global.acls.manageSystemACLs=true' \
-      --set 'global.datacenter=dc1' \
-      --set 'global.federation.enabled=true' \
-      --set 'global.federation.primaryDatacenter=dc2' \
-      --set 'global.tls.enabled=true' \
-      . | tee /dev/stderr |
-      yq '.spec.template.spec.initContainers[1]' | tee /dev/stderr)
-
-  local actual=$(echo $object |
-      yq '[.env[3].name] | any(contains("CONSUL_LOGIN_DATACENTER"))' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-
-  local actual=$(echo $object |
-      yq '[.env[3].value] | any(contains("dc2"))' | tee /dev/stderr)
+      yq -r '.command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr)
   [ "${actual}" = "true" ]
 }
@@ -570,11 +448,11 @@ load _helpers
   [ "${actual}" = "true" ]
 
   local actual=$(echo $object |
-      yq -r '.command | any(contains("-auth-method-name=release-name-consul-k8s-component-auth-method-dc2"))' | tee /dev/stderr)
+      yq -r '.command | any(contains("-acl-auth-method=release-name-consul-k8s-component-auth-method-dc2"))' | tee /dev/stderr)
   [ "${actual}" = "true" ]
 
   local actual=$(echo $object |
-      yq '[.env[3].value] | any(contains("dc1"))' | tee /dev/stderr)
+      yq -r '.command | any(contains("-primary-datacenter=dc1"))' | tee /dev/stderr)
   [ "${actual}" = "true" ]
 }
@@ -595,51 +473,24 @@ load _helpers
   [ "${actual}" = "true" ]
 
   local actual=$(echo $object |
-      yq '.env[] | select(.name == "NAMESPACE") | [.valueFrom.fieldRef.fieldPath] | any(contains("metadata.namespace"))' | tee /dev/stderr)
+      yq '[.env[1].name] | any(contains("CONSUL_CACERT"))' | tee /dev/stderr)
   [ "${actual}" = "true" ]
 
   local actual=$(echo $object |
-      yq '.env[] | select(.name == "POD_NAME") | [.valueFrom.fieldRef.fieldPath] | any(contains("metadata.name"))' | tee /dev/stderr)
+      yq '[.env[2].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr)
   [ "${actual}" = "true" ]
 
   local actual=$(echo $object |
-      yq '.env[] | select(.name == "CONSUL_LOGIN_META") | [.value] | any(contains("component=api-gateway-controller,pod=$(NAMESPACE)/$(POD_NAME)"))' | tee /dev/stderr)
+      yq '[.env[2].value] | any(contains("https://$(HOST_IP):8501"))' | tee /dev/stderr)
+  echo $actual
   [ "${actual}" = "true" ]
 
   local actual=$(echo $object |
-      yq '.env[] | select(.name == "CONSUL_ADDRESSES") | [.value] | any(contains("release-name-consul-server.default.svc"))' | tee /dev/stderr)
+      yq '.volumeMounts[1] | any(contains("consul-auto-encrypt-ca-cert"))' | tee /dev/stderr)
   [ "${actual}" = "true" ]
 
   local actual=$(echo $object |
-      yq '.env[] | select(.name == "CONSUL_GRPC_PORT") | [.value] | any(contains("8502"))' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-
-  local actual=$(echo $object |
-      yq '.env[] | select(.name == "CONSUL_HTTP_PORT") | [.value] | any(contains("8501"))' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-
-  local actual=$(echo $object |
-      yq '.env[] | select(.name == "CONSUL_DATACENTER") | [.value] | any(contains("dc1"))' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-
-  local actual=$(echo $object |
-      yq '.env[] | select(.name == "CONSUL_API_TIMEOUT") | [.value] | any(contains("5s"))' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-
-  local actual=$(echo $object |
-      yq '.env[] | select(.name == "CONSUL_USE_TLS") | [.value] | any(contains("true"))' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-
-  local actual=$(echo $object |
-      yq '.env[] | select(.name == "CONSUL_CACERT_FILE") | [.value] | any(contains("/consul/tls/ca/tls.crt"))' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-
-  local actual=$(echo $object |
-      yq '.volumeMounts[] | select(.name == "consul-ca-cert") | [.mountPath] | any(contains("/consul/tls/ca"))' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-
-  local actual=$(echo $object |
-      yq '.volumeMounts[] | select(.name == "consul-data") | [.mountPath] | any(contains("/consul/login"))' | tee /dev/stderr)
+      yq -r '.command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr)
   [ "${actual}" = "true" ]
 }
@@ -1054,525 +905,6 @@ load _helpers
   [ "${actual}" = "bar" ]
 }
-
-#--------------------------------------------------------------------
-# global.cloud
-
-@test "apiGateway/Deployment: fails when global.cloud.enabled is true and global.cloud.clientId.secretName is not set but global.cloud.clientSecret.secretName and global.cloud.resourceId.secretName is set" {
-  cd `chart_dir`
-  run helm template \
-      -s templates/api-gateway-controller-deployment.yaml \
-      --set 'apiGateway.enabled=true' \
-      --set 'apiGateway.image=foo' \
-      --set 'global.tls.enabled=true' \
-      --set 'global.tls.enableAutoEncrypt=true' \
-      --set 'global.datacenter=dc-foo' \
-      --set 'global.domain=bar' \
-      --set 'global.cloud.enabled=true' \
-      --set 'global.cloud.clientSecret.secretName=client-id-name' \
-      --set 'global.cloud.clientSecret.secretKey=client-id-key' \
-      --set 'global.cloud.resourceId.secretName=client-resource-id-name' \
-      --set 'global.cloud.resourceId.secretKey=client-resource-id-key' \
-      .
-  [ "$status" -eq 1 ]
-  [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]]
-}
-
-@test "apiGateway/Deployment: fails when global.cloud.enabled is true and global.cloud.clientSecret.secretName is not set but global.cloud.clientId.secretName and global.cloud.resourceId.secretName is set" {
-  cd `chart_dir`
-  run helm template \
-      -s templates/api-gateway-controller-deployment.yaml \
-      --set 'apiGateway.enabled=true' \
-      --set 'apiGateway.image=foo' \
-      --set 'global.tls.enabled=true' \
-      --set 'global.tls.enableAutoEncrypt=true' \
-      --set 'global.datacenter=dc-foo' \
-      --set 'global.domain=bar' \
-      --set 'global.cloud.enabled=true' \
-      --set 'global.cloud.clientId.secretName=client-id-name' \
-      --set 'global.cloud.clientId.secretKey=client-id-key' \
-      --set 'global.cloud.resourceId.secretName=resource-id-name' \
-      --set 'global.cloud.resourceId.secretKey=resource-id-key' \
-      .
-  [ "$status" -eq 1 ]
-  [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]]
-}
-
-@test "apiGateway/Deployment: fails when global.cloud.enabled is true and global.cloud.resourceId.secretName is not set but global.cloud.clientId.secretName and global.cloud.clientSecret.secretName is set" {
-  cd `chart_dir`
-  run helm template \
-      -s templates/api-gateway-controller-deployment.yaml \
-      --set 'apiGateway.enabled=true' \
-      --set 'apiGateway.image=foo' \
-      --set 'global.tls.enabled=true' \
-      --set 'global.tls.enableAutoEncrypt=true' \
-      --set 'global.datacenter=dc-foo' \
-      --set 'global.domain=bar' \
-      --set 'global.cloud.enabled=true' \
-      --set 'global.cloud.clientId.secretName=client-id-name' \
-      --set 'global.cloud.clientId.secretKey=client-id-key' \
-      --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \
-      --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \
-      .
-  [ "$status" -eq 1 ]
-  [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]]
-}
-
-@test "apiGateway/Deployment: fails when global.cloud.resourceId.secretName is set but global.cloud.resourceId.secretKey is not set." {
-  cd `chart_dir`
-  run helm template \
-      -s templates/api-gateway-controller-deployment.yaml \
-      --set 'apiGateway.enabled=true' \
-      --set 'apiGateway.image=foo' \
-      --set 'global.tls.enabled=true' \
-      --set 'global.tls.enableAutoEncrypt=true' \
-      --set 'global.datacenter=dc-foo' \
-      --set 'global.domain=bar' \
-      --set 'global.cloud.enabled=true' \
-      --set 'global.cloud.clientId.secretName=client-id-name' \
-      --set 'global.cloud.clientId.secretKey=client-id-key' \
-      --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \
-      --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \
-      --set 'global.cloud.resourceId.secretName=resource-id-name' \
-      .
-  [ "$status" -eq 1 ]
-  [[ "$output" =~ "When either global.cloud.resourceId.secretName or global.cloud.resourceId.secretKey is defined, both must be set." ]]
-}
-
-@test "apiGateway/Deployment: fails when global.cloud.authURL.secretName is set but global.cloud.authURL.secretKey is not set." {
-  cd `chart_dir`
-  run helm template \
-      -s templates/api-gateway-controller-deployment.yaml \
-      --set 'apiGateway.enabled=true' \
-      --set 'apiGateway.image=foo' \
-      --set 'global.tls.enabled=true' \
-      --set 'global.tls.enableAutoEncrypt=true' \
-      --set 'global.datacenter=dc-foo' \
-      --set 'global.domain=bar' \
-      --set 'global.cloud.enabled=true' \
-      --set 'global.cloud.clientId.secretName=client-id-name' \
-      --set 'global.cloud.clientId.secretKey=client-id-key' \
-      --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \
-      --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \
-      --set 'global.cloud.resourceId.secretName=resource-id-name' \
-      --set 'global.cloud.resourceId.secretKey=resource-id-key' \
-      --set 'global.cloud.authUrl.secretName=auth-url-name' \
-      .
-  [ "$status" -eq 1 ]
-
-  [[ "$output" =~ "When either global.cloud.authUrl.secretName or global.cloud.authUrl.secretKey is defined, both must be set." ]]
-}
-
-@test "apiGateway/Deployment: fails when global.cloud.authURL.secretKey is set but global.cloud.authURL.secretName is not set." {
-  cd `chart_dir`
-  run helm template \
-      -s templates/api-gateway-controller-deployment.yaml \
-      --set 'apiGateway.enabled=true' \
-      --set 'apiGateway.image=foo' \
-      --set 'global.tls.enabled=true' \
-      --set 'global.tls.enableAutoEncrypt=true' \
-      --set 'global.datacenter=dc-foo' \
-      --set 'global.domain=bar' \
-      --set 'global.cloud.enabled=true' \
-      --set 'global.cloud.clientId.secretName=client-id-name' \
-      --set 'global.cloud.clientId.secretKey=client-id-key' \
-      --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \
-      --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \
-      --set 'global.cloud.resourceId.secretName=resource-id-name' \
-      --set 'global.cloud.resourceId.secretKey=resource-id-key' \
-      --set 'global.cloud.authUrl.secretKey=auth-url-key' \
-      .
-  [ "$status" -eq 1 ]
-
-  [[ "$output" =~ "When either global.cloud.authUrl.secretName or global.cloud.authUrl.secretKey is defined, both must be set." ]]
-}
-
-@test "apiGateway/Deployment: fails when global.cloud.apiHost.secretName is set but global.cloud.apiHost.secretKey is not set." {
-  cd `chart_dir`
-  run helm template \
-      -s templates/api-gateway-controller-deployment.yaml \
-      --set 'apiGateway.enabled=true' \
-      --set 'apiGateway.image=foo' \
-      --set 'global.tls.enabled=true' \
-      --set 'global.tls.enableAutoEncrypt=true' \
-      --set 'global.datacenter=dc-foo' \
-      --set 'global.domain=bar' \
-      --set 'global.cloud.enabled=true' \
-      --set 'global.cloud.clientId.secretName=client-id-name' \
-      --set 'global.cloud.clientId.secretKey=client-id-key' \
-      --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \
-      --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \
-      --set 'global.cloud.resourceId.secretName=resource-id-name' \
-      --set 'global.cloud.resourceId.secretKey=resource-id-key' \
-      --set 'global.cloud.apiHost.secretName=auth-url-name' \
-      .
-  [ "$status" -eq 1 ]
-
-  [[ "$output" =~ "When either global.cloud.apiHost.secretName or global.cloud.apiHost.secretKey is defined, both must be set." ]]
-}
-
-@test "apiGateway/Deployment: fails when global.cloud.apiHost.secretKey is set but global.cloud.apiHost.secretName is not set." {
-  cd `chart_dir`
-  run helm template \
-      -s templates/api-gateway-controller-deployment.yaml \
-      --set 'apiGateway.enabled=true' \
-      --set 'apiGateway.image=foo' \
-      --set 'global.tls.enabled=true' \
-      --set 'global.tls.enableAutoEncrypt=true' \
-      --set 'global.datacenter=dc-foo' \
-      --set 'global.domain=bar' \
-      --set 'global.cloud.enabled=true' \
-      --set 'global.cloud.clientId.secretName=client-id-name' \
-      --set 'global.cloud.clientId.secretKey=client-id-key' \
-      --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \
-      --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \
-      --set 'global.cloud.resourceId.secretName=resource-id-name' \
-      --set 'global.cloud.resourceId.secretKey=resource-id-key' \
-      --set 'global.cloud.apiHost.secretKey=auth-url-key' \
-      .
-  [ "$status" -eq 1 ]
-
-  [[ "$output" =~ "When either global.cloud.apiHost.secretName or global.cloud.apiHost.secretKey is defined, both must be set." ]]
-}
-
-@test "apiGateway/Deployment: fails when global.cloud.scadaAddress.secretName is set but global.cloud.scadaAddress.secretKey is not set." {
-  cd `chart_dir`
-  run helm template \
-      -s templates/api-gateway-controller-deployment.yaml \
-      --set 'apiGateway.enabled=true' \
-      --set 'apiGateway.image=foo' \
-      --set 'global.tls.enabled=true' \
-      --set 'global.tls.enableAutoEncrypt=true' \
-      --set 'global.datacenter=dc-foo' \
-      --set 'global.domain=bar' \
-      --set 'global.cloud.enabled=true' \
-      --set 'global.cloud.clientId.secretName=client-id-name' \
-      --set 'global.cloud.clientId.secretKey=client-id-key' \
-      --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \
-      --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \
-      --set 'global.cloud.resourceId.secretName=resource-id-name' \
-      --set 'global.cloud.resourceId.secretKey=resource-id-key' \
-      --set 'global.cloud.scadaAddress.secretName=scada-address-name' \
-      .
-  [ "$status" -eq 1 ]
-
-  [[ "$output" =~ "When either global.cloud.scadaAddress.secretName or global.cloud.scadaAddress.secretKey is defined, both must be set." ]]
-}
-
-@test "apiGateway/Deployment: fails when global.cloud.scadaAddress.secretKey is set but global.cloud.scadaAddress.secretName is not set." {
-  cd `chart_dir`
-  run helm template \
-      -s templates/api-gateway-controller-deployment.yaml \
-      --set 'apiGateway.enabled=true' \
-      --set 'apiGateway.image=foo' \
-      --set 'global.tls.enabled=true' \
-      --set 'global.tls.enableAutoEncrypt=true' \
-      --set 'global.datacenter=dc-foo' \
-      --set 'global.domain=bar' \
-      --set 'global.cloud.enabled=true' \
-      --set 'global.cloud.clientId.secretName=client-id-name' \
-      --set 'global.cloud.clientId.secretKey=client-id-key' \
-      --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \
-      --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \
-      --set 'global.cloud.resourceId.secretName=resource-id-name' \
-      --set 'global.cloud.resourceId.secretKey=resource-id-key' \
-      --set 'global.cloud.scadaAddress.secretKey=scada-address-key' \
-      .
-  [ "$status" -eq 1 ]
-
-  [[ "$output" =~ "When either global.cloud.scadaAddress.secretName or global.cloud.scadaAddress.secretKey is defined, both must be set." ]]
-}
-
-#--------------------------------------------------------------------
-# CONSUL_HTTP_SSL
-
-@test "apiGateway/Deployment: CONSUL_HTTP_SSL set correctly when not using TLS." {
-  cd `chart_dir`
-  local actual=$(helm template \
-      -s templates/api-gateway-controller-deployment.yaml \
-      --set 'apiGateway.enabled=true' \
-      --set 'apiGateway.image=bar' \
-      --set 'global.tls.enabled=false' \
-      . | tee /dev/stderr |
-      yq '.spec.template.spec.containers[0].env[2].value' | tee /dev/stderr)
-  [ "${actual}" = "\"false\"" ]
-}
-
-@test "apiGateway/Deployment: CONSUL_HTTP_SSL set correctly when using TLS." {
-  cd `chart_dir`
-  local actual=$(helm template \
-      -s templates/api-gateway-controller-deployment.yaml \
-      --set 'apiGateway.enabled=true' \
-      --set 'apiGateway.image=bar' \
-      --set 'global.tls.enabled=true' \
-      . | tee /dev/stderr |
-      yq '.spec.template.spec.containers[0].env[3].value' | tee /dev/stderr)
-  [ "${actual}" = "\"true\"" ]
-}
-
-#--------------------------------------------------------------------
-# CONSUL_HTTP_ADDR
-
-@test "apiGateway/Deployment: CONSUL_HTTP_ADDR set correctly with external servers, TLS, and no clients." {
-  cd `chart_dir`
-  local actual=$(helm template \
-      -s templates/api-gateway-controller-deployment.yaml \
-      --set 'apiGateway.enabled=true' \
-      --set 'apiGateway.image=bar' \
-      --set 'global.tls.enabled=true' \
-      --set 'externalServers.enabled=true' \
-      --set 'externalServers.hosts[0]=external-consul.host' \
-      --set 'externalServers.httpsPort=8501' \
-      --set 'server.enabled=false' \
-      --set 'client.enabled=false' \
-      . | tee /dev/stderr |
-      yq '[.spec.template.spec.containers[0].env[2].value] | any(contains("external-consul.host:8501"))' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-}
-
-@test "apiGateway/Deployment: CONSUL_HTTP_ADDR set correctly with external servers, no TLS, and no clients" {
-  cd `chart_dir`
-  local actual=$(helm template \
-      -s templates/api-gateway-controller-deployment.yaml \
-      --set 'apiGateway.enabled=true' \
-      --set 'apiGateway.image=bar' \
-      --set 'global.tls.enabled=false' \
-      --set 'externalServers.enabled=true' \
-      --set 'externalServers.hosts[0]=external-consul.host' \
-      --set 'externalServers.httpsPort=8500' \
-      --set 'server.enabled=false' \
-      --set 'client.enabled=false' \
-      . | tee /dev/stderr |
-      yq '[.spec.template.spec.containers[0].env[1].value] | any(contains("external-consul.host:8500"))' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-}
-
-@test "apiGateway/Deployment: CONSUL_HTTP_ADDR set correctly with local servers, TLS, and clients" {
-  cd `chart_dir`
-  local actual=$(helm template \
-      -s templates/api-gateway-controller-deployment.yaml \
-      --set 'apiGateway.enabled=true' \
-      --set 'apiGateway.image=bar' \
-      --set 'global.tls.enabled=true' \
-      --set 'client.enabled=true' \
-      . | tee /dev/stderr |
-      yq '[.spec.template.spec.containers[0].env[2].value] | any(contains("$(HOST_IP):8501"))' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-}
-
-@test "apiGateway/Deployment: CONSUL_HTTP_ADDR set correctly with local servers, no TLS, and clients" {
-  cd `chart_dir`
-  local actual=$(helm template \
-      -s templates/api-gateway-controller-deployment.yaml \
-      --set 'apiGateway.enabled=true' \
-      --set 'apiGateway.image=bar' \
-      --set 'global.tls.enabled=false' \
-      --set 'client.enabled=true' \
-      . | tee /dev/stderr |
-      yq '[.spec.template.spec.containers[0].env[1].value] | any(contains("$(HOST_IP):8500"))' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-}
-
-@test "apiGateway/Deployment: CONSUL_HTTP_ADDR set correctly with local servers, TLS, and no clients" {
-  cd `chart_dir`
-  local actual=$(helm template \
-      -s templates/api-gateway-controller-deployment.yaml \
-      --set 'apiGateway.enabled=true' \
-      --set 'apiGateway.image=bar' \
-      --set 'global.tls.enabled=true' \
-      --set 'client.enabled=false' \
-      . | tee /dev/stderr |
-      yq '[.spec.template.spec.containers[0].env[2].value] | any(contains("release-name-consul-server:8501"))' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-}
-
-@test "apiGateway/Deployment: CONSUL_HTTP_ADDR set correctly with local servers, no TLS, and no clients" {
-  cd `chart_dir`
-  local actual=$(helm template \
-      -s templates/api-gateway-controller-deployment.yaml \
-      --set 'apiGateway.enabled=true' \
-      --set 'apiGateway.image=bar' \
-      --set 'global.tls.enabled=false' \
-      --set 'client.enabled=false' \
-      . | tee /dev/stderr |
-      yq '[.spec.template.spec.containers[0].env[1].value] | any(contains("release-name-consul-server:8500"))' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-}
-
-#--------------------------------------------------------------------
-# externalServers tlsServerName
-
-@test "apiGateway/Deployment: CONSUL_TLS_SERVER_NAME can be set for externalServers" {
-  cd `chart_dir`
-  local actual=$(helm template \
-      -s templates/api-gateway-controller-deployment.yaml \
-      --set 'apiGateway.enabled=true' \
-      --set 'apiGateway.image=bar' \
-      --set 'global.tls.enabled=true' \
-      --set 'externalServers.enabled=true' \
-      --set 'externalServers.hosts[0]=external-consul.host' \
-      --set 'externalServers.httpsPort=8501' \
-      --set 'externalServers.tlsServerName=hashi' \
-      --set 'server.enabled=false' \
-      . | tee /dev/stderr |
-      yq '.spec.template.spec.containers[0].env[4].value == "hashi"' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-}
-
-#--------------------------------------------------------------------
-# Admin Partitions
-
-@test "apiGateway/Deployment: CONSUL_PARTITION is set when using admin partitions" {
-  cd `chart_dir`
-  local actual=$(helm template \
-      -s templates/api-gateway-controller-deployment.yaml \
-      --set 'apiGateway.enabled=true' \
-      --set 'apiGateway.image=bar' \
-      --set 'global.enableConsulNamespaces=true' \
-      --set 'global.adminPartitions.enabled=true' \
-      --set 'global.adminPartitions.name=hashi' \
-      . | tee /dev/stderr |
-      yq '.spec.template.spec.containers[0].env[3].value == "hashi"' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-}
-
-@test "apiGateway/Deployment: CONSUL_LOGIN_PARTITION is set when using admin partitions with ACLs" {
-  cd `chart_dir`
-  local actual=$(helm template \
-      -s templates/api-gateway-controller-deployment.yaml \
-      --set 'apiGateway.enabled=true' \
-      --set 'apiGateway.image=bar' \
-      --set 'global.enableConsulNamespaces=true' \
-      --set 'global.adminPartitions.enabled=true' \
-      --set 'global.adminPartitions.name=hashi' \
-      --set 'global.acls.manageSystemACLs=true' \
-      . | tee /dev/stderr |
-      yq '.spec.template.spec.containers[0].env[6].value == "hashi"' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-}
-
-@test "apiGateway/Deployment: CONSUL_DYNAMIC_SERVER_DISCOVERY is set when not using clients" {
-  cd `chart_dir`
-  local actual=$(helm template \
-      -s templates/api-gateway-controller-deployment.yaml \
-      --set 'apiGateway.enabled=true' \
-      --set 'apiGateway.image=bar' \
-      --set 'client.enabled=false' \
-      . | tee /dev/stderr |
-      yq '.spec.template.spec.containers[0].env[3].value == "true"' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-}
-
-@test "apiGateway/Deployment: CONSUL_DYNAMIC_SERVER_DISCOVERY is not set when using clients" {
-  cd `chart_dir`
-  local actual=$(helm template \
-      -s templates/api-gateway-controller-deployment.yaml \
-      --set 'apiGateway.enabled=true' \
-      --set 'apiGateway.image=bar' \
-      --set 'client.enabled=true' \
-      . | tee /dev/stderr |
-      yq '.spec.template.spec.containers[0].env[3]' | tee /dev/stderr)
-  [ "${actual}" = "null" ]
-}
-
-@test "apiGateway/Deployment: CONSUL_CACERT is set when using tls and clients even when useSystemRoots is true" {
-  cd `chart_dir`
-  local actual=$(helm template \
-      -s templates/api-gateway-controller-deployment.yaml \
-      --set 'apiGateway.enabled=true' \
-      --set 'apiGateway.image=bar' \
-      --set 'global.tls.enabled=true' \
-      --set 'server.enabled=false' \
-      --set 'externalServers.hosts[0]=external-consul.host' \
-      --set 'externalServers.enabled=true' \
-      --set 'externalServers.useSystemRoots=true' \
-      --set 'client.enabled=true' \
-      . | tee /dev/stderr |
-      yq '.spec.template.spec.containers[0].env[0].name == "CONSUL_CACERT"' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-}
-
-@test "apiGateway/Deployment: CONSUL_CACERT is set when using tls and internal servers" {
-  cd `chart_dir`
-  local actual=$(helm template \
-      -s templates/api-gateway-controller-deployment.yaml \
-      --set 'apiGateway.enabled=true' \
-      --set 'apiGateway.image=bar' \
-      --set 'global.tls.enabled=true' \
-      --set 'server.enabled=true' \
-      . | tee /dev/stderr |
-      yq '.spec.template.spec.containers[0].env[0].name == "CONSUL_CACERT"' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-}
-
-@test "apiGateway/Deployment: CONSUL_CACERT is not set when using tls and useSystemRoots" {
-  cd `chart_dir`
-  local actual=$(helm template \
-      -s templates/api-gateway-controller-deployment.yaml \
-      --set 'apiGateway.enabled=true' \
-      --set 'apiGateway.image=bar' \
-      --set 'global.tls.enabled=true' \
-      --set 'server.enabled=false' \
-      --set 'externalServers.hosts[0]=external-consul.host' \
-      --set 'externalServers.enabled=true' \
-      --set 'externalServers.useSystemRoots=true' \
-      . | tee /dev/stderr |
-      yq '.spec.template.spec.containers[0].env[0].name == "CONSUL_CACERT"' | tee /dev/stderr)
-  [ "${actual}" = "false" ]
-}
-
-@test "apiGateway/Deployment: consul-ca-cert volume mount is not set when using externalServers and useSystemRoots" {
-  cd `chart_dir`
-  local actual=$(helm template \
-      -s templates/api-gateway-controller-deployment.yaml \
-      --set 'apiGateway.enabled=true' \
-      --set 'apiGateway.image=bar' \
-      --set 'global.acls.manageSystemACLs=true' \
-      --set 'global.tls.enabled=true' \
-      --set 'server.enabled=false' \
-      --set 'externalServers.hosts[0]=external-consul.host' \
-      --set 'externalServers.enabled=true' \
-      --set 'externalServers.useSystemRoots=true' \
-      . | tee /dev/stderr |
-      yq '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "consul-ca-cert")' | tee /dev/stderr)
-  [ "${actual}" = "" ]
-}
-
-@test "apiGateway/Deployment: consul-ca-cert volume mount is not set on acl-init when using externalServers and useSystemRoots" {
-  cd `chart_dir`
-  local actual=$(helm template \
-      -s templates/api-gateway-controller-deployment.yaml \
-      --set 'apiGateway.enabled=true' \
-      --set 'apiGateway.image=bar' \
-      --set 'global.acls.manageSystemACLs=true' \
-      --set 'global.tls.enabled=true' \
-      --set 'server.enabled=false' \
-      --set 'externalServers.hosts[0]=external-consul.host' \
-      --set 'externalServers.enabled=true' \
-      --set 'externalServers.useSystemRoots=true' \
-      . | tee /dev/stderr |
-      yq '.spec.template.spec.initContainers[1].volumeMounts[] | select(.name == "consul-ca-cert")' | tee /dev/stderr)
-  [ "${actual}" = "" ]
-}
-
-@test "apiGateway/Deployment: consul-auto-encrypt-ca-cert volume mount is set when tls.enabled, client.enabled, externalServers, useSystemRoots, and autoencrypt" {
-  cd `chart_dir`
-  local actual=$(helm template \
-      -s templates/api-gateway-controller-deployment.yaml \
-      --set 'apiGateway.enabled=true' \
-      --set 'apiGateway.image=bar' \
-      --set 'global.acls.manageSystemACLs=true' \
-      --set 'global.tls.enabled=true' \
-      --set 'client.enabled=true' \
-      --set 'server.enabled=false' \
-      --set 'global.tls.enableAutoEncrypt=true' \
-      --set 'externalServers.hosts[0]=external-consul.host' \
-      --set 'externalServers.enabled=true' \
-      --set 'externalServers.useSystemRoots=true' \
-      . | tee /dev/stderr |
-      yq '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "consul-auto-encrypt-ca-cert") | .mountPath' | tee /dev/stderr)
-  [ "${actual}" = '"/consul/tls/ca"' ]
-}
-
 #--------------------------------------------------------------------
 # extraLabels
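[Editor's note] The rewritten assertions in the file above key off fixed env positions (`.env[1]`, `.env[2]`, ...) instead of selecting entries by name, so they are sensitive to env ordering in the template; the stray `echo $actual` lines also look like leftover debugging. To re-run just the affected cases locally, bats-core's filter flag should work (file path taken from this diff):

    bats charts/consul/test/unit/api-gateway-controller-deployment.bats --filter 'CONSUL_HTTP_ADDR'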
{ - cd `chart_dir` - local actual=$(helm template \ - -s templates/api-gateway-gatewayclassconfig.yaml \ - --set 'apiGateway.enabled=true' \ - --set 'apiGateway.image=foo' \ - --set 'externalServers.enabled=true' \ - --set 'externalServers.hosts[0]=external-consul.host' \ - --set 'server.enabled=false' \ - --set 'client.enabled=true' \ - . | tee /dev/stderr | - yq '.spec.consul.address == "$(HOST_IP)"' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "apiGateway/GatewayClassConfig: Consul server address set with local servers and no clients." { - cd `chart_dir` - local actual=$(helm template \ - -s templates/api-gateway-gatewayclassconfig.yaml \ - --set 'apiGateway.enabled=true' \ - --set 'apiGateway.image=foo' \ - --set 'client.enabled=false' \ - . | tee /dev/stderr | - yq '.spec.consul.address == "release-name-consul-server.default.svc"' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "apiGateway/GatewayClassConfig: Consul server address set with local servers and clients." { - cd `chart_dir` - local actual=$(helm template \ - -s templates/api-gateway-gatewayclassconfig.yaml \ - --set 'apiGateway.enabled=true' \ - --set 'apiGateway.image=foo' \ - --set 'client.enabled=true' \ - . | tee /dev/stderr | - yq '.spec.consul.address == "$(HOST_IP)"' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -#-------------------------------------------------------------------- -# externalServers ports - -@test "apiGateway/GatewayClassConfig: ports for externalServers when not using TLS." { - cd `chart_dir` - local ports=$(helm template \ - -s templates/api-gateway-gatewayclassconfig.yaml \ - --set 'apiGateway.enabled=true' \ - --set 'apiGateway.image=foo' \ - --set 'global.tls.enabled=false' \ - --set 'externalServers.enabled=true' \ - --set 'externalServers.hosts[0]=external-consul.host' \ - --set 'externalServers.grpcPort=1234' \ - --set 'externalServers.httpsPort=5678' \ - --set 'server.enabled=false' \ - . | tee /dev/stderr | - yq '.spec.consul.ports' | tee /dev/stderr) - - local actual - actual=$(echo $ports | jq -r '.grpc' | tee /dev/stderr) - [ "${actual}" = "1234" ] - - actual=$(echo $ports | jq -r '.http' | tee /dev/stderr) - [ "${actual}" = "5678" ] -} - -@test "apiGateway/GatewayClassConfig: ports for externalServers when using TLS." { - cd `chart_dir` - local ports=$(helm template \ - -s templates/api-gateway-gatewayclassconfig.yaml \ - --set 'apiGateway.enabled=true' \ - --set 'apiGateway.image=foo' \ - --set 'global.tls.enabled=true' \ - --set 'externalServers.enabled=true' \ - --set 'externalServers.hosts[0]=external-consul.host' \ - --set 'externalServers.grpcPort=1234' \ - --set 'externalServers.httpsPort=5678' \ - --set 'server.enabled=false' \ - . | tee /dev/stderr | - yq '.spec.consul.ports' | tee /dev/stderr) - - local actual - actual=$(echo $ports | jq -r '.grpc' | tee /dev/stderr) - [ "${actual}" = "1234" ] - - actual=$(echo $ports | jq -r '.http' | tee /dev/stderr) - [ "${actual}" = "5678" ] -} diff --git a/charts/consul/test/unit/client-config-configmap.bats b/charts/consul/test/unit/client-config-configmap.bats index 5fc4a186d9..b1b6035429 100755 --- a/charts/consul/test/unit/client-config-configmap.bats +++ b/charts/consul/test/unit/client-config-configmap.bats @@ -2,11 +2,19 @@ load _helpers +@test "client/ConfigMap: enabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-config-configmap.yaml \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + @test "client/ConfigMap: enable with global.enabled false" { cd `chart_dir` local actual=$(helm template \ -s templates/client-config-configmap.yaml \ - --set 'client.enabled=true' \ --set 'global.enabled=false' \ --set 'client.enabled=true' \ . | tee /dev/stderr | @@ -14,11 +22,10 @@ load _helpers [ "${actual}" = "true" ] } -@test "client/ConfigMap: disable with client.enabled false" { +@test "client/ConfigMap: disable with client.enabled" { cd `chart_dir` assert_empty helm template \ -s templates/client-config-configmap.yaml \ - --set 'client.enabled=true' \ --set 'client.enabled=false' \ . } @@ -35,7 +42,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-config-configmap.yaml \ - --set 'client.enabled=true' \ --set 'client.extraConfig="{\"hello\": \"world\"}"' \ . | tee /dev/stderr | yq '.data["extra-from-values.json"] | match("world") | length > 1' | tee /dev/stderr) @@ -49,7 +55,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-config-configmap.yaml \ - --set 'client.enabled=true' \ --set 'connectInject.enabled=true' \ . | tee /dev/stderr | yq '.data["central-config.json"] | contains("enable_central_service_config")' | tee /dev/stderr) @@ -60,7 +65,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-config-configmap.yaml \ - --set 'client.enabled=true' \ --set 'connectInject.enabled=true' \ . | tee /dev/stderr | yq '.data["config.json"] | contains("check_update_interval")' | tee /dev/stderr) @@ -74,7 +78,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-config-configmap.yaml \ - --set 'client.enabled=true' \ --set 'global.secretsBackend.vault.enabled=true' \ --set 'global.secretsBackend.vault.consulServerRole=test' \ --set 'global.secretsBackend.vault.consulClientRole=test' \ @@ -89,7 +92,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-config-configmap.yaml \ - --set 'client.enabled=true' \ . | tee /dev/stderr | yq -r '.data["client.json"]' | jq -r .auto_reload_config | tee /dev/stderr) diff --git a/charts/consul/test/unit/client-daemonset.bats b/charts/consul/test/unit/client-daemonset.bats index 4c38207635..c7e533cc08 100755 --- a/charts/consul/test/unit/client-daemonset.bats +++ b/charts/consul/test/unit/client-daemonset.bats @@ -2,6 +2,15 @@ load _helpers +@test "client/DaemonSet: enabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-daemonset.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + @test "client/DaemonSet: enabled with global.enabled=false and client.enabled=true" { cd `chart_dir` local actual=$(helm template \ @@ -33,7 +42,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.image=foo' \ . | tee /dev/stderr | yq -r '.spec.template.spec.containers[0].image' | tee /dev/stderr) @@ -44,7 +52,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.image=foo' \ --set 'client.image=bar' \ . | tee /dev/stderr | @@ -56,7 +63,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ . 
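A note for readers skimming these test files: nearly every assertion in them follows one shape. Render a single template with `helm template -s`, pipe the YAML through yq, and compare the string that comes out. A minimal, illustrative sketch of that shape (the test name and assertion here are examples, not part of this patch):

@test "example/Sketch: rendered template is non-empty" {
  cd `chart_dir`
  local actual=$(helm template \
      -s templates/client-daemonset.yaml \
      . | tee /dev/stderr |
      yq 'length > 0' | tee /dev/stderr)
  [ "${actual}" = "true" ]
}

The `tee /dev/stderr` calls are what make failures debuggable: bats only prints output for failing tests, so echoing each intermediate value to stderr leaves a trail when an assertion trips.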
diff --git a/charts/consul/test/unit/client-daemonset.bats b/charts/consul/test/unit/client-daemonset.bats
index 4c38207635..c7e533cc08 100755
--- a/charts/consul/test/unit/client-daemonset.bats
+++ b/charts/consul/test/unit/client-daemonset.bats
@@ -2,6 +2,15 @@
 
 load _helpers
 
+@test "client/DaemonSet: enabled by default" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/client-daemonset.yaml \
+      . | tee /dev/stderr |
+      yq 'length > 0' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
 @test "client/DaemonSet: enabled with global.enabled=false and client.enabled=true" {
   cd `chart_dir`
   local actual=$(helm template \
@@ -33,7 +42,6 @@ load _helpers
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.image=foo' \
       . | tee /dev/stderr |
       yq -r '.spec.template.spec.containers[0].image' | tee /dev/stderr)
@@ -44,7 +52,6 @@ load _helpers
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.image=foo' \
       --set 'client.image=bar' \
       . | tee /dev/stderr |
@@ -56,7 +63,6 @@ load _helpers
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
      . | tee /dev/stderr |
      yq -r '.spec.updateStrategy' | tee /dev/stderr)
   [ "${actual}" = "null" ]
@@ -69,7 +75,6 @@
   cd `chart_dir`
   local command=$(helm template \
-      --set 'client.enabled=true' \
       --set 'server.replicas=3' \
       . | tee /dev/stderr |
       yq -r '.spec.template.spec.containers[0].command' | tee /dev/stderr)
@@ -88,7 +93,6 @@
   cd `chart_dir`
   local command=$(helm template \
-      --set 'client.enabled=true' \
       --set 'server.replicas=3' \
       --set 'server.ports.serflan.port=9301' \
       . | tee /dev/stderr |
@@ -108,7 +112,6 @@
   cd `chart_dir`
   local command=$(helm template \
-      --set 'client.enabled=true' \
       --set 'server.enabled=false' \
       --set 'externalServers.enabled=true' \
       --set 'externalServers.hosts[0]=foo' \
@@ -128,7 +131,6 @@
   cd `chart_dir`
   local command=$(helm template \
-      --set 'client.enabled=true' \
       --set 'server.enabled=false' \
       --set 'externalServers.enabled=true' \
       --set 'externalServers.hosts[0]=foo' \
@@ -147,7 +149,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       . | tee /dev/stderr |
       yq '.spec.template.spec.containers[0].command | any(contains("grpc"))' | tee /dev/stderr)
   [ "${actual}" = "true" ]
@@ -157,7 +158,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'client.grpc=false' \
       . | tee /dev/stderr |
       yq '.spec.template.spec.containers[0].command | any(contains("grpc"))' | tee /dev/stderr)
@@ -171,7 +171,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       . | tee /dev/stderr |
       yq '.spec.template.spec.containers[0].command | any(contains("-node-meta=pod-name:${HOSTNAME}"))' | tee /dev/stderr)
   [ "${actual}" = "true" ]
@@ -181,7 +180,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       . | tee /dev/stderr |
       yq '.spec.template.spec.containers[0].command | any(contains("-node-meta=host-ip:${HOST_IP}"))' | tee /dev/stderr)
   [ "${actual}" = "true" ]
@@ -191,7 +189,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'client.nodeMeta.pod-name=foobar' \
       . | tee /dev/stderr |
       yq '.spec.template.spec.containers[0].command | any(contains("-node-meta=pod-name:foobar"))' | tee /dev/stderr)
@@ -202,7 +199,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'client.nodeMeta.cluster-name=cluster01' \
       . | tee /dev/stderr |
       yq '.spec.template.spec.containers[0].command | any(contains("-node-meta=cluster-name:cluster01"))' | tee /dev/stderr)
@@ -216,7 +212,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       . | tee /dev/stderr |
       yq -rc '.spec.template.spec.containers[0].resources' | tee /dev/stderr)
   [ "${actual}" = '{"limits":{"cpu":"100m","memory":"100Mi"},"requests":{"cpu":"100m","memory":"100Mi"}}' ]
@@ -226,7 +221,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'client.resources.foo=bar' \
       . | tee /dev/stderr |
       yq -r '.spec.template.spec.containers[0].resources.foo' | tee /dev/stderr)
@@ -238,7 +232,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'client.resources=foo: bar' \
       . | tee /dev/stderr |
       yq -r '.spec.template.spec.containers[0].resources.foo' | tee /dev/stderr)
@@ -254,7 +247,6 @@
   # check that the extra-config volume is defined
   local volume_name=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       . | tee /dev/stderr |
       yq -r '.spec.template.spec.volumes[] | select(.name == "extra-config") | .name' | tee /dev/stderr)
   [ "${volume_name}" = "extra-config" ]
@@ -262,7 +254,6 @@
   # check that the consul container mounts the volume at /consul/extra-config
   local mount_path=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       . | tee /dev/stderr |
       yq -r '.spec.template.spec.containers[] | select(.name == "consul") | .volumeMounts[] | select(.name == "extra-config") | .mountPath' | tee /dev/stderr)
   [ "${mount_path}" = "/consul/extra-config" ]
@@ -277,7 +268,6 @@
   # Test that it defines it
   local object=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'client.extraVolumes[0].type=configMap' \
       --set 'client.extraVolumes[0].name=foo' \
       . | tee /dev/stderr |
@@ -294,7 +284,6 @@
   # Test that it mounts it
   local object=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'client.extraVolumes[0].type=configMap' \
       --set 'client.extraVolumes[0].name=foo' \
       . | tee /dev/stderr |
@@ -311,7 +300,6 @@
   # Doesn't load it
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'client.extraVolumes[0].type=configMap' \
       --set 'client.extraVolumes[0].name=foo' \
       . | tee /dev/stderr |
@@ -325,7 +313,6 @@
   # Test that it defines it
   local object=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'client.extraVolumes[0].type=secret' \
       --set 'client.extraVolumes[0].name=foo' \
       . | tee /dev/stderr |
@@ -342,7 +329,6 @@
   # Test that it mounts it
   local object=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'client.extraVolumes[0].type=configMap' \
       --set 'client.extraVolumes[0].name=foo' \
       . | tee /dev/stderr |
@@ -359,7 +345,6 @@
   # Doesn't load it
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'client.extraVolumes[0].type=configMap' \
       --set 'client.extraVolumes[0].name=foo' \
       . | tee /dev/stderr |
@@ -371,7 +356,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'client.extraVolumes[0].type=configMap' \
       --set 'client.extraVolumes[0].name=foo' \
       --set 'client.extraVolumes[0].load=true' \
@@ -387,7 +371,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       . | tee /dev/stderr |
       yq '.spec.template.spec.nodeSelector' | tee /dev/stderr)
   [ "${actual}" = "null" ]
@@ -397,7 +380,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'client.nodeSelector=testing' \
       . | tee /dev/stderr |
       yq -r '.spec.template.spec.nodeSelector' | tee /dev/stderr)
@@ -411,7 +393,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       . | tee /dev/stderr |
       yq '.spec.template.spec | .affinity? == null' | tee /dev/stderr)
   [ "${actual}" = "true" ]
@@ -421,7 +402,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'client.affinity=foobar' \
       . | tee /dev/stderr |
       yq '.spec.template.spec | .affinity == "foobar"' | tee /dev/stderr)
@@ -435,7 +415,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       . | tee /dev/stderr |
       yq '.spec.template.spec.priorityClassName' | tee /dev/stderr)
   [ "${actual}" = "null" ]
@@ -445,7 +424,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'client.priorityClassName=testing' \
       . | tee /dev/stderr |
       yq -r '.spec.template.spec.priorityClassName' | tee /dev/stderr)
@@ -459,7 +437,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       . | tee /dev/stderr |
       yq -r '.spec.template.metadata.labels | del(."app") | del(."chart") | del(."release") | del(."component") | del(."hasDNS")' | tee /dev/stderr)
   [ "${actual}" = "{}" ]
@@ -469,7 +446,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'client.extraLabels.foo=bar' \
       . | tee /dev/stderr |
       yq -r '.spec.template.metadata.labels.foo' | tee /dev/stderr)
@@ -480,7 +456,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'client.extraLabels.foo=bar' \
       --set 'client.extraLabels.baz=qux' \
       . | tee /dev/stderr)
@@ -528,7 +503,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       . | tee /dev/stderr |
       yq -r '.spec.template.metadata.annotations | del(."consul.hashicorp.com/connect-inject") | del(."consul.hashicorp.com/config-checksum")' | tee /dev/stderr)
   [ "${actual}" = "{}" ]
@@ -538,7 +512,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'client.annotations=foo: bar' \
       . | tee /dev/stderr |
       yq -r '.spec.template.metadata.annotations.foo' | tee /dev/stderr)
@@ -552,7 +525,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.metrics.enabled=true' \
       --set 'global.metrics.enableAgentMetrics=true' \
       . | tee /dev/stderr |
@@ -564,7 +536,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.metrics.enabled=true' \
       --set 'global.metrics.enableAgentMetrics=true' \
       . | tee /dev/stderr |
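Where a template should not render at all, these files reach for the `assert_empty` helper from `_helpers` rather than a yq assertion, because `helm template -s` exits non-zero when the selected template produces no manifests. A hedged sketch of that pattern (the value shown is illustrative, mirroring the "disable with client.enabled" test above):

@test "example/Sketch: daemonset is not rendered when clients are disabled" {
  cd `chart_dir`
  assert_empty helm template \
      -s templates/client-daemonset.yaml \
      --set 'client.enabled=false' \
      .
}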
@@ -576,7 +547,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.metrics.enabled=true' \
       --set 'global.metrics.enableAgentMetrics=true' \
       . | tee /dev/stderr |
@@ -588,7 +558,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.metrics.enabled=true' \
       --set 'global.metrics.enableAgentMetrics=true' \
       . | tee /dev/stderr |
@@ -601,7 +570,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.metrics.enabled=true' \
       --set 'global.metrics.enableAgentMetrics=true' \
       --set 'global.metrics.agentMetricsRetentionTime=5m' \
@@ -618,28 +586,25 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       . | tee /dev/stderr |
       yq -r '.spec.template.metadata.annotations."consul.hashicorp.com/config-checksum"' | tee /dev/stderr)
-  [ "${actual}" = f9be2829fed80a127e3752e10be32f29c2f9ca0ea548abcf3d4fc2c985cb7201 ]
+  [ "${actual}" = 55f93d04c3f0b85c7ef2869e4b8623296025a8388c881eab63be9f2dc70bafd6 ]
 }
 
 @test "client/DaemonSet: config-checksum annotation changes when extraConfig is provided" {
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'client.extraConfig="{\"hello\": \"world\"}"' \
       . | tee /dev/stderr |
       yq -r '.spec.template.metadata.annotations."consul.hashicorp.com/config-checksum"' | tee /dev/stderr)
-  [ "${actual}" = e9fb5f0b4ff4e36a89e8ca2dc1aed2072306e0dd6d4cc60b3edf155cf8dbe2e9 ]
+  [ "${actual}" = 891c0e207e1e0259ffb150d7364b667b7b12786ce37af3dd89f366bc6d2f21aa ]
 }
 
 @test "client/DaemonSet: config-checksum annotation changes when connectInject.enabled=true" {
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'connectInject.enabled=true' \
       . | tee /dev/stderr |
       yq -r '.spec.template.metadata.annotations."consul.hashicorp.com/config-checksum"' | tee /dev/stderr)
@@ -653,7 +618,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       . | tee /dev/stderr |
       yq '.spec.template.spec | .tolerations? == null' | tee /dev/stderr)
   [ "${actual}" = "true" ]
@@ -663,7 +627,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'client.tolerations=foobar' \
       . | tee /dev/stderr |
       yq '.spec.template.spec.tolerations == "foobar"' | tee /dev/stderr)
@@ -677,7 +640,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       . | tee /dev/stderr |
       yq '.spec.template.spec.containers[] | select(.name=="consul") | .env[] | select(.name == "GOSSIP_KEY")' | tee /dev/stderr)
   [ "${actual}" = "" ]
@@ -687,7 +649,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.gossipEncryption.autoGenerate=true' \
       . | tee /dev/stderr |
       yq '.spec.template.spec.containers[] | select(.name=="consul") | .env[] | select(.name == "GOSSIP_KEY") | .valueFrom.secretKeyRef | [.name=="release-name-consul-gossip-encryption-key", .key="key"] | all' | tee /dev/stderr)
@@ -698,7 +659,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.gossipEncryption.autoGenerate=true' \
       . | tee /dev/stderr |
       yq '.spec.template.spec.containers[] | select(.name=="consul") | .command | any(contains("-encrypt=\"${GOSSIP_KEY}\""))' \
@@ -710,7 +670,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.gossipEncryption.secretKey=bar' \
       . | tee /dev/stderr |
       yq '.spec.template.spec.containers[] | select(.name=="consul") | .env[] | select(.name == "GOSSIP_KEY") | length > 0' | tee /dev/stderr)
@@ -721,7 +680,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.gossipEncryption.secretName=foo' \
       . | tee /dev/stderr |
       yq '.spec.template.spec.containers[] | select(.name=="consul") | .env[] | select(.name == "GOSSIP_KEY") | length > 0' | tee /dev/stderr)
@@ -732,7 +690,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.gossipEncryption.secretKey=foo' \
       --set 'global.gossipEncryption.secretName=bar' \
       . | tee /dev/stderr |
@@ -744,7 +701,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       . | tee /dev/stderr |
       yq '.spec.template.spec.containers[] | select(.name=="consul") | .command | join(" ") | contains("encrypt")' | tee /dev/stderr)
   [ "${actual}" = "false" ]
@@ -754,7 +710,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.gossipEncryption.secretKey=foo' \
       --set 'global.gossipEncryption.secretName=bar' \
       . | tee /dev/stderr |
@@ -769,7 +724,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.tls.enabled=true' \
       . | tee /dev/stderr |
       yq '.spec.template.spec.volumes[] | select(.name == "consul-ca-cert")' | tee /dev/stderr)
@@ -780,7 +734,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.tls.enabled=true' \
       . | tee /dev/stderr |
       yq '.spec.template.spec.volumes[] | select(.name == "consul-ca-key")' | tee /dev/stderr)
@@ -791,7 +744,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.tls.enabled=true' \
       . | tee /dev/stderr |
       yq '.spec.template.spec.volumes[] | select(.name == "consul-client-cert")' | tee /dev/stderr)
@@ -802,7 +754,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.tls.enabled=false' \
       . | tee /dev/stderr |
       yq '.spec.template.spec.containers[0].ports[] | select (.containerPort == 8501)' | tee /dev/stderr)
@@ -813,7 +764,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.tls.enabled=true' \
       . | tee /dev/stderr |
       yq '.spec.template.spec.containers[0].ports[] | select (.containerPort == 8501)' | tee /dev/stderr)
@@ -824,7 +774,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.tls.enabled=true' \
       --set 'global.tls.httpsOnly=false' \
       . | tee /dev/stderr |
@@ -836,7 +785,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.tls.enabled=true' \
       --set 'global.tls.httpsOnly=true' \
       . | tee /dev/stderr |
@@ -848,7 +796,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.tls.enabled=false' \
       . | tee /dev/stderr |
       yq '.spec.template.spec.containers[0].readinessProbe.exec.command | join(" ") | contains("http://127.0.0.1:8500")' | tee /dev/stderr)
@@ -859,7 +806,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.tls.enabled=true' \
       . | tee /dev/stderr |
       yq '.spec.template.spec.containers[0].readinessProbe.exec.command | join(" ") | contains("https://127.0.0.1:8501")' | tee /dev/stderr)
@@ -870,7 +816,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.tls.enabled=true' \
       . | tee /dev/stderr |
       yq '.spec.template.spec.containers[0].readinessProbe.exec.command | join(" ") | contains("-k")' | tee /dev/stderr)
@@ -881,7 +826,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.tls.enabled=true' \
       --set 'global.tls.httpsOnly=true' \
       . | tee /dev/stderr |
@@ -889,32 +833,10 @@
   [ "${actual}" = "true" ]
 }
 
-@test "client/DaemonSet: TLS GRPC port is configured" {
-  cd `chart_dir`
-  local actual=$(helm template \
-      -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
-      --set 'global.tls.enabled=true' \
-      . | tee /dev/stderr |
-      yq '.spec.template.spec.containers[0].command | join(" ") | contains("ports { grpc = -1, grpc_tls = 8502 }")' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-}
-
-@test "client/DaemonSet: non-TLS GRPC port is configured when TLS is disabled" {
-  cd `chart_dir`
-  local actual=$(helm template \
-      -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
-      . | tee /dev/stderr |
-      yq '.spec.template.spec.containers[0].command | join(" ") | contains("ports { grpc = 8502, grpc_tls = -1 }")' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-}
-
 @test "client/DaemonSet: init container is created when global.tls.enabled=true" {
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.tls.enabled=true' \
       . | tee /dev/stderr |
       yq '.spec.template.spec.initContainers[] | select(.name == "client-tls-init") | length > 0' | tee /dev/stderr)
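Assertions against the agent invocation come in two yq shapes in this file: `any(contains("..."))` over the command array when a single argument is enough, and `join(" ") | contains("...")` when the match spans several words, as in the gossip-encryption and port checks above. A sketch of the joined form, with an illustrative substring that is an assumption rather than part of this patch:

@test "example/Sketch: agent command contains a substring" {
  cd `chart_dir`
  local actual=$(helm template \
      -s templates/client-daemonset.yaml \
      . | tee /dev/stderr |
      yq '.spec.template.spec.containers[0].command | join(" ") | contains("consul agent")' | tee /dev/stderr)
  [ "${actual}" = "true" ]
}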
@@ -925,40 +847,33 @@
   cd `chart_dir`
   local env=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.acls.manageSystemACLs=true' \
       --set 'global.tls.enabled=true' \
       . | tee /dev/stderr |
       yq -r '.spec.template.spec.initContainers[0].env[]' | tee /dev/stderr)
 
-  local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_ADDRESSES") | .value' | tee /dev/stderr)
-  [ "${actual}" = "release-name-consul-server.default.svc" ]
-
-  local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_HTTP_PORT") | .value' | tee /dev/stderr)
-  [ "${actual}" = "8501" ]
+  local actual
+  actual=$(echo $env | jq -r '. | select(.name == "CONSUL_HTTP_ADDR") | .value' | tee /dev/stderr)
+  [ "${actual}" = "https://release-name-consul-server.default.svc:8501" ]
 }
 
 @test "client/DaemonSet: Adds consul envvars CONSUL_HTTP_ADDR on acl-init init container when ACLs are enabled and tls is not enabled" {
   cd `chart_dir`
   local env=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.acls.manageSystemACLs=true' \
       . | tee /dev/stderr |
       yq -r '.spec.template.spec.initContainers[0].env[]' | tee /dev/stderr)
 
-  local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_ADDRESSES") | .value' | tee /dev/stderr)
-  [ "${actual}" = "release-name-consul-server.default.svc" ]
-
-  local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_HTTP_PORT") | .value' | tee /dev/stderr)
-  [ "${actual}" = "8500" ]
+  local actual
+  actual=$(echo $env | jq -r '. | select(.name == "CONSUL_HTTP_ADDR") | .value' | tee /dev/stderr)
+  [ "${actual}" = "http://release-name-consul-server.default.svc:8500" ]
 }
 
 @test "client/DaemonSet: Does not add consul envvars CONSUL_CACERT on acl-init init container when ACLs are enabled and tls is not enabled" {
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.acls.manageSystemACLs=true' \
       . | tee /dev/stderr |
       yq '.spec.template.spec.initContainers[0].env[] | select(.name == "CONSUL_CACERT")' | tee /dev/stderr)
@@ -970,13 +885,12 @@
   cd `chart_dir`
   local env=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.acls.manageSystemACLs=true' \
       --set 'global.tls.enabled=true' \
       . | tee /dev/stderr |
       yq -r '.spec.template.spec.initContainers[0].env[]' | tee /dev/stderr)
 
-  local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_CACERT_FILE") | .value' | tee /dev/stderr)
+  local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_CACERT") | .value' | tee /dev/stderr)
   [ "${actual}" = "/consul/tls/ca/tls.crt" ]
 }
 
@@ -984,7 +898,6 @@
   cd `chart_dir`
   local has_acl_init_container=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.tls.enabled=true' \
       --set 'global.acls.manageSystemACLs=true' \
       . | tee /dev/stderr |
@@ -994,7 +907,6 @@
 
   local has_tls_init_container=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.tls.enabled=true' \
       --set 'global.acls.manageSystemACLs=true' \
       . | tee /dev/stderr |
@@ -1007,7 +919,6 @@
   cd `chart_dir`
   local env=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.tls.enabled=true' \
       . | tee /dev/stderr |
       yq -r '.spec.template.spec.containers[0].env[]' | tee /dev/stderr)
@@ -1024,7 +935,6 @@
   cd `chart_dir`
   local command=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.tls.enabled=true' \
       . | tee /dev/stderr |
       yq '.spec.template.spec.containers[0].command | join(" ")' | tee /dev/stderr)
@@ -1044,10 +954,8 @@
   cd `chart_dir`
   local command=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.tls.enabled=true' \
       --set 'global.peering.enabled=true' \
-      --set 'meshGateway.enabled=true' \
       --set 'connectInject.enabled=true' \
       . | tee /dev/stderr |
       yq '.spec.template.spec.containers[0].command | join(" ")' | tee /dev/stderr)
@@ -1067,10 +975,8 @@
   cd `chart_dir`
   local command=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.tls.enabled=true' \
       --set 'global.peering.enabled=true' \
-      --set 'meshGateway.enabled=true' \
       --set 'connectInject.enabled=true' \
       . | tee /dev/stderr |
       yq '.spec.template.spec.containers[0].command | join(" ")' | tee /dev/stderr)
@@ -1090,7 +996,6 @@
   cd `chart_dir`
   local command=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.tls.enabled=true' \
       --set 'global.tls.verify=false' \
       . | tee /dev/stderr |
@@ -1111,7 +1016,6 @@
   cd `chart_dir`
   local spec=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.tls.enabled=true' \
       --set 'global.tls.caCert.secretName=foo-ca-cert' \
       --set 'global.tls.caCert.secretKey=key' \
@@ -1145,7 +1049,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.tls.enabled=true' \
       --set 'global.tls.enableAutoEncrypt=true' \
       . | tee /dev/stderr |
@@ -1157,7 +1060,6 @@
   cd `chart_dir`
   local command=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.tls.enabled=true' \
       --set 'global.tls.enableAutoEncrypt=true' \
       . | tee /dev/stderr |
@@ -1183,7 +1085,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.tls.enabled=true' \
       --set 'global.tls.enableAutoEncrypt=true' \
       . | tee /dev/stderr |
@@ -1195,7 +1096,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.tls.enabled=true' \
       --set 'global.tls.enableAutoEncrypt=true' \
       . | tee /dev/stderr |
@@ -1207,7 +1107,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.tls.enabled=true' \
       --set 'global.tls.enableAutoEncrypt=true' \
       . | tee /dev/stderr |
@@ -1219,7 +1118,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.tls.enabled=true' \
       --set 'global.tls.enableAutoEncrypt=true' \
       . | tee /dev/stderr |
@@ -1234,7 +1132,6 @@
   cd `chart_dir`
   local object=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'client.extraEnvironmentVars.custom_proxy=fakeproxy' \
       --set 'client.extraEnvironmentVars.no_proxy=custom_no_proxy' \
       . | tee /dev/stderr |
@@ -1256,7 +1153,6 @@
   cd `chart_dir`
   local volume_name=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.acls.manageSystemACLs=true' \
       . | tee /dev/stderr |
       yq -r '.spec.template.spec.volumes[] | select(.name == "aclconfig") | .name' | tee /dev/stderr)
@@ -1267,7 +1163,6 @@
   cd `chart_dir`
   local mount_path=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.acls.manageSystemACLs=true' \
       . | tee /dev/stderr |
       yq -r '.spec.template.spec.containers[] | select(.name == "consul") | .volumeMounts[] | select(.name == "aclconfig") | .mountPath' | tee /dev/stderr)
@@ -1278,7 +1173,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.acls.manageSystemACLs=true' \
       . | tee /dev/stderr |
       yq '.spec.template.spec.containers[0].command | any(contains("/consul/aclconfig"))' | tee /dev/stderr)
@@ -1289,7 +1183,6 @@
   cd `chart_dir`
   local object=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.acls.manageSystemACLs=true' \
       . | tee /dev/stderr |
       yq '.spec.template.spec.initContainers[] | select(.name == "client-acl-init")' | tee /dev/stderr)
@@ -1298,36 +1191,24 @@
       yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr)
   [ "${actual}" = "true" ]
 
-  local actual=$(echo $object |
+  local actual=$(echo $object |
       yq -r '.command | any(contains("secret-name"))' | tee /dev/stderr)
-  [ "${actual}" = "false" ]
-
-  local actual=$(echo $object |
-      yq -r '.env[2].name | contains("CONSUL_ADDRESSES")' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-
-  local actual=$(echo $object |
-      yq -r '.env[2].value | contains("release-name-consul-server.default.svc")' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-
-  local actual=$(echo $object |
-      yq -r '.env[7].name | contains("CONSUL_LOGIN_AUTH_METHOD")' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
+  [ "${actual}" = "false" ]
 
   local actual=$(echo $object |
-      yq -r '.env[7].value | contains("release-name-consul-k8s-component-auth-method")' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
+      yq -r '.command | any(contains("k8s-namespace"))' | tee /dev/stderr)
+  [ "${actual}" = "false" ]
 
   local actual=$(echo $object |
-      yq -r '.env[8].name | contains("CONSUL_LOGIN_META")' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
+      yq -r '.command | any(contains("component-name=client"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
 
   local actual=$(echo $object |
-      yq -r '.env[8].value | contains("component=client")' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
+      yq -r '.command | any(contains("init-type=\"client\""))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
 
   local actual=$(echo $object |
-      yq -r '.command | any(contains("init-type=\"client\""))' | tee /dev/stderr)
+      yq -r '.command | any(contains("acl-auth-method=\"release-name-consul-k8s-component-auth-method\""))' | tee /dev/stderr)
   [ "${actual}" = "true" ]
 
   local actual=$(echo $object |
@@ -1343,7 +1224,6 @@
   cd `chart_dir`
   local object=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.enableConsulNamespaces=true' \
       --set 'global.adminPartitions.enabled=true' \
       --set 'global.adminPartitions.name=default' \
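The acl-init assertions above use a two-step pattern worth naming: capture the whole init container once with yq's `select(.name == "client-acl-init")`, then probe the captured JSON repeatedly with jq or yq instead of re-rendering the chart for each check. A sketch of that shape (the probed flag is illustrative, not asserted by this patch):

@test "example/Sketch: probe one init container's command" {
  cd `chart_dir`
  local object=$(helm template \
      -s templates/client-daemonset.yaml \
      --set 'global.acls.manageSystemACLs=true' \
      . | tee /dev/stderr |
      yq '.spec.template.spec.initContainers[] | select(.name == "client-acl-init")' | tee /dev/stderr)

  local actual=$(echo $object |
      yq -r '.command | any(contains("init-type"))' | tee /dev/stderr)
  [ "${actual}" = "true" ]
}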
contains("CONSUL_LOGIN_PARTITION")' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo $object | - yq -r '.env[8].value | contains("default")' | tee /dev/stderr) - [ "${actual}" = "true" ] + yq -r '.command | any(contains("k8s-namespace"))' | tee /dev/stderr) + [ "${actual}" = "false" ] local actual=$(echo $object | - yq -r '.env[9].name | contains("CONSUL_LOGIN_AUTH_METHOD")' | tee /dev/stderr) - [ "${actual}" = "true" ] + yq -r '.command | any(contains("component-name=client"))' | tee /dev/stderr) + [ "${actual}" = "true" ] local actual=$(echo $object | - yq -r '.env[9].value | contains("release-name-consul-k8s-component-auth-method")' | tee /dev/stderr) - [ "${actual}" = "true" ] + yq -r '.command | any(contains("init-type=\"client\""))' | tee /dev/stderr) + [ "${actual}" = "true" ] local actual=$(echo $object | - yq -r '.env[10].name | contains("CONSUL_LOGIN_META")' | tee /dev/stderr) - [ "${actual}" = "true" ] + yq -r '.command | any(contains("acl-auth-method=\"release-name-consul-k8s-component-auth-method\""))' | tee /dev/stderr) + [ "${actual}" = "true" ] local actual=$(echo $object | - yq -r '.env[10].value | contains("component=client")' | tee /dev/stderr) - [ "${actual}" = "true" ] + yq -r '.command | any(contains("log-level=info"))' | tee /dev/stderr) + [ "${actual}" = "true" ] local actual=$(echo $object | - yq -r '.command | any(contains("init-type=\"client\""))' | tee /dev/stderr) + yq -r '.command | any(contains("log-json=false"))' | tee /dev/stderr) [ "${actual}" = "true" ] local actual=$(echo $object | - yq -r '.command | any(contains("log-level=info"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + yq -r '.command | any(contains("partition=default"))' | tee /dev/stderr) + [ "${actual}" = "true" ] local actual=$(echo $object | - yq -r '.command | any(contains("log-json=false"))' | tee /dev/stderr) + yq -r '.command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr) [ "${actual}" = "true" ] } @@ -1416,7 +1276,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.acls.manageSystemACLs=false' \ . | tee /dev/stderr | yq '[.spec.template.spec.containers[] | select(.name == "consul") | .env[] | .name] | any(contains("CONSUL_HTTP_TOKEN_FILE"))' | tee /dev/stderr) @@ -1427,7 +1286,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ . | tee /dev/stderr | yq '[.spec.template.spec.containers[] | select(.name == "consul") | .env[] | .name] | any(contains("CONSUL_HTTP_TOKEN_FILE"))' | tee /dev/stderr) @@ -1438,7 +1296,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ . | tee /dev/stderr | yq '[.spec.template.spec.containers[0].lifecycle.preStop.exec.command[2]] | any(contains("consul logout"))' | tee /dev/stderr) @@ -1449,7 +1306,6 @@ load _helpers cd `chart_dir` local volume=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ . | yq '.spec.template.spec.volumes[] | select(.name == "consul-data")' | tee /dev/stderr) @@ -1466,7 +1322,6 @@ load _helpers cd `chart_dir` local volume_mount=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ . 
| yq '.spec.template.spec.containers[] | select(.name == "consul") | .volumeMounts[] | select(.name == "consul-data")' | tee /dev/stderr) @@ -1483,7 +1338,6 @@ load _helpers cd `chart_dir` local object=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ . | yq '.spec.template.spec.initContainers[0].volumeMounts[1]' | tee /dev/stderr) @@ -1504,7 +1358,6 @@ load _helpers cd `chart_dir` local object=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ --set 'global.tls.enabled=true' \ . | yq '.spec.template.spec.initContainers[] | select(.name == "client-acl-init") | .volumeMounts[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) @@ -1526,7 +1379,6 @@ load _helpers cd `chart_dir` local object=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ --set 'global.tls.enabled=false' \ . | yq '.spec.template.spec.initContainers[] | select(.name == "client-acl-init") | .volumeMounts[] | select(.name=="consul-ca-cert")' | tee /dev/stderr) @@ -1537,7 +1389,6 @@ load _helpers cd `chart_dir` run helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ --set 'externalServers.enabled=true' \ --set 'server.enabled=false' \ @@ -1547,32 +1398,29 @@ load _helpers [[ "$output" =~ "externalServers.hosts must be set if externalServers.enabled is true" ]] } -@test "client/DaemonSet: addresses env is set with hosts when externalServers.hosts are provided" { +@test "client/DaemonSet: server-address flag is set with hosts when externalServers.hosts are provided" { cd `chart_dir` - local object=$(helm template \ + local command=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ --set 'externalServers.enabled=true' \ --set 'server.enabled=false' \ --set 'externalServers.hosts[0]=foo' \ + --set 'externalServers.hosts[1]=bar' \ . | tee /dev/stderr | - yq -r '.spec.template.spec.initContainers[] | select(.name == "client-acl-init")' | tee /dev/stderr) + yq -r '.spec.template.spec.initContainers[] | select(.name == "client-acl-init") | .command' | tee /dev/stderr) - local actual=$(echo $object | - yq -r '.env[2].name | contains("CONSUL_ADDRESSES")' | tee /dev/stderr) + local actual=$(echo $command | jq -r ' . | any(contains("-server-address=\"foo\""))' | tee /dev/stderr) [ "${actual}" = "true" ] - local actual=$(echo $object | - yq -r '.env[2].value | contains("foo")' | tee /dev/stderr) + local actual=$(echo $command | jq -r ' . | any(contains("-server-address=\"bar\""))' | tee /dev/stderr) [ "${actual}" = "true" ] } @test "client/DaemonSet: tls-server-name flag is set when externalServers.tlsServerName is provided" { cd `chart_dir` - local object=$(helm template \ + local command=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ --set 'global.tls.enabled=true' \ --set 'externalServers.enabled=true' \ @@ -1580,22 +1428,9 @@ load _helpers --set 'externalServers.hosts[0]=computer' \ --set 'externalServers.tlsServerName=foo' \ . 
| tee /dev/stderr | - yq -r '.spec.template.spec.initContainers[] | select(.name == "client-acl-init")' | tee /dev/stderr) - - local actual=$(echo $object | - yq -r '.env[2].name | contains("CONSUL_ADDRESSES")' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo $object | - yq -r '.env[2].value | contains("computer")' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo $object | - yq -r '.env[9].name | contains("CONSUL_TLS_SERVER_NAME")' | tee /dev/stderr) - [ "${actual}" = "true" ] + yq -r '.spec.template.spec.initContainers[] | select(.name == "client-acl-init") | .command' | tee /dev/stderr) - local actual=$(echo $object | - yq -r '.env[9].value | contains("foo")' | tee /dev/stderr) + local actual=$(echo $command | jq -r ' . | any(contains("-tls-server-name=foo"))' | tee /dev/stderr) [ "${actual}" = "true" ] } @@ -1603,7 +1438,6 @@ load _helpers cd `chart_dir` local command=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ --set 'externalServers.enabled=true' \ --set 'server.enabled=false' \ @@ -1619,7 +1453,6 @@ load _helpers cd `chart_dir` local command=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ --set 'externalServers.enabled=true' \ --set 'server.enabled=false' \ @@ -1631,25 +1464,19 @@ load _helpers [ "${actual}" = "false" ] } -@test "client/DaemonSet: use-tls env is set when global.tls.enabled is provided and externalServers.enabled is true" { +@test "client/DaemonSet: use-https flag is set when global.tls.enabled is provided and externalServers.enabled is true" { cd `chart_dir` - local object=$(helm template \ + local command=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ --set 'externalServers.enabled=true' \ --set 'server.enabled=false' \ --set 'externalServers.hosts[0]=computer' \ --set 'global.tls.enabled=true' \ . | tee /dev/stderr | - yq -r '.spec.template.spec.initContainers[] | select(.name == "client-acl-init")' | tee /dev/stderr) - - local actual=$(echo $object | - yq -r '.env[7].name | contains("CONSUL_USE_TLS")' | tee /dev/stderr) - [ "${actual}" = "true" ] + yq -r '.spec.template.spec.initContainers[] | select(.name == "client-acl-init") | .command' | tee /dev/stderr) - local actual=$(echo $object | - yq -r '.env[7].value | contains("true")' | tee /dev/stderr) + local actual=$(echo $command | jq -r ' . 
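Failure-path tests in this file use bats' `run` wrapper instead of command substitution, so a non-zero exit is captured in `$status` and the error text in `$output` rather than aborting the test. A sketch mirroring the externalServers validation above (same values, shown only to make the shape explicit):

@test "example/Sketch: invalid values fail template rendering" {
  cd `chart_dir`
  run helm template \
      -s templates/client-daemonset.yaml \
      --set 'global.acls.manageSystemACLs=true' \
      --set 'externalServers.enabled=true' \
      --set 'server.enabled=false' \
      .
  [ "$status" -eq 1 ]
  [[ "$output" =~ "externalServers.hosts must be set if externalServers.enabled is true" ]]
}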
@@ -1657,7 +1484,6 @@
   cd `chart_dir`
   local command=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.acls.manageSystemACLs=true' \
       --set 'externalServers.enabled=false' \
       --set 'server.enabled=false' \
@@ -1674,7 +1500,6 @@
   cd `chart_dir`
   local command=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.acls.manageSystemACLs=true' \
       --set 'externalServers.enabled=false' \
       --set 'server.enabled=false' \
@@ -1686,6 +1511,21 @@
   [ "${actual}" = "false" ]
 }
 
+@test "client/DaemonSet: server-port flag is set when externalServers.enabled is true" {
+  cd `chart_dir`
+  local command=$(helm template \
+      -s templates/client-daemonset.yaml \
+      --set 'global.acls.manageSystemACLs=true' \
+      --set 'externalServers.enabled=true' \
+      --set 'server.enabled=false' \
+      --set 'externalServers.hosts[0]=computer' \
+      . | tee /dev/stderr |
+      yq -r '.spec.template.spec.initContainers[] | select(.name == "client-acl-init") | .command' | tee /dev/stderr)
+
+  local actual=$(echo $command | jq -r ' . | any(contains("-server-port"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
 #--------------------------------------------------------------------
 # client.exposeGossipPorts
 
@@ -1694,7 +1534,6 @@
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
       --set 'client.enabled=true' \
-      --set 'client.enabled=true' \
       --set 'client.exposeGossipPorts=false' \
       . | tee /dev/stderr |
       yq -r '.spec.template.spec.containers[] | select(.name=="consul") | .env[] | select(.name=="ADVERTISE_IP") | .valueFrom.fieldRef.fieldPath' |
@@ -1707,7 +1546,6 @@
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
       --set 'client.enabled=true' \
-      --set 'client.enabled=true' \
       --set 'client.exposeGossipPorts=true' \
       . | tee /dev/stderr |
       yq -r '.spec.template.spec.containers[] | select(.name=="consul") | .env[] | select(.name=="ADVERTISE_IP") | .valueFrom.fieldRef.fieldPath' |
@@ -1719,7 +1557,6 @@
   cd `chart_dir`
   local has_exposed_host_ports=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'server.enabled=true' \
       --set 'client.enabled=true' \
       . | tee /dev/stderr |
@@ -1733,7 +1570,6 @@
   local has_exposed_host_ports=$(helm template \
       -s templates/client-daemonset.yaml \
       --set 'client.enabled=true' \
-      --set 'client.enabled=true' \
       --set 'client.exposeGossipPorts=true' \
       . | tee /dev/stderr |
       yq '[.spec.template.spec.containers[] | select(.name=="consul") | .ports[] | select(.containerPort==8301)] | all(has("hostPort"))' |
@@ -1749,7 +1585,6 @@
   # Test that hostPath is set to null.
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       . | tee /dev/stderr |
       yq '.spec.template.spec.volumes[] | select(.name == "data") | .hostPath == null' | tee /dev/stderr )
   [ "${actual}" = "true" ]
@@ -1757,7 +1592,6 @@
   # Test that emptyDir is set instead.
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       . | tee /dev/stderr |
       yq '.spec.template.spec.volumes[] | select(.name == "data") | .emptyDir == {}' | tee /dev/stderr )
   [ "${actual}" = "true" ]
@@ -1767,7 +1601,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'client.dataDirectoryHostPath=/opt/consul' \
       . | tee /dev/stderr |
       yq '.spec.template.spec.volumes[] | select(.name == "data") | .hostPath.path == "/opt/consul"' | tee /dev/stderr)
@@ -1781,7 +1614,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       . | tee /dev/stderr |
       yq '.spec.template.spec.dnsPolicy == null' | tee /dev/stderr)
   [ "${actual}" = "true" ]
@@ -1791,13 +1623,34 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'client.dnsPolicy=ClusterFirstWithHostNet' \
       . | tee /dev/stderr |
       yq '.spec.template.spec.dnsPolicy == "ClusterFirstWithHostNet"' | tee /dev/stderr)
   [ "${actual}" = "true" ]
 }
 
+#--------------------------------------------------------------------
+# DNS
+
+@test "client/DaemonSet: recursor flags is not set by default" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/client-daemonset.yaml \
+      . | tee /dev/stderr |
+      yq -c -r '.spec.template.spec.containers[0].command | join(" ") | contains("$recursor_flags")' | tee /dev/stderr)
+  [ "${actual}" = "false" ]
+}
+
+@test "client/DaemonSet: add recursor flags if dns.enableRedirection is true" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/client-daemonset.yaml \
+      --set 'dns.enableRedirection=true' \
+      . | tee /dev/stderr |
+      yq -c -r '.spec.template.spec.containers[0].command | join(" ") | contains("$recursor_flags")' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
 #--------------------------------------------------------------------
 # hostNetwork
 
@@ -1805,7 +1658,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       . | tee /dev/stderr |
       yq '.spec.template.spec.hostNetwork == null' | tee /dev/stderr)
   [ "${actual}" = "true" ]
@@ -1815,7 +1667,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'client.hostNetwork=true' \
       . | tee /dev/stderr |
       yq '.spec.template.spec.hostNetwork == true' | tee /dev/stderr)
@@ -1828,7 +1679,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       . | tee /dev/stderr | \
       yq '.spec.updateStrategy == null' | tee /dev/stderr)
   [ "${actual}" = "true" ]
@@ -1842,7 +1692,6 @@ rollingUpdate:
 "
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set "client.updateStrategy=${updateStrategy}" \
       . | tee /dev/stderr | \
       yq -c '.spec.updateStrategy == {"type":"RollingUpdate","rollingUpdate":{"maxUnavailable":5}}' | tee /dev/stderr)
@@ -1856,7 +1705,6 @@ rollingUpdate:
   cd `chart_dir`
   local has_security_context=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.openshift.enabled=true' \
       . | tee /dev/stderr |
       yq -r '.spec.template.spec | has("securityContext")' | tee /dev/stderr)
@@ -1870,7 +1718,6 @@ rollingUpdate:
   cd `chart_dir`
   local security_context=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       . | tee /dev/stderr |
       yq -r '.spec.template.spec.securityContext' | tee /dev/stderr)
 
@@ -1891,7 +1738,6 @@ rollingUpdate:
   cd `chart_dir`
   local security_context=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'client.securityContext.runAsNonRoot=false' \
       --set 'client.securityContext.privileged=true' \
       . | tee /dev/stderr |
@@ -1911,7 +1757,6 @@ rollingUpdate:
   cd `chart_dir`
   local manifest=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.tls.enabled=true' \
       --set 'global.tls.enableAutoEncrypt=false' \
       --set 'global.acls.manageSystemACLs=true' \
@@ -1937,7 +1782,6 @@ rollingUpdate:
   cd `chart_dir`
   local manifest=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.openshift.enabled=true' \
       --set 'global.tls.enabled=true' \
       --set 'global.tls.enableAutoEncrypt=false' \
@@ -1964,7 +1808,6 @@ rollingUpdate:
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.enterpriseLicense.secretName=foo' \
       --set 'global.enterpriseLicense.secretKey=bar' \
       . | tee /dev/stderr |
@@ -1976,7 +1819,6 @@ rollingUpdate:
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.enterpriseLicense.secretName=foo' \
       --set 'global.enterpriseLicense.secretKey=bar' \
       . | tee /dev/stderr |
@@ -1988,7 +1830,6 @@ rollingUpdate:
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.enterpriseLicense.secretName=foo' \
       --set 'global.enterpriseLicense.secretKey=bar' \
       . | tee /dev/stderr |
@@ -2000,7 +1841,6 @@ rollingUpdate:
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.enterpriseLicense.secretName=foo' \
       --set 'global.enterpriseLicense.secretKey=bar' \
       --set 'global.acls.manageSystemACLs=true' \
@@ -2013,7 +1853,6 @@ rollingUpdate:
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.enterpriseLicense.secretName=foo' \
       --set 'global.enterpriseLicense.secretKey=bar' \
       --set 'global.acls.manageSystemACLs=true' \
@@ -2026,7 +1865,6 @@ rollingUpdate:
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.enterpriseLicense.secretName=foo' \
       --set 'global.enterpriseLicense.secretKey=bar' \
       --set 'global.acls.manageSystemACLs=true' \
@@ -2039,7 +1877,6 @@ rollingUpdate:
   cd `chart_dir`
   run helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.enterpriseLicense.secretName=' \
       --set 'global.enterpriseLicense.secretKey=enterpriselicense' \
       .
@@ -2051,7 +1888,6 @@ rollingUpdate:
   cd `chart_dir`
   run helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
      --set 'global.enterpriseLicense.secretName=foo' \
      --set 'global.enterpriseLicense.secretKey=' \
       .
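When several fields of one rendered object are asserted, the tests capture the render once into a variable and re-query it with `echo $var | yq ...` or jq, avoiding a fresh `helm template` per assertion. A sketch of that shape; the field names and expected values here are illustrative assumptions, not claims about the chart's defaults:

@test "example/Sketch: several assertions against one render" {
  cd `chart_dir`
  local security_context=$(helm template \
      -s templates/client-daemonset.yaml \
      . | tee /dev/stderr |
      yq -r '.spec.template.spec.securityContext' | tee /dev/stderr)

  local actual=$(echo $security_context | jq -r .runAsNonRoot)
  [ "${actual}" = "true" ]

  local actual=$(echo $security_context | jq -r .fsGroup)
  [ "${actual}" != "" ]
}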
@@ -2065,7 +1901,6 @@ rollingUpdate:
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.recursors[0]=1.2.3.4' \
       . | tee /dev/stderr |
       yq -c -r '.spec.template.spec.containers[0].command | join(" ") | contains("-recursor=\"1.2.3.4\"")' | tee /dev/stderr)
@@ -2078,8 +1913,6 @@ rollingUpdate:
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
-      --set 'global.enableConsulNamespaces=true' \
       --set 'global.adminPartitions.enabled=true' \
       . | tee /dev/stderr |
       yq -c -r '.spec.template.spec.containers[0].command | join(" ") | contains("partition = \"default\"")' | tee /dev/stderr)
@@ -2090,8 +1923,6 @@ rollingUpdate:
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
-      --set 'global.enableConsulNamespaces=true' \
       --set 'global.adminPartitions.enabled=true' \
       --set 'global.adminPartitions.name=test' \
       --set 'server.enabled=false' \
@@ -2106,11 +1937,10 @@ rollingUpdate:
   cd `chart_dir`
   run helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
-      --set 'global.enableConsulNamespaces=true' \
       --set 'global.adminPartitions.enabled=true' \
       --set 'global.adminPartitions.name=test' \
       .
+  [ "$status" -eq 1 ]
   [[ "$output" =~ "global.adminPartitions.name has to be \"default\" in the server cluster" ]]
 }
 
@@ -2119,37 +1949,14 @@ rollingUpdate:
   cd `chart_dir`
   run helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.adminPartitions.enabled=true' \
       --set 'global.federation.enabled=true' \
       .
+  [ "$status" -eq 1 ]
   [[ "$output" =~ "If global.federation.enabled is true, global.adminPartitions.enabled must be false because they are mutually exclusive" ]]
 }
 
-@test "client/DaemonSet: consul login datacenter is set to primary when when federation enabled in non-primary datacenter" {
-  cd `chart_dir`
-  local object=$(helm template \
-      -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
-      --set 'meshGateway.enabled=true' \
-      --set 'global.acls.manageSystemACLs=true' \
-      --set 'global.datacenter=dc1' \
-      --set 'global.federation.enabled=true' \
-      --set 'global.federation.primaryDatacenter=dc2' \
-      --set 'global.tls.enabled=true' \
-      . | tee /dev/stderr |
-      yq '.spec.template.spec.initContainers[] | select(.name == "client-acl-init")' | tee /dev/stderr)
-
-  local actual=$(echo $object |
-      yq '[.env[11].name] | any(contains("CONSUL_LOGIN_DATACENTER"))' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-
-  local actual=$(echo $object |
-      yq '[.env[11].value] | any(contains("dc2"))' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-}
-
 #--------------------------------------------------------------------
 # extraContainers
 
@@ -2159,7 +1966,6 @@
   # Test that it defines the extra container
   local object=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'client.extraContainers[0].image=test-image' \
       --set 'client.extraContainers[0].name=test-container' \
       --set 'client.extraContainers[0].ports[0].name=test-port' \
@@ -2205,7 +2011,6 @@
 
   local object=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'client.extraContainers[0].image=test-image' \
       --set 'client.extraContainers[0].name=test-container' \
       --set 'client.extraContainers[1].image=test-image' \
@@ -2222,7 +2027,6 @@
 
   local object=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       . | tee /dev/stderr |
       yq -r '.spec.template.spec.containers | length' | tee /dev/stderr)
 
@@ -2236,7 +2040,6 @@
   cd `chart_dir`
   run helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.secretsBackend.vault.enabled=true' \
       --set 'global.secretsBackend.vault.consulServerRole=test' \
       .
@@ -2248,7 +2051,6 @@
   cd `chart_dir`
   run helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.secretsBackend.vault.enabled=true' \
       --set 'global.secretsBackend.vault.consulServerRole=test' \
       --set 'global.secretsBackend.vault.consulClientRole=test' \
@@ -2262,7 +2064,6 @@
   cd `chart_dir`
   run helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.secretsBackend.vault.enabled=true' \
       --set 'global.secretsBackend.vault.consulClientRole=test' \
       --set 'global.secretsBackend.vault.consulServerRole=foo' \
@@ -2279,7 +2080,6 @@
   cd `chart_dir`
   run helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.secretsBackend.vault.enabled=true' \
       --set 'global.secretsBackend.vault.consulClientRole=test' \
       --set 'global.secretsBackend.vault.consulServerRole=test' \
@@ -2295,7 +2095,6 @@
   cd `chart_dir`
   run helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.secretsBackend.vault.enabled=true' \
       --set 'global.secretsBackend.vault.consulClientRole=test' \
       --set 'global.secretsBackend.vault.consulServerRole=test' \
@@ -2311,7 +2110,6 @@
   cd `chart_dir`
   local object=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       . | tee /dev/stderr |
       yq -r '.spec.template.metadata' | tee /dev/stderr)
 
@@ -2327,7 +2125,6 @@
   cd `chart_dir`
   local object=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.secretsBackend.vault.enabled=true' \
       --set 'global.secretsBackend.vault.consulClientRole=foo' \
       --set 'global.secretsBackend.vault.consulServerRole=test' \
@@ -2347,7 +2144,6 @@
   cd `chart_dir`
   local object=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.secretsBackend.vault.enabled=true' \
       --set 'global.secretsBackend.vault.consulClientRole=test' \
       --set 'global.secretsBackend.vault.consulServerRole=foo' \
@@ -2370,7 +2166,6 @@
   cd `chart_dir`
   local object=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.secretsBackend.vault.enabled=true' \
       --set 'global.secretsBackend.vault.consulClientRole=test' \
       --set 'global.secretsBackend.vault.consulServerRole=foo' \
@@ -2395,7 +2190,6 @@
   cd `chart_dir`
   local object=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.secretsBackend.vault.enabled=true' \
       --set 'global.secretsBackend.vault.consulClientRole=foo' \
       --set 'global.secretsBackend.vault.consulServerRole=test' \
@@ -2412,7 +2206,6 @@
   cd `chart_dir`
   local object=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.secretsBackend.vault.enabled=true' \
       --set 'global.secretsBackend.vault.consulClientRole=foo' \
       --set 'global.secretsBackend.vault.consulServerRole=test' \
@@ -2430,7 +2223,6 @@
-      --set 'client.enabled=true' \
       --set 'global.secretsBackend.vault.enabled=true' \
       --set 'global.secretsBackend.vault.consulClientRole=foo' \
       --set 'global.secretsBackend.vault.consulServerRole=test' \
@@ -2448,7 +2240,6 @@
   cd `chart_dir`
   local object=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.secretsBackend.vault.enabled=true' \
       --set 'global.secretsBackend.vault.consulClientRole=foo' \
       --set 'global.secretsBackend.vault.consulServerRole=test' \
@@ -2467,7 +2258,6 @@
   cd `chart_dir`
   local object=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.secretsBackend.vault.enabled=true' \
       --set 'global.secretsBackend.vault.consulClientRole=test' \
       --set 'global.secretsBackend.vault.consulServerRole=foo' \
@@ -2493,7 +2283,6 @@
   cd `chart_dir`
   local object=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.secretsBackend.vault.enabled=true' \
       --set 'global.secretsBackend.vault.consulClientRole=test' \
       --set 'global.secretsBackend.vault.consulServerRole=foo' \
@@ -2531,7 +2320,6 @@
   cd `chart_dir`
   local object=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.secretsBackend.vault.enabled=true' \
       --set 'global.secretsBackend.vault.consulClientRole=foo' \
       --set 'global.secretsBackend.vault.consulServerRole=test' \
@@ -2554,7 +2342,6 @@
   cd `chart_dir`
   local object=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.secretsBackend.vault.enabled=true' \
       --set 'global.secretsBackend.vault.consulClientRole=foo' \
       --set 'global.secretsBackend.vault.consulServerRole=test' \
@@ -2580,7 +2367,6 @@
   cd `chart_dir`
   local env=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.secretsBackend.vault.enabled=true' \
       --set 'global.secretsBackend.vault.consulClientRole=foo' \
       --set 'global.secretsBackend.vault.consulServerRole=test' \
@@ -2599,7 +2385,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.secretsBackend.vault.enabled=true' \
       --set 'global.secretsBackend.vault.consulClientRole=foo' \
       --set 'global.secretsBackend.vault.consulServerRole=test' \
@@ -2614,7 +2399,6 @@
   cd `chart_dir`
   local actual=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.secretsBackend.vault.enabled=true' \
       --set 'global.secretsBackend.vault.consulClientRole=foo' \
       --set 'global.secretsBackend.vault.consulServerRole=test' \
@@ -2629,7 +2413,6 @@
   cd `chart_dir`
   local env=$(helm template \
       -s templates/client-daemonset.yaml \
-      --set 'client.enabled=true' \
       --set 'global.acls.manageSystemACLs=true' \
       --set 'global.secretsBackend.vault.manageSystemACLsRole=true' \
       --set 'global.acls.replicationToken.secretName=replication' \
@@ -2645,7 +2428,7 @@
       . | tee /dev/stderr |
       yq -r '.spec.template.spec.initContainers[0].env[]' | tee /dev/stderr)
 
-  local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_CACERT_FILE") | .value' | tee /dev/stderr)
+  local actual=$(echo $env | jq -r '.
| select(.name == "CONSUL_CACERT") | .value' | tee /dev/stderr) [ "${actual}" = "/vault/secrets/serverca.crt" ] } @@ -2653,7 +2436,6 @@ rollingUpdate: cd `chart_dir` local object=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ --set 'global.tls.enabled=true' \ --set 'global.secretsBackend.vault.enabled=true' \ @@ -2677,7 +2459,6 @@ rollingUpdate: cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.secretsBackend.vault.enabled=true' \ --set 'global.secretsBackend.vault.consulClientRole=test' \ --set 'global.secretsBackend.vault.consulServerRole=foo' \ @@ -2690,7 +2471,6 @@ rollingUpdate: cd `chart_dir` local actual=$(helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.secretsBackend.vault.enabled=true' \ --set 'global.secretsBackend.vault.consulClientRole=test' \ --set 'global.secretsBackend.vault.consulServerRole=foo' \ @@ -2707,256 +2487,9 @@ rollingUpdate: cd `chart_dir` run helm template \ -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ --set 'global.imageK8s=something' \ . - [ "$status" -eq 1 ] - [[ "$output" =~ "global.imageK8s is not a valid key, use global.imageK8S (note the capital 'S')" ]] -} - -#-------------------------------------------------------------------- -# global.cloud -@test "client/DaemonSet: fails when global.cloud.enabled is true and global.cloud.clientId.secretName is not set but global.cloud.clientSecret.secretName and global.cloud.resourceId.secretName is set" { - cd `chart_dir` - run helm template \ - -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.datacenter=dc-foo' \ - --set 'global.domain=bar' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientSecret.secretName=client-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-id-key' \ - --set 'global.cloud.resourceId.secretName=client-resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=client-resource-id-key' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]] -} - -@test "client/DaemonSet: fails when global.cloud.enabled is true and global.cloud.clientSecret.secretName is not set but global.cloud.clientId.secretName and global.cloud.resourceId.secretName is set" { - cd `chart_dir` - run helm template \ - -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.datacenter=dc-foo' \ - --set 'global.domain=bar' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." 
]] -} - -@test "client/DaemonSet: fails when global.cloud.enabled is true and global.cloud.resourceId.secretName is not set but global.cloud.clientId.secretName and global.cloud.clientSecret.secretName is set" { - cd `chart_dir` - run helm template \ - -s templates/client-daemonset.yaml \ - --set 'apiGateway.enabled=true' \ - --set 'apiGateway.image=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.datacenter=dc-foo' \ - --set 'global.domain=bar' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]] -} - -@test "client/DaemonSet: fails when global.cloud.resourceId.secretName is set but global.cloud.resourceId.secretKey is not set." { - cd `chart_dir` - run helm template \ - -s templates/client-daemonset.yaml \ - --set 'apiGateway.enabled=true' \ - --set 'apiGateway.image=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.datacenter=dc-foo' \ - --set 'global.domain=bar' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When either global.cloud.resourceId.secretName or global.cloud.resourceId.secretKey is defined, both must be set." ]] -} - -@test "client/DaemonSet: fails when global.cloud.authURL.secretName is set but global.cloud.authURL.secretKey is not set." { - cd `chart_dir` - run helm template \ - -s templates/client-daemonset.yaml \ - --set 'apiGateway.enabled=true' \ - --set 'apiGateway.image=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.datacenter=dc-foo' \ - --set 'global.domain=bar' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.authUrl.secretName=auth-url-name' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.authUrl.secretName or global.cloud.authUrl.secretKey is defined, both must be set." ]] -} - -@test "client/DaemonSet: fails when global.cloud.authURL.secretKey is set but global.cloud.authURL.secretName is not set." 
{ - cd `chart_dir` - run helm template \ - -s templates/client-daemonset.yaml \ - --set 'apiGateway.enabled=true' \ - --set 'apiGateway.image=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.datacenter=dc-foo' \ - --set 'global.domain=bar' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.authUrl.secretKey=auth-url-key' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.authUrl.secretName or global.cloud.authUrl.secretKey is defined, both must be set." ]] -} - -@test "client/DaemonSet: fails when global.cloud.apiHost.secretName is set but global.cloud.apiHost.secretKey is not set." { - cd `chart_dir` - run helm template \ - -s templates/api-gateway-controller-deployment.yaml \ - --set 'client.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.datacenter=dc-foo' \ - --set 'global.domain=bar' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.apiHost.secretName=auth-url-name' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.apiHost.secretName or global.cloud.apiHost.secretKey is defined, both must be set." ]] -} -@test "client/DaemonSet: fails when global.cloud.apiHost.secretKey is set but global.cloud.apiHost.secretName is not set." { - cd `chart_dir` - run helm template \ - -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.datacenter=dc-foo' \ - --set 'global.domain=bar' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.apiHost.secretKey=auth-url-key' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.apiHost.secretName or global.cloud.apiHost.secretKey is defined, both must be set." ]] -} - -@test "client/DaemonSet: fails when global.cloud.scadaAddress.secretName is set but global.cloud.scadaAddress.secretKey is not set."
{ - cd `chart_dir` - run helm template \ - -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.datacenter=dc-foo' \ - --set 'global.domain=bar' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.scadaAddress.secretName=scada-address-name' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.scadaAddress.secretName or global.cloud.scadaAddress.secretKey is defined, both must be set." ]] -} - -@test "client/DaemonSet: fails when global.cloud.scadaAddress.secretKey is set but global.cloud.scadaAddress.secretName is not set." { - cd `chart_dir` - run helm template \ - -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.datacenter=dc-foo' \ - --set 'global.domain=bar' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.scadaAddress.secretKey=scada-address-key' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.scadaAddress.secretName or global.cloud.scadaAddress.secretKey is defined, both must be set." ]] -} - -@test "client/DaemonSet: sets TLS server name if global.cloud.enabled is set" { - cd `chart_dir` - local object=$(helm template \ - -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ - --set 'global.acls.manageSystemACLs=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - . 
| tee /dev/stderr | - yq '.spec.template.spec.initContainers[] | select(.name == "client-acl-init")' | tee /dev/stderr) - - local actual=$(echo $object | - yq '[.env[9].name] | any(contains("CONSUL_TLS_SERVER_NAME"))' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo $object | - yq '[.env[9].value] | any(contains("server.dc1.consul"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + [[ "$output" =~ "global.imageK8s is not a valid key, use global.imageK8S (note the capital 'S')" ]] } diff --git a/charts/consul/test/unit/client-podsecuritypolicy.bats b/charts/consul/test/unit/client-podsecuritypolicy.bats index 3d7b628389..a37d4ec147 100644 --- a/charts/consul/test/unit/client-podsecuritypolicy.bats +++ b/charts/consul/test/unit/client-podsecuritypolicy.bats @@ -22,7 +22,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-podsecuritypolicy.yaml \ - --set 'client.enabled=true' \ --set 'global.enablePodSecurityPolicies=true' \ . | tee /dev/stderr | yq -s 'length > 0' | tee /dev/stderr) @@ -33,7 +32,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-podsecuritypolicy.yaml \ - --set 'client.enabled=true' \ --set 'global.enablePodSecurityPolicies=true' \ . | tee /dev/stderr | yq -c '.spec.hostPorts' | tee /dev/stderr) @@ -47,7 +45,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-podsecuritypolicy.yaml \ - --set 'client.enabled=true' \ --set 'global.enablePodSecurityPolicies=true' \ --set 'client.grpc=false' \ . | tee /dev/stderr | @@ -62,7 +59,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-podsecuritypolicy.yaml \ - --set 'client.enabled=true' \ --set 'global.enablePodSecurityPolicies=true' \ --set 'client.exposeGossipPorts=true' \ . | tee /dev/stderr | @@ -77,7 +73,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-podsecuritypolicy.yaml \ - --set 'client.enabled=true' \ --set 'global.enablePodSecurityPolicies=true' \ . | tee /dev/stderr | yq '.spec.volumes | any(contains("hostPath"))' | tee /dev/stderr) @@ -89,7 +84,6 @@ load _helpers # Test that hostPath is an allowed volume type. local actual=$(helm template \ -s templates/client-podsecuritypolicy.yaml \ - --set 'client.enabled=true' \ --set 'global.enablePodSecurityPolicies=true' \ --set 'client.dataDirectoryHostPath=/opt/consul' \ . | tee /dev/stderr | @@ -99,7 +93,6 @@ load _helpers # Test that the path we're allowed to write to is the right one. local actual=$(helm template \ -s templates/client-podsecuritypolicy.yaml \ - --set 'client.enabled=true' \ --set 'global.enablePodSecurityPolicies=true' \ --set 'client.dataDirectoryHostPath=/opt/consul' \ . | tee /dev/stderr | @@ -114,7 +107,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-podsecuritypolicy.yaml \ - --set 'client.enabled=true' \ --set 'global.enablePodSecurityPolicies=true' \ --set 'global.tls.enabled=true' \ . | tee /dev/stderr | @@ -126,7 +118,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-podsecuritypolicy.yaml \ - --set 'client.enabled=true' \ --set 'global.enablePodSecurityPolicies=true' \ --set 'global.tls.enabled=true' \ --set 'global.tls.httpsOnly=true' \ @@ -142,7 +133,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-podsecuritypolicy.yaml \ - --set 'client.enabled=true' \ --set 'global.enablePodSecurityPolicies=true' \ --set 'client.hostNetwork=true' \ . 
| tee /dev/stderr | @@ -155,7 +145,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-podsecuritypolicy.yaml \ - --set 'client.enabled=true' \ --set 'global.enablePodSecurityPolicies=true' \ --set 'client.hostNetwork=true' \ . | tee /dev/stderr | @@ -170,7 +159,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-podsecuritypolicy.yaml \ - --set 'client.enabled=true' \ --set 'global.enablePodSecurityPolicies=true' \ . | tee /dev/stderr | yq '.spec.hostNetwork == false' | tee /dev/stderr) diff --git a/charts/consul/test/unit/client-role.bats b/charts/consul/test/unit/client-role.bats index ad9bf86702..066e4ad98d 100644 --- a/charts/consul/test/unit/client-role.bats +++ b/charts/consul/test/unit/client-role.bats @@ -2,11 +2,13 @@ load _helpers -@test "client/Role: disabled by default" { +@test "client/Role: enabled by default" { cd `chart_dir` - assert_empty helm template \ + local actual=$(helm template \ -s templates/client-role.yaml \ - . + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] } @test "client/Role: disabled with global.enabled=false" { diff --git a/charts/consul/test/unit/client-rolebinding.bats b/charts/consul/test/unit/client-rolebinding.bats index d2dd375f19..2c5912eda8 100644 --- a/charts/consul/test/unit/client-rolebinding.bats +++ b/charts/consul/test/unit/client-rolebinding.bats @@ -2,6 +2,15 @@ load _helpers +@test "client/RoleBinding: enabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-rolebinding.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + @test "client/RoleBinding: disabled with global.enabled=false" { cd `chart_dir` assert_empty helm template \ diff --git a/charts/consul/test/unit/client-securitycontextconstraints.bats b/charts/consul/test/unit/client-securitycontextconstraints.bats index 4efbda998b..c8901f7e43 100644 --- a/charts/consul/test/unit/client-securitycontextconstraints.bats +++ b/charts/consul/test/unit/client-securitycontextconstraints.bats @@ -22,7 +22,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-securitycontextconstraints.yaml \ - --set 'client.enabled=true' \ --set 'global.openshift.enabled=true' \ . | tee /dev/stderr | yq -s 'length > 0' | tee /dev/stderr) @@ -33,7 +32,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-securitycontextconstraints.yaml \ - --set 'client.enabled=true' \ --set 'global.openshift.enabled=true' \ . | tee /dev/stderr | yq -c '.allowHostPorts' | tee /dev/stderr) @@ -48,7 +46,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-securitycontextconstraints.yaml \ - --set 'client.enabled=true' \ --set 'global.openshift.enabled=true' \ . | tee /dev/stderr | yq '.volumes | any(contains("hostPath"))' | tee /dev/stderr) @@ -60,7 +57,6 @@ load _helpers # Test that hostPath is an allowed volume type. local actual=$(helm template \ -s templates/client-securitycontextconstraints.yaml \ - --set 'client.enabled=true' \ --set 'global.openshift.enabled=true' \ --set 'client.dataDirectoryHostPath=/opt/consul' \ . | tee /dev/stderr | @@ -70,7 +66,6 @@ load _helpers # Test that the path we're allowed to write to host path. local actual=$(helm template \ -s templates/client-securitycontextconstraints.yaml \ - --set 'client.enabled=true' \ --set 'global.openshift.enabled=true' \ --set 'client.dataDirectoryHostPath=/opt/consul' \ . 
| tee /dev/stderr | @@ -85,7 +80,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-securitycontextconstraints.yaml \ - --set 'client.enabled=true' \ --set 'global.openshift.enabled=true' \ --set 'client.hostNetwork=true' \ . | tee /dev/stderr | @@ -97,7 +91,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-securitycontextconstraints.yaml \ - --set 'client.enabled=true' \ --set 'global.openshift.enabled=true' \ . | tee /dev/stderr | yq '.allowHostNetwork == false' | tee /dev/stderr) diff --git a/charts/consul/test/unit/client-serviceaccount.bats b/charts/consul/test/unit/client-serviceaccount.bats index d8a717a95d..429470207a 100644 --- a/charts/consul/test/unit/client-serviceaccount.bats +++ b/charts/consul/test/unit/client-serviceaccount.bats @@ -2,6 +2,15 @@ load _helpers +@test "client/ServiceAccount: enabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-serviceaccount.yaml \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + @test "client/ServiceAccount: disabled with global.enabled=false" { cd `chart_dir` assert_empty helm template \ @@ -46,7 +55,6 @@ load _helpers cd `chart_dir` local object=$(helm template \ -s templates/client-serviceaccount.yaml \ - --set 'client.enabled=true' \ --set 'global.imagePullSecrets[0].name=my-secret' \ --set 'global.imagePullSecrets[1].name=my-secret2' \ . | tee /dev/stderr) @@ -67,7 +75,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-serviceaccount.yaml \ - --set 'client.enabled=true' \ . | tee /dev/stderr | yq '.metadata.annotations | length > 0' | tee /dev/stderr) [ "${actual}" = "false" ] @@ -77,7 +84,6 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/client-serviceaccount.yaml \ - --set 'client.enabled=true' \ --set "client.serviceAccount.annotations=foo: bar" \ . | tee /dev/stderr | yq -r '.metadata.annotations.foo' | tee /dev/stderr) diff --git a/charts/consul/test/unit/client-snapshot-agent-deployment.bats b/charts/consul/test/unit/client-snapshot-agent-deployment.bats new file mode 100644 index 0000000000..166b1d45e7 --- /dev/null +++ b/charts/consul/test/unit/client-snapshot-agent-deployment.bats @@ -0,0 +1,1094 @@ +#!/usr/bin/env bats + +load _helpers + +@test "client/SnapshotAgentDeployment: disabled by default" { + cd `chart_dir` + assert_empty helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + . +} + +@test "client/SnapshotAgentDeployment: enabled with client.snapshotAgent.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/SnapshotAgentDeployment: enabled with client.enabled=true and client.snapshotAgent.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.enabled=true' \ + --set 'client.snapshotAgent.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/SnapshotAgentDeployment: disabled with client=false and client.snapshotAgent.enabled=true" { + cd `chart_dir` + assert_empty helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'client.enabled=false' \ + . 
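# The enable/disable tests above follow the render-and-assert pattern used
# throughout this new file: render exactly one template with `helm template
# -s`, pipe the YAML through yq, and assert on the extracted value. A minimal
# sketch of the pattern (the test name and the .kind assertion are
# illustrative, not taken from this file):
#
#   @test "example/SnapshotAgentDeployment: renders a Deployment" {
#     cd `chart_dir`
#     local actual=$(helm template \
#       -s templates/client-snapshot-agent-deployment.yaml \
#       --set 'client.snapshotAgent.enabled=true' \
#       . | tee /dev/stderr |
#       yq -r '.kind' | tee /dev/stderr)
#     [ "${actual}" = "Deployment" ]
#   }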
+} + +@test "client/SnapshotAgentDeployment: when client.snapshotAgent.configSecret.secretKey!=null and client.snapshotAgent.configSecret.secretName=null, fail" { + cd `chart_dir` + run helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'client.snapshotAgent.configSecret.secretName=' \ + --set 'client.snapshotAgent.configSecret.secretKey=bar' \ + . + [ "$status" -eq 1 ] + [[ "$output" =~ "client.snapshotAgent.configSecret.secretKey and client.snapshotAgent.configSecret.secretName must both be specified." ]] +} + +@test "client/SnapshotAgentDeployment: when client.snapshotAgent.configSecret.secretName!=null and client.snapshotAgent.configSecret.secretKey=null, fail" { + cd `chart_dir` + run helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'client.snapshotAgent.configSecret.secretName=foo' \ + --set 'client.snapshotAgent.configSecret.secretKey=' \ + . + [ "$status" -eq 1 ] + [[ "$output" =~ "client.snapshotAgent.configSecret.secretKey and client.snapshotAgent.configSecret.secretName must both be specified." ]] +} + +@test "client/SnapshotAgentDeployment: adds volume for snapshot agent config secret when secret is configured" { + cd `chart_dir` + local vol=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'client.snapshotAgent.configSecret.secretName=a/b/c/d' \ + --set 'client.snapshotAgent.configSecret.secretKey=snapshot-agent-config' \ + . | tee /dev/stderr | + yq -r -c '.spec.template.spec.volumes[] | select(.name == "snapshot-config")' | tee /dev/stderr) + local actual + actual=$(echo $vol | jq -r '. .name' | tee /dev/stderr) + [ "${actual}" = 'snapshot-config' ] + + actual=$(echo $vol | jq -r '. .secret.secretName' | tee /dev/stderr) + [ "${actual}" = 'a/b/c/d' ] + + actual=$(echo $vol | jq -r '. .secret.items[0].key' | tee /dev/stderr) + [ "${actual}" = 'snapshot-agent-config' ] + + actual=$(echo $vol | jq -r '. .secret.items[0].path' | tee /dev/stderr) + [ "${actual}" = 'snapshot-config.json' ] +} + +@test "client/SnapshotAgentDeployment: adds volume mount to snapshot container for snapshot agent config secret when secret is configured" { + cd `chart_dir` + local vol=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'client.snapshotAgent.configSecret.secretName=a/b/c/d' \ + --set 'client.snapshotAgent.configSecret.secretKey=snapshot-agent-config' \ + . | tee /dev/stderr | + yq -r -c '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "snapshot-config")' | tee /dev/stderr) + local actual + actual=$(echo $vol | jq -r '. .name' | tee /dev/stderr) + [ "${actual}" = 'snapshot-config' ] + + actual=$(echo $vol | jq -r '. .readOnly' | tee /dev/stderr) + [ "${actual}" = 'true' ] + + actual=$(echo $vol | jq -r '. .mountPath' | tee /dev/stderr) + [ "${actual}" = '/consul/config' ] +} + +@test "client/SnapshotAgentDeployment: set config-dir argument on snapshot agent command to volume mount" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'client.snapshotAgent.configSecret.secretName=a/b/c/d' \ + --set 'client.snapshotAgent.configSecret.secretKey=snapshot-agent-config' \ + . 
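# The configSecret tests above and below pin down the full wiring: secretName
# and secretKey must be supplied as a pair, the secret is mounted read-only at
# /consul/config as snapshot-config.json, and (asserted immediately below) the
# agent command gains -config-dir pointing at that mount. A sketch of
# inspecting the rendered volume by hand, with an illustrative secret name:
#
#   helm template -s templates/client-snapshot-agent-deployment.yaml \
#     --set 'client.snapshotAgent.enabled=true' \
#     --set 'client.snapshotAgent.configSecret.secretName=my-agent-config' \
#     --set 'client.snapshotAgent.configSecret.secretKey=snapshot-agent-config' \
#     . | yq -r -c '.spec.template.spec.volumes[] | select(.name == "snapshot-config")'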
| tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].command[2] | contains("-config-dir=/consul/config")' | tee /dev/stderr) + [ "${actual}" = 'true' ] +} + +#-------------------------------------------------------------------- +# tolerations + +@test "client/SnapshotAgentDeployment: no tolerations by default" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.tolerations | length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "client/SnapshotAgentDeployment: populates tolerations when client.tolerations is populated" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'client.tolerations=allow' \ + . | tee /dev/stderr | + yq '.spec.template.spec.tolerations | contains("allow")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# priorityClassName + +@test "client/SnapshotAgentDeployment: no priorityClassName by default" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.priorityClassName | length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "client/SnapshotAgentDeployment: populates priorityClassName when client.priorityClassName is populated" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'client.priorityClassName=allow' \ + . | tee /dev/stderr | + yq '.spec.template.spec.priorityClassName | contains("allow")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# global.acls.manageSystemACLs + +@test "clientSnapshotAgent/Deployment: consul-logout preStop hook is added when ACLs are enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '[.spec.template.spec.containers[0].lifecycle.preStop.exec.command[2]] | any(contains("/bin/consul logout"))' | tee /dev/stderr) + [ "${object}" = "true" ] +} + +@test "clientSnapshotAgent/Deployment: CONSUL_HTTP_TOKEN_FILE is not set when acls are disabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + . | tee /dev/stderr | + yq '[.spec.template.spec.containers[0].env[1].name] | any(contains("CONSUL_HTTP_TOKEN_FILE"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "clientSnapshotAgent/Deployment: CONSUL_HTTP_TOKEN_FILE is set when acls are enabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . 
| tee /dev/stderr | + yq '[.spec.template.spec.containers[0].env[2].name] | any(contains("CONSUL_HTTP_TOKEN_FILE"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "clientSnapshotAgent/Deployment: init container is created when global.acls.manageSystemACLs=true" { + cd `chart_dir` + local object=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[0]' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.name' | tee /dev/stderr) + [ "${actual}" = "snapshot-agent-acl-init" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[1].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[1].value] | any(contains("http://$(HOST_IP):8500"))' | tee /dev/stderr) + echo $actual + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "clientSnapshotAgent/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command and environment with tls enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "snapshot-agent-acl-init")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[1].name] | any(contains("CONSUL_CACERT"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].value] | any(contains("https://$(HOST_IP):8501"))' | tee /dev/stderr) + echo $actual + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.volumeMounts[1] | any(contains("consul-ca-cert"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "clientSnapshotAgent/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command with Partitions enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.enableConsulNamespaces=true' \ + --set 'global.adminPartitions.enabled=true' \ + --set 'global.adminPartitions.name=default' \ + --set 'global.acls.manageSystemACLs=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "snapshot-agent-acl-init")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-acl-auth-method=release-name-consul-k8s-component-auth-method"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-partition=default"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[1].name] | any(contains("CONSUL_CACERT"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].value] | any(contains("https://$(HOST_IP):8501"))' | tee /dev/stderr) + echo $actual + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.volumeMounts[1] | any(contains("consul-ca-cert"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "clientSnapshotAgent/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command and environment with tls enabled and autoencrypt enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "snapshot-agent-acl-init")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[1].name] | any(contains("CONSUL_CACERT"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].value] | any(contains("https://$(HOST_IP):8501"))' | tee /dev/stderr) + echo $actual + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.volumeMounts[1] | any(contains("consul-auto-encrypt-ca-cert"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "clientSnapshotAgent/Deployment: auto-encrypt init container is created and is the first init-container when global.acls.manageSystemACLs=true and has correct command and environment with tls enabled and autoencrypt enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . 
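# Read together, the ACL init-container tests above imply an invocation
# roughly like the following; this is a sketch assembled from the asserted
# fragments, not a literal line from the template:
#
#   consul-k8s-control-plane acl-init \
#     -acl-auth-method=release-name-consul-k8s-component-auth-method \
#     -partition=default \
#     -consul-api-timeout=5s
#
# with CONSUL_HTTP_ADDR switching from http://$(HOST_IP):8500 to
# https://$(HOST_IP):8501 once global.tls.enabled=true, and -partition only
# asserted when admin partitions are enabled.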
| tee /dev/stderr | + yq '.spec.template.spec.initContainers[0]' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.name' | tee /dev/stderr) + [ "${actual}" = "get-auto-encrypt-client-ca" ] +} + +#-------------------------------------------------------------------- +# nodeSelector + +@test "client/SnapshotAgentDeployment: no nodeSelector by default" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.nodeSelector | length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "client/SnapshotAgentDeployment: populates nodeSelector when client.nodeSelector is populated" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'client.nodeSelector=allow' \ + . | tee /dev/stderr | + yq '.spec.template.spec.nodeSelector | contains("allow")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# global.tls.enabled + +@test "client/SnapshotAgentDeployment: sets TLS env vars when global.tls.enabled" { + cd `chart_dir` + local env=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) + + local actual + actual=$(echo $env | jq -r '. | select(.name == "CONSUL_HTTP_ADDR") | .value' | tee /dev/stderr) + [ "${actual}" = 'https://$(HOST_IP):8501' ] + + actual=$(echo $env | jq -r '. | select(.name == "CONSUL_CACERT") | .value' | tee /dev/stderr) + [ "${actual}" = "/consul/tls/ca/tls.crt" ] +} + +@test "client/SnapshotAgentDeployment: populates volumes when global.tls.enabled is true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.volumes | length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/SnapshotAgentDeployment: populates container volumeMounts when global.tls.enabled is true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].volumeMounts | length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/SnapshotAgentDeployment: can overwrite CA with the provided secret" { + cd `chart_dir` + local ca_cert_volume=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.caCert.secretName=foo-ca-cert' \ + --set 'global.tls.caCert.secretKey=key' \ + --set 'global.tls.caKey.secretName=foo-ca-key' \ + --set 'global.tls.caKey.secretKey=key' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.volumes[] | select(.name=="consul-ca-cert")' | tee /dev/stderr) + + # check that the provided ca cert secret is attached as a volume + local actual + actual=$(echo $ca_cert_volume | jq -r '.secret.secretName' | tee /dev/stderr) + [ "${actual}" = "foo-ca-cert" ] + + # check that it uses the provided secret key + actual=$(echo $ca_cert_volume | jq -r '.secret.items[0].key' | tee /dev/stderr) + [ "${actual}" = "key" ] +} + +#-------------------------------------------------------------------- +# global.tls.enableAutoEncrypt + +@test "client/SnapshotAgentDeployment: consul-auto-encrypt-ca-cert volume is added when TLS with auto-encrypt is enabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.volumes[] | select(.name == "consul-auto-encrypt-ca-cert") | length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/SnapshotAgentDeployment: consul-auto-encrypt-ca-cert volumeMount is added when TLS with auto-encrypt is enabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "consul-auto-encrypt-ca-cert") | length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/SnapshotAgentDeployment: get-auto-encrypt-client-ca init container is created when TLS with auto-encrypt is enabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "get-auto-encrypt-client-ca") | length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/SnapshotAgentDeployment: adds both init containers when TLS with auto-encrypt and ACLs are enabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers | length == 2' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/SnapshotAgentDeployment: consul-ca-cert volume is not added if externalServers.enabled=true and externalServers.useSystemRoots=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'externalServers.enabled=true' \ + --set 'externalServers.hosts[0]=foo.com' \ + --set 'externalServers.useSystemRoots=true' \ + . 
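# The TLS tests above fix the agent environment for global.tls.enabled=true:
# CONSUL_HTTP_ADDR=https://$(HOST_IP):8501 and CONSUL_CACERT=/consul/tls/ca/tls.crt,
# with the CA coming either from the chart-managed secret or, as checked
# immediately below, from a user-provided caCert/caKey secret pair. A
# one-liner sketch for spot-checking those env vars on a rendered chart:
#
#   helm template -s templates/client-snapshot-agent-deployment.yaml \
#     --set 'client.snapshotAgent.enabled=true' \
#     --set 'global.tls.enabled=true' \
#     . | yq -r '.spec.template.spec.containers[0].env[]' |
#     jq -r 'select(.name == "CONSUL_HTTP_ADDR" or .name == "CONSUL_CACERT")'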
| tee /dev/stderr | + yq '.spec.template.spec.volumes[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) + [ "${actual}" = "" ] +} + +#-------------------------------------------------------------------- +# resources + +@test "client/SnapshotAgentDeployment: default resources" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + . | tee /dev/stderr | + yq -rc '.spec.template.spec.containers[0].resources' | tee /dev/stderr) + [ "${actual}" = '{"limits":{"cpu":"50m","memory":"50Mi"},"requests":{"cpu":"50m","memory":"50Mi"}}' ] +} + +@test "client/SnapshotAgentDeployment: can set resources" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'client.snapshotAgent.resources.requests.memory=100Mi' \ + --set 'client.snapshotAgent.resources.requests.cpu=100m' \ + --set 'client.snapshotAgent.resources.limits.memory=200Mi' \ + --set 'client.snapshotAgent.resources.limits.cpu=200m' \ + . | tee /dev/stderr | + yq -rc '.spec.template.spec.containers[0].resources' | tee /dev/stderr) + [ "${actual}" = '{"limits":{"cpu":"200m","memory":"200Mi"},"requests":{"cpu":"100m","memory":"100Mi"}}' ] +} + +#-------------------------------------------------------------------- +# client.snapshotAgent.caCert + +@test "client/SnapshotAgentDeployment: if caCert is set command is modified correctly" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'client.snapshotAgent.caCert=-----BEGIN CERTIFICATE----- +MIICFjCCAZsCCQCdwLtdjbzlYzAKBggqhkjOPQQDAjB0MQswCQYDVQQGEwJDQTEL' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].command[2] | contains("cat < /extra-ssl-certs/custom-ca.pem")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/SnapshotAgentDeployment: if caCert is set extra-ssl-certs volumeMount is added" { + cd `chart_dir` + local object=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'client.snapshotAgent.caCert=-----BEGIN CERTIFICATE----- +MIICFjCCAZsCCQCdwLtdjbzlYzAKBggqhkjOPQQDAjB0MQswCQYDVQQGEwJDQTEL' \ + . | tee /dev/stderr | yq -r '.spec.template.spec' | tee /dev/stderr) + + local actual=$(echo $object | jq -r '.volumes[0].name' | tee /dev/stderr) + [ "${actual}" = "extra-ssl-certs" ] + + local actual=$(echo $object | jq -r '.containers[0].volumeMounts[0].name' | tee /dev/stderr) + [ "${actual}" = "extra-ssl-certs" ] +} + +@test "client/SnapshotAgentDeployment: if caCert is set SSL_CERT_DIR env var is set" { + cd `chart_dir` + local object=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'client.snapshotAgent.caCert=-----BEGIN CERTIFICATE----- +MIICFjCCAZsCCQCdwLtdjbzlYzAKBggqhkjOPQQDAjB0MQswCQYDVQQGEwJDQTEL' \ + . 
| tee /dev/stderr | yq -r '.spec.template.spec.containers[0].env[0]' | tee /dev/stderr) + + local actual=$(echo $object | jq -r '.name' | tee /dev/stderr) + [ "${actual}" = "SSL_CERT_DIR" ] + local actual=$(echo $object | jq -r '.value' | tee /dev/stderr) + [ "${actual}" = "/etc/ssl/certs:/extra-ssl-certs" ] +} + +#-------------------------------------------------------------------- +# license-autoload + +@test "client/SnapshotAgentDeployment: adds volume for license secret when enterprise license secret name and key are provided" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.enterpriseLicense.secretName=foo' \ + --set 'global.enterpriseLicense.secretKey=bar' \ + . | tee /dev/stderr | + yq -r -c '.spec.template.spec.volumes[] | select(.name == "consul-license")' | tee /dev/stderr) + [ "${actual}" = '{"name":"consul-license","secret":{"secretName":"foo"}}' ] +} + +@test "client/SnapshotAgentDeployment: adds volume mount for license secret when enterprise license secret name and key are provided" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.enterpriseLicense.secretName=foo' \ + --set 'global.enterpriseLicense.secretKey=bar' \ + . | tee /dev/stderr | + yq -r -c '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "consul-license")' | tee /dev/stderr) + [ "${actual}" = '{"name":"consul-license","mountPath":"/consul/license","readOnly":true}' ] +} + +@test "client/SnapshotAgentDeployment: adds env var for license path when enterprise license secret name and key are provided" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.enterpriseLicense.secretName=foo' \ + --set 'global.enterpriseLicense.secretKey=bar' \ + . | tee /dev/stderr | + yq -r -c '.spec.template.spec.containers[0].env[] | select(.name == "CONSUL_LICENSE_PATH")' | tee /dev/stderr) + [ "${actual}" = '{"name":"CONSUL_LICENSE_PATH","value":"/consul/license/bar"}' ] +} + +@test "client/SnapshotAgentDeployment: does not add license secret volume if manageSystemACLs are enabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.enterpriseLicense.secretName=foo' \ + --set 'global.enterpriseLicense.secretKey=bar' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq -r -c '.spec.template.spec.volumes[] | select(.name == "consul-license")' | tee /dev/stderr) + [ "${actual}" = "" ] +} + +@test "client/SnapshotAgentDeployment: does not add license secret volume mount if manageSystemACLs are enabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.enterpriseLicense.secretName=foo' \ + --set 'global.enterpriseLicense.secretKey=bar' \ + --set 'global.acls.manageSystemACLs=true' \ + . 
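# The license-autoload tests pin down three pieces of plumbing when
# global.enterpriseLicense.secretName/secretKey are set: a consul-license
# volume backed by the secret, a read-only mount at /consul/license, and
# CONSUL_LICENSE_PATH pointing at the mounted key; all three are omitted
# again once global.acls.manageSystemACLs=true (asserted here and just
# below). Sketch of verifying the env var, with the same illustrative
# foo/bar secret names used by these tests:
#
#   helm template -s templates/client-snapshot-agent-deployment.yaml \
#     --set 'client.snapshotAgent.enabled=true' \
#     --set 'global.enterpriseLicense.secretName=foo' \
#     --set 'global.enterpriseLicense.secretKey=bar' \
#     . | yq -r -c '.spec.template.spec.containers[0].env[] | select(.name == "CONSUL_LICENSE_PATH")'
#
#   expected, per the test above: {"name":"CONSUL_LICENSE_PATH","value":"/consul/license/bar"}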
| tee /dev/stderr | + yq -r -c '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "consul-license")' | tee /dev/stderr) + [ "${actual}" = "" ] +} + +@test "client/SnapshotAgentDeployment: does not add license env if manageSystemACLs are enabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.enterpriseLicense.secretName=foo' \ + --set 'global.enterpriseLicense.secretKey=bar' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq -r -c '.spec.template.spec.containers[0].env[] | select(.name == "CONSUL_LICENSE_PATH")' | tee /dev/stderr) + [ "${actual}" = "" ] +} + +#-------------------------------------------------------------------- +# get-auto-encrypt-client-ca + +@test "client/SnapshotAgentDeployment: get-auto-encrypt-client-ca uses server's stateful set address by default and passes ca cert" { + cd `chart_dir` + local command=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "get-auto-encrypt-client-ca").command | join(" ")' | tee /dev/stderr) + + # check server address + actual=$(echo $command | jq ' . | contains("-server-addr=release-name-consul-server")') + [ "${actual}" = "true" ] + + # check server port + actual=$(echo $command | jq ' . | contains("-server-port=8501")') + [ "${actual}" = "true" ] + + # check server's CA cert + actual=$(echo $command | jq ' . | contains("-ca-file=/consul/tls/ca/tls.crt")') + [ "${actual}" = "true" ] + + # check consul-api-timeout + actual=$(echo $command | jq ' . | contains("-consul-api-timeout=5s")') + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# Vault + +@test "client/SnapshotAgentDeployment: configures server CA to come from vault when vault is enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=test' \ + --set 'global.secretsBackend.vault.consulServerRole=foo' \ + --set 'global.secretsBackend.vault.consulCARole=carole' \ + . 
| tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + # Check annotations + local actual + actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/agent-init-first"]' | tee /dev/stderr) + [ "${actual}" = "true" ] + local actual + actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/agent-inject"]' | tee /dev/stderr) + [ "${actual}" = "true" ] + local actual + actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/role"]' | tee /dev/stderr) + [ "${actual}" = "carole" ] + local actual + actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/agent-inject-secret-serverca.crt"]' | tee /dev/stderr) + [ "${actual}" = "foo" ] + local actual + actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/agent-inject-template-serverca.crt"]' | tee /dev/stderr) + [ "${actual}" = $'{{- with secret \"foo\" -}}\n{{- .Data.certificate -}}\n{{- end -}}' ] +} + +@test "client/SnapshotAgentDeployment: vault CA is not configured by default" { + cd `chart_dir` + local object=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.secretsBackend.vault.consulCARole=carole' \ + . | tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/agent-extra-secret")') + [ "${actual}" = "false" ] + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/ca-cert")') + [ "${actual}" = "false" ] +} + +@test "client/SnapshotAgentDeployment: vault CA is not configured when secretName is set but secretKey is not" { + cd `chart_dir` + local object=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.secretsBackend.vault.consulCARole=carole' \ + --set 'global.secretsBackend.vault.ca.secretName=ca' \ + . 
| tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/agent-extra-secret")') + [ "${actual}" = "false" ] + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/ca-cert")') + [ "${actual}" = "false" ] +} + +@test "client/SnapshotAgentDeployment: vault CA is not configured when secretKey is set but secretName is not" { + cd `chart_dir` + local object=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.secretsBackend.vault.consulCARole=carole' \ + --set 'global.secretsBackend.vault.ca.secretKey=tls.crt' \ + . | tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/agent-extra-secret")') + [ "${actual}" = "false" ] + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/ca-cert")') + [ "${actual}" = "false" ] +} + +@test "client/SnapshotAgentDeployment: vault CA is configured when both secretName and secretKey are set" { + cd `chart_dir` + local object=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.secretsBackend.vault.consulCARole=carole' \ + --set 'global.secretsBackend.vault.ca.secretName=ca' \ + --set 'global.secretsBackend.vault.ca.secretKey=tls.crt' \ + . | tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.metadata.annotations."vault.hashicorp.com/agent-extra-secret"') + [ "${actual}" = "ca" ] + local actual=$(echo $object | yq -r '.metadata.annotations."vault.hashicorp.com/ca-cert"') + [ "${actual}" = "/vault/custom/tls.crt" ] +} + +@test "client/SnapshotAgentDeployment: vault enterprise license annotations are correct when enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.enterpriseLicense.secretName=path/to/secret' \ + --set 'global.enterpriseLicense.secretKey=enterpriselicense' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.metadata' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.annotations["vault.hashicorp.com/agent-inject-secret-enterpriselicense.txt"]' | tee /dev/stderr) + [ "${actual}" = "path/to/secret" ] + local actual="$(echo $object | + yq -r '.annotations["vault.hashicorp.com/agent-inject-template-enterpriselicense.txt"]' | tee /dev/stderr)" + local expected=$'{{- with secret \"path/to/secret\" -}}\n{{- .Data.data.enterpriselicense -}}\n{{- end -}}' + [ "${actual}" = "${expected}" ] +} + +@test "client/SnapshotAgentDeployment: vault CONSUL_LICENSE_PATH is set to /vault/secrets/enterpriselicense.txt" { + cd `chart_dir` + local env=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.enterpriseLicense.secretName=a/b/c/d' \ + --set 'global.enterpriseLicense.secretKey=enterpriselicense' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) + + local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_LICENSE_PATH") | .value' | tee /dev/stderr) + [ "${actual}" = "/vault/secrets/enterpriselicense.txt" ] +} + +@test "client/SnapshotAgentDeployment: vault does not add volume for license secret" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.enterpriseLicense.secretName=a/b/c/d' \ + --set 'global.enterpriseLicense.secretKey=enterpriselicense' \ + . | tee /dev/stderr | + yq -r -c '.spec.template.spec.volumes[] | select(.name == "consul-license")' | tee /dev/stderr) + [ "${actual}" = "" ] +} + +@test "client/SnapshotAgentDeployment: vault does not add volume mount for license secret" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.enterpriseLicense.secretName=a/b/c/d' \ + --set 'global.enterpriseLicense.secretKey=enterpriselicense' \ + . 
| tee /dev/stderr | + yq -r -c '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "consul-license")' | tee /dev/stderr) + [ "${actual}" = "" ] +} + +@test "client/SnapshotAgentDeployment: vault snapshot agent config annotations are correct when enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.secretsBackend.vault.consulSnapshotAgentRole=bar' \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'client.snapshotAgent.configSecret.secretName=path/to/secret' \ + --set 'client.snapshotAgent.configSecret.secretKey=config' \ + . | tee /dev/stderr | + yq -r '.spec.template.metadata' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.annotations["vault.hashicorp.com/agent-inject-secret-snapshot-agent-config.json"]' | tee /dev/stderr) + [ "${actual}" = "path/to/secret" ] + + actual=$(echo $object | + yq -r '.annotations["vault.hashicorp.com/agent-inject-template-snapshot-agent-config.json"]' | tee /dev/stderr) + local expected=$'{{- with secret \"path/to/secret\" -}}\n{{- .Data.data.config -}}\n{{- end -}}' + [ "${actual}" = "${expected}" ] + + actual=$(echo $object | jq -r '.annotations["vault.hashicorp.com/role"]' | tee /dev/stderr) + [ "${actual}" = "bar" ] +} + +@test "client/SnapshotAgentDeployment: vault does not add volume for snapshot agent config secret" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'client.snapshotAgent.configSecret.secretName=a/b/c/d' \ + --set 'client.snapshotAgent.configSecret.secretKey=snapshot-agent-config' \ + . | tee /dev/stderr | + yq -r -c '.spec.template.spec.volumes[] | select(.name == "snapshot-config")' | tee /dev/stderr) + [ "${actual}" = "" ] +} + +@test "client/SnapshotAgentDeployment: vault does not add volume mount for snapshot agent config secret" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'client.snapshotAgent.configSecret.secretName=a/b/c/d' \ + --set 'client.snapshotAgent.configSecret.secretKey=snapshot-agent-config' \ + . 
| tee /dev/stderr | + yq -r -c '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "snapshot-config")' | tee /dev/stderr) + [ "${actual}" = "" ] +} + +@test "client/SnapshotAgentDeployment: vault sets config-file argument on snapshot agent command to config downloaded by vault agent injector" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'client.snapshotAgent.configSecret.secretName=a/b/c/d' \ + --set 'client.snapshotAgent.configSecret.secretKey=snapshot-agent-config' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].command[2] | contains("-config-file=/vault/secrets/snapshot-agent-config.json")' | tee /dev/stderr) + [ "${actual}" = 'true' ] +} + +#-------------------------------------------------------------------- +# Vault agent annotations + +@test "client/SnapshotAgentDeployment: no vault agent annotations defined by default" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=test' \ + --set 'global.secretsBackend.vault.consulServerRole=foo' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.consulCARole=carole' \ + . | tee /dev/stderr | + yq -r '.spec.template.metadata.annotations | del(."consul.hashicorp.com/connect-inject") | del(."vault.hashicorp.com/agent-inject") | del(."vault.hashicorp.com/role")' | tee /dev/stderr) + [ "${actual}" = "{}" ] +} + +@test "client/SnapshotAgentDeployment: vault agent annotations can be set" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=test' \ + --set 'global.secretsBackend.vault.consulServerRole=foo' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.consulCARole=carole' \ + --set 'global.secretsBackend.vault.agentAnnotations=foo: bar' \ + . | tee /dev/stderr | + yq -r '.spec.template.metadata.annotations.foo' | tee /dev/stderr) + [ "${actual}" = "bar" ] +} + + +@test "client/SnapshotAgentDeployment: vault properly sets vault role when global.secretsBackend.vault.consulCARole is set but global.secretsBackend.vault.consulSnapshotAgentRole is not set" { + cd `chart_dir` + local object=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=test' \ + --set 'global.secretsBackend.vault.consulServerRole=foo' \ + --set 'global.secretsBackend.vault.consulCARole=ca-role' \ + . 
| tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + local actual + actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/role"]' | tee /dev/stderr) + [ "${actual}" = "ca-role" ] +} + +@test "client/SnapshotAgentDeployment: vault properly sets vault role when global.secretsBackend.vault.consulSnapshotAgentRole is set but global.secretsBackend.vault.consulCARole is not set" { + cd `chart_dir` + local object=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=test' \ + --set 'global.secretsBackend.vault.consulServerRole=foo' \ + --set 'global.secretsBackend.vault.consulSnapshotAgentRole=sa-role' \ + --set 'client.snapshotAgent.configSecret.secretName=a/b/c/d' \ + --set 'client.snapshotAgent.configSecret.secretKey=snapshot-agent-config' \ + . | tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + local actual + actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/role"]' | tee /dev/stderr) + [ "${actual}" = "sa-role" ] +} + +@test "client/SnapshotAgentDeployment: vault properly sets vault role to global.secretsBackend.vault.consulSnapshotAgentRole value when both global.secretsBackend.vault.consulSnapshotAgentRole and global.secretsBackend.vault.consulCARole are set" { + cd `chart_dir` + local object=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=test' \ + --set 'global.secretsBackend.vault.consulServerRole=foo' \ + --set 'global.secretsBackend.vault.consulSnapshotAgentRole=sa-role' \ + --set 'client.snapshotAgent.configSecret.secretName=a/b/c/d' \ + --set 'client.snapshotAgent.configSecret.secretKey=snapshot-agent-config' \ + --set 'global.secretsBackend.vault.consulCARole=ca-role' \ + . | tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + local actual + actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/role"]' | tee /dev/stderr) + [ "${actual}" = "sa-role" ] +} + +@test "client/SnapshotAgentDeployment: interval defaults to 1h" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].command[2] | contains("-interval=1h")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/SnapshotAgentDeployment: interval can be set" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-deployment.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'client.snapshotAgent.interval=10h34m5s' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].command[2] | contains("-interval=10h34m5s")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} diff --git a/charts/consul/test/unit/client-snapshot-agent-podsecuritypolicy.bats b/charts/consul/test/unit/client-snapshot-agent-podsecuritypolicy.bats new file mode 100644 index 0000000000..21c55af314 --- /dev/null +++ b/charts/consul/test/unit/client-snapshot-agent-podsecuritypolicy.bats @@ -0,0 +1,30 @@ +#!/usr/bin/env bats + +load _helpers + +@test "client/SnapshotAgentPodSecurityPolicy: disabled by default" { + cd `chart_dir` + assert_empty helm template \ + -s templates/client-snapshot-agent-podsecuritypolicy.yaml \ + . +} + +@test "client/SnapshotAgentPodSecurityPolicy: disabled with snapshot agent disabled and global.enablePodSecurityPolicies=true" { + cd `chart_dir` + assert_empty helm template \ + -s templates/client-snapshot-agent-podsecuritypolicy.yaml \ + --set 'client.snapshotAgent.enabled=false' \ + --set 'global.enablePodSecurityPolicies=true' \ + . +} + +@test "client/SnapshotAgentPodSecurityPolicy: enabled with snapshot agent enabled and global.enablePodSecurityPolicies=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-podsecuritypolicy.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.enablePodSecurityPolicies=true' \ + . | tee /dev/stderr | + yq -s 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} diff --git a/charts/consul/test/unit/client-snapshot-agent-role.bats b/charts/consul/test/unit/client-snapshot-agent-role.bats new file mode 100644 index 0000000000..86aaaf3880 --- /dev/null +++ b/charts/consul/test/unit/client-snapshot-agent-role.bats @@ -0,0 +1,55 @@ +#!/usr/bin/env bats + +load _helpers + +@test "client/SnapshotAgentRole: disabled by default" { + cd `chart_dir` + assert_empty helm template \ + -s templates/client-snapshot-agent-role.yaml \ + . +} + +@test "client/SnapshotAgentRole: enabled with client.snapshotAgent.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-role.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/SnapshotAgentRole: enabled with client.enabled=true and client.snapshotAgent.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-role.yaml \ + --set 'client.enabled=true' \ + --set 'client.snapshotAgent.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/SnapshotAgentRole: disabled with client=false and client.snapshotAgent.enabled=true" { + cd `chart_dir` + assert_empty helm template \ + -s templates/client-snapshot-agent-role.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'client.enabled=false' \ + . +} + +#-------------------------------------------------------------------- +# global.enablePodSecurityPolicies + +@test "client/SnapshotAgentRole: allows podsecuritypolicies access with global.enablePodSecurityPolicies=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-role.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'client.enabled=true' \ + --set 'global.enablePodSecurityPolicies=true' \ + . 
| tee /dev/stderr | + yq -r '.rules[0].resources[0]' | tee /dev/stderr) + [ "${actual}" = "podsecuritypolicies" ] +} diff --git a/charts/consul/test/unit/client-snapshot-agent-rolebinding.bats b/charts/consul/test/unit/client-snapshot-agent-rolebinding.bats new file mode 100644 index 0000000000..f061610955 --- /dev/null +++ b/charts/consul/test/unit/client-snapshot-agent-rolebinding.bats @@ -0,0 +1,40 @@ +#!/usr/bin/env bats + +load _helpers + +@test "client/SnapshotAgentRoleBinding: disabled by default" { + cd `chart_dir` + assert_empty helm template \ + -s templates/client-snapshot-agent-rolebinding.yaml \ + . +} + +@test "client/SnapshotAgentRoleBinding: enabled with client.snapshotAgent.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-rolebinding.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/SnapshotAgentRoleBinding: enabled with client.enabled=true and client.snapshotAgent.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-rolebinding.yaml \ + --set 'client.enabled=true' \ + --set 'client.snapshotAgent.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/SnapshotAgentRoleBinding: disabled with client=false and client.snapshotAgent.enabled=true" { + cd `chart_dir` + assert_empty helm template \ + -s templates/client-snapshot-agent-rolebinding.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'client.enabled=false' \ + . +} diff --git a/charts/consul/test/unit/client-snapshot-agent-serviceaccount.bats b/charts/consul/test/unit/client-snapshot-agent-serviceaccount.bats new file mode 100644 index 0000000000..30d7ada58b --- /dev/null +++ b/charts/consul/test/unit/client-snapshot-agent-serviceaccount.bats @@ -0,0 +1,83 @@ +#!/usr/bin/env bats + +load _helpers + +@test "client/SnapshotAgentServiceAccount: disabled by default" { + cd `chart_dir` + assert_empty helm template -s templates/client-snapshot-agent-serviceaccount.yaml . +} + +@test "client/SnapshotAgentServiceAccount: enabled with client.snapshotAgent.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-serviceaccount.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/SnapshotAgentServiceAccount: enabled with client.enabled=true and client.snapshotAgent.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-serviceaccount.yaml \ + --set 'client.enabled=true' \ + --set 'client.snapshotAgent.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "client/SnapshotAgentServiceAccount: disabled with client=false and client.snapshotAgent.enabled=true" { + cd `chart_dir` + assert_empty helm template \ + -s templates/client-snapshot-agent-serviceaccount.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'client.enabled=false' \ + . 
+} + +#-------------------------------------------------------------------- +# global.imagePullSecrets + +@test "client/SnapshotAgentServiceAccount: can set image pull secrets" { + cd `chart_dir` + local object=$(helm template \ + -s templates/client-snapshot-agent-serviceaccount.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set 'global.imagePullSecrets[0].name=my-secret' \ + --set 'global.imagePullSecrets[1].name=my-secret2' \ + . | tee /dev/stderr) + + local actual=$(echo "$object" | + yq -r '.imagePullSecrets[0].name' | tee /dev/stderr) + [ "${actual}" = "my-secret" ] + + local actual=$(echo "$object" | + yq -r '.imagePullSecrets[1].name' | tee /dev/stderr) + [ "${actual}" = "my-secret2" ] +} + +#-------------------------------------------------------------------- +# client.snapshotAgent.serviceAccount.annotations + +@test "client/SnapshotAgentServiceAccount: no annotations by default" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-serviceaccount.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + . | tee /dev/stderr | + yq '.metadata.annotations | length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "client/SnapshotAgentServiceAccount: annotations when enabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/client-snapshot-agent-serviceaccount.yaml \ + --set 'client.snapshotAgent.enabled=true' \ + --set "client.snapshotAgent.serviceAccount.annotations=foo: bar" \ + . | tee /dev/stderr | + yq -r '.metadata.annotations.foo' | tee /dev/stderr) + [ "${actual}" = "bar" ] +} diff --git a/charts/consul/test/unit/cni-clusterrole.bats b/charts/consul/test/unit/cni-clusterrole.bats index 4556d48f0d..02675ed882 100644 --- a/charts/consul/test/unit/cni-clusterrole.bats +++ b/charts/consul/test/unit/cni-clusterrole.bats @@ -20,29 +20,6 @@ load _helpers [[ "${actual}" == "true" ]] } -@test "cni/ClusterRole: cni namespace has a default when not set" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/cni-clusterrole.yaml \ - --set 'connectInject.cni.enabled=true' \ - --set 'connectInject.enabled=true' \ - . | tee /dev/stderr | - yq -r -c '.metadata.namespace' | tee /dev/stderr) - [[ "${actual}" == "default" ]] -} - -@test "cni/ClusterRole: able to set cni namespace" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/cni-clusterrole.yaml \ - --set 'connectInject.cni.enabled=true' \ - --set 'connectInject.cni.namespace=kube-system' \ - --set 'connectInject.enabled=true' \ - . | tee /dev/stderr | - yq -r -c '.metadata.namespace' | tee /dev/stderr) - [[ "${actual}" == "kube-system" ]] -} - @test "cni/ClusterRole: disabled with connectInject.cni.enabled=false and connectInject.enabled=true" { cd `chart_dir` assert_empty helm template \ diff --git a/charts/consul/test/unit/cni-clusterrolebinding.bats b/charts/consul/test/unit/cni-clusterrolebinding.bats index 98cdb283c4..ba217e7706 100644 --- a/charts/consul/test/unit/cni-clusterrolebinding.bats +++ b/charts/consul/test/unit/cni-clusterrolebinding.bats @@ -55,25 +55,3 @@ load _helpers [ "${actual}" = "foo" ] } -@test "cni/ClusterRoleBinding: subject namespace is correct when not set" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/cni-clusterrolebinding.yaml \ - --set 'connectInject.cni.enabled=true' \ - --set 'connectInject.enabled=true' \ - . 
| tee /dev/stderr | - yq -r '.subjects[0].namespace' | tee /dev/stderr) - [[ "${actual}" == "default" ]] -} - -@test "cni/ClusterRoleBinding: subject namespace can be set" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/cni-clusterrolebinding.yaml \ - --set 'connectInject.cni.enabled=true' \ - --set 'connectInject.cni.namespace=kube-system' \ - --set 'connectInject.enabled=true' \ - . | tee /dev/stderr | - yq -r '.subjects[0].namespace' | tee /dev/stderr) - [[ "${actual}" == "kube-system" ]] -} diff --git a/charts/consul/test/unit/cni-daemonset.bats b/charts/consul/test/unit/cni-daemonset.bats index 675d6b877f..ccfce1b4b5 100644 --- a/charts/consul/test/unit/cni-daemonset.bats +++ b/charts/consul/test/unit/cni-daemonset.bats @@ -37,6 +37,7 @@ load _helpers --set 'connectInject.enabled=false' \ -s templates/cni-daemonset.yaml \ . + [ "$status" -eq 1 ] [[ "$output" =~ "connectInject.enabled must be true if connectInject.cni.enabled is true" ]] } @@ -295,51 +296,6 @@ rollingUpdate: [ "${actual}" = '{"mountPath":"bar","name":"cni-net-dir"}' ] } -@test "cni/DaemonSet: cni namespace has a default when not set" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/cni-daemonset.yaml \ - --set 'connectInject.cni.enabled=true' \ - --set 'connectInject.enabled=true' \ - . | tee /dev/stderr | - yq -r -c '.metadata.namespace' | tee /dev/stderr) - [[ "${actual}" == "default" ]] -} - -@test "cni/DaemonSet: able to set cni namespace" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/cni-daemonset.yaml \ - --set 'connectInject.cni.enabled=true' \ - --set 'connectInject.cni.namespace=kube-system' \ - --set 'connectInject.enabled=true' \ - . | tee /dev/stderr | - yq -r -c '.metadata.namespace' | tee /dev/stderr) - [[ "${actual}" == "kube-system" ]] -} - -@test "cni/DaemonSet: still uses cni.namespace when helm -n is used" { - cd `chart_dir` - local actual=$(helm template -n foo \ - -s templates/cni-daemonset.yaml \ - --set 'connectInject.cni.enabled=true' \ - --set 'connectInject.enabled=true' \ - --set 'connectInject.cni.namespace=kube-system' \ - . | tee /dev/stderr | - yq -r -c '.metadata.namespace' | tee /dev/stderr) - [[ "${actual}" == "kube-system" ]] -} - -@test "cni/DaemonSet: default namespace can be overridden by helm -n" { - cd `chart_dir` - local actual=$(helm template -n foo \ - -s templates/cni-daemonset.yaml \ - --set 'connectInject.cni.enabled=true' \ - --set 'connectInject.enabled=true' \ - . | tee /dev/stderr | - yq -r -c '.metadata.namespace' | tee /dev/stderr) - [[ "${actual}" == "foo" ]] -} #-------------------------------------------------------------------- # extraLabels diff --git a/charts/consul/test/unit/cni-networkattachmentdefinition.bats b/charts/consul/test/unit/cni-networkattachmentdefinition.bats index 65730079bb..a7f0d1da03 100644 --- a/charts/consul/test/unit/cni-networkattachmentdefinition.bats +++ b/charts/consul/test/unit/cni-networkattachmentdefinition.bats @@ -59,27 +59,3 @@ load _helpers } -@test "cni/NetworkAttachmentDefinition: cni namespace has a default when not set" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/cni-networkattachmentdefinition.yaml \ - --set 'connectInject.enabled=true' \ - --set 'connectInject.cni.enabled=true' \ - --set 'connectInject.cni.multus=true' \ - . 
| tee /dev/stderr | - yq -r -c '.metadata.namespace' | tee /dev/stderr) - [[ "${actual}" == "default" ]] -} - -@test "cni/NetworkAttachmentDefinition: able to set cni namespace" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/cni-networkattachmentdefinition.yaml \ - --set 'connectInject.enabled=true' \ - --set 'connectInject.cni.enabled=true' \ - --set 'connectInject.cni.multus=true' \ - --set 'connectInject.cni.namespace=kube-system' \ - . | tee /dev/stderr | - yq -r -c '.metadata.namespace' | tee /dev/stderr) - [[ "${actual}" == "kube-system" ]] -} diff --git a/charts/consul/test/unit/cni-podsecuritypolicy.bats b/charts/consul/test/unit/cni-podsecuritypolicy.bats index 21af659cde..37df761995 100644 --- a/charts/consul/test/unit/cni-podsecuritypolicy.bats +++ b/charts/consul/test/unit/cni-podsecuritypolicy.bats @@ -30,27 +30,3 @@ load _helpers [[ "${actual}" == "true" ]] } -@test "cni/PodSecurityPolicy: cni namespace has a default when not set" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/cni-podsecuritypolicy.yaml \ - --set 'connectInject.cni.enabled=true' \ - --set 'connectInject.enabled=true' \ - --set 'global.enablePodSecurityPolicies=true' \ - . | tee /dev/stderr | - yq -r -c '.metadata.namespace' | tee /dev/stderr) - [[ "${actual}" == "default" ]] -} - -@test "cni/PodSecurityPolicy: able to set cni namespace" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/cni-podsecuritypolicy.yaml \ - --set 'connectInject.cni.enabled=true' \ - --set 'connectInject.enabled=true' \ - --set 'global.enablePodSecurityPolicies=true' \ - --set 'connectInject.cni.namespace=kube-system' \ - . | tee /dev/stderr | - yq -r -c '.metadata.namespace' | tee /dev/stderr) - [[ "${actual}" == "kube-system" ]] -} diff --git a/charts/consul/test/unit/cni-resourcequota.bats b/charts/consul/test/unit/cni-resourcequota.bats index f7495d3565..36c7a26b30 100644 --- a/charts/consul/test/unit/cni-resourcequota.bats +++ b/charts/consul/test/unit/cni-resourcequota.bats @@ -29,29 +29,6 @@ load _helpers . } -@test "cni/ResourceQuota: cni namespace has a default when not set" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/cni-resourcequota.yaml \ - --set 'connectInject.cni.enabled=true' \ - --set 'connectInject.enabled=true' \ - . | tee /dev/stderr | - yq -r -c '.metadata.namespace' | tee /dev/stderr) - [[ "${actual}" == "default" ]] -} - -@test "cni/ResourceQuota: able to set cni namespace" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/cni-resourcequota.yaml \ - --set 'connectInject.cni.enabled=true' \ - --set 'connectInject.enabled=true' \ - --set 'connectInject.cni.namespace=kube-system' \ - . 
| tee /dev/stderr | - yq -r -c '.metadata.namespace' | tee /dev/stderr) - [[ "${actual}" == "kube-system" ]] -} - #-------------------------------------------------------------------- # pods diff --git a/charts/consul/test/unit/cni-securitycontextcontstraints.bats b/charts/consul/test/unit/cni-securitycontextcontstraints.bats index 933282f0dc..759979aee2 100644 --- a/charts/consul/test/unit/cni-securitycontextcontstraints.bats +++ b/charts/consul/test/unit/cni-securitycontextcontstraints.bats @@ -31,27 +31,3 @@ load _helpers [ "${actual}" = "true" ] } -@test "cni/SecurityContextConstraints: cni namespace has a default when not set" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/cni-securitycontextconstraints.yaml \ - --set 'connectInject.cni.enabled=true' \ - --set 'connectInject.enabled=true' \ - --set 'global.openshift.enabled=true' \ - . | tee /dev/stderr | - yq -r -c '.metadata.namespace' | tee /dev/stderr) - [[ "${actual}" == "default" ]] -} - -@test "cni/SecurityContextConstraints: able to set cni namespace" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/cni-securitycontextconstraints.yaml \ - --set 'connectInject.cni.enabled=true' \ - --set 'connectInject.enabled=true' \ - --set 'global.openshift.enabled=true' \ - --set 'connectInject.cni.namespace=kube-system' \ - . | tee /dev/stderr | - yq -r -c '.metadata.namespace' | tee /dev/stderr) - [[ "${actual}" == "kube-system" ]] -} diff --git a/charts/consul/test/unit/cni-serviceaccount.bats b/charts/consul/test/unit/cni-serviceaccount.bats index 73146bd0d9..4f2071f823 100644 --- a/charts/consul/test/unit/cni-serviceaccount.bats +++ b/charts/consul/test/unit/cni-serviceaccount.bats @@ -29,29 +29,6 @@ load _helpers . } -@test "cni/ServiceAccount: cni namespace has a default when not set" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/cni-serviceaccount.yaml \ - --set 'connectInject.cni.enabled=true' \ - --set 'connectInject.enabled=true' \ - . | tee /dev/stderr | - yq -r -c '.metadata.namespace' | tee /dev/stderr) - [[ "${actual}" == "default" ]] -} - -@test "cni/ServiceAccount: able to set cni namespace" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/cni-serviceaccount.yaml \ - --set 'connectInject.cni.enabled=true' \ - --set 'connectInject.enabled=true' \ - --set 'connectInject.cni.namespace=kube-system' \ - . | tee /dev/stderr | - yq -r -c '.metadata.namespace' | tee /dev/stderr) - [[ "${actual}" == "kube-system" ]] -} - #-------------------------------------------------------------------- # global.imagePullSecrets diff --git a/charts/consul/test/unit/connect-inject-clusterrole.bats b/charts/consul/test/unit/connect-inject-clusterrole.bats index 4acdf211d2..d9dd5933ca 100644 --- a/charts/consul/test/unit/connect-inject-clusterrole.bats +++ b/charts/consul/test/unit/connect-inject-clusterrole.bats @@ -2,13 +2,11 @@ load _helpers -@test "connectInject/ClusterRole: enabled by default" { +@test "connectInject/ClusterRole: disabled by default" { cd `chart_dir` - local actual=$(helm template \ + assert_empty helm template \ -s templates/connect-inject-clusterrole.yaml \ - . | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "true" ] + . } @test "connectInject/ClusterRole: enabled with global.enabled false" { @@ -42,7 +40,7 @@ load _helpers --set 'client.enabled=true' \ --set 'connectInject.enabled=true' \ . 
| tee /dev/stderr | - yq -r '.rules[2]' | tee /dev/stderr) + yq -r '.rules[0]' | tee /dev/stderr) local actual=$(echo $object | yq -r '.resources | index("endpoints")' | tee /dev/stderr) [ "${actual}" != null ] @@ -77,7 +75,7 @@ load _helpers --set 'client.enabled=true' \ --set 'connectInject.enabled=true' \ . | tee /dev/stderr | - yq -r '.rules[3]' | tee /dev/stderr) + yq -r '.rules[1]' | tee /dev/stderr) local actual=$(echo $object | yq -r '.resources | index("pods")' | tee /dev/stderr) [ "${actual}" != null ] @@ -106,7 +104,7 @@ load _helpers --set 'client.enabled=true' \ --set 'connectInject.enabled=true' \ . | tee /dev/stderr | - yq -r '.rules[4]' | tee /dev/stderr) + yq -r '.rules[2]' | tee /dev/stderr) local actual=$(echo $object | yq -r '.resources | index("leases")' | tee /dev/stderr) [ "${actual}" != null ] @@ -136,7 +134,7 @@ load _helpers --set 'connectInject.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ . | tee /dev/stderr | - yq -r '.rules[2]' | tee /dev/stderr) + yq -r '.rules[0]' | tee /dev/stderr) local actual=$(echo $object | yq -r '.resources | index("serviceaccounts")' | tee /dev/stderr) [ "${actual}" != null ] @@ -193,11 +191,14 @@ load _helpers --set 'global.secretsBackend.vault.connectInjectRole=inject-ca-role' \ --set 'global.secretsBackend.vault.connectInject.tlsCert.secretName=pki/issue/connect-webhook-cert-dc1' \ --set 'global.secretsBackend.vault.connectInject.caCert.secretName=pki/issue/connect-webhook-cert-dc1' \ + --set 'global.secretsBackend.vault.controllerRole=test' \ + --set 'global.secretsBackend.vault.controller.caCert.secretName=foo/ca' \ + --set 'global.secretsBackend.vault.controller.tlsCert.secretName=foo/tls' \ --set 'global.secretsBackend.vault.consulClientRole=foo' \ --set 'global.secretsBackend.vault.consulServerRole=bar' \ --set 'global.secretsBackend.vault.consulCARole=test2' \ . | tee /dev/stderr | - yq -r '.rules[5]' | tee /dev/stderr) + yq -r '.rules[3]' | tee /dev/stderr) local actual=$(echo $object | yq -r '.resources[0]' | tee /dev/stderr) [ "${actual}" = "mutatingwebhookconfigurations" ] diff --git a/charts/consul/test/unit/connect-inject-clusterrolebinding.bats b/charts/consul/test/unit/connect-inject-clusterrolebinding.bats index 28921d31f2..ccf30083f9 100644 --- a/charts/consul/test/unit/connect-inject-clusterrolebinding.bats +++ b/charts/consul/test/unit/connect-inject-clusterrolebinding.bats @@ -2,13 +2,11 @@ load _helpers -@test "connectInject/ClusterRoleBinding: enabled by default" { +@test "connectInject/ClusterRoleBinding: disabled by default" { cd `chart_dir` - local actual=$(helm template \ + assert_empty helm template \ -s templates/connect-inject-clusterrolebinding.yaml \ - . | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "true" ] + . } @test "connectInject/ClusterRoleBinding: enabled with global.enabled false" { cd `chart_dir` @@ -29,4 +27,4 @@ load _helpers -s templates/connect-inject-clusterrolebinding.yaml \ --set 'connectInject.enabled=false' \ . 
-} +} \ No newline at end of file diff --git a/charts/consul/test/unit/connect-inject-deployment.bats b/charts/consul/test/unit/connect-inject-deployment.bats index 9da24a7568..b84efc0808 100755 --- a/charts/consul/test/unit/connect-inject-deployment.bats +++ b/charts/consul/test/unit/connect-inject-deployment.bats @@ -2,13 +2,11 @@ load _helpers -@test "connectInject/Deployment: enabled by default" { +@test "connectInject/Deployment: disabled by default" { cd `chart_dir` - local actual=$(helm template \ + assert_empty helm template \ -s templates/connect-inject-deployment.yaml \ - . | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "true" ] + . } @test "connectInject/Deployment: enable with global.enabled false, client.enabled true" { @@ -35,38 +33,139 @@ load _helpers cd `chart_dir` assert_empty helm template \ -s templates/connect-inject-deployment.yaml \ - --set 'connectInject.enabled=-' \ --set 'global.enabled=false' \ . } -@test "connectInject/Deployment: consul env defaults" { +@test "connectInject/Deployment: fails if global.enabled=false" { + cd `chart_dir` + run helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'global.enabled=false' \ + --set 'connectInject.enabled=true' . + [ "$status" -eq 1 ] + [[ "$output" =~ "clients must be enabled for connect injection" ]] +} + +@test "connectInject/Deployment: fails if global.enabled=true and client.enabled=false" { + cd `chart_dir` + run helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'global.enabled=true' \ + --set 'client.enabled=false' \ + --set 'connectInject.enabled=true' . + [ "$status" -eq 1 ] + [[ "$output" =~ "clients must be enabled for connect injection" ]] +} + +@test "connectInject/Deployment: fails if global.enabled=false and client.enabled=false" { + cd `chart_dir` + run helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'global.enabled=false' \ + --set 'client.enabled=false' \ + --set 'connectInject.enabled=true' . + [ "$status" -eq 1 ] + [[ "$output" =~ "clients must be enabled for connect injection" ]] +} + +@test "connectInject/Deployment: fails if client.grpc=false" { + cd `chart_dir` + run helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'client.grpc=false' \ + --set 'connectInject.enabled=true' . + [ "$status" -eq 1 ] + [[ "$output" =~ "client.grpc must be true for connect injection" ]] +} + +@test "connectInject/Deployment: command defaults" { cd `chart_dir` - local env=$(helm template \ + local cmd=$(helm template \ -s templates/connect-inject-deployment.yaml \ --set 'connectInject.enabled=true' \ . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) + yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) + + local actual=$(echo "$cmd" | + yq 'any(contains("consul-k8s-control-plane inject-connect"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo "$cmd" | + yq 'any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# connectInject.centralConfig [DEPRECATED] + +@test "connectInject/Deployment: fails if connectInject.centralConfig.enabled is set to false" { + cd `chart_dir` + run helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'connectInject.centralConfig.enabled=false' . 
+ [ "$status" -eq 1 ] + [[ "$output" =~ "connectInject.centralConfig.enabled cannot be set to false; to disable, set enable_central_service_config to false in server.extraConfig and client.extraConfig" ]] +} + +@test "connectInject/Deployment: fails if connectInject.centralConfig.defaultProtocol is set" { + cd `chart_dir` + run helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'connectInject.centralConfig.defaultProtocol=http' . + [ "$status" -eq 1 ] + [[ "$output" =~ "connectInject.centralConfig.defaultProtocol is no longer supported; instead you must migrate to CRDs (see www.consul.io/docs/k8s/crds/upgrade-to-crds)" ]] +} - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_ADDRESSES").value' | tee /dev/stderr) - [ "${actual}" = "release-name-consul-server.default.svc" ] +@test "connectInject/Deployment: fails if connectInject.centralConfig.proxyDefaults is used" { + cd `chart_dir` + run helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'connectInject.centralConfig.proxyDefaults="{\"key\":\"value\"}"' . + [ "$status" -eq 1 ] + [[ "$output" =~ "connectInject.centralConfig.proxyDefaults is no longer supported; instead you must migrate to CRDs (see www.consul.io/docs/k8s/crds/upgrade-to-crds)" ]] +} - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_GRPC_PORT").value' | tee /dev/stderr) - [ "${actual}" = "8502" ] +@test "connectInject/Deployment: does not fail if connectInject.centralConfig.enabled is set to true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'connectInject.centralConfig.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_HTTP_PORT").value' | tee /dev/stderr) - [ "${actual}" = "8500" ] +@test "connectInject/Deployment: does not fail if connectInject.centralConfig.proxyDefaults is set to {}" { + cd `chart_dir` - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_DATACENTER").value' | tee /dev/stderr) - [ "${actual}" = "dc1" ] + # We have to actually create a values file for this test because the + # --set and --set-string flags were passing {} as a YAML object rather + # than a string. + # Previously this was the default in the values.yaml so this test is testing + # that if someone had copied this into their values.yaml then nothing would + # break. We no longer use this value, but that's okay because the default + # empty object had no effect. + temp_file=$(mktemp) + cat < "$temp_file" +connectInject: + enabled: true + centralConfig: + proxyDefaults: | + {} +EOF - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_API_TIMEOUT").value' | tee /dev/stderr) - [ "${actual}" = "5s" ] + local actual=$(helm template \ + -s templates/connect-inject-deployment.yaml \ + -f "$temp_file" \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] + rm -f "$temp_file" } #-------------------------------------------------------------------- @@ -212,7 +311,7 @@ load _helpers } #-------------------------------------------------------------------- -# consul and consul-dataplane images +# consul and envoy images @test "connectInject/Deployment: container image is global default" { cd `chart_dir` @@ -260,17 +359,28 @@ load _helpers [ "${actual}" = "true" ] } -@test "connectInject/Deployment: consul-dataplane-image can be set via global" { +@test "connectInject/Deployment: envoy-image can be set via global" { cd `chart_dir` local actual=$(helm template \ -s templates/connect-inject-deployment.yaml \ --set 'connectInject.enabled=true' \ - --set 'global.imageConsulDataplane=foo' \ + --set 'global.imageEnvoy=foo' \ . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].command | any(contains("-consul-dataplane-image=\"foo\""))' | tee /dev/stderr) + yq '.spec.template.spec.containers[0].command | any(contains("-envoy-image=\"foo\""))' | tee /dev/stderr) [ "${actual}" = "true" ] } +@test "connectInject/Deployment: setting connectInject.imageEnvoy fails" { + cd `chart_dir` + run helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'connectInject.imageEnvoy=new/image' . + [ "$status" -eq 1 ] + [[ "$output" =~ "connectInject.imageEnvoy must be specified in global" ]] +} + + #-------------------------------------------------------------------- # extra envoy args @@ -428,14 +538,14 @@ load _helpers #-------------------------------------------------------------------- # DNS -@test "connectInject/Deployment: -enable-consul-dns is set by default due to inheriting from connectInject.transparentProxy.defaultEnabled" { +@test "connectInject/Deployment: -enable-consul-dns unset by default" { cd `chart_dir` local actual=$(helm template \ -s templates/connect-inject-deployment.yaml \ --set 'connectInject.enabled=true' \ . | tee /dev/stderr | yq -c -r '.spec.template.spec.containers[0].command | join(" ") | contains("-enable-consul-dns=true")' | tee /dev/stderr) - [ "${actual}" = "true" ] + [ "${actual}" = "false" ] } @test "connectInject/Deployment: -enable-consul-dns is true if dns.enabled=true and dns.enableRedirection=true" { @@ -444,42 +554,11 @@ load _helpers -s templates/connect-inject-deployment.yaml \ --set 'connectInject.enabled=true' \ --set 'dns.enableRedirection=true' \ - --set 'dns.enabled=true' \ . | tee /dev/stderr | yq -c -r '.spec.template.spec.containers[0].command | join(" ") | contains("-enable-consul-dns=true")' | tee /dev/stderr) [ "${actual}" = "true" ] } -@test "connectInject/Deployment: -enable-consul-dns is not set when connectInject.transparentProxy.defaultEnabled is false" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/connect-inject-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'connectInject.transparentProxy.defaultEnabled=false' \ - . | tee /dev/stderr | - yq -c -r '.spec.template.spec.containers[0].command | join(" ") | contains("-enable-consul-dns=true")' | tee /dev/stderr) - [ "${actual}" = "false" ] -} - -@test "connectInject/Deployment: -enable-consul-dns is not set if dns.enabled is false or ens.enableRedirection is false" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/connect-inject-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'dns.enabled=false' \ - . 
| tee /dev/stderr | - yq -c -r '.spec.template.spec.containers[0].command | join(" ") | contains("-enable-consul-dns=true")' | tee /dev/stderr) - [ "${actual}" = "false" ] - - local actual=$(helm template \ - -s templates/connect-inject-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'dns.enableRedirection=false' \ - . | tee /dev/stderr | - yq -c -r '.spec.template.spec.containers[0].command | join(" ") | contains("-enable-consul-dns=true")' | tee /dev/stderr) - [ "${actual}" = "false" ] -} - @test "connectInject/Deployment: -resource-prefix always set" { cd `chart_dir` local actual=$(helm template \ @@ -493,7 +572,7 @@ load _helpers #-------------------------------------------------------------------- # global.tls.enabled -@test "connectInject/Deployment: Adds consul-ca-cert volume when global.tls.enabled is true" { +@test "connectInject/Deployment: Adds tls-ca-cert volume when global.tls.enabled is true" { cd `chart_dir` local actual=$(helm template \ -s templates/connect-inject-deployment.yaml \ @@ -504,7 +583,7 @@ load _helpers [ "${actual}" != "" ] } -@test "connectInject/Deployment: Adds consul-ca-cert volumeMount when global.tls.enabled is true" { +@test "connectInject/Deployment: Adds tls-ca-cert volumeMounts when global.tls.enabled is true" { cd `chart_dir` local actual=$(helm template \ -s templates/connect-inject-deployment.yaml \ @@ -538,26 +617,83 @@ load _helpers [ "${actual}" = "key" ] } -@test "connectInject/Deployment: consul env vars when global.tls.enabled is true" { +@test "connectInject/Deployment: Adds -tls-cert-dir=/etc/connect-injector/certs to command" { cd `chart_dir` - local env=$(helm template \ - -s templates/connect-inject-deployment.yaml \ + local actual=$(helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-tls-cert-dir=/etc/connect-injector/certs"))' | tee /dev/stderr) + [ "${actual}" != "" ] +} + +#-------------------------------------------------------------------- +# global.tls.enableAutoEncrypt + +@test "connectInject/Deployment: consul-auto-encrypt-ca-cert volume is added when TLS with auto-encrypt is enabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.volumes[] | select(.name == "consul-auto-encrypt-ca-cert") | length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "connectInject/Deployment: consul-auto-encrypt-ca-cert volumeMount is added when TLS with auto-encrypt is enabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/connect-inject-deployment.yaml \ --set 'connectInject.enabled=true' \ --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) + yq '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "consul-auto-encrypt-ca-cert") | length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} - local actual=$(echo "$env" | - jq -r '. 
| select( .name == "CONSUL_HTTP_PORT").value' | tee /dev/stderr) - [ "${actual}" = "8501" ] +@test "connectInject/Deployment: get-auto-encrypt-client-ca init container is created when TLS with auto-encrypt is enabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "get-auto-encrypt-client-ca") | length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_USE_TLS").value' | tee /dev/stderr) +@test "connectInject/Deployment: adds both init containers when TLS with auto-encrypt and ACLs + namespaces are enabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'global.enableConsulNamespaces=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers | length == 2' | tee /dev/stderr) [ "${actual}" = "true" ] +} - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_CACERT_FILE").value' | tee /dev/stderr) - [ "${actual}" = "/consul/tls/ca/tls.crt" ] +@test "connectInject/Deployment: consul-ca-cert volume is not added if externalServers.enabled=true and externalServers.useSystemRoots=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'externalServers.enabled=true' \ + --set 'externalServers.hosts[0]=foo.com' \ + --set 'externalServers.useSystemRoots=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.volumes[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) + [ "${actual}" = "" ] } #-------------------------------------------------------------------- @@ -639,37 +775,6 @@ load _helpers [ "${actual}" = "true" ] } -@test "connectInject/Deployment: consul env var default set with .global.adminPartitions.enabled=true" { - cd `chart_dir` - local env=$(helm template \ - -s templates/connect-inject-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ - . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) - - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_PARTITION").value' | tee /dev/stderr) - [ "${actual}" = "default" ] -} - -@test "connectInject/Deployment: consul env var set with .global.adminPartitions.enabled=true" { - cd `chart_dir` - local env=$(helm template \ - -s templates/connect-inject-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.adminPartitions.name=foo' \ - --set 'global.enableConsulNamespaces=true' \ - . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) - - local actual=$(echo "$env" | - jq -r '. 
| select( .name == "CONSUL_PARTITION").value' | tee /dev/stderr) - [ "${actual}" = "foo" ] -} - @test "connectInject/Deployment: fails if namespaces are disabled and .global.adminPartitions.enabled=true" { cd `chart_dir` run helm template \ @@ -684,6 +789,17 @@ load _helpers #-------------------------------------------------------------------- # namespaces +@test "connectInject/Deployment: fails if namespaces are disabled and mirroringK8S is true" { + cd `chart_dir` + run helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'global.enableConsulNamespaces=false' \ + --set 'connectInject.consulNamespaces.mirroringK8S=true' \ + --set 'connectInject.enabled=true' . + [ "$status" -eq 1 ] + [[ "$output" =~ "global.enableConsulNamespaces must be true if mirroringK8S=true" ]] +} + @test "connectInject/Deployment: namespace options disabled by default" { cd `chart_dir` local object=$(helm template \ @@ -728,20 +844,20 @@ load _helpers local actual=$(echo $object | yq 'any(contains("enable-k8s-namespace-mirroring"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + [ "${actual}" = "false" ] local actual=$(echo $object | yq 'any(contains("k8s-namespace-mirroring-prefix"))' | tee /dev/stderr) [ "${actual}" = "false" ] } -@test "connectInject/Deployment: mirroring options omitted with .connectInject.consulNamespaces.mirroringK8S=false" { +@test "connectInject/Deployment: mirroring options set with .connectInject.consulNamespaces.mirroringK8S=true" { cd `chart_dir` local object=$(helm template \ -s templates/connect-inject-deployment.yaml \ --set 'connectInject.enabled=true' \ --set 'global.enableConsulNamespaces=true' \ - --set 'connectInject.consulNamespaces.mirroringK8S=false' \ + --set 'connectInject.consulNamespaces.mirroringK8S=true' \ . | tee /dev/stderr | yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) @@ -755,7 +871,7 @@ load _helpers local actual=$(echo $object | yq 'any(contains("enable-k8s-namespace-mirroring=true"))' | tee /dev/stderr) - [ "${actual}" = "false" ] + [ "${actual}" = "true" ] local actual=$(echo $object | yq 'any(contains("k8s-namespace-mirroring-prefix"))' | tee /dev/stderr) @@ -811,7 +927,7 @@ load _helpers --set 'connectInject.enabled=true' \ --set 'connectInject.aclInjectToken.secretName=foo' \ . | tee /dev/stderr | - yq '[.spec.template.spec.containers[0].env[].name] | any(contains("CONSUL_ACL_TOKEN"))' | tee /dev/stderr) + yq '[.spec.template.spec.containers[0].env[].name] | any(contains("CONSUL_HTTP_TOKEN"))' | tee /dev/stderr) [ "${actual}" = "false" ] } @@ -826,115 +942,293 @@ load _helpers yq '[.spec.template.spec.containers[0].env[].name]' | tee /dev/stderr) local actual=$(echo $object | - yq 'any(contains("CONSUL_ACL_TOKEN"))' | tee /dev/stderr) + yq 'any(contains("CONSUL_HTTP_TOKEN"))' | tee /dev/stderr) [ "${actual}" = "true" ] local actual=$(echo $object | - yq 'map(select(test("CONSUL_ACL_TOKEN"))) | length' | tee /dev/stderr) + yq 'map(select(test("CONSUL_HTTP_TOKEN"))) | length' | tee /dev/stderr) [ "${actual}" = "1" ] } #-------------------------------------------------------------------- # global.acls.manageSystemACLs -@test "connectInject/Deployment: ACL auth method env vars are set when acls are enabled" { +@test "connectInject/Deployment: consul-logout preStop hook is added when ACLs are enabled" { cd `chart_dir` - local env=$(helm template \ + local object=$(helm template \ -s templates/connect-inject-deployment.yaml \ --set 'connectInject.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ . 
| tee /dev/stderr | - yq '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) + yq '[.spec.template.spec.containers[0].lifecycle.preStop.exec.command[2]] | any(contains("consul-k8s-control-plane consul-logout -consul-api-timeout=5s"))' | tee /dev/stderr) - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_LOGIN_AUTH_METHOD").value' | tee /dev/stderr) - [ "${actual}" = "release-name-consul-k8s-component-auth-method" ] - - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_LOGIN_DATACENTER").value' | tee /dev/stderr) - [ "${actual}" = "dc1" ] - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_LOGIN_META").value' | tee /dev/stderr) - [ "${actual}" = 'component=connect-injector,pod=$(NAMESPACE)/$(POD_NAME)' ] + [ "${object}" = "true" ] } -@test "connectInject/Deployment: sets global auth method and primary datacenter when federation and acls" { +@test "connectInject/Deployment: CONSUL_HTTP_TOKEN_FILE is not set when acls are disabled" { cd `chart_dir` - local env=$(helm template \ + local actual=$(helm template \ -s templates/connect-inject-deployment.yaml \ --set 'connectInject.enabled=true' \ - --set 'global.acls.manageSystemACLs=true' \ - --set 'global.federation.enabled=true' \ - --set 'global.federation.primaryDatacenter=dc1' \ - --set 'global.datacenter=dc2' \ - --set 'global.tls.enabled=true' \ - --set 'meshGateway.enabled=true' \ . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) - - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_LOGIN_AUTH_METHOD").value' | tee /dev/stderr) - [ "${actual}" = "release-name-consul-k8s-component-auth-method-dc2" ] - - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_LOGIN_DATACENTER").value' | tee /dev/stderr) - [ "${actual}" = "dc1" ] + yq '[.spec.template.spec.containers[0].env[0].name] | any(contains("CONSUL_HTTP_TOKEN_FILE"))' | tee /dev/stderr) + [ "${actual}" = "false" ] } -@test "connectInject/Deployment: sets default login partition and acls and partitions are enabled" { +@test "connectInject/Deployment: CONSUL_HTTP_TOKEN_FILE is set when acls are enabled" { cd `chart_dir` - local env=$(helm template \ + local actual=$(helm template \ -s templates/connect-inject-deployment.yaml \ --set 'connectInject.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) - - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_LOGIN_PARTITION").value' | tee /dev/stderr) - [ "${actual}" = "default" ] + yq '[.spec.template.spec.containers[0].env[1].name] | any(contains("CONSUL_HTTP_TOKEN_FILE"))' | tee /dev/stderr) + [ "${actual}" = "true" ] } -@test "connectInject/Deployment: sets non-default login partition and acls and partitions are enabled" { +@test "connectInject/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command and environment with tls disabled" { cd `chart_dir` - local env=$(helm template \ + local object=$(helm template \ -s templates/connect-inject-deployment.yaml \ --set 'connectInject.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.adminPartitions.name=foo' \ - --set 'global.enableConsulNamespaces=true' \ . 
| tee /dev/stderr | - yq '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) + yq '.spec.template.spec.initContainers[0]' | tee /dev/stderr) - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_LOGIN_PARTITION").value' | tee /dev/stderr) - [ "${actual}" = "foo" ] -} + local actual=$(echo $object | + yq -r '.name' | tee /dev/stderr) + [ "${actual}" = "connect-injector-acl-init" ] -@test "connectInject/Deployment: cross namespace policy is not added when global.acls.manageSystemACLs=false" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/connect-inject-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ - . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].command | any(contains("-consul-cross-namespace-acl-policy"))' | tee /dev/stderr) - [ "${actual}" = "false" ] + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[1].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[1].value] | any(contains("http://$(HOST_IP):8500"))' | tee /dev/stderr) + echo $actual + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr) + [ "${actual}" = "true" ] } -@test "connectInject/Deployment: cross namespace policy is added when global.acls.manageSystemACLs=true" { +@test "connectInject/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command and environment with tls enabled" { cd `chart_dir` - local actual=$(helm template \ + local object=$(helm template \ -s templates/connect-inject-deployment.yaml \ --set 'connectInject.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ + --set 'global.tls.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ . 
| tee /dev/stderr | - yq '.spec.template.spec.containers[0].command | any(contains("-consul-cross-namespace-acl-policy"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + yq '.spec.template.spec.initContainers[] | select(.name == "connect-injector-acl-init")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[1].name] | any(contains("CONSUL_CACERT"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].value] | any(contains("https://$(HOST_IP):8501"))' | tee /dev/stderr) + echo $actual + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.volumeMounts[1] | any(contains("consul-ca-cert"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "connectInject/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command with Partitions enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.enableConsulNamespaces=true' \ + --set 'global.adminPartitions.enabled=true' \ + --set 'global.adminPartitions.name=default' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "connect-injector-acl-init")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-acl-auth-method=release-name-consul-k8s-component-auth-method"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-partition=default"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[1].name] | any(contains("CONSUL_CACERT"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].value] | any(contains("https://$(HOST_IP):8501"))' | tee /dev/stderr) + echo $actual + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.volumeMounts[1] | any(contains("consul-ca-cert"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "connectInject/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command and environment with tls enabled and autoencrypt enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . 
| tee /dev/stderr |
+ yq '.spec.template.spec.initContainers[] | select(.name == "connect-injector-acl-init")' | tee /dev/stderr)
+
+ local actual=$(echo $object |
+ yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr)
+ [ "${actual}" = "true" ]
+
+ local actual=$(echo $object |
+ yq '[.env[1].name] | any(contains("CONSUL_CACERT"))' | tee /dev/stderr)
+ [ "${actual}" = "true" ]
+
+ local actual=$(echo $object |
+ yq '[.env[2].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr)
+ [ "${actual}" = "true" ]
+
+ local actual=$(echo $object |
+ yq '[.env[2].value] | any(contains("https://$(HOST_IP):8501"))' | tee /dev/stderr)
+ echo $actual
+ [ "${actual}" = "true" ]
+
+ local actual=$(echo $object |
+ yq '.volumeMounts[1] | any(contains("consul-auto-encrypt-ca-cert"))' | tee /dev/stderr)
+ [ "${actual}" = "true" ]
+
+ local actual=$(echo $object |
+ yq -r '.command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr)
+ [ "${actual}" = "true" ]
+}
+
+@test "connectInject/Deployment: auto-encrypt init container is created and is the first init container when global.acls.manageSystemACLs=true and has correct command and environment with tls enabled and autoencrypt enabled" {
+ cd `chart_dir`
+ local object=$(helm template \
+ -s templates/connect-inject-deployment.yaml \
+ --set 'connectInject.enabled=true' \
+ --set 'global.tls.enabled=true' \
+ --set 'global.tls.enableAutoEncrypt=true' \
+ --set 'global.acls.manageSystemACLs=true' \
+ . | tee /dev/stderr |
+ yq '.spec.template.spec.initContainers[0]' | tee /dev/stderr)
+
+ local actual=$(echo $object |
+ yq -r '.name' | tee /dev/stderr)
+ [ "${actual}" = "get-auto-encrypt-client-ca" ]
+}
+
+@test "connectInject/Deployment: cross namespace policy is not added when global.acls.manageSystemACLs=false" {
+ cd `chart_dir`
+ local actual=$(helm template \
+ -s templates/connect-inject-deployment.yaml \
+ --set 'connectInject.enabled=true' \
+ --set 'global.enableConsulNamespaces=true' \
+ . | tee /dev/stderr |
+ yq '.spec.template.spec.containers[0].command | any(contains("-consul-cross-namespace-acl-policy"))' | tee /dev/stderr)
+ [ "${actual}" = "false" ]
+}
+
+@test "connectInject/Deployment: cross namespace policy is added when global.acls.manageSystemACLs=true" {
+ cd `chart_dir`
+ local actual=$(helm template \
+ -s templates/connect-inject-deployment.yaml \
+ --set 'connectInject.enabled=true' \
+ --set 'global.enableConsulNamespaces=true' \
+ --set 'global.acls.manageSystemACLs=true' \
+ . | tee /dev/stderr |
+ yq '.spec.template.spec.containers[0].command | any(contains("-consul-cross-namespace-acl-policy"))' | tee /dev/stderr)
+ [ "${actual}" = "true" ]
+}
+
+@test "connectInject/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command when in non-primary datacenter with Consul Namespaces disabled" {
+ cd `chart_dir`
+ local object=$(helm template \
+ -s templates/connect-inject-deployment.yaml \
+ --set 'connectInject.enabled=true' \
+ --set 'global.datacenter=dc2' \
+ --set 'global.federation.enabled=true' \
+ --set 'global.federation.primaryDatacenter=dc1' \
+ --set 'meshGateway.enabled=true' \
+ --set 'global.tls.enabled=true' \
+ --set 'global.tls.enableAutoEncrypt=true' \
+ --set 'global.acls.manageSystemACLs=true' \
+ . 
| tee /dev/stderr |
+ yq '.spec.template.spec.initContainers[] | select(.name == "connect-injector-acl-init")' | tee /dev/stderr)
+
+ local actual=$(echo $object |
+ yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr)
+ [ "${actual}" = "true" ]
+
+ local actual=$(echo $object |
+ yq -r '.command | any(contains("-acl-auth-method=release-name-consul-k8s-component-auth-method"))' | tee /dev/stderr)
+ [ "${actual}" = "true" ]
+}
+
+@test "connectInject/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command when in non-primary datacenter with Consul Namespaces enabled" {
+ cd `chart_dir`
+ local object=$(helm template \
+ -s templates/connect-inject-deployment.yaml \
+ --set 'connectInject.enabled=true' \
+ --set 'global.datacenter=dc2' \
+ --set 'global.enableConsulNamespaces=true' \
+ --set 'global.federation.enabled=true' \
+ --set 'global.federation.primaryDatacenter=dc1' \
+ --set 'meshGateway.enabled=true' \
+ --set 'global.tls.enabled=true' \
+ --set 'global.tls.enableAutoEncrypt=true' \
+ --set 'global.acls.manageSystemACLs=true' \
+ . | tee /dev/stderr |
+ yq '.spec.template.spec.initContainers[] | select(.name == "connect-injector-acl-init")' | tee /dev/stderr)
+
+ local actual=$(echo $object |
+ yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr)
+ [ "${actual}" = "true" ]
+
+ local actual=$(echo $object |
+ yq -r '.command | any(contains("-acl-auth-method=release-name-consul-k8s-component-auth-method-dc2"))' | tee /dev/stderr)
+ [ "${actual}" = "true" ]
+
+ local actual=$(echo $object |
+ yq -r '.command | any(contains("-primary-datacenter=dc1"))' | tee /dev/stderr)
+ [ "${actual}" = "true" ]
}

#--------------------------------------------------------------------
@@ -1105,6 +1399,157 @@ load _helpers
 [ "${actual}" = "false" ]
}

+#--------------------------------------------------------------------
+# consul sidecar resources
+
+@test "connectInject/Deployment: default consul sidecar container resources" {
+ cd `chart_dir`
+ local cmd=$(helm template \
+ -s templates/connect-inject-deployment.yaml \
+ --set 'connectInject.enabled=true' \
+ . | tee /dev/stderr |
+ yq '.spec.template.spec.containers[0].command' | tee /dev/stderr)
+
+ local actual=$(echo "$cmd" |
+ yq 'any(contains("-default-consul-sidecar-memory-request=25Mi"))' | tee /dev/stderr)
+ [ "${actual}" = "true" ]
+
+ local actual=$(echo "$cmd" |
+ yq 'any(contains("-default-consul-sidecar-cpu-request=20m"))' | tee /dev/stderr)
+ [ "${actual}" = "true" ]
+
+ local actual=$(echo "$cmd" |
+ yq 'any(contains("-default-consul-sidecar-memory-limit=50Mi"))' | tee /dev/stderr)
+ [ "${actual}" = "true" ]
+
+ local actual=$(echo "$cmd" |
+ yq 'any(contains("-default-consul-sidecar-cpu-limit=20m"))' | tee /dev/stderr)
+ [ "${actual}" = "true" ]
+}
+
+@test "connectInject/Deployment: consul sidecar container resources can be set" {
+ cd `chart_dir`
+ local cmd=$(helm template \
+ -s templates/connect-inject-deployment.yaml \
+ --set 'connectInject.enabled=true' \
+ --set 'global.consulSidecarContainer.resources.requests.memory=100Mi' \
+ --set 'global.consulSidecarContainer.resources.requests.cpu=100m' \
+ --set 'global.consulSidecarContainer.resources.limits.memory=200Mi' \
+ --set 'global.consulSidecarContainer.resources.limits.cpu=200m' \
+ . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) + + local actual=$(echo "$cmd" | + yq 'any(contains("-default-consul-sidecar-memory-request=100Mi"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo "$cmd" | + yq 'any(contains("-default-consul-sidecar-cpu-request=100m"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo "$cmd" | + yq 'any(contains("-default-consul-sidecar-memory-limit=200Mi"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo "$cmd" | + yq 'any(contains("-default-consul-sidecar-cpu-limit=200m"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "connectInject/Deployment: consul sidecar container resources can be set explicitly to 0" { + cd `chart_dir` + local cmd=$(helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.consulSidecarContainer.resources.requests.memory=0' \ + --set 'global.consulSidecarContainer.resources.requests.cpu=0' \ + --set 'global.consulSidecarContainer.resources.limits.memory=0' \ + --set 'global.consulSidecarContainer.resources.limits.cpu=0' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) + + local actual=$(echo "$cmd" | + yq 'any(contains("-default-consul-sidecar-memory-request=0"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo "$cmd" | + yq 'any(contains("-default-consul-sidecar-cpu-request=0"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo "$cmd" | + yq 'any(contains("-default-consul-sidecar-memory-limit=0"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo "$cmd" | + yq 'any(contains("-default-consul-sidecar-cpu-limit=0"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "connectInject/Deployment: consul sidecar container resources can be individually set to null" { + cd `chart_dir` + local cmd=$(helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.consulSidecarContainer.resources.requests.memory=null' \ + --set 'global.consulSidecarContainer.resources.requests.cpu=null' \ + --set 'global.consulSidecarContainer.resources.limits.memory=null' \ + --set 'global.consulSidecarContainer.resources.limits.cpu=null' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) + + local actual=$(echo "$cmd" | + yq 'any(contains("-default-consul-sidecar-memory-request"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo "$cmd" | + yq 'any(contains("-default-consul-sidecar-cpu-request"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo "$cmd" | + yq 'any(contains("-default-consul-sidecar-memory-limit"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo "$cmd" | + yq 'any(contains("-default-consul-sidecar-cpu-limit"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "connectInject/Deployment: consul sidecar container resources can be set to null" { + cd `chart_dir` + local cmd=$(helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.consulSidecarContainer.resources=null' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) + + local actual=$(echo "$cmd" | + yq 'any(contains("-default-consul-sidecar-memory-request"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo "$cmd" | + yq 'any(contains("-default-consul-sidecar-cpu-request"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo "$cmd" | + yq 'any(contains("-default-consul-sidecar-memory-limit"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo "$cmd" | + yq 'any(contains("-default-consul-sidecar-cpu-limit"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "connectInject/Deployment: fails if global.lifecycleSidecarContainer is set" { + cd `chart_dir` + run helm template \ + -s templates/connect-inject-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.lifecycleSidecarContainer.resources.requests.memory=100Mi' . + [ "$status" -eq 1 ] + [[ "$output" =~ "global.lifecycleSidecarContainer has been renamed to global.consulSidecarContainer. Please set values using global.consulSidecarContainer." ]] +} + #-------------------------------------------------------------------- # sidecarProxy.resources @@ -1250,28 +1695,6 @@ load _helpers #-------------------------------------------------------------------- # extraLabels -@test "connectInject/Deployment: no extra labels defined by default" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/connect-inject-deployment.yaml \ - --set 'connectInject.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.template.metadata.labels | del(."app") | del(."chart") | del(."release") | del(."component")' | tee /dev/stderr) - [ "${actual}" = "{}" ] -} - -@test "connectInject/Deployment: can set extra labels" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/connect-inject-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'connectInject.extraLabels.foo=bar' \ - . | tee /dev/stderr | - yq -r '.spec.template.metadata.labels.foo' | tee /dev/stderr) - - [ "${actual}" = "bar" ] -} - @test "connectInject/Deployment: can set extra global labels" { cd `chart_dir` local actual=$(helm template \ @@ -1457,8 +1880,6 @@ load _helpers -s templates/connect-inject-deployment.yaml \ --set 'connectInject.enabled=true' \ --set 'global.peering.enabled=true' \ - --set 'meshGateway.enabled=true' \ - --set 'global.tls.enabled=true' \ . | tee /dev/stderr | yq '.spec.template.spec.containers[0].command | any(contains("-enable-peering=true"))' | tee /dev/stderr) @@ -1475,93 +1896,213 @@ load _helpers [[ "$output" =~ "setting global.peering.enabled to true requires connectInject.enabled to be true" ]] } -@test "connectInject/Deployment: fails if peering is enabled but tls is not" { +@test "connectInject/Deployment: -read-server-expose-service=true is set when global.peering.enabled is true and global.peering.tokenGeneration.serverAddresses.source is empty" { cd `chart_dir` - run helm template \ + local actual=$(helm template \ -s templates/connect-inject-deployment.yaml \ --set 'connectInject.enabled=true' \ - --set 'meshGateway.enabled=true' \ - --set 'global.peering.enabled=true' . - [ "$status" -eq 1 ] - [[ "$output" =~ "setting global.peering.enabled to true requires global.tls.enabled to be true" ]] + --set 'global.peering.enabled=true' \ + . 
| tee /dev/stderr |
+ yq '.spec.template.spec.containers[0].command | any(contains("-read-server-expose-service=true"))' | tee /dev/stderr)
+
+ [ "${actual}" = "true" ]
}

-@test "connectInject/Deployment: fails if peering is enabled but mesh gateways are not" {
+@test "connectInject/Deployment: -read-server-expose-service=true is set when servers are enabled and peering is enabled" {
 cd `chart_dir`
- run helm template \
+ local actual=$(helm template \
 -s templates/connect-inject-deployment.yaml \
+ --set 'global.enabled=false' \
+ --set 'server.enabled=true' \
+ --set 'client.enabled=true' \
 --set 'connectInject.enabled=true' \
- --set 'global.tls.enabled=true' \
- --set 'global.peering.enabled=true' .
- [ "$status" -eq 1 ]
- [[ "$output" =~ "setting global.peering.enabled to true requires meshGateway.enabled to be true" ]]
+ --set 'global.peering.enabled=true' \
+ . | tee /dev/stderr |
+ yq '.spec.template.spec.containers[0].command | any(contains("-read-server-expose-service=true"))' | tee /dev/stderr)
+
+ [ "${actual}" = "true" ]
}

-#--------------------------------------------------------------------
-# openshift
+@test "connectInject/Deployment: -read-server-expose-service is not set when servers are disabled" {
+ cd `chart_dir`
+ local actual=$(helm template \
+ -s templates/connect-inject-deployment.yaml \
+ --set 'server.enabled=false' \
+ --set 'connectInject.enabled=true' \
+ --set 'global.peering.enabled=true' \
+ . | tee /dev/stderr |
+ yq '.spec.template.spec.containers[0].command | any(contains("-read-server-expose-service=true"))' | tee /dev/stderr)

-@test "connectInject/Deployment: openshift is is not set by default" {
+ [ "${actual}" = "false" ]
+}
+
+@test "connectInject/Deployment: -read-server-expose-service is not set when peering is disabled" {
 cd `chart_dir`
 local actual=$(helm template \
 -s templates/connect-inject-deployment.yaml \
 --set 'connectInject.enabled=true' \
+ --set 'global.peering.enabled=false' \
 . | tee /dev/stderr |
- yq '.spec.template.spec.containers[0].command | any(contains("-enable-openshift"))' | tee /dev/stderr)
+ yq '.spec.template.spec.containers[0].command | any(contains("-read-server-expose-service=true"))' | tee /dev/stderr)

 [ "${actual}" = "false" ]
}

-@test "connectInject/Deployment: -enable-openshift is set when global.openshift.enabled is true" {
+@test "connectInject/Deployment: -read-server-expose-service is not set when global.peering.tokenGeneration.serverAddresses.source is set to consul" {
 cd `chart_dir`
 local actual=$(helm template \
 -s templates/connect-inject-deployment.yaml \
 --set 'connectInject.enabled=true' \
- --set 'global.openshift.enabled=true' \
+ --set 'global.peering.enabled=true' \
+ --set 'global.peering.tokenGeneration.serverAddresses.source=consul' \
 . | tee /dev/stderr |
- yq '.spec.template.spec.containers[0].command | any(contains("-enable-openshift"))' | tee /dev/stderr)
+ yq '.spec.template.spec.containers[0].command | any(contains("-read-server-expose-service=true"))' | tee /dev/stderr)
+
+ [ "${actual}" = "false" ]
+}
+
+@test "connectInject/Deployment: fails if server address source is an invalid value" {
+ cd `chart_dir`
+ run helm template \
+ -s templates/connect-inject-deployment.yaml \
+ --set 'connectInject.enabled=true' \
+ --set 'global.peering.enabled=true' \
+ --set 'global.peering.tokenGeneration.serverAddresses.source=notempty' .
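+ # "notempty" is not an accepted source; rendering should fail with the error asserted below.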
+ [ "$status" -eq 1 ]
+ [[ "$output" =~ "global.peering.tokenGeneration.serverAddresses.source must be one of empty string, 'consul' or 'static'" ]]
+}
+
+@test "connectInject/Deployment: -read-server-expose-service and -token-server-address are not set when global.peering.tokenGeneration.serverAddresses.source is consul" {
+ cd `chart_dir`
+ local command=$(helm template \
+ -s templates/connect-inject-deployment.yaml \
+ --set 'connectInject.enabled=true' \
+ --set 'global.peering.enabled=true' \
+ --set 'global.peering.tokenGeneration.serverAddresses.source=consul' \
+ . | tee /dev/stderr |
+ yq '.spec.template.spec.containers[0].command')
+
+ local actual=$(echo $command | jq -r ' . | any(contains("-read-server-expose-service=true"))' | tee /dev/stderr)
+ [ "${actual}" = "false" ]
+
+ local actual=$(echo $command | jq -r ' . | any(contains("-token-server-address"))' | tee /dev/stderr)
+ [ "${actual}" = "false" ]
+}
+
+@test "connectInject/Deployment: when servers are not enabled and externalServers.enabled=true, passes in -token-server-address flags with hosts" {
+ cd `chart_dir`
+ local command=$(helm template \
+ -s templates/connect-inject-deployment.yaml \
+ --set 'server.enabled=false' \
+ --set 'externalServers.enabled=true' \
+ --set 'externalServers.hosts[0]=1.2.3.4' \
+ --set 'externalServers.hosts[1]=2.2.3.4' \
+ --set 'connectInject.enabled=true' \
+ --set 'global.peering.enabled=true' \
+ . | tee /dev/stderr |
+ yq '.spec.template.spec.containers[0].command')
+
+ local actual=$(echo $command | jq -r ' . | any(contains("-token-server-address=\"1.2.3.4:8503\""))' | tee /dev/stderr)
+ [ "${actual}" = "true" ]
+
+ local actual=$(echo $command | jq -r ' . | any(contains("-token-server-address=\"2.2.3.4:8503\""))' | tee /dev/stderr)
+ [ "${actual}" = "true" ]
+}
+
+@test "connectInject/Deployment: externalServers.grpcPort can be customized" {
+ cd `chart_dir`
+ local command=$(helm template \
+ -s templates/connect-inject-deployment.yaml \
+ --set 'server.enabled=false' \
+ --set 'externalServers.enabled=true' \
+ --set 'externalServers.hosts[0]=1.2.3.4' \
+ --set 'externalServers.hosts[1]=2.2.3.4' \
+ --set 'externalServers.grpcPort=1234' \
+ --set 'connectInject.enabled=true' \
+ --set 'global.peering.enabled=true' \
+ . | tee /dev/stderr |
+ yq '.spec.template.spec.containers[0].command')
+
+ local actual=$(echo $command | jq -r ' . | any(contains("-token-server-address=\"1.2.3.4:1234\""))' | tee /dev/stderr)
+ [ "${actual}" = "true" ]
+
+ local actual=$(echo $command | jq -r ' . | any(contains("-token-server-address=\"2.2.3.4:1234\""))' | tee /dev/stderr)
+ [ "${actual}" = "true" ]
+}
+
+@test "connectInject/Deployment: when peering token generation source is static, passes in -token-server-address flags with static addresses" {
+ cd `chart_dir`
+ local command=$(helm template \
+ -s templates/connect-inject-deployment.yaml \
+ --set 'global.peering.tokenGeneration.serverAddresses.source=static' \
+ --set 'global.peering.tokenGeneration.serverAddresses.static[0]=1.2.3.4:1234' \
+ --set 'global.peering.tokenGeneration.serverAddresses.static[1]=2.2.3.4:2234' \
+ --set 'connectInject.enabled=true' \
+ --set 'global.peering.enabled=true' \
+ . | tee /dev/stderr |
+ yq '.spec.template.spec.containers[0].command')
+
+ local actual=$(echo $command | jq -r ' . | any(contains("-token-server-address=\"1.2.3.4:1234\""))' | tee /dev/stderr)
+ [ "${actual}" = "true" ]
+
+ local actual=$(echo $command | jq -r ' . 
| any(contains("-token-server-address=\"2.2.3.4:2234\""))' | tee /dev/stderr)
+ [ "${actual}" = "true" ]
+}
+
+@test "connectInject/Deployment: when peering token generation source is static and externalHosts are set, passes in -token-server-address flags with static addresses, not externalServers.hosts" {
+ cd `chart_dir`
+ local command=$(helm template \
+ -s templates/connect-inject-deployment.yaml \
+ --set 'server.enabled=false' \
+ --set 'global.peering.tokenGeneration.serverAddresses.source=static' \
+ --set 'global.peering.tokenGeneration.serverAddresses.static[0]=1.2.3.4:1234' \
+ --set 'global.peering.tokenGeneration.serverAddresses.static[1]=2.2.3.4:2234' \
+ --set 'externalServers.enabled=true' \
+ --set 'externalServers.hosts[0]=1.1.1.1' \
+ --set 'externalServers.hosts[1]=2.2.2.2' \
+ --set 'connectInject.enabled=true' \
+ --set 'global.peering.enabled=true' \
+ . | tee /dev/stderr |
+ yq '.spec.template.spec.containers[0].command')
+
+ local actual=$(echo $command | jq -r ' . | any(contains("-token-server-address=\"1.2.3.4:1234\""))' | tee /dev/stderr)
+ [ "${actual}" = "true" ]
+
+ local actual=$(echo $command | jq -r ' . | any(contains("-token-server-address=\"2.2.3.4:2234\""))' | tee /dev/stderr)
 [ "${actual}" = "true" ]
}

#--------------------------------------------------------------------
-# nodeMeta
+# openshift

-@test "connectInject/Deployment: nodeMeta is not set by default" {
+@test "connectInject/Deployment: openshift is not set by default" {
 cd `chart_dir`
- local cmd=$(helm template \
- -s templates/connect-inject-deployment.yaml \
+ local actual=$(helm template \
+ -s templates/connect-inject-deployment.yaml \
 --set 'connectInject.enabled=true' \
 . | tee /dev/stderr |
- yq '.spec.template.spec.containers[0].command' | tee /dev/stderr)
+ yq '.spec.template.spec.containers[0].command | any(contains("-enable-openshift"))' | tee /dev/stderr)

- local actual=$(echo "$cmd" |
- yq 'any(contains("-node-meta"))' | tee /dev/stderr)
 [ "${actual}" = "false" ]
}

-@test "connectInject/Deployment: can set nodeMeta explicitly" {
+@test "connectInject/Deployment: -enable-openshift is set when global.openshift.enabled is true" {
 cd `chart_dir`
- local cmd=$(helm template \
- -s templates/connect-inject-deployment.yaml \
+ local actual=$(helm template \
+ -s templates/connect-inject-deployment.yaml \
 --set 'connectInject.enabled=true' \
- --set 'connectInject.consulNode.meta.foo=bar' \
- --set 'connectInject.consulNode.meta.test=value' \
+ --set 'global.openshift.enabled=true' \
 . | tee /dev/stderr |
- yq '.spec.template.spec.containers[0].command' | tee /dev/stderr)
-
- local actual=$(echo "$cmd" |
- yq 'any(contains("-node-meta=foo=bar"))' | tee /dev/stderr)
- [ "${actual}" = "true" ]
+ yq '.spec.template.spec.containers[0].command | any(contains("-enable-openshift"))' | tee /dev/stderr)

- local actual=$(echo "$cmd" |
- yq 'any(contains("-node-meta=test=value"))' | tee /dev/stderr)
 [ "${actual}" = "true" ]
}
+
#--------------------------------------------------------------------
# replicas

-@test "connectInject/Deployment: replicas defaults to 1" {
+@test "connectInject/Deployment: replicas defaults to 2" {
 cd `chart_dir`
 local actual=$(helm template \
 -s templates/connect-inject-deployment.yaml \
 . 
| tee /dev/stderr | yq '.spec.replicas' | tee /dev/stderr) - [ "${actual}" = "1" ] + [ "${actual}" = "2" ] } @test "connectInject/Deployment: replicas can be set" { @@ -1585,54 +2126,33 @@ load _helpers } #-------------------------------------------------------------------- -# Vault +# get-auto-encrypt-client-ca -@test "connectInject/Deployment: CONSUL_CACERT env variable is set points to vault secrets when TLS and vault are enabled" { +@test "connectInject/Deployment: get-auto-encrypt-client-ca uses server's stateful set address by default and passes ca cert" { cd `chart_dir` - local actual=$(helm template \ + local command=$(helm template \ -s templates/connect-inject-deployment.yaml \ --set 'connectInject.enabled=true' \ --set 'global.tls.enabled=true' \ - --set 'global.tls.caCert.secretName=foo' \ - --set 'global.secretsBackend.vault.enabled=true' \ - --set 'global.secretsBackend.vault.consulClientRole=foo' \ - --set 'global.secretsBackend.vault.consulServerRole=test' \ - --set 'global.secretsBackend.vault.consulCARole=test' \ + --set 'global.tls.enableAutoEncrypt=true' \ . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].env[] | select(.name == "CONSUL_CACERT_FILE").value' | tee /dev/stderr) - [ "${actual}" = "/vault/secrets/serverca.crt" ] -} + yq '.spec.template.spec.initContainers[] | select(.name == "get-auto-encrypt-client-ca").command | join(" ")' | tee /dev/stderr) -@test "connectInject/Deployment: consul-ca-cert volume is not added when TLS and vault are enabled" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/connect-inject-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.caCert.secretName=foo' \ - --set 'global.secretsBackend.vault.enabled=true' \ - --set 'global.secretsBackend.vault.consulClientRole=foo' \ - --set 'global.secretsBackend.vault.consulServerRole=test' \ - --set 'global.secretsBackend.vault.consulCARole=test' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.volumes[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) - [ "${actual}" = "" ] -} + # check server address + actual=$(echo $command | jq ' . | contains("-server-addr=release-name-consul-server")') + [ "${actual}" = "true" ] -@test "connectInject/Deployment: consul-ca-cert volume mount is not added when TLS and vault are enabled" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/connect-inject-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.caCert.secretName=foo' \ - --set 'global.secretsBackend.vault.enabled=true' \ - --set 'global.secretsBackend.vault.consulClientRole=foo' \ - --set 'global.secretsBackend.vault.consulServerRole=test' \ - --set 'global.secretsBackend.vault.consulCARole=test' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers.volumeMounts[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) - [ "${actual}" = "" ] + # check server port + actual=$(echo $command | jq ' . | contains("-server-port=8501")') + [ "${actual}" = "true" ] + + # check server's CA cert + actual=$(echo $command | jq ' . | contains("-ca-file=/consul/tls/ca/tls.crt")') + [ "${actual}" = "true" ] + + # check consul-api-timeout + actual=$(echo $command | jq ' . 
| contains("-consul-api-timeout=5s")') + [ "${actual}" = "true" ] } #-------------------------------------------------------------------- @@ -1644,6 +2164,7 @@ load _helpers -s templates/connect-inject-deployment.yaml \ --set 'connectInject.enabled=true' \ --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.tls.caCert.secretName=foo' \ . | tee /dev/stderr | yq '.spec.template.spec.containers[0].command | any(contains("-enable-webhook-ca-update"))' | tee /dev/stderr) @@ -1656,6 +2177,7 @@ load _helpers -s templates/connect-inject-deployment.yaml \ --set 'connectInject.enabled=true' \ --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.tls.caCert.secretName=foo' \ --set 'global.secretsBackend.vault.enabled=true' \ --set 'global.secretsBackend.vault.consulClientRole=foo' \ @@ -1665,6 +2187,9 @@ load _helpers --set 'global.secretsBackend.vault.connectInjectRole=test' \ --set 'global.secretsBackend.vault.connectInject.caCert.secretName=foo/ca' \ --set 'global.secretsBackend.vault.connectInject.tlsCert.secretName=foo/tls' \ + --set 'global.secretsBackend.vault.controllerRole=test' \ + --set 'global.secretsBackend.vault.controller.caCert.secretName=foo/ca' \ + --set 'global.secretsBackend.vault.controller.tlsCert.secretName=foo/tls' \ . | tee /dev/stderr | yq '.spec.template.spec.containers[0].command | any(contains("-enable-webhook-ca-update"))' | tee /dev/stderr) [ "${actual}" = "true" ] @@ -1679,6 +2204,7 @@ load _helpers -s templates/connect-inject-deployment.yaml \ --set 'connectInject.enabled=true' \ --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.tls.caCert.secretName=foo' \ --set 'global.secretsBackend.vault.enabled=true' \ --set 'global.secretsBackend.vault.consulClientRole=foo' \ @@ -1699,6 +2225,7 @@ load _helpers -s templates/connect-inject-deployment.yaml \ --set 'connectInject.enabled=true' \ --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.tls.caCert.secretName=foo' \ --set 'global.secretsBackend.vault.enabled=true' \ --set 'global.secretsBackend.vault.consulClientRole=foo' \ @@ -1720,6 +2247,7 @@ load _helpers -s templates/connect-inject-deployment.yaml \ --set 'connectInject.enabled=true' \ --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.tls.caCert.secretName=foo' \ --set 'global.secretsBackend.vault.enabled=true' \ --set 'global.secretsBackend.vault.consulClientRole=foo' \ @@ -1741,6 +2269,7 @@ load _helpers -s templates/connect-inject-deployment.yaml \ --set 'connectInject.enabled=true' \ --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.tls.caCert.secretName=foo' \ --set 'global.secretsBackend.vault.enabled=true' \ --set 'global.secretsBackend.vault.consulClientRole=foo' \ @@ -1763,6 +2292,7 @@ load _helpers -s templates/connect-inject-deployment.yaml \ --set 'connectInject.enabled=true' \ --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.secretsBackend.vault.enabled=true' \ --set 'global.secretsBackend.vault.consulClientRole=test' \ --set 'global.secretsBackend.vault.consulServerRole=foo' \ @@ -1771,7 +2301,7 @@ load _helpers --set 'global.secretsBackend.vault.connectInjectRole=connectinjectcarole' \ --set 'global.secretsBackend.vault.agentAnnotations=foo: bar' . 
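+ # connectInjectRole is set without the matching webhook cert secrets, so rendering should fail.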
[ "$status" -eq 1 ]
- [[ "$output" =~ "When one of the following has been set, all must be set: global.secretsBackend.vault.connectInjectRole, global.secretsBackend.vault.connectInject.tlsCert.secretName, global.secretsBackend.vault.connectInject.caCert.secretName" ]]
+ [[ "$output" =~ "When one of the following has been set, all must be set: global.secretsBackend.vault.connectInjectRole, global.secretsBackend.vault.connectInject.tlsCert.secretName, global.secretsBackend.vault.connectInject.caCert.secretName, global.secretsBackend.vault.controllerRole, global.secretsBackend.vault.controller.tlsCert.secretName, and global.secretsBackend.vault.controller.caCert.secretName." ]]
}

@test "connectInject/Deployment: fails if vault is enabled and global.secretsBackend.vault.connectInject.tlsCert.secretName is set but global.secretsBackend.vault.connectInjectRole and global.secretsBackend.vault.connectInject.caCert.secretName are not" {
@@ -1780,6 +2310,7 @@ load _helpers
 -s templates/connect-inject-deployment.yaml \
 --set 'connectInject.enabled=true' \
 --set 'global.tls.enabled=true' \
+ --set 'global.tls.enableAutoEncrypt=true' \
 --set 'global.secretsBackend.vault.enabled=true' \
 --set 'global.secretsBackend.vault.consulClientRole=test' \
 --set 'global.secretsBackend.vault.consulServerRole=foo' \
 --set 'global.secretsBackend.vault.consulCARole=carole' \
 --set 'global.secretsBackend.vault.connectInject.tlsCert.secretName=foo/tls' \
 --set 'global.secretsBackend.vault.agentAnnotations=foo: bar' .
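+ # Only the webhook tlsCert secret is provided here; the role and caCert secret must be set with it.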
[ "$status" -eq 1 ] - [[ "$output" =~ "When one of the following has been set, all must be set: global.secretsBackend.vault.connectInjectRole, global.secretsBackend.vault.connectInject.tlsCert.secretName, global.secretsBackend.vault.connectInject.caCert.secretName" ]] + [[ "$output" =~ "When one of the following has been set, all must be set: global.secretsBackend.vault.connectInjectRole, global.secretsBackend.vault.connectInject.tlsCert.secretName, global.secretsBackend.vault.connectInject.caCert.secretName, global.secretsBackend.vault.controllerRole, global.secretsBackend.vault.controller.tlsCert.secretName, and global.secretsBackend.vault.controller.caCert.secretName." ]] } @test "connectInject/Deployment: vault tls annotations are set when tls is enabled" { @@ -1818,11 +2350,15 @@ load _helpers --set 'global.secretsBackend.vault.consulServerRole=bar' \ --set 'global.secretsBackend.vault.consulCARole=test2' \ --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ --set 'server.serverCert.secretName=pki_int/issue/test' \ --set 'global.tls.caCert.secretName=pki_int/cert/ca' \ --set 'global.secretsBackend.vault.connectInjectRole=test' \ --set 'global.secretsBackend.vault.connectInject.caCert.secretName=foo/ca' \ --set 'global.secretsBackend.vault.connectInject.tlsCert.secretName=pki/issue/connect-webhook-cert-dc1' \ + --set 'global.secretsBackend.vault.controllerRole=test' \ + --set 'global.secretsBackend.vault.controller.caCert.secretName=foo/ca' \ + --set 'global.secretsBackend.vault.controller.tlsCert.secretName=foo/tls' \ . | tee /dev/stderr | yq -r '.spec.template.metadata' | tee /dev/stderr) @@ -1896,6 +2432,9 @@ load _helpers --set 'global.secretsBackend.vault.connectInjectRole=inject-ca-role' \ --set 'global.secretsBackend.vault.connectInject.tlsCert.secretName=pki/issue/connect-webhook-cert-dc1' \ --set 'global.secretsBackend.vault.connectInject.caCert.secretName=pki/issue/connect-webhook-cert-dc1' \ + --set 'global.secretsBackend.vault.controllerRole=test' \ + --set 'global.secretsBackend.vault.controller.caCert.secretName=foo/ca' \ + --set 'global.secretsBackend.vault.controller.tlsCert.secretName=foo/tls' \ --set 'global.secretsBackend.vault.consulClientRole=foo' \ --set 'global.secretsBackend.vault.consulServerRole=bar' \ --set 'global.secretsBackend.vault.consulCARole=test2' \ @@ -1918,6 +2457,10 @@ load _helpers --set 'global.secretsBackend.vault.connectInjectRole=inject-ca-role' \ --set 'global.secretsBackend.vault.connectInject.tlsCert.secretName=pki/issue/connect-webhook-cert-dc1' \ --set 'global.secretsBackend.vault.connectInject.caCert.secretName=pki/issue/connect-webhook-cert-dc1' \ + --set 'global.secretsBackend.vault.controllerRole=test' \ + --set 'global.secretsBackend.vault.controller.caCert.secretName=foo/ca' \ + --set 'global.secretsBackend.vault.controller.tlsCert.secretName=foo/tls' \ + --set 'global.tls.enableAutoEncrypt=true' \ --set 'server.serverCert.secretName=pki_int/issue/test' \ --set 'global.tls.caCert.secretName=pki_int/cert/ca' \ . 
| tee /dev/stderr | @@ -1944,9 +2487,13 @@ load _helpers --set 'connectInject.enabled=true' \ --set 'global.tls.enabled=true' \ --set 'global.tls.caCert.secretName=foo' \ + --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.secretsBackend.vault.connectInjectRole=inject-ca-role' \ --set 'global.secretsBackend.vault.connectInject.tlsCert.secretName=pki/issue/connect-webhook-cert-dc1' \ --set 'global.secretsBackend.vault.connectInject.caCert.secretName=pki/issue/connect-webhook-cert-dc1' \ + --set 'global.secretsBackend.vault.controllerRole=test' \ + --set 'global.secretsBackend.vault.controller.caCert.secretName=foo/ca' \ + --set 'global.secretsBackend.vault.controller.tlsCert.secretName=foo/tls' \ . | tee /dev/stderr | yq '.spec.template.spec.volumes[] | select(.name == "certs")' | tee /dev/stderr) [ "${actual}" == "" ] @@ -1963,9 +2510,13 @@ load _helpers --set 'connectInject.enabled=true' \ --set 'global.tls.enabled=true' \ --set 'global.tls.caCert.secretName=foo' \ + --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.secretsBackend.vault.connectInjectRole=inject-ca-role' \ --set 'global.secretsBackend.vault.connectInject.tlsCert.secretName=pki/issue/connect-webhook-cert-dc1' \ --set 'global.secretsBackend.vault.connectInject.caCert.secretName=pki/issue/connect-webhook-cert-dc1' \ + --set 'global.secretsBackend.vault.controllerRole=test' \ + --set 'global.secretsBackend.vault.controller.caCert.secretName=foo/ca' \ + --set 'global.secretsBackend.vault.controller.tlsCert.secretName=foo/tls' \ . | tee /dev/stderr | yq '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "certs")' | tee /dev/stderr) [ "${actual}" == "" ] @@ -1981,6 +2532,7 @@ load _helpers --set 'global.secretsBackend.vault.consulServerRole=foo' \ --set 'global.tls.enabled=true' \ --set 'global.tls.caCert.secretName=foo' \ + --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.secretsBackend.vault.consulCARole=carole' \ . | tee /dev/stderr | yq -r '.spec.template.metadata' | tee /dev/stderr) @@ -2014,6 +2566,7 @@ load _helpers -s templates/connect-inject-deployment.yaml \ --set 'connectInject.enabled=true' \ --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.secretsBackend.vault.enabled=true' \ --set 'global.secretsBackend.vault.consulClientRole=test' \ --set 'global.secretsBackend.vault.consulServerRole=foo' \ @@ -2056,386 +2609,3 @@ reservedNameTest() { [ "$status" -eq 1 ] [[ "$output" =~ "The name $name set for key connectInject.consulNamespaces.consulDestinationNamespace is reserved by Consul for future use" ]] } - -#-------------------------------------------------------------------- -# externalServers - -@test "connectInject/Deployment: fails if externalServers.hosts is not provided when externalServers.enabled is true" { - cd `chart_dir` - run helm template \ - -s templates/connect-inject-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'server.enabled=false' \ - --set 'externalServers.enabled=true' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "externalServers.hosts must be set if externalServers.enabled is true" ]] -} - -@test "connectInject/Deployment: configures the sidecar-injector env to use external servers" { - cd `chart_dir` - local env=$(helm template \ - -s templates/connect-inject-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'server.enabled=false' \ - --set 'externalServers.enabled=true' \ - --set 'externalServers.hosts[0]=consul' \ - . 
| tee /dev/stderr | - yq '.spec.template.spec.containers[0].env[]' | tee /dev/stderr)\ - - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_ADDRESSES").value' | tee /dev/stderr) - [ "${actual}" = "consul" ] - - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_HTTP_PORT").value' | tee /dev/stderr) - [ "${actual}" = "8501" ] - - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_GRPC_PORT").value' | tee /dev/stderr) - [ "${actual}" = "8502" ] -} - -@test "connectInject/Deployment: can provide a different ports for the sidecar-injector when external servers are enabled" { - cd `chart_dir` - local env=$(helm template \ - -s templates/connect-inject-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'server.enabled=false' \ - --set 'externalServers.enabled=true' \ - --set 'externalServers.hosts[0]=consul' \ - --set 'externalServers.httpsPort=443' \ - --set 'externalServers.grpcPort=444' \ - . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].env[]' | tee /dev/stderr)\ - - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_ADDRESSES").value' | tee /dev/stderr) - [ "${actual}" = "consul" ] - - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_HTTP_PORT").value' | tee /dev/stderr) - [ "${actual}" = "443" ] - - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_GRPC_PORT").value' | tee /dev/stderr) - [ "${actual}" = "444" ] -} - -@test "connectInject/Deployment: can provide a TLS server name for the sidecar-injector when external servers are enabled" { - cd `chart_dir` - local env=$(helm template \ - -s templates/connect-inject-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'server.enabled=false' \ - --set 'global.tls.enabled=true' \ - --set 'externalServers.enabled=true' \ - --set 'externalServers.hosts[0]=consul' \ - --set 'externalServers.tlsServerName=foo' \ - --set 'global.acls.manageSystemACLs=true' \ - . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) - - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_TLS_SERVER_NAME").value' | tee /dev/stderr) - [ "${actual}" = "foo" ] -} - -@test "connectInject/Deployment: does not configure CA cert for the sidecar-injector when external servers with useSystemRoots are enabled" { - cd `chart_dir` - local spec=$(helm template \ - -s templates/connect-inject-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'server.enabled=false' \ - --set 'externalServers.enabled=true' \ - --set 'externalServers.hosts[0]=consul' \ - --set 'externalServers.useSystemRoots=true' \ - --set 'global.acls.manageSystemACLs=true' \ - --set 'global.tls.enabled=true' \ - . 
| tee /dev/stderr | - yq '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) - - local actual=$(echo "$spec" | yq '.containers[0].env[] | select(.name == "CONSUL_CACERT_FILE")' | tee /dev/stderr) - [ "${actual}" = "" ] - - local actual=$(echo "$spec" | yq '.containers[0].volumeMounts[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) - [ "${actual}" = "" ] - - local actual=$(echo "$spec" | yq '.initContainers[0].volumeMounts[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) - [ "${actual}" = "" ] - - local actual=$(echo "$spec" | yq '.volumes[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) - [ "${actual}" = "" ] -} - -@test "connectInject/Deployment: fails if externalServers.skipServerWatch is not provided when externalServers.enabled is true" { - cd `chart_dir` - run helm template \ - -s templates/connect-inject-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'server.enabled=false' \ - --set 'externalServers.skipServerWatch=true' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "externalServers.enabled must be set if externalServers.skipServerWatch is true" ]] -} - -@test "connectInject/Deployment: configures the sidecar-injector env to skip server watch" { - cd `chart_dir` - local env=$(helm template \ - -s templates/connect-inject-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'server.enabled=false' \ - --set 'externalServers.enabled=true' \ - --set 'externalServers.hosts[0]=consul' \ - --set 'externalServers.skipServerWatch=true' \ - . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].env[]' | tee /dev/stderr)\ - - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_SKIP_SERVER_WATCH").value' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -#-------------------------------------------------------------------- -# global.cloud - -@test "connectInject/Deployment: fails when global.cloud.enabled is true and global.cloud.clientId.secretName is not set but global.cloud.clientSecret.secretName and global.cloud.resourceId.secretName is set" { - cd `chart_dir` - run helm template \ - -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.datacenter=dc-foo' \ - --set 'global.domain=bar' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientSecret.secretName=client-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-id-key' \ - --set 'global.cloud.resourceId.secretName=client-resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=client-resource-id-key' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." 
]] -} - -@test "connectInject/Deployment: fails when global.cloud.enabled is true and global.cloud.clientSecret.secretName is not set but global.cloud.clientId.secretName and global.cloud.resourceId.secretName is set" { - cd `chart_dir` - run helm template \ - -s templates/client-daemonset.yaml \ - --set 'client.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.datacenter=dc-foo' \ - --set 'global.domain=bar' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]] -} - -@test "connectInject/Deployment: fails when global.cloud.enabled is true and global.cloud.resourceId.secretName is not set but global.cloud.clientId.secretName and global.cloud.clientSecret.secretName is set" { - cd `chart_dir` - run helm template \ - -s templates/connect-inject-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]] -} - -@test "connectInject/Deployment: fails when global.cloud.resourceId.secretName is set but global.cloud.resourceId.secretKey is not set." { - cd `chart_dir` - run helm template \ - -s templates/connect-inject-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When either global.cloud.resourceId.secretName or global.cloud.resourceId.secretKey is defined, both must be set." ]] -} - -@test "connectInject/Deployment: fails when global.cloud.authURL.secretName is set but global.cloud.authURL.secretKey is not set." 
{ - cd `chart_dir` - run helm template \ - -s templates/connect-inject-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.authUrl.secretName=auth-url-name' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.authUrl.secretName or global.cloud.authUrl.secretKey is defined, both must be set." ]] -} - -@test "connectInject/Deployment: fails when global.cloud.authURL.secretKey is set but global.cloud.authURL.secretName is not set." { - cd `chart_dir` - run helm template \ - -s templates/connect-inject-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.authUrl.secretKey=auth-url-key' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.authUrl.secretName or global.cloud.authUrl.secretKey is defined, both must be set." ]] -} - -@test "connectInject/Deployment: fails when global.cloud.apiHost.secretName is set but global.cloud.apiHost.secretKey is not set." { - cd `chart_dir` - run helm template \ - -s templates/connect-inject-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.apiHost.secretName=auth-url-name' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.apiHost.secretName or global.cloud.apiHost.secretKey is defined, both must be set." ]] -} - -@test "connectInject/Deployment: fails when global.cloud.apiHost.secretKey is set but global.cloud.apiHost.secretName is not set." 
{ - cd `chart_dir` - run helm template \ - -s templates/connect-inject-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.apiHost.secretKey=auth-url-key' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.apiHost.secretName or global.cloud.apiHost.secretKey is defined, both must be set." ]] -} - -@test "connectInject/Deployment: fails when global.cloud.scadaAddress.secretName is set but global.cloud.scadaAddress.secretKey is not set." { - cd `chart_dir` - run helm template \ - -s templates/connect-inject-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.scadaAddress.secretName=scada-address-name' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.scadaAddress.secretName or global.cloud.scadaAddress.secretKey is defined, both must be set." ]] -} - -@test "connectInject/Deployment: fails when global.cloud.scadaAddress.secretKey is set but global.cloud.scadaAddress.secretName is not set." { - cd `chart_dir` - run helm template \ - -s templates/connect-inject-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.scadaAddress.secretKey=scada-address-key' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.scadaAddress.secretName or global.cloud.scadaAddress.secretKey is defined, both must be set." 
]] -} - -@test "connectInject/Deployment: sets TLS server name if global.cloud.enabled is set" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/connect-inject-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].command | any(contains("-tls-server-name=server.dc1.consul"))' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "connectInject/Deployment: can provide a TLS server name for the sidecar-injector when global.cloud.enabled is set" { - cd `chart_dir` - local env=$(helm template \ - -s templates/connect-inject-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) - - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_TLS_SERVER_NAME").value' | tee /dev/stderr) - [ "${actual}" = "server.dc1.consul" ] -} diff --git a/charts/consul/test/unit/connect-inject-mutatingwebhookconfiguration.bats b/charts/consul/test/unit/connect-inject-mutatingwebhookconfiguration.bats index 81eda87875..6745e690c3 100755 --- a/charts/consul/test/unit/connect-inject-mutatingwebhookconfiguration.bats +++ b/charts/consul/test/unit/connect-inject-mutatingwebhookconfiguration.bats @@ -2,13 +2,11 @@ load _helpers -@test "connectInject/MutatingWebhookConfiguration: enabled by default" { +@test "connectInject/MutatingWebhookConfiguration: disabled by default" { cd `chart_dir` - local actual=$(helm template \ + assert_empty helm template \ -s templates/connect-inject-mutatingwebhookconfiguration.yaml \ - . | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "true" ] + . } @test "connectInject/MutatingWebhookConfiguration: enable with global.enabled false" { @@ -35,7 +33,6 @@ load _helpers cd `chart_dir` assert_empty helm template \ -s templates/connect-inject-mutatingwebhookconfiguration.yaml \ - --set 'connectInject.enabled=-' \ --set 'global.enabled=false' \ . } @@ -56,19 +53,15 @@ load _helpers local actual=$(helm template \ -s templates/connect-inject-mutatingwebhookconfiguration.yaml \ --set 'connectInject.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'meshGateway.enabled=true' \ --set 'global.peering.enabled=true' \ . 
| tee /dev/stderr | - yq '.webhooks[11].name | contains("peeringacceptors.consul.hashicorp.com")' | tee /dev/stderr) + yq '.webhooks[1].name | contains("peeringacceptors.consul.hashicorp.com")' | tee /dev/stderr) [ "${actual}" = "true" ] local actual=$(helm template \ -s templates/connect-inject-mutatingwebhookconfiguration.yaml \ --set 'connectInject.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'meshGateway.enabled=true' \ --set 'global.peering.enabled=true' \ . | tee /dev/stderr | - yq '.webhooks[12].name | contains("peeringdialers.consul.hashicorp.com")' | tee /dev/stderr) + yq '.webhooks[2].name | contains("peeringdialers.consul.hashicorp.com")' | tee /dev/stderr) [ "${actual}" = "true" ] } diff --git a/charts/consul/test/unit/connect-inject-service.bats b/charts/consul/test/unit/connect-inject-service.bats index 2082ea2c0b..3831793156 100755 --- a/charts/consul/test/unit/connect-inject-service.bats +++ b/charts/consul/test/unit/connect-inject-service.bats @@ -2,13 +2,11 @@ load _helpers -@test "connectInject/Service: enabled by default" { +@test "connectInject/Service: disabled by default" { cd `chart_dir` - local actual=$(helm template \ + assert_empty helm template \ -s templates/connect-inject-service.yaml \ - . | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "true" ] + . } @test "connectInject/Service: enable with global.enabled false" { @@ -35,7 +33,6 @@ load _helpers cd `chart_dir` assert_empty helm template \ -s templates/connect-inject-service.yaml \ - --set 'connectInject.enabled=-' \ --set 'global.enabled=false' \ . } diff --git a/charts/consul/test/unit/connect-inject-serviceaccount.bats b/charts/consul/test/unit/connect-inject-serviceaccount.bats index 2832ebc95d..07b38c3d49 100644 --- a/charts/consul/test/unit/connect-inject-serviceaccount.bats +++ b/charts/consul/test/unit/connect-inject-serviceaccount.bats @@ -2,13 +2,11 @@ load _helpers -@test "connectInject/ServiceAccount: enabled by default" { +@test "connectInject/ServiceAccount: disabled by default" { cd `chart_dir` - local actual=$(helm template \ + assert_empty helm template \ -s templates/connect-inject-serviceaccount.yaml \ - . | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "true" ] + . } @test "connectInject/ServiceAccount: enabled with global.enabled false" { diff --git a/charts/consul/test/unit/connect-injector-disruptionbudget.bats b/charts/consul/test/unit/connect-injector-disruptionbudget.bats index ec8d449821..ec998d0750 100755 --- a/charts/consul/test/unit/connect-injector-disruptionbudget.bats +++ b/charts/consul/test/unit/connect-injector-disruptionbudget.bats @@ -2,13 +2,10 @@ load _helpers -@test "connect-injector/DisruptionBudget: enabled by default" { +@test "connect-injector/DisruptionBudget: disabled by default" { cd `chart_dir` - local actual=$(helm template \ - -s templates/connect-injector-disruptionbudget.yaml \ - . | tee /dev/stderr | - yq -s 'length > 0' | tee /dev/stderr) - [ "${actual}" = "true" ] + assert_empty helm template \ + -s templates/connect-injector-disruptionbudget.yaml . } @test "connect-injector/DisruptionBudget: enabled with connectInject=enabled , connectInject.disruptionBudget.enabled=true and global.enabled=true " { @@ -43,7 +40,6 @@ load _helpers cd `chart_dir` assert_empty helm template \ -s templates/connect-injector-disruptionbudget.yaml \ - --set 'connectInject.enabled=-' \ --set 'global.enabled=false' \ . 
} @@ -163,35 +159,3 @@ load _helpers # no flag to *remove* an API version so some Helm versions will always have # policy/v1 support and will always use that API version. - -#-------------------------------------------------------------------- -# minAvailable - -@test "connect-injector/DisruptionBudget: correct minAvailable when set" { - cd `chart_dir` - local tpl=$(helm template \ - -s templates/connect-injector-disruptionbudget.yaml \ - --set 'connectInject.replicas=1' \ - --set 'global.enabled=true' \ - --set 'connectInject.enabled=true' \ - --set 'connectInject.disruptionBudget.enabled=true' \ - --set 'connectInject.disruptionBudget.minAvailable=1' \ - . | tee /dev/stderr) - [ $(echo "$tpl" | yq '.spec.minAvailable') = "1" ] - [ $(echo "$tpl" | yq '.spec.maxUnavailable') = "null" ] -} - -@test "connect-injector/DisruptionBudget: correct minAvailable when set with maxUnavailable" { - cd `chart_dir` - local tpl=$(helm template \ - -s templates/connect-injector-disruptionbudget.yaml \ - --set 'connectInject.replicas=1' \ - --set 'global.enabled=true' \ - --set 'connectInject.enabled=true' \ - --set 'connectInject.disruptionBudget.enabled=true' \ - --set 'connectInject.disruptionBudget.minAvailable=1' \ - --set 'connectInject.disruptionBudget.maxUnavailable=2' \ - . | tee /dev/stderr) - [ $(echo "$tpl" | yq '.spec.minAvailable') = "1" ] - [ $(echo "$tpl" | yq '.spec.maxUnavailable') = "null" ] -} diff --git a/charts/consul/test/unit/controller-clusterrole.bats b/charts/consul/test/unit/controller-clusterrole.bats new file mode 100644 index 0000000000..708d32d6be --- /dev/null +++ b/charts/consul/test/unit/controller-clusterrole.bats @@ -0,0 +1,231 @@ +#!/usr/bin/env bats + +load _helpers + +@test "controller/ClusterRole: disabled by default" { + cd `chart_dir` + assert_empty helm template \ + -s templates/controller-clusterrole.yaml \ + . +} + +@test "controller/ClusterRole: enabled with controller.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/controller-clusterrole.yaml \ + --set 'controller.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} +#-------------------------------------------------------------------- +# rules + +@test "controller/ClusterRole: sets create, delete, get, list, patch, update and watch access to all CRDs in the consul.hashicorp.com api group" { + cd `chart_dir` + local object=$(helm template \ + -s templates/controller-clusterrole.yaml \ + --set 'controller.enabled=true' \ + . 
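+
+  # Illustrative only: the two tests above pin down the chart's opt-in
+  # gating, i.e. the controller ClusterRole renders only when the
+  # controller is enabled. A minimal values.yaml sketch that turns it on
+  # (assumed from the --set flags used here, not from the chart source):
+  #
+  #   controller:
+  #     enabled: true
+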
| tee /dev/stderr | + yq -r '.rules[0]' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.apiGroups[0]' | tee /dev/stderr) + [ "${actual}" = "consul.hashicorp.com" ] + + local actual=$(echo $object | yq -r '.resources | index("servicedefaults")' | tee /dev/stderr) + [ "${actual}" != null ] + + local actual=$(echo $object | yq -r '.resources | index("serviceresolvers")' | tee /dev/stderr) + [ "${actual}" != null ] + + local actual=$(echo $object | yq -r '.resources | index("proxydefaults")' | tee /dev/stderr) + [ "${actual}" != null ] + + local actual=$(echo $object | yq -r '.resources | index("meshes")' | tee /dev/stderr) + [ "${actual}" != null ] + + local actual=$(echo $object | yq -r '.resources | index("exportedservices")' | tee /dev/stderr) + [ "${actual}" != null ] + + local actual=$(echo $object | yq -r '.resources | index("servicerouters")' | tee /dev/stderr) + [ "${actual}" != null ] + + local actual=$(echo $object | yq -r '.resources | index("servicesplitters")' | tee /dev/stderr) + [ "${actual}" != null ] + + local actual=$(echo $object | yq -r '.resources | index("serviceintentions")' | tee /dev/stderr) + [ "${actual}" != null ] + + local actual=$(echo $object | yq -r '.resources | index("ingressgateways")' | tee /dev/stderr) + [ "${actual}" != null ] + + local actual=$(echo $object | yq -r '.resources | index("terminatinggateways")' | tee /dev/stderr) + [ "${actual}" != null ] + + local actual=$(echo $object | yq -r '.verbs | index("create")' | tee /dev/stderr) + [ "${actual}" != null ] + + local actual=$(echo $object | yq -r '.verbs | index("delete")' | tee /dev/stderr) + [ "${actual}" != null ] + + local actual=$(echo $object | yq -r '.verbs | index("get")' | tee /dev/stderr) + [ "${actual}" != null ] + + local actual=$(echo $object | yq -r '.verbs | index("list")' | tee /dev/stderr) + [ "${actual}" != null ] + + local actual=$(echo $object | yq -r '.verbs | index("patch")' | tee /dev/stderr) + [ "${actual}" != null ] + + local actual=$(echo $object | yq -r '.verbs | index("update")' | tee /dev/stderr) + [ "${actual}" != null ] + + local actual=$(echo $object | yq -r '.verbs | index("watch")' | tee /dev/stderr) + [ "${actual}" != null ] +} + +@test "controller/ClusterRole: sets get, patch, and update to all CRDs status in the consul.hashicorp.com api group" { + cd `chart_dir` + local object=$(helm template \ + -s templates/controller-clusterrole.yaml \ + --set 'controller.enabled=true' \ + . 
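+
+  # Illustrative only: the assertions above walk a single rendered rule.
+  # Its rough shape, reconstructed from the assertions alone (ordering of
+  # entries is an assumption):
+  #
+  #   rules:
+  #   - apiGroups: ["consul.hashicorp.com"]
+  #     resources: ["servicedefaults", "serviceresolvers", "proxydefaults", ...]
+  #     verbs: ["create", "delete", "get", "list", "patch", "update", "watch"]
+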
| tee /dev/stderr |
+      yq -r '.rules[1]' | tee /dev/stderr)
+
+  local actual=$(echo $object | yq -r '.apiGroups[0]' | tee /dev/stderr)
+  [ "${actual}" = "consul.hashicorp.com" ]
+
+  local actual=$(echo $object | yq -r '.resources | index("servicedefaults/status")' | tee /dev/stderr)
+  [ "${actual}" != null ]
+
+  local actual=$(echo $object | yq -r '.resources | index("serviceresolvers/status")' | tee /dev/stderr)
+  [ "${actual}" != null ]
+
+  local actual=$(echo $object | yq -r '.resources | index("proxydefaults/status")' | tee /dev/stderr)
+  [ "${actual}" != null ]
+
+  local actual=$(echo $object | yq -r '.resources | index("meshes/status")' | tee /dev/stderr)
+  [ "${actual}" != null ]
+
+  local actual=$(echo $object | yq -r '.resources | index("exportedservices/status")' | tee /dev/stderr)
+  [ "${actual}" != null ]
+
+  local actual=$(echo $object | yq -r '.resources | index("servicerouters/status")' | tee /dev/stderr)
+  [ "${actual}" != null ]
+
+  local actual=$(echo $object | yq -r '.resources | index("servicesplitters/status")' | tee /dev/stderr)
+  [ "${actual}" != null ]
+
+  local actual=$(echo $object | yq -r '.resources | index("serviceintentions/status")' | tee /dev/stderr)
+  [ "${actual}" != null ]
+
+  local actual=$(echo $object | yq -r '.resources | index("ingressgateways/status")' | tee /dev/stderr)
+  [ "${actual}" != null ]
+
+  local actual=$(echo $object | yq -r '.resources | index("terminatinggateways/status")' | tee /dev/stderr)
+  [ "${actual}" != null ]
+
+  local actual=$(echo $object | yq -r '.verbs | index("get")' | tee /dev/stderr)
+  [ "${actual}" != null ]
+
+  local actual=$(echo $object | yq -r '.verbs | index("patch")' | tee /dev/stderr)
+  [ "${actual}" != null ]
+
+  local actual=$(echo $object | yq -r '.verbs | index("update")' | tee /dev/stderr)
+  [ "${actual}" != null ]
+}
+
+@test "controller/ClusterRole: sets create, get, list, and update access to leases in the coordination.k8s.io api group" {
+  cd `chart_dir`
+  local object=$(helm template \
+      -s templates/controller-clusterrole.yaml \
+      --set 'controller.enabled=true' \
+      . | tee /dev/stderr |
+      yq -r '.rules[2]' | tee /dev/stderr)
+
+  local actual=$(echo $object | yq -r '.resources | index("leases")' | tee /dev/stderr)
+  [ "${actual}" != null ]
+
+  local actual=$(echo $object | yq -r '.apiGroups[0]' | tee /dev/stderr)
+  [ "${actual}" = "coordination.k8s.io" ]
+
+  local actual=$(echo $object | yq -r '.verbs | index("create")' | tee /dev/stderr)
+  [ "${actual}" != null ]
+
+  local actual=$(echo $object | yq -r '.verbs | index("get")' | tee /dev/stderr)
+  [ "${actual}" != null ]
+
+  local actual=$(echo $object | yq -r '.verbs | index("list")' | tee /dev/stderr)
+  [ "${actual}" != null ]
+
+  local actual=$(echo $object | yq -r '.verbs | index("update")' | tee /dev/stderr)
+  [ "${actual}" != null ]
+}
+
+#--------------------------------------------------------------------
+# global.enablePodSecurityPolicies
+
+@test "controller/ClusterRole: no podsecuritypolicies access with global.enablePodSecurityPolicies=false" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/controller-clusterrole.yaml \
+      --set 'controller.enabled=true' \
+      --set 'global.enablePodSecurityPolicies=false' \
+      .
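+
+  # Illustrative only: with global.enablePodSecurityPolicies=true, the pair
+  # of tests around this note expect exactly one extra rule whose first
+  # resource is "podsecuritypolicies". A plausible shape (assumed; only the
+  # resource name is asserted here):
+  #
+  #   - apiGroups: ["policy"]
+  #     resources: ["podsecuritypolicies"]
+  #     verbs: ["use"]
+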
| tee /dev/stderr | + yq '.rules | map(select(.resources[0] == "podsecuritypolicies")) | length' | tee /dev/stderr) + [ "${actual}" = "0" ] +} + +@test "controller/ClusterRole: allows podsecuritypolicies access with global.enablePodSecurityPolicies=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/controller-clusterrole.yaml \ + --set 'controller.enabled=true' \ + --set 'global.enablePodSecurityPolicies=true' \ + . | tee /dev/stderr | + yq '.rules | map(select(.resources[0] == "podsecuritypolicies")) | length' | tee /dev/stderr) + [ "${actual}" = "1" ] +} + +#-------------------------------------------------------------------- +# vault + +@test "controller/ClusterRole: vault sets get, list, watch, and patch access to mutatingwebhookconfigurations when the following are configured - global.secretsBackend.vault.enabled, global.secretsBackend.vault.controllerRole, global.secretsBackend.vault.controller.tlsCert.secretName, and global.secretsBackend.vault.controller.caCert.secretName." { + cd `chart_dir` + local object=$(helm template \ + -s templates/controller-clusterrole.yaml \ + --set 'controller.enabled=true' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=test' \ + --set 'global.secretsBackend.vault.consulServerRole=foo' \ + --set 'global.secretsBackend.vault.consulCARole=carole' \ + --set 'global.secretsBackend.vault.connectInjectRole=inject-ca-role' \ + --set 'global.secretsBackend.vault.connectInject.tlsCert.secretName=pki/issue/connect-webhook-cert-dc1' \ + --set 'global.secretsBackend.vault.connectInject.caCert.secretName=pki/issue/connect-webhook-cert-dc1' \ + --set 'global.secretsBackend.vault.controllerRole=test' \ + --set 'global.secretsBackend.vault.controller.caCert.secretName=foo/ca' \ + --set 'global.secretsBackend.vault.controller.tlsCert.secretName=foo/tls' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=bar' \ + --set 'global.secretsBackend.vault.consulCARole=test2' \ + . | tee /dev/stderr | + yq -r '.rules[3]' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.resources[0]' | tee /dev/stderr) + [ "${actual}" = "mutatingwebhookconfigurations" ] + + local actual=$(echo $object | yq -r '.apiGroups[0]' | tee /dev/stderr) + [ "${actual}" = "admissionregistration.k8s.io" ] + + local actual=$(echo $object | yq -r '.verbs | index("get")' | tee /dev/stderr) + [ "${actual}" != null ] + + local actual=$(echo $object | yq -r '.verbs | index("list")' | tee /dev/stderr) + [ "${actual}" != null ] + + local actual=$(echo $object | yq -r '.verbs | index("patch")' | tee /dev/stderr) + [ "${actual}" != null ] + + local actual=$(echo $object | yq -r '.verbs | index("watch")' | tee /dev/stderr) + [ "${actual}" != null ] +} diff --git a/charts/consul/test/unit/controller-clusterrolebinding.bats b/charts/consul/test/unit/controller-clusterrolebinding.bats new file mode 100644 index 0000000000..b9777c95c6 --- /dev/null +++ b/charts/consul/test/unit/controller-clusterrolebinding.bats @@ -0,0 +1,20 @@ +#!/usr/bin/env bats + +load _helpers + +@test "controller/ClusterRoleBinding: disabled by default" { + cd `chart_dir` + assert_empty helm template \ + -s templates/controller-clusterrolebinding.yaml \ + . +} + +@test "controller/ClusterRoleBinding: enabled with controller.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/controller-clusterrolebinding.yaml \ + --set 'controller.enabled=true' \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} diff --git a/charts/consul/test/unit/controller-deployment.bats b/charts/consul/test/unit/controller-deployment.bats new file mode 100644 index 0000000000..87bb98b1f9 --- /dev/null +++ b/charts/consul/test/unit/controller-deployment.bats @@ -0,0 +1,1236 @@ +#!/usr/bin/env bats + +load _helpers + +@test "controller/Deployment: disabled by default" { + cd `chart_dir` + assert_empty helm template \ + -s templates/controller-deployment.yaml \ + . +} + +@test "controller/Deployment: enabled with controller.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/controller-deployment.yaml \ + --set 'controller.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "controller/Deployment: command defaults" { + cd `chart_dir` + local cmd=$(helm template \ + -s templates/controller-deployment.yaml \ + --set 'controller.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) + + local actual=$(echo "$cmd" | + yq 'any(contains("consul-k8s-control-plane controller"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo "$cmd" | + yq 'any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# resourcePrefix + +@test "controller/Deployment: resource-prefix flag is set on command" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/controller-deployment.yaml \ + --set 'controller.enabled=true' \ + --set 'global.enableConsulNamespaces=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-resource-prefix=release-name-consul"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# enable-webhook-ca-update + +@test "controller/Deployment: enable-webhook-ca-update flag is not set on command by default" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/controller-deployment.yaml \ + --set 'controller.enabled=true' \ + --set 'global.enableConsulNamespaces=true' \ + . 
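+
+  # Illustrative only: per the "command defaults" test above, the rendered
+  # container entrypoint looks roughly like this (flag order, and any flags
+  # beyond the ones asserted in this file, are assumptions):
+  #
+  #   consul-k8s-control-plane controller \
+  #     -log-level=info \
+  #     -consul-api-timeout=5s
+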
| tee /dev/stderr |
+      yq '.spec.template.spec.containers[0].command | any(contains("-enable-webhook-ca-update"))' | tee /dev/stderr)
+  [ "${actual}" = "false" ]
+}
+
+@test "controller/Deployment: enable-webhook-ca-update flag is set on command when using vault" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/controller-deployment.yaml \
+      --set 'controller.enabled=true' \
+      --set 'global.enableConsulNamespaces=true' \
+      --set 'global.secretsBackend.vault.enabled=true' \
+      --set 'global.secretsBackend.vault.consulClientRole=foo' \
+      --set 'global.secretsBackend.vault.consulServerRole=test' \
+      --set 'global.secretsBackend.vault.consulCARole=test' \
+      --set 'global.secretsBackend.vault.ca.secretKey=tls.crt' \
+      --set 'global.secretsBackend.vault.connectInjectRole=inject-ca-role' \
+      --set 'global.secretsBackend.vault.connectInject.tlsCert.secretName=pki/issue/connect-webhook-cert-dc1' \
+      --set 'global.secretsBackend.vault.connectInject.caCert.secretName=pki/issue/connect-webhook-cert-dc1' \
+      --set 'global.secretsBackend.vault.controllerRole=test' \
+      --set 'global.secretsBackend.vault.controller.caCert.secretName=foo/ca' \
+      --set 'global.secretsBackend.vault.controller.tlsCert.secretName=foo/tls' \
+      --set 'global.secretsBackend.vault.consulClientRole=foo' \
+      --set 'global.secretsBackend.vault.consulServerRole=bar' \
+      --set 'global.secretsBackend.vault.consulCARole=test2' \
+      . | tee /dev/stderr |
+      yq '.spec.template.spec.containers[0].command | any(contains("-enable-webhook-ca-update"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+#--------------------------------------------------------------------
+# replicas
+
+@test "controller/Deployment: replicas defaults to 1" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/controller-deployment.yaml \
+      --set 'controller.enabled=true' \
+      . | tee /dev/stderr |
+      yq '.spec.replicas' | tee /dev/stderr)
+  [ "${actual}" = "1" ]
+}
+
+@test "controller/Deployment: can set replicas" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/controller-deployment.yaml \
+      --set 'controller.enabled=true' \
+      --set 'controller.replicas=2' \
+      . | tee /dev/stderr |
+      yq '.spec.replicas' | tee /dev/stderr)
+  [ "${actual}" = "2" ]
+}
+
+#--------------------------------------------------------------------
+# global.acls.manageSystemACLs
+
+@test "controller/Deployment: consul-logout preStop hook is added when ACLs are enabled" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/controller-deployment.yaml \
+      --set 'controller.enabled=true' \
+      --set 'global.acls.manageSystemACLs=true' \
+      . | tee /dev/stderr |
+      yq '[.spec.template.spec.containers[0].lifecycle.preStop.exec.command[2]] | any(contains("consul-k8s-control-plane consul-logout -consul-api-timeout=5s"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+@test "controller/Deployment: CONSUL_HTTP_TOKEN_FILE is not set when acls are disabled" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/controller-deployment.yaml \
+      --set 'controller.enabled=true' \
+      . | tee /dev/stderr |
+      yq '[.spec.template.spec.containers[0].env[0].name] | any(contains("CONSUL_HTTP_TOKEN_FILE"))' | tee /dev/stderr)
+  [ "${actual}" = "false" ]
+}
+
+@test "controller/Deployment: CONSUL_HTTP_TOKEN_FILE is set when acls are enabled" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/controller-deployment.yaml \
+      --set 'controller.enabled=true' \
+      --set 'global.acls.manageSystemACLs=true' \
+      .
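+
+  # Illustrative only: the ACL tests in this section expect a consul-logout
+  # preStop hook plus a token file handed to the container via env, roughly
+  # as below (the file path is an assumption, not asserted in this file):
+  #
+  #   env:
+  #   - name: CONSUL_HTTP_TOKEN_FILE
+  #     value: /consul/login/acl-token
+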
| tee /dev/stderr | + yq '[.spec.template.spec.containers[0].env[0].name] | any(contains("CONSUL_HTTP_TOKEN_FILE"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "controller/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command and environment with tls disabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/controller-deployment.yaml \ + --set 'controller.enabled=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[0]' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.name' | tee /dev/stderr) + [ "${actual}" = "controller-acl-init" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[1].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[1].value] | any(contains("http://$(HOST_IP):8500"))' | tee /dev/stderr) + echo $actual + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "controller/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command and environment with tls enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/controller-deployment.yaml \ + --set 'controller.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "controller-acl-init")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[1].name] | any(contains("CONSUL_CACERT"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].value] | any(contains("https://$(HOST_IP):8501"))' | tee /dev/stderr) + echo $actual + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.volumeMounts[1] | any(contains("consul-ca-cert"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "controller/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command with Partitions enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/controller-deployment.yaml \ + --set 'controller.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.enableConsulNamespaces=true' \ + --set 'global.adminPartitions.enabled=true' \ + --set 'global.adminPartitions.name=default' \ + --set 'global.acls.manageSystemACLs=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "controller-acl-init")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-acl-auth-method=release-name-consul-k8s-component-auth-method"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-partition=default"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[1].name] | any(contains("CONSUL_CACERT"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].value] | any(contains("https://$(HOST_IP):8501"))' | tee /dev/stderr) + echo $actual + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.volumeMounts[1] | any(contains("consul-ca-cert"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "controller/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command and environment with tls enabled and autoencrypt enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/controller-deployment.yaml \ + --set 'controller.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "controller-acl-init")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[1].name] | any(contains("CONSUL_CACERT"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].value] | any(contains("https://$(HOST_IP):8501"))' | tee /dev/stderr) + echo $actual + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.volumeMounts[1] | any(contains("consul-auto-encrypt-ca-cert"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "controller/Deployment: auto-encrypt init container is created and is the first init-container when global.acls.manageSystemACLs=true and has correct command and environment with tls enabled and autoencrypt enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/controller-deployment.yaml \ + --set 'controller.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . 
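+
+  # Illustrative only: the controller-acl-init assertions above compose to
+  # roughly this command line (flag ordering assumed):
+  #
+  #   consul-k8s-control-plane acl-init \
+  #     -acl-auth-method=release-name-consul-k8s-component-auth-method \
+  #     -partition=default \
+  #     -consul-api-timeout=5s
+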
| tee /dev/stderr | + yq '.spec.template.spec.initContainers[0]' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.name' | tee /dev/stderr) + [ "${actual}" = "get-auto-encrypt-client-ca" ] +} + +@test "controller/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command when federation enabled in non-primary datacenter" { + cd `chart_dir` + local object=$(helm template \ + -s templates/controller-deployment.yaml \ + --set 'controller.enabled=true' \ + --set 'global.datacenter=dc2' \ + --set 'global.federation.enabled=true' \ + --set 'global.federation.primaryDatacenter=dc1' \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "controller-acl-init")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-acl-auth-method=release-name-consul-k8s-component-auth-method-dc2"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-primary-datacenter=dc1"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# global.tls.enabled + +@test "controller/Deployment: Adds tls-ca-cert volume when global.tls.enabled is true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/controller-deployment.yaml \ + --set 'controller.enabled=true' \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.volumes[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) + [ "${actual}" != "" ] +} + +@test "controller/Deployment: Adds tls-ca-cert volumeMounts when global.tls.enabled is true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/controller-deployment.yaml \ + --set 'controller.enabled=true' \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) + [ "${actual}" != "" ] +} + +@test "controller/Deployment: can overwrite CA secret with the provided one" { + cd `chart_dir` + local ca_cert_volume=$(helm template \ + -s templates/controller-deployment.yaml \ + --set 'controller.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.caCert.secretName=foo-ca-cert' \ + --set 'global.tls.caCert.secretKey=key' \ + --set 'global.tls.caKey.secretName=foo-ca-key' \ + --set 'global.tls.caKey.secretKey=key' \ + . 
| tee /dev/stderr |
+      yq '.spec.template.spec.volumes[] | select(.name=="consul-ca-cert")' | tee /dev/stderr)
+
+  # check that the provided ca cert secret is attached as a volume
+  local actual
+  actual=$(echo $ca_cert_volume | jq -r '.secret.secretName' | tee /dev/stderr)
+  [ "${actual}" = "foo-ca-cert" ]
+
+  # check that the volume uses the provided secret key
+  actual=$(echo $ca_cert_volume | jq -r '.secret.items[0].key' | tee /dev/stderr)
+  [ "${actual}" = "key" ]
+}
+
+@test "controller/Deployment: Adds -webhook-tls-cert-dir=/tmp/controller-webhook/certs to command" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/controller-deployment.yaml \
+      --set 'controller.enabled=true' \
+      --set 'global.tls.enabled=true' \
+      . | tee /dev/stderr |
+      yq '.spec.template.spec.containers[0].command | any(contains("-webhook-tls-cert-dir=/tmp/controller-webhook/certs"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+#--------------------------------------------------------------------
+# global.tls.enableAutoEncrypt
+
+@test "controller/Deployment: consul-auto-encrypt-ca-cert volume is added when TLS with auto-encrypt is enabled" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/controller-deployment.yaml \
+      --set 'controller.enabled=true' \
+      --set 'global.tls.enabled=true' \
+      --set 'global.tls.enableAutoEncrypt=true' \
+      . | tee /dev/stderr |
+      yq '.spec.template.spec.volumes[] | select(.name == "consul-auto-encrypt-ca-cert") | length > 0' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+@test "controller/Deployment: consul-auto-encrypt-ca-cert volumeMount is added when TLS with auto-encrypt is enabled" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/controller-deployment.yaml \
+      --set 'controller.enabled=true' \
+      --set 'global.tls.enabled=true' \
+      --set 'global.tls.enableAutoEncrypt=true' \
+      . | tee /dev/stderr |
+      yq '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "consul-auto-encrypt-ca-cert") | length > 0' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+@test "controller/Deployment: get-auto-encrypt-client-ca init container is created when TLS with auto-encrypt is enabled" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/controller-deployment.yaml \
+      --set 'controller.enabled=true' \
+      --set 'global.tls.enabled=true' \
+      --set 'global.tls.enableAutoEncrypt=true' \
+      . | tee /dev/stderr |
+      yq '.spec.template.spec.initContainers[] | select(.name == "get-auto-encrypt-client-ca") | length > 0' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+@test "controller/Deployment: adds both init containers when TLS with auto-encrypt and ACLs are enabled" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/controller-deployment.yaml \
+      --set 'controller.enabled=true' \
+      --set 'global.acls.manageSystemACLs=true' \
+      --set 'global.tls.enabled=true' \
+      --set 'global.tls.enableAutoEncrypt=true' \
+      .
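+
+  # Illustrative only: the "can overwrite CA secret" test above corresponds
+  # to a values.yaml override like this sketch:
+  #
+  #   global:
+  #     tls:
+  #       enabled: true
+  #       caCert:
+  #         secretName: foo-ca-cert
+  #         secretKey: key
+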
| tee /dev/stderr | + yq '.spec.template.spec.initContainers | length == 2' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "controller/Deployment: consul-ca-cert volume is not added if externalServers.enabled=true and externalServers.useSystemRoots=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/controller-deployment.yaml \ + --set 'controller.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'externalServers.enabled=true' \ + --set 'externalServers.hosts[0]=foo.com' \ + --set 'externalServers.useSystemRoots=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.volumes[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) + [ "${actual}" = "" ] +} + +#-------------------------------------------------------------------- +# partitions + +@test "controller/Deployment: partitions options disabled by default" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/controller-deployment.yaml \ + --set 'controller.enabled=true' \ + --set 'global.enableConsulNamespaces=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("partition"))' | tee /dev/stderr) + + [ "${actual}" = "false" ] +} + +@test "controller/Deployment: partition name set with .global.adminPartitions.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/controller-deployment.yaml \ + --set 'controller.enabled=true' \ + --set 'global.adminPartitions.enabled=true' \ + --set 'global.enableConsulNamespaces=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("partition=default"))' | tee /dev/stderr) + + [ "${actual}" = "true" ] +} + +@test "controller/Deployment: fails if namespaces are disabled and .global.adminPartitions.enabled=true" { + cd `chart_dir` + run helm template \ + -s templates/controller-deployment.yaml \ + --set 'global.adminPartitions.enabled=true' \ + --set 'global.enableConsulNamespaces=false' \ + --set 'controller.enabled=true' . + [ "$status" -eq 1 ] + [[ "$output" =~ "global.enableConsulNamespaces must be true if global.adminPartitions.enabled=true" ]] +} +#-------------------------------------------------------------------- +# namespaces + +@test "controller/Deployment: namespace options disabled by default" { + cd `chart_dir` + local object=$(helm template \ + -s templates/controller-deployment.yaml \ + --set 'controller.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) + + local actual=$(echo $object | + yq 'any(contains("enable-namespaces"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq 'any(contains("consul-destination-namespace"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq 'any(contains("enable-k8s-namespace-mirroring"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq 'any(contains("k8s-namespace-mirroring-prefix"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "controller/Deployment: namespace options set with .global.enableConsulNamespaces=true" { + cd `chart_dir` + local object=$(helm template \ + -s templates/controller-deployment.yaml \ + --set 'controller.enabled=true' \ + --set 'global.enableConsulNamespaces=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) + + local actual=$(echo $object | + yq 'any(contains("enable-namespaces=true"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq 'any(contains("consul-destination-namespace=default"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq 'any(contains("enable-k8s-namespace-mirroring"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | + yq 'any(contains("k8s-namespace-mirroring-prefix"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "controller/Deployment: mirroring options set with connectInject.consulNamespaces.mirroringK8S=true" { + cd `chart_dir` + local object=$(helm template \ + -s templates/controller-deployment.yaml \ + --set 'controller.enabled=true' \ + --set 'global.enableConsulNamespaces=true' \ + --set 'connectInject.consulNamespaces.mirroringK8S=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) + + local actual=$(echo $object | + yq 'any(contains("enable-namespaces=true"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq 'any(contains("consul-destination-namespace=default"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq 'any(contains("enable-k8s-namespace-mirroring=true"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq 'any(contains("k8s-namespace-mirroring-prefix"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "controller/Deployment: prefix can be set with connectInject.consulNamespaces.mirroringK8SPrefix" { + cd `chart_dir` + local object=$(helm template \ + -s templates/controller-deployment.yaml \ + --set 'controller.enabled=true' \ + --set 'global.enableConsulNamespaces=true' \ + --set 'connectInject.consulNamespaces.mirroringK8S=true' \ + --set 'connectInject.consulNamespaces.mirroringK8SPrefix=k8s-' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) + + local actual=$(echo $object | + yq 'any(contains("enable-namespaces=true"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq 'any(contains("consul-destination-namespace=default"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq 'any(contains("enable-k8s-namespace-mirroring=true"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq 'any(contains("k8s-namespace-mirroring-prefix=k8s-"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "controller/Deployment: cross namespace policy is not added when global.acls.manageSystemACLs=false" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/controller-deployment.yaml \ + --set 'controller.enabled=true' \ + --set 'global.enableConsulNamespaces=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-consul-cross-namespace-acl-policy"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "controller/Deployment: cross namespace policy is added when global.acls.manageSystemACLs=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/controller-deployment.yaml \ + --set 'controller.enabled=true' \ + --set 'global.enableConsulNamespaces=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . 
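+
+  # Illustrative only: with namespaces and mirroring enabled, the flags
+  # asserted above add up to roughly:
+  #
+  #   -enable-namespaces=true \
+  #   -consul-destination-namespace=default \
+  #   -enable-k8s-namespace-mirroring=true \
+  #   -k8s-namespace-mirroring-prefix=k8s-
+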
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-consul-cross-namespace-acl-policy"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# affinity + +@test "controller/Deployment: affinity not set by default" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/controller-deployment.yaml \ + --set 'controller.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.affinity == null' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "controller/Deployment: affinity can be set" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/controller-deployment.yaml \ + --set 'controller.enabled=true' \ + --set 'controller.affinity=foobar' \ + . | tee /dev/stderr | + yq '.spec.template.spec | .affinity == "foobar"' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# nodeSelector + +@test "controller/Deployment: nodeSelector is not set by default" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/controller-deployment.yaml \ + --set 'controller.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.nodeSelector' | tee /dev/stderr) + [ "${actual}" = "null" ] +} + +@test "controller/Deployment: nodeSelector can be set" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/controller-deployment.yaml \ + --set 'controller.enabled=true' \ + --set 'controller.nodeSelector=testing' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.nodeSelector' | tee /dev/stderr) + [ "${actual}" = "testing" ] +} + +#-------------------------------------------------------------------- +# tolerations + +@test "controller/Deployment: tolerations not set by default" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/controller-deployment.yaml \ + --set 'controller.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.tolerations == null' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "controller/Deployment: tolerations can be set" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/controller-deployment.yaml \ + --set 'controller.enabled=true' \ + --set 'controller.tolerations=foobar' \ + . | tee /dev/stderr | + yq '.spec.template.spec | .tolerations == "foobar"' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# priorityClassName + +@test "controller/Deployment: priorityClassName not set by default" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/controller-deployment.yaml \ + --set 'controller.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.priorityClassName == null' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "controller/Deployment: priorityClassName can be set" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/controller-deployment.yaml \ + --set 'controller.enabled=true' \ + --set 'controller.priorityClassName=foobar' \ + . | tee /dev/stderr | + yq '.spec.template.spec | .priorityClassName == "foobar"' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# resources + +@test "controller/Deployment: default resources" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/controller-deployment.yaml \ + --set 'controller.enabled=true' \ + . 
| tee /dev/stderr | + yq -rc '.spec.template.spec.containers[0].resources' | tee /dev/stderr) + [ "${actual}" = '{"limits":{"cpu":"100m","memory":"50Mi"},"requests":{"cpu":"100m","memory":"50Mi"}}' ] +} + +@test "controller/Deployment: can set resources" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/controller-deployment.yaml \ + --set 'controller.enabled=true' \ + --set 'controller.resources.requests.memory=100Mi' \ + --set 'controller.resources.requests.cpu=100m' \ + --set 'controller.resources.limits.memory=200Mi' \ + --set 'controller.resources.limits.cpu=200m' \ + . | tee /dev/stderr | + yq -rc '.spec.template.spec.containers[0].resources' | tee /dev/stderr) + [ "${actual}" = '{"limits":{"cpu":"200m","memory":"200Mi"},"requests":{"cpu":"100m","memory":"100Mi"}}' ] +} + +#-------------------------------------------------------------------- +# aclToken + +@test "controller/Deployment: aclToken enabled when secretName and secretKey is provided" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/controller-deployment.yaml \ + --set 'controller.enabled=true' \ + --set 'controller.aclToken.secretName=foo' \ + --set 'controller.aclToken.secretKey=bar' \ + . | tee /dev/stderr | + yq '[.spec.template.spec.containers[0].env[].name] | any(contains("CONSUL_HTTP_TOKEN"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "controller/Deployment: aclToken env is set when ACLs are enabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/controller-deployment.yaml \ + --set 'controller.enabled=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '[.spec.template.spec.containers[0].env[].name] | any(contains("CONSUL_HTTP_TOKEN"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "controller/Deployment: aclToken env is not set when ACLs are disabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/controller-deployment.yaml \ + --set 'controller.enabled=true' \ + . | tee /dev/stderr | + yq '[.spec.template.spec.containers[0].env[].name] | any(contains("CONSUL_HTTP_TOKEN"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +#-------------------------------------------------------------------- +# logLevel + +@test "controller/Deployment: logLevel info by default from global" { + cd `chart_dir` + local cmd=$(helm template \ + -s templates/controller-deployment.yaml \ + --set 'controller.enabled=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec' | tee /dev/stderr) + + local actual=$(echo "$cmd" | + yq '.containers[0].command | any(contains("-log-level=info"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo "$cmd" | + yq '.initContainers[0].command | any(contains("-log-level=info"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "controller/Deployment: logLevel can be overridden" { + cd `chart_dir` + local cmd=$(helm template \ + -s templates/controller-deployment.yaml \ + --set 'controller.enabled=true' \ + --set 'controller.logLevel=error' \ + --set 'global.acls.manageSystemACLs=true' \ + . 
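+
+  # Illustrative only: the resource overrides asserted above come from
+  # values like this sketch:
+  #
+  #   controller:
+  #     resources:
+  #       requests:
+  #         memory: "100Mi"
+  #         cpu: "100m"
+  #       limits:
+  #         memory: "200Mi"
+  #         cpu: "200m"
+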
| tee /dev/stderr | + yq '.spec.template.spec' | tee /dev/stderr) + + local actual=$(echo "$cmd" | + yq '.containers[0].command | any(contains("-log-level=error"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo "$cmd" | + yq '.initContainers[0].command | any(contains("-log-level=error"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# get-auto-encrypt-client-ca + +@test "controller/Deployment: get-auto-encrypt-client-ca uses server's stateful set address by default and passes ca cert" { + cd `chart_dir` + local command=$(helm template \ + -s templates/controller-deployment.yaml \ + --set 'controller.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "get-auto-encrypt-client-ca").command | join(" ")' | tee /dev/stderr) + + # check server address + actual=$(echo $command | jq ' . | contains("-server-addr=release-name-consul-server")') + [ "${actual}" = "true" ] + + # check server port + actual=$(echo $command | jq ' . | contains("-server-port=8501")') + [ "${actual}" = "true" ] + + # check server's CA cert + actual=$(echo $command | jq ' . | contains("-ca-file=/consul/tls/ca/tls.crt")') + [ "${actual}" = "true" ] + + # check consul-api-timeout + actual=$(echo $command | jq ' . | contains("-consul-api-timeout=5s")') + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# Vault + +@test "controller/Deployment: vault CA is not configured by default" { + cd `chart_dir` + local object=$(helm template \ + -s templates/controller-deployment.yaml \ + --set 'controller.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.secretsBackend.vault.consulCARole=test' \ + . | tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/agent-extra-secret")') + [ "${actual}" = "false" ] + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/ca-cert")') + [ "${actual}" = "false" ] +} + +@test "controller/Deployment: vault CA is not configured when secretName is set but secretKey is not" { + cd `chart_dir` + local object=$(helm template \ + -s templates/controller-deployment.yaml \ + --set 'controller.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.secretsBackend.vault.consulCARole=test' \ + --set 'global.secretsBackend.vault.ca.secretName=ca' \ + . 
| tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/agent-extra-secret")') + [ "${actual}" = "false" ] + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/ca-cert")') + [ "${actual}" = "false" ] +} + +@test "controller/Deployment: vault CA is not configured when secretKey is set but secretName is not" { + cd `chart_dir` + local object=$(helm template \ + -s templates/controller-deployment.yaml \ + --set 'controller.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.secretsBackend.vault.consulCARole=test' \ + --set 'global.secretsBackend.vault.ca.secretKey=tls.crt' \ + . | tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/agent-extra-secret")') + [ "${actual}" = "false" ] + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/ca-cert")') + [ "${actual}" = "false" ] +} + +@test "controller/Deployment: vault CA is configured when both secretName and secretKey are set" { + cd `chart_dir` + local object=$(helm template \ + -s templates/controller-deployment.yaml \ + --set 'controller.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.secretsBackend.vault.consulCARole=test' \ + --set 'global.secretsBackend.vault.ca.secretName=ca' \ + --set 'global.secretsBackend.vault.ca.secretKey=tls.crt' \ + . 
| tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.metadata.annotations."vault.hashicorp.com/agent-extra-secret"') + [ "${actual}" = "ca" ] + local actual=$(echo $object | yq -r '.metadata.annotations."vault.hashicorp.com/ca-cert"') + [ "${actual}" = "/vault/custom/tls.crt" ] +} + +@test "controller/Deployment: vault tls annotations are set when tls is enabled" { + cd `chart_dir` + local cmd=$(helm template \ + -s templates/controller-deployment.yaml \ + --set 'controller.enabled=true' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=bar' \ + --set 'global.secretsBackend.vault.consulCARole=test2' \ + --set 'global.secretsBackend.vault.controllerRole=test' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'server.serverCert.secretName=pki_int/issue/test' \ + --set 'global.tls.caCert.secretName=pki_int/cert/ca' \ + --set 'global.secretsBackend.vault.connectInjectRole=inject-ca-role' \ + --set 'global.secretsBackend.vault.connectInject.tlsCert.secretName=pki/issue/connect-webhook-cert-dc1' \ + --set 'global.secretsBackend.vault.connectInject.caCert.secretName=pki/issue/connect-webhook-cert-dc1' \ + --set 'global.secretsBackend.vault.controllerRole=test' \ + --set 'global.secretsBackend.vault.controller.caCert.secretName=foo/ca' \ + --set 'global.secretsBackend.vault.controller.tlsCert.secretName=pki/issue/controller-webhook-cert-dc1' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=bar' \ + --set 'global.secretsBackend.vault.consulCARole=test2' \ + . | tee /dev/stderr | + yq -r '.spec.template.metadata' | tee /dev/stderr) + + local actual="$(echo $cmd | + yq -r '.annotations["vault.hashicorp.com/agent-inject-template-serverca.crt"]' | tee /dev/stderr)" + local expected=$'{{- with secret \"pki_int/cert/ca\" -}}\n{{- .Data.certificate -}}\n{{- end -}}' + [ "${actual}" = "${expected}" ] + + local actual="$(echo $cmd | + yq -r '.annotations["vault.hashicorp.com/agent-inject-secret-serverca.crt"]' | tee /dev/stderr)" + [ "${actual}" = "pki_int/cert/ca" ] + + local actual="$(echo $cmd | + yq -r '.annotations["vault.hashicorp.com/agent-inject-template-ca.crt"]' | tee /dev/stderr)" + local expected=$'{{- with secret \"foo/ca\" -}}\n{{- .Data.certificate -}}\n{{- end -}}' + [ "${actual}" = "${expected}" ] + + local actual="$(echo $cmd | + yq -r '.annotations["vault.hashicorp.com/agent-inject-secret-ca.crt"]' | tee /dev/stderr)" + [ "${actual}" = "foo/ca" ] + + local actual="$(echo $cmd | + yq -r '.annotations["vault.hashicorp.com/secret-volume-path-ca.crt"]' | tee /dev/stderr)" + [ "${actual}" = "/vault/secrets/controller-webhook/certs" ] + + local actual="$(echo $cmd | + yq -r '.annotations["vault.hashicorp.com/agent-init-first"]' | tee /dev/stderr)" + [ "${actual}" = "true" ] + + local actual="$(echo $cmd | + yq -r '.annotations["vault.hashicorp.com/agent-inject"]' | tee /dev/stderr)" + [ "${actual}" = "true" ] + + local actual="$(echo $cmd | + yq -r '.annotations["vault.hashicorp.com/role"]' | tee /dev/stderr)" + [ "${actual}" = "test" ] + + local actual="$(echo $cmd | + yq -r '.annotations["vault.hashicorp.com/agent-inject-secret-tls.crt"]' | tee /dev/stderr)" + [ "${actual}" = "pki/issue/controller-webhook-cert-dc1" ] + + local actual="$(echo $cmd | + yq -r 
'.annotations["vault.hashicorp.com/agent-inject-template-tls.crt"]' | tee /dev/stderr)" + local expected=$'{{- with secret \"pki/issue/controller-webhook-cert-dc1\" \"common_name=release-name-consul-controller-webhook\"\n\"alt_names=release-name-consul-controller-webhook,release-name-consul-controller-webhook.default,release-name-consul-controller-webhook.default.svc,release-name-consul-controller-webhook.default.svc.cluster.local\" -}}\n{{- .Data.certificate -}}\n{{- end -}}' + [ "${actual}" = "${expected}" ] + + local actual="$(echo $cmd | + yq -r '.annotations["vault.hashicorp.com/secret-volume-path-tls.crt"]' | tee /dev/stderr)" + [ "${actual}" = "/vault/secrets/controller-webhook/certs" ] + + local actual="$(echo $cmd | + yq -r '.annotations["vault.hashicorp.com/agent-inject-secret-tls.key"]' | tee /dev/stderr)" + [ "${actual}" = "pki/issue/controller-webhook-cert-dc1" ] + + local actual="$(echo $cmd | + yq -r '.annotations["vault.hashicorp.com/agent-inject-template-tls.key"]' | tee /dev/stderr)" + local expected=$'{{- with secret \"pki/issue/controller-webhook-cert-dc1\" \"common_name=release-name-consul-controller-webhook\"\n\"alt_names=release-name-consul-controller-webhook,release-name-consul-controller-webhook.default,release-name-consul-controller-webhook.default.svc,release-name-consul-controller-webhook.default.svc.cluster.local\" -}}\n{{- .Data.private_key -}}\n{{- end -}}' + [ "${actual}" = "${expected}" ] + + local actual="$(echo $cmd | + yq -r '.annotations["vault.hashicorp.com/secret-volume-path-tls.key"]' | tee /dev/stderr)" + [ "${actual}" = "/vault/secrets/controller-webhook/certs" ] +} + +@test "controller/Deployment: vault does not add cert volume when global.tls.enabled is true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/controller-deployment.yaml \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=bar' \ + --set 'global.secretsBackend.vault.consulCARole=test' \ + --set 'controller.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.connectInjectRole=inject-ca-role' \ + --set 'global.secretsBackend.vault.connectInject.tlsCert.secretName=pki/issue/connect-webhook-cert-dc1' \ + --set 'global.secretsBackend.vault.connectInject.caCert.secretName=pki/issue/connect-webhook-cert-dc1' \ + --set 'global.secretsBackend.vault.controllerRole=test' \ + --set 'global.secretsBackend.vault.controller.caCert.secretName=foo/ca' \ + --set 'global.secretsBackend.vault.controller.tlsCert.secretName=foo/tls' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=bar' \ + --set 'global.secretsBackend.vault.consulCARole=test2' \ + . 
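
The assertions above decode Vault Agent injector annotation pairs: `agent-inject-secret-<name>` names the Vault path, while the sibling `agent-inject-template-<name>` carries the Consul Template snippet that renders the secret into the pod. A stripped-down version of the same check outside bats, assuming the python yq wrapper; the manifest is a hypothetical stand-in for chart output:

    manifest='
    metadata:
      annotations:
        vault.hashicorp.com/agent-inject-secret-serverca.crt: pki_int/cert/ca
        vault.hashicorp.com/agent-inject-template-serverca.crt: |-
          {{- with secret "pki_int/cert/ca" -}}
          {{- .Data.certificate -}}
          {{- end -}}
    '
    echo "$manifest" | yq -r '.metadata.annotations["vault.hashicorp.com/agent-inject-secret-serverca.crt"]'
    # => pki_int/cert/ca
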
| tee /dev/stderr | + yq '.spec.template.spec.volumes[] | select(.name == "cert")' | tee /dev/stderr) + [ "${actual}" == "" ] +} + +@test "controller/Deployment: vault does not add cert volumeMounts when global.tls.enabled is true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/controller-deployment.yaml \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=bar' \ + --set 'global.secretsBackend.vault.consulCARole=test' \ + --set 'controller.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.connectInjectRole=inject-ca-role' \ + --set 'global.secretsBackend.vault.connectInject.tlsCert.secretName=pki/issue/connect-webhook-cert-dc1' \ + --set 'global.secretsBackend.vault.connectInject.caCert.secretName=pki/issue/connect-webhook-cert-dc1' \ + --set 'global.secretsBackend.vault.controllerRole=test' \ + --set 'global.secretsBackend.vault.controller.caCert.secretName=foo/ca' \ + --set 'global.secretsBackend.vault.controller.tlsCert.secretName=foo/tls' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=bar' \ + --set 'global.secretsBackend.vault.consulCARole=test2' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "cert")' | tee /dev/stderr) + [ "${actual}" == "" ] +} + +@test "controller/Deployment: vault webhook-tls-cert-dir flag is set to /vault/secrets" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/controller-deployment.yaml \ + --set 'controller.enabled=true' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=bar' \ + --set 'global.secretsBackend.vault.consulCARole=test' \ + --set 'global.secretsBackend.vault.connectInjectRole=inject-ca-role' \ + --set 'global.secretsBackend.vault.connectInject.tlsCert.secretName=pki/issue/connect-webhook-cert-dc1' \ + --set 'global.secretsBackend.vault.connectInject.caCert.secretName=pki/issue/connect-webhook-cert-dc1' \ + --set 'global.secretsBackend.vault.controllerRole=test' \ + --set 'global.secretsBackend.vault.controller.caCert.secretName=foo/ca' \ + --set 'global.secretsBackend.vault.controller.tlsCert.secretName=foo/tls' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=bar' \ + --set 'global.secretsBackend.vault.consulCARole=test2' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-webhook-tls-cert-dir=/vault/secrets"))' | tee /dev/stderr) + + [ "${actual}" = "true" ] +} + +@test "controller/Deployment: fails if vault is enabled and global.secretsBackend.vault.controllerRole is set but global.secretsBackend.vault.connectInject.tlsCert.secretName and global.secretsBackend.vault.connectInject.caCert.secretName are not" { + cd `chart_dir` + run helm template \ + -s templates/controller-deployment.yaml \ + --set 'controller.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=test' \ + --set 'global.secretsBackend.vault.consulServerRole=foo' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.consulCARole=carole' \ + --set 'global.secretsBackend.vault.controllerRole=controllerinjectcarole' \ + --set 'global.secretsBackend.vault.agentAnnotations=foo: bar' . + [ "$status" -eq 1 ] + [[ "$output" =~ "When one of the following has been set, all must be set: global.secretsBackend.vault.connectInjectRole, global.secretsBackend.vault.connectInject.tlsCert.secretName, global.secretsBackend.vault.connectInject.caCert.secretName, global.secretsBackend.vault.controllerRole, global.secretsBackend.vault.controller.tlsCert.secretName, and global.secretsBackend.vault.controller.caCert.secretName." ]] +} + +@test "controller/Deployment: fails if vault is enabled and global.secretsBackend.vault.controller.tlsCert.secretName is set but global.secretsBackend.vault.connectInjectRole and global.secretsBackend.vault.connectInject.caCert.secretName are not" { + cd `chart_dir` + run helm template \ + -s templates/controller-deployment.yaml \ + --set 'controller.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=test' \ + --set 'global.secretsBackend.vault.consulServerRole=foo' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.consulCARole=carole' \ + --set 'global.secretsBackend.vault.controller.tlsCert.secretName=foo/tls' \ + --set 'global.secretsBackend.vault.agentAnnotations=foo: bar' . + [ "$status" -eq 1 ] + [[ "$output" =~ "When one of the following has been set, all must be set: global.secretsBackend.vault.connectInjectRole, global.secretsBackend.vault.connectInject.tlsCert.secretName, global.secretsBackend.vault.connectInject.caCert.secretName, global.secretsBackend.vault.controllerRole, global.secretsBackend.vault.controller.tlsCert.secretName, and global.secretsBackend.vault.controller.caCert.secretName." 
]]
+}
+
+@test "controller/Deployment: fails if vault is enabled and global.secretsBackend.vault.controller.caCert.secretName is set but global.secretsBackend.vault.connectInjectRole and global.secretsBackend.vault.connectInject.tlsCert.secretName are not" {
+  cd `chart_dir`
+  run helm template \
+      -s templates/controller-deployment.yaml \
+      --set 'controller.enabled=true' \
+      --set 'global.tls.enabled=true' \
+      --set 'global.tls.enableAutoEncrypt=true' \
+      --set 'global.secretsBackend.vault.enabled=true' \
+      --set 'global.secretsBackend.vault.consulClientRole=test' \
+      --set 'global.secretsBackend.vault.consulServerRole=foo' \
+      --set 'global.tls.caCert.secretName=foo' \
+      --set 'global.secretsBackend.vault.consulCARole=carole' \
+      --set 'global.secretsBackend.vault.controller.caCert.secretName=foo/ca' \
+      --set 'global.secretsBackend.vault.agentAnnotations=foo: bar' .
+  [ "$status" -eq 1 ]
+  [[ "$output" =~ "When one of the following has been set, all must be set: global.secretsBackend.vault.connectInjectRole, global.secretsBackend.vault.connectInject.tlsCert.secretName, global.secretsBackend.vault.connectInject.caCert.secretName, global.secretsBackend.vault.controllerRole, global.secretsBackend.vault.controller.tlsCert.secretName, and global.secretsBackend.vault.controller.caCert.secretName." ]]
+}
+
+@test "controller/Deployment: vault vault.hashicorp.com/role set to global.secretsBackend.vault.consulCARole if global.secretsBackend.vault.controllerRole is not set" {
+  cd `chart_dir`
+  local cmd=$(helm template \
+      -s templates/controller-deployment.yaml \
+      --set 'controller.enabled=true' \
+      --set 'global.secretsBackend.vault.enabled=true' \
+      --set 'global.secretsBackend.vault.consulClientRole=foo' \
+      --set 'global.secretsBackend.vault.consulServerRole=bar' \
+      --set 'global.secretsBackend.vault.consulCARole=test2' \
+      --set 'global.tls.enabled=true' \
+      --set 'global.tls.enableAutoEncrypt=true' \
+      --set 'server.serverCert.secretName=pki_int/issue/test' \
+      --set 'global.tls.caCert.secretName=pki_int/cert/ca' \
+      . | tee /dev/stderr |
+      yq -r '.spec.template.metadata' | tee /dev/stderr)
+
+  local actual="$(echo $cmd |
+      yq -r '.annotations["vault.hashicorp.com/role"]' | tee /dev/stderr)"
+  [ "${actual}" = "test2" ]
+}
+#--------------------------------------------------------------------
+# Vault agent annotations
+
+@test "controller/Deployment: no vault agent annotations defined by default" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/controller-deployment.yaml \
+      --set 'controller.enabled=true' \
+      --set 'global.secretsBackend.vault.enabled=true' \
+      --set 'global.secretsBackend.vault.consulClientRole=test' \
+      --set 'global.secretsBackend.vault.consulServerRole=foo' \
+      --set 'global.tls.caCert.secretName=foo' \
+      --set 'global.secretsBackend.vault.consulCARole=carole' \
+      .
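
The failure-mode tests above rely on bats' `run` helper, which prevents a non-zero exit from aborting the test and instead records the exit code in `$status` and the combined stdout/stderr in `$output`. A minimal illustration with a stand-in command rather than an actual helm render:

    @test "example: a failing render is observable" {
      run bash -c 'echo "When one of the following has been set, all must be set" >&2; exit 1'
      [ "$status" -eq 1 ]
      [[ "$output" =~ "all must be set" ]]
    }
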
| tee /dev/stderr | + yq -r '.spec.template.metadata.annotations | del(."consul.hashicorp.com/connect-inject") | del(."vault.hashicorp.com/agent-inject") | del(."vault.hashicorp.com/role")' | tee /dev/stderr) + [ "${actual}" = "{}" ] +} + +@test "controller/Deployment: vault agent annotations can be set" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/controller-deployment.yaml \ + --set 'controller.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=test' \ + --set 'global.secretsBackend.vault.consulServerRole=foo' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.consulCARole=carole' \ + --set 'global.secretsBackend.vault.agentAnnotations=foo: bar' \ + . | tee /dev/stderr | + yq -r '.spec.template.metadata.annotations.foo' | tee /dev/stderr) + [ "${actual}" = "bar" ] +} + + diff --git a/charts/consul/test/unit/controller-leader-election-role.bats b/charts/consul/test/unit/controller-leader-election-role.bats new file mode 100644 index 0000000000..3abf9c81dc --- /dev/null +++ b/charts/consul/test/unit/controller-leader-election-role.bats @@ -0,0 +1,20 @@ +#!/usr/bin/env bats + +load _helpers + +@test "controllerLeaderElection/Role: disabled by default" { + cd `chart_dir` + assert_empty helm template \ + -s templates/controller-leader-election-role.yaml \ + . +} + +@test "controllerLeaderElection/Role: enabled with controller.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/controller-leader-election-role.yaml \ + --set 'controller.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} diff --git a/charts/consul/test/unit/controller-leader-election-rolebinding.bats b/charts/consul/test/unit/controller-leader-election-rolebinding.bats new file mode 100644 index 0000000000..94f31e01b8 --- /dev/null +++ b/charts/consul/test/unit/controller-leader-election-rolebinding.bats @@ -0,0 +1,20 @@ +#!/usr/bin/env bats + +load _helpers + +@test "controllerLeaderElection/RoleBinding: disabled by default" { + cd `chart_dir` + assert_empty helm template \ + -s templates/controller-leader-election-rolebinding.yaml \ + . +} + +@test "controllerLeaderElection/RoleBinding: enabled with controller.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/controller-leader-election-rolebinding.yaml \ + --set 'controller.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} diff --git a/charts/consul/test/unit/controller-mutatingwebhookconfiguration.bats b/charts/consul/test/unit/controller-mutatingwebhookconfiguration.bats new file mode 100644 index 0000000000..b65c94666a --- /dev/null +++ b/charts/consul/test/unit/controller-mutatingwebhookconfiguration.bats @@ -0,0 +1,20 @@ +#!/usr/bin/env bats + +load _helpers + +@test "controller/MutatingWebhookConfiguration: disabled by default" { + cd `chart_dir` + assert_empty helm template \ + -s templates/controller-mutatingwebhookconfiguration.yaml \ + . +} + +@test "controller/MutatingWebhookConfiguration: enabled with controller.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/controller-mutatingwebhookconfiguration.yaml \ + --set 'controller.enabled=true' \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} diff --git a/charts/consul/test/unit/controller-podsecuritypolicy.bats b/charts/consul/test/unit/controller-podsecuritypolicy.bats new file mode 100644 index 0000000000..1ad286b8ba --- /dev/null +++ b/charts/consul/test/unit/controller-podsecuritypolicy.bats @@ -0,0 +1,29 @@ +#!/usr/bin/env bats + +load _helpers + +@test "controller/PodSecurityPolicy: disabled by default" { + cd `chart_dir` + assert_empty helm template \ + -s templates/controller-podsecuritypolicy.yaml \ + . +} + +@test "controller/PodSecurityPolicy: disabled by default with controller enabled" { + cd `chart_dir` + assert_empty helm template \ + -s templates/controller-podsecuritypolicy.yaml \ + --set 'controller.enabled=true' \ + . +} + +@test "controller/PodSecurityPolicy: enabled with controller enabled and global.enablePodSecurityPolicies=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/controller-podsecuritypolicy.yaml \ + --set 'controller.enabled=true' \ + --set 'global.enablePodSecurityPolicies=true' \ + . | tee /dev/stderr | + yq -s 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} diff --git a/charts/consul/test/unit/controller-serviceaccount.bats b/charts/consul/test/unit/controller-serviceaccount.bats new file mode 100644 index 0000000000..3dd95cfea4 --- /dev/null +++ b/charts/consul/test/unit/controller-serviceaccount.bats @@ -0,0 +1,65 @@ +#!/usr/bin/env bats + +load _helpers + +@test "controller/ServiceAccount: disabled by default" { + cd `chart_dir` + assert_empty helm template \ + -s templates/controller-serviceaccount.yaml \ + . +} + +@test "controller/ServiceAccount: enabled with controller.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/controller-serviceaccount.yaml \ + --set 'controller.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# global.imagePullSecrets + +@test "controller/ServiceAccount: can set image pull secrets" { + cd `chart_dir` + local object=$(helm template \ + -s templates/controller-serviceaccount.yaml \ + --set 'controller.enabled=true' \ + --set 'global.imagePullSecrets[0].name=my-secret' \ + --set 'global.imagePullSecrets[1].name=my-secret2' \ + . | tee /dev/stderr) + + local actual=$(echo "$object" | + yq -r '.imagePullSecrets[0].name' | tee /dev/stderr) + [ "${actual}" = "my-secret" ] + + local actual=$(echo "$object" | + yq -r '.imagePullSecrets[1].name' | tee /dev/stderr) + [ "${actual}" = "my-secret2" ] +} + +#-------------------------------------------------------------------- +# controller.serviceAccount.annotations + +@test "controller/ServiceAccount: no annotations by default" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/controller-serviceaccount.yaml \ + --set 'controller.enabled=true' \ + . | tee /dev/stderr | + yq '.metadata.annotations | length > 0' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "controller/ServiceAccount: annotations when enabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/controller-serviceaccount.yaml \ + --set 'controller.enabled=true' \ + --set "controller.serviceAccount.annotations=foo: bar" \ + . 
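
The disabled-by-default tests call an `assert_empty` helper loaded from `_helpers`. Purely as a hypothetical sketch of the idea (the real helper in charts/consul/test/unit/_helpers.bash also has to tolerate helm exiting non-zero when a selected template renders nothing):

    # Hypothetical sketch: pass only when the wrapped command emits no output.
    assert_empty() {
      run "$@"
      [ -z "$output" ]
    }
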
| tee /dev/stderr |
+      yq -r '.metadata.annotations.foo' | tee /dev/stderr)
+  [ "${actual}" = "bar" ]
+}
diff --git a/charts/consul/test/unit/controller-webhook-service.bats b/charts/consul/test/unit/controller-webhook-service.bats
new file mode 100644
index 0000000000..fc78e8e2e4
--- /dev/null
+++ b/charts/consul/test/unit/controller-webhook-service.bats
@@ -0,0 +1,20 @@
+#!/usr/bin/env bats
+
+load _helpers
+
+@test "controllerWebhook/Service: disabled by default" {
+  cd `chart_dir`
+  assert_empty helm template \
+      -s templates/controller-webhook-service.yaml \
+      .
+}
+
+@test "controllerWebhook/Service: enabled with controller.enabled=true" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/controller-webhook-service.yaml \
+      --set 'controller.enabled=true' \
+      . | tee /dev/stderr |
+      yq 'length > 0' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
diff --git a/charts/consul/test/unit/crd-exportedservices.bats b/charts/consul/test/unit/crd-exportedservices.bats
index 1b8f4430b5..cf1a35a587 100644
--- a/charts/consul/test/unit/crd-exportedservices.bats
+++ b/charts/consul/test/unit/crd-exportedservices.bats
@@ -2,20 +2,18 @@
 
 load _helpers
 
-@test "exportedServices/CustomResourceDefinition: enabled by default" {
+@test "exportedServices/CustomResourceDefinition: disabled by default" {
   cd `chart_dir`
-  local actual=$(helm template \
+  assert_empty helm template \
     -s templates/crd-exportedservices.yaml \
-      . | tee /dev/stderr |
-      yq 'length > 0' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
+      .
 }
 
-@test "exportedServices/CustomResourceDefinition: enabled with connectInject.enabled=true" {
+@test "exportedServices/CustomResourceDefinition: enabled with controller.enabled=true" {
   cd `chart_dir`
   local actual=$(helm template \
     -s templates/crd-exportedservices.yaml \
-    --set 'connectInject.enabled=true' \
+    --set 'controller.enabled=true' \
     . | tee /dev/stderr |
   # The generated CRDs have "---" at the top which results in two objects
   # being detected by yq, the first of which is null. We must therefore use
diff --git a/charts/consul/test/unit/crd-ingressgateways.bats b/charts/consul/test/unit/crd-ingressgateways.bats
index c1311ef664..315a22c8e7 100644
--- a/charts/consul/test/unit/crd-ingressgateways.bats
+++ b/charts/consul/test/unit/crd-ingressgateways.bats
@@ -2,24 +2,18 @@
 
 load _helpers
 
-@test "ingressGateway/CustomResourceDefinition: enabled by default" {
+@test "ingressGateway/CustomResourceDefinition: disabled by default" {
   cd `chart_dir`
-  local actual=$(helm template \
+  assert_empty helm template \
     -s templates/crd-ingressgateways.yaml \
-      . | tee /dev/stderr |
-      # The generated CRDs have "---" at the top which results in two objects
-      # being detected by yq, the first of which is null. We must therefore use
-      # yq -s so that length operates on both objects at once rather than
-      # individually, which would output false\ntrue and fail the test.
-      yq -s 'length > 0' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
+      .
 }
 
-@test "ingressGateway/CustomResourceDefinition: enabled with connectInject.enabled=true" {
+@test "ingressGateway/CustomResourceDefinition: enabled with controller.enabled=true" {
   cd `chart_dir`
   local actual=$(helm template \
     -s templates/crd-ingressgateways.yaml \
-    --set 'connectInject.enabled=true' \
+    --set 'controller.enabled=true' \
    . | tee /dev/stderr |
   # The generated CRDs have "---" at the top which results in two objects
   # being detected by yq, the first of which is null. We must therefore use
@@ -28,11 +22,3 @@ load _helpers
   yq -s 'length > 0' | tee /dev/stderr)
   [ "${actual}" = "true" ]
 }
-
-@test "ingressGateway/CustomResourceDefinition: disabled with connectInject.enabled=false" {
-  cd `chart_dir`
-  assert_empty helm template \
-    -s templates/crd-meshes.yaml \
-    --set 'connectInject.enabled=false' \
-    .
-}
diff --git a/charts/consul/test/unit/crd-meshes.bats b/charts/consul/test/unit/crd-meshes.bats
index 97e73af24e..4ad7acf321 100644
--- a/charts/consul/test/unit/crd-meshes.bats
+++ b/charts/consul/test/unit/crd-meshes.bats
@@ -2,24 +2,18 @@
 
 load _helpers
 
-@test "mesh/CustomResourceDefinition: enabled by default" {
+@test "mesh/CustomResourceDefinition: disabled by default" {
   cd `chart_dir`
-  local actual=$(helm template \
+  assert_empty helm template \
    -s templates/crd-meshes.yaml \
-      . | tee /dev/stderr |
-      # The generated CRDs have "---" at the top which results in two objects
-      # being detected by yq, the first of which is null. We must therefore use
-      # yq -s so that length operates on both objects at once rather than
-      # individually, which would output false\ntrue and fail the test.
-      yq -s 'length > 0' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
+      .
 }
 
-@test "mesh/CustomResourceDefinition: enabled with connectInject.enabled=true" {
+@test "mesh/CustomResourceDefinition: enabled with controller.enabled=true" {
   cd `chart_dir`
   local actual=$(helm template \
    -s templates/crd-meshes.yaml \
-    --set 'connectInject.enabled=true' \
+    --set 'controller.enabled=true' \
     . | tee /dev/stderr |
   # The generated CRDs have "---" at the top which results in two objects
   # being detected by yq, the first of which is null. We must therefore use
@@ -28,11 +22,3 @@ load _helpers
   yq -s 'length > 0' | tee /dev/stderr)
   [ "${actual}" = "true" ]
 }
-
-@test "ingressGateway/CustomResourceDefinition: disabled with connectInject.enabled=false" {
-  cd `chart_dir`
-  assert_empty helm template \
-    -s templates/crd-meshes.yaml \
-    --set 'connectInject.enabled=false' \
-    .
-}
diff --git a/charts/consul/test/unit/crd-proxydefaults.bats b/charts/consul/test/unit/crd-proxydefaults.bats
index 1cc9cfe5ee..8f6c080c17 100644
--- a/charts/consul/test/unit/crd-proxydefaults.bats
+++ b/charts/consul/test/unit/crd-proxydefaults.bats
@@ -2,20 +2,18 @@
 
 load _helpers
 
-@test "proxyDefaults/CustomResourceDefinition: enabled by default" {
+@test "proxyDefaults/CustomResourceDefinition: disabled by default" {
   cd `chart_dir`
-  local actual=$(helm template \
+  assert_empty helm template \
     -s templates/crd-proxydefaults.yaml \
-      . | tee /dev/stderr |
-      yq -s 'length > 0' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
+      .
 }
 
-@test "proxyDefaults/CustomResourceDefinition: enabled with connectInject.enabled=true" {
+@test "proxyDefaults/CustomResourceDefinition: enabled with controller.enabled=true" {
   cd `chart_dir`
   local actual=$(helm template \
     -s templates/crd-proxydefaults.yaml \
-    --set 'connectInject.enabled=true' \
+    --set 'controller.enabled=true' \
     . | tee /dev/stderr |
   # The generated CRDs have "---" at the top which results in two objects
   # being detected by yq, the first of which is null. We must therefore use
diff --git a/charts/consul/test/unit/crd-servicedefaults.bats b/charts/consul/test/unit/crd-servicedefaults.bats
index fcff312505..bf61eb934f 100644
--- a/charts/consul/test/unit/crd-servicedefaults.bats
+++ b/charts/consul/test/unit/crd-servicedefaults.bats
@@ -2,20 +2,18 @@
 
 load _helpers
 
-@test "serviceDefaults/CustomResourceDefinition: enabled by default" {
+@test "serviceDefaults/CustomResourceDefinition: disabled by default" {
   cd `chart_dir`
-  local actual=$(helm template \
+  assert_empty helm template \
     -s templates/crd-servicedefaults.yaml \
-      . | tee /dev/stderr |
-      yq -s 'length > 0' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
+      .
 }
 
-@test "serviceDefaults/CustomResourceDefinition: enabled with connectInject.enabled=true" {
+@test "serviceDefaults/CustomResourceDefinition: enabled with controller.enabled=true" {
   cd `chart_dir`
   local actual=$(helm template \
     -s templates/crd-servicedefaults.yaml \
-    --set 'connectInject.enabled=true' \
+    --set 'controller.enabled=true' \
     . | tee /dev/stderr |
   # The generated CRDs have "---" at the top which results in two objects
   # being detected by yq, the first of which is null. We must therefore use
diff --git a/charts/consul/test/unit/crd-serviceintentions.bats b/charts/consul/test/unit/crd-serviceintentions.bats
index 9ca100d324..8f699d165e 100644
--- a/charts/consul/test/unit/crd-serviceintentions.bats
+++ b/charts/consul/test/unit/crd-serviceintentions.bats
@@ -2,20 +2,18 @@
 
 load _helpers
 
-@test "serviceintentions/CustomResourceDefinitions: enabled by default" {
+@test "serviceintentions/CustomResourceDefinitions: disabled by default" {
   cd `chart_dir`
-  local actual=$(helm template \
+  assert_empty helm template \
     -s templates/crd-serviceintentions.yaml \
-      . | tee /dev/stderr |
-      yq -s 'length > 0' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
+      .
 }
 
-@test "serviceintentions/CustomResourceDefinitions: enabled with connectInject.enabled=true" {
+@test "serviceintentions/CustomResourceDefinitions: enabled with controller.enabled=true" {
   cd `chart_dir`
   local actual=$(helm template \
     -s templates/crd-serviceintentions.yaml \
-    --set 'connectInject.enabled=true' \
+    --set 'controller.enabled=true' \
     . | tee /dev/stderr |
   # The generated CRDs have "---" at the top which results in two objects
   # being detected by yq, the first of which is null. We must therefore use
diff --git a/charts/consul/test/unit/crd-serviceresolvers.bats b/charts/consul/test/unit/crd-serviceresolvers.bats
index eedd69477e..660211ae9f 100644
--- a/charts/consul/test/unit/crd-serviceresolvers.bats
+++ b/charts/consul/test/unit/crd-serviceresolvers.bats
@@ -2,20 +2,18 @@
 
 load _helpers
 
-@test "serviceResolvers/CustomResourceDefinition: enabled by default" {
+@test "serviceResolvers/CustomResourceDefinition: disabled by default" {
   cd `chart_dir`
-  local actual=$(helm template \
+  assert_empty helm template \
     -s templates/crd-serviceresolvers.yaml \
-      . | tee /dev/stderr |
-      yq -s 'length > 0' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
+      .
 }
 
-@test "serviceResolvers/CustomResourceDefinition: enabled with connectInject.enabled=true" {
+@test "serviceResolvers/CustomResourceDefinition: enabled with controller.enabled=true" {
   cd `chart_dir`
   local actual=$(helm template \
     -s templates/crd-serviceresolvers.yaml \
-    --set 'connectInject.enabled=true' \
+    --set 'controller.enabled=true' \
     . | tee /dev/stderr |
   # The generated CRDs have "---" at the top which results in two objects
   # being detected by yq, the first of which is null. We must therefore use
diff --git a/charts/consul/test/unit/crd-servicerouters.bats b/charts/consul/test/unit/crd-servicerouters.bats
index c93cb1cc62..cfe9bf7b06 100644
--- a/charts/consul/test/unit/crd-servicerouters.bats
+++ b/charts/consul/test/unit/crd-servicerouters.bats
@@ -2,20 +2,18 @@
 
 load _helpers
 
-@test "serviceRouters/CustomResourceDefinition: enabled by default" {
+@test "serviceRouters/CustomResourceDefinition: disabled by default" {
   cd `chart_dir`
-  local actual=$(helm template \
+  assert_empty helm template \
     -s templates/crd-servicerouters.yaml \
-      . | tee /dev/stderr |
-      yq -s 'length > 0' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
+      .
 }
 
-@test "serviceRouters/CustomResourceDefinition: enabled with connectInject.enabled=true" {
+@test "serviceRouters/CustomResourceDefinition: enabled with controller.enabled=true" {
   cd `chart_dir`
   local actual=$(helm template \
     -s templates/crd-servicerouters.yaml \
-    --set 'connectInject.enabled=true' \
+    --set 'controller.enabled=true' \
     . | tee /dev/stderr |
   # The generated CRDs have "---" at the top which results in two objects
   # being detected by yq, the first of which is null. We must therefore use
diff --git a/charts/consul/test/unit/crd-servicesplitters.bats b/charts/consul/test/unit/crd-servicesplitters.bats
index 6e85613b84..4e7bbdf61a 100644
--- a/charts/consul/test/unit/crd-servicesplitters.bats
+++ b/charts/consul/test/unit/crd-servicesplitters.bats
@@ -2,20 +2,18 @@
 
 load _helpers
 
-@test "serviceSplitters/CustomResourceDefinition: enabled by default" {
+@test "serviceSplitters/CustomResourceDefinition: disabled by default" {
   cd `chart_dir`
-  local actual=$(helm template \
+  assert_empty helm template \
     -s templates/crd-servicesplitters.yaml \
-      . | tee /dev/stderr |
-      yq -s 'length > 0' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
+      .
 }
 
-@test "serviceSplitters/CustomResourceDefinition: enabled with connectInject.enabled=true" {
+@test "serviceSplitters/CustomResourceDefinition: enabled with controller.enabled=true" {
   cd `chart_dir`
   local actual=$(helm template \
     -s templates/crd-servicesplitters.yaml \
-    --set 'connectInject.enabled=true' \
+    --set 'controller.enabled=true' \
     . | tee /dev/stderr |
   # The generated CRDs have "---" at the top which results in two objects
   # being detected by yq, the first of which is null. We must therefore use
diff --git a/charts/consul/test/unit/crd-terminatinggateway.bats b/charts/consul/test/unit/crd-terminatinggateway.bats
index 69976848e8..84ed725c90 100644
--- a/charts/consul/test/unit/crd-terminatinggateway.bats
+++ b/charts/consul/test/unit/crd-terminatinggateway.bats
@@ -2,20 +2,18 @@
 
 load _helpers
 
-@test "terminatingGateway/CustomResourceDefinition: enabled by default" {
+@test "terminatingGateway/CustomResourceDefinition: disabled by default" {
   cd `chart_dir`
-  local actual=$(helm template \
+  assert_empty helm template \
     -s templates/crd-terminatinggateways.yaml \
-      . | tee /dev/stderr |
-      yq -s 'length > 0' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
+      .
 }
 
-@test "terminatingGateway/CustomResourceDefinition: enabled with connectInject.enabled=true" {
+@test "terminatingGateway/CustomResourceDefinition: enabled with controller.enabled=true" {
   cd `chart_dir`
   local actual=$(helm template \
     -s templates/crd-terminatinggateways.yaml \
-    --set 'connectInject.enabled=true' \
+    --set 'controller.enabled=true' \
     . | tee /dev/stderr |
   # The generated CRDs have "---" at the top which results in two objects
   # being detected by yq, the first of which is null.
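
The comment repeated through these CRD tests is worth unpacking: with multi-document input, plain yq evaluates the filter once per document, so a null leading document yields false before the real true and the string comparison fails. Slurping with -s wraps all documents in a single array first. A runnable illustration, using an explicit null document to stand in for the leading "---" the comment describes:

    printf 'null\n---\nkind: CustomResourceDefinition\n' | yq 'length > 0'
    # => false
    #    true
    printf 'null\n---\nkind: CustomResourceDefinition\n' | yq -s 'length > 0'
    # => true
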
We must therefore use diff --git a/charts/consul/test/unit/create-federation-secret-job.bats b/charts/consul/test/unit/create-federation-secret-job.bats index e528f28f0e..5a569d2670 100644 --- a/charts/consul/test/unit/create-federation-secret-job.bats +++ b/charts/consul/test/unit/create-federation-secret-job.bats @@ -181,6 +181,33 @@ load _helpers [ "${actual}" = "true" ] } +@test "createFederationSecret/Job: auto-encrypt enabled" { + cd `chart_dir` + local obj=$(helm template \ + -s templates/create-federation-secret-job.yaml \ + --set 'global.federation.enabled=true' \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.federation.createFederationSecret=true' \ + . | tee /dev/stderr) + + local actual + + # test it has the auto-encrypt volume + actual=$(echo "$obj" | yq '.spec.template.spec.volumes | map(select(.name == "consul-auto-encrypt-ca-cert")) | length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] + + # test it adds the init container + actual=$(echo "$obj" | yq '.spec.template.spec.initContainers | map(select(.name == "get-auto-encrypt-client-ca")) | length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] + + # test it sets CONSUL_CACERT to the auto-encrypt ca cert + actual=$(echo "$obj" | yq '.spec.template.spec.containers[0].env | map(select(.name == "CONSUL_CACERT" and .value == "/consul/tls/client/ca/tls.crt")) | length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + #-------------------------------------------------------------------- # global.gossipEncryption @@ -363,6 +390,39 @@ load _helpers [ "${actual}" = "testing" ] } +#-------------------------------------------------------------------- +# get-auto-encrypt-client-ca + +@test "createFederationSecret/Job: get-auto-encrypt-client-ca uses server's stateful set address by default and passes ca cert" { + cd `chart_dir` + local command=$(helm template \ + -s templates/create-federation-secret-job.yaml \ + --set 'global.federation.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'meshGateway.enabled=true' \ + --set 'global.federation.createFederationSecret=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "get-auto-encrypt-client-ca").command | join(" ")' | tee /dev/stderr) + + # check server address + actual=$(echo $command | jq ' . | contains("-server-addr=release-name-consul-server")') + [ "${actual}" = "true" ] + + # check server port + actual=$(echo $command | jq ' . | contains("-server-port=8501")') + [ "${actual}" = "true" ] + + # check server's CA cert + actual=$(echo $command | jq ' . | contains("-ca-file=/consul/tls/ca/tls.crt")') + [ "${actual}" = "true" ] + + # check consul-api-timeout + actual=$(echo $command | jq ' . 
| contains("-consul-api-timeout=5s")') + [ "${actual}" = "true" ] +} + #-------------------------------------------------------------------- # extraLabels diff --git a/charts/consul/test/unit/dns-service.bats b/charts/consul/test/unit/dns-service.bats index bc5777ac53..d5fea64d04 100755 --- a/charts/consul/test/unit/dns-service.bats +++ b/charts/consul/test/unit/dns-service.bats @@ -2,7 +2,7 @@ load _helpers -@test "dns/Service: enabled by default due to inheriting from connectInject.transparentProxy.defaultEnabled" { +@test "dns/Service: enabled by default" { cd `chart_dir` local actual=$(helm template \ -s templates/dns-service.yaml \ @@ -11,11 +11,11 @@ load _helpers [ "${actual}" = "true" ] } -@test "dns/Service: enable with connectInject.transparentProxy.defaultEnabled false" { +@test "dns/Service: enable with global.enabled false" { cd `chart_dir` local actual=$(helm template \ -s templates/dns-service.yaml \ - --set 'connectInject.transparentProxy.defaultEnabled=false' \ + --set 'global.enabled=false' \ --set 'dns.enabled=true' \ . | tee /dev/stderr | yq 'length > 0' | tee /dev/stderr) @@ -30,11 +30,11 @@ load _helpers . } -@test "dns/Service: disable with connectInject.transparentProxy.defaultEnabled false" { +@test "dns/Service: disable with global.enabled" { cd `chart_dir` assert_empty helm template \ -s templates/dns-service.yaml \ - --set 'connectInject.transparentProxy.defaultEnabled=false' \ + --set 'global.enabled=false' \ . } diff --git a/charts/consul/test/unit/expose-servers-service.bats b/charts/consul/test/unit/expose-servers-service.bats index 09dd9b5b8a..a2a3e21c17 100644 --- a/charts/consul/test/unit/expose-servers-service.bats +++ b/charts/consul/test/unit/expose-servers-service.bats @@ -5,8 +5,45 @@ load _helpers @test "expose-servers/Service: disabled by default" { cd `chart_dir` assert_empty helm template \ - -s templates/expose-servers-service.yaml \ - . + -s templates/expose-servers-service.yaml \ + . +} + +@test "expose-servers/Service: enabled when servers and peering are enabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/expose-servers-service.yaml \ + --set 'global.enabled=false' \ + --set 'server.enabled=true' \ + --set 'client.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.peering.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "expose-servers/Service: enable with global.enabled true and global.peering.enabled true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/expose-servers-service.yaml \ + --set 'global.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.peering.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "expose-servers/Service: enable with global.peering.enabled true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/expose-servers-service.yaml \ + --set 'connectInject.enabled=true' \ + --set 'global.peering.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] } @test "expose-servers/Service: enable with global.adminPartitions.enabled true" { @@ -14,12 +51,19 @@ load _helpers local actual=$(helm template \ -s templates/expose-servers-service.yaml \ --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ . 
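
The get-auto-encrypt-client-ca tests above use a two-step idiom: yq joins the init container's command array into one JSON string, then jq probes that string for individual flags. The same steps outside bats, with a hypothetical rendered.yaml standing in for the helm output:

    command=$(yq '.spec.template.spec.initContainers[] | select(.name == "get-auto-encrypt-client-ca").command | join(" ")' rendered.yaml)
    # $command is still JSON-quoted, so jq can test substrings directly:
    echo "$command" | jq '. | contains("-server-port=8501")'
    # => true
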
| tee /dev/stderr | yq 'length > 0' | tee /dev/stderr) [ "${actual}" = "true" ] } +@test "expose-servers/Service: disable when peering.enabled is false" { + cd `chart_dir` + assert_empty helm template \ + -s templates/expose-servers-service.yaml \ + --set 'server.enabled=true' \ + --set 'global.peering.enabled=false' \ + . +} @test "expose-servers/Service: disable with server.enabled" { cd `chart_dir` @@ -27,8 +71,7 @@ load _helpers -s templates/expose-servers-service.yaml \ --set 'server.enabled=false' \ --set 'connectInject.enabled=true' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ + --set 'global.peering.enabled=true' \ . } @@ -39,8 +82,7 @@ load _helpers --set 'global.enabled=false' \ --set 'client.enabled=true' \ --set 'connectInject.enabled=true' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ + --set 'global.peering.enabled=true' \ . } @@ -49,8 +91,7 @@ load _helpers local cmd=$(helm template \ -s templates/expose-servers-service.yaml \ --set 'connectInject.enabled=true' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ + --set 'global.peering.enabled=true' \ --set 'global.tls.enabled=true' \ . | tee /dev/stderr | yq '.spec.ports[0]' | tee /dev/stderr) @@ -65,8 +106,7 @@ load _helpers local cmd=$(helm template \ -s templates/expose-servers-service.yaml \ --set 'connectInject.enabled=true' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ + --set 'global.peering.enabled=true' \ --set 'global.tls.enabled=true' \ . | tee /dev/stderr | yq '.spec.ports[0]' | tee /dev/stderr) @@ -81,8 +121,7 @@ load _helpers local cmd=$(helm template \ -s templates/expose-servers-service.yaml \ --set 'connectInject.enabled=true' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ + --set 'global.peering.enabled=true' \ --set 'global.tls.enabled=true' \ --set 'global.tls.httpsOnly=false' \ . | tee /dev/stderr | @@ -95,8 +134,7 @@ load _helpers local cmd=$(helm template \ -s templates/expose-servers-service.yaml \ --set 'connectInject.enabled=true' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ + --set 'global.peering.enabled=true' \ --set 'global.tls.enabled=true' \ --set 'global.tls.httpsOnly=false' \ . | tee /dev/stderr | @@ -115,8 +153,7 @@ load _helpers local actual=$(helm template \ -s templates/expose-servers-service.yaml \ --set 'connectInject.enabled=true' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ + --set 'global.peering.enabled=true' \ . | tee /dev/stderr | yq -r '.metadata.annotations | length' | tee /dev/stderr) [ "${actual}" = "0" ] @@ -127,8 +164,7 @@ load _helpers local actual=$(helm template \ -s templates/expose-servers-service.yaml \ --set 'connectInject.enabled=true' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ + --set 'global.peering.enabled=true' \ --set 'server.exposeService.annotations=key: value' \ . 
| tee /dev/stderr | yq -r '.metadata.annotations.key' | tee /dev/stderr) @@ -143,8 +179,7 @@ load _helpers local actual=$(helm template \ -s templates/expose-servers-service.yaml \ --set 'connectInject.enabled=true' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ + --set 'global.peering.enabled=true' \ --set 'server.exposeService.type=NodePort' \ --set 'server.exposeService.nodePort.http=4443' \ . | tee /dev/stderr | @@ -157,8 +192,7 @@ load _helpers local actual=$(helm template \ -s templates/expose-servers-service.yaml \ --set 'connectInject.enabled=true' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ + --set 'global.peering.enabled=true' \ --set 'global.tls.enabled=true' \ --set 'server.exposeService.type=NodePort' \ --set 'server.exposeService.nodePort.https=4443' \ @@ -172,8 +206,7 @@ load _helpers local actual=$(helm template \ -s templates/expose-servers-service.yaml \ --set 'connectInject.enabled=true' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ + --set 'global.peering.enabled=true' \ --set 'server.exposeService.type=NodePort' \ --set 'server.exposeService.nodePort.rpc=4443' \ . | tee /dev/stderr | @@ -186,8 +219,7 @@ load _helpers local actual=$(helm template \ -s templates/expose-servers-service.yaml \ --set 'connectInject.enabled=true' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ + --set 'global.peering.enabled=true' \ --set 'server.exposeService.type=NodePort' \ --set 'server.exposeService.nodePort.serf=4444' \ . | tee /dev/stderr | @@ -200,8 +232,7 @@ load _helpers local actual=$(helm template \ -s templates/expose-servers-service.yaml \ --set 'connectInject.enabled=true' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ + --set 'global.peering.enabled=true' \ --set 'server.exposeService.type=NodePort' \ --set 'server.exposeService.nodePort.grpc=4444' \ . | tee /dev/stderr | @@ -214,8 +245,7 @@ load _helpers local ports=$(helm template \ -s templates/expose-servers-service.yaml \ --set 'connectInject.enabled=true' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ + --set 'global.peering.enabled=true' \ --set 'server.exposeService.type=NodePort' \ --set 'server.exposeService.nodePort.rpc=4443' \ --set 'server.exposeService.nodePort.grpc=4444' \ diff --git a/charts/consul/test/unit/ingress-gateways-deployment.bats b/charts/consul/test/unit/ingress-gateways-deployment.bats index 8ed76be13a..f4bbc40881 100644 --- a/charts/consul/test/unit/ingress-gateways-deployment.bats +++ b/charts/consul/test/unit/ingress-gateways-deployment.bats @@ -39,6 +39,27 @@ load _helpers [ "${actual}" = "release-name-consul-ingress-gateway" ] } +@test "ingressGateways/Deployment: Adds consul service volumeMount to gateway container" { + cd `chart_dir` + local object=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + . 
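
Each NodePort test above pins a single port through server.exposeService.nodePort.*. Outside the suite the same override can be inspected directly; a sketch assuming it is run from the chart root (the port list shape is whatever the template actually emits):

    helm template . \
        -s templates/expose-servers-service.yaml \
        --set 'connectInject.enabled=true' \
        --set 'global.peering.enabled=true' \
        --set 'server.exposeService.type=NodePort' \
        --set 'server.exposeService.nodePort.rpc=4443' \
        | yq '.spec.ports'
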
| yq '.spec.template.spec.containers[0].volumeMounts[1]' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.name' | tee /dev/stderr) + [ "${actual}" = "consul-service" ] + + local actual=$(echo $object | + yq -r '.mountPath' | tee /dev/stderr) + [ "${actual}" = "/consul/service" ] + + local actual=$(echo $object | + yq -r '.readOnly' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + #-------------------------------------------------------------------- # prerequisites @@ -52,6 +73,40 @@ load _helpers [[ "$output" =~ "connectInject.enabled must be true" ]] } +@test "ingressGateways/Deployment: fails if client.grpc=false" { + cd `chart_dir` + run helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'client.grpc=false' \ + --set 'connectInject.enabled=true' . + [ "$status" -eq 1 ] + [[ "$output" =~ "client.grpc must be true" ]] +} + +@test "ingressGateways/Deployment: fails if global.enabled is false and clients are not explicitly enabled" { + cd `chart_dir` + run helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.enabled=false' . + [ "$status" -eq 1 ] + [[ "$output" =~ "clients must be enabled" ]] +} + +@test "ingressGateways/Deployment: fails if global.enabled is true but clients are explicitly disabled" { + cd `chart_dir` + run helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.enabled=true' \ + --set 'client.enabled=false' . + [ "$status" -eq 1 ] + [[ "$output" =~ "clients must be enabled" ]] +} + @test "ingressGateways/Deployment: fails if there are duplicate gateway names" { cd `chart_dir` run helm template \ @@ -83,64 +138,70 @@ load _helpers [[ "$output" =~ "terminating gateways cannot have duplicate names of any ingress gateways" ]] } #-------------------------------------------------------------------- -# dataplaneImage +# envoyImage -@test "ingressGateways/Deployment: dataplane image can be set using the global value" { +@test "ingressGateways/Deployment: envoy image has default global value" { cd `chart_dir` local actual=$(helm template \ -s templates/ingress-gateways-deployment.yaml \ --set 'ingressGateways.enabled=true' \ --set 'connectInject.enabled=true' \ - --set 'global.imageConsulDataplane=new/image' \ . | tee /dev/stderr | yq -s -r '.[0].spec.template.spec.containers[0].image' | tee /dev/stderr) - [ "${actual}" = "new/image" ] -} + [[ "${actual}" =~ "envoyproxy/envoy:v" ]] -#-------------------------------------------------------------------- -# global.tls.enabled +} -@test "ingressGateways/Deployment: sets flags when global.tls.enabled is false" { +@test "ingressGateways/Deployment: envoy image can be set using the global value" { cd `chart_dir` - local object=$(helm template \ + local actual=$(helm template \ -s templates/ingress-gateways-deployment.yaml \ --set 'ingressGateways.enabled=true' \ --set 'connectInject.enabled=true' \ - --set 'global.tls.enabled=false' \ + --set 'global.imageEnvoy=new/image' \ . | tee /dev/stderr | - yq -s -r '.[0].spec.template.spec.containers[0].args' | tee /dev/stderr) - - local actual=$(echo $object | yq -r '. 
| any(contains("-tls-disabled"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + yq -s -r '.[0].spec.template.spec.containers[0].image' | tee /dev/stderr) + [ "${actual}" = "new/image" ] } -@test "ingressGateways/Deployment: sets flags when global.tls.enabled is false and global.enableConsulNamespaces=true" { +#-------------------------------------------------------------------- +# global.tls.enabled + +@test "ingressGateways/Deployment: sets TLS env variables when global.tls.enabled" { cd `chart_dir` - local object=$(helm template \ + local env=$(helm template \ -s templates/ingress-gateways-deployment.yaml \ --set 'ingressGateways.enabled=true' \ --set 'connectInject.enabled=true' \ - --set 'global.tls.enabled=false' \ - --set 'global.enableConsulNamespaces=true' \ + --set 'global.tls.enabled=true' \ . | tee /dev/stderr | - yq -s -r '.[0].spec.template.spec.containers[0].args' | tee /dev/stderr) + yq -s -r '.[0].spec.template.spec.containers[0].env[]' | tee /dev/stderr) - local actual=$(echo $object | yq -r '. | any(contains("-tls-disabled"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_HTTP_ADDR") | .value' | tee /dev/stderr) + [ "${actual}" = 'https://$(HOST_IP):8501' ] + + local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_GRPC_ADDR") | .value' | tee /dev/stderr) + [ "${actual}" = 'https://$(HOST_IP):8502' ] + + local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_CACERT") | .value' | tee /dev/stderr) + [ "${actual}" = "/consul/tls/ca/tls.crt" ] } -@test "ingressGateways/Deployment: sets TLS flags when global.tls.enabled" { +@test "ingressGateways/Deployment: sets TLS env variables in consul sidecar when global.tls.enabled" { cd `chart_dir` - local object=$(helm template \ + local env=$(helm template \ -s templates/ingress-gateways-deployment.yaml \ --set 'ingressGateways.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'global.tls.enabled=true' \ . | tee /dev/stderr | - yq -s -r '.[0].spec.template.spec.containers[0].args' | tee /dev/stderr) + yq -s -r '.[0].spec.template.spec.containers[1].env[]' | tee /dev/stderr) - local actual=$(echo $object | yq -r '. | any(contains("-ca-certs=/consul/tls/ca/tls.crt"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_HTTP_ADDR") | .value' | tee /dev/stderr) + [ "${actual}" = 'https://$(HOST_IP):8501' ] + + local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_CACERT") | .value' | tee /dev/stderr) + [ "${actual}" = "/consul/tls/ca/tls.crt" ] } @test "ingressGateways/Deployment: can overwrite CA secret with the provided one" { @@ -179,86 +240,183 @@ load _helpers } #-------------------------------------------------------------------- -# global.acls.manageSystemACLs +# global.tls.enableAutoEncrypt -@test "ingressGateways/Deployment: Adds consul envvars on ingress-gateway-init init container when ACLs are enabled and tls is enabled" { +@test "ingressGateways/Deployment: consul-auto-encrypt-ca-cert volume is added when TLS with auto-encrypt is enabled" { cd `chart_dir` - local env=$(helm template \ - -s templates/ingress-gateways-deployment.yaml \ + local actual=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ --set 'ingressGateways.enabled=true' \ --set 'connectInject.enabled=true' \ - --set 'global.acls.manageSystemACLs=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ . 
| tee /dev/stderr | - yq -r '.spec.template.spec.initContainers[0].env[]' | tee /dev/stderr) - - local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_LOGIN_AUTH_METHOD") | .value' | tee /dev/stderr) - [ "${actual}" = "release-name-consul-k8s-component-auth-method" ] + yq -s '.[0].spec.template.spec.volumes[] | select(.name == "consul-auto-encrypt-ca-cert") | length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} - local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_LOGIN_DATACENTER") | .value' | tee /dev/stderr) - [ "${actual}" = "dc1" ] +@test "ingressGateways/Deployment: consul-auto-encrypt-ca-cert volumeMount is added when TLS with auto-encrypt is enabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + . | tee /dev/stderr | + yq -s '.[0].spec.template.spec.containers[0].volumeMounts[] | select(.name == "consul-auto-encrypt-ca-cert") | length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} - local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_LOGIN_META") | .value' | tee /dev/stderr) - [ "${actual}" = 'component=ingress-gateway,pod=$(NAMESPACE)/$(POD_NAME)' ] +@test "ingressGateways/Deployment: get-auto-encrypt-client-ca init container is created when TLS with auto-encrypt is enabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + . | tee /dev/stderr | + yq -s '.[0].spec.template.spec.initContainers[] | select(.name == "get-auto-encrypt-client-ca") | length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] } -@test "ingressGateways/Deployment: ACL flags are not set when acls are disabled" { +@test "ingressGateways/Deployment: consul-ca-cert volume is not added if externalServers.enabled=true and externalServers.useSystemRoots=true" { cd `chart_dir` - local object=$(helm template \ + local actual=$(helm template \ -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ --set 'connectInject.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'externalServers.enabled=true' \ + --set 'externalServers.hosts[0]=foo.com' \ + --set 'externalServers.useSystemRoots=true' \ + . | tee /dev/stderr | + yq -s '.[0].spec.template.spec.volumes[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) + [ "${actual}" = "" ] +} + +#-------------------------------------------------------------------- +# global.acls.manageSystemACLs + +@test "ingressGateways/Deployment: consul-sidecar uses -token-file flag when global.acls.manageSystemACLs=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ --set 'ingressGateways.enabled=true' \ - --set 'global.acls.manageSystemACLs=false' \ + --set 'connectInject.enabled=true' \ + --set 'global.acls.manageSystemACLs=true' \ . | tee /dev/stderr | - yq -s -r '.[0].spec.template.spec.containers[0].args' | tee /dev/stderr) + yq -s '[.[0].spec.template.spec.containers[1].command[7]] | any(contains("-token-file=/consul/service/acl-token"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} - local actual=$(echo $object | yq -r '. 
| any(contains("-login-bearer-path"))' | tee /dev/stderr) - [ "${actual}" = "false" ] +@test "ingressGateways/Deployment: consul-sidecar uses -consul-api-timeout flag" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + . | tee /dev/stderr | + yq -s '[.[0].spec.template.spec.containers[1].command[6]] | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} - local actual=$(echo $object | yq -r '. | any(contains("-login-method"))' | tee /dev/stderr) - [ "${actual}" = "false" ] +@test "ingressGateways/Deployment: Adds consul envvars CONSUL_HTTP_ADDR on ingress-gateway-init init container when ACLs are enabled and tls is enabled" { + cd `chart_dir` + local env=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.initContainers[1].env[]' | tee /dev/stderr) - local actual=$(echo $object | yq -r '. | any(contains("-credential-type=login"))' | tee /dev/stderr) - [ "${actual}" = "false" ] + local actual + actual=$(echo $env | jq -r '. | select(.name == "CONSUL_HTTP_ADDR") | .value' | tee /dev/stderr) + [ "${actual}" = "https://\$(HOST_IP):8501" ] } -@test "ingressGateways/Deployment: command flags are set when acls are enabled" { +@test "ingressGateways/Deployment: Adds consul envvars CONSUL_HTTP_ADDR on ingress-gateway-init init container when ACLs are enabled and tls is not enabled" { cd `chart_dir` - local object=$(helm template \ - -s templates/ingress-gateways-deployment.yaml \ + local env=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'connectInject.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'ingressGateways.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ . | tee /dev/stderr | - yq -s -r '.[0].spec.template.spec.containers[0].args' | tee /dev/stderr) + yq -r '.spec.template.spec.initContainers[1].env[]' | tee /dev/stderr) - local actual=$(echo $object | yq -r '. | any(contains("-login-bearer-token-path=/var/run/secrets/kubernetes.io/serviceaccount/token"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + local actual + actual=$(echo $env | jq -r '. | select(.name == "CONSUL_HTTP_ADDR") | .value' | tee /dev/stderr) + [ "${actual}" = "http://\$(HOST_IP):8500" ] +} - local actual=$(echo $object | yq -r '. | any(contains("-login-auth-method=release-name-consul-k8s-component-auth-method"))' | tee /dev/stderr) - [ "${actual}" = "true" ] +@test "ingressGateways/Deployment: Does not add consul envvars CONSUL_CACERT on ingress-gateway-init init container when ACLs are enabled and tls is not enabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'ingressGateways.enabled=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[1].env[] | select(.name == "CONSUL_CACERT")' | tee /dev/stderr) - local actual=$(echo $object | yq -r '. 
| any(contains("-credential-type=login"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + [ "${actual}" = "" ] } -@test "ingressGateways/Deployment: add consul-dataplane envvars on ingress-gateway container" { +@test "ingressGateways/Deployment: Adds consul envvars CONSUL_CACERT on ingress-gateway-init init container when ACLs are enabled and tls is enabled" { cd `chart_dir` local env=$(helm template \ -s templates/ingress-gateways-deployment.yaml \ - --set 'ingressGateways.enabled=true' \ --set 'connectInject.enabled=true' \ + --set 'ingressGateways.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ + --set 'global.tls.enabled=true' \ . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) + yq -r '.spec.template.spec.initContainers[1].env[]' | tee /dev/stderr) + + local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_CACERT") | .value' | tee /dev/stderr) + [ "${actual}" = "/consul/tls/ca/tls.crt" ] +} - local actual=$(echo $env | jq -r '. | select(.name == "DP_ENVOY_READY_BIND_ADDRESS") | .valueFrom.fieldRef.fieldPath' | tee /dev/stderr) - [ "${actual}" = "status.podIP" ] +@test "ingressGateways/Deployment: CONSUL_HTTP_TOKEN_FILE is not set when acls are disabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'ingressGateways.enabled=true' \ + --set 'global.acls.manageSystemACLs=false' \ + . | tee /dev/stderr | + yq '[.spec.template.spec.containers[0].env[0].name] | any(contains("CONSUL_HTTP_TOKEN_FILE"))' | tee /dev/stderr) + [ "${actual}" = "false" ] +} - local actual=$(echo $env | jq -r '. | select(.name == "DP_CREDENTIAL_LOGIN_META1") | .value' | tee /dev/stderr) - [ "${actual}" = 'pod=$(NAMESPACE)/$(POD_NAME)' ] +@test "ingressGateways/Deployment: CONSUL_HTTP_TOKEN_FILE is set when acls are enabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq -s '[.[0].spec.template.spec.containers[0].env[].name] | any(contains("CONSUL_HTTP_TOKEN_FILE"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} - local actual=$(echo $env | jq -r '. | select(.name == "DP_CREDENTIAL_LOGIN_META2") | .value' | tee /dev/stderr) - [ "${actual}" = "component=ingress-gateway" ] +@test "ingressGateways/Deployment: consul-logout preStop hook is added when ACLs are enabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '[.spec.template.spec.containers[0].lifecycle.preStop.exec.command[3]] | any(contains("/consul-bin/consul logout"))' | tee /dev/stderr) + [ "${actual}" = "true" ] } #-------------------------------------------------------------------- @@ -300,6 +458,19 @@ load _helpers [ "${actual}" = "/metrics" ] } +@test "ingressGateways/Deployment: when global.metrics.enabled=true, sets proxy setting" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.metrics.enabled=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.initContainers[1].command | join(" ") | contains("envoy_prometheus_bind_addr = \"${POD_IP}:20200\"")' | tee /dev/stderr) + + [ "${actual}" = "true" ] +} + @test "ingressGateways/Deployment: when global.metrics.enableGatewayMetrics=false, does not set proxy setting" { cd `chart_dir` local object=$(helm template \ @@ -311,6 +482,9 @@ load _helpers . | tee /dev/stderr | yq '.spec.template' | tee /dev/stderr) + local actual=$(echo $object | yq -r '.spec.initContainers[1].command | join(" ") | contains("envoy_prometheus_bind_addr = \"${POD_IP}:20200\"")' | tee /dev/stderr) + [ "${actual}" = "false" ] + local actual=$(echo $object | yq -s -r '.[0].metadata.annotations."prometheus.io/path"' | tee /dev/stderr) [ "${actual}" = "null" ] @@ -331,6 +505,9 @@ load _helpers . | tee /dev/stderr | yq '.spec.template' | tee /dev/stderr) + local actual=$(echo $object | yq -r '.spec.initContainers[1].command | join(" ") | contains("envoy_prometheus_bind_addr = \"${POD_IP}:20200\"")' | tee /dev/stderr) + [ "${actual}" = "false" ] + local actual=$(echo $object | yq -s -r '.[0].metadata.annotations."prometheus.io/path"' | tee /dev/stderr) [ "${actual}" = "null" ] @@ -341,31 +518,10 @@ load _helpers [ "${actual}" = "null" ] } -#-------------------------------------------------------------------- -# externalServers.skipServerWatch - -@test "ingressGateways/Deployment: sets server-watch-disabled flag when externalServers.enabled and externalServers.skipServerWatch is true" { - cd `chart_dir` - local object=$(helm template \ - -s templates/ingress-gateways-deployment.yaml \ - --set 'ingressGateways.enabled=true' \ - --set 'connectInject.enabled=true' \ - --set 'global.tls.enabled=false' \ - --set 'server.enabled=false' \ - --set 'externalServers.enabled=true' \ - --set 'externalServers.hosts[0]=consul' \ - --set 'externalServers.skipServerWatch=true' \ - . | tee /dev/stderr | - yq -s -r '.[0].spec.template.spec.containers[0].args' | tee /dev/stderr) - - local actual=$(echo $object | yq -r '. | any(contains("-server-watch-disabled=true"))' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - #-------------------------------------------------------------------- # replicas -@test "ingressGateways/Deployment: replicas defaults to 1" { +@test "ingressGateways/Deployment: replicas defaults to 2" { cd `chart_dir` local actual=$(helm template \ -s templates/ingress-gateways-deployment.yaml \ @@ -373,7 +529,7 @@ load _helpers --set 'connectInject.enabled=true' \ . 
| tee /dev/stderr | yq -s -r '.[0].spec.replicas' | tee /dev/stderr) - [ "${actual}" = "1" ] + [ "${actual}" = "2" ] } @test "ingressGateways/Deployment: replicas can be set through defaults" { @@ -575,47 +731,166 @@ load _helpers yq -s -r '.[0].spec.template.spec.initContainers[0].resources' | tee /dev/stderr) local actual=$(echo $object | yq -r '.requests.memory' | tee /dev/stderr) - [ "${actual}" = "50Mi" ] + [ "${actual}" = "25Mi" ] local actual=$(echo $object | yq -r '.requests.cpu' | tee /dev/stderr) [ "${actual}" = "50m" ] local actual=$(echo $object | yq -r '.limits.memory' | tee /dev/stderr) - [ "${actual}" = "50Mi" ] + [ "${actual}" = "150Mi" ] local actual=$(echo $object | yq -r '.limits.cpu' | tee /dev/stderr) [ "${actual}" = "50m" ] } -#-------------------------------------------------------------------- -# affinity - -@test "ingressGateways/Deployment: affinity defaults to null" { +@test "ingressGateways/Deployment: init container resources can be set through defaults" { cd `chart_dir` - local actual=$(helm template \ - -s templates/ingress-gateways-deployment.yaml \ + local object=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ --set 'ingressGateways.enabled=true' \ --set 'connectInject.enabled=true' \ + --set 'ingressGateways.defaults.initCopyConsulContainer.resources.requests.memory=memory' \ + --set 'ingressGateways.defaults.initCopyConsulContainer.resources.requests.cpu=cpu' \ + --set 'ingressGateways.defaults.initCopyConsulContainer.resources.limits.memory=memory2' \ + --set 'ingressGateways.defaults.initCopyConsulContainer.resources.limits.cpu=cpu2' \ . | tee /dev/stderr | - yq -s -r '.[0].spec.template.spec.affinity' | tee /dev/stderr) - [ "${actual}" = "null" ] + yq -s -r '.[0].spec.template.spec.initContainers[0].resources' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.requests.memory' | tee /dev/stderr) + [ "${actual}" = "memory" ] + + local actual=$(echo $object | yq -r '.requests.cpu' | tee /dev/stderr) + [ "${actual}" = "cpu" ] + + local actual=$(echo $object | yq -r '.limits.memory' | tee /dev/stderr) + [ "${actual}" = "memory2" ] + + local actual=$(echo $object | yq -r '.limits.cpu' | tee /dev/stderr) + [ "${actual}" = "cpu2" ] } -@test "ingressGateways/Deployment: affinity can be set through defaults" { +@test "ingressGateways/Deployment: init container resources can be set through specific gateway, overriding defaults" { cd `chart_dir` - local actual=$(helm template \ - -s templates/ingress-gateways-deployment.yaml \ + local object=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ --set 'ingressGateways.enabled=true' \ --set 'connectInject.enabled=true' \ - --set 'ingressGateways.defaults.affinity=key: value' \ + --set 'ingressGateways.defaults.initCopyConsulContainer.resources.requests.memory=memory' \ + --set 'ingressGateways.defaults.initCopyConsulContainer.resources.requests.cpu=cpu' \ + --set 'ingressGateways.defaults.initCopyConsulContainer.resources.limits.memory=memory2' \ + --set 'ingressGateways.defaults.initCopyConsulContainer.resources.limits.cpu=cpu2' \ + --set 'ingressGateways.gateways[0].name=gateway1' \ + --set 'ingressGateways.gateways[0].initCopyConsulContainer.resources.requests.memory=gwmemory' \ + --set 'ingressGateways.gateways[0].initCopyConsulContainer.resources.requests.cpu=gwcpu' \ + --set 'ingressGateways.gateways[0].initCopyConsulContainer.resources.limits.memory=gwmemory2' \ + --set 'ingressGateways.gateways[0].initCopyConsulContainer.resources.limits.cpu=gwcpu2' \ . 
| tee /dev/stderr | - yq -s -r '.[0].spec.template.spec.affinity.key' | tee /dev/stderr) - [ "${actual}" = "value" ] + yq -s '.[0].spec.template.spec.initContainers[0].resources' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.requests.memory' | tee /dev/stderr) + [ "${actual}" = "gwmemory" ] + + local actual=$(echo $object | yq -r '.requests.cpu' | tee /dev/stderr) + [ "${actual}" = "gwcpu" ] + + local actual=$(echo $object | yq -r '.limits.memory' | tee /dev/stderr) + [ "${actual}" = "gwmemory2" ] + + local actual=$(echo $object | yq -r '.limits.cpu' | tee /dev/stderr) + [ "${actual}" = "gwcpu2" ] } -@test "ingressGateways/Deployment: affinity can be set through specific gateway, overriding defaults" { +#-------------------------------------------------------------------- +# consul sidecar resources + +@test "ingressGateways/Deployment: consul sidecar has default resources" { cd `chart_dir` - local actual=$(helm template \ + local object=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + . | tee /dev/stderr | + yq -s -r '.[0].spec.template.spec.containers[1].resources' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.requests.memory' | tee /dev/stderr) + [ "${actual}" = "25Mi" ] + + local actual=$(echo $object | yq -r '.requests.cpu' | tee /dev/stderr) + [ "${actual}" = "20m" ] + + local actual=$(echo $object | yq -r '.limits.memory' | tee /dev/stderr) + [ "${actual}" = "50Mi" ] + + local actual=$(echo $object | yq -r '.limits.cpu' | tee /dev/stderr) + [ "${actual}" = "20m" ] +} + +@test "ingressGateways/Deployment: consul sidecar resources can be set" { + cd `chart_dir` + local object=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.consulSidecarContainer.resources.requests.memory=memory' \ + --set 'global.consulSidecarContainer.resources.requests.cpu=cpu' \ + --set 'global.consulSidecarContainer.resources.limits.memory=memory2' \ + --set 'global.consulSidecarContainer.resources.limits.cpu=cpu2' \ + . | tee /dev/stderr | + yq -s -r '.[0].spec.template.spec.containers[1].resources' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.requests.memory' | tee /dev/stderr) + [ "${actual}" = "memory" ] + + local actual=$(echo $object | yq -r '.requests.cpu' | tee /dev/stderr) + [ "${actual}" = "cpu" ] + + local actual=$(echo $object | yq -r '.limits.memory' | tee /dev/stderr) + [ "${actual}" = "memory2" ] + + local actual=$(echo $object | yq -r '.limits.cpu' | tee /dev/stderr) + [ "${actual}" = "cpu2" ] +} + +@test "ingressGateways/Deployment: fails if global.lifecycleSidecarContainer is set" { + cd `chart_dir` + run helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.lifecycleSidecarContainer.resources.requests.memory=100Mi' . + [ "$status" -eq 1 ] + [[ "$output" =~ "global.lifecycleSidecarContainer has been renamed to global.consulSidecarContainer. Please set values using global.consulSidecarContainer." 
]] +} + +#-------------------------------------------------------------------- +# affinity + +@test "ingressGateways/Deployment: affinity defaults to one per node" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + . | tee /dev/stderr | + yq -s -r '.[0].spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[0].topologyKey' | tee /dev/stderr) + [ "${actual}" = "kubernetes.io/hostname" ] +} + +@test "ingressGateways/Deployment: affinity can be set through defaults" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'ingressGateways.defaults.affinity=key: value' \ + . | tee /dev/stderr | + yq -s -r '.[0].spec.template.spec.affinity.key' | tee /dev/stderr) + [ "${actual}" = "value" ] +} + +@test "ingressGateways/Deployment: affinity can be set through specific gateway, overriding defaults" { + cd `chart_dir` + local actual=$(helm template \ -s templates/ingress-gateways-deployment.yaml \ --set 'ingressGateways.enabled=true' \ --set 'connectInject.enabled=true' \ @@ -799,7 +1074,7 @@ load _helpers --set 'connectInject.enabled=true' \ . | tee /dev/stderr | yq -s -r '.[0].spec.template.metadata.annotations | length' | tee /dev/stderr) - [ "${actual}" = "5" ] + [ "${actual}" = "1" ] } @test "ingressGateways/Deployment: extra annotations can be set through defaults" { @@ -814,7 +1089,7 @@ key2: value2' \ yq -s -r '.[0].spec.template.metadata.annotations' | tee /dev/stderr) local actual=$(echo $object | yq '. | length' | tee /dev/stderr) - [ "${actual}" = "7" ] + [ "${actual}" = "3" ] local actual=$(echo $object | yq -r '.key1' | tee /dev/stderr) [ "${actual}" = "value1" ] @@ -836,7 +1111,7 @@ key2: value2' \ yq -s -r '.[0].spec.template.metadata.annotations' | tee /dev/stderr) local actual=$(echo $object | yq '. | length' | tee /dev/stderr) - [ "${actual}" = "7" ] + [ "${actual}" = "3" ] local actual=$(echo $object | yq -r '.key1' | tee /dev/stderr) [ "${actual}" = "value1" ] @@ -859,7 +1134,7 @@ key2: value2' \ yq -s -r '.[0].spec.template.metadata.annotations' | tee /dev/stderr) local actual=$(echo $object | yq '. | length' | tee /dev/stderr) - [ "${actual}" = "8" ] + [ "${actual}" = "4" ] local actual=$(echo $object | yq -r '.defaultkey' | tee /dev/stderr) [ "${actual}" = "defaultvalue" ] @@ -871,6 +1146,413 @@ key2: value2' \ [ "${actual}" = "value2" ] } +#-------------------------------------------------------------------- +# WAN_ADDR + +@test "ingressGateways/Deployment: WAN_ADDR set correctly for ClusterIP service set in defaults (the default)" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + . 
| tee /dev/stderr | + yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "ingress-gateway-init"))[0] | .command[2] | contains("WAN_ADDR=\"$(cat /tmp/address.txt)\"")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "ingressGateways/Deployment: WAN_ADDR set correctly for ClusterIP service set in specific gateway overriding defaults" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'ingressGateways.defaults.service.type=Static' \ + --set 'ingressGateways.gateways[0].name=ingress-gateway' \ + --set 'ingressGateways.gateways[0].service.type=ClusterIP' \ + . | tee /dev/stderr | + yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "ingress-gateway-init"))[0] | .command[2] | contains("WAN_ADDR=\"$(cat /tmp/address.txt)\"")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "ingressGateways/Deployment: WAN_ADDR set correctly for LoadBalancer service set in defaults" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'ingressGateways.defaults.service.type=LoadBalancer' \ + . | tee /dev/stderr | + yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "ingress-gateway-init"))[0] | .command[2] | contains("WAN_ADDR=\"$(cat /tmp/address.txt)\"")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "ingressGateways/Deployment: WAN_ADDR set correctly for LoadBalancer service set in specific gateway overriding defaults" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'ingressGateways.gateways[0].name=ingress-gateway' \ + --set 'ingressGateways.gateways[0].service.type=LoadBalancer' \ + . | tee /dev/stderr | + yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "ingress-gateway-init"))[0] | .command[2] | contains("WAN_ADDR=\"$(cat /tmp/address.txt)\"")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "ingressGateways/Deployment: WAN_ADDR set correctly for NodePort service set in defaults" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'ingressGateways.defaults.service.type=NodePort' \ + --set 'ingressGateways.defaults.service.ports[0].nodePort=1234' \ + . | tee /dev/stderr | + yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "ingress-gateway-init"))[0] | .command[2] | contains("WAN_ADDR=\"${HOST_IP}\"")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "ingressGateways/Deployment: WAN_ADDR set correctly for NodePort service set in specific gateway overriding defaults" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'ingressGateways.gateways[0].name=ingress-gateway' \ + --set 'ingressGateways.gateways[0].service.type=NodePort' \ + --set 'ingressGateways.gateways[0].service.ports[0].nodePort=1234' \ + . 
| tee /dev/stderr | + yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "ingress-gateway-init"))[0] | .command[2] | contains("WAN_ADDR=\"${HOST_IP}\"")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "ingressGateways/Deployment: WAN_ADDR definition fails if using unknown service type in defaults" { + cd `chart_dir` + run helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'ingressGateways.defaults.service.type=Static' \ + . + + [ "$status" -eq 1 ] + [[ "$output" =~ "currently set ingressGateway value service.type is not supported" ]] +} + +@test "ingressGateways/Deployment: WAN_ADDR definition fails if using unknown service type in specific gateway overriding defaults" { + cd `chart_dir` + run helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'ingressGateways.gateways[0].name=ingress-gateway' \ + --set 'ingressGateways.gateways[0].service.type=Static' \ + . + + [ "$status" -eq 1 ] + [[ "$output" =~ "currently set ingressGateway value service.type is not supported" ]] +} + +#-------------------------------------------------------------------- +# WAN_PORT + +@test "ingressGateways/Deployment: WAN_PORT set correctly for non-NodePort service in defaults (the default)" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + . | tee /dev/stderr | + yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "ingress-gateway-init"))[0] | .command[2] | contains("WAN_PORT=80")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "ingressGateways/Deployment: WAN_PORT can be set for non-NodePort service in defaults" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'ingressGateways.defaults.service.ports[0].port=1234' \ + . | tee /dev/stderr | + yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "ingress-gateway-init"))[0] | .command[2] | contains("WAN_PORT=1234")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "ingressGateways/Deployment: WAN_PORT set correctly for non-NodePort service in specific gateway overriding defaults" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'ingressGateways.gateways[0].name=ingress-gateway' \ + --set 'ingressGateways.gateways[0].service.ports[0].port=1234' \ + . | tee /dev/stderr | + yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "ingress-gateway-init"))[0] | .command[2] | contains("WAN_PORT=1234")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "ingressGateways/Deployment: WAN_PORT set correctly for NodePort service with nodePort set in defaults" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'ingressGateways.defaults.service.type=NodePort' \ + --set 'ingressGateways.defaults.service.ports[0].nodePort=1234' \ + . 
| tee /dev/stderr | + yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "ingress-gateway-init"))[0] | .command[2] | contains("WAN_PORT=1234")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "ingressGateways/Deployment: WAN_PORT set correctly for NodePort service with nodePort set in specific gateway overriding defaults" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'ingressGateways.defaults.service.ports[0].nodePort=8888' \ + --set 'ingressGateways.gateways[0].name=ingress-gateway' \ + --set 'ingressGateways.gateways[0].service.type=NodePort' \ + --set 'ingressGateways.gateways[0].service.ports[0].nodePort=1234' \ + . | tee /dev/stderr | + yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "ingress-gateway-init"))[0] | .command[2] | contains("WAN_PORT=1234")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "ingressGateways/Deployment: WAN_PORT definition fails if .service.type=NodePort and ports[0].nodePort is empty in defaults" { + cd `chart_dir` + run helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'ingressGateways.defaults.service.type=NodePort' \ + . + + [ "$status" -eq 1 ] + [[ "$output" =~ "if ingressGateways .service.type=NodePort and using ingressGateways.defaults.service.ports, the first port entry must include a nodePort" ]] +} + +@test "ingressGateways/Deployment: WAN_PORT definition fails if .service.type=NodePort and ports[0].nodePort is empty in specific gateway and not provided in defaults" { + cd `chart_dir` + run helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'ingressGateways.defaults.service.type=NodePort' \ + --set 'ingressGateways.gateways[0].name=ingress-gateway' \ + --set 'ingressGateways.gateways[0].service.ports[0].port=1234' \ + . + + [ "$status" -eq 1 ] + [[ "$output" =~ "if ingressGateways .service.type=NodePort and defining ingressGateways.gateways.service.ports, the first port entry must include a nodePort" ]] +} + +@test "ingressGateways/Deployment: WAN_PORT definition fails if .service.type=NodePort and ports[0].nodePort is empty in defaults and specific gateway" { + cd `chart_dir` + run helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'ingressGateways.defaults.service.type=NodePort' \ + --set 'ingressGateways.defaults.service.ports=null' \ + . + + [ "$status" -eq 1 ] + [[ "$output" =~ "if ingressGateways .service.type=NodePort, the first port entry in either the defaults or specific gateway must include a nodePort" ]] +} + +#-------------------------------------------------------------------- +# ingress-gateway-init init container + +@test "ingressGateways/Deployment: ingress-gateway-init init container defaults" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + . 
| tee /dev/stderr | + yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "ingress-gateway-init"))[0] | .command[2]' | tee /dev/stderr) + + exp='consul-k8s-control-plane service-address \ + -log-level=info \ + -log-json=false \ + -k8s-namespace=default \ + -name=release-name-consul-ingress-gateway \ + -output-file=/tmp/address.txt +WAN_ADDR="$(cat /tmp/address.txt)" +WAN_PORT=8080 + +cat > /consul/service/service.hcl << EOF +service { + kind = "ingress-gateway" + name = "ingress-gateway" + id = "${POD_NAME}" + port = ${WAN_PORT} + address = "${WAN_ADDR}" + tagged_addresses { + lan { + address = "${POD_IP}" + port = 21000 + } + wan { + address = "${WAN_ADDR}" + port = ${WAN_PORT} + } + } + proxy { + config { + envoy_gateway_no_default_bind = true + envoy_gateway_bind_addresses { + all-interfaces { + address = "0.0.0.0" + } + } + } + } + checks = [ + { + name = "Ingress Gateway Listening" + interval = "10s" + tcp = "${POD_IP}:21000" + deregister_critical_service_after = "6h" + } + ] +} +EOF + +/consul-bin/consul services register \ + /consul/service/service.hcl' + + [ "${actual}" = "${exp}" ] +} + +@test "ingressGateways/Deployment: ingress-gateway-init init container with acls.manageSystemACLs=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "ingress-gateway-init"))[0] | .command[2]' | tee /dev/stderr) + + exp='consul-k8s-control-plane acl-init \ + -component-name=ingress-gateway/release-name-consul-ingress-gateway \ + -acl-auth-method=release-name-consul-k8s-component-auth-method \ + -token-sink-file=/consul/service/acl-token \ + -consul-api-timeout=5s \ + -log-level=info \ + -log-json=false + +consul-k8s-control-plane service-address \ + -log-level=info \ + -log-json=false \ + -k8s-namespace=default \ + -name=release-name-consul-ingress-gateway \ + -output-file=/tmp/address.txt +WAN_ADDR="$(cat /tmp/address.txt)" +WAN_PORT=8080 + +cat > /consul/service/service.hcl << EOF +service { + kind = "ingress-gateway" + name = "ingress-gateway" + id = "${POD_NAME}" + port = ${WAN_PORT} + address = "${WAN_ADDR}" + tagged_addresses { + lan { + address = "${POD_IP}" + port = 21000 + } + wan { + address = "${WAN_ADDR}" + port = ${WAN_PORT} + } + } + proxy { + config { + envoy_gateway_no_default_bind = true + envoy_gateway_bind_addresses { + all-interfaces { + address = "0.0.0.0" + } + } + } + } + checks = [ + { + name = "Ingress Gateway Listening" + interval = "10s" + tcp = "${POD_IP}:21000" + deregister_critical_service_after = "6h" + } + ] +} +EOF + +/consul-bin/consul services register \ + -token-file=/consul/service/acl-token \ + /consul/service/service.hcl' + + [ "${actual}" = "${exp}" ] +} + +@test "ingressGateways/Deployment: ingress-gateway-init init container includes service-address command for LoadBalancer set through defaults" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'ingressGateways.defaults.service.type=LoadBalancer' \ + . 
| tee /dev/stderr | + yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "ingress-gateway-init"))[0] | .command[2] | contains("consul-k8s-control-plane service-address")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "ingressGateways/Deployment: ingress-gateway-init init container includes service-address command for LoadBalancer set through specific gateway overriding defaults" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'ingressGateways.gateways[0].name=ingress-gateway' \ + --set 'ingressGateways.gateways[0].service.type=LoadBalancer' \ + . | tee /dev/stderr | + yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "ingress-gateway-init"))[0] | .command[2] | contains("consul-k8s-control-plane service-address")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "ingressGateways/Deployment: ingress-gateway-init init container does not include service-address command for NodePort set through defaults" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'ingressGateways.defaults.service.type=NodePort' \ + --set 'ingressGateways.defaults.service.ports[0].port=80' \ + --set 'ingressGateways.defaults.service.ports[0].nodePort=1234' \ + . | tee /dev/stderr | + yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "ingress-gateway-init"))[0] | .command[2] | contains("consul-k8s-control-plane service-address")' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "ingressGateways/Deployment: ingress-gateway-init init container does not include service-address command for NodePort set through specific gateway overriding defaults" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'ingressGateways.gateways[0].name=ingress-gateway' \ + --set 'ingressGateways.gateways[0].service.type=NodePort' \ + --set 'ingressGateways.gateways[0].service.ports[0].port=80' \ + --set 'ingressGateways.gateways[0].service.ports[0].nodePort=1234' \ + . | tee /dev/stderr | + yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "ingress-gateway-init"))[0] | .command[2] | contains("consul-k8s-control-plane service-address")' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + #-------------------------------------------------------------------- # namespaces @@ -881,10 +1563,13 @@ key2: value2' \ --set 'ingressGateways.enabled=true' \ --set 'connectInject.enabled=true' \ . 
| tee /dev/stderr | - yq -s -r '.[0].spec.template.metadata.annotations' | tee /dev/stderr) + yq -s -r '.[0].spec.template.spec.containers[0]' | tee /dev/stderr) - local actual=$(echo $object | yq -r '."consul.hashicorp.com/gateway-namespace"' | tee /dev/stderr) - [ "${actual}" = "null" ] + local actual=$(echo $object | yq -r '.command | any(contains("-namespace"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | yq -r '.lifecycle.preStop.exec.command | any(contains("-namespace"))' | tee /dev/stderr) + [ "${actual}" = "false" ] } @test "ingressGateways/Deployment: namespace command flag is specified through defaults" { @@ -896,13 +1581,16 @@ key2: value2' \ --set 'global.enableConsulNamespaces=true' \ --set 'ingressGateways.defaults.consulNamespace=namespace' \ . | tee /dev/stderr | - yq -s -r '.[0].spec.template.metadata.annotations' | tee /dev/stderr) + yq -s -r '.[0].spec.template.spec.containers[0]' | tee /dev/stderr) - local actual=$(echo $object | yq -r '."consul.hashicorp.com/gateway-namespace"' | tee /dev/stderr) - [ "${actual}" = "namespace" ] + local actual=$(echo $object | yq -r '.command | any(contains("-namespace=namespace"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | yq -r '.lifecycle.preStop.exec.command | any(contains("-namespace=namespace"))' | tee /dev/stderr) + [ "${actual}" = "true" ] } -@test "ingressGateways/Deployment: namespace annotation is specified through specific gateway overriding defaults" { +@test "ingressGateways/Deployment: namespace command flag is specified through specific gateway overriding defaults" { cd `chart_dir` local object=$(helm template \ -s templates/ingress-gateways-deployment.yaml \ @@ -913,10 +1601,13 @@ key2: value2' \ --set 'ingressGateways.gateways[0].name=ingress-gateway' \ --set 'ingressGateways.gateways[0].consulNamespace=new-namespace' \ . | tee /dev/stderr | - yq -s -r '.[0].spec.template.metadata.annotations' | tee /dev/stderr) + yq -s -r '.[0].spec.template.spec.containers[0]' | tee /dev/stderr) - local actual=$(echo $object | yq -r '."consul.hashicorp.com/gateway-namespace"' | tee /dev/stderr) - [ "${actual}" = "new-namespace" ] + local actual=$(echo $object | yq -r '.command | any(contains("-namespace=new-namespace"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | yq -r '.lifecycle.preStop.exec.command | any(contains("-namespace=new-namespace"))' | tee /dev/stderr) + [ "${actual}" = "true" ] } #-------------------------------------------------------------------- @@ -929,9 +1620,12 @@ key2: value2' \ --set 'ingressGateways.enabled=true' \ --set 'connectInject.enabled=true' \ . | tee /dev/stderr | - yq -s -r '.[0].spec.template.spec.containers[0].args' | tee /dev/stderr) + yq -s -r '.[0].spec.template.spec.containers[0]' | tee /dev/stderr) - local actual=$(echo $object | yq -r '. | any(contains("-partition"))' | tee /dev/stderr) + local actual=$(echo $object | yq -r '.command | any(contains("-partition"))' | tee /dev/stderr) + [ "${actual}" = "false" ] + + local actual=$(echo $object | yq -r '.lifecycle.preStop.exec.command | any(contains("-partition"))' | tee /dev/stderr) [ "${actual}" = "false" ] } @@ -945,9 +1639,12 @@ key2: value2' \ --set 'global.adminPartitions.enabled=true' \ --set 'global.adminPartitions.name=default' \ . 
| tee /dev/stderr | - yq -s -r '.[0].spec.template.spec.containers[0].args' | tee /dev/stderr) + yq -s -r '.[0].spec.template.spec.containers[0]' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.command | any(contains("-partition=default"))' | tee /dev/stderr) + [ "${actual}" = "true" ] - local actual=$(echo $object | yq -r '. | any(contains("-service-partition=default"))' | tee /dev/stderr) + local actual=$(echo $object | yq -r '.lifecycle.preStop.exec.command | any(contains("-partition=default"))' | tee /dev/stderr) [ "${actual}" = "true" ] } @@ -994,6 +1691,38 @@ key2: value2' \ [ "${actual}" = "false" ] } +#-------------------------------------------------------------------- +# get-auto-encrypt-client-ca + +@test "ingressGateways/Deployment: get-auto-encrypt-client-ca uses server's stateful set address by default and passes ca cert" { + cd `chart_dir` + local command=$(helm template \ + -s templates/ingress-gateways-deployment.yaml \ + --set 'ingressGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'ingressGateways.gateways[0].name=gateway1' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "get-auto-encrypt-client-ca").command | join(" ")' | tee /dev/stderr) + + # check server address + actual=$(echo $command | jq ' . | contains("-server-addr=release-name-consul-server")') + [ "${actual}" = "true" ] + + # check server port + actual=$(echo $command | jq ' . | contains("-server-port=8501")') + [ "${actual}" = "true" ] + + # check server's CA cert + actual=$(echo $command | jq ' . | contains("-ca-file=/consul/tls/ca/tls.crt")') + [ "${actual}" = "true" ] + + # check consul-api-timeout + actual=$(echo $command | jq ' . 
| contains("-consul-api-timeout=5s")') + [ "${actual}" = "true" ] +} + #-------------------------------------------------------------------- # Vault @@ -1004,6 +1733,7 @@ key2: value2' \ --set 'ingressGateways.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.tls.caCert.secretName=foo' \ --set 'global.secretsBackend.vault.enabled=true' \ --set 'global.secretsBackend.vault.consulClientRole=test' \ @@ -1027,21 +1757,6 @@ key2: value2' \ local actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/agent-inject-template-serverca.crt"]' | tee /dev/stderr) [ "${actual}" = $'{{- with secret \"foo\" -}}\n{{- .Data.certificate -}}\n{{- end -}}' ] - - actual=$(echo $object | jq -r '.spec.volumes[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) - [ "${actual}" = "" ] - - actual=$(echo $object | jq -r '.spec.containers[0].volumeMounts[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) - [ "${actual}" = "" ] - - actual=$(echo $object | jq -r '.spec.initContainers[0].volumeMounts[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) - [ "${actual}" = "" ] - - actual=$(echo $object | jq -r '.spec.initContainers[0].env[] | select(.name == "CONSUL_CACERT_FILE").value' | tee /dev/stderr) - [ "${actual}" = "/vault/secrets/serverca.crt" ] - - actual=$(echo $object | jq -r '.spec.containers[0].args | any(contains("-ca-certs=/vault/secrets/serverca.crt"))' | tee /dev/stderr) - [ "${actual}" = "true" ] } @test "ingressGateway/Deployment: vault CA is not configured by default" { @@ -1051,6 +1766,7 @@ key2: value2' \ --set 'ingressGateways.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.tls.caCert.secretName=foo' \ --set 'global.secretsBackend.vault.enabled=true' \ --set 'global.secretsBackend.vault.consulClientRole=foo' \ @@ -1058,10 +1774,8 @@ key2: value2' \ --set 'global.secretsBackend.vault.consulCARole=test' \ . 
| tee /dev/stderr | yq -r '.spec.template' | tee /dev/stderr) - local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/agent-extra-secret")') [ "${actual}" = "false" ] - local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/ca-cert")') [ "${actual}" = "false" ] } @@ -1074,6 +1788,7 @@ key2: value2' \ --set 'ingressGateways.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.tls.caCert.secretName=foo' \ --set 'global.secretsBackend.vault.enabled=true' \ --set 'global.secretsBackend.vault.consulClientRole=foo' \ @@ -1095,6 +1810,7 @@ key2: value2' \ --set 'ingressGateways.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.tls.caCert.secretName=foo' \ --set 'global.secretsBackend.vault.enabled=true' \ --set 'global.secretsBackend.vault.consulClientRole=foo' \ @@ -1116,6 +1832,7 @@ key2: value2' \ --set 'ingressGateways.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.tls.caCert.secretName=foo' \ --set 'global.secretsBackend.vault.enabled=true' \ --set 'global.secretsBackend.vault.consulClientRole=foo' \ @@ -1146,7 +1863,7 @@ key2: value2' \ --set 'global.tls.caCert.secretName=foo' \ --set 'global.secretsBackend.vault.consulCARole=carole' \ . | tee /dev/stderr | - yq -r '.spec.template.metadata.annotations | del(."consul.hashicorp.com/connect-inject") | del(."vault.hashicorp.com/agent-inject") | del(."vault.hashicorp.com/role") | del(."consul.hashicorp.com/gateway-wan-address-source") | del(."consul.hashicorp.com/gateway-wan-port") | del(."vconsul.hashicorp.com/gateway-wan-address-source") | del(."consul.hashicorp.com/gateway-consul-service-name") | del(."consul.hashicorp.com/gateway-kind")' | tee /dev/stderr) + yq -r '.spec.template.metadata.annotations | del(."consul.hashicorp.com/connect-inject") | del(."vault.hashicorp.com/agent-inject") | del(."vault.hashicorp.com/role")' | tee /dev/stderr) [ "${actual}" = "{}" ] } @@ -1157,6 +1874,7 @@ key2: value2' \ --set 'ingressGateways.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.secretsBackend.vault.enabled=true' \ --set 'global.secretsBackend.vault.consulClientRole=test' \ --set 'global.secretsBackend.vault.consulServerRole=foo' \ @@ -1208,256 +1926,6 @@ key2: value2' \ [ "${actual}" = "30" ] } -#-------------------------------------------------------------------- -# global.cloud - -@test "ingressGateways/Deployment: fails when global.cloud.enabled is true and global.cloud.clientId.secretName is not set but global.cloud.clientSecret.secretName and global.cloud.resourceId.secretName is set" { - cd `chart_dir` - run helm template \ - -s templates/ingress-gateways-deployment.yaml \ - --set 'ingressGateways.enabled=true' \ - --set 'connectInject.enabled=true' \ - --set 'ingressGateways.defaults.terminationGracePeriodSeconds=5' \ - --set 'ingressGateways.gateways[0].name=gateway1' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientSecret.secretName=client-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-id-key' \ - --set 'global.cloud.resourceId.secretName=client-resource-id-name' \ - --set 
'global.cloud.resourceId.secretKey=client-resource-id-key' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]] -} - -@test "ingressGateways/Deployment: fails when global.cloud.enabled is true and global.cloud.clientSecret.secretName is not set but global.cloud.clientId.secretName and global.cloud.resourceId.secretName is set" { - cd `chart_dir` - run helm template \ - -s templates/ingress-gateways-deployment.yaml \ - --set 'ingressGateways.enabled=true' \ - --set 'connectInject.enabled=true' \ - --set 'ingressGateways.defaults.terminationGracePeriodSeconds=5' \ - --set 'ingressGateways.gateways[0].name=gateway1' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]] -} - -@test "ingressGateways/Deployment: fails when global.cloud.enabled is true and global.cloud.resourceId.secretName is not set but global.cloud.clientId.secretName and global.cloud.clientSecret.secretName is set" { - cd `chart_dir` - run helm template \ - -s templates/ingress-gateways-deployment.yaml \ - --set 'ingressGateways.enabled=true' \ - --set 'connectInject.enabled=true' \ - --set 'ingressGateways.defaults.terminationGracePeriodSeconds=5' \ - --set 'ingressGateways.gateways[0].name=gateway1' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]] -} - -@test "ingressGateways/Deployment: fails when global.cloud.resourceId.secretName is set but global.cloud.resourceId.secretKey is not set." { - cd `chart_dir` - run helm template \ - -s templates/ingress-gateways-deployment.yaml \ - --set 'ingressGateways.enabled=true' \ - --set 'connectInject.enabled=true' \ - --set 'ingressGateways.defaults.terminationGracePeriodSeconds=5' \ - --set 'ingressGateways.gateways[0].name=gateway1' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When either global.cloud.resourceId.secretName or global.cloud.resourceId.secretKey is defined, both must be set." 
]] -} - -@test "ingressGateways/Deployment: fails when global.cloud.authURL.secretName is set but global.cloud.authURL.secretKey is not set." { - cd `chart_dir` - run helm template \ - -s templates/ingress-gateways-deployment.yaml \ - --set 'ingressGateways.enabled=true' \ - --set 'connectInject.enabled=true' \ - --set 'ingressGateways.defaults.terminationGracePeriodSeconds=5' \ - --set 'ingressGateways.gateways[0].name=gateway1' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.authUrl.secretName=auth-url-name' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.authUrl.secretName or global.cloud.authUrl.secretKey is defined, both must be set." ]] -} - -@test "ingressGateways/Deployment: fails when global.cloud.authURL.secretKey is set but global.cloud.authURL.secretName is not set." { - cd `chart_dir` - run helm template \ - -s templates/ingress-gateways-deployment.yaml \ - --set 'ingressGateways.enabled=true' \ - --set 'connectInject.enabled=true' \ - --set 'ingressGateways.defaults.terminationGracePeriodSeconds=5' \ - --set 'ingressGateways.gateways[0].name=gateway1' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.authUrl.secretKey=auth-url-key' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.authUrl.secretName or global.cloud.authUrl.secretKey is defined, both must be set." ]] -} - -@test "ingressGateways/Deployment: fails when global.cloud.apiHost.secretName is set but global.cloud.apiHost.secretKey is not set." { - cd `chart_dir` - run helm template \ - -s templates/ingress-gateways-deployment.yaml \ - --set 'ingressGateways.enabled=true' \ - --set 'connectInject.enabled=true' \ - --set 'ingressGateways.defaults.terminationGracePeriodSeconds=5' \ - --set 'ingressGateways.gateways[0].name=gateway1' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.apiHost.secretName=auth-url-name' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.apiHost.secretName or global.cloud.apiHost.secretKey is defined, both must be set." 
]] -} - -@test "ingressGateways/Deployment: fails when global.cloud.apiHost.secretKey is set but global.cloud.apiHost.secretName is not set." { - cd `chart_dir` - run helm template \ - -s templates/ingress-gateways-deployment.yaml \ - --set 'ingressGateways.enabled=true' \ - --set 'connectInject.enabled=true' \ - --set 'ingressGateways.defaults.terminationGracePeriodSeconds=5' \ - --set 'ingressGateways.gateways[0].name=gateway1' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.apiHost.secretKey=auth-url-key' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.apiHost.secretName or global.cloud.apiHost.secretKey is defined, both must be set." ]] -} - -@test "ingressGateways/Deployment: fails when global.cloud.scadaAddress.secretName is set but global.cloud.scadaAddress.secretKey is not set." { - cd `chart_dir` - run helm template \ - -s templates/ingress-gateways-deployment.yaml \ - --set 'ingressGateways.enabled=true' \ - --set 'connectInject.enabled=true' \ - --set 'ingressGateways.defaults.terminationGracePeriodSeconds=5' \ - --set 'ingressGateways.gateways[0].name=gateway1' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.scadaAddress.secretName=scada-address-name' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.scadaAddress.secretName or global.cloud.scadaAddress.secretKey is defined, both must be set." ]] -} - -@test "ingressGateways/Deployment: fails when global.cloud.scadaAddress.secretKey is set but global.cloud.scadaAddress.secretName is not set." { - cd `chart_dir` - run helm template \ - -s templates/ingress-gateways-deployment.yaml \ - --set 'ingressGateways.enabled=true' \ - --set 'connectInject.enabled=true' \ - --set 'ingressGateways.defaults.terminationGracePeriodSeconds=5' \ - --set 'ingressGateways.gateways[0].name=gateway1' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.scadaAddress.secretKey=scada-address-key' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.scadaAddress.secretName or global.cloud.scadaAddress.secretKey is defined, both must be set." 
]] -} - -@test "ingressGateways/Deployment: sets TLS server name if global.cloud.enabled is set" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/ingress-gateways-deployment.yaml \ - --set 'ingressGateways.enabled=true' \ - --set 'connectInject.enabled=true' \ - --set 'ingressGateways.defaults.terminationGracePeriodSeconds=5' \ - --set 'ingressGateways.gateways[0].name=gateway1' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].args | any(contains("-tls-server-name=server.dc1.consul"))' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - #-------------------------------------------------------------------- # extraLabels diff --git a/charts/consul/test/unit/mesh-gateway-clusterrole.bats b/charts/consul/test/unit/mesh-gateway-clusterrole.bats index 3cb5826969..da4d0bdb2c 100644 --- a/charts/consul/test/unit/mesh-gateway-clusterrole.bats +++ b/charts/consul/test/unit/mesh-gateway-clusterrole.bats @@ -38,6 +38,7 @@ load _helpers -s templates/mesh-gateway-clusterrole.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ + --set 'meshGateway.service.enabled=true' \ --set 'meshGateway.service.type=LoadBalancer' \ --set 'meshGateway.wanAddress.source=Service' \ . | tee /dev/stderr | @@ -65,6 +66,7 @@ load _helpers --set 'connectInject.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ --set 'global.enablePodSecurityPolicies=true' \ + --set 'meshGateway.service.enabled=true' \ --set 'meshGateway.service.type=LoadBalancer' \ --set 'meshGateway.wanAddress.source=Service' \ . | tee /dev/stderr | diff --git a/charts/consul/test/unit/mesh-gateway-deployment.bats b/charts/consul/test/unit/mesh-gateway-deployment.bats index 588b026d40..300886e713 100755 --- a/charts/consul/test/unit/mesh-gateway-deployment.bats +++ b/charts/consul/test/unit/mesh-gateway-deployment.bats @@ -20,6 +20,16 @@ load _helpers [ "${actual}" = "true" ] } +@test "meshGateway/Deployment: consul-sidecar uses -consul-api-timeout" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + . | tee /dev/stderr | + yq -s '.[0].spec.template.spec.containers[1].command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} #-------------------------------------------------------------------- # prerequisites @@ -33,6 +43,40 @@ load _helpers [[ "$output" =~ "connectInject.enabled must be true" ]] } +@test "meshGateway/Deployment: fails if client.grpc=false" { + cd `chart_dir` + run helm template \ + -s templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'client.grpc=false' \ + --set 'connectInject.enabled=true' . 
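+ # `run` is a bats builtin: it executes the command, capturing its exit code
+ # in $status and its combined stdout/stderr in $output, so the assertions
+ # below can check that `helm template` fails with the expected message.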
+ [ "$status" -eq 1 ] + [[ "$output" =~ "client.grpc must be true" ]] +} + +@test "meshGateway/Deployment: fails if global.enabled is false and clients are not explicitly enabled" { + cd `chart_dir` + run helm template \ + -s templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.enabled=false' . + [ "$status" -eq 1 ] + [[ "$output" =~ "clients must be enabled" ]] +} + +@test "meshGateway/Deployment: fails if global.enabled is true but clients are explicitly disabled" { + cd `chart_dir` + run helm template \ + -s templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.enabled=true' \ + --set 'client.enabled=false' . + [ "$status" -eq 1 ] + [[ "$output" =~ "clients must be enabled" ]] +} + #-------------------------------------------------------------------- # annotations @@ -44,7 +88,7 @@ load _helpers --set 'connectInject.enabled=true' \ . | tee /dev/stderr | yq -r '.spec.template.metadata.annotations | length' | tee /dev/stderr) - [ "${actual}" = "7" ] + [ "${actual}" = "1" ] } @test "meshGateway/Deployment: extra annotations can be set" { @@ -57,7 +101,7 @@ load _helpers key2: value2' \ . | tee /dev/stderr | yq -r '.spec.template.metadata.annotations | length' | tee /dev/stderr) - [ "${actual}" = "9" ] + [ "${actual}" = "3" ] } #-------------------------------------------------------------------- @@ -99,7 +143,20 @@ key2: value2' \ [ "${actual}" = "/metrics" ] } -@test "meshGateway/Deployment: when global.metrics.enableGatewayMetrics=false, does not set annotations" { +@test "meshGateway/Deployment: when global.metrics.enabled=true, sets proxy setting" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.metrics.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[1].command | join(" ") | contains("envoy_prometheus_bind_addr = \"${POD_IP}:20200\"")' | tee /dev/stderr) + + [ "${actual}" = "true" ] +} + +@test "meshGateway/Deployment: when global.metrics.enableGatewayMetrics=false, does not set proxy setting" { cd `chart_dir` local object=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ @@ -110,6 +167,9 @@ key2: value2' \ . | tee /dev/stderr | yq '.spec.template' | tee /dev/stderr) + local actual=$(echo $object | yq -r '.spec.initContainers[1].command | join(" ") | contains("envoy_prometheus_bind_addr = \"${POD_IP}:20200\"")' | tee /dev/stderr) + [ "${actual}" = "false" ] + local actual=$(echo $object | yq -s -r '.[0].metadata.annotations."prometheus.io/path"' | tee /dev/stderr) [ "${actual}" = "null" ] @@ -120,7 +180,7 @@ key2: value2' \ [ "${actual}" = "null" ] } -@test "meshGateway/Deployment: when global.metrics.enabled=false, does not set annotations" { +@test "meshGateway/Deployment: when global.metrics.enabled=false, does not set proxy setting" { cd `chart_dir` local object=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ @@ -130,6 +190,9 @@ key2: value2' \ . 
| tee /dev/stderr | yq '.spec.template' | tee /dev/stderr) + local actual=$(echo $object | yq -r '.spec.initContainers[1].command | join(" ") | contains("envoy_prometheus_bind_addr = \"${POD_IP}:20200\"")' | tee /dev/stderr) + [ "${actual}" = "false" ] + local actual=$(echo $object | yq -s -r '.[0].metadata.annotations."prometheus.io/path"' | tee /dev/stderr) [ "${actual}" = "null" ] @@ -140,31 +203,10 @@ key2: value2' \ [ "${actual}" = "null" ] } -#-------------------------------------------------------------------- -# externalServers.skipServerWatch - -@test "meshGateway/Deployment: sets server-watch-disabled flag when externalServers.enabled and externalServers.skipServerWatch is true" { - cd `chart_dir` - local object=$(helm template \ - -s templates/ingress-gateways-deployment.yaml \ - --set 'ingressGateways.enabled=true' \ - --set 'connectInject.enabled=true' \ - --set 'global.tls.enabled=false' \ - --set 'server.enabled=false' \ - --set 'externalServers.enabled=true' \ - --set 'externalServers.hosts[0]=consul' \ - --set 'externalServers.skipServerWatch=true' \ - . | tee /dev/stderr | - yq -s -r '.[0].spec.template.spec.containers[0].args' | tee /dev/stderr) - - local actual=$(echo $object | yq -r '. | any(contains("-server-watch-disabled"))' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - #-------------------------------------------------------------------- # replicas -@test "meshGateway/Deployment: replicas defaults to 1" { +@test "meshGateway/Deployment: replicas defaults to 2" { cd `chart_dir` local actual=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ @@ -172,7 +214,7 @@ key2: value2' \ --set 'connectInject.enabled=true' \ . | tee /dev/stderr | yq -r '.spec.replicas' | tee /dev/stderr) - [ "${actual}" = "1" ] + [ "${actual}" = "2" ] } @test "meshGateway/Deployment: replicas can be overridden" { @@ -190,15 +232,15 @@ key2: value2' \ #-------------------------------------------------------------------- # affinity -@test "meshGateway/Deployment: affinity defaults to null" { +@test "meshGateway/Deployment: affinity defaults to one per node" { cd `chart_dir` local actual=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ . | tee /dev/stderr | - yq -r '.spec.template.spec.affinity' | tee /dev/stderr) - [ "${actual}" = "null" ] + yq -r '.spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[0].topologyKey' | tee /dev/stderr) + [ "${actual}" = "kubernetes.io/hostname" ] } @test "meshGateway/Deployment: affinity can be overridden" { @@ -317,6 +359,32 @@ key2: value2' \ [ "${actual}" = "ClusterFirst" ] } +#-------------------------------------------------------------------- +# envoyImage + +@test "meshGateway/Deployment: envoy image has default" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].image' | tee /dev/stderr) + [[ "${actual}" =~ "envoyproxy/envoy:v" ]] + +} + +@test "meshGateway/Deployment: setting meshGateway.imageEnvoy fails" { + cd `chart_dir` + run helm template \ + -s templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'meshGateway.imageEnvoy=new/image' . 
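+  # Editor's sketch (assumed template shape, not taken from this PR): the
+  # deployment template presumably rejects the removed value with a guard
+  # roughly like:
+  #
+  #   {{- if .Values.meshGateway.imageEnvoy }}
+  #   {{ fail "meshGateway.imageEnvoy must be specified in global" }}
+  #   {{- end }}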
+ [ "$status" -eq 1 ] + [[ "$output" =~ "meshGateway.imageEnvoy must be specified in global" ]] +} + #-------------------------------------------------------------------- # resources @@ -373,13 +441,57 @@ key2: value2' \ . | tee /dev/stderr | yq -r '.spec.template.spec.initContainers[0].resources' | tee /dev/stderr) + [ $(echo "${actual}" | yq -r '.requests.memory') = "25Mi" ] + [ $(echo "${actual}" | yq -r '.requests.cpu') = "50m" ] + [ $(echo "${actual}" | yq -r '.limits.memory') = "150Mi" ] + [ $(echo "${actual}" | yq -r '.limits.cpu') = "50m" ] +} + +@test "meshGateway/Deployment: init container resources can be set" { + cd `chart_dir` + local object=$(helm template \ + -s templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'meshGateway.initCopyConsulContainer.resources.requests.memory=memory' \ + --set 'meshGateway.initCopyConsulContainer.resources.requests.cpu=cpu' \ + --set 'meshGateway.initCopyConsulContainer.resources.limits.memory=memory2' \ + --set 'meshGateway.initCopyConsulContainer.resources.limits.cpu=cpu2' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.initContainers[0].resources' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.requests.memory' | tee /dev/stderr) + [ "${actual}" = "memory" ] + + local actual=$(echo $object | yq -r '.requests.cpu' | tee /dev/stderr) + [ "${actual}" = "cpu" ] + + local actual=$(echo $object | yq -r '.limits.memory' | tee /dev/stderr) + [ "${actual}" = "memory2" ] + + local actual=$(echo $object | yq -r '.limits.cpu' | tee /dev/stderr) + [ "${actual}" = "cpu2" ] +} + +#-------------------------------------------------------------------- +# mesh-gateway-init container resources + +@test "meshGateway/Deployment: init mesh-gateway-init container has default resources" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.initContainers[1].resources' | tee /dev/stderr) + [ $(echo "${actual}" | yq -r '.requests.memory') = "50Mi" ] [ $(echo "${actual}" | yq -r '.requests.cpu') = "50m" ] [ $(echo "${actual}" | yq -r '.limits.memory') = "50Mi" ] [ $(echo "${actual}" | yq -r '.limits.cpu') = "50m" ] } -@test "meshGateway/Deployment: init container resources can be set" { +@test "meshGateway/Deployment: init mesh-gateway-init container resources can be set" { cd `chart_dir` local object=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ @@ -390,7 +502,51 @@ key2: value2' \ --set 'meshGateway.initServiceInitContainer.resources.limits.memory=memory2' \ --set 'meshGateway.initServiceInitContainer.resources.limits.cpu=cpu2' \ . 
| tee /dev/stderr | - yq -r '.spec.template.spec.initContainers[0].resources' | tee /dev/stderr) + yq -r '.spec.template.spec.initContainers[1].resources' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.requests.memory' | tee /dev/stderr) + [ "${actual}" = "memory" ] + + local actual=$(echo $object | yq -r '.requests.cpu' | tee /dev/stderr) + [ "${actual}" = "cpu" ] + + local actual=$(echo $object | yq -r '.limits.memory' | tee /dev/stderr) + [ "${actual}" = "memory2" ] + + local actual=$(echo $object | yq -r '.limits.cpu' | tee /dev/stderr) + [ "${actual}" = "cpu2" ] +} + +#-------------------------------------------------------------------- +# consul sidecar resources + +@test "meshGateway/Deployment: consul sidecar has default resources" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[1].resources' | tee /dev/stderr) + + [ $(echo "${actual}" | yq -r '.requests.memory') = "25Mi" ] + [ $(echo "${actual}" | yq -r '.requests.cpu') = "20m" ] + [ $(echo "${actual}" | yq -r '.limits.memory') = "50Mi" ] + [ $(echo "${actual}" | yq -r '.limits.cpu') = "20m" ] +} + +@test "meshGateway/Deployment: consul sidecar resources can be set" { + cd `chart_dir` + local object=$(helm template \ + -s templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.consulSidecarContainer.resources.requests.memory=memory' \ + --set 'global.consulSidecarContainer.resources.requests.cpu=cpu' \ + --set 'global.consulSidecarContainer.resources.limits.memory=memory2' \ + --set 'global.consulSidecarContainer.resources.limits.cpu=cpu2' \ + . | tee /dev/stderr | + yq -s -r '.[0].spec.template.spec.containers[1].resources' | tee /dev/stderr) local actual=$(echo $object | yq -r '.requests.memory' | tee /dev/stderr) [ "${actual}" = "memory" ] @@ -405,6 +561,17 @@ key2: value2' \ [ "${actual}" = "cpu2" ] } +@test "meshGateway/Deployment: fails if global.lifecycleSidecarContainer is set" { + cd `chart_dir` + run helm template \ + -s templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.lifecycleSidecarContainer.resources.requests.memory=100Mi' . + [ "$status" -eq 1 ] + [[ "$output" =~ "global.lifecycleSidecarContainer has been renamed to global.consulSidecarContainer. Please set values using global.consulSidecarContainer." ]] +} + #-------------------------------------------------------------------- # containerPort @@ -453,199 +620,217 @@ key2: value2' \ [[ "$output" =~ "if global.acls.manageSystemACLs is true, meshGateway.consulServiceName cannot be set" ]] } -@test "meshGateway/Deployment: add consul-dataplane envvars on mesh-gateway container" { +#-------------------------------------------------------------------- +# manageSystemACLs + +@test "meshGateway/Deployment: consul-logout preStop hook is added when ACLs are enabled" { cd `chart_dir` - local env=$(helm template \ + local actual=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) - - local actual=$(echo $env | jq -r '. 
| select(.name == "DP_CREDENTIAL_LOGIN_META1") | .value' | tee /dev/stderr) - [ "${actual}" = 'pod=$(NAMESPACE)/$(POD_NAME)' ] - - local actual=$(echo $env | jq -r '. | select(.name == "DP_CREDENTIAL_LOGIN_META2") | .value' | tee /dev/stderr) - [ "${actual}" = "component=mesh-gateway" ] + yq '[.spec.template.spec.containers[0].lifecycle.preStop.exec.command[3]] | any(contains("/consul-bin/consul logout"))' | tee /dev/stderr) + [ "${actual}" = "true" ] } -#-------------------------------------------------------------------- -# manageSystemACLs - -@test "meshGateway/Deployment: ACL specific flags are not set when acls are disabled" { +@test "meshGateway/Deployment: CONSUL_HTTP_TOKEN_FILE is not set when acls are disabled" { cd `chart_dir` - local command=$(helm template \ + local actual=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].args' | tee /dev/stderr) - - local actual=$(echo $command | yq -r '. | any(contains("credential-type=login"))'| tee /dev/stderr) - [ "${actual}" = "false" ] - - local actual=$(echo $command | yq -r '. | any(contains("-login-bearer-path"))'| tee /dev/stderr) - [ "${actual}" = "false" ] - - local actual=$(echo $command | yq -r '. | any(contains("-login-method"))'| tee /dev/stderr) - [ "${actual}" = "false" ] + yq '[.spec.template.spec.containers[0].env[0].name] | any(contains("CONSUL_HTTP_TOKEN_FILE"))' | tee /dev/stderr) + [ "${actual}" = "false" ] } -@test "meshGateway/Deployment: ACL specific flags are set when acls are enabled" { +@test "meshGateway/Deployment: CONSUL_HTTP_TOKEN_FILE is set when acls are enabled" { cd `chart_dir` - local command=$(helm template \ + local actual=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].args' | tee /dev/stderr) - - local actual=$(echo $command | yq -r '. | any(contains("credential-type=login"))'| tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo $command | yq -r '. | any(contains("-login-bearer-token-path=/var/run/secrets/kubernetes.io/serviceaccount/token"))'| tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo $command | yq -r '. | any(contains("-login-auth-method=release-name-consul-k8s-component-auth-method"))'| tee /dev/stderr) - [ "${actual}" = "true" ] + yq '[.spec.template.spec.containers[0].env[2].name] | any(contains("CONSUL_HTTP_TOKEN_FILE"))' | tee /dev/stderr) + [ "${actual}" = "true" ] } -@test "meshGateway/Deployment: correct login-method and login-datacenter are set with federation is enabled and in secondary DC" { +@test "meshGateway/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command and environment with tls disabled" { cd `chart_dir` - local command=$(helm template \ + local object=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.federation.enabled=true' \ - --set 'global.federation.primaryDatacenter=dc2' \ . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].args' | tee /dev/stderr) + yq '.spec.template.spec.initContainers[1]' | tee /dev/stderr) - local actual=$(echo $command | yq -r '. 
| any(contains("-login-auth-method=release-name-consul-k8s-component-auth-method-dc1"))'| tee /dev/stderr) - [ "${actual}" = "true" ] + local actual=$(echo $object | + yq -r '.name' | tee /dev/stderr) + [ "${actual}" = "mesh-gateway-init" ] - local actual=$(echo $command | yq -r '. | any(contains("-login-datacenter=dc2"))'| tee /dev/stderr) - [ "${actual}" = "true" ] -} + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] -@test "meshGateway/Deployment: correct login-partition is set with partitions is enabled" { - cd `chart_dir` - local command=$(helm template \ - -s templates/mesh-gateway-deployment.yaml \ - --set 'meshGateway.enabled=true' \ - --set 'connectInject.enabled=true' \ - --set 'global.acls.manageSystemACLs=true' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.adminPartitions.name=other-partition' \ - --set 'global.enableConsulNamespaces=true' \ - . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].args' | tee /dev/stderr) + local actual=$(echo $object | + yq '[.env[2].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].value] | any(contains("http://$(HOST_IP):8500"))' | tee /dev/stderr) + echo $actual + [ "${actual}" = "true" ] - local actual=$(echo $command | yq -r '. | any(contains("-login-partition=other-partition"))'| tee /dev/stderr) - [ "${actual}" = "true" ] + local actual=$(echo $object | + yq -r '.command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr) + [ "${actual}" = "true" ] } -@test "meshGateway/Deployment: init container has correct environment with global.acls.manageSystemACLs=true" { +@test "meshGateway/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command and environment with tls enabled" { cd `chart_dir` local object=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ + --set 'global.tls.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ . 
| tee /dev/stderr | - yq '.spec.template.spec.initContainers[0]' | tee /dev/stderr) - - local actual=$(echo $object | - yq -r '.name' | tee /dev/stderr) - [ "${actual}" = "mesh-gateway-init" ] + yq '.spec.template.spec.initContainers[] | select(.name == "mesh-gateway-init")' | tee /dev/stderr) local actual=$(echo $object | - yq '[.env[8].name] | any(contains("CONSUL_LOGIN_AUTH_METHOD"))' | tee /dev/stderr) + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) [ "${actual}" = "true" ] local actual=$(echo $object | - yq '[.env[8].value] | any(contains("release-name-consul-k8s-component-auth-method"))' | tee /dev/stderr) + yq '[.env[2].name] | any(contains("CONSUL_CACERT"))' | tee /dev/stderr) [ "${actual}" = "true" ] local actual=$(echo $object | - yq '[.env[9].name] | any(contains("CONSUL_LOGIN_DATACENTER"))' | tee /dev/stderr) + yq '[.env[3].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr) [ "${actual}" = "true" ] local actual=$(echo $object | - yq '[.env[9].value] | any(contains("dc1"))' | tee /dev/stderr) + yq '[.env[3].value] | any(contains("https://$(HOST_IP):8501"))' | tee /dev/stderr) + echo $actual [ "${actual}" = "true" ] local actual=$(echo $object | - yq '[.env[10].name] | any(contains("CONSUL_LOGIN_META"))' | tee /dev/stderr) + yq '.volumeMounts[2] | any(contains("consul-ca-cert"))' | tee /dev/stderr) [ "${actual}" = "true" ] local actual=$(echo $object | - yq '[.env[10].value] | any(contains("component=mesh-gateway,pod=$(NAMESPACE)/$(POD_NAME)"))' | tee /dev/stderr) + yq -r '.command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr) [ "${actual}" = "true" ] } -@test "meshGateway/Deployment: init container has correct environment variables when tls enabled" { +@test "meshGateway/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command with Partitions enabled" { cd `chart_dir` local object=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'global.tls.enabled=true' \ + --set 'global.enableConsulNamespaces=true' \ + --set 'global.adminPartitions.enabled=true' \ + --set 'global.adminPartitions.name=default' \ + --set 'global.acls.manageSystemACLs=true' \ . 
| tee /dev/stderr | yq '.spec.template.spec.initContainers[] | select(.name == "mesh-gateway-init")' | tee /dev/stderr) local actual=$(echo $object | - yq '[.env[8].name] | any(contains("CONSUL_USE_TLS"))' | tee /dev/stderr) + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-acl-auth-method=release-name-consul-k8s-component-auth-method"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-partition=default"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].name] | any(contains("CONSUL_CACERT"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[3].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr) [ "${actual}" = "true" ] local actual=$(echo $object | - yq '[.env[8].value] | any(contains("true"))' | tee /dev/stderr) + yq '[.env[3].value] | any(contains("https://$(HOST_IP):8501"))' | tee /dev/stderr) + echo $actual [ "${actual}" = "true" ] local actual=$(echo $object | - yq '[.env[9].name] | any(contains("CONSUL_CACERT_FILE"))' | tee /dev/stderr) + yq '.volumeMounts[2] | any(contains("consul-ca-cert"))' | tee /dev/stderr) [ "${actual}" = "true" ] local actual=$(echo $object | - yq '[.env[9].value] | any(contains("/consul/tls/ca/tls.crt"))' | tee /dev/stderr) + yq -r '.command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr) [ "${actual}" = "true" ] } -@test "meshGateway/Deployment: init container has correct envs with Partitions enabled" { +@test "meshGateway/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command and environment with tls enabled and autoencrypt enabled" { cd `chart_dir` local object=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.adminPartitions.name=default' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.acls.manageSystemACLs=true' \ . 
| tee /dev/stderr | yq '.spec.template.spec.initContainers[] | select(.name == "mesh-gateway-init")' | tee /dev/stderr) local actual=$(echo $object | - yq '[.env[8].name] | any(contains("CONSUL_PARTITION"))' | tee /dev/stderr) + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].name] | any(contains("CONSUL_CACERT"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[3].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr) [ "${actual}" = "true" ] local actual=$(echo $object | - yq '[.env[8].value] | any(contains("default"))' | tee /dev/stderr) + yq '[.env[3].value] | any(contains("https://$(HOST_IP):8501"))' | tee /dev/stderr) + echo $actual [ "${actual}" = "true" ] local actual=$(echo $object | - yq '[.env[9].name] | any(contains("CONSUL_LOGIN_PARTITION"))' | tee /dev/stderr) + yq '.volumeMounts[2] | any(contains("consul-auto-encrypt-ca-cert"))' | tee /dev/stderr) [ "${actual}" = "true" ] local actual=$(echo $object | - yq '[.env[9].value] | any(contains("default"))' | tee /dev/stderr) + yq -r '.command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr) [ "${actual}" = "true" ] } -@test "meshGateway/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct env when federation enabled in non-primary datacenter" { +@test "meshGateway/Deployment: auto-encrypt init container is created and is the first init-container when global.acls.manageSystemACLs=true and has correct command and environment with tls enabled and autoencrypt enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[1]' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.name' | tee /dev/stderr) + [ "${actual}" = "get-auto-encrypt-client-ca" ] +} + +@test "meshGateway/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command when federation enabled in non-primary datacenter" { cd `chart_dir` local object=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ @@ -657,24 +842,25 @@ key2: value2' \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.acls.manageSystemACLs=true' \ . 
| tee /dev/stderr | yq '.spec.template.spec.initContainers[] | select(.name == "mesh-gateway-init")' | tee /dev/stderr) local actual=$(echo $object | - yq '[.env[10].name] | any(contains("CONSUL_LOGIN_AUTH_METHOD"))' | tee /dev/stderr) + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) [ "${actual}" = "true" ] local actual=$(echo $object | - yq '[.env[10].value] | any(contains("release-name-consul-k8s-component-auth-method-dc2"))' | tee /dev/stderr) + yq -r '.command | any(contains("-acl-auth-method=release-name-consul-k8s-component-auth-method-dc2"))' | tee /dev/stderr) [ "${actual}" = "true" ] local actual=$(echo $object | - yq '[.env[11].name] | any(contains("CONSUL_LOGIN_DATACENTER"))' | tee /dev/stderr) + yq -r '.command | any(contains("-primary-datacenter=dc1"))' | tee /dev/stderr) [ "${actual}" = "true" ] local actual=$(echo $object | - yq '[.env[11].value] | any(contains("dc1"))' | tee /dev/stderr) + yq -r '.command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr) [ "${actual}" = "true" ] } @@ -783,54 +969,41 @@ key2: value2' \ #-------------------------------------------------------------------- # global.tls.enabled -@test "meshGateway/Deployment: sets TLS args when global.tls.disabled" { +@test "meshGateway/Deployment: sets TLS env variables when global.tls.enabled" { cd `chart_dir` - local flags=$(helm template \ + local env=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ - --set 'global.tls.enabled=false' \ + --set 'global.tls.enabled=true' \ . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].args' | tee /dev/stderr) + yq -r '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) - local actual=$(echo $flags | yq -r '. | any(contains("-tls-disabled"))' | tee /dev/stderr) - [ "${actual}" = 'true' ] -} + local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_HTTP_ADDR") | .value' | tee /dev/stderr) + [ "${actual}" = 'https://$(HOST_IP):8501' ] -@test "meshGateway/Deployment: sets TLS args when global.tls.enabled" { - cd `chart_dir` - local flags=$(helm template \ - -s templates/mesh-gateway-deployment.yaml \ - --set 'meshGateway.enabled=true' \ - --set 'connectInject.enabled=true' \ - --set 'global.tls.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].args' | tee /dev/stderr) + local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_GRPC_ADDR") | .value' | tee /dev/stderr) + [ "${actual}" = 'https://$(HOST_IP):8502' ] - local actual=$(echo $flags | yq -r '. | any(contains("-ca-certs=/consul/tls/ca/tls.crt"))' | tee /dev/stderr) - [ "${actual}" = 'true' ] + local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_CACERT") | .value' | tee /dev/stderr) + [ "${actual}" = "/consul/tls/ca/tls.crt" ] } -@test "meshGateway/Deployment: sets external server args when global.tls.enabled and externalServers.enabled" { +@test "meshGateway/Deployment: sets TLS env variables in consul sidecar when global.tls.enabled" { cd `chart_dir` - local flags=$(helm template \ + local env=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'global.tls.enabled=true' \ - --set 'server.enabled=false' \ - --set 'externalServers.enabled=true' \ - --set 'externalServers.useSystemRoots=true' \ - --set 'externalServers.tlsServerName=foo.tls.server' \ - --set 'externalServers.hosts[0]=host' \ . 
| tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].args' | tee /dev/stderr) + yq -r '.spec.template.spec.containers[1].env[]' | tee /dev/stderr) - local actual=$(echo $flags | yq -r '. | any(contains("-ca-certs=/consul/tls/ca/tls.crt"))' | tee /dev/stderr) - [ "${actual}" = 'false' ] + local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_HTTP_ADDR") | .value' | tee /dev/stderr) + [ "${actual}" = 'https://$(HOST_IP):8501' ] - local actual=$(echo $flags | yq -r '. | any(contains("-tls-server-name=foo.tls.server"))' | tee /dev/stderr) - [ "${actual}" = 'true' ] + local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_CACERT") | .value' | tee /dev/stderr) + [ "${actual}" = "/consul/tls/ca/tls.crt" ] } @test "meshGateway/Deployment: can overwrite CA secret with the provided one" { @@ -869,113 +1042,379 @@ key2: value2' \ [ "${actual}" != "" ] } -@test "meshGateway/Deployment: CA cert volume mount present when TLS is enabled" { +#-------------------------------------------------------------------- +# global.tls.enableAutoEncrypt + +@test "meshGateway/Deployment: consul-auto-encrypt-ca-cert volume is added when TLS with auto-encrypt is enabled" { cd `chart_dir` local actual=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) - [ "${actual}" != "" ] + yq '.spec.template.spec.volumes[] | select(.name == "consul-auto-encrypt-ca-cert") | length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] } -@test "meshGateway/Deployment: CA cert volume is not present when TLS is enabled with externalServers and useSystemRoots" { +@test "meshGateway/Deployment: consul-auto-encrypt-ca-cert volumeMount is added when TLS with auto-encrypt is enabled" { cd `chart_dir` local actual=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'global.tls.enabled=true' \ - --set 'server.enabled=false' \ - --set 'externalServers.enabled=true' \ - --set 'externalServers.hosts[0]=consul' \ - --set 'externalServers.useSystemRoots=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ . | tee /dev/stderr | - yq '.spec.template.spec.volumes[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) - [ "${actual}" = "" ] + yq '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "consul-auto-encrypt-ca-cert") | length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] } -@test "meshGateway/Deployment: CA cert volume mount is not present when TLS is enabled with externalServers and useSystemRoots" { +@test "meshGateway/Deployment: get-auto-encrypt-client-ca init container is created when TLS with auto-encrypt is enabled" { cd `chart_dir` local actual=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'global.tls.enabled=true' \ - --set 'server.enabled=false' \ + --set 'global.tls.enableAutoEncrypt=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "get-auto-encrypt-client-ca") | length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "meshGateway/Deployment: consul-ca-cert volume is not added if externalServers.enabled=true and externalServers.useSystemRoots=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ --set 'externalServers.enabled=true' \ - --set 'externalServers.hosts[0]=consul' \ + --set 'externalServers.hosts[0]=foo.com' \ --set 'externalServers.useSystemRoots=true' \ . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) + yq '.spec.template.spec.volumes[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) [ "${actual}" = "" ] } ##-------------------------------------------------------------------- -## mesh-gateway service annotations +## mesh-gateway-init init container -@test "meshGateway/Deployment: mesh-gateway annotations containerPort and wanAddress.port can be changed" { +@test "meshGateway/Deployment: mesh-gateway-init init container" { cd `chart_dir` - local annotations=$(helm template \ + local actual=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ - --set 'meshGateway.containerPort=8888' \ - --set 'meshGateway.wanAddress.source=NodeIP' \ - --set 'meshGateway.wanAddress.port=9999' \ . | tee /dev/stderr | - yq -r '.spec.template.metadata.annotations ' | tee /dev/stderr) + yq -r '.spec.template.spec.initContainers | map(select(.name == "mesh-gateway-init"))[0] | .command[2]' | tee /dev/stderr) - local actual=$(echo $annotations | yq -r '.["consul.hashicorp.com/mesh-gateway-container-port"]' | tee /dev/stderr) - [ "${actual}" = "8888" ] + exp='consul-k8s-control-plane service-address \ + -log-level=info \ + -log-json=false \ + -k8s-namespace=default \ + -name=release-name-consul-mesh-gateway \ + -output-file=/tmp/address.txt +WAN_ADDR="$(cat /tmp/address.txt)" +WAN_PORT="443" - local actual=$(echo $annotations | yq -r '.["consul.hashicorp.com/gateway-wan-address-source"]' | tee /dev/stderr) - [ "${actual}" = "NodeIP" ] +cat > /consul/service/service.hcl << EOF +service { + kind = "mesh-gateway" + name = "mesh-gateway" + port = 8443 + address = "${POD_IP}" + tagged_addresses { + lan { + address = "${POD_IP}" + port = 8443 + } + wan { + address = "${WAN_ADDR}" + port = ${WAN_PORT} + } + } + checks = [ + { + name = "Mesh Gateway Listening" + interval = "10s" + tcp = "${POD_IP}:8443" + deregister_critical_service_after = "6h" + } + ] +} +EOF + +/consul-bin/consul services register \ + /consul/service/service.hcl' - local actual=$(echo $annotations | yq -r '.["consul.hashicorp.com/gateway-wan-port"]' | tee /dev/stderr) - [ "${actual}" = "9999" ] + [ "${actual}" = "${exp}" ] } -@test "meshGateway/Deployment: mesh-gateway annotations wanAddress.source=NodeIP" { +@test "meshGateway/Deployment: mesh-gateway-init init container with acls.manageSystemACLs=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.spec.initContainers | map(select(.name == "mesh-gateway-init"))[0] | .command[2]' | tee /dev/stderr) + + exp='consul-k8s-control-plane acl-init \ + -component-name=mesh-gateway \ + -token-sink-file=/consul/service/acl-token \ + -acl-auth-method=release-name-consul-k8s-component-auth-method \ + -consul-api-timeout=5s \ + -log-level=info \ + -log-json=false + +consul-k8s-control-plane service-address \ + -log-level=info \ + -log-json=false \ + -k8s-namespace=default \ + -name=release-name-consul-mesh-gateway \ + -output-file=/tmp/address.txt +WAN_ADDR="$(cat /tmp/address.txt)" +WAN_PORT="443" + +cat > /consul/service/service.hcl << EOF +service { + kind = "mesh-gateway" + name = "mesh-gateway" + port = 8443 + address = "${POD_IP}" + tagged_addresses { + lan { + address = "${POD_IP}" + port = 8443 + } + wan { + address = "${WAN_ADDR}" + port = ${WAN_PORT} + } + } + checks = [ + { + name = "Mesh Gateway Listening" + interval = "10s" + tcp = "${POD_IP}:8443" + deregister_critical_service_after = "6h" + } + ] +} +EOF + +/consul-bin/consul services register \ + -token-file=/consul/service/acl-token \ + /consul/service/service.hcl' + + [ "${actual}" = "${exp}" ] +} + +@test "meshGateway/Deployment: mesh-gateway-init init container with global.federation.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.federation.enabled=true' \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.initContainers | map(select(.name == "mesh-gateway-init"))[0] | .command[2]' | tee /dev/stderr) + + exp='consul-k8s-control-plane service-address \ + -log-level=info \ + -log-json=false \ + -k8s-namespace=default \ + -name=release-name-consul-mesh-gateway \ + -output-file=/tmp/address.txt +WAN_ADDR="$(cat /tmp/address.txt)" +WAN_PORT="443" + +cat > /consul/service/service.hcl << EOF +service { + kind = "mesh-gateway" + name = "mesh-gateway" + meta { + consul-wan-federation = "1" + } + port = 8443 + address = "${POD_IP}" + tagged_addresses { + lan { + address = "${POD_IP}" + port = 8443 + } + wan { + address = "${WAN_ADDR}" + port = ${WAN_PORT} + } + } + checks = [ + { + name = "Mesh Gateway Listening" + interval = "10s" + tcp = "${POD_IP}:8443" + deregister_critical_service_after = "6h" + } + ] +} +EOF + +/consul-bin/consul services register \ + /consul/service/service.hcl' + + [ "${actual}" = "${exp}" ] +} + +@test "meshGateway/Deployment: mesh-gateway-init init container containerPort and wanAddress.port can be changed" { cd `chart_dir` - local annotations=$(helm template \ + local actual=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ + --set 'meshGateway.containerPort=8888' \ --set 'meshGateway.wanAddress.source=NodeIP' \ + --set 'meshGateway.wanAddress.port=9999' \ . 
| tee /dev/stderr | - yq -r '.spec.template.metadata.annotations ' | tee /dev/stderr) - - local actual=$(echo $annotations | yq -r '.["consul.hashicorp.com/mesh-gateway-container-port"]' | tee /dev/stderr) - [ "${actual}" = "8443" ] - - local actual=$(echo $annotations | yq -r '.["consul.hashicorp.com/gateway-wan-address-source"]' | tee /dev/stderr) - [ "${actual}" = "NodeIP" ] - - local actual=$(echo $annotations | yq -r '.["consul.hashicorp.com/gateway-wan-port"]' | tee /dev/stderr) - [ "${actual}" = "443" ] -} - -@test "meshGateway/Deployment: mesh-gateway annotations wanAddress.source=NodeName" { + yq -r '.spec.template.spec.initContainers | map(select(.name == "mesh-gateway-init"))[0] | .command[2]' | tee /dev/stderr) + + exp='WAN_ADDR="${HOST_IP}" +WAN_PORT="9999" + +cat > /consul/service/service.hcl << EOF +service { + kind = "mesh-gateway" + name = "mesh-gateway" + port = 8888 + address = "${POD_IP}" + tagged_addresses { + lan { + address = "${POD_IP}" + port = 8888 + } + wan { + address = "${WAN_ADDR}" + port = ${WAN_PORT} + } + } + checks = [ + { + name = "Mesh Gateway Listening" + interval = "10s" + tcp = "${POD_IP}:8888" + deregister_critical_service_after = "6h" + } + ] +} +EOF + +/consul-bin/consul services register \ + /consul/service/service.hcl' + + [ "${actual}" = "${exp}" ] +} + +@test "meshGateway/Deployment: mesh-gateway-init init container wanAddress.source=NodeIP" { cd `chart_dir` - local annotations=$(helm template \ + local actual=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ - --set 'meshGateway.wanAddress.source=NodeName' \ + --set 'meshGateway.wanAddress.source=NodeIP' \ . | tee /dev/stderr | - yq -r '.spec.template.metadata.annotations ' | tee /dev/stderr) - - local actual=$(echo $annotations | yq -r '.["consul.hashicorp.com/mesh-gateway-container-port"]' | tee /dev/stderr) - [ "${actual}" = "8443" ] + yq -r '.spec.template.spec.initContainers | map(select(.name == "mesh-gateway-init"))[0] | .command[2]' | tee /dev/stderr) + + exp='WAN_ADDR="${HOST_IP}" +WAN_PORT="443" + +cat > /consul/service/service.hcl << EOF +service { + kind = "mesh-gateway" + name = "mesh-gateway" + port = 8443 + address = "${POD_IP}" + tagged_addresses { + lan { + address = "${POD_IP}" + port = 8443 + } + wan { + address = "${WAN_ADDR}" + port = ${WAN_PORT} + } + } + checks = [ + { + name = "Mesh Gateway Listening" + interval = "10s" + tcp = "${POD_IP}:8443" + deregister_critical_service_after = "6h" + } + ] +} +EOF + +/consul-bin/consul services register \ + /consul/service/service.hcl' + + [ "${actual}" = "${exp}" ] +} + +@test "meshGateway/Deployment: mesh-gateway-init init container wanAddress.source=NodeName" { + cd `chart_dir` + local obj=$(helm template \ + -s templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'meshGateway.wanAddress.source=NodeName' \ + . 
| tee /dev/stderr) - local actual=$(echo $annotations | yq -r '.["consul.hashicorp.com/gateway-wan-address-source"]' | tee /dev/stderr) - [ "${actual}" = "NodeName" ] + local actual=$(echo "$obj" | + yq -r '.spec.template.spec.containers[0].env | map(select(.name == "NODE_NAME")) | length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] - local actual=$(echo $annotations | yq -r '.["consul.hashicorp.com/gateway-wan-port"]' | tee /dev/stderr) - [ "${actual}" = "443" ] + local actual=$(echo "$obj" | + yq -r '.spec.template.spec.initContainers | map(select(.name == "mesh-gateway-init"))[0] | .command[2]' | tee /dev/stderr) + + exp='WAN_ADDR="${NODE_NAME}" +WAN_PORT="443" + +cat > /consul/service/service.hcl << EOF +service { + kind = "mesh-gateway" + name = "mesh-gateway" + port = 8443 + address = "${POD_IP}" + tagged_addresses { + lan { + address = "${POD_IP}" + port = 8443 + } + wan { + address = "${WAN_ADDR}" + port = ${WAN_PORT} + } + } + checks = [ + { + name = "Mesh Gateway Listening" + interval = "10s" + tcp = "${POD_IP}:8443" + deregister_critical_service_after = "6h" + } + ] +} +EOF + +/consul-bin/consul services register \ + /consul/service/service.hcl' + + [ "${actual}" = "${exp}" ] } @test "meshGateway/Deployment: mesh-gateway-init init container wanAddress.source=Static fails if wanAddress.static is empty" { @@ -987,76 +1426,174 @@ key2: value2' \ --set 'meshGateway.wanAddress.source=Static' \ --set 'meshGateway.wanAddress.static=' \ . + [ "$status" -eq 1 ] [[ "$output" =~ "if meshGateway.wanAddress.source=Static then meshGateway.wanAddress.static cannot be empty" ]] } @test "meshGateway/Deployment: mesh-gateway-init init container wanAddress.source=Static" { cd `chart_dir` - local annotations=$(helm template \ + local actual=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'meshGateway.wanAddress.source=Static' \ --set 'meshGateway.wanAddress.static=example.com' \ . 
| tee /dev/stderr |
-      yq -r '.spec.template.metadata.annotations ' | tee /dev/stderr)
-
-  local actual=$(echo $annotations | yq -r '.["consul.hashicorp.com/mesh-gateway-container-port"]' | tee /dev/stderr)
-  [ "${actual}" = "8443" ]
-
-  local actual=$(echo $annotations | yq -r '.["consul.hashicorp.com/gateway-wan-address-source"]' | tee /dev/stderr)
-  [ "${actual}" = "Static" ]
-
-  local actual=$(echo $annotations | yq -r '.["consul.hashicorp.com/gateway-wan-address-static"]' | tee /dev/stderr)
-  [ "${actual}" = "example.com" ]
+      yq -r '.spec.template.spec.initContainers | map(select(.name == "mesh-gateway-init"))[0] | .command[2]' | tee /dev/stderr)
+
+  exp='WAN_ADDR="example.com"
+WAN_PORT="443"
+
+cat > /consul/service/service.hcl << EOF
+service {
+  kind = "mesh-gateway"
+  name = "mesh-gateway"
+  port = 8443
+  address = "${POD_IP}"
+  tagged_addresses {
+    lan {
+      address = "${POD_IP}"
+      port = 8443
+    }
+    wan {
+      address = "${WAN_ADDR}"
+      port = ${WAN_PORT}
+    }
+  }
+  checks = [
+    {
+      name = "Mesh Gateway Listening"
+      interval = "10s"
+      tcp = "${POD_IP}:8443"
+      deregister_critical_service_after = "6h"
+    }
+  ]
+}
+EOF
+
+/consul-bin/consul services register \
+  /consul/service/service.hcl'
+
+  [ "${actual}" = "${exp}" ]
+}
+
+@test "meshGateway/Deployment: mesh-gateway-init init container wanAddress.source=Service fails if service.enabled is false" {
+  cd `chart_dir`
+  run helm template \
+      -s templates/mesh-gateway-deployment.yaml \
+      --set 'meshGateway.enabled=true' \
+      --set 'connectInject.enabled=true' \
+      --set 'meshGateway.wanAddress.source=Service' \
+      --set 'meshGateway.service.enabled=false' \
+      .
 
-  local actual=$(echo $annotations | yq -r '.["consul.hashicorp.com/gateway-wan-port"]' | tee /dev/stderr)
-  [ "${actual}" = "443" ]
+  [ "$status" -eq 1 ]
+  [[ "$output" =~ "if meshGateway.wanAddress.source=Service then meshGateway.service.enabled must be set to true" ]]
 }
 
 @test "meshGateway/Deployment: mesh-gateway-init init container wanAddress.source=Service, type=LoadBalancer" {
   cd `chart_dir`
-  local annotations=$(helm template \
+  local actual=$(helm template \
     -s templates/mesh-gateway-deployment.yaml \
     --set 'meshGateway.enabled=true' \
     --set 'connectInject.enabled=true' \
     --set 'meshGateway.wanAddress.source=Service' \
     --set 'meshGateway.wanAddress.port=ignored' \
+    --set 'meshGateway.service.enabled=true' \
    --set 'meshGateway.service.type=LoadBalancer' \
    . 
| tee /dev/stderr | - yq -r '.spec.template.metadata.annotations ' | tee /dev/stderr) - - local actual=$(echo $annotations | yq -r '.["consul.hashicorp.com/mesh-gateway-container-port"]' | tee /dev/stderr) - [ "${actual}" = "8443" ] - - local actual=$(echo $annotations | yq -r '.["consul.hashicorp.com/gateway-wan-address-source"]' | tee /dev/stderr) - [ "${actual}" = "Service" ] - - local actual=$(echo $annotations | yq -r '.["consul.hashicorp.com/gateway-wan-port"]' | tee /dev/stderr) - [ "${actual}" = "443" ] + yq -r '.spec.template.spec.initContainers | map(select(.name == "mesh-gateway-init"))[0] | .command[2]' | tee /dev/stderr) + + exp='consul-k8s-control-plane service-address \ + -log-level=info \ + -log-json=false \ + -k8s-namespace=default \ + -name=release-name-consul-mesh-gateway \ + -output-file=/tmp/address.txt +WAN_ADDR="$(cat /tmp/address.txt)" +WAN_PORT="443" + +cat > /consul/service/service.hcl << EOF +service { + kind = "mesh-gateway" + name = "mesh-gateway" + port = 8443 + address = "${POD_IP}" + tagged_addresses { + lan { + address = "${POD_IP}" + port = 8443 + } + wan { + address = "${WAN_ADDR}" + port = ${WAN_PORT} + } + } + checks = [ + { + name = "Mesh Gateway Listening" + interval = "10s" + tcp = "${POD_IP}:8443" + deregister_critical_service_after = "6h" + } + ] +} +EOF + +/consul-bin/consul services register \ + /consul/service/service.hcl' + + [ "${actual}" = "${exp}" ] } @test "meshGateway/Deployment: mesh-gateway-init init container wanAddress.source=Service, type=NodePort" { cd `chart_dir` - local annotations=$(helm template \ + local actual=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'meshGateway.wanAddress.source=Service' \ + --set 'meshGateway.service.enabled=true' \ --set 'meshGateway.service.nodePort=9999' \ --set 'meshGateway.service.type=NodePort' \ . | tee /dev/stderr | - yq -r '.spec.template.metadata.annotations ' | tee /dev/stderr) - - local actual=$(echo $annotations | yq -r '.["consul.hashicorp.com/mesh-gateway-container-port"]' | tee /dev/stderr) - [ "${actual}" = "8443" ] - - local actual=$(echo $annotations | yq -r '.["consul.hashicorp.com/gateway-wan-address-source"]' | tee /dev/stderr) - [ "${actual}" = "Service" ] - - local actual=$(echo $annotations | yq -r '.["consul.hashicorp.com/gateway-wan-port"]' | tee /dev/stderr) - [ "${actual}" = "9999" ] + yq -r '.spec.template.spec.initContainers | map(select(.name == "mesh-gateway-init"))[0] | .command[2]' | tee /dev/stderr) + + exp='WAN_ADDR="${HOST_IP}" +WAN_PORT="9999" + +cat > /consul/service/service.hcl << EOF +service { + kind = "mesh-gateway" + name = "mesh-gateway" + port = 8443 + address = "${POD_IP}" + tagged_addresses { + lan { + address = "${POD_IP}" + port = 8443 + } + wan { + address = "${WAN_ADDR}" + port = ${WAN_PORT} + } + } + checks = [ + { + name = "Mesh Gateway Listening" + interval = "10s" + tcp = "${POD_IP}:8443" + deregister_critical_service_after = "6h" + } + ] +} +EOF + +/consul-bin/consul services register \ + /consul/service/service.hcl' + + [ "${actual}" = "${exp}" ] } @test "meshGateway/Deployment: mesh-gateway-init init container wanAddress.source=Service, type=NodePort fails if service.nodePort is null" { @@ -1066,60 +1603,119 @@ key2: value2' \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'meshGateway.wanAddress.source=Service' \ + --set 'meshGateway.service.enabled=true' \ --set 'meshGateway.service.type=NodePort' \ . 
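+  # Editor's note: meshGateway.service.nodePort is deliberately left unset
+  # above. A hypothetical local repro (invocation assumed, not part of CI):
+  #
+  #   cd charts/consul && helm template \
+  #     -s templates/mesh-gateway-deployment.yaml \
+  #     --set meshGateway.enabled=true --set connectInject.enabled=true \
+  #     --set meshGateway.wanAddress.source=Service \
+  #     --set meshGateway.service.enabled=true \
+  #     --set meshGateway.service.type=NodePort .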
+ [ "$status" -eq 1 ] [[ "$output" =~ "if meshGateway.wanAddress.source=Service and meshGateway.service.type=NodePort, meshGateway.service.nodePort must be set" ]] } @test "meshGateway/Deployment: mesh-gateway-init init container wanAddress.source=Service, type=ClusterIP" { cd `chart_dir` - local annotations=$(helm template \ + local actual=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'meshGateway.wanAddress.source=Service' \ --set 'meshGateway.wanAddress.port=ignored' \ + --set 'meshGateway.service.enabled=true' \ --set 'meshGateway.service.type=ClusterIP' \ . | tee /dev/stderr | - yq -r '.spec.template.metadata.annotations ' | tee /dev/stderr) - - local actual=$(echo $annotations | yq -r '.["consul.hashicorp.com/mesh-gateway-container-port"]' | tee /dev/stderr) - [ "${actual}" = "8443" ] - - local actual=$(echo $annotations | yq -r '.["consul.hashicorp.com/gateway-wan-address-source"]' | tee /dev/stderr) - [ "${actual}" = "Service" ] - - local actual=$(echo $annotations | yq -r '.["consul.hashicorp.com/gateway-wan-port"]' | tee /dev/stderr) - [ "${actual}" = "443" ] -} - -@test "meshGateway/Deployment: CA cert volume mount present on the init container when TLS is enabled" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/mesh-gateway-deployment.yaml \ - --set 'meshGateway.enabled=true' \ - --set 'connectInject.enabled=true' \ - --set 'global.tls.enabled=true' \ - . | tee /dev/stderr | - yq '.spec.template.spec.initContainers[0].volumeMounts[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) - [ "${actual}" != "" ] -} - -@test "meshGateway/Deployment: CA cert volume mount present is not present on the init container when TLS is enabled with externalServers and useSystemRoots" { + yq -r '.spec.template.spec.initContainers | map(select(.name == "mesh-gateway-init"))[0] | .command[2]' | tee /dev/stderr) + + exp='consul-k8s-control-plane service-address \ + -log-level=info \ + -log-json=false \ + -k8s-namespace=default \ + -name=release-name-consul-mesh-gateway \ + -output-file=/tmp/address.txt +WAN_ADDR="$(cat /tmp/address.txt)" +WAN_PORT="443" + +cat > /consul/service/service.hcl << EOF +service { + kind = "mesh-gateway" + name = "mesh-gateway" + port = 8443 + address = "${POD_IP}" + tagged_addresses { + lan { + address = "${POD_IP}" + port = 8443 + } + wan { + address = "${WAN_ADDR}" + port = ${WAN_PORT} + } + } + checks = [ + { + name = "Mesh Gateway Listening" + interval = "10s" + tcp = "${POD_IP}:8443" + deregister_critical_service_after = "6h" + } + ] +} +EOF + +/consul-bin/consul services register \ + /consul/service/service.hcl' + + [ "${actual}" = "${exp}" ] +} + +@test "meshGateway/Deployment: mesh-gateway-init init container consulServiceName can be changed" { cd `chart_dir` local actual=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'server.enabled=false' \ - --set 'externalServers.enabled=true' \ - --set 'externalServers.hosts[0]=consul' \ - --set 'externalServers.useSystemRoots=true' \ - . | tee /dev/stderr | - yq '.spec.template.spec.initContainers[0].volumeMounts[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) - [ "${actual}" = "" ] + --set 'meshGateway.consulServiceName=new-name' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.spec.initContainers | map(select(.name == "mesh-gateway-init"))[0] | .command[2]' | tee /dev/stderr) + + exp='consul-k8s-control-plane service-address \ + -log-level=info \ + -log-json=false \ + -k8s-namespace=default \ + -name=release-name-consul-mesh-gateway \ + -output-file=/tmp/address.txt +WAN_ADDR="$(cat /tmp/address.txt)" +WAN_PORT="443" + +cat > /consul/service/service.hcl << EOF +service { + kind = "mesh-gateway" + name = "new-name" + port = 8443 + address = "${POD_IP}" + tagged_addresses { + lan { + address = "${POD_IP}" + port = 8443 + } + wan { + address = "${WAN_ADDR}" + port = ${WAN_PORT} + } + } + checks = [ + { + name = "Mesh Gateway Listening" + interval = "10s" + tcp = "${POD_IP}:8443" + deregister_critical_service_after = "6h" + } + ] +} +EOF + +/consul-bin/consul services register \ + /consul/service/service.hcl' + + [ "${actual}" = "${exp}" ] } #-------------------------------------------------------------------- @@ -1147,7 +1743,7 @@ key2: value2' \ --set 'connectInject.enabled=true' \ --set 'global.enableConsulNamespaces=true' \ . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].args | any(contains("partition"))' | tee /dev/stderr) + yq '.spec.template.spec.containers[0].command | any(contains("partition"))' | tee /dev/stderr) [ "${actual}" = "false" ] } @@ -1161,7 +1757,7 @@ key2: value2' \ --set 'global.adminPartitions.enabled=true' \ --set 'global.enableConsulNamespaces=true' \ . | tee /dev/stderr | - yq '.spec.template.spec.initContainers[0].env[8].value | contains("default")' | tee /dev/stderr) + yq '.spec.template.spec.initContainers[1].command | any(contains("partition = \"default\""))' | tee /dev/stderr) [ "${actual}" = "true" ] } @@ -1175,7 +1771,7 @@ key2: value2' \ --set 'global.adminPartitions.enabled=true' \ --set 'global.enableConsulNamespaces=true' \ . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].args | any(contains("partition=default"))' | tee /dev/stderr) + yq '.spec.template.spec.containers[0].command | any(contains("partition=default"))' | tee /dev/stderr) [ "${actual}" = "true" ] } @@ -1192,6 +1788,37 @@ key2: value2' \ [[ "$output" =~ "global.enableConsulNamespaces must be true if global.adminPartitions.enabled=true" ]] } +#-------------------------------------------------------------------- +# get-auto-encrypt-client-ca + +@test "meshGateway/Deployment: get-auto-encrypt-client-ca uses server's stateful set address by default and passes ca cert" { + cd `chart_dir` + local command=$(helm template \ + -s templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "get-auto-encrypt-client-ca").command | join(" ")' | tee /dev/stderr) + + # check server address + actual=$(echo $command | jq ' . | contains("-server-addr=release-name-consul-server")') + [ "${actual}" = "true" ] + + # check server port + actual=$(echo $command | jq ' . | contains("-server-port=8501")') + [ "${actual}" = "true" ] + + # check server's CA cert + actual=$(echo $command | jq ' . | contains("-ca-file=/consul/tls/ca/tls.crt")') + [ "${actual}" = "true" ] + + # check consul-api-timeout + actual=$(echo $command | jq ' . 
| contains("-consul-api-timeout=5s")') + [ "${actual}" = "true" ] +} + #-------------------------------------------------------------------- # Vault @@ -1289,7 +1916,7 @@ key2: value2' \ @test "meshGateway/Deployment: vault tls annotations are set when tls is enabled" { cd `chart_dir` - local obj=$(helm template \ + local cmd=$(helm template \ -s templates/mesh-gateway-deployment.yaml \ --set 'connectInject.enabled=true' \ --set 'meshGateway.enabled=true' \ @@ -1302,34 +1929,28 @@ key2: value2' \ --set 'server.serverCert.secretName=pki_int/issue/test' \ --set 'global.tls.caCert.secretName=pki_int/cert/ca' \ . | tee /dev/stderr | - yq -r '.spec.template' | tee /dev/stderr) + yq -r '.spec.template.metadata' | tee /dev/stderr) - local actual="$(echo $obj | - yq -r '.metadata.annotations["vault.hashicorp.com/agent-inject-template-serverca.crt"]' | tee /dev/stderr)" + local actual="$(echo $cmd | + yq -r '.annotations["vault.hashicorp.com/agent-inject-template-serverca.crt"]' | tee /dev/stderr)" local expected=$'{{- with secret \"pki_int/cert/ca\" -}}\n{{- .Data.certificate -}}\n{{- end -}}' [ "${actual}" = "${expected}" ] - local actual="$(echo $obj | - yq -r '.metadata.annotations["vault.hashicorp.com/agent-inject-secret-serverca.crt"]' | tee /dev/stderr)" + local actual="$(echo $cmd | + yq -r '.annotations["vault.hashicorp.com/agent-inject-secret-serverca.crt"]' | tee /dev/stderr)" [ "${actual}" = "pki_int/cert/ca" ] - local actual="$(echo $obj | - yq -r '.metadata.annotations["vault.hashicorp.com/agent-init-first"]' | tee /dev/stderr)" + local actual="$(echo $cmd | + yq -r '.annotations["vault.hashicorp.com/agent-init-first"]' | tee /dev/stderr)" [ "${actual}" = "true" ] - local actual="$(echo $obj | - yq -r '.metadata.annotations["vault.hashicorp.com/agent-inject"]' | tee /dev/stderr)" + local actual="$(echo $cmd | + yq -r '.annotations["vault.hashicorp.com/agent-inject"]' | tee /dev/stderr)" [ "${actual}" = "true" ] - local actual="$(echo $obj | - yq -r '.metadata.annotations["vault.hashicorp.com/role"]' | tee /dev/stderr)" + local actual="$(echo $cmd | + yq -r '.annotations["vault.hashicorp.com/role"]' | tee /dev/stderr)" [ "${actual}" = "test" ] - - actual=$(echo $obj | jq -r '.spec.initContainers[0].env[] | select(.name == "CONSUL_CACERT_FILE").value' | tee /dev/stderr) - [ "${actual}" = "/vault/secrets/serverca.crt" ] - - actual=$(echo $obj | jq -r '.spec.containers[0].args | any(contains("-ca-certs=/vault/secrets/serverca.crt"))' | tee /dev/stderr) - [ "${actual}" = "true" ] } #-------------------------------------------------------------------- @@ -1347,7 +1968,7 @@ key2: value2' \ --set 'global.tls.caCert.secretName=foo' \ --set 'global.secretsBackend.vault.consulCARole=carole' \ . 
| tee /dev/stderr | - yq -r '.spec.template.metadata.annotations | del(."consul.hashicorp.com/connect-inject") | del(."vault.hashicorp.com/agent-inject") | del(."vault.hashicorp.com/role") | del(."consul.hashicorp.com/gateway-kind") | del(."consul.hashicorp.com/gateway-wan-address-source") | del(."consul.hashicorp.com/mesh-gateway-container-port") | del(."consul.hashicorp.com/gateway-wan-address-static") | del(."consul.hashicorp.com/gateway-wan-port") | del(."consul.hashicorp.com/gateway-consul-service-name")' | tee /dev/stderr) + yq -r '.spec.template.metadata.annotations | del(."consul.hashicorp.com/connect-inject") | del(."vault.hashicorp.com/agent-inject") | del(."vault.hashicorp.com/role")' | tee /dev/stderr) [ "${actual}" = "{}" ] } @@ -1370,234 +1991,6 @@ key2: value2' \ [ "${actual}" = "bar" ] } -#-------------------------------------------------------------------- -# global.cloud - -@test "meshGateway/Deployment: fails when global.cloud.enabled is true and global.cloud.clientId.secretName is not set but global.cloud.clientSecret.secretName and global.cloud.resourceId.secretName is set" { - cd `chart_dir` - run helm template \ - -s templates/mesh-gateway-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'meshGateway.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientSecret.secretName=client-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-id-key' \ - --set 'global.cloud.resourceId.secretName=client-resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=client-resource-id-key' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]] -} - -@test "meshGateway/Deployment: fails when global.cloud.enabled is true and global.cloud.clientSecret.secretName is not set but global.cloud.clientId.secretName and global.cloud.resourceId.secretName is set" { - cd `chart_dir` - run helm template \ - -s templates/mesh-gateway-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'meshGateway.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." 
]] -} - -@test "meshGateway/Deployment: fails when global.cloud.enabled is true and global.cloud.resourceId.secretName is not set but global.cloud.clientId.secretName and global.cloud.clientSecret.secretName is set" { - cd `chart_dir` - run helm template \ - -s templates/mesh-gateway-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'meshGateway.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]] -} - -@test "meshGateway/Deployment: fails when global.cloud.resourceId.secretName is set but global.cloud.resourceId.secretKey is not set." { - cd `chart_dir` - run helm template \ - -s templates/mesh-gateway-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'meshGateway.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When either global.cloud.resourceId.secretName or global.cloud.resourceId.secretKey is defined, both must be set." ]] -} - -@test "meshGateway/Deployment: fails when global.cloud.authURL.secretName is set but global.cloud.authURL.secretKey is not set." { - cd `chart_dir` - run helm template \ - -s templates/mesh-gateway-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'meshGateway.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.authUrl.secretName=auth-url-name' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.authUrl.secretName or global.cloud.authUrl.secretKey is defined, both must be set." ]] -} - -@test "meshGateway/Deployment: fails when global.cloud.authURL.secretKey is set but global.cloud.authURL.secretName is not set." 
{ - cd `chart_dir` - run helm template \ - -s templates/mesh-gateway-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'meshGateway.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.authUrl.secretKey=auth-url-key' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.authUrl.secretName or global.cloud.authUrl.secretKey is defined, both must be set." ]] -} - -@test "meshGateway/Deployment: fails when global.cloud.apiHost.secretName is set but global.cloud.apiHost.secretKey is not set." { - cd `chart_dir` - run helm template \ - -s templates/mesh-gateway-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'meshGateway.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.apiHost.secretName=auth-url-name' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.apiHost.secretName or global.cloud.apiHost.secretKey is defined, both must be set." ]] -} - -@test "meshGateway/Deployment: fails when global.cloud.apiHost.secretKey is set but global.cloud.apiHost.secretName is not set." { - cd `chart_dir` - run helm template \ - -s templates/mesh-gateway-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'meshGateway.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.apiHost.secretKey=auth-url-key' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.apiHost.secretName or global.cloud.apiHost.secretKey is defined, both must be set." ]] -} - -@test "meshGateway/Deployment: fails when global.cloud.scadaAddress.secretName is set but global.cloud.scadaAddress.secretKey is not set." 
{ - cd `chart_dir` - run helm template \ - -s templates/mesh-gateway-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'meshGateway.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.scadaAddress.secretName=scada-address-name' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.scadaAddress.secretName or global.cloud.scadaAddress.secretKey is defined, both must be set." ]] -} - -@test "meshGateway/Deployment: fails when global.cloud.scadaAddress.secretKey is set but global.cloud.scadaAddress.secretName is not set." { - cd `chart_dir` - run helm template \ - -s templates/mesh-gateway-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'meshGateway.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.scadaAddress.secretKey=scada-address-key' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.scadaAddress.secretName or global.cloud.scadaAddress.secretKey is defined, both must be set." ]] -} - -@test "meshGateway/Deployment: sets TLS server name if global.cloud.enabled is set" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/mesh-gateway-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'meshGateway.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - . 
| tee /dev/stderr | - yq '.spec.template.spec.containers[0].args | any(contains("-tls-server-name=server.dc1.consul"))' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - #-------------------------------------------------------------------- # extraLabels diff --git a/charts/consul/test/unit/mesh-gateway-service.bats b/charts/consul/test/unit/mesh-gateway-service.bats index acedeb22b3..60ca3a3503 100755 --- a/charts/consul/test/unit/mesh-gateway-service.bats +++ b/charts/consul/test/unit/mesh-gateway-service.bats @@ -20,12 +20,13 @@ load _helpers [ "${actual}" = "true" ] } -@test "meshGateway/Service: enabled with meshGateway.enabled=true" { +@test "meshGateway/Service: enabled with meshGateway.enabled=true meshGateway.service.enabled" { cd `chart_dir` local actual=$(helm template \ -s templates/mesh-gateway-service.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ + --set 'meshGateway.service.enabled=true' \ . | tee /dev/stderr | yq 'length > 0' | tee /dev/stderr) [ "${actual}" = "true" ] @@ -40,6 +41,7 @@ load _helpers -s templates/mesh-gateway-service.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ + --set 'meshGateway.service.enabled=true' \ . | tee /dev/stderr | yq -r '.metadata.annotations' | tee /dev/stderr) [ "${actual}" = "null" ] @@ -51,6 +53,7 @@ load _helpers -s templates/mesh-gateway-service.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ + --set 'meshGateway.service.enabled=true' \ --set 'meshGateway.service.annotations=key: value' \ . | tee /dev/stderr | yq -r '.metadata.annotations.key' | tee /dev/stderr) @@ -66,6 +69,7 @@ load _helpers -s templates/mesh-gateway-service.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ + --set 'meshGateway.service.enabled=true' \ . | tee /dev/stderr | yq -r '.spec.ports[0].port' | tee /dev/stderr) [ "${actual}" = "443" ] @@ -77,6 +81,7 @@ load _helpers -s templates/mesh-gateway-service.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ + --set 'meshGateway.service.enabled=true' \ --set 'meshGateway.service.port=8443' \ . | tee /dev/stderr | yq -r '.spec.ports[0].port' | tee /dev/stderr) @@ -92,6 +97,7 @@ load _helpers -s templates/mesh-gateway-service.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ + --set 'meshGateway.service.enabled=true' \ . | tee /dev/stderr | yq -r '.spec.ports[0].targetPort' | tee /dev/stderr) [ "${actual}" = "8443" ] @@ -103,6 +109,7 @@ load _helpers -s templates/mesh-gateway-service.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ + --set 'meshGateway.service.enabled=true' \ --set 'meshGateway.containerPort=9443' \ . | tee /dev/stderr | yq -r '.spec.ports[0].targetPort' | tee /dev/stderr) @@ -118,6 +125,7 @@ load _helpers -s templates/mesh-gateway-service.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ + --set 'meshGateway.service.enabled=true' \ . | tee /dev/stderr | yq -r '.spec.ports[0].nodePort' | tee /dev/stderr) [ "${actual}" = "null" ] @@ -129,6 +137,7 @@ load _helpers -s templates/mesh-gateway-service.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ + --set 'meshGateway.service.enabled=true' \ --set 'meshGateway.service.nodePort=8443' \ . 
| tee /dev/stderr | yq -r '.spec.ports[0].nodePort' | tee /dev/stderr) @@ -144,6 +153,7 @@ load _helpers -s templates/mesh-gateway-service.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ + --set 'meshGateway.service.enabled=true' \ . | tee /dev/stderr | yq -r '.spec.type' | tee /dev/stderr) [ "${actual}" = "LoadBalancer" ] @@ -155,6 +165,7 @@ load _helpers -s templates/mesh-gateway-service.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ + --set 'meshGateway.service.enabled=true' \ --set 'meshGateway.service.type=ClusterIP' \ . | tee /dev/stderr | yq -r '.spec.type' | tee /dev/stderr) @@ -170,6 +181,7 @@ load _helpers -s templates/mesh-gateway-service.yaml \ --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ + --set 'meshGateway.service.enabled=true' \ --set 'meshGateway.service.additionalSpec=key: value' \ . | tee /dev/stderr | yq -r '.spec.key' | tee /dev/stderr) diff --git a/charts/consul/test/unit/partition-init-job.bats b/charts/consul/test/unit/partition-init-job.bats index a3524090aa..75c6df70ca 100644 --- a/charts/consul/test/unit/partition-init-job.bats +++ b/charts/consul/test/unit/partition-init-job.bats @@ -6,15 +6,14 @@ load _helpers cd `chart_dir` assert_empty helm template \ -s templates/partition-init-job.yaml \ - . + . } -@test "partitionInit/Job: enabled with global.adminPartitions.enabled=true and server.enabled=false" { +@test "partitionInit/Job: enabled with global.adminPartitions.enabled=true and servers = false" { cd `chart_dir` local actual=$(helm template \ -s templates/partition-init-job.yaml \ --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ --set 'server.enabled=false' \ --set 'global.adminPartitions.name=bar' \ --set 'externalServers.enabled=true' \ @@ -29,7 +28,6 @@ load _helpers assert_empty helm template \ -s templates/partition-init-job.yaml \ --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ --set 'server.enabled=true' \ . } @@ -39,7 +37,6 @@ load _helpers assert_empty helm template \ -s templates/partition-init-job.yaml \ --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ --set 'server.enabled=false' \ . } @@ -49,7 +46,6 @@ load _helpers assert_empty helm template \ -s templates/partition-init-job.yaml \ --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ --set 'global.enabled=true' \ . } @@ -59,7 +55,6 @@ load _helpers assert_empty helm template \ -s templates/partition-init-job.yaml \ --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ --set 'server.enabled=true' \ . } @@ -70,80 +65,62 @@ load _helpers -s templates/partition-init-job.yaml \ --set 'global.adminPartitions.enabled=true' \ --set 'global.adminPartitions.name=bar' \ - --set 'global.enableConsulNamespaces=true' \ --set 'server.enabled=false' \ --set 'externalServers.enabled=false' . [ "$status" -eq 1 ] [[ "$output" =~ "externalServers.enabled needs to be true and configured to create a non-default partition." 
]] } -@test "partitionInit/Job: consul env defaults" { +@test "partitionInit/Job: command defaults" { cd `chart_dir` - local env=$(helm template \ - -s templates/partition-init-job.yaml \ + local command=$(helm template \ + -s templates/partition-init-job.yaml \ + --set 'global.enabled=false' \ --set 'global.adminPartitions.enabled=true' \ --set 'global.adminPartitions.name=bar' \ - --set 'global.enableConsulNamespaces=true' \ --set 'externalServers.enabled=true' \ --set 'externalServers.hosts[0]=foo' \ - --set 'server.enabled=false' \ . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) - - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_ADDRESSES").value' | tee /dev/stderr) - [ "${actual}" = "foo" ] - - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_GRPC_PORT").value' | tee /dev/stderr) - [ "${actual}" = "8502" ] + yq -r '.spec.template.spec.containers[0].command' | tee /dev/stderr) - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_HTTP_PORT").value' | tee /dev/stderr) - [ "${actual}" = "8501" ] - - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_DATACENTER").value' | tee /dev/stderr) - [ "${actual}" = "dc1" ] + local actual + actual=$(echo $command | jq -r '. | any(contains("consul-k8s-control-plane partition-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_API_TIMEOUT").value' | tee /dev/stderr) - [ "${actual}" = "5s" ] + actual=$(echo $command | jq -r '. | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr) + [ "${actual}" = "true" ] } #-------------------------------------------------------------------- # global.tls.enabled -@test "partitionInit/Job: sets TLS env vars when global.tls.enabled" { +@test "partitionInit/Job: sets TLS flags when global.tls.enabled" { cd `chart_dir` - local env=$(helm template \ + local command=$(helm template \ -s templates/partition-init-job.yaml \ --set 'global.enabled=false' \ --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ --set 'global.tls.enabled=true' \ --set 'global.adminPartitions.name=bar' \ --set 'externalServers.enabled=true' \ --set 'externalServers.hosts[0]=foo' \ . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) + yq -r '.spec.template.spec.containers[0].command' | tee /dev/stderr) - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_HTTP_PORT").value' | tee /dev/stderr) - [ "${actual}" = "8501" ] + local actual + actual=$(echo $command | jq -r '. | any(contains("-use-https"))' | tee /dev/stderr) + [ "${actual}" = "true" ] - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_USE_TLS").value' | tee /dev/stderr) + actual=$(echo $command | jq -r '. | any(contains("-ca-file=/consul/tls/ca/tls.crt"))' | tee /dev/stderr) [ "${actual}" = "true" ] - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_CACERT_FILE").value' | tee /dev/stderr) - [ "${actual}" = "/consul/tls/ca/tls.crt" ] + actual=$(echo $command | jq -r '. 
| any(contains("-server-port=8501"))' | tee /dev/stderr) + [ "${actual}" = "true" ] } -@test "partitionInit/Job: does not set consul ca cert when .externalServers.useSystemRoots is true" { +@test "partitionInit/Job: does not set consul ca cert or server-port when .externalServers.useSystemRoots is true" { cd `chart_dir` - local spec=$(helm template \ + local command=$(helm template \ -s templates/partition-init-job.yaml \ --set 'global.enabled=false' \ --set 'global.adminPartitions.enabled=true' \ @@ -153,19 +130,11 @@ load _helpers --set 'externalServers.hosts[0]=foo' \ --set 'externalServers.useSystemRoots=true' \ . | tee /dev/stderr | - yq -r '.spec.template.spec' | tee /dev/stderr) - - local actual=$(echo "$env" | - jq -r '.containers[0].env[] | select( .name == "CONSUL_CACERT_FILE").value' | tee /dev/stderr) - [ "${actual}" = "" ] + yq -r '.spec.template.spec.containers[0].command' | tee /dev/stderr) - local actual=$(echo "$env" | - jq -r '.volumes[] | select( .name == "consul-ca-cert")' | tee /dev/stderr) - [ "${actual}" = "" ] - - local actual=$(echo "$env" | - jq -r '.spec.volumeMounts[] | select( .name == "consul-ca-cert")' | tee /dev/stderr) - [ "${actual}" = "" ] + local actual + actual=$(echo $command | jq -r '. | any(contains("-ca-file=/consul/tls/ca/tls.crt"))' | tee /dev/stderr) + [ "${actual}" = "false" ] } @test "partitionInit/Job: can overwrite CA secret with the provided one" { @@ -175,7 +144,6 @@ load _helpers --set 'global.enabled=false' \ --set 'global.adminPartitions.enabled=true' \ --set 'global.adminPartitions.name=bar' \ - --set 'global.enableConsulNamespaces=true' \ --set 'externalServers.enabled=true' \ --set 'externalServers.hosts[0]=foo' \ --set 'global.tls.enabled=true' \ @@ -199,20 +167,19 @@ load _helpers #-------------------------------------------------------------------- # global.acls.bootstrapToken -@test "partitionInit/Job: CONSUL_ACL_TOKEN is set when global.acls.bootstrapToken is provided" { +@test "partitionInit/Job: HTTP_TOKEN is set when global.acls.bootstrapToken is provided" { cd `chart_dir` local actual=$(helm template \ -s templates/partition-init-job.yaml \ --set 'global.enabled=false' \ --set 'global.adminPartitions.enabled=true' \ --set 'global.adminPartitions.name=bar' \ - --set 'global.enableConsulNamespaces=true' \ --set 'externalServers.enabled=true' \ --set 'externalServers.hosts[0]=foo' \ --set 'global.acls.bootstrapToken.secretName=partition-token' \ --set 'global.acls.bootstrapToken.secretKey=token' \ . 
| tee /dev/stderr | - yq '[.spec.template.spec.containers[0].env[].name] | any(contains("CONSUL_ACL_TOKEN"))' | tee /dev/stderr) + yq '[.spec.template.spec.containers[0].env[].name] | any(contains("CONSUL_HTTP_TOKEN"))' | tee /dev/stderr) [ "${actual}" = "true" ] } @@ -262,7 +229,6 @@ reservedNameTest() { --set 'global.enabled=false' \ --set 'global.adminPartitions.enabled=true' \ --set "global.adminPartitions.name=bar" \ - --set 'global.enableConsulNamespaces=true' \ --set 'global.acls.manageSystemACLs=true' \ --set 'global.acls.bootstrapToken.secretName=boot' \ --set 'global.acls.bootstrapToken.secretKey=token' \ @@ -283,7 +249,6 @@ reservedNameTest() { --set 'global.enabled=false' \ --set 'global.adminPartitions.enabled=true' \ --set "global.adminPartitions.name=bar" \ - --set 'global.enableConsulNamespaces=true' \ --set 'externalServers.enabled=true' \ --set 'externalServers.hosts[0]=foo' \ --set 'global.secretsBackend.vault.enabled=true' \ @@ -315,7 +280,7 @@ reservedNameTest() { [ "${actual}" = "${expected}" ] # Check that the bootstrap token flag is set to the path of the Vault secret. - local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="partition-init-job").env[] | select(.name=="CONSUL_ACL_TOKEN_FILE").value') + local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="partition-init-job").env[] | select(.name=="CONSUL_HTTP_TOKEN_FILE").value') [ "${actual}" = "/vault/secrets/bootstrap-token" ] # Check that no (secret) volumes are not attached @@ -333,7 +298,6 @@ reservedNameTest() { --set 'global.enabled=false' \ --set 'global.adminPartitions.enabled=true' \ --set "global.adminPartitions.name=bar" \ - --set 'global.enableConsulNamespaces=true' \ --set 'externalServers.enabled=true' \ --set 'externalServers.hosts[0]=foo' \ --set 'global.secretsBackend.vault.enabled=true' \ @@ -377,7 +341,6 @@ reservedNameTest() { --set 'global.enabled=false' \ --set 'global.adminPartitions.enabled=true' \ --set "global.adminPartitions.name=bar" \ - --set 'global.enableConsulNamespaces=true' \ --set 'externalServers.enabled=true' \ --set 'externalServers.hosts[0]=foo' \ --set 'global.tls.enabled=true' \ @@ -419,7 +382,7 @@ reservedNameTest() { [ "${actual}" = "${expected}" ] # Check that the bootstrap token flag is set to the path of the Vault secret. 
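# Illustrative sketch (not part of the patch): the token assertions above all share one
# pattern: render a single template with `helm template -s`, then walk the pod spec with
# yq/jq. A standalone version of that lookup, with the values taken from the
# bootstrapToken test above and the yq selector rewritten as an exact-name match, is:
#
#   helm template -s templates/partition-init-job.yaml \
#     --set 'global.enabled=false' \
#     --set 'global.adminPartitions.enabled=true' \
#     --set 'global.adminPartitions.name=bar' \
#     --set 'externalServers.enabled=true' \
#     --set 'externalServers.hosts[0]=foo' \
#     --set 'global.acls.bootstrapToken.secretName=partition-token' \
#     --set 'global.acls.bootstrapToken.secretKey=token' \
#     . | yq -r '.spec.template.spec.containers[0].env[]
#                | select(.name == "CONSUL_HTTP_TOKEN").name'
#
# which is expected to print CONSUL_HTTP_TOKEN, matching the renamed assertion above.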
- local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="partition-init-job").env[] | select(.name=="CONSUL_ACL_TOKEN_FILE").value') + local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="partition-init-job").env[] | select(.name=="CONSUL_HTTP_TOKEN_FILE").value') [ "${actual}" = "/vault/secrets/bootstrap-token" ] # Check that the consul-ca-cert volume is not attached @@ -437,7 +400,6 @@ reservedNameTest() { --set 'global.enabled=false' \ --set 'global.adminPartitions.enabled=true' \ --set "global.adminPartitions.name=bar" \ - --set 'global.enableConsulNamespaces=true' \ --set 'externalServers.enabled=true' \ --set 'externalServers.hosts[0]=foo' \ --set 'global.tls.enabled=true' \ @@ -462,7 +424,6 @@ reservedNameTest() { --set 'global.enabled=false' \ --set 'global.adminPartitions.enabled=true' \ --set "global.adminPartitions.name=bar" \ - --set 'global.enableConsulNamespaces=true' \ --set 'externalServers.enabled=true' \ --set 'externalServers.hosts[0]=foo' \ --set 'global.tls.enabled=true' \ @@ -488,7 +449,6 @@ reservedNameTest() { --set 'global.enabled=false' \ --set 'global.adminPartitions.enabled=true' \ --set "global.adminPartitions.name=bar" \ - --set 'global.enableConsulNamespaces=true' \ --set 'externalServers.enabled=true' \ --set 'externalServers.hosts[0]=foo' \ --set 'global.tls.enabled=true' \ @@ -514,7 +474,6 @@ reservedNameTest() { --set 'global.enabled=false' \ --set 'global.adminPartitions.enabled=true' \ --set "global.adminPartitions.name=bar" \ - --set 'global.enableConsulNamespaces=true' \ --set 'externalServers.enabled=true' \ --set 'externalServers.hosts[0]=foo' \ --set 'global.tls.enabled=true' \ @@ -544,7 +503,6 @@ reservedNameTest() { --set 'global.enabled=false' \ --set 'global.adminPartitions.enabled=true' \ --set "global.adminPartitions.name=bar" \ - --set 'global.enableConsulNamespaces=true' \ --set 'externalServers.enabled=true' \ --set 'externalServers.hosts[0]=foo' \ --set 'global.tls.enabled=true' \ @@ -566,7 +524,6 @@ reservedNameTest() { --set 'global.enabled=false' \ --set 'global.adminPartitions.enabled=true' \ --set "global.adminPartitions.name=bar" \ - --set 'global.enableConsulNamespaces=true' \ --set 'externalServers.enabled=true' \ --set 'externalServers.hosts[0]=foo' \ --set 'global.tls.enabled=true' \ @@ -582,304 +539,6 @@ reservedNameTest() { [ "${actual}" = "bar" ] } -#-------------------------------------------------------------------- -# global.cloud - -@test "partitionInit/Job: fails when global.cloud.enabled is true and global.cloud.clientId.secretName is not set but global.cloud.clientSecret.secretName and global.cloud.resourceId.secretName is set" { - cd `chart_dir` - run helm template \ - -s templates/partition-init-job.yaml \ - --set 'global.enabled=false' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ - --set "global.adminPartitions.name=bar" \ - --set 'externalServers.enabled=true' \ - --set 'externalServers.hosts[0]=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.tls.caCert.secretName=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientSecret.secretName=client-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-id-key' \ - --set 'global.cloud.resourceId.secretName=client-resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=client-resource-id-key' \ - . 
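# Illustrative sketch (not part of the patch): the removed global.cloud tests above and
# below all exercise the same rule: secretName and secretKey must be provided as a pair.
# A plain-bash restatement of that rule (an assumption about what the chart templates
# enforce, not the chart's actual Go-template code):
#
#   check_pair() {
#     local field="$1" name="$2" key="$3"
#     # Exactly one half of the pair being set is the failure case the tests assert.
#     if { [ -n "$name" ] && [ -z "$key" ]; } || { [ -z "$name" ] && [ -n "$key" ]; }; then
#       echo "When either global.cloud.${field}.secretName or global.cloud.${field}.secretKey is defined, both must be set." >&2
#       return 1
#     fi
#   }
#
#   check_pair resourceId resource-id-name ""   # returns 1, like the failing tests here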
- [ "$status" -eq 1 ] - [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]] -} - -@test "partitionInit/Job: fails when global.cloud.enabled is true and global.cloud.clientSecret.secretName is not set but global.cloud.clientId.secretName and global.cloud.resourceId.secretName is set" { - cd `chart_dir` - run helm template \ - -s templates/mesh-gateway-deployment.yaml \ - --set 'connectInject.enabled=true' \ - --set 'meshGateway.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]] -} - -@test "partitionInit/Job: fails when global.cloud.enabled is true and global.cloud.resourceId.secretName is not set but global.cloud.clientId.secretName and global.cloud.clientSecret.secretName is set" { - cd `chart_dir` - run helm template \ - -s templates/partition-init-job.yaml \ - --set 'global.enabled=false' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ - --set "global.adminPartitions.name=bar" \ - --set 'externalServers.enabled=true' \ - --set 'externalServers.hosts[0]=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.tls.caCert.secretName=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]] -} - -@test "partitionInit/Job: fails when global.cloud.resourceId.secretName is set but global.cloud.resourceId.secretKey is not set." { - cd `chart_dir` - run helm template \ - -s templates/partition-init-job.yaml \ - --set 'global.enabled=false' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ - --set "global.adminPartitions.name=bar" \ - --set 'externalServers.enabled=true' \ - --set 'externalServers.hosts[0]=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.tls.caCert.secretName=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - . 
- [ "$status" -eq 1 ] - [[ "$output" =~ "When either global.cloud.resourceId.secretName or global.cloud.resourceId.secretKey is defined, both must be set." ]] -} - -@test "partitionInit/Job: fails when global.cloud.authURL.secretName is set but global.cloud.authURL.secretKey is not set." { - cd `chart_dir` - run helm template \ - -s templates/partition-init-job.yaml \ - --set 'global.enabled=false' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ - --set "global.adminPartitions.name=bar" \ - --set 'externalServers.enabled=true' \ - --set 'externalServers.hosts[0]=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.tls.caCert.secretName=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.authUrl.secretName=auth-url-name' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.authUrl.secretName or global.cloud.authUrl.secretKey is defined, both must be set." ]] -} - -@test "partitionInit/Job: fails when global.cloud.authURL.secretKey is set but global.cloud.authURL.secretName is not set." { - cd `chart_dir` - run helm template \ - -s templates/partition-init-job.yaml \ - --set 'global.enabled=false' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ - --set "global.adminPartitions.name=bar" \ - --set 'externalServers.enabled=true' \ - --set 'externalServers.hosts[0]=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.tls.caCert.secretName=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.authUrl.secretKey=auth-url-key' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.authUrl.secretName or global.cloud.authUrl.secretKey is defined, both must be set." ]] -} - -@test "partitionInit/Job: fails when global.cloud.apiHost.secretName is set but global.cloud.apiHost.secretKey is not set." 
{ - cd `chart_dir` - run helm template \ - -s templates/partition-init-job.yaml \ - --set 'global.enabled=false' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ - --set "global.adminPartitions.name=bar" \ - --set 'externalServers.enabled=true' \ - --set 'externalServers.hosts[0]=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.tls.caCert.secretName=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.apiHost.secretName=auth-url-name' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.apiHost.secretName or global.cloud.apiHost.secretKey is defined, both must be set." ]] -} - -@test "partitionInit/Job: fails when global.cloud.apiHost.secretKey is set but global.cloud.apiHost.secretName is not set." { - cd `chart_dir` - run helm template \ - -s templates/partition-init-job.yaml \ - --set 'global.enabled=false' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ - --set "global.adminPartitions.name=bar" \ - --set 'externalServers.enabled=true' \ - --set 'externalServers.hosts[0]=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.tls.caCert.secretName=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.apiHost.secretKey=auth-url-key' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.apiHost.secretName or global.cloud.apiHost.secretKey is defined, both must be set." ]] -} - -@test "partitionInit/Job: fails when global.cloud.scadaAddress.secretName is set but global.cloud.scadaAddress.secretKey is not set." 
{ - cd `chart_dir` - run helm template \ - -s templates/partition-init-job.yaml \ - --set 'global.enabled=false' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ - --set "global.adminPartitions.name=bar" \ - --set 'externalServers.enabled=true' \ - --set 'externalServers.hosts[0]=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.tls.caCert.secretName=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.scadaAddress.secretName=scada-address-name' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.scadaAddress.secretName or global.cloud.scadaAddress.secretKey is defined, both must be set." ]] -} - -@test "partitionInit/Job: fails when global.cloud.scadaAddress.secretKey is set but global.cloud.scadaAddress.secretName is not set." { - cd `chart_dir` - run helm template \ - -s templates/partition-init-job.yaml \ - --set 'global.enabled=false' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ - --set "global.adminPartitions.name=bar" \ - --set 'externalServers.enabled=true' \ - --set 'externalServers.hosts[0]=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.tls.caCert.secretName=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.scadaAddress.secretKey=scada-address-key' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.scadaAddress.secretName or global.cloud.scadaAddress.secretKey is defined, both must be set." 
]] -} - -@test "partitionInit/Job: sets TLS server name if global.cloud.enabled is set" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/partition-init-job.yaml \ - --set 'global.enabled=false' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ - --set "global.adminPartitions.name=bar" \ - --set 'externalServers.enabled=true' \ - --set 'externalServers.hosts[0]=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.tls.caCert.secretName=foo' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].command | any(contains("-tls-server-name=server.dc1.consul"))' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - #-------------------------------------------------------------------- # extraLabels diff --git a/charts/consul/test/unit/partition-init-podsecuritypolicy.bats b/charts/consul/test/unit/partition-init-podsecuritypolicy.bats index 8f21cc9f56..c7d4ce4ddd 100644 --- a/charts/consul/test/unit/partition-init-podsecuritypolicy.bats +++ b/charts/consul/test/unit/partition-init-podsecuritypolicy.bats @@ -97,4 +97,4 @@ load _helpers --set 'global.enablePodSecurityPolicies=false' \ --set 'server.enabled=true' \ . -} +} \ No newline at end of file diff --git a/charts/consul/test/unit/partition-init-role.bats b/charts/consul/test/unit/partition-init-role.bats index 16a5b980b3..c434aa3d87 100644 --- a/charts/consul/test/unit/partition-init-role.bats +++ b/charts/consul/test/unit/partition-init-role.bats @@ -9,24 +9,22 @@ load _helpers . } -@test "partitionInit/Role: enabled with global.adminPartitions.enabled=true and server.enabled=false" { +@test "partitionInit/Role: enabled with global.adminPartitions.enabled=true and servers = false" { cd `chart_dir` local actual=$(helm template \ -s templates/partition-init-role.yaml \ --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ --set 'server.enabled=false' \ . | tee /dev/stderr | yq 'length > 0' | tee /dev/stderr) [ "${actual}" = "true" ] } -@test "partitionInit/Role: disabled with global.adminPartitions.enabled=true and server.enabled=true" { +@test "partitionInit/Role: disabled with global.adminPartitions.enabled=true and servers = true" { cd `chart_dir` assert_empty helm template \ -s templates/partition-init-role.yaml \ --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ --set 'server.enabled=true' \ . } @@ -36,7 +34,6 @@ load _helpers assert_empty helm template \ -s templates/partition-init-role.yaml \ --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ --set 'global.enabled=true' \ . } @@ -46,7 +43,6 @@ load _helpers assert_empty helm template \ -s templates/partition-init-role.yaml \ --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ --set 'server.enabled=true' \ . 
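# Illustrative sketch (not part of the patch): the "\ No newline at end of file" markers
# in these hunks mean the updated .bats files now end without a trailing newline. POSIX
# text tools expect one, so a tree-wide check/fix could look like this (the glob is
# illustrative):
#
#   for f in charts/consul/test/unit/*.bats; do
#     # append a newline only when the last byte of the file is not already one
#     [ -n "$(tail -c 1 "$f")" ] && printf '\n' >> "$f"
#   done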
-} +} \ No newline at end of file diff --git a/charts/consul/test/unit/partition-init-rolebinding.bats b/charts/consul/test/unit/partition-init-rolebinding.bats index f8af27cb21..d96f6e6cd3 100644 --- a/charts/consul/test/unit/partition-init-rolebinding.bats +++ b/charts/consul/test/unit/partition-init-rolebinding.bats @@ -14,7 +14,6 @@ load _helpers local actual=$(helm template \ -s templates/partition-init-rolebinding.yaml \ --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ --set 'server.enabled=false' \ . | tee /dev/stderr | yq 'length > 0' | tee /dev/stderr) @@ -26,7 +25,6 @@ load _helpers assert_empty helm template \ -s templates/partition-init-rolebinding.yaml \ --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ --set 'server.enabled=true' \ . } @@ -36,7 +34,6 @@ load _helpers assert_empty helm template \ -s templates/partition-init-rolebinding.yaml \ --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ --set 'global.enabled=true' \ . } @@ -46,7 +43,6 @@ load _helpers assert_empty helm template \ -s templates/partition-init-rolebinding.yaml \ --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ --set 'server.enabled=true' \ . -} +} \ No newline at end of file diff --git a/charts/consul/test/unit/partition-init-serviceaccount.bats b/charts/consul/test/unit/partition-init-serviceaccount.bats index 155e6d9e28..6195969686 100644 --- a/charts/consul/test/unit/partition-init-serviceaccount.bats +++ b/charts/consul/test/unit/partition-init-serviceaccount.bats @@ -14,7 +14,6 @@ load _helpers local actual=$(helm template \ -s templates/partition-init-serviceaccount.yaml \ --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ --set 'server.enabled=false' \ . | tee /dev/stderr | yq 'length > 0' | tee /dev/stderr) @@ -26,7 +25,6 @@ load _helpers assert_empty helm template \ -s templates/partition-init-serviceaccount.yaml \ --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ --set 'server.enabled=true' \ . } @@ -36,7 +34,6 @@ load _helpers assert_empty helm template \ -s templates/partition-init-serviceaccount.yaml \ --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ --set 'global.enabled=true' \ . } @@ -46,7 +43,6 @@ load _helpers assert_empty helm template \ -s templates/partition-init-serviceaccount.yaml \ --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ --set 'server.enabled=true' \ . -} +} \ No newline at end of file diff --git a/charts/consul/test/unit/partition-name-configmap.bats b/charts/consul/test/unit/partition-name-configmap.bats index 40e65ca3c5..e516c9ae13 100644 --- a/charts/consul/test/unit/partition-name-configmap.bats +++ b/charts/consul/test/unit/partition-name-configmap.bats @@ -14,7 +14,6 @@ load _helpers local actual=$(helm template \ -s templates/partition-init-role.yaml \ --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ --set 'server.enabled=false' \ . | tee /dev/stderr | yq 'length > 0' | tee /dev/stderr) @@ -26,7 +25,6 @@ load _helpers assert_empty helm template \ -s templates/partition-init-role.yaml \ --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ --set 'server.enabled=true' \ . 
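# Illustrative sketch (not part of the patch): the partition-init test hunks above drop
# `global.enableConsulNamespaces=true` everywhere, so after this patch the
# Role/RoleBinding/ServiceAccount (and the configmap test's) templates render from the
# admin-partition settings alone. The minimal rendering check therefore reduces to:
#
#   helm template -s templates/partition-init-role.yaml \
#     --set 'global.adminPartitions.enabled=true' \
#     --set 'server.enabled=false' \
#     . | yq 'length > 0'   # expected output: true, per the updated tests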
} @@ -36,7 +34,6 @@ load _helpers assert_empty helm template \ -s templates/partition-init-role.yaml \ --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ --set 'global.enabled=true' \ . } @@ -47,4 +44,4 @@ load _helpers -s templates/partition-init-role.yaml \ --set 'global.adminPartitions.enabled=false' \ . -} +} \ No newline at end of file diff --git a/charts/consul/test/unit/partition-service.bats b/charts/consul/test/unit/partition-service.bats new file mode 100755 index 0000000000..b772b32d5e --- /dev/null +++ b/charts/consul/test/unit/partition-service.bats @@ -0,0 +1,133 @@ +#!/usr/bin/env bats + +load _helpers + +@test "partition/Service: disabled by default" { + cd `chart_dir` + assert_empty helm template \ + -s templates/partition-service.yaml \ + . +} + +@test "partition/Service: enable with global.enabled false" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/partition-service.yaml \ + --set 'global.enabled=false' \ + --set 'server.enabled=true' \ + --set 'global.adminPartitions.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "partition/Service: disable with adminPartitions.enabled" { + cd `chart_dir` + assert_empty helm template \ + -s templates/partition-service.yaml \ + --set 'global.adminPartitions.enabled=false' \ + . +} + +@test "partition/Service: disable with server.enabled" { + cd `chart_dir` + assert_empty helm template \ + -s templates/partition-service.yaml \ + --set 'global.adminPartitions.enabled=true' \ + --set 'server.enabled=false' \ + . +} + +@test "partition/Service: disable with global.enabled" { + cd `chart_dir` + assert_empty helm template \ + -s templates/partition-service.yaml \ + --set 'global.enabled=false' \ + . +} + +#-------------------------------------------------------------------- +# annotations + +@test "partition/Service: no annotations by default" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/partition-service.yaml \ + --set 'global.adminPartitions.enabled=true' \ + . | tee /dev/stderr | + yq -r '.metadata.annotations | length' | tee /dev/stderr) + [ "${actual}" = "0" ] +} + +@test "partition/Service: can set annotations" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/partition-service.yaml \ + --set 'global.adminPartitions.enabled=true' \ + --set 'global.adminPartitions.service.annotations=key: value' \ + . | tee /dev/stderr | + yq -r '.metadata.annotations.key' | tee /dev/stderr) + [ "${actual}" = "value" ] +} + +#-------------------------------------------------------------------- +# nodePort + +@test "partition/Service: RPC node port can be set" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/partition-service.yaml \ + --set 'global.adminPartitions.enabled=true' \ + --set 'global.adminPartitions.service.type=NodePort' \ + --set 'global.adminPartitions.service.nodePort.rpc=4443' \ + . | tee /dev/stderr | + yq -r '.spec.ports[] | select(.name == "server") | .nodePort' | tee /dev/stderr) + [ "${actual}" == "4443" ] +} + +@test "partition/Service: Serf node port can be set" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/partition-service.yaml \ + --set 'global.adminPartitions.enabled=true' \ + --set 'global.adminPartitions.service.type=NodePort' \ + --set 'global.adminPartitions.service.nodePort.serf=4444' \ + . 
| tee /dev/stderr | + yq -r '.spec.ports[] | select(.name == "serflan") | .nodePort' | tee /dev/stderr) + [ "${actual}" == "4444" ] +} + +@test "partition/Service: HTTPS node port can be set" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/partition-service.yaml \ + --set 'global.adminPartitions.enabled=true' \ + --set 'global.adminPartitions.service.type=NodePort' \ + --set 'global.adminPartitions.service.nodePort.https=4444' \ + . | tee /dev/stderr | + yq -r '.spec.ports[] | select(.name == "https") | .nodePort' | tee /dev/stderr) + [ "${actual}" == "4444" ] +} + +@test "partition/Service: RPC, Serf and HTTPS node ports can be set" { + cd `chart_dir` + local ports=$(helm template \ + -s templates/partition-service.yaml \ + --set 'global.adminPartitions.enabled=true' \ + --set 'global.adminPartitions.service.type=NodePort' \ + --set 'global.adminPartitions.service.nodePort.rpc=4443' \ + --set 'global.adminPartitions.service.nodePort.https=4444' \ + --set 'global.adminPartitions.service.nodePort.serf=4445' \ + . | tee /dev/stderr | + yq -r '.spec.ports[]' | tee /dev/stderr) + + local actual + actual=$(echo $ports | jq -r 'select(.name == "server") | .nodePort' | tee /dev/stderr) + [ "${actual}" == "4443" ] + + actual=$(echo $ports | jq -r 'select(.name == "https") | .nodePort' | tee /dev/stderr) + [ "${actual}" == "4444" ] + + actual=$(echo $ports | jq -r 'select(.name == "serflan") | .nodePort' | tee /dev/stderr) + [ "${actual}" == "4445" ] +} diff --git a/charts/consul/test/unit/server-acl-init-job.bats b/charts/consul/test/unit/server-acl-init-job.bats index 63450aa4c2..570d3a396e 100644 --- a/charts/consul/test/unit/server-acl-init-job.bats +++ b/charts/consul/test/unit/server-acl-init-job.bats @@ -99,49 +99,67 @@ load _helpers [[ "$output" =~ "global.bootstrapACLs was removed, use global.acls.manageSystemACLs instead" ]] } -@test "serverACLInit/Job: sets -client=false when client is disabled" { +@test "serverACLInit/Job: does not set -client=false when client is enabled (the default)" { cd `chart_dir` local actual=$(helm template \ -s templates/server-acl-init-job.yaml \ --set 'global.acls.manageSystemACLs=true' \ - --set 'client.enabled=false' \ . | tee /dev/stderr | yq '.spec.template.spec.containers[0].command[2] | contains("-client=false")' | tee /dev/stderr) - [ "${actual}" = "true" ] + [ "${actual}" = "false" ] } -#-------------------------------------------------------------------- -# dns - -@test "serverACLInit/Job: dns acl option enabled with .dns.enabled=- due to inheriting from connectInject.transparentProxy.defaultEnabled" { +@test "serverACLInit/Job: sets -consul-api-timeout=5s" { cd `chart_dir` local actual=$(helm template \ -s templates/server-acl-init-job.yaml \ --set 'global.acls.manageSystemACLs=true' \ . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].command | any(contains("allow-dns"))' | tee /dev/stderr) + yq '.spec.template.spec.containers[0].command[2] | contains("-consul-api-timeout=5s")' | + tee /dev/stderr) [ "${actual}" = "true" ] } -@test "serverACLInit/Job: dns acl option disabled with connectInject.transparentProxy.defaultEnabled=false" { +@test "serverACLInit/Job: sets -client=false when client is disabled" { cd `chart_dir` local actual=$(helm template \ -s templates/server-acl-init-job.yaml \ --set 'global.acls.manageSystemACLs=true' \ - --set 'connectInject.transparentProxy.defaultEnabled=false' \ + --set 'client.enabled=false' \ . 
| tee /dev/stderr | - yq '.spec.template.spec.containers[0].command | any(contains("allow-dns"))' | tee /dev/stderr) - [ "${actual}" = "false" ] + yq '.spec.template.spec.containers[0].command[2] | contains("-client=false")' | + tee /dev/stderr) + [ "${actual}" = "true" ] } -@test "serverACLInit/Job: dns acl option enabled with .dns.enabled=true and connectInject.transparentProxy.defaultEnabled=false" { +@test "serverACLInit/Job: server address is set to the DNS names of the server stateful set" { + cd `chart_dir` + local command=$(helm template \ + -s templates/server-acl-init-job.yaml \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) + + local actual + actual=$(echo $command | jq -r '. | any(contains("-server-address=\"${CONSUL_FULLNAME}-server-0.${CONSUL_FULLNAME}-server.${NAMESPACE}.svc\""))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + actual=$(echo $command | jq -r '. | any(contains("-server-address=\"${CONSUL_FULLNAME}-server-1.${CONSUL_FULLNAME}-server.${NAMESPACE}.svc\""))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + actual=$(echo $command | jq -r '. | any(contains("-server-address=\"${CONSUL_FULLNAME}-server-2.${CONSUL_FULLNAME}-server.${NAMESPACE}.svc\""))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +#-------------------------------------------------------------------- +# dns + +@test "serverACLInit/Job: dns acl option enabled with .dns.enabled=-" { cd `chart_dir` local actual=$(helm template \ -s templates/server-acl-init-job.yaml \ --set 'global.acls.manageSystemACLs=true' \ - --set 'connectInject.transparentProxy.defaultEnabled=false' \ - --set 'dns.enabled=true' \ . | tee /dev/stderr | yq '.spec.template.spec.containers[0].command | any(contains("allow-dns"))' | tee /dev/stderr) [ "${actual}" = "true" ] @@ -210,7 +228,7 @@ load _helpers } #-------------------------------------------------------------------- -# server.snapshotAgent +# client.snapshotAgent @test "serverACLInit/Job: snapshot agent acl option disabled by default" { cd `chart_dir` @@ -222,12 +240,12 @@ load _helpers [ "${actual}" = "false" ] } -@test "serverACLInit/Job: snapshot agent acl option enabled with .server.snapshotAgent.enabled=true" { +@test "serverACLInit/Job: snapshot agent acl option enabled with .client.snapshotAgent.enabled=true" { cd `chart_dir` local actual=$(helm template \ -s templates/server-acl-init-job.yaml \ --set 'global.acls.manageSystemACLs=true' \ - --set 'server.snapshotAgent.enabled=true' \ + --set 'client.snapshotAgent.enabled=true' \ . | tee /dev/stderr | yq '.spec.template.spec.containers[0].command | any(contains("-snapshot-agent"))' | tee /dev/stderr) [ "${actual}" = "true" ] @@ -521,50 +539,24 @@ load _helpers #-------------------------------------------------------------------- # global.tls.enabled -@test "serverACLInit/Job: sets TLS env vars when global.tls.enabled" { +@test "serverACLInit/Job: sets TLS flags when global.tls.enabled" { cd `chart_dir` - local object=$(helm template \ + local command=$(helm template \ -s templates/server-acl-init-job.yaml \ --set 'global.acls.manageSystemACLs=true' \ --set 'global.tls.enabled=true' \ . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0]' | tee /dev/stderr) + yq -r '.spec.template.spec.containers[0].command' | tee /dev/stderr) - local actual=$(echo $object | - yq '[.env[7].name] | any(contains("CONSUL_USE_TLS"))' | tee /dev/stderr) + local actual + actual=$(echo $command | jq -r '. 
| any(contains("-use-https"))' | tee /dev/stderr) [ "${actual}" = "true" ] - local actual=$(echo $object | - yq '[.env[7].value] | any(contains("true"))' | tee /dev/stderr) + actual=$(echo $command | jq -r '. | any(contains("-consul-ca-cert=/consul/tls/ca/tls.crt"))' | tee /dev/stderr) [ "${actual}" = "true" ] - local actual=$(echo $object | - yq '[.env[8].name] | any(contains("CONSUL_CACERT_FILE"))' | tee /dev/stderr) + actual=$(echo $command | jq -r '. | any(contains("-server-port=8501"))' | tee /dev/stderr) [ "${actual}" = "true" ] - - local actual=$(echo $object | - yq '[.env[8].value] | any(contains("/consul/tls/ca/tls.crt"))' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "serverACLInit/Job: does not add consul-ca-cert volume when global.tls.enabled with externalServers and useSystemRoots" { - cd `chart_dir` - local spec=$(helm template \ - -s templates/server-acl-init-job.yaml \ - --set 'global.acls.manageSystemACLs=true' \ - --set 'global.tls.enabled=true' \ - --set 'externalServers.enabled=true' \ - --set 'externalServers.hosts[0]=consul' \ - --set 'externalServers.useSystemRoots=true' \ - --set 'servers.enabled=false' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec' | tee /dev/stderr) - - actual=$(echo $spec | jq -r '.volumes[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) - [ "${actual}" = "" ] - - actual=$(echo $spec | jq -r '.containers[0].volumeMounts[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) - [ "${actual}" = "" ] } @test "serverACLInit/Job: can overwrite CA secret with the provided one" { @@ -634,31 +626,32 @@ load _helpers yq -r '.spec.template' | tee /dev/stderr) # Check annotations + local actual actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/agent-pre-populate-only"]' | tee /dev/stderr) [ "${actual}" = "true" ] - - local actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/agent-inject"]' | tee /dev/stderr) + local actual + actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/agent-inject"]' | tee /dev/stderr) [ "${actual}" = "true" ] - - local actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/role"]' | tee /dev/stderr) + local actual + actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/role"]' | tee /dev/stderr) [ "${actual}" = "aclrole" ] - local actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/agent-inject-secret-bootstrap-token"]' | tee /dev/stderr) + local actual=$(echo $object | yq -r '.metadata.annotations."vault.hashicorp.com/agent-inject-secret-bootstrap-token"') [ "${actual}" = "foo" ] - local actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/agent-inject-template-bootstrap-token"]' | tee /dev/stderr) + local actual=$(echo $object | yq -r '.metadata.annotations."vault.hashicorp.com/agent-inject-template-bootstrap-token"') local expected=$'{{- with secret \"foo\" -}}\n{{- .Data.data.bar -}}\n{{- end -}}' [ "${actual}" = "${expected}" ] # Check that the bootstrap token flag is set to the path of the Vault secret. 
-  local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="server-acl-init-job").command | any(contains("-bootstrap-token-file=/vault/secrets/bootstrap-token"))')
+  local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="post-install-job").command | any(contains("-bootstrap-token-file=/vault/secrets/bootstrap-token"))')
   [ "${actual}" = "true" ]

   # Check that no (secret) volumes are attached
   local actual=$(echo $object | jq -r '.spec.volumes')
   [ "${actual}" = "null" ]

-  local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="server-acl-init-job").volumeMounts')
+  local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="post-install-job").volumeMounts')
   [ "${actual}" = "null" ]
 }

@@ -702,7 +695,7 @@ load _helpers
   local actual=$(echo $object | jq -r '.spec.volumes')
   [ "${actual}" = "null" ]

-  local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="server-acl-init-job").volumeMounts')
+  local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="post-install-job").volumeMounts')
   [ "${actual}" = "null" ]
 }

@@ -844,11 +837,11 @@ load _helpers
   local actual=$(echo $object | jq -r '.spec.volumes')
   [ "${actual}" = "null" ]

-  local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="server-acl-init-job").volumeMounts')
+  local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="post-install-job").volumeMounts')
   [ "${actual}" = "null" ]

   # Check that the replication token flag is set to the path of the Vault secret.
-  local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="server-acl-init-job").command | any(contains("-acl-replication-token-file=/vault/secrets/replication-token"))')
+  local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="post-install-job").command | any(contains("-acl-replication-token-file=/vault/secrets/replication-token"))')
   [ "${actual}" = "true" ]
 }

@@ -892,14 +885,14 @@ load _helpers
   local actual=$(echo $object | jq -r '.spec.volumes')
   [ "${actual}" = "null" ]

-  local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="server-acl-init-job").volumeMounts')
+  local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="post-install-job").volumeMounts')
   [ "${actual}" = "null" ]

   # Check that the replication and bootstrap token flags are set to the path of the Vault secret.
-  local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="server-acl-init-job").command | any(contains("-acl-replication-token-file=/vault/secrets/replication-token"))')
+  local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="post-install-job").command | any(contains("-acl-replication-token-file=/vault/secrets/replication-token"))')
   [ "${actual}" = "true" ]

-  local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="server-acl-init-job").command | any(contains("-bootstrap-token-file=/vault/secrets/bootstrap-token"))')
+  local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="post-install-job").command | any(contains("-bootstrap-token-file=/vault/secrets/bootstrap-token"))')
   [ "${actual}" = "true" ]
 }

@@ -922,7 +915,6 @@ load _helpers
       --set 'global.acls.partitionToken.secretKey=token' \
       --set 'global.adminPartitions.enabled=true' \
       --set "global.adminPartitions.name=default" \
-      --set 'global.enableConsulNamespaces=true' \
       . 
| tee /dev/stderr | yq -r '.spec.template' | tee /dev/stderr) @@ -942,11 +934,11 @@ load _helpers local actual=$(echo $object | jq -r '.spec.volumes') [ "${actual}" = "null" ] - local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="server-acl-init-job").volumeMounts') + local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="post-install-job").volumeMounts') [ "${actual}" = "null" ] # Check that the replication token flag is set to the path of the Vault secret. - local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="server-acl-init-job").command | any(contains("-partition-token-file=/vault/secrets/partition-token"))') + local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="post-install-job").command | any(contains("-partition-token-file=/vault/secrets/partition-token"))') [ "${actual}" = "true" ] } @@ -1021,7 +1013,7 @@ load _helpers local actual=$(echo $object | yq 'any(contains("connect-inject"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + [ "${actual}" = "false" ] local actual=$(echo $object | yq 'any(contains("consul-inject-destination-namespace"))' | tee /dev/stderr) @@ -1068,15 +1060,15 @@ load _helpers local actual=$(echo $object | yq 'any(contains("connect-inject"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + [ "${actual}" = "false" ] local actual=$(echo $object | yq 'any(contains("consul-inject-destination-namespace"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + [ "${actual}" = "false" ] local actual=$(echo $object | yq 'any(contains("enable-inject-k8s-namespace-mirroring"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + [ "${actual}" = "false" ] local actual=$(echo $object | yq 'any(contains("inject-k8s-namespace-mirroring-prefix"))' | tee /dev/stderr) @@ -1103,7 +1095,7 @@ load _helpers local actual=$(echo $object | yq 'any(contains("enable-sync-k8s-namespace-mirroring"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + [ "${actual}" = "false" ] local actual=$(echo $object | yq 'any(contains("sync-k8s-namespace-mirroring-prefix"))' | tee /dev/stderr) @@ -1111,15 +1103,15 @@ load _helpers local actual=$(echo $object | yq 'any(contains("connect-inject"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + [ "${actual}" = "false" ] local actual=$(echo $object | yq 'any(contains("consul-inject-destination-namespace"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + [ "${actual}" = "false" ] local actual=$(echo $object | yq 'any(contains("enable-inject-k8s-namespace-mirroring"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + [ "${actual}" = "false" ] local actual=$(echo $object | yq 'any(contains("inject-k8s-namespace-mirroring-prefix"))' | tee /dev/stderr) @@ -1155,15 +1147,15 @@ load _helpers local actual=$(echo $object | yq 'any(contains("connect-inject"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + [ "${actual}" = "false" ] local actual=$(echo $object | yq 'any(contains("consul-inject-destination-namespace"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + [ "${actual}" = "false" ] local actual=$(echo $object | yq 'any(contains("enable-inject-k8s-namespace-mirroring"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + [ "${actual}" = "false" ] local actual=$(echo $object | yq 'any(contains("inject-k8s-namespace-mirroring-prefix"))' | tee /dev/stderr) @@ -1200,15 +1192,15 @@ load _helpers local actual=$(echo $object | yq 'any(contains("connect-inject"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + [ "${actual}" = "false" ] local actual=$(echo $object | yq 
'any(contains("consul-inject-destination-namespace"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + [ "${actual}" = "false" ] local actual=$(echo $object | yq 'any(contains("enable-inject-k8s-namespace-mirroring"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + [ "${actual}" = "false" ] local actual=$(echo $object | yq 'any(contains("inject-k8s-namespace-mirroring-prefix"))' | tee /dev/stderr) @@ -1247,19 +1239,19 @@ load _helpers local actual=$(echo $object | yq 'any(contains("connect-inject"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + [ "${actual}" = "false" ] local actual=$(echo $object | yq 'any(contains("consul-inject-destination-namespace"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + [ "${actual}" = "false" ] local actual=$(echo $object | yq 'any(contains("enable-inject-k8s-namespace-mirroring"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + [ "${actual}" = "false" ] local actual=$(echo $object | yq 'any(contains("inject-k8s-namespace-mirroring-prefix"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + [ "${actual}" = "false" ] } @test "serverACLInit/Job: inject namespace options set with .global.enableConsulNamespaces=true and inject enabled" { @@ -1298,7 +1290,7 @@ load _helpers local actual=$(echo $object | yq 'any(contains("enable-inject-k8s-namespace-mirroring"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + [ "${actual}" = "false" ] local actual=$(echo $object | yq 'any(contains("inject-k8s-namespace-mirroring-prefix"))' | tee /dev/stderr) @@ -1415,8 +1407,6 @@ load _helpers local object=$(helm template \ -s templates/server-acl-init-job.yaml \ --set 'global.acls.manageSystemACLs=true' \ - --set 'global.tls.enabled=true' \ - --set 'meshGateway.enabled=true' \ --set 'global.peering.enabled=true' \ --set 'connectInject.enabled=true' \ . | tee /dev/stderr | @@ -1455,22 +1445,14 @@ load _helpers --set 'global.adminPartitions.enabled=true' \ --set 'global.enableConsulNamespaces=true' \ . | tee /dev/stderr | - yq '.spec.template.spec.containers[0]' | tee /dev/stderr) - - local actual=$(echo $object | - yq '[.env[7].name] | any(contains("CONSUL_PARTITION"))' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo $object | - yq '[.env[7].value] | any(contains("default"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) local actual=$(echo $object | - yq '[.env[8].name] | any(contains("CONSUL_LOGIN_PARTITION"))' | tee /dev/stderr) + yq 'any(contains("enable-partitions"))' | tee /dev/stderr) [ "${actual}" = "true" ] local actual=$(echo $object | - yq '[.env[8].value] | any(contains("default"))' | tee /dev/stderr) + yq 'any(contains("partition"))' | tee /dev/stderr) [ "${actual}" = "true" ] } @@ -1630,47 +1612,46 @@ load _helpers @test "serverACLInit/Job: sets server address if externalServers.hosts are set" { cd `chart_dir` - local object=$(helm template \ + local actual=$(helm template \ -s templates/server-acl-init-job.yaml \ --set 'global.acls.manageSystemACLs=true' \ --set 'server.enabled=false' \ --set 'externalServers.enabled=true' \ --set 'externalServers.hosts[0]=foo.com' \ . 
| tee /dev/stderr | - yq -r '.spec.template.spec.containers[0]' | tee /dev/stderr) - - local actual=$(echo $object | - yq '[.env[2].name] | any(contains("CONSUL_ADDRESSES"))' | tee /dev/stderr) + yq '.spec.template.spec.containers[0].command | any(contains("-server-address=\"foo.com\""))' | tee /dev/stderr) [ "${actual}" = "true" ] +} - local actual=$(echo $object | - yq '[.env[2].value] | any(contains("foo.com"))' | tee /dev/stderr) +@test "serverACLInit/Job: can pass cloud auto-join string to server address via externalServers.hosts" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/server-acl-init-job.yaml \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'server.enabled=false' \ + --set 'externalServers.enabled=true' \ + --set 'externalServers.hosts[0]=provider=my-cloud config=val' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("-server-address=\"provider=my-cloud config=val\""))' | tee /dev/stderr) [ "${actual}" = "true" ] } @test "serverACLInit/Job: port 8501 is used by default" { cd `chart_dir` - local object=$(helm template \ + local actual=$(helm template \ -s templates/server-acl-init-job.yaml \ --set 'global.acls.manageSystemACLs=true' \ --set 'server.enabled=false' \ --set 'externalServers.enabled=true' \ --set 'externalServers.hosts[0]=1.1.1.1' \ . | tee /dev/stderr | - yq '.spec.template.spec.containers[0]' | tee /dev/stderr) - - local actual=$(echo $object | - yq '[.env[4].name] | any(contains("CONSUL_HTTP_PORT"))' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo $object | - yq '[.env[4].value] | any(contains("8501"))' | tee /dev/stderr) + yq '.spec.template.spec.containers[0].command | any(contains("-server-port=8501"))' | tee /dev/stderr) [ "${actual}" = "true" ] } @test "serverACLInit/Job: can override externalServers.httpsPort" { cd `chart_dir` - local object=$(helm template \ + local actual=$(helm template \ -s templates/server-acl-init-job.yaml \ --set 'global.acls.manageSystemACLs=true' \ --set 'server.enabled=false' \ @@ -1678,14 +1659,7 @@ load _helpers --set 'externalServers.hosts[0]=1.1.1.1' \ --set 'externalServers.httpsPort=443' \ . | tee /dev/stderr | - yq '.spec.template.spec.containers[0]' | tee /dev/stderr) - - local actual=$(echo $object | - yq '[.env[4].name] | any(contains("CONSUL_HTTP_PORT"))' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo $object | - yq '[.env[4].value] | any(contains("443"))' | tee /dev/stderr) + yq '.spec.template.spec.containers[0].command | any(contains("-server-port=443"))' | tee /dev/stderr) [ "${actual}" = "true" ] } @@ -1721,7 +1695,7 @@ load _helpers @test "serverACLInit/Job: sets the CA cert if TLS is enabled and externalServers.enabled is true but externalServers.useSystemRoots is false" { cd `chart_dir` - local object=$(helm template \ + local actual=$(helm template \ -s templates/server-acl-init-job.yaml \ --set 'global.acls.manageSystemACLs=true' \ --set 'global.tls.enabled=true' \ @@ -1730,20 +1704,13 @@ load _helpers --set 'externalServers.hosts[0]=1.1.1.1' \ --set 'externalServers.useSystemRoots=false' \ . 
| tee /dev/stderr | - yq '.spec.template.spec.containers[0]' | tee /dev/stderr) - - local actual=$(echo $object | - yq '[.env[8].name] | any(contains("CONSUL_CACERT_FILE"))' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo $object | - yq '[.env[8].value] | any(contains("/consul/tls/ca/tls.crt"))' | tee /dev/stderr) + yq '.spec.template.spec.containers[0].command | any(contains("-consul-ca-cert=/consul/tls/ca/tls.crt"))' | tee /dev/stderr) [ "${actual}" = "true" ] } @test "serverACLInit/Job: sets the CA cert if TLS is enabled and externalServers.useSystemRoots is true but externalServers.enabled is false" { cd `chart_dir` - local object=$(helm template \ + local actual=$(helm template \ -s templates/server-acl-init-job.yaml \ --set 'global.acls.manageSystemACLs=true' \ --set 'global.tls.enabled=true' \ @@ -1751,36 +1718,19 @@ load _helpers --set 'externalServers.hosts[0]=1.1.1.1' \ --set 'externalServers.useSystemRoots=true' \ . | tee /dev/stderr | - yq '.spec.template.spec.containers[0]' | tee /dev/stderr) - - local actual=$(echo $object | - yq '[.env[8].name] | any(contains("CONSUL_CACERT_FILE"))' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo $object | - yq '[.env[8].value] | any(contains("/consul/tls/ca/tls.crt"))' | tee /dev/stderr) + yq '.spec.template.spec.containers[0].command | any(contains("-consul-ca-cert=/consul/tls/ca/tls.crt"))' | tee /dev/stderr) [ "${actual}" = "true" ] } @test "serverACLInit/Job: sets TLS server name if externalServers.tlsServerName is set" { cd `chart_dir` - local object=$(helm template \ + local actual=$(helm template \ -s templates/server-acl-init-job.yaml \ --set 'global.acls.manageSystemACLs=true' \ --set 'global.tls.enabled=true' \ - --set 'server.enabled=false' \ - --set 'externalServers.enabled=true' \ - --set 'externalServers.hosts[0]=1.1.1.1' \ --set 'externalServers.tlsServerName=foo' \ . | tee /dev/stderr | - yq '.spec.template.spec.containers[0]' | tee /dev/stderr) - - local actual=$(echo $object | - yq '[.env[9].name] | any(contains("CONSUL_TLS_SERVER_NAME"))' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo $object | - yq '[.env[9].value] | any(contains("foo"))' | tee /dev/stderr) + yq '.spec.template.spec.containers[0].command | any(contains("-consul-tls-server-name=foo"))' | tee /dev/stderr) [ "${actual}" = "true" ] } @@ -1938,232 +1888,43 @@ load _helpers } #-------------------------------------------------------------------- -# global.federation.enabled +# controller -@test "serverACLInit/Job: ensure federation is passed when federation is enabled" { +@test "serverACLInit/Job: -controller not set by default" { cd `chart_dir` local actual=$(helm template \ -s templates/server-acl-init-job.yaml \ --set 'global.acls.manageSystemACLs=true' \ - --set 'global.federation.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'meshGateway.enabled=true' \ - --set 'connectInject.enabled=true' \ . 
| tee /dev/stderr | - yq '.spec.template.spec.containers[0].command | any(contains("-federation"))' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -#-------------------------------------------------------------------- -# global.cloud - -@test "partitionInit/JobserverACLInit/Job: fails when global.cloud.enabled is true and global.cloud.clientId.secretName is not set but global.cloud.clientSecret.secretName and global.cloud.resourceId.secretName is set" { - cd `chart_dir` - run helm template \ - -s templates/server-acl-init-job.yaml \ - --set 'global.acls.manageSystemACLs=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientSecret.secretName=client-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-id-key' \ - --set 'global.cloud.resourceId.secretName=client-resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=client-resource-id-key' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]] -} - -@test "partitionInit/JobserverACLInit/Job: fails when global.cloud.enabled is true and global.cloud.clientSecret.secretName is not set but global.cloud.clientId.secretName and global.cloud.resourceId.secretName is set" { - cd `chart_dir` - run helm template \ - -s templates/server-acl-init-job.yaml \ - --set 'global.acls.manageSystemACLs=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]] -} - -@test "partitionInit/JobserverACLInit/Job: fails when global.cloud.enabled is true and global.cloud.resourceId.secretName is not set but global.cloud.clientId.secretName and global.cloud.clientSecret.secretName is set" { - cd `chart_dir` - run helm template \ - -s templates/server-acl-init-job.yaml \ - --set 'global.acls.manageSystemACLs=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]] -} - -@test "partitionInit/JobserverACLInit/Job: fails when global.cloud.resourceId.secretName is set but global.cloud.resourceId.secretKey is not set." 
{ - cd `chart_dir` - run helm template \ - -s templates/server-acl-init-job.yaml \ - --set 'global.acls.manageSystemACLs=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When either global.cloud.resourceId.secretName or global.cloud.resourceId.secretKey is defined, both must be set." ]] -} - -@test "partitionInit/JobserverACLInit/Job: fails when global.cloud.authURL.secretName is set but global.cloud.authURL.secretKey is not set." { - cd `chart_dir` - run helm template \ - -s templates/server-acl-init-job.yaml \ - --set 'global.acls.manageSystemACLs=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.authUrl.secretName=auth-url-name' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.authUrl.secretName or global.cloud.authUrl.secretKey is defined, both must be set." ]] -} - -@test "partitionInit/JobserverACLInit/Job: fails when global.cloud.authURL.secretKey is set but global.cloud.authURL.secretName is not set." { - cd `chart_dir` - run helm template \ - -s templates/server-acl-init-job.yaml \ - --set 'global.acls.manageSystemACLs=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.authUrl.secretKey=auth-url-key' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.authUrl.secretName or global.cloud.authUrl.secretKey is defined, both must be set." ]] -} - -@test "partitionInit/JobserverACLInit/Job: fails when global.cloud.apiHost.secretName is set but global.cloud.apiHost.secretKey is not set." { - cd `chart_dir` - run helm template \ - -s templates/server-acl-init-job.yaml \ - --set 'global.acls.manageSystemACLs=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.apiHost.secretName=auth-url-name' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.apiHost.secretName or global.cloud.apiHost.secretKey is defined, both must be set." 
]] -} - -@test "partitionInit/JobserverACLInit/Job: fails when global.cloud.apiHost.secretKey is set but global.cloud.apiHost.secretName is not set." { - cd `chart_dir` - run helm template \ - -s templates/server-acl-init-job.yaml \ - --set 'global.acls.manageSystemACLs=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.apiHost.secretKey=auth-url-key' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.apiHost.secretName or global.cloud.apiHost.secretKey is defined, both must be set." ]] + yq '.spec.template.spec.containers[0].command | any(contains("controller"))' | tee /dev/stderr) + [ "${actual}" = "false" ] } -@test "partitionInit/JobserverACLInit/Job: fails when global.cloud.scadaAddress.secretName is set but global.cloud.scadaAddress.secretKey is not set." { +@test "serverACLInit/Job: -controller set when controller.enabled=true" { cd `chart_dir` - run helm template \ + local actual=$(helm template \ -s templates/server-acl-init-job.yaml \ --set 'global.acls.manageSystemACLs=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.scadaAddress.secretName=scada-address-name' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.scadaAddress.secretName or global.cloud.scadaAddress.secretKey is defined, both must be set." ]] + --set 'controller.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].command | any(contains("controller"))' | tee /dev/stderr) + [ "${actual}" = "true" ] } -@test "partitionInit/JobserverACLInit/Job: fails when global.cloud.scadaAddress.secretKey is set but global.cloud.scadaAddress.secretName is not set." { - cd `chart_dir` - run helm template \ - -s templates/server-acl-init-job.yaml \ - --set 'global.acls.manageSystemACLs=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.scadaAddress.secretKey=scada-address-key' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.scadaAddress.secretName or global.cloud.scadaAddress.secretKey is defined, both must be set." 
]] -} +#-------------------------------------------------------------------- +# global.federation.enabled -@test "serverACLInit/Job: sets TLS server name if global.cloud.enabled is set" { +@test "serverACLInit/Job: ensure federation is passed when federation is enabled" { cd `chart_dir` - local object=$(helm template \ + local actual=$(helm template \ -s templates/server-acl-init-job.yaml \ --set 'global.acls.manageSystemACLs=true' \ + --set 'global.federation.enabled=true' \ --set 'global.tls.enabled=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ . | tee /dev/stderr | - yq '.spec.template.spec.containers[0]' | tee /dev/stderr) - - local actual=$(echo $object | - yq '[.env[9].name] | any(contains("CONSUL_TLS_SERVER_NAME"))' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo $object | - yq '[.env[9].value] | any(contains("server.dc1.consul"))' | tee /dev/stderr) + yq '.spec.template.spec.containers[0].command | any(contains("-federation"))' | tee /dev/stderr) [ "${actual}" = "true" ] } diff --git a/charts/consul/test/unit/server-config-configmap.bats b/charts/consul/test/unit/server-config-configmap.bats index 5e849d5a4f..d31cbe774c 100755 --- a/charts/consul/test/unit/server-config-configmap.bats +++ b/charts/consul/test/unit/server-config-configmap.bats @@ -62,36 +62,6 @@ load _helpers [ "${actual}" = "release-name-consul-server.default.svc:8301" ] } -#-------------------------------------------------------------------- -# grpc - -@test "server/ConfigMap: if tls is disabled, grpc port is set and grpc_tls port is disabled" { - cd `chart_dir` - local configmap=$(helm template \ - -s templates/server-config-configmap.yaml \ - . | tee /dev/stderr | - yq -r '.data["server.json"]' | tee /dev/stderr) - - local actual=$(echo $configmap | jq -r .ports.grpc | tee /dev/stderr) - [ "${actual}" = "8502" ] - local actual=$(echo $configmap | jq -r .ports.grpc_tls | tee /dev/stderr) - [ "${actual}" = "-1" ] -} - -@test "server/ConfigMap: if tls is enabled, grpc_tls port is set and grpc port is disabled" { - cd `chart_dir` - local configmap=$(helm template \ - --set 'global.tls.enabled=true' \ - -s templates/server-config-configmap.yaml \ - . | tee /dev/stderr | - yq -r '.data["server.json"]' | tee /dev/stderr) - - local actual=$(echo $configmap | jq -r .ports.grpc_tls | tee /dev/stderr) - [ "${actual}" = "8502" ] - local actual=$(echo $configmap | jq -r .ports.grpc | tee /dev/stderr) - [ "${actual}" = "-1" ] -} - #-------------------------------------------------------------------- # serflan @@ -148,7 +118,7 @@ load _helpers -s templates/server-config-configmap.yaml \ . 
| tee /dev/stderr | yq -r '.data["server.json"]' | jq .bootstrap_expect | tee /dev/stderr) - [ "${actual}" = "1" ] + [ "${actual}" = "3" ] } @test "server/ConfigMap: bootstrap_expect can be set by server.bootstrapExpect" { @@ -708,22 +678,22 @@ load _helpers yq -r '.data["tls-config.json"]' | tee /dev/stderr) local actual - actual=$(echo $config | jq -r .tls.defaults.ca_file | tee /dev/stderr) + actual=$(echo $config | jq -r .ca_file | tee /dev/stderr) [ "${actual}" = "/consul/tls/ca/tls.crt" ] - actual=$(echo $config | jq -r .tls.defaults.cert_file | tee /dev/stderr) + actual=$(echo $config | jq -r .cert_file | tee /dev/stderr) [ "${actual}" = "/consul/tls/server/tls.crt" ] - actual=$(echo $config | jq -r .tls.defaults.key_file | tee /dev/stderr) + actual=$(echo $config | jq -r .key_file | tee /dev/stderr) [ "${actual}" = "/consul/tls/server/tls.key" ] - actual=$(echo $config | jq -r .tls.internal_rpc.verify_incoming | tee /dev/stderr) + actual=$(echo $config | jq -r .verify_incoming_rpc | tee /dev/stderr) [ "${actual}" = "true" ] - actual=$(echo $config | jq -r .tls.defaults.verify_outgoing | tee /dev/stderr) + actual=$(echo $config | jq -r .verify_outgoing | tee /dev/stderr) [ "${actual}" = "true" ] - actual=$(echo $config | jq -r .tls.internal_rpc.verify_server_hostname | tee /dev/stderr) + actual=$(echo $config | jq -r .verify_server_hostname | tee /dev/stderr) [ "${actual}" = "true" ] actual=$(echo $config | jq -c .ports | tee /dev/stderr) @@ -735,7 +705,6 @@ load _helpers local config=$(helm template \ -s templates/server-config-configmap.yaml \ --set 'global.tls.enabled=true' \ - --set 'meshGateway.enabled=true' \ --set 'global.peering.enabled=true' \ --set 'connectInject.enabled=true' \ . | tee /dev/stderr | @@ -789,7 +758,6 @@ load _helpers local config=$(helm template \ -s templates/server-config-configmap.yaml \ --set 'global.tls.enabled=true' \ - --set 'meshGateway.enabled=true' \ --set 'global.peering.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'global.tls.verify=false' \ @@ -848,13 +816,13 @@ load _helpers . | tee /dev/stderr | yq -r '.data["tls-config.json"]' | tee /dev/stderr) - local actual=$(echo $object | jq -r .tls.defaults.ca_file | tee /dev/stderr) + local actual=$(echo $object | jq -r .ca_file | tee /dev/stderr) [ "${actual}" = "/vault/secrets/serverca.crt" ] - local actual=$(echo $object | jq -r .tls.defaults.cert_file | tee /dev/stderr) + local actual=$(echo $object | jq -r .cert_file | tee /dev/stderr) [ "${actual}" = "/vault/secrets/servercert.crt" ] - local actual=$(echo $object | jq -r .tls.defaults.key_file | tee /dev/stderr) + local actual=$(echo $object | jq -r .key_file | tee /dev/stderr) [ "${actual}" = "/vault/secrets/servercert.key" ] } @@ -863,7 +831,6 @@ load _helpers local object=$(helm template \ -s templates/server-config-configmap.yaml \ --set 'global.tls.enabled=true' \ - --set 'meshGateway.enabled=true' \ --set 'global.peering.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'global.tls.enableAutoEncrypt=true' \ @@ -957,8 +924,6 @@ load _helpers local actual=$(helm template \ -s templates/server-config-configmap.yaml \ --set 'global.peering.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'meshGateway.enabled=true' \ --set 'connectInject.enabled=true' \ . 
| tee /dev/stderr | yq -r '.data["server.json"]' | jq -r .peering.enabled | tee /dev/stderr) diff --git a/charts/consul/test/unit/server-podsecuritypolicy.bats b/charts/consul/test/unit/server-podsecuritypolicy.bats index e862cd90d1..99902d1971 100644 --- a/charts/consul/test/unit/server-podsecuritypolicy.bats +++ b/charts/consul/test/unit/server-podsecuritypolicy.bats @@ -39,7 +39,7 @@ load _helpers --set 'server.exposeGossipAndRPCPorts=true' \ . | tee /dev/stderr | yq -c '.spec.hostPorts' | tee /dev/stderr) - [ "${actual}" = '[{"min":8300,"max":8300},{"min":8301,"max":8301},{"min":8302,"max":8302},{"min":8502,"max":8502}]' ] + [ "${actual}" = '[{"min":8300,"max":8300},{"min":8301,"max":8301},{"min":8302,"max":8302},{"min":8503,"max":8503}]' ] } @test "server/PodSecurityPolicy: hostPort 8300, server.ports.serflan.port and 8302 allowed when exposeGossipAndRPCPorts=true" { @@ -51,5 +51,5 @@ load _helpers --set 'server.ports.serflan.port=8333' \ . | tee /dev/stderr | yq -c '.spec.hostPorts' | tee /dev/stderr) - [ "${actual}" = '[{"min":8300,"max":8300},{"min":8333,"max":8333},{"min":8302,"max":8302},{"min":8502,"max":8502}]' ] + [ "${actual}" = '[{"min":8300,"max":8300},{"min":8333,"max":8333},{"min":8302,"max":8302},{"min":8503,"max":8503}]' ] } diff --git a/charts/consul/test/unit/server-service.bats b/charts/consul/test/unit/server-service.bats index 1aafd08fd4..c639d38b51 100755 --- a/charts/consul/test/unit/server-service.bats +++ b/charts/consul/test/unit/server-service.bats @@ -42,6 +42,11 @@ load _helpers # this is such an important part of making everything work we verify it here. @test "server/Service: tolerates unready endpoints" { cd `chart_dir` + local actual=$(helm template \ + -s templates/server-service.yaml \ + . | tee /dev/stderr | + yq -r '.metadata.annotations["service.alpha.kubernetes.io/tolerate-unready-endpoints"]' | tee /dev/stderr) + [ "${actual}" = "true" ] local actual=$(helm template \ -s templates/server-service.yaml \ @@ -98,13 +103,13 @@ load _helpers #-------------------------------------------------------------------- # annotations -@test "server/Service: no annotation by default" { +@test "server/Service: one annotation by default" { cd `chart_dir` local actual=$(helm template \ -s templates/server-service.yaml \ . | tee /dev/stderr | yq -r '.metadata.annotations | length' | tee /dev/stderr) - [ "${actual}" = "0" ] + [ "${actual}" = "1" ] } @test "server/Service: can set annotations" { diff --git a/charts/consul/test/unit/server-statefulset.bats b/charts/consul/test/unit/server-statefulset.bats index 2d21cf7c1e..a6cccccb79 100755 --- a/charts/consul/test/unit/server-statefulset.bats +++ b/charts/consul/test/unit/server-statefulset.bats @@ -45,9 +45,7 @@ load _helpers cd `chart_dir` run helm template \ -s templates/server-statefulset.yaml \ - --set 'server.bootstrapExpect=1' \ - --set 'server.replicas=3' \ - . + --set 'server.bootstrapExpect=1' . [ "$status" -eq 1 ] [[ "$output" =~ "server.bootstrapExpect cannot be less than server.replicas" ]] } @@ -62,6 +60,7 @@ load _helpers --set 'global.adminPartitions.enabled=true' \ --set 'global.federation.enabled=true' \ . 
+ [ "$status" -eq 1 ] [[ "$output" =~ "If global.federation.enabled is true, global.adminPartitions.enabled must be false because they are mutually exclusive" ]] } @@ -619,6 +618,28 @@ load _helpers [ "${actualTemplateBaz}" = "qux" ] } +#-------------------------------------------------------------------- +# DNS + +@test "server/StatefulSet: recursor flags unset by default" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/server-statefulset.yaml \ + . | tee /dev/stderr | + yq -c -r '.spec.template.spec.containers[0].command | join(" ") | contains("$recursor_flags")' | tee /dev/stderr) + [ "${actual}" = "false" ] +} + +@test "server/StatefulSet: add recursor flags if dns.enableRedirection is true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/server-statefulset.yaml \ + --set 'dns.enableRedirection=true' \ + . | tee /dev/stderr | + yq -c -r '.spec.template.spec.containers[0].command | join(" ") | contains("$recursor_flags")' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + #-------------------------------------------------------------------- # annotations @@ -686,7 +707,7 @@ load _helpers -s templates/server-statefulset.yaml \ . | tee /dev/stderr | yq -r '.spec.template.metadata.annotations."consul.hashicorp.com/config-checksum"' | tee /dev/stderr) - [ "${actual}" = 251dd23c6cc44bf8362acddc24c78440b6a65c4618785d027fae526958af5dde ] + [ "${actual}" = 04cc39bf3f56ff39a2f4ae188fc37fc54b7775a073e8f97111eb37a548d7e229 ] } @test "server/StatefulSet: adds config-checksum annotation when extraConfig is provided" { @@ -696,7 +717,7 @@ load _helpers --set 'server.extraConfig="{\"hello\": \"world\"}"' \ . | tee /dev/stderr | yq -r '.spec.template.metadata.annotations."consul.hashicorp.com/config-checksum"' | tee /dev/stderr) - [ "${actual}" = 473d54d05b794be1526d42ef04fdc049f4f979a75d3394c897eef149d399207d ] + [ "${actual}" = e8d2e9535eb6e69eedebef725a66a8b47fd8845a77772f0e19911d2273b9b804 ] } @test "server/StatefulSet: adds config-checksum annotation when config is updated" { @@ -706,7 +727,7 @@ load _helpers --set 'global.acls.manageSystemACLs=true' \ . | tee /dev/stderr | yq -r '.spec.template.metadata.annotations."consul.hashicorp.com/config-checksum"' | tee /dev/stderr) - [ "${actual}" = 6acd3761c0981d4d6194b3375b0f7a291e3927602ce7857344c26010381d3a61 ] + [ "${actual}" = d5f4de988e9d51ff8ae91a24a1a990dc65ce046c0494836f6d0f0eae34108235 ] } #-------------------------------------------------------------------- @@ -1401,6 +1422,20 @@ load _helpers [[ "$output" =~ "global.secretsBackend.vault.consulServerRole must be provided if global.secretsBackend.vault.enabled=true" ]] } +@test "server/StatefulSet: fail when vault is enabled with tls but autoencrypt is disabled" { + cd `chart_dir` + run helm template \ + -s templates/server-statefulset.yaml \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=test' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.server.serverCert.secretName=test' \ + --set 'global.tls.caCert.secretName=test' \ + --set 'global.tls.enabled=true' . 
+ [ "$status" -eq 1 ] + [[ "$output" =~ "global.tls.enableAutoEncrypt must be true if global.secretsBackend.vault.enabled=true and global.tls.enabled=true" ]] +} + @test "server/StatefulSet: fail when vault, tls are enabled but no caCert provided" { cd `chart_dir` run helm template \ @@ -1414,6 +1449,36 @@ load _helpers [[ "$output" =~ "global.tls.caCert.secretName must be provided if global.tls.enabled=true and global.secretsBackend.vault.enabled=true." ]] } +@test "server/StatefulSet: fail when vault, tls are enabled with a serverCert but no autoencrypt" { + cd `chart_dir` + run helm template \ + -s templates/server-statefulset.yaml \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=test' \ + --set 'global.secretsBackend.vault.consulServerRole=foo' \ + --set 'global.tls.enabled=true' \ + --set 'server.serverCert.secretName=pki_int/issue/test' \ + --set 'global.tls.caCert.secretName=pki_int/cert/ca' \ + . + [ "$status" -eq 1 ] + [[ "$output" =~ "global.tls.enableAutoEncrypt must be true if global.secretsBackend.vault.enabled=true and global.tls.enabled=true" ]] +} + +@test "server/StatefulSet: fail when vault is enabled with tls but no consulCARole is provided" { + cd `chart_dir` + run helm template \ + -s templates/server-statefulset.yaml \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=test' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.server.serverCert.secretName=test' \ + --set 'global.tls.caCert.secretName=test' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.tls.enabled=true' . + [ "$status" -eq 1 ] + [[ "$output" =~ "global.secretsBackend.vault.consulCARole must be provided if global.secretsBackend.vault.enabled=true and global.tls.enabled=true" ]] +} + @test "server/StatefulSet: vault annotations not set by default" { cd `chart_dir` local object=$(helm template \ @@ -1895,882 +1960,3 @@ load _helpers local actual="$(echo $object | yq -r '.spec.containers[] | select(.name=="consul").command | any(contains("-config-file=/vault/secrets/replication-token-config.hcl"))' | tee /dev/stderr)" [ "${actual}" = "true" ] } - -#-------------------------------------------------------------------- -# global.cloud - -@test "server/StatefulSet: cloud config is not set in command when global.cloud.enabled is not set" { - cd `chart_dir` - local object=$(helm template \ - -s templates/server-statefulset.yaml \ - . | tee /dev/stderr) - - # Test the flag is set. - local actual=$(echo "$object" | - yq '.spec.template.spec.containers[] | select(.name == "consul") | .command | any(contains("-hcl=\"cloud { resource_id = \\\"${HCP_RESOURCE_ID}\\\" }\""))' | tee /dev/stderr) - [ "${actual}" = "false" ] - - # Test the HCP_RESOURCE_ID environment variable is set. - local envvar=$(echo "$object" | - yq -r -c '.spec.template.spec.containers[] | select(.name == "consul") | .env | select(.name == "HCP_RESOURCE_ID")' | tee /dev/stderr) - [ "${envvar}" = "" ] -} - -@test "server/StatefulSet: does not create HCP_RESOURCE_ID, HCP_CLIENT_ID, HCP_CLIENT_SECRET, HCP_AUTH_URL, HCP_SCADA_ADDRESS, and HCP_API_HOSTNAME envvars in consul container when global.cloud.enabled is not set" { - cd `chart_dir` - local object=$(helm template \ - -s templates/server-statefulset.yaml \ - . 
| tee /dev/stderr ) - - local container=$(echo "$object" | - yq -r '.spec.template.spec.containers[] | select(.name == "consul")' | tee /dev/stderr) - - - local envvar=$(echo "$container" | - yq -r '.env[] | select(.name == "HCP_CLIENT_ID")' | tee /dev/stderr) - [ "${envvar}" = "" ] - - envvar=$(echo "$container" | - yq -r '.env[] | select(.name == "HCP_CLIENT_SECRET")' | tee /dev/stderr) - [ "${envvar}" = "" ] - - envvar=$(echo "$container" | - yq -r '.env[] | select(.name == "HCP_RESOURCE_ID")' | tee /dev/stderr) - [ "${envvar}" = "" ] - - envvar=$(echo "$container" | - yq -r '.env[] | select(.name == "HCP_AUTH_URL")' | tee /dev/stderr) - [ "${envvar}" = "" ] - - envvar=$(echo "$container" | - yq -r '.env[] | select(.name == "HCP_API_HOSTNAME")' | tee /dev/stderr) - [ "${envvar}" = "" ] - - envvar=$(echo "$container" | - yq -r '.env[] | select(.name == "HCP_SCADA_ADDRESS")' | tee /dev/stderr) - [ "${envvar}" = "" ] - -} - -@test "server/StatefulSet: cloud config is set in command when global.cloud.enabled and global.cloud.resourceId are set" { - cd `chart_dir` - local object=$(helm template \ - -s templates/server-statefulset.yaml \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - . | tee /dev/stderr) - - local actual=$(echo "$object" | - yq '.spec.template.spec.containers[] | select(.name == "consul") | .command | any(contains("-hcl=\"cloud { resource_id = \\\"${HCP_RESOURCE_ID}\\\" }\""))' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - - -@test "server/StatefulSet: creates HCP_RESOURCE_ID, HCP_CLIENT_ID, HCP_CLIENT_SECRET envvars in consul container when global.cloud.enabled is true" { - cd `chart_dir` - local object=$(helm template \ - -s templates/server-statefulset.yaml \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - . 
| tee /dev/stderr ) - - local container=$(echo "$object" | - yq -r '.spec.template.spec.containers[] | select(.name == "consul")' | tee /dev/stderr) - - # HCP_CLIENT_ID - local envvar=$(echo "$container" | - yq -r '.env[] | select(.name == "HCP_CLIENT_ID")' | tee /dev/stderr) - - local actual=$(echo "$envvar" | - yq -r '.valueFrom.secretKeyRef.name' | tee /dev/stderr) - [ "${actual}" = "client-id-name" ] - - actual=$(echo "$envvar" | - yq -r '.valueFrom.secretKeyRef.key' | tee /dev/stderr) - [ "${actual}" = "client-id-key" ] - - # HCP_CLIENT_SECRET - envvar=$(echo "$container" | - yq -r '.env[] | select(.name == "HCP_CLIENT_SECRET")' | tee /dev/stderr) - - local actual=$(echo "$envvar" | - yq -r '.valueFrom.secretKeyRef.name' | tee /dev/stderr) - [ "${actual}" = "client-secret-name" ] - - actual=$(echo "$envvar" | - yq -r '.valueFrom.secretKeyRef.key' | tee /dev/stderr) - [ "${actual}" = "client-secret-key" ] - - # HCP_RESOURCE_ID - envvar=$(echo "$container" | - yq -r '.env[] | select(.name == "HCP_RESOURCE_ID")' | tee /dev/stderr) - - local actual=$(echo "$envvar" | - yq -r '.valueFrom.secretKeyRef.name' | tee /dev/stderr) - [ "${actual}" = "resource-id-name" ] - - actual=$(echo "$envvar" | - yq -r '.valueFrom.secretKeyRef.key' | tee /dev/stderr) - [ "${actual}" = "resource-id-key" ] -} - -@test "server/StatefulSet: creates HCP_AUTH_URL, HCP_SCADA_ADDRESS, and HCP_API_HOSTNAME envvars in consul container when global.cloud.enabled is true and those cloud values are specified" { - cd `chart_dir` - local object=$(helm template \ - -s templates/server-statefulset.yaml \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.secretName=foo' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.authUrl.secretName=auth-url-name' \ - --set 'global.cloud.authUrl.secretKey=auth-url-key' \ - --set 'global.cloud.apiHost.secretName=api-host-name' \ - --set 'global.cloud.apiHost.secretKey=api-host-key' \ - --set 'global.cloud.scadaAddress.secretName=scada-address-name' \ - --set 'global.cloud.scadaAddress.secretKey=scada-address-key' \ - . 
| tee /dev/stderr ) - - local container=$(echo "$object" | - yq -r '.spec.template.spec.containers[] | select(.name == "consul")' | tee /dev/stderr) - - # HCP_AUTH_URL - envvar=$(echo "$container" | - yq -r '.env[] | select(.name == "HCP_AUTH_URL")' | tee /dev/stderr) - - local actual=$(echo "$envvar" | - yq -r '.valueFrom.secretKeyRef.name' | tee /dev/stderr) - echo "actual: $actual" - - [ "${actual}" = "auth-url-name" ] - - actual=$(echo "$envvar" | - yq -r '.valueFrom.secretKeyRef.key' | tee /dev/stderr) - [ "${actual}" = "auth-url-key" ] - - # HCP_API_HOST - envvar=$(echo "$container" | - yq -r '.env[] | select(.name == "HCP_API_HOST")' | tee /dev/stderr) - - local actual=$(echo "$envvar" | - yq -r '.valueFrom.secretKeyRef.name' | tee /dev/stderr) - [ "${actual}" = "api-host-name" ] - - actual=$(echo "$envvar" | - yq -r '.valueFrom.secretKeyRef.key' | tee /dev/stderr) - [ "${actual}" = "api-host-key" ] - - # HCP_SCADA_ADDRESS - envvar=$(echo "$container" | - yq -r '.env[] | select(.name == "HCP_SCADA_ADDRESS")' | tee /dev/stderr) - - local actual=$(echo "$envvar" | - yq -r '.valueFrom.secretKeyRef.name' | tee /dev/stderr) - [ "${actual}" = "scada-address-name" ] - - actual=$(echo "$envvar" | - yq -r '.valueFrom.secretKeyRef.key' | tee /dev/stderr) - [ "${actual}" = "scada-address-key" ] -} - -@test "server/StatefulSet: cloud config is set in command global.cloud.enabled is not set" { - cd `chart_dir` - local object=$(helm template \ - -s templates/server-statefulset.yaml \ - --set 'global.acls.enabled=true' \ - --set 'global.acls.bootstrapToken.secretName=name' \ - --set 'global.acls.bootstrapToken.secretKey=key' \ - . | tee /dev/stderr) - - # Test the flag is set. - local actual=$(echo "$object" | - yq '.spec.template.spec.containers[0].command | any(contains("-hcl=\"acl { tokens { initial_management = \\\"${ACL_BOOTSTRAP_TOKEN}\\\" } }\""))' | tee /dev/stderr) - [ "${actual}" = "true" ] - - # Test the ACL_BOOTSTRAP_TOKEN environment variable is set. - local actual=$(echo "$object" | - yq -r -c '.spec.template.spec.containers[0].env | map(select(.name == "ACL_BOOTSTRAP_TOKEN"))' | tee /dev/stderr) - [ "${actual}" = '[{"name":"ACL_BOOTSTRAP_TOKEN","valueFrom":{"secretKeyRef":{"name":"name","key":"key"}}}]' ] -} - -@test "server/StatefulSet: fails when global.cloud.enabled is true and global.cloud.clientId.secretName is not set but global.cloud.clientSecret.secretName and global.cloud.resourceId.secretName is set" { - cd `chart_dir` - run helm template \ - -s templates/server-statefulset.yaml \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientSecret.secretName=client-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-id-key' \ - --set 'global.cloud.resourceId.secretName=client-resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=client-resource-id-key' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." 
]] -} - -@test "server/StatefulSet: fails when global.cloud.enabled is true and global.cloud.clientSecret.secretName is not set but global.cloud.clientId.secretName and global.cloud.resourceId.secretName is set" { - cd `chart_dir` - run helm template \ - -s templates/server-statefulset.yaml \ - --set 'connectInject.enabled=true' \ - --set 'meshGateway.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]] -} - -@test "server/StatefulSet: fails when global.cloud.enabled is true and global.cloud.resourceId.secretName is not set but global.cloud.clientId.secretName and global.cloud.clientSecret.secretName is set" { - cd `chart_dir` - run helm template \ - -s templates/server-statefulset.yaml \ - --set 'connectInject.enabled=true' \ - --set 'meshGateway.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]] -} - -@test "server/StatefulSet: fails when global.cloud.resourceId.secretName is set but global.cloud.resourceId.secretKey is not set." { - cd `chart_dir` - run helm template \ - -s templates/server-statefulset.yaml \ - --set 'connectInject.enabled=true' \ - --set 'meshGateway.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When either global.cloud.resourceId.secretName or global.cloud.resourceId.secretKey is defined, both must be set." ]] -} - -@test "server/StatefulSet: fails when global.cloud.authURL.secretName is set but global.cloud.authURL.secretKey is not set." 
{ - cd `chart_dir` - run helm template \ - -s templates/server-statefulset.yaml \ - --set 'connectInject.enabled=true' \ - --set 'meshGateway.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.authUrl.secretName=auth-url-name' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.authUrl.secretName or global.cloud.authUrl.secretKey is defined, both must be set." ]] -} - -@test "server/StatefulSet: fails when global.cloud.authURL.secretKey is set but global.cloud.authURL.secretName is not set." { - cd `chart_dir` - run helm template \ - -s templates/server-statefulset.yaml \ - --set 'connectInject.enabled=true' \ - --set 'meshGateway.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.authUrl.secretKey=auth-url-key' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.authUrl.secretName or global.cloud.authUrl.secretKey is defined, both must be set." ]] -} - -@test "server/StatefulSet: fails when global.cloud.apiHost.secretName is set but global.cloud.apiHost.secretKey is not set." { - cd `chart_dir` - run helm template \ - -s templates/server-statefulset.yaml \ - --set 'connectInject.enabled=true' \ - --set 'meshGateway.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.apiHost.secretName=auth-url-name' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.apiHost.secretName or global.cloud.apiHost.secretKey is defined, both must be set." ]] -} - -@test "server/StatefulSet: fails when global.cloud.apiHost.secretKey is set but global.cloud.apiHost.secretName is not set." 
{ - cd `chart_dir` - run helm template \ - -s templates/server-statefulset.yaml \ - --set 'connectInject.enabled=true' \ - --set 'meshGateway.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.apiHost.secretKey=auth-url-key' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.apiHost.secretName or global.cloud.apiHost.secretKey is defined, both must be set." ]] -} - -@test "server/StatefulSet: fails when global.cloud.scadaAddress.secretName is set but global.cloud.scadaAddress.secretKey is not set." { - cd `chart_dir` - run helm template \ - -s templates/server-statefulset.yaml \ - --set 'connectInject.enabled=true' \ - --set 'meshGateway.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.scadaAddress.secretName=scada-address-name' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.scadaAddress.secretName or global.cloud.scadaAddress.secretKey is defined, both must be set." ]] -} - -@test "server/StatefulSet: fails when global.cloud.scadaAddress.secretKey is set but global.cloud.scadaAddress.secretName is not set." { - cd `chart_dir` - run helm template \ - -s templates/server-statefulset.yaml \ - --set 'connectInject.enabled=true' \ - --set 'meshGateway.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.scadaAddress.secretKey=scada-address-key' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.scadaAddress.secretName or global.cloud.scadaAddress.secretKey is defined, both must be set." ]] -} - -#-------------------------------------------------------------------- -# server.snapshotAgent - -@test "server/StatefulSet: snapshot-agent: snapshot agent container not added by default" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/server-statefulset.yaml \ - . 
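#--------------------------------------------------------------------
# The global.cloud failure tests above all repeat one shape: render a single
# template with an incomplete secretName/secretKey pair, expect a non-zero
# exit, and match a fixed error string. A minimal sketch of a bats helper that
# factors that shape out; the name assert_render_fails is hypothetical and is
# not part of the chart's _helpers.bash.
assert_render_fails() {
  local template=$1
  local expected=$2
  shift 2
  run helm template -s "$template" "$@" .
  [ "$status" -eq 1 ]
  [[ "$output" =~ "$expected" ]]
}
# Hypothetical usage, mirroring the scadaAddress test above (the required
# clientId/clientSecret/resourceId flags are elided here for brevity):
# assert_render_fails templates/server-statefulset.yaml \
#   "When either global.cloud.scadaAddress.secretName or global.cloud.scadaAddress.secretKey is defined, both must be set." \
#   --set 'global.cloud.enabled=true' --set 'global.cloud.scadaAddress.secretKey=scada-address-key'
#--------------------------------------------------------------------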
| tee /dev/stderr | - yq '.spec.template.spec.containers[] | select(.name == "consul-snapshot-agent")' | tee /dev/stderr) - [ "${actual}" = "" ] -} - - -@test "server/StatefulSet: snapshot-agent: snapshot agent container added with server.snapshotAgent.enabled=true" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/server-statefulset.yaml \ - --set 'server.snapshotAgent.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[] | select(.name == "consul-snapshot-agent") | .name' | tee /dev/stderr) - [ "${actual}" = "consul-snapshot-agent" ] -} - -@test "server/StatefulSet: snapshot-agent: when server.snapshotAgent.configSecret.secretKey!=null and server.snapshotAgent.configSecret.secretName=null, fail" { - cd `chart_dir` - run helm template \ - -s templates/server-statefulset.yaml \ - --set 'server.snapshotAgent.enabled=true' \ - --set 'server.snapshotAgent.configSecret.secretName=' \ - --set 'server.snapshotAgent.configSecret.secretKey=bar' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "server.snapshotAgent.configSecret.secretKey and server.snapshotAgent.configSecret.secretName must both be specified." ]] -} - -@test "server/StatefulSet: snapshot-agent: when server.snapshotAgent.configSecret.secretName!=null and server.snapshotAgent.configSecret.secretKey=null, fail" { - cd `chart_dir` - run helm template \ - -s templates/server-statefulset.yaml \ - --set 'server.snapshotAgent.enabled=true' \ - --set 'server.snapshotAgent.configSecret.secretName=foo' \ - --set 'server.snapshotAgent.configSecret.secretKey=' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "server.snapshotAgent.configSecret.secretKey and server.snapshotAgent.configSecret.secretName must both be specified." ]] -} - -@test "server/StatefulSet: snapshot-agent: adds volume for snapshot agent config secret when secret is configured" { - cd `chart_dir` - local vol=$(helm template \ - -s templates/server-statefulset.yaml \ - --set 'server.snapshotAgent.enabled=true' \ - --set 'server.snapshotAgent.configSecret.secretName=a/b/c/d' \ - --set 'server.snapshotAgent.configSecret.secretKey=snapshot-agent-config' \ - . | tee /dev/stderr | - yq -r -c '.spec.template.spec.volumes[] | select(.name == "snapshot-agent-user-config")' | tee /dev/stderr) - local actual - actual=$(echo $vol | jq -r '. .name' | tee /dev/stderr) - [ "${actual}" = 'snapshot-agent-user-config' ] - - actual=$(echo $vol | jq -r '. .secret.secretName' | tee /dev/stderr) - [ "${actual}" = 'a/b/c/d' ] - - actual=$(echo $vol | jq -r '. .secret.items[0].key' | tee /dev/stderr) - [ "${actual}" = 'snapshot-agent-config' ] - - actual=$(echo $vol | jq -r '. .secret.items[0].path' | tee /dev/stderr) - [ "${actual}" = 'snapshot-config.json' ] -} - -@test "server/StatefulSet: snapshot-agent: adds volume mount to snapshot container for snapshot agent config secret when secret is configured" { - cd `chart_dir` - local vol=$(helm template \ - -s templates/server-statefulset.yaml \ - --set 'server.snapshotAgent.enabled=true' \ - --set 'server.snapshotAgent.configSecret.secretName=a/b/c/d' \ - --set 'server.snapshotAgent.configSecret.secretKey=snapshot-agent-config' \ - . | tee /dev/stderr | - yq -r -c '.spec.template.spec.containers[1].volumeMounts[] | select(.name == "snapshot-agent-user-config")' | tee /dev/stderr) - local actual - actual=$(echo $vol | jq -r '. .name' | tee /dev/stderr) - [ "${actual}" = 'snapshot-agent-user-config' ] - - actual=$(echo $vol | jq -r '. 
.readOnly' | tee /dev/stderr) - [ "${actual}" = 'true' ] - - actual=$(echo $vol | jq -r '. .mountPath' | tee /dev/stderr) - [ "${actual}" = '/consul/user-config' ] -} - -@test "server/StatefulSet: snapshot-agent: set config-dir argument on snapshot agent command to volume mount" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/server-statefulset.yaml \ - --set 'server.snapshotAgent.enabled=true' \ - --set 'server.snapshotAgent.configSecret.secretName=a/b/c/d' \ - --set 'server.snapshotAgent.configSecret.secretKey=snapshot-agent-config' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[1].command[2] | contains("-config-dir=/consul/user-config")' | tee /dev/stderr) - [ "${actual}" = 'true' ] -} - -@test "server/StatefulSet: snapshot-agent: does not configure snapshot agent login config secret when acls are disabled" { - cd `chart_dir` - local spec=$(helm template \ - -s templates/server-statefulset.yaml \ - --set 'server.snapshotAgent.enabled=true' \ - --set 'global.acls.manageSystemACLs=false' \ - . | tee /dev/stderr | - yq -r -c '.spec.template.spec' | tee /dev/stderr) - actual=$(echo $spec | yq -r '.volumes[] | select(.name == "snapshot-agent-config")') - [ "${actual}" = "" ] - - actual=$(echo $spec | yq -r '.containers[1].volumeMounts') - [ "${actual}" = "null" ] - - actual=$(echo $spec | yq -r '.containers[1].command[2] | contains("-config-file=/consul/config/snapshot-login.json")') - [ "${actual}" = "false" ] -} - -@test "server/StatefulSet: snapshot-agent: adds volume for snapshot agent login config secret when acls are enabled" { - cd `chart_dir` - local vol=$(helm template \ - -s templates/server-statefulset.yaml \ - --set 'server.snapshotAgent.enabled=true' \ - --set 'global.acls.manageSystemACLs=true' \ - . | tee /dev/stderr | - yq -r -c '.spec.template.spec.volumes[] | select(.name == "snapshot-agent-config")' | tee /dev/stderr) - local actual - actual=$(echo $vol | jq -r '. .name' | tee /dev/stderr) - [ "${actual}" = 'snapshot-agent-config' ] - - actual=$(echo $vol | jq -r '. .configMap.name' | tee /dev/stderr) - [ "${actual}" = 'release-name-consul-snapshot-agent-config' ] -} - -@test "server/StatefulSet: snapshot-agent: adds volume mount to snapshot container for snapshot agent login config secret when acls are enabled" { - cd `chart_dir` - local vol=$(helm template \ - -s templates/server-statefulset.yaml \ - --set 'server.snapshotAgent.enabled=true' \ - --set 'global.acls.manageSystemACLs=true' \ - . | tee /dev/stderr | - yq -r -c '.spec.template.spec.containers[1].volumeMounts[] | select(.name == "snapshot-agent-config")' | tee /dev/stderr) - local actual - actual=$(echo $vol | jq -r '. .name' | tee /dev/stderr) - [ "${actual}" = 'snapshot-agent-config' ] - - actual=$(echo $vol | jq -r '. .readOnly' | tee /dev/stderr) - [ "${actual}" = 'true' ] - - actual=$(echo $vol | jq -r '. .mountPath' | tee /dev/stderr) - [ "${actual}" = '/consul/config' ] -} - -@test "server/StatefulSet: snapshot-agent: set config-file argument on snapshot agent command to login config when acls are enabled" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/server-statefulset.yaml \ - --set 'server.snapshotAgent.enabled=true' \ - --set 'global.acls.manageSystemACLs=true' \ - . 
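#--------------------------------------------------------------------
# For a quick manual look at what these snapshot-agent assertions walk through,
# the same pipeline the tests use works from the chart root; a sketch, assuming
# the jq-wrapper build of yq these tests already rely on. Selecting the
# container by name avoids hard-coding the [1] index used above:
helm template -s templates/server-statefulset.yaml \
  --set 'server.snapshotAgent.enabled=true' \
  --set 'global.acls.manageSystemACLs=true' . \
  | yq -r '.spec.template.spec.containers[] | select(.name == "consul-snapshot-agent") | .command'
#--------------------------------------------------------------------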
| tee /dev/stderr | - yq -r '.spec.template.spec.containers[1].command[2] | contains("-config-file=/consul/config/snapshot-login.json")' | tee /dev/stderr) - [ "${actual}" = 'true' ] -} - -@test "server/StatefulSet: snapshot-agent: uses default consul addr when TLS is disabled" { - cd `chart_dir` - local env=$(helm template \ - -s templates/server-statefulset.yaml \ - --set 'server.snapshotAgent.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[1].env[]' | tee /dev/stderr) - - local actual - actual=$(echo $env | jq -r '. | select(.name == "CONSUL_HTTP_ADDR") | .value' | tee /dev/stderr) - [ "${actual}" = 'http://127.0.0.1:8500' ] -} - -@test "server/StatefulSet: snapshot-agent: sets TLS env vars when global.tls.enabled" { - cd `chart_dir` - local env=$(helm template \ - -s templates/server-statefulset.yaml \ - --set 'server.snapshotAgent.enabled=true' \ - --set 'global.tls.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[1].env[]' | tee /dev/stderr) - - local actual - actual=$(echo $env | jq -r '. | select(.name == "CONSUL_HTTP_ADDR") | .value' | tee /dev/stderr) - [ "${actual}" = 'https://127.0.0.1:8501' ] - - actual=$(echo $env | jq -r '. | select(.name == "CONSUL_CACERT") | .value' | tee /dev/stderr) - [ "${actual}" = "/consul/tls/ca/tls.crt" ] -} - -@test "server/StatefulSet: snapshot-agent: populates container volumeMounts when global.tls.enabled is true" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/server-statefulset.yaml \ - --set 'server.enabled=true' \ - --set 'server.snapshotAgent.enabled=true' \ - --set 'global.tls.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[1].volumeMounts[] | select(.name == "consul-ca-cert") | .name' | tee /dev/stderr) - [ "${actual}" = "consul-ca-cert" ] -} - -#-------------------------------------------------------------------- -# server.snapshotAgent.resources - -@test "server/StatefulSet: snapshot-agent: default resources" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/server-statefulset.yaml \ - --set 'server.snapshotAgent.enabled=true' \ - . | tee /dev/stderr | - yq -rc '.spec.template.spec.containers[1].resources' | tee /dev/stderr) - [ "${actual}" = '{"limits":{"cpu":"50m","memory":"50Mi"},"requests":{"cpu":"50m","memory":"50Mi"}}' ] -} - -@test "server/StatefulSet: snapshot-agent: can set resources" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/server-statefulset.yaml \ - --set 'server.snapshotAgent.enabled=true' \ - --set 'server.snapshotAgent.resources.requests.memory=100Mi' \ - --set 'server.snapshotAgent.resources.requests.cpu=100m' \ - --set 'server.snapshotAgent.resources.limits.memory=200Mi' \ - --set 'server.snapshotAgent.resources.limits.cpu=200m' \ - . | tee /dev/stderr | - yq -rc '.spec.template.spec.containers[1].resources' | tee /dev/stderr) - [ "${actual}" = '{"limits":{"cpu":"200m","memory":"200Mi"},"requests":{"cpu":"100m","memory":"100Mi"}}' ] -} - -#-------------------------------------------------------------------- -# server.snapshotAgent.caCert - -@test "server/StatefulSet: snapshot-agent: if caCert is set command is modified correctly" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/server-statefulset.yaml \ - --set 'server.snapshotAgent.enabled=true' \ - --set 'server.snapshotAgent.caCert=-----BEGIN CERTIFICATE----- -MIICFjCCAZsCCQCdwLtdjbzlYzAKBggqhkjOPQQDAjB0MQswCQYDVQQGEwJDQTEL' \ - . 
| tee /dev/stderr | - yq -r '.spec.template.spec.containers[1].command[2] | contains("cat < /extra-ssl-certs/custom-ca.pem")' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "server/StatefulSet: snapshot-agent: if caCert is set extra-ssl-certs volumeMount is added" { - cd `chart_dir` - local object=$(helm template \ - -s templates/server-statefulset.yaml \ - --set 'server.snapshotAgent.enabled=true' \ - --set 'server.snapshotAgent.caCert=-----BEGIN CERTIFICATE----- -MIICFjCCAZsCCQCdwLtdjbzlYzAKBggqhkjOPQQDAjB0MQswCQYDVQQGEwJDQTEL' \ - . | tee /dev/stderr | yq -r '.spec.template.spec' | tee /dev/stderr) - - local actual=$(echo $object | jq -r '.volumes[] | select(.name == "extra-ssl-certs") | .name' | tee /dev/stderr) - [ "${actual}" = "extra-ssl-certs" ] -} - -@test "server/StatefulSet: snapshot-agent: if caCert is set SSL_CERT_DIR env var is set" { - cd `chart_dir` - local object=$(helm template \ - -s templates/server-statefulset.yaml \ - --set 'server.snapshotAgent.enabled=true' \ - --set 'server.snapshotAgent.caCert=-----BEGIN CERTIFICATE----- -MIICFjCCAZsCCQCdwLtdjbzlYzAKBggqhkjOPQQDAjB0MQswCQYDVQQGEwJDQTEL' \ - . | tee /dev/stderr | yq -r '.spec.template.spec.containers[1].env[] | select(.name == "SSL_CERT_DIR")' | tee /dev/stderr) - - local actual=$(echo $object | jq -r '.name' | tee /dev/stderr) - [ "${actual}" = "SSL_CERT_DIR" ] - local actual=$(echo $object | jq -r '.value' | tee /dev/stderr) - [ "${actual}" = "/etc/ssl/certs:/extra-ssl-certs" ] -} - - -#-------------------------------------------------------------------- -# snapshotAgent license-autoload - -@test "server/StatefulSet: snapshot-agent: adds volume mount for license secret when enterprise license secret name and key are provided" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/server-statefulset.yaml \ - --set 'server.snapshotAgent.enabled=true' \ - --set 'global.enterpriseLicense.secretName=foo' \ - --set 'global.enterpriseLicense.secretKey=bar' \ - . | tee /dev/stderr | - yq -r -c '.spec.template.spec.containers[1].volumeMounts[] | select(.name == "consul-license")' | tee /dev/stderr) - [ "${actual}" = '{"name":"consul-license","mountPath":"/consul/license","readOnly":true}' ] -} - -@test "server/StatefulSet: snapshot-agent: adds env var for license path when enterprise license secret name and key are provided" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/server-statefulset.yaml \ - --set 'server.snapshotAgent.enabled=true' \ - --set 'global.enterpriseLicense.secretName=foo' \ - --set 'global.enterpriseLicense.secretKey=bar' \ - . | tee /dev/stderr | - yq -r -c '.spec.template.spec.containers[1].env[] | select(.name == "CONSUL_LICENSE_PATH")' | tee /dev/stderr) - [ "${actual}" = '{"name":"CONSUL_LICENSE_PATH","value":"/consul/license/bar"}' ] -} - -@test "server/StatefulSet: snapshot-agent: does not add license secret volume mount if manageSystemACLs are enabled" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/server-statefulset.yaml \ - --set 'server.enabled=true' \ - --set 'server.snapshotAgent.enabled=true' \ - --set 'global.enterpriseLicense.secretName=foo' \ - --set 'global.enterpriseLicense.secretKey=bar' \ - --set 'global.acls.manageSystemACLs=true' \ - . 
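#--------------------------------------------------------------------
# The caCert tests above pass a multi-line PEM through --set with a literal
# newline embedded in the value. Outside of tests, helm's --set-file flag is
# the easier route for a real certificate; a sketch (ca.pem is a hypothetical
# local path):
helm template -s templates/server-statefulset.yaml \
  --set 'server.snapshotAgent.enabled=true' \
  --set-file 'server.snapshotAgent.caCert=ca.pem' . \
  | yq -r '.spec.template.spec.containers[1].command[2]'
#--------------------------------------------------------------------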
| tee /dev/stderr | - yq -r -c '.spec.template.spec.containers[1].volumeMounts[] | select(.name == "consul-license")' | tee /dev/stderr) - [ "${actual}" = "" ] -} - -@test "server/StatefulSet: snapshot-agent: does not add license env if manageSystemACLs are enabled" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/server-statefulset.yaml \ - --set 'server.enabled=true' \ - --set 'server.snapshotAgent.enabled=true' \ - --set 'global.enterpriseLicense.secretName=foo' \ - --set 'global.enterpriseLicense.secretKey=bar' \ - --set 'global.acls.manageSystemACLs=true' \ - . | tee /dev/stderr | - yq -r -c '.spec.template.spec.containers[1].env[] | select(.name == "CONSUL_LICENSE_PATH")' | tee /dev/stderr) - [ "${actual}" = "" ] -} - -#-------------------------------------------------------------------- -# snapshotAgent Vault - -@test "server/StatefulSet: snapshot-agent: vault CONSUL_LICENSE_PATH is set to /vault/secrets/enterpriselicense.txt" { - cd `chart_dir` - local env=$(helm template \ - -s templates/server-statefulset.yaml \ - --set 'server.enabled=true' \ - --set 'server.snapshotAgent.enabled=true' \ - --set 'global.secretsBackend.vault.enabled=true' \ - --set 'global.secretsBackend.vault.consulServerRole=test' \ - --set 'global.enterpriseLicense.secretName=a/b/c/d' \ - --set 'global.enterpriseLicense.secretKey=enterpriselicense' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[1].env[]' | tee /dev/stderr) - - local actual - - local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_LICENSE_PATH") | .value' | tee /dev/stderr) - [ "${actual}" = "/vault/secrets/enterpriselicense.txt" ] -} - -@test "server/StatefulSet: snapshot-agent: vault does not add volume mount for license secret" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/server-statefulset.yaml \ - --set 'server.enabled=true' \ - --set 'server.snapshotAgent.enabled=true' \ - --set 'global.secretsBackend.vault.enabled=true' \ - --set 'global.secretsBackend.vault.consulServerRole=test' \ - --set 'global.enterpriseLicense.secretName=a/b/c/d' \ - --set 'global.enterpriseLicense.secretKey=enterpriselicense' \ - . | tee /dev/stderr | - yq -r -c '.spec.template.spec.containers[1].volumeMounts[] | select(.name == "consul-license")' | tee /dev/stderr) - [ "${actual}" = "" ] -} - -@test "server/StatefulSet: snapshot-agent: vault snapshot agent config annotations are correct when enabled" { - cd `chart_dir` - local object=$(helm template \ - -s templates/server-statefulset.yaml \ - --set 'server.enabled=true' \ - --set 'global.secretsBackend.vault.enabled=true' \ - --set 'global.secretsBackend.vault.consulServerRole=test' \ - --set 'server.snapshotAgent.enabled=true' \ - --set 'server.snapshotAgent.configSecret.secretName=path/to/secret' \ - --set 'server.snapshotAgent.configSecret.secretKey=config' \ - . 
| tee /dev/stderr | - yq -r '.spec.template.metadata' | tee /dev/stderr) - - local actual=$(echo $object | - yq -r '.annotations["vault.hashicorp.com/agent-inject-secret-snapshot-agent-config.json"]' | tee /dev/stderr) - [ "${actual}" = "path/to/secret" ] - - actual=$(echo $object | - yq -r '.annotations["vault.hashicorp.com/agent-inject-template-snapshot-agent-config.json"]' | tee /dev/stderr) - local expected=$'{{- with secret \"path/to/secret\" -}}\n{{- .Data.data.config -}}\n{{- end -}}' - [ "${actual}" = "${expected}" ] - - actual=$(echo $object | jq -r '.annotations["vault.hashicorp.com/role"]' | tee /dev/stderr) - [ "${actual}" = "test" ] -} - -@test "server/StatefulSet: snapshot-agent: vault does not add volume for snapshot agent config secret" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/server-statefulset.yaml \ - --set 'server.enabled=true' \ - --set 'global.secretsBackend.vault.enabled=true' \ - --set 'global.secretsBackend.vault.consulServerRole=test' \ - --set 'server.snapshotAgent.enabled=true' \ - --set 'server.snapshotAgent.configSecret.secretName=a/b/c/d' \ - --set 'server.snapshotAgent.configSecret.secretKey=snapshot-agent-config' \ - . | tee /dev/stderr | - yq -r -c '.spec.template.spec.volumes[] | select(.name == "snapshot-agent-user-config")' | tee /dev/stderr) - [ "${actual}" = "" ] -} - -@test "server/StatefulSet: snapshot-agent: vault does not add volume mount for snapshot agent config secret" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/server-statefulset.yaml \ - --set 'server.enabled=true' \ - --set 'global.secretsBackend.vault.enabled=true' \ - --set 'global.secretsBackend.vault.consulServerRole=test' \ - --set 'server.snapshotAgent.enabled=true' \ - --set 'server.snapshotAgent.configSecret.secretName=a/b/c/d' \ - --set 'server.snapshotAgent.configSecret.secretKey=snapshot-agent-config' \ - . | tee /dev/stderr | - yq -r -c '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "snapshot-agent-user-config")' | tee /dev/stderr) - [ "${actual}" = "" ] -} - -@test "server/StatefulSet: snapshot-agent: vault sets config-file argument on snapshot agent command to config downloaded by vault agent injector" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/server-statefulset.yaml \ - --set 'server.enabled=true' \ - --set 'global.secretsBackend.vault.enabled=true' \ - --set 'global.secretsBackend.vault.consulServerRole=test' \ - --set 'server.snapshotAgent.enabled=true' \ - --set 'server.snapshotAgent.configSecret.secretName=a/b/c/d' \ - --set 'server.snapshotAgent.configSecret.secretKey=snapshot-agent-config' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[1].command[2] | contains("-config-file=/vault/secrets/snapshot-agent-config.json")' | tee /dev/stderr) - [ "${actual}" = 'true' ] -} - -#-------------------------------------------------------------------- -# snapshotAgent Interval - -@test "server/StatefulSet: snapshot-agent: interval defaults to 1h" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/server-statefulset.yaml \ - --set 'server.enabled=true' \ - --set 'server.snapshotAgent.enabled=true' \ - . 
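#--------------------------------------------------------------------
# To eyeball every vault agent-inject annotation these assertions pick at one
# by one, the rendered pod metadata can be filtered in a single pass; a sketch
# using the same flags as the annotation test above:
helm template -s templates/server-statefulset.yaml \
  --set 'server.enabled=true' \
  --set 'global.secretsBackend.vault.enabled=true' \
  --set 'global.secretsBackend.vault.consulServerRole=test' \
  --set 'server.snapshotAgent.enabled=true' \
  --set 'server.snapshotAgent.configSecret.secretName=path/to/secret' \
  --set 'server.snapshotAgent.configSecret.secretKey=config' . \
  | yq -r '.spec.template.metadata.annotations | with_entries(select(.key | startswith("vault.hashicorp.com")))'
#--------------------------------------------------------------------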
| tee /dev/stderr | - yq -r '.spec.template.spec.containers[1].command[2] | contains("-interval=1h")' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "server/StatefulSet: snapshot-agent: interval can be set" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/server-statefulset.yaml \ - --set 'server.enabled=true' \ - --set 'server.snapshotAgent.enabled=true' \ - --set 'server.snapshotAgent.interval=10h34m5s' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[1].command[2] | contains("-interval=10h34m5s")' | tee /dev/stderr) - [ "${actual}" = "true" ] -} \ No newline at end of file diff --git a/charts/consul/test/unit/sync-catalog-deployment.bats b/charts/consul/test/unit/sync-catalog-deployment.bats index ae1fe1a854..797432e21a 100755 --- a/charts/consul/test/unit/sync-catalog-deployment.bats +++ b/charts/consul/test/unit/sync-catalog-deployment.bats @@ -62,33 +62,21 @@ load _helpers [ "${actual}" = "bar" ] } -@test "syncCatalog/Deployment: consul env defaults" { +@test "syncCatalog/Deployment: command defaults" { cd `chart_dir` - local env=$(helm template \ - -s templates/sync-catalog-deployment.yaml \ + local object=$(helm template \ + -s templates/sync-catalog-deployment.yaml \ --set 'syncCatalog.enabled=true' \ . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) - - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_ADDRESSES").value' | tee /dev/stderr) - [ "${actual}" = "release-name-consul-server.default.svc" ] - - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_GRPC_PORT").value' | tee /dev/stderr) - [ "${actual}" = "8502" ] - - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_HTTP_PORT").value' | tee /dev/stderr) - [ "${actual}" = "8500" ] - - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_DATACENTER").value' | tee /dev/stderr) - [ "${actual}" = "dc1" ] + yq -r '.spec.template.spec.containers[0].command' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r ' any(contains("consul-k8s-control-plane sync-catalog"))' | tee /dev/stderr) + [ "${actual}" = "true" ] - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_API_TIMEOUT").value' | tee /dev/stderr) - [ "${actual}" = "5s" ] + local actual=$(echo $object | + yq -r ' any(contains("consul-api-timeout=5"))' | tee /dev/stderr) + [ "${actual}" = "true" ] } #-------------------------------------------------------------------- @@ -450,82 +438,285 @@ load _helpers #-------------------------------------------------------------------- # global.acls.manageSystemACLs -@test "syncCatalog/Deployment: ACL auth method env vars are set when acls are enabled" { +@test "syncCatalog/Deployment: consul-logout preStop hook is added when ACLs are enabled" { cd `chart_dir` - local env=$(helm template \ + local actual=$(helm template \ -s templates/sync-catalog-deployment.yaml \ --set 'syncCatalog.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) + yq '[.spec.template.spec.containers[0].lifecycle.preStop.exec.command[2]] | any(contains("consul-k8s-control-plane consul-logout -consul-api-timeout=5s"))' | tee /dev/stderr) - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_LOGIN_AUTH_METHOD").value' | tee /dev/stderr) - [ "${actual}" = "release-name-consul-k8s-component-auth-method" ] + [ "${actual}" = "true" ] +} - local actual=$(echo "$env" | - jq -r '. 
| select( .name == "CONSUL_LOGIN_DATACENTER").value' | tee /dev/stderr) - [ "${actual}" = "dc1" ] - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_LOGIN_META").value' | tee /dev/stderr) - [ "${actual}" = 'component=sync-catalog,pod=$(NAMESPACE)/$(POD_NAME)' ] +@test "syncCatalog/Deployment: CONSUL_HTTP_TOKEN_FILE is not set when acls are disabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + . | tee /dev/stderr | + yq '[.spec.template.spec.containers[0].env[0].name] | any(contains("CONSUL_HTTP_TOKEN_FILE"))' | tee /dev/stderr) + [ "${actual}" = "false" ] } -@test "syncCatalog/Deployment: sets global auth method and primary datacenter when federation and acls and namespaces are enabled" { +@test "syncCatalog/Deployment: CONSUL_HTTP_TOKEN_FILE is set when acls are enabled" { cd `chart_dir` - local env=$(helm template \ + local actual=$(helm template \ -s templates/sync-catalog-deployment.yaml \ --set 'syncCatalog.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ - --set 'global.federation.enabled=true' \ - --set 'global.federation.primaryDatacenter=dc1' \ - --set 'global.datacenter=dc2' \ - --set 'global.enableConsulNamespaces=true' \ - --set 'global.tls.enabled=true' \ - --set 'meshGateway.enabled=true' \ . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) + yq '[.spec.template.spec.containers[0].env[0].name] | any(contains("CONSUL_HTTP_TOKEN_FILE"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "syncCatalog/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command and environment with tls disabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[0]' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.name' | tee /dev/stderr) + [ "${actual}" = "sync-catalog-acl-init" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[1].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[1].value] | any(contains("http://$(HOST_IP):8500"))' | tee /dev/stderr) + echo $actual + [ "${actual}" = "true" ] - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_LOGIN_AUTH_METHOD").value' | tee /dev/stderr) - [ "${actual}" = "release-name-consul-k8s-component-auth-method-dc2" ] + local actual=$(echo $object | + yq -r '.command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr) + [ "${actual}" = "true" ] - local actual=$(echo "$env" | - jq -r '. 
| select( .name == "CONSUL_LOGIN_DATACENTER").value' | tee /dev/stderr) - [ "${actual}" = "dc1" ] + local actual=$(echo $object | + yq -r '.command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr) + [ "${actual}" = "true" ] } -@test "syncCatalog/Deployment: sets default login partition and acls and partitions are enabled" { +@test "syncCatalog/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command and environment with tls enabled" { cd `chart_dir` - local env=$(helm template \ + local object=$(helm template \ -s templates/sync-catalog-deployment.yaml \ --set 'syncCatalog.enabled=true' \ + --set 'global.tls.enabled=true' \ --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "sync-catalog-acl-init")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[1].name] | any(contains("CONSUL_CACERT"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].value] | any(contains("https://$(HOST_IP):8501"))' | tee /dev/stderr) + echo $actual + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.volumeMounts[1] | any(contains("consul-ca-cert"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "syncCatalog/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command with Partitions enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.enableConsulNamespaces=true' \ --set 'global.adminPartitions.enabled=true' \ + --set 'global.adminPartitions.name=default' \ + --set 'global.acls.manageSystemACLs=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "sync-catalog-acl-init")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-acl-auth-method=release-name-consul-k8s-component-auth-method"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-partition=default"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[1].name] | any(contains("CONSUL_CACERT"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].value] | any(contains("https://$(HOST_IP):8501"))' | tee /dev/stderr) + echo $actual + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.volumeMounts[1] | any(contains("consul-ca-cert"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "syncCatalog/Deployment: container is created when global.acls.manageSystemACLs=true and has correct command with Partitions enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + --set 'global.tls.enabled=true' \ --set 'global.enableConsulNamespaces=true' \ + --set 'global.adminPartitions.enabled=true' \ + --set 'global.adminPartitions.name=default' \ + --set 'global.acls.manageSystemACLs=true' \ . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) + yq '.spec.template.spec.containers[] | select(.name == "sync-catalog")' | tee /dev/stderr) - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_LOGIN_PARTITION").value' | tee /dev/stderr) - [ "${actual}" = "default" ] + local actual=$(echo $object | + yq -r '.command | any(contains("-partition=default"))' | tee /dev/stderr) + [ "${actual}" = "true" ] } -@test "syncCatalog/Deployment: sets non-default login partition and acls and partitions are enabled" { +@test "syncCatalog/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command and environment with tls enabled and autoencrypt enabled" { cd `chart_dir` - local env=$(helm template \ + local object=$(helm template \ -s templates/sync-catalog-deployment.yaml \ --set 'syncCatalog.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.acls.manageSystemACLs=true' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.adminPartitions.name=foo' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "sync-catalog-acl-init")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[1].name] | any(contains("CONSUL_CACERT"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '[.env[2].value] | any(contains("https://$(HOST_IP):8501"))' | tee /dev/stderr) + echo $actual + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq '.volumeMounts[1] | any(contains("consul-auto-encrypt-ca-cert"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "syncCatalog/Deployment: auto-encrypt init container is created and is the first init-container when global.acls.manageSystemACLs=true and has correct command and environment with tls enabled and autoencrypt enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[0]' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.name' | tee /dev/stderr) + [ "${actual}" = "get-auto-encrypt-client-ca" ] +} + +@test "syncCatalog/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command when in non-primary datacenter with Consul Namespaces disabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + --set 'global.datacenter=dc2' \ + --set 'global.federation.enabled=true' \ + --set 'global.federation.primaryDatacenter=dc1' \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "sync-catalog-acl-init")' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-acl-auth-method=release-name-consul-k8s-component-auth-method"))' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "syncCatalog/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command when in non-primary datacenter with Consul Namespaces enabled" { + cd `chart_dir` + local object=$(helm template \ + -s templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + --set 'global.datacenter=dc2' \ --set 'global.enableConsulNamespaces=true' \ + --set 'global.federation.enabled=true' \ + --set 'global.federation.primaryDatacenter=dc1' \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.acls.manageSystemACLs=true' \ . 
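#--------------------------------------------------------------------
# The init-container assertions above reduce to an ordering contract: with TLS
# auto-encrypt and ACLs both enabled, the pod carries exactly two init
# containers, get-auto-encrypt-client-ca first and sync-catalog-acl-init
# second. A sketch that prints the order directly:
helm template -s templates/sync-catalog-deployment.yaml \
  --set 'syncCatalog.enabled=true' \
  --set 'global.tls.enabled=true' \
  --set 'global.tls.enableAutoEncrypt=true' \
  --set 'global.acls.manageSystemACLs=true' . \
  | yq -r '.spec.template.spec.initContainers[].name'
#--------------------------------------------------------------------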
| tee /dev/stderr | - yq '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) + yq '.spec.template.spec.initContainers[] | select(.name == "sync-catalog-acl-init")' | tee /dev/stderr) - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_LOGIN_PARTITION").value' | tee /dev/stderr) - [ "${actual}" = "foo" ] + local actual=$(echo $object | + yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-acl-auth-method=release-name-consul-k8s-component-auth-method-dc2"))' | tee /dev/stderr) + [ "${actual}" = "true" ] + + local actual=$(echo $object | + yq -r '.command | any(contains("-primary-datacenter=dc1"))' | tee /dev/stderr) + [ "${actual}" = "true" ] } #-------------------------------------------------------------------- @@ -559,23 +750,17 @@ load _helpers cd `chart_dir` local env=$(helm template \ -s templates/sync-catalog-deployment.yaml \ - --set 'client.enabled=true' \ --set 'syncCatalog.enabled=true' \ --set 'global.tls.enabled=true' \ . | tee /dev/stderr | yq -r '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_HTTP_PORT").value' | tee /dev/stderr) - [ "${actual}" = "8501" ] - - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_USE_TLS").value' | tee /dev/stderr) - [ "${actual}" = "true" ] + local actual + actual=$(echo $env | jq -r '. | select(.name == "CONSUL_HTTP_ADDR") | .value' | tee /dev/stderr) + [ "${actual}" = 'https://$(HOST_IP):8501' ] - local actual=$(echo "$env" | - jq -r '. | select( .name == "CONSUL_CACERT_FILE").value' | tee /dev/stderr) - [ "${actual}" = "/consul/tls/ca/tls.crt" ] + actual=$(echo $env | jq -r '. | select(.name == "CONSUL_CACERT") | .value' | tee /dev/stderr) + [ "${actual}" = "/consul/tls/ca/tls.crt" ] } @test "syncCatalog/Deployment: can overwrite CA secret with the provided one" { @@ -601,17 +786,81 @@ load _helpers [ "${actual}" = "key" ] } -@test "syncCatalog/Deployment: consul-ca-cert volumeMount is added when TLS is enabled" { +@test "syncCatalog/Deployment: consul-auto-encrypt-ca-cert volume is not added with auto-encrypt and client.enabled=false" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'client.enabled=false' \ + . | tee /dev/stderr | + yq '.spec.template.spec.volumes[] | select(.name == "consul-auto-encrypt-ca-cert")' | tee /dev/stderr) + [ "${actual}" = "" ] +} + +@test "syncCatalog/Deployment: consul-auto-encrypt-ca-cert volume is added when TLS with auto-encrypt is enabled" { cd `chart_dir` local actual=$(helm template \ -s templates/sync-catalog-deployment.yaml \ --set 'syncCatalog.enabled=true' \ --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.volumes[] | select(.name == "consul-auto-encrypt-ca-cert") | length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "syncCatalog/Deployment: consul-auto-encrypt-ca-cert volumeMount is added when TLS with auto-encrypt is enabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + . 
| tee /dev/stderr | + yq '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "consul-auto-encrypt-ca-cert") | length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "syncCatalog/Deployment: consul-ca-cert volumeMount is added when TLS with auto-encrypt is enabled and client disabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'client.enabled=false' \ . | tee /dev/stderr | yq '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "consul-ca-cert") | length > 0' | tee /dev/stderr) [ "${actual}" = "true" ] } +@test "syncCatalog/Deployment: get-auto-encrypt-client-ca init container is created when TLS with auto-encrypt is enabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "get-auto-encrypt-client-ca") | length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "syncCatalog/Deployment: adds both init containers when TLS with auto-encrypt and ACLs are enabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers | length == 2' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + @test "syncCatalog/Deployment: consul-ca-cert volume is not added if externalServers.enabled=true and externalServers.useSystemRoots=true" { cd `chart_dir` local actual=$(helm template \ @@ -729,7 +978,7 @@ load _helpers local actual=$(echo $object | yq 'any(contains("enable-k8s-namespace-mirroring"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + [ "${actual}" = "false" ] local actual=$(echo $object | yq 'any(contains("k8s-namespace-mirroring-prefix"))' | tee /dev/stderr) @@ -844,6 +1093,79 @@ load _helpers [ "${actual}" = '{"limits":{"cpu":"200m","memory":"200Mi"},"requests":{"cpu":"100m","memory":"100Mi"}}' ] } + +#-------------------------------------------------------------------- +# clients.enabled + +@test "syncCatalog/Deployment: HOST_IP is used when client.enabled=true" { + cd `chart_dir` + local env=$(helm template \ + -s templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + --set 'client.enabled=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) + + local actual + actual=$(echo $env | jq -r '. | select(.name == "CONSUL_HTTP_ADDR") | .value' | tee /dev/stderr) + [ "${actual}" = 'http://$(HOST_IP):8500' ] +} + +@test "syncCatalog/Deployment: HOST_IP is used when client.enabled=true and global.tls.enabled=true" { + cd `chart_dir` + local env=$(helm template \ + -s templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'client.enabled=true' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) + + local actual + actual=$(echo $env | jq -r '. 
| select(.name == "CONSUL_HTTP_ADDR") | .value' | tee /dev/stderr) + [ "${actual}" = 'https://$(HOST_IP):8501' ] + + actual=$(echo $env | jq -r '. | select(.name == "CONSUL_CACERT") | .value' | tee /dev/stderr) + [ "${actual}" = "/consul/tls/ca/tls.crt" ] +} + +@test "syncCatalog/Deployment: consul service is used when client.enabled=false and global.tls.enabled=true and autoencrypt on" { + cd `chart_dir` + local env=$(helm template \ + -s templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'client.enabled=false' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) + + local actual + actual=$(echo $env | jq -r '. | select(.name == "CONSUL_HTTP_ADDR") | .value' | tee /dev/stderr) + [ "${actual}" = 'https://release-name-consul-server:8501' ] + + actual=$(echo $env | jq -r '. | select(.name == "CONSUL_CACERT") | .value' | tee /dev/stderr) + [ "${actual}" = "/consul/tls/ca/tls.crt" ] +} + +@test "syncCatalog/Deployment: consul service is used when client.enabled=false and global.tls.enabled=true" { + cd `chart_dir` + local env=$(helm template \ + -s templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'client.enabled=false' \ + . | tee /dev/stderr | + yq -r '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) + + local actual + actual=$(echo $env | jq -r '. | select(.name == "CONSUL_HTTP_ADDR") | .value' | tee /dev/stderr) + [ "${actual}" = 'https://release-name-consul-server:8501' ] + + actual=$(echo $env | jq -r '. | select(.name == "CONSUL_CACERT") | .value' | tee /dev/stderr) + [ "${actual}" = "/consul/tls/ca/tls.crt" ] +} + #-------------------------------------------------------------------- # priorityClassName @@ -981,6 +1303,36 @@ load _helpers [ "${actual}" = "true" ] } +#-------------------------------------------------------------------- +# get-auto-encrypt-client-ca + +@test "syncCatalog/Deployment: get-auto-encrypt-client-ca uses server's stateful set address by default and passes ca cert" { + cd `chart_dir` + local command=$(helm template \ + -s templates/sync-catalog-deployment.yaml \ + --set 'syncCatalog.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.initContainers[] | select(.name == "get-auto-encrypt-client-ca").command | join(" ")' | tee /dev/stderr) + + # check server address + actual=$(echo $command | jq ' . | contains("-server-addr=release-name-consul-server")') + [ "${actual}" = "true" ] + + # check server port + actual=$(echo $command | jq ' . | contains("-server-port=8501")') + [ "${actual}" = "true" ] + + # check server's CA cert + actual=$(echo $command | jq ' . | contains("-ca-file=/consul/tls/ca/tls.crt")') + [ "${actual}" = "true" ] + + # check consul-api-timeout + actual=$(echo $command | jq ' . 
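#--------------------------------------------------------------------
# The CONSUL_HTTP_ADDR expectations in the client.enabled tests above form a
# small matrix; summarized here from the assertions themselves:
#   client.enabled=true,  tls disabled -> http://$(HOST_IP):8500
#   client.enabled=true,  tls enabled  -> https://$(HOST_IP):8501
#   client.enabled=false, tls enabled  -> https://release-name-consul-server:8501
# i.e. sync-catalog talks to the node-local client agent when clients run, and
# falls back to the server service when they do not.
#--------------------------------------------------------------------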
| contains("-consul-api-timeout=5s")') + [ "${actual}" = "true" ] +} + #-------------------------------------------------------------------- # Vault @@ -990,6 +1342,7 @@ load _helpers -s templates/sync-catalog-deployment.yaml \ --set 'syncCatalog.enabled=true' \ --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.tls.caCert.secretName=foo' \ --set 'global.secretsBackend.vault.enabled=true' \ --set 'global.secretsBackend.vault.consulClientRole=test' \ @@ -1014,12 +1367,6 @@ load _helpers local actual actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/agent-inject-template-serverca.crt"]' | tee /dev/stderr) [ "${actual}" = $'{{- with secret \"foo\" -}}\n{{- .Data.certificate -}}\n{{- end -}}' ] - - actual=$(echo $object | jq -r '.spec.volumes[] | select( .name == "consul-ca-cert")' | tee /dev/stderr) - [ "${actual}" = "" ] - - actual=$(echo $object | jq -r '.spec.containers[0].volumeMounts[] | select( .name == "consul-ca-cert")' | tee /dev/stderr) - [ "${actual}" = "" ] } @test "syncCatalog/Deployment: vault CA is not configured by default" { @@ -1028,6 +1375,7 @@ load _helpers -s templates/sync-catalog-deployment.yaml \ --set 'syncCatalog.enabled=true' \ --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.tls.caCert.secretName=foo' \ --set 'global.secretsBackend.vault.enabled=true' \ --set 'global.secretsBackend.vault.consulClientRole=foo' \ @@ -1048,6 +1396,7 @@ load _helpers -s templates/sync-catalog-deployment.yaml \ --set 'syncCatalog.enabled=true' \ --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.tls.caCert.secretName=foo' \ --set 'global.secretsBackend.vault.enabled=true' \ --set 'global.secretsBackend.vault.consulClientRole=foo' \ @@ -1069,6 +1418,7 @@ load _helpers -s templates/sync-catalog-deployment.yaml \ --set 'syncCatalog.enabled=true' \ --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.tls.caCert.secretName=foo' \ --set 'global.secretsBackend.vault.enabled=true' \ --set 'global.secretsBackend.vault.consulClientRole=foo' \ @@ -1090,6 +1440,7 @@ load _helpers -s templates/sync-catalog-deployment.yaml \ --set 'syncCatalog.enabled=true' \ --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.tls.caCert.secretName=foo' \ --set 'global.secretsBackend.vault.enabled=true' \ --set 'global.secretsBackend.vault.consulClientRole=foo' \ @@ -1130,6 +1481,7 @@ load _helpers -s templates/sync-catalog-deployment.yaml \ --set 'syncCatalog.enabled=true' \ --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ --set 'global.secretsBackend.vault.enabled=true' \ --set 'global.secretsBackend.vault.consulClientRole=test' \ --set 'global.secretsBackend.vault.consulServerRole=foo' \ @@ -1172,181 +1524,3 @@ reservedNameTest() { [ "$status" -eq 1 ] [[ "$output" =~ "The name $name set for key syncCatalog.consulNamespaces.consulDestinationNamespace is reserved by Consul for future use" ]] } - -#-------------------------------------------------------------------- -# global.cloud - -@test "syncCatalog/Deployment: fails when global.cloud.enabled is true and global.cloud.clientId.secretName is not set but global.cloud.clientSecret.secretName and global.cloud.resourceId.secretName is set" { - cd `chart_dir` - run helm template \ - -s templates/sync-catalog-deployment.yaml \ - --set 'syncCatalog.enabled=true' \ - --set 'global.cloud.enabled=true' \ - 
--set 'global.cloud.clientSecret.secretName=client-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-id-key' \ - --set 'global.cloud.resourceId.secretName=client-resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=client-resource-id-key' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]] -} - -@test "syncCatalog/Deployment: fails when global.cloud.enabled is true and global.cloud.clientSecret.secretName is not set but global.cloud.clientId.secretName and global.cloud.resourceId.secretName are set" { - cd `chart_dir` - run helm template \ - -s templates/sync-catalog-deployment.yaml \ - --set 'syncCatalog.enabled=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]] -} - -@test "syncCatalog/Deployment: fails when global.cloud.enabled is true and global.cloud.resourceId.secretName is not set but global.cloud.clientId.secretName and global.cloud.clientSecret.secretName are set" { - cd `chart_dir` - run helm template \ - -s templates/sync-catalog-deployment.yaml \ - --set 'syncCatalog.enabled=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]] -} - -@test "syncCatalog/Deployment: fails when global.cloud.resourceId.secretName is set but global.cloud.resourceId.secretKey is not set." { - cd `chart_dir` - run helm template \ - -s templates/sync-catalog-deployment.yaml \ - --set 'syncCatalog.enabled=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - . - [ "$status" -eq 1 ] - [[ "$output" =~ "When either global.cloud.resourceId.secretName or global.cloud.resourceId.secretKey is defined, both must be set." ]] -} - -@test "syncCatalog/Deployment: fails when global.cloud.authURL.secretName is set but global.cloud.authURL.secretKey is not set."
{ - cd `chart_dir` - run helm template \ - -s templates/sync-catalog-deployment.yaml \ - --set 'syncCatalog.enabled=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.authUrl.secretName=auth-url-name' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.authUrl.secretName or global.cloud.authUrl.secretKey is defined, both must be set." ]] -} - -@test "syncCatalog/Deployment: fails when global.cloud.authURL.secretKey is set but global.cloud.authURL.secretName is not set." { - cd `chart_dir` - run helm template \ - -s templates/sync-catalog-deployment.yaml \ - --set 'syncCatalog.enabled=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.authUrl.secretKey=auth-url-key' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.authUrl.secretName or global.cloud.authUrl.secretKey is defined, both must be set." ]] -} - -@test "syncCatalog/Deployment: fails when global.cloud.apiHost.secretName is set but global.cloud.apiHost.secretKey is not set." { - cd `chart_dir` - run helm template \ - -s templates/sync-catalog-deployment.yaml \ - --set 'syncCatalog.enabled=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.apiHost.secretName=auth-url-name' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.apiHost.secretName or global.cloud.apiHost.secretKey is defined, both must be set." ]] -} - -@test "syncCatalog/Deployment: fails when global.cloud.apiHost.secretKey is set but global.cloud.apiHost.secretName is not set." { - cd `chart_dir` - run helm template \ - -s templates/sync-catalog-deployment.yaml \ - --set 'syncCatalog.enabled=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.apiHost.secretKey=auth-url-key' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.apiHost.secretName or global.cloud.apiHost.secretKey is defined, both must be set." 
]] -} - -@test "syncCatalog/Deployment: fails when global.cloud.scadaAddress.secretName is set but global.cloud.scadaAddress.secretKey is not set." { - cd `chart_dir` - run helm template \ - -s templates/sync-catalog-deployment.yaml \ - --set 'syncCatalog.enabled=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.scadaAddress.secretName=scada-address-name' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.scadaAddress.secretName or global.cloud.scadaAddress.secretKey is defined, both must be set." ]] -} - -@test "syncCatalog/Deployment: fails when global.cloud.scadaAddress.secretKey is set but global.cloud.scadaAddress.secretName is not set." { - cd `chart_dir` - run helm template \ - -s templates/sync-catalog-deployment.yaml \ - --set 'syncCatalog.enabled=true' \ - --set 'global.cloud.enabled=true' \ - --set 'global.cloud.clientId.secretName=client-id-name' \ - --set 'global.cloud.clientId.secretKey=client-id-key' \ - --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \ - --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \ - --set 'global.cloud.resourceId.secretName=resource-id-name' \ - --set 'global.cloud.resourceId.secretKey=resource-id-key' \ - --set 'global.cloud.scadaAddress.secretKey=scada-address-key' \ - . - [ "$status" -eq 1 ] - - [[ "$output" =~ "When either global.cloud.scadaAddress.secretName or global.cloud.scadaAddress.secretKey is defined, both must be set." ]] -} diff --git a/charts/consul/test/unit/terminating-gateways-deployment.bats b/charts/consul/test/unit/terminating-gateways-deployment.bats index 523138a351..5165c63565 100644 --- a/charts/consul/test/unit/terminating-gateways-deployment.bats +++ b/charts/consul/test/unit/terminating-gateways-deployment.bats @@ -25,6 +25,41 @@ load _helpers [ "${actual}" = "release-name-consul-terminating-gateway" ] } +@test "terminatingGateways/Deployment: Adds consul service volumeMount to gateway container" { + cd `chart_dir` + local object=$(helm template \ + -s templates/terminating-gateways-deployment.yaml \ + --set 'terminatingGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'terminatingGateways.enabled=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . | yq '.spec.template.spec.containers[0].volumeMounts[1]' | tee /dev/stderr) + + local actual=$(echo $object | + yq -r '.name' | tee /dev/stderr) + [ "${actual}" = "consul-service" ] + + local actual=$(echo $object | + yq -r '.mountPath' | tee /dev/stderr) + [ "${actual}" = "/consul/service" ] + + local actual=$(echo $object | + yq -r '.readOnly' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "terminatingGateways/Deployment: consul-sidecar uses -consul-api-timeout" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/terminating-gateways-deployment.yaml \ + --set 'terminatingGateways.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.acls.manageSystemACLs=true' \ + . 
+      yq -s '.[0].spec.template.spec.containers[1].command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
 #--------------------------------------------------------------------
 # prerequisites
 
@@ -38,6 +73,40 @@ load _helpers
   [[ "$output" =~ "connectInject.enabled must be true" ]]
 }
 
+@test "terminatingGateways/Deployment: fails if client.grpc=false" {
+  cd `chart_dir`
+  run helm template \
+      -s templates/terminating-gateways-deployment.yaml \
+      --set 'terminatingGateways.enabled=true' \
+      --set 'client.grpc=false' \
+      --set 'connectInject.enabled=true' .
+  [ "$status" -eq 1 ]
+  [[ "$output" =~ "client.grpc must be true" ]]
+}
+
+@test "terminatingGateways/Deployment: fails if global.enabled is false and clients are not explicitly enabled" {
+  cd `chart_dir`
+  run helm template \
+      -s templates/terminating-gateways-deployment.yaml \
+      --set 'terminatingGateways.enabled=true' \
+      --set 'connectInject.enabled=true' \
+      --set 'global.enabled=false' .
+  [ "$status" -eq 1 ]
+  [[ "$output" =~ "clients must be enabled" ]]
+}
+
+@test "terminatingGateways/Deployment: fails if global.enabled is true but clients are explicitly disabled" {
+  cd `chart_dir`
+  run helm template \
+      -s templates/terminating-gateways-deployment.yaml \
+      --set 'terminatingGateways.enabled=true' \
+      --set 'connectInject.enabled=true' \
+      --set 'global.enabled=true' \
+      --set 'client.enabled=false' .
+  [ "$status" -eq 1 ]
+  [[ "$output" =~ "clients must be enabled" ]]
+}
+
 @test "terminatingGateways/Deployment: fails if there are duplicate gateway names" {
   cd `chart_dir`
   run helm template \
@@ -70,92 +139,69 @@ load _helpers
 }
 
 #--------------------------------------------------------------------
-# dataplaneImage
+# envoyImage
 
-@test "terminatingGateways/Deployment: dataplane image can be set using the global value" {
+@test "terminatingGateways/Deployment: envoy image has default global value" {
   cd `chart_dir`
   local actual=$(helm template \
      -s templates/terminating-gateways-deployment.yaml \
      --set 'terminatingGateways.enabled=true' \
      --set 'connectInject.enabled=true' \
-      --set 'global.imageConsulDataplane=new/image' \
      . | tee /dev/stderr |
      yq -s -r '.[0].spec.template.spec.containers[0].image' | tee /dev/stderr)
-  [ "${actual}" = "new/image" ]
+  [[ "${actual}" =~ "envoyproxy/envoy:v" ]]
 }
 
-#--------------------------------------------------------------------
-# global.tls.enabled
-
-@test "terminatingGateways/Deployment: sets TLS env variables for terminating-gateway-init when global.tls.enabled" {
+@test "terminatingGateways/Deployment: envoy image can be set using the global value" {
   cd `chart_dir`
-  local env=$(helm template \
+  local actual=$(helm template \
      -s templates/terminating-gateways-deployment.yaml \
      --set 'terminatingGateways.enabled=true' \
      --set 'connectInject.enabled=true' \
-      --set 'global.tls.enabled=true' \
+      --set 'global.imageEnvoy=new/image' \
      . | tee /dev/stderr |
-      yq -s -r '.[0].spec.template.spec.initContainers[0].env[]' | tee /dev/stderr)
-
-  local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_HTTP_PORT") | .value' | tee /dev/stderr)
-  [ "${actual}" = '8501' ]
-
-  local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_USE_TLS") | .value' | tee /dev/stderr)
-  [ "${actual}" = 'true' ]
-
-  local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_CACERT_FILE") | .value' | tee /dev/stderr)
-  [ "${actual}" = "/consul/tls/ca/tls.crt" ]
+      yq -s -r '.[0].spec.template.spec.containers[0].image' | tee /dev/stderr)
+  [ "${actual}" = "new/image" ]
 }
 
-@test "terminatingGateways/Deployment: sets TLS env variables for terminating-gateway-init when global.tls.enabled=false" {
+#--------------------------------------------------------------------
+# global.tls.enabled
+
+@test "terminatingGateways/Deployment: sets TLS env variables when global.tls.enabled" {
   cd `chart_dir`
   local env=$(helm template \
      -s templates/terminating-gateways-deployment.yaml \
      --set 'terminatingGateways.enabled=true' \
      --set 'connectInject.enabled=true' \
-      --set 'global.tls.enabled=false' \
+      --set 'global.tls.enabled=true' \
      . | tee /dev/stderr |
-      yq -s -r '.[0].spec.template.spec.initContainers[0].env[]' | tee /dev/stderr)
+      yq -s -r '.[0].spec.template.spec.containers[0].env[]' | tee /dev/stderr)
 
-  local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_HTTP_PORT") | .value' | tee /dev/stderr)
-  [ "${actual}" = '8500' ]
+  local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_HTTP_ADDR") | .value' | tee /dev/stderr)
+  [ "${actual}" = 'https://$(HOST_IP):8501' ]
 
-  local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_USE_TLS")' | tee /dev/stderr)
-  [ "${actual}" = '' ]
-
-  local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_TLS_SERVER_NAME")' | tee /dev/stderr)
-  [ "${actual}" = "" ]
-
-  local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_CACERT_FILE")' | tee /dev/stderr)
-  [ "${actual}" = "" ]
-}
+  local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_GRPC_ADDR") | .value' | tee /dev/stderr)
+  [ "${actual}" = 'https://$(HOST_IP):8502' ]
 
-@test "terminatingGateways/Deployment: sets TLS flags for terminating-gateway when global.tls.enabled is false" {
-  cd `chart_dir`
-  local object=$(helm template \
-      -s templates/terminating-gateways-deployment.yaml \
-      --set 'terminatingGateways.enabled=true' \
-      --set 'connectInject.enabled=true' \
-      --set 'global.tls.enabled=false' \
-      . | tee /dev/stderr |
-      yq -s -r '.[0].spec.template.spec.containers[0].args' | tee /dev/stderr)
-
-  local actual=$(echo $object | yq -r '. | any(contains("-tls-disabled"))' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
+  local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_CACERT") | .value' | tee /dev/stderr)
+  [ "${actual}" = "/consul/tls/ca/tls.crt" ]
 }
 
-@test "terminatingGateways/Deployment: sets TLS flags for terminating-gateway when global.tls.enabled" {
+@test "terminatingGateways/Deployment: sets TLS env variables in consul sidecar when global.tls.enabled" {
   cd `chart_dir`
-  local object=$(helm template \
+  local env=$(helm template \
      -s templates/terminating-gateways-deployment.yaml \
      --set 'terminatingGateways.enabled=true' \
      --set 'connectInject.enabled=true' \
      --set 'global.tls.enabled=true' \
      . | tee /dev/stderr |
-      yq -s -r '.[0].spec.template.spec.containers[0].args' | tee /dev/stderr)
+      yq -s -r '.[0].spec.template.spec.containers[1].env[]' | tee /dev/stderr)
 
-  local actual=$(echo $object | yq -r '. | any(contains("-ca-certs=/consul/tls/ca/tls.crt"))' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
+  local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_HTTP_ADDR") | .value' | tee /dev/stderr)
+  [ "${actual}" = 'https://$(HOST_IP):8501' ]
+
+  local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_CACERT") | .value' | tee /dev/stderr)
+  [ "${actual}" = "/consul/tls/ca/tls.crt" ]
 }
 
 @test "terminatingGateways/Deployment: can overwrite CA secret with the provided one" {
@@ -188,21 +234,67 @@ load _helpers
      --set 'terminatingGateways.enabled=true' \
      --set 'connectInject.enabled=true' \
      --set 'global.tls.enabled=true' \
-      . | yq '.spec.template.spec.volumes[] | select(.name == "consul-ca-cert")' | tee /dev/stderr )
+      . | tee /dev/stderr |
+      yq '.spec.template.spec.volumes[] | select(.name == "consul-ca-cert")' | tee /dev/stderr)
   [ "${actual}" != "" ]
 }
 
-@test "terminatingGateways/Deployment: CA cert volume omitted when TLS is enabled with external servers and use system roots" {
+#--------------------------------------------------------------------
+# global.tls.enableAutoEncrypt
+
+@test "terminatingGateways/Deployment: consul-auto-encrypt-ca-cert volume is added when TLS with auto-encrypt is enabled" {
   cd `chart_dir`
   local actual=$(helm template \
-      -s templates/terminating-gateways-deployment.yaml \
+      -s templates/terminating-gateways-deployment.yaml \
+      --set 'terminatingGateways.enabled=true' \
+      --set 'connectInject.enabled=true' \
+      --set 'global.tls.enabled=true' \
+      --set 'global.tls.enableAutoEncrypt=true' \
+      . | tee /dev/stderr |
+      yq -s '.[0].spec.template.spec.volumes[] | select(.name == "consul-auto-encrypt-ca-cert") | length > 0' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+@test "terminatingGateways/Deployment: consul-auto-encrypt-ca-cert volumeMount is added when TLS with auto-encrypt is enabled" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/terminating-gateways-deployment.yaml \
+      --set 'terminatingGateways.enabled=true' \
+      --set 'connectInject.enabled=true' \
+      --set 'global.tls.enabled=true' \
+      --set 'global.tls.enableAutoEncrypt=true' \
+      . | tee /dev/stderr |
+      yq -s '.[0].spec.template.spec.containers[0].volumeMounts[] | select(.name == "consul-auto-encrypt-ca-cert") | length > 0' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+@test "terminatingGateways/Deployment: get-auto-encrypt-client-ca init container is created when TLS with auto-encrypt is enabled" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/terminating-gateways-deployment.yaml \
+      --set 'terminatingGateways.enabled=true' \
+      --set 'connectInject.enabled=true' \
+      --set 'global.tls.enabled=true' \
+      --set 'global.tls.enableAutoEncrypt=true' \
+      . | tee /dev/stderr |
+      yq -s '.[0].spec.template.spec.initContainers[] | select(.name == "get-auto-encrypt-client-ca") | length > 0' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+@test "terminatingGateways/Deployment: consul-ca-cert volume is not added if externalServers.enabled=true and externalServers.useSystemRoots=true" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/terminating-gateways-deployment.yaml \
      --set 'terminatingGateways.enabled=true' \
      --set 'connectInject.enabled=true' \
      --set 'global.tls.enabled=true' \
+      --set 'global.tls.enableAutoEncrypt=true' \
      --set 'externalServers.enabled=true' \
+      --set 'externalServers.hosts[0]=foo.com' \
      --set 'externalServers.useSystemRoots=true' \
-      . | yq '.[0]spec.template.spec.volumes[] | select(.name == "consul-ca-cert")' | tee /dev/stderr )
-  [ "${actual}" == "" ]
+      . | tee /dev/stderr |
+      yq -s '.[0].spec.template.spec.volumes[] | select(.name == "consul-ca-cert")' | tee /dev/stderr)
+  [ "${actual}" = "" ]
 }
 
 @test "terminatingGateways/Deployment: serviceAccountName is set properly" {
@@ -222,81 +314,113 @@ load _helpers
 #--------------------------------------------------------------------
 # global.acls.manageSystemACLs
 
-@test "terminatingGateways/Deployment: Adds consul envvars on terminating-gateway-init init container when ACLs are enabled and tls is enabled" {
+@test "terminatingGateways/Deployment: consul-sidecar uses -token-file flag when global.acls.manageSystemACLs=true" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/terminating-gateways-deployment.yaml \
+      --set 'terminatingGateways.enabled=true' \
+      --set 'connectInject.enabled=true' \
+      --set 'global.acls.manageSystemACLs=true' \
+      . | tee /dev/stderr |
+      yq -s '.[0].spec.template.spec.containers[1].command | any(contains("-token-file=/consul/service/acl-token"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+@test "terminatingGateways/Deployment: Adds consul envvars CONSUL_HTTP_ADDR on terminating-gateway-init init container when ACLs are enabled and tls is enabled" {
   cd `chart_dir`
   local env=$(helm template \
      -s templates/terminating-gateways-deployment.yaml \
      --set 'terminatingGateways.enabled=true' \
      --set 'connectInject.enabled=true' \
      --set 'global.acls.manageSystemACLs=true' \
+      --set 'global.tls.enabled=true' \
      . | tee /dev/stderr |
-      yq -r '.spec.template.spec.initContainers[0].env[]' | tee /dev/stderr)
+      yq -r '.spec.template.spec.initContainers[1].env[]' | tee /dev/stderr)
 
-  local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_LOGIN_AUTH_METHOD") | .value' | tee /dev/stderr)
-  [ "${actual}" = "release-name-consul-k8s-component-auth-method" ]
+  local actual
+  actual=$(echo $env | jq -r '. | select(.name == "CONSUL_HTTP_ADDR") | .value' | tee /dev/stderr)
+  [ "${actual}" = "https://\$(HOST_IP):8501" ]
+}
 
-  local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_LOGIN_DATACENTER") | .value' | tee /dev/stderr)
-  [ "${actual}" = "dc1" ]
+@test "terminatingGateways/Deployment: Adds consul envvars CONSUL_HTTP_ADDR on terminating-gateway-init init container when ACLs are enabled and tls is not enabled" {
+  cd `chart_dir`
+  local env=$(helm template \
+      -s templates/terminating-gateways-deployment.yaml \
+      --set 'connectInject.enabled=true' \
+      --set 'connectInject.enabled=true' \
+      --set 'terminatingGateways.enabled=true' \
+      --set 'global.acls.manageSystemACLs=true' \
+      . | tee /dev/stderr |
+      yq -r '.spec.template.spec.initContainers[1].env[]' | tee /dev/stderr)
 
-  local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_LOGIN_META") | .value' | tee /dev/stderr)
-  [ "${actual}" = 'component=terminating-gateway,pod=$(NAMESPACE)/$(POD_NAME)' ]
+  local actual
+  actual=$(echo $env | jq -r '. | select(.name == "CONSUL_HTTP_ADDR") | .value' | tee /dev/stderr)
+  [ "${actual}" = "http://\$(HOST_IP):8500" ]
 }
 
-@test "terminatingGateways/Deployment: ACL flags are not set when acls are disabled" {
+@test "terminatingGateways/Deployment: Does not add consul envvars CONSUL_CACERT on terminating-gateway-init init container when ACLs are enabled and tls is not enabled" {
   cd `chart_dir`
-  local object=$(helm template \
-      -s templates/terminating-gateways-deployment.yaml \
+  local actual=$(helm template \
+      -s templates/terminating-gateways-deployment.yaml \
      --set 'connectInject.enabled=true' \
      --set 'terminatingGateways.enabled=true' \
-      --set 'global.acls.manageSystemACLs=false' \
+      --set 'global.acls.manageSystemACLs=true' \
      . | tee /dev/stderr |
-      yq -s -r '.[0].spec.template.spec.containers[0].args' | tee /dev/stderr)
+      yq '.spec.template.spec.initContainers[1].env[] | select(.name == "CONSUL_CACERT")' | tee /dev/stderr)
 
-  local actual=$(echo $object | yq -r '. | any(contains("-login-bearer-path"))' | tee /dev/stderr)
-  [ "${actual}" = "false" ]
+  [ "${actual}" = "" ]
+}
 
-  local actual=$(echo $object | yq -r '. | any(contains("-login-method"))' | tee /dev/stderr)
-  [ "${actual}" = "false" ]
+@test "terminatingGateways/Deployment: Adds consul envvars CONSUL_CACERT on terminating-gateway-init init container when ACLs are enabled and tls is enabled" {
+  cd `chart_dir`
+  local env=$(helm template \
+      -s templates/terminating-gateways-deployment.yaml \
+      --set 'connectInject.enabled=true' \
+      --set 'terminatingGateways.enabled=true' \
+      --set 'global.acls.manageSystemACLs=true' \
+      --set 'global.tls.enabled=true' \
+      . | tee /dev/stderr |
+      yq -r '.spec.template.spec.initContainers[1].env[]' | tee /dev/stderr)
+
+  local actual=$(echo $env | jq -r '. | select(.name == "CONSUL_CACERT") | .value' | tee /dev/stderr)
+  [ "${actual}" = "/consul/tls/ca/tls.crt" ]
+}
 
-  local actual=$(echo $object | yq -r '. | any(contains("-credential-type=login"))' | tee /dev/stderr)
+@test "terminatingGateways/Deployment: CONSUL_HTTP_TOKEN_FILE is not set when acls are disabled" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/terminating-gateways-deployment.yaml \
+      --set 'connectInject.enabled=true' \
+      --set 'terminatingGateways.enabled=true' \
+      --set 'global.acls.manageSystemACLs=false' \
+      . | tee /dev/stderr |
+      yq '[.spec.template.spec.containers[0].env[0].name] | any(contains("CONSUL_HTTP_TOKEN_FILE"))' | tee /dev/stderr)
   [ "${actual}" = "false" ]
 }
 
-@test "terminatingGateways/Deployment: command flags are set when acls are enabled" {
+@test "terminatingGateways/Deployment: CONSUL_HTTP_TOKEN_FILE is set when acls are enabled" {
   cd `chart_dir`
-  local object=$(helm template \
+  local actual=$(helm template \
      -s templates/terminating-gateways-deployment.yaml \
      --set 'terminatingGateways.enabled=true' \
      --set 'connectInject.enabled=true' \
      --set 'global.acls.manageSystemACLs=true' \
      . | tee /dev/stderr |
-      yq -s -r '.[0].spec.template.spec.containers[0].args' | tee /dev/stderr)
-
-  local actual=$(echo $object | yq -r '. | any(contains("-login-bearer-token-path=/var/run/secrets/kubernetes.io/serviceaccount/token"))' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-
-  local actual=$(echo $object | yq -r '. | any(contains("-login-auth-method=release-name-consul-k8s-component-auth-method"))' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-
-  local actual=$(echo $object | yq -r '. | any(contains("-credential-type=login"))' | tee /dev/stderr)
+      yq -s '[.[0].spec.template.spec.containers[0].env[].name] | any(contains("CONSUL_HTTP_TOKEN_FILE"))' | tee /dev/stderr)
   [ "${actual}" = "true" ]
 }
 
-@test "terminatingGateways/Deployment: add consul-dataplane envvars on terminating-gateway container" {
+@test "terminatingGateways/Deployment: consul-logout preStop hook is added when ACLs are enabled" {
   cd `chart_dir`
-  local env=$(helm template \
-      -s templates/terminating-gateways-deployment.yaml \
+  local actual=$(helm template \
+      -s templates/terminating-gateways-deployment.yaml \
      --set 'terminatingGateways.enabled=true' \
      --set 'connectInject.enabled=true' \
+      --set 'terminatingGateways.enabled=true' \
      --set 'global.acls.manageSystemACLs=true' \
      . | tee /dev/stderr |
-      yq -r '.spec.template.spec.containers[0].env[]' | tee /dev/stderr)
-
-  local actual=$(echo $env | jq -r '. | select(.name == "DP_CREDENTIAL_LOGIN_META1") | .value' | tee /dev/stderr)
-  [ "${actual}" = 'pod=$(NAMESPACE)/$(POD_NAME)' ]
-
-  local actual=$(echo $env | jq -r '. | select(.name == "DP_CREDENTIAL_LOGIN_META2") | .value' | tee /dev/stderr)
-  [ "${actual}" = "component=terminating-gateway" ]
+      yq '[.spec.template.spec.containers[0].lifecycle.preStop.exec.command[3]] | any(contains("/consul-bin/consul logout"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
 }
 
 #--------------------------------------------------------------------
@@ -338,7 +462,20 @@ load _helpers
   [ "${actual}" = "/metrics" ]
 }
 
-@test "terminatingGateways/Deployment: when global.metrics.enableGatewayMetrics=false, does not set prometheus annotations" {
+@test "terminatingGateways/Deployment: when global.metrics.enabled=true, sets proxy setting" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/terminating-gateways-deployment.yaml \
+      --set 'terminatingGateways.enabled=true' \
+      --set 'connectInject.enabled=true' \
+      --set 'global.metrics.enabled=true' \
+      . | tee /dev/stderr |
+      yq '.spec.template.spec.initContainers[1].command | join(" ") | contains("envoy_prometheus_bind_addr = \"${POD_IP}:20200\"")' | tee /dev/stderr)
+
+  [ "${actual}" = "true" ]
+}
+
+@test "terminatingGateways/Deployment: when global.metrics.enableGatewayMetrics=false, does not set proxy setting" {
   cd `chart_dir`
   local object=$(helm template \
@@ -349,6 +486,9 @@ load _helpers
      . | tee /dev/stderr |
      yq '.spec.template' | tee /dev/stderr)
 
+  local actual=$(echo $object | yq -r '.spec.initContainers[1].command | join(" ") | contains("envoy_prometheus_bind_addr = \"${POD_IP}:20200\"")' | tee /dev/stderr)
+  [ "${actual}" = "false" ]
+
   local actual=$(echo $object | yq -s -r '.[0].metadata.annotations."prometheus.io/path"' | tee /dev/stderr)
   [ "${actual}" = "null" ]
 
@@ -359,7 +499,7 @@ load _helpers
   [ "${actual}" = "null" ]
 }
 
-@test "terminatingGateways/Deployment: when global.metrics.enabled=false, does not set prometheus annotations" {
+@test "terminatingGateways/Deployment: when global.metrics.enabled=false, does not set proxy setting" {
   cd `chart_dir`
   local object=$(helm template \
@@ -369,6 +509,9 @@ load _helpers
      . | tee /dev/stderr |
      yq '.spec.template' | tee /dev/stderr)
 
+  local actual=$(echo $object | yq -r '.spec.initContainers[1].command | join(" ") | contains("envoy_prometheus_bind_addr = \"${POD_IP}:20200\"")' | tee /dev/stderr)
+  [ "${actual}" = "false" ]
+
   local actual=$(echo $object | yq -s -r '.[0].metadata.annotations."prometheus.io/path"' | tee /dev/stderr)
   [ "${actual}" = "null" ]
 
@@ -379,31 +522,10 @@ load _helpers
   [ "${actual}" = "null" ]
 }
 
-#--------------------------------------------------------------------
-# externalServers.skipServerWatch
-
-@test "terminatingGateways/Deployment: sets server-watch-disabled flag when externalServers.enabled and externalServers.skipServerWatch is true" {
-  cd `chart_dir`
-  local object=$(helm template \
-      -s templates/ingress-gateways-deployment.yaml \
-      --set 'ingressGateways.enabled=true' \
-      --set 'connectInject.enabled=true' \
-      --set 'global.tls.enabled=false' \
-      --set 'server.enabled=false' \
-      --set 'externalServers.enabled=true' \
-      --set 'externalServers.hosts[0]=consul' \
-      --set 'externalServers.skipServerWatch=true' \
-      . | tee /dev/stderr |
-      yq -s -r '.[0].spec.template.spec.containers[0].args' | tee /dev/stderr)
-
-  local actual=$(echo $object | yq -r '. | any(contains("-server-watch-disabled=true"))' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-}
-
 #--------------------------------------------------------------------
 # replicas
 
-@test "terminatingGateways/Deployment: replicas defaults to 1" {
+@test "terminatingGateways/Deployment: replicas defaults to 2" {
   cd `chart_dir`
   local actual=$(helm template \
      -s templates/terminating-gateways-deployment.yaml \
@@ -411,7 +533,7 @@ load _helpers
      --set 'connectInject.enabled=true' \
      . | tee /dev/stderr |
      yq -s -r '.[0].spec.replicas' | tee /dev/stderr)
-  [ "${actual}" = "1" ]
+  [ "${actual}" = "2" ]
 }
 
 @test "terminatingGateways/Deployment: replicas can be set through defaults" {
@@ -597,81 +719,225 @@ load _helpers
      . | tee /dev/stderr |
      yq -s -r '.[0].spec.template.spec.containers[0].resources' | tee /dev/stderr)
-  [ $(echo "${actual}" | yq -r '.requests.memory') = "100Mi" ]
-  [ $(echo "${actual}" | yq -r '.requests.cpu') = "100m" ]
-  [ $(echo "${actual}" | yq -r '.limits.memory') = "100Mi" ]
-  [ $(echo "${actual}" | yq -r '.limits.cpu') = "100m" ]
+  [ $(echo "${actual}" | yq -r '.requests.memory') = "100Mi" ]
+  [ $(echo "${actual}" | yq -r '.requests.cpu') = "100m" ]
+  [ $(echo "${actual}" | yq -r '.limits.memory') = "100Mi" ]
+  [ $(echo "${actual}" | yq -r '.limits.cpu') = "100m" ]
+}
+
+@test "terminatingGateways/Deployment: resources can be set through defaults" {
+  cd `chart_dir`
+  local object=$(helm template \
+      -s templates/terminating-gateways-deployment.yaml \
+      --set 'terminatingGateways.enabled=true' \
+      --set 'connectInject.enabled=true' \
+      --set 'terminatingGateways.defaults.resources.requests.memory=memory' \
+      --set 'terminatingGateways.defaults.resources.requests.cpu=cpu' \
+      --set 'terminatingGateways.defaults.resources.limits.memory=memory2' \
+      --set 'terminatingGateways.defaults.resources.limits.cpu=cpu2' \
+      . | tee /dev/stderr |
+      yq -s -r '.[0].spec.template.spec.containers[0].resources' | tee /dev/stderr)
+
+  local actual=$(echo $object | yq -r '.requests.memory' | tee /dev/stderr)
+  [ "${actual}" = "memory" ]
+
+  local actual=$(echo $object | yq -r '.requests.cpu' | tee /dev/stderr)
+  [ "${actual}" = "cpu" ]
+
+  local actual=$(echo $object | yq -r '.limits.memory' | tee /dev/stderr)
+  [ "${actual}" = "memory2" ]
+
+  local actual=$(echo $object | yq -r '.limits.cpu' | tee /dev/stderr)
+  [ "${actual}" = "cpu2" ]
+}
+
+@test "terminatingGateways/Deployment: resources can be set through specific gateway, overriding defaults" {
+  cd `chart_dir`
+  local object=$(helm template \
+      -s templates/terminating-gateways-deployment.yaml \
+      --set 'terminatingGateways.enabled=true' \
+      --set 'connectInject.enabled=true' \
+      --set 'terminatingGateways.defaults.resources.requests.memory=memory' \
+      --set 'terminatingGateways.defaults.resources.requests.cpu=cpu' \
+      --set 'terminatingGateways.defaults.resources.limits.memory=memory2' \
+      --set 'terminatingGateways.defaults.resources.limits.cpu=cpu2' \
+      --set 'terminatingGateways.gateways[0].name=gateway1' \
+      --set 'terminatingGateways.gateways[0].resources.requests.memory=gwmemory' \
+      --set 'terminatingGateways.gateways[0].resources.requests.cpu=gwcpu' \
+      --set 'terminatingGateways.gateways[0].resources.limits.memory=gwmemory2' \
+      --set 'terminatingGateways.gateways[0].resources.limits.cpu=gwcpu2' \
+      . | tee /dev/stderr |
+      yq -s '.[0].spec.template.spec.containers[0].resources' | tee /dev/stderr)
+
+  local actual=$(echo $object | yq -r '.requests.memory' | tee /dev/stderr)
+  [ "${actual}" = "gwmemory" ]
+
+  local actual=$(echo $object | yq -r '.requests.cpu' | tee /dev/stderr)
+  [ "${actual}" = "gwcpu" ]
+
+  local actual=$(echo $object | yq -r '.limits.memory' | tee /dev/stderr)
+  [ "${actual}" = "gwmemory2" ]
+
+  local actual=$(echo $object | yq -r '.limits.cpu' | tee /dev/stderr)
+  [ "${actual}" = "gwcpu2" ]
+}
+
+#--------------------------------------------------------------------
+# init container resources
+
+@test "terminatingGateways/Deployment: init container has default resources" {
+  cd `chart_dir`
+  local object=$(helm template \
+      -s templates/terminating-gateways-deployment.yaml \
+      --set 'terminatingGateways.enabled=true' \
+      --set 'connectInject.enabled=true' \
+      . | tee /dev/stderr |
+      yq -s -r '.[0].spec.template.spec.initContainers[0].resources' | tee /dev/stderr)
+
+  local actual=$(echo $object | yq -r '.requests.memory' | tee /dev/stderr)
+  [ "${actual}" = "25Mi" ]
+
+  local actual=$(echo $object | yq -r '.requests.cpu' | tee /dev/stderr)
+  [ "${actual}" = "50m" ]
+
+  local actual=$(echo $object | yq -r '.limits.memory' | tee /dev/stderr)
+  [ "${actual}" = "150Mi" ]
+
+  local actual=$(echo $object | yq -r '.limits.cpu' | tee /dev/stderr)
+  [ "${actual}" = "50m" ]
+}
+
+@test "terminatingGateways/Deployment: init container resources can be set through defaults" {
+  cd `chart_dir`
+  local object=$(helm template \
+      -s templates/terminating-gateways-deployment.yaml \
+      --set 'terminatingGateways.enabled=true' \
+      --set 'connectInject.enabled=true' \
+      --set 'terminatingGateways.defaults.initCopyConsulContainer.resources.requests.memory=memory' \
+      --set 'terminatingGateways.defaults.initCopyConsulContainer.resources.requests.cpu=cpu' \
+      --set 'terminatingGateways.defaults.initCopyConsulContainer.resources.limits.memory=memory2' \
+      --set 'terminatingGateways.defaults.initCopyConsulContainer.resources.limits.cpu=cpu2' \
+      . | tee /dev/stderr |
+      yq -s -r '.[0].spec.template.spec.initContainers[0].resources' | tee /dev/stderr)
+
+  local actual=$(echo $object | yq -r '.requests.memory' | tee /dev/stderr)
+  [ "${actual}" = "memory" ]
+
+  local actual=$(echo $object | yq -r '.requests.cpu' | tee /dev/stderr)
+  [ "${actual}" = "cpu" ]
+
+  local actual=$(echo $object | yq -r '.limits.memory' | tee /dev/stderr)
+  [ "${actual}" = "memory2" ]
+
+  local actual=$(echo $object | yq -r '.limits.cpu' | tee /dev/stderr)
+  [ "${actual}" = "cpu2" ]
+}
+
+@test "terminatingGateways/Deployment: init container resources can be set through specific gateway, overriding defaults" {
+  cd `chart_dir`
+  local object=$(helm template \
+      -s templates/terminating-gateways-deployment.yaml \
+      --set 'terminatingGateways.enabled=true' \
+      --set 'connectInject.enabled=true' \
+      --set 'terminatingGateways.defaults.initCopyConsulContainer.resources.requests.memory=memory' \
+      --set 'terminatingGateways.defaults.initCopyConsulContainer.resources.requests.cpu=cpu' \
+      --set 'terminatingGateways.defaults.initCopyConsulContainer.resources.limits.memory=memory2' \
+      --set 'terminatingGateways.defaults.initCopyConsulContainer.resources.limits.cpu=cpu2' \
+      --set 'terminatingGateways.gateways[0].name=gateway1' \
+      --set 'terminatingGateways.gateways[0].initCopyConsulContainer.resources.requests.memory=gwmemory' \
+      --set 'terminatingGateways.gateways[0].initCopyConsulContainer.resources.requests.cpu=gwcpu' \
+      --set 'terminatingGateways.gateways[0].initCopyConsulContainer.resources.limits.memory=gwmemory2' \
+      --set 'terminatingGateways.gateways[0].initCopyConsulContainer.resources.limits.cpu=gwcpu2' \
+      . | tee /dev/stderr |
+      yq -s '.[0].spec.template.spec.initContainers[0].resources' | tee /dev/stderr)
+
+  local actual=$(echo $object | yq -r '.requests.memory' | tee /dev/stderr)
+  [ "${actual}" = "gwmemory" ]
+
+  local actual=$(echo $object | yq -r '.requests.cpu' | tee /dev/stderr)
+  [ "${actual}" = "gwcpu" ]
+
+  local actual=$(echo $object | yq -r '.limits.memory' | tee /dev/stderr)
+  [ "${actual}" = "gwmemory2" ]
+
+  local actual=$(echo $object | yq -r '.limits.cpu' | tee /dev/stderr)
+  [ "${actual}" = "gwcpu2" ]
 }
 
-@test "terminatingGateways/Deployment: resources can be set through defaults" {
+#--------------------------------------------------------------------
+# consul sidecar resources
+
+@test "terminatingGateways/Deployment: consul sidecar has default resources" {
   cd `chart_dir`
   local object=$(helm template \
-      -s templates/terminating-gateways-deployment.yaml \
+      -s templates/terminating-gateways-deployment.yaml \
      --set 'terminatingGateways.enabled=true' \
      --set 'connectInject.enabled=true' \
-      --set 'terminatingGateways.defaults.resources.requests.memory=memory' \
-      --set 'terminatingGateways.defaults.resources.requests.cpu=cpu' \
-      --set 'terminatingGateways.defaults.resources.limits.memory=memory2' \
-      --set 'terminatingGateways.defaults.resources.limits.cpu=cpu2' \
      . | tee /dev/stderr |
-      yq -s -r '.[0].spec.template.spec.containers[0].resources' | tee /dev/stderr)
+      yq -s -r '.[0].spec.template.spec.containers[1].resources' | tee /dev/stderr)
 
   local actual=$(echo $object | yq -r '.requests.memory' | tee /dev/stderr)
-  [ "${actual}" = "memory" ]
+  [ "${actual}" = "25Mi" ]
 
   local actual=$(echo $object | yq -r '.requests.cpu' | tee /dev/stderr)
-  [ "${actual}" = "cpu" ]
+  [ "${actual}" = "20m" ]
 
   local actual=$(echo $object | yq -r '.limits.memory' | tee /dev/stderr)
-  [ "${actual}" = "memory2" ]
+  [ "${actual}" = "50Mi" ]
 
   local actual=$(echo $object | yq -r '.limits.cpu' | tee /dev/stderr)
-  [ "${actual}" = "cpu2" ]
+  [ "${actual}" = "20m" ]
 }
 
-@test "terminatingGateways/Deployment: resources can be set through specific gateway, overriding defaults" {
+@test "terminatingGateways/Deployment: consul sidecar resources can be set" {
   cd `chart_dir`
   local object=$(helm template \
      -s templates/terminating-gateways-deployment.yaml \
      --set 'terminatingGateways.enabled=true' \
      --set 'connectInject.enabled=true' \
-      --set 'terminatingGateways.defaults.resources.requests.memory=memory' \
-      --set 'terminatingGateways.defaults.resources.requests.cpu=cpu' \
-      --set 'terminatingGateways.defaults.resources.limits.memory=memory2' \
-      --set 'terminatingGateways.defaults.resources.limits.cpu=cpu2' \
-      --set 'terminatingGateways.gateways[0].name=gateway1' \
-      --set 'terminatingGateways.gateways[0].resources.requests.memory=gwmemory' \
-      --set 'terminatingGateways.gateways[0].resources.requests.cpu=gwcpu' \
-      --set 'terminatingGateways.gateways[0].resources.limits.memory=gwmemory2' \
-      --set 'terminatingGateways.gateways[0].resources.limits.cpu=gwcpu2' \
+      --set 'global.consulSidecarContainer.resources.requests.memory=memory' \
+      --set 'global.consulSidecarContainer.resources.requests.cpu=cpu' \
+      --set 'global.consulSidecarContainer.resources.limits.memory=memory2' \
+      --set 'global.consulSidecarContainer.resources.limits.cpu=cpu2' \
      . | tee /dev/stderr |
-      yq -s '.[0].spec.template.spec.containers[0].resources' | tee /dev/stderr)
+      yq -s -r '.[0].spec.template.spec.containers[1].resources' | tee /dev/stderr)
 
   local actual=$(echo $object | yq -r '.requests.memory' | tee /dev/stderr)
-  [ "${actual}" = "gwmemory" ]
+  [ "${actual}" = "memory" ]
 
   local actual=$(echo $object | yq -r '.requests.cpu' | tee /dev/stderr)
-  [ "${actual}" = "gwcpu" ]
+  [ "${actual}" = "cpu" ]
 
   local actual=$(echo $object | yq -r '.limits.memory' | tee /dev/stderr)
-  [ "${actual}" = "gwmemory2" ]
+  [ "${actual}" = "memory2" ]
 
   local actual=$(echo $object | yq -r '.limits.cpu' | tee /dev/stderr)
-  [ "${actual}" = "gwcpu2" ]
+  [ "${actual}" = "cpu2" ]
+}
+
+@test "terminatingGateways/Deployment: fails if global.lifecycleSidecarContainer is set" {
+  cd `chart_dir`
+  run helm template \
+      -s templates/terminating-gateways-deployment.yaml \
+      --set 'terminatingGateways.enabled=true' \
+      --set 'connectInject.enabled=true' \
+      --set 'global.lifecycleSidecarContainer.resources.requests.memory=100Mi' .
+  [ "$status" -eq 1 ]
+  [[ "$output" =~ "global.lifecycleSidecarContainer has been renamed to global.consulSidecarContainer. Please set values using global.consulSidecarContainer." ]]
 }
 
 #--------------------------------------------------------------------
 # affinity
 
-@test "terminatingGateways/Deployment: affinity defaults to null" {
+@test "terminatingGateways/Deployment: affinity defaults to one per node" {
   cd `chart_dir`
   local actual=$(helm template \
      -s templates/terminating-gateways-deployment.yaml \
      --set 'terminatingGateways.enabled=true' \
      --set 'connectInject.enabled=true' \
      . | tee /dev/stderr |
-      yq -s -r '.[0].spec.template.spec.affinity' | tee /dev/stderr)
-  [ "${actual}" = "null" ]
+      yq -s -r '.[0].spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[0].topologyKey' | tee /dev/stderr)
+  [ "${actual}" = "kubernetes.io/hostname" ]
 }
 
 @test "terminatingGateways/Deployment: affinity can be set through defaults" {
@@ -871,7 +1137,7 @@ load _helpers
      --set 'connectInject.enabled=true' \
      . | tee /dev/stderr |
      yq -s -r '.[0].spec.template.metadata.annotations | length' | tee /dev/stderr)
-  [ "${actual}" = "3" ]
+  [ "${actual}" = "1" ]
 }
 
 @test "terminatingGateways/Deployment: extra annotations can be set through defaults" {
@@ -886,7 +1152,7 @@ key2: value2' \
      yq -s -r '.[0].spec.template.metadata.annotations' | tee /dev/stderr)
 
   local actual=$(echo $object | yq '. | length' | tee /dev/stderr)
-  [ "${actual}" = "5" ]
+  [ "${actual}" = "3" ]
 
   local actual=$(echo $object | yq -r '.key1' | tee /dev/stderr)
   [ "${actual}" = "value1" ]
@@ -908,7 +1174,7 @@ key2: value2' \
      yq -s -r '.[0].spec.template.metadata.annotations' | tee /dev/stderr)
 
   local actual=$(echo $object | yq '. | length' | tee /dev/stderr)
-  [ "${actual}" = "5" ]
+  [ "${actual}" = "3" ]
 
   local actual=$(echo $object | yq -r '.key1' | tee /dev/stderr)
   [ "${actual}" = "value1" ]
@@ -931,7 +1197,7 @@ key2: value2' \
      yq -s -r '.[0].spec.template.metadata.annotations' | tee /dev/stderr)
 
   local actual=$(echo $object | yq '. | length' | tee /dev/stderr)
-  [ "${actual}" = "6" ]
+  [ "${actual}" = "4" ]
 
   local actual=$(echo $object | yq -r '.defaultkey' | tee /dev/stderr)
   [ "${actual}" = "defaultvalue" ]
@@ -944,23 +1210,87 @@ key2: value2' \
 }
 
 #--------------------------------------------------------------------
-# consul namespaces
+# terminating-gateway-init init container command
 
-@test "terminatingGateways/Deployment: namespace annotation is not present by default" {
+@test "terminatingGateways/Deployment: terminating-gateway-init init container defaults" {
   cd `chart_dir`
-  local object=$(helm template \
+  local actual=$(helm template \
      -s templates/terminating-gateways-deployment.yaml \
      --set 'terminatingGateways.enabled=true' \
      --set 'connectInject.enabled=true' \
      . | tee /dev/stderr |
-      yq -s -r '.[0].spec.template.metadata.annotations' | tee /dev/stderr)
+      yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "terminating-gateway-init"))[0] | .command[2]' | tee /dev/stderr)
 
-  local actual=$(echo $object | yq -r 'any(contains("consul.hashicorp.com/gateway-namespace"))' | tee /dev/stderr)
-  [ "${actual}" = "false" ]
+  exp='
+cat > /consul/service/service.hcl << EOF
+service {
+  kind = "terminating-gateway"
+  name = "terminating-gateway"
+  id = "${POD_NAME}"
+  address = "${POD_IP}"
+  port = 8443
+  checks = [
+    {
+      name = "Terminating Gateway Listening"
+      interval = "10s"
+      tcp = "${POD_IP}:8443"
+      deregister_critical_service_after = "6h"
+    }
+  ]
 }
+EOF
+
+/consul-bin/consul services register \
+  /consul/service/service.hcl'
+  [ "${actual}" = "${exp}" ]
+}
 
-@test "terminatingGateways/Deployment: consulNamespace is set as an annotation" {
+@test "terminatingGateways/Deployment: terminating-gateway-init init container with acls.manageSystemACLs=true" {
+  cd `chart_dir`
+  local actual=$(helm template \
+      -s templates/terminating-gateways-deployment.yaml \
+      --set 'terminatingGateways.enabled=true' \
+      --set 'connectInject.enabled=true' \
+      --set 'global.acls.manageSystemACLs=true' \
+      --set 'terminatingGateways.gateways[0].name=terminating' \
+      . | tee /dev/stderr |
+      yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "terminating-gateway-init"))[0] | .command[2]' | tee /dev/stderr)
+
+  exp='consul-k8s-control-plane acl-init \
+  -component-name=terminating-gateway/release-name-consul-terminating \
+  -acl-auth-method=release-name-consul-k8s-component-auth-method \
+  -token-sink-file=/consul/service/acl-token \
+  -consul-api-timeout=5s \
+  -log-level=info \
+  -log-json=false
+
+cat > /consul/service/service.hcl << EOF
+service {
+  kind = "terminating-gateway"
+  name = "terminating"
+  id = "${POD_NAME}"
+  address = "${POD_IP}"
+  port = 8443
+  checks = [
+    {
+      name = "Terminating Gateway Listening"
+      interval = "10s"
+      tcp = "${POD_IP}:8443"
+      deregister_critical_service_after = "6h"
+    }
+  ]
+}
+EOF
+
+/consul-bin/consul services register \
+  -token-file=/consul/service/acl-token \
+  /consul/service/service.hcl'
+
+  [ "${actual}" = "${exp}" ]
+}
+
+@test "terminatingGateways/Deployment: terminating-gateway-init init container gateway namespace can be specified through defaults" {
   cd `chart_dir`
   local actual=$(helm template \
      -s templates/terminating-gateways-deployment.yaml \
@@ -969,12 +1299,35 @@ key2: value2' \
      --set 'global.enableConsulNamespaces=true' \
      --set 'terminatingGateways.defaults.consulNamespace=namespace' \
      . | tee /dev/stderr |
-      yq -s -r '.[0].spec.template.metadata.annotations."consul.hashicorp.com/gateway-namespace"' | tee /dev/stderr)
+      yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "terminating-gateway-init"))[0] | .command[2]' | tee /dev/stderr)
+
+  exp='
+cat > /consul/service/service.hcl << EOF
+service {
+  kind = "terminating-gateway"
+  name = "terminating-gateway"
+  id = "${POD_NAME}"
+  namespace = "namespace"
+  address = "${POD_IP}"
+  port = 8443
+  checks = [
+    {
+      name = "Terminating Gateway Listening"
+      interval = "10s"
+      tcp = "${POD_IP}:8443"
+      deregister_critical_service_after = "6h"
+    }
+  ]
+}
+EOF
 
-  [ "${actual}" = "namespace" ]
+/consul-bin/consul services register \
+  /consul/service/service.hcl'
+
+  [ "${actual}" = "${exp}" ]
 }
 
-@test "terminatingGateways/Deployment: consulNamespace is set as an annotation when set on the individual gateway" {
+@test "terminatingGateways/Deployment: terminating-gateway-init init container gateway namespace can be specified through specific gateway overriding defaults" {
   cd `chart_dir`
   local actual=$(helm template \
      -s templates/terminating-gateways-deployment.yaml \
@@ -985,9 +1338,90 @@ key2: value2' \
      --set 'terminatingGateways.gateways[0].name=terminating-gateway' \
      --set 'terminatingGateways.gateways[0].consulNamespace=new-namespace' \
      . | tee /dev/stderr |
-      yq -s -r '.[0].spec.template.metadata.annotations."consul.hashicorp.com/gateway-namespace"' | tee /dev/stderr)
+      yq -s -r '.[0].spec.template.spec.initContainers | map(select(.name == "terminating-gateway-init"))[0] | .command[2]' | tee /dev/stderr)
+
+  exp='
+cat > /consul/service/service.hcl << EOF
+service {
+  kind = "terminating-gateway"
+  name = "terminating-gateway"
+  id = "${POD_NAME}"
+  namespace = "new-namespace"
+  address = "${POD_IP}"
+  port = 8443
+  checks = [
+    {
+      name = "Terminating Gateway Listening"
+      interval = "10s"
+      tcp = "${POD_IP}:8443"
+      deregister_critical_service_after = "6h"
+    }
+  ]
+}
+EOF
+
+/consul-bin/consul services register \
+  /consul/service/service.hcl'
+
+  [ "${actual}" = "${exp}" ]
+}
+
+#--------------------------------------------------------------------
+# namespaces
+
+@test "terminatingGateways/Deployment: namespace command flag is not present by default" {
+  cd `chart_dir`
+  local object=$(helm template \
+      -s templates/terminating-gateways-deployment.yaml \
+      --set 'terminatingGateways.enabled=true' \
+      --set 'connectInject.enabled=true' \
+      . | tee /dev/stderr |
+      yq -s -r '.[0].spec.template.spec.containers[0]' | tee /dev/stderr)
+
+  local actual=$(echo $object | yq -r '.command | any(contains("-namespace"))' | tee /dev/stderr)
+  [ "${actual}" = "false" ]
+
+  local actual=$(echo $object | yq -r '.lifecycle.preStop.exec.command | any(contains("-namespace"))' | tee /dev/stderr)
+  [ "${actual}" = "false" ]
+}
+
+@test "terminatingGateways/Deployment: namespace command flag is specified through defaults" {
+  cd `chart_dir`
+  local object=$(helm template \
+      -s templates/terminating-gateways-deployment.yaml \
+      --set 'terminatingGateways.enabled=true' \
+      --set 'connectInject.enabled=true' \
+      --set 'global.enableConsulNamespaces=true' \
+      --set 'terminatingGateways.defaults.consulNamespace=namespace' \
+      . | tee /dev/stderr |
+      yq -s -r '.[0].spec.template.spec.containers[0]' | tee /dev/stderr)
+
+  local actual=$(echo $object | yq -r '.command | any(contains("-namespace=namespace"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+  local actual=$(echo $object | yq -r '.lifecycle.preStop.exec.command | any(contains("-namespace=namespace"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+}
+
+@test "terminatingGateways/Deployment: namespace command flag is specified through specific gateway overriding defaults" {
+  cd `chart_dir`
+  local object=$(helm template \
+      -s templates/terminating-gateways-deployment.yaml \
+      --set 'terminatingGateways.enabled=true' \
+      --set 'connectInject.enabled=true' \
+      --set 'global.enableConsulNamespaces=true' \
+      --set 'terminatingGateways.defaults.consulNamespace=namespace' \
+      --set 'terminatingGateways.gateways[0].name=terminating-gateway' \
+      --set 'terminatingGateways.gateways[0].consulNamespace=new-namespace' \
+      . | tee /dev/stderr |
+      yq -s -r '.[0].spec.template.spec.containers[0]' | tee /dev/stderr)
+
+  local actual=$(echo $object | yq -r '.command | any(contains("-namespace=new-namespace"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
+
+
-  [ "${actual}" = "new-namespace" ]
+  local actual=$(echo $object | yq -r '.lifecycle.preStop.exec.command | any(contains("-namespace=new-namespace"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
 }
 
 #--------------------------------------------------------------------
@@ -1000,9 +1434,12 @@ key2: value2' \
      --set 'terminatingGateways.enabled=true' \
      --set 'connectInject.enabled=true' \
      . | tee /dev/stderr |
-      yq -s -r '.[0].spec.template.spec.containers[0].args' | tee /dev/stderr)
+      yq -s -r '.[0].spec.template.spec.containers[0]' | tee /dev/stderr)
+
+  local actual=$(echo $object | yq -r '.command | any(contains("-partition"))' | tee /dev/stderr)
+  [ "${actual}" = "false" ]
 
-  local actual=$(echo $object | yq -r '. | any(contains("-partition"))' | tee /dev/stderr)
+  local actual=$(echo $object | yq -r '.lifecycle.preStop.exec.command | any(contains("-partition"))' | tee /dev/stderr)
   [ "${actual}" = "false" ]
 }
 
@@ -1016,9 +1453,12 @@ key2: value2' \
      --set 'global.adminPartitions.enabled=true' \
      --set 'global.adminPartitions.name=default' \
      . | tee /dev/stderr |
-      yq -s -r '.[0].spec.template.spec.containers[0].args' | tee /dev/stderr)
+      yq -s -r '.[0].spec.template.spec.containers[0]' | tee /dev/stderr)
+
+  local actual=$(echo $object | yq -r '.command | any(contains("-partition=default"))' | tee /dev/stderr)
+  [ "${actual}" = "true" ]
 
-  local actual=$(echo $object | yq -r '. | any(contains("-partition=default"))' | tee /dev/stderr)
+  local actual=$(echo $object | yq -r '.lifecycle.preStop.exec.command | any(contains("-partition=default"))' | tee /dev/stderr)
   [ "${actual}" = "true" ]
 }
 
@@ -1065,16 +1505,49 @@ key2: value2' \
   [ "${actual}" = "false" ]
 }
 
+#--------------------------------------------------------------------
+# get-auto-encrypt-client-ca
+
+@test "terminatingGateways/Deployment: get-auto-encrypt-client-ca uses server's stateful set address by default and passes ca cert" {
+  cd `chart_dir`
+  local command=$(helm template \
+      -s templates/terminating-gateways-deployment.yaml \
+      --set 'terminatingGateways.enabled=true' \
+      --set 'connectInject.enabled=true' \
+      --set 'terminatingGateways.gateways[0].name=gateway1' \
+      --set 'global.tls.enabled=true' \
+      --set 'global.tls.enableAutoEncrypt=true' \
+      . | tee /dev/stderr |
+      yq '.spec.template.spec.initContainers[] | select(.name == "get-auto-encrypt-client-ca").command | join(" ")' | tee /dev/stderr)
+
+  # check server address
+  actual=$(echo $command | jq ' . | contains("-server-addr=release-name-consul-server")')
+  [ "${actual}" = "true" ]
+
+  # check server port
+  actual=$(echo $command | jq ' . | contains("-server-port=8501")')
+  [ "${actual}" = "true" ]
+
+  # check server's CA cert
+  actual=$(echo $command | jq ' . | contains("-ca-file=/consul/tls/ca/tls.crt")')
+  [ "${actual}" = "true" ]
+
+  # check consul-api-timeout
+  actual=$(echo $command | jq ' . | contains("-consul-api-timeout=5s")')
+  [ "${actual}" = "true" ]
+}
+
 #--------------------------------------------------------------------
 # Vault
 
-@test "terminatingGateways/Deployment: configures server CA to come from vault when vault is enabled" {
+@test "terminatingGateway/Deployment: configures server CA to come from vault when vault is enabled" {
   cd `chart_dir`
   local object=$(helm template \
      -s templates/terminating-gateways-deployment.yaml \
      --set 'terminatingGateways.enabled=true' \
      --set 'connectInject.enabled=true' \
      --set 'global.tls.enabled=true' \
+      --set 'global.tls.enableAutoEncrypt=true' \
      --set 'global.tls.caCert.secretName=foo' \
      --set 'global.secretsBackend.vault.enabled=true' \
      --set 'global.secretsBackend.vault.consulClientRole=test' \
@@ -1098,30 +1571,16 @@ key2: value2' \
   local actual=$(echo $object | jq -r '.metadata.annotations["vault.hashicorp.com/agent-inject-template-serverca.crt"]' | tee /dev/stderr)
   [ "${actual}" = $'{{- with secret \"foo\" -}}\n{{- .Data.certificate -}}\n{{- end -}}' ]
-
-  actual=$(echo $object | jq -r '.spec.volumes[] | select( .name == "consul-ca-cert")' | tee /dev/stderr)
-  [ "${actual}" = "" ]
-
-  actual=$(echo $object | jq -r '.spec.containers[0].volumeMounts[] | select( .name == "consul-ca-cert")' | tee /dev/stderr)
-  [ "${actual}" = "" ]
-
-  actual=$(echo $object | jq -r '.spec.initContainers[0].volumeMounts[] | select( .name == "consul-ca-cert")' | tee /dev/stderr)
-  [ "${actual}" = "" ]
-
-  actual=$(echo $object | jq -r '.spec.initContainers[0].env[] | select(.name == "CONSUL_CACERT_FILE").value' | tee /dev/stderr)
-  [ "${actual}" = "/vault/secrets/serverca.crt" ]
-
-  actual=$(echo $object | jq -r '.spec.containers[0].args | any(contains("-ca-certs=/vault/secrets/serverca.crt"))' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
 }
 
-@test "terminatingGateways/Deployment: vault CA is not configured by default" {
+@test "terminatingGateway/Deployment: vault CA is not configured by default" {
   cd `chart_dir`
   local object=$(helm template \
      -s templates/terminating-gateways-deployment.yaml \
      --set 'terminatingGateways.enabled=true' \
      --set 'connectInject.enabled=true' \
      --set 'global.tls.enabled=true' \
+      --set 'global.tls.enableAutoEncrypt=true' \
      --set 'global.tls.caCert.secretName=foo' \
      --set 'global.secretsBackend.vault.enabled=true' \
      --set 'global.secretsBackend.vault.consulClientRole=foo' \
@@ -1135,13 +1594,14 @@ key2: value2' \
   [ "${actual}" = "false" ]
 }
 
-@test "terminatingGateways/Deployment: vault CA is not configured when secretName is set but secretKey is not" {
+@test "terminatingGateway/Deployment: vault CA is not configured when secretName is set but secretKey is not" {
   cd `chart_dir`
   local object=$(helm template \
      -s templates/terminating-gateways-deployment.yaml \
      --set 'terminatingGateways.enabled=true' \
      --set 'connectInject.enabled=true' \
      --set 'global.tls.enabled=true' \
+      --set 'global.tls.enableAutoEncrypt=true' \
      --set 'global.tls.caCert.secretName=foo' \
      --set 'global.secretsBackend.vault.enabled=true' \
      --set 'global.secretsBackend.vault.consulClientRole=foo' \
@@ -1156,13 +1616,14 @@ key2: value2' \
   [ "${actual}" = "false" ]
 }
 
-@test "terminatingGateways/Deployment: vault CA is not configured when secretKey is set but secretName is not" {
+@test "terminatingGateway/Deployment: vault CA is not configured when secretKey is set but secretName is not" {
   cd `chart_dir`
   local object=$(helm template \
      -s templates/terminating-gateways-deployment.yaml \
      --set 'terminatingGateways.enabled=true' \
      --set 'connectInject.enabled=true' \
      --set 'global.tls.enabled=true' \
+      --set 'global.tls.enableAutoEncrypt=true' \
      --set 'global.tls.caCert.secretName=foo' \
      --set 'global.secretsBackend.vault.enabled=true' \
      --set 'global.secretsBackend.vault.consulClientRole=foo' \
@@ -1177,13 +1638,14 @@ key2: value2' \
   [ "${actual}" = "false" ]
 }
 
-@test "terminatingGateways/Deployment: vault CA is configured when both secretName and secretKey are set" {
+@test "terminatingGateway/Deployment: vault CA is configured when both secretName and secretKey are set" {
   cd `chart_dir`
   local object=$(helm template \
      -s templates/terminating-gateways-deployment.yaml \
      --set 'terminatingGateways.enabled=true' \
      --set 'connectInject.enabled=true' \
      --set 'global.tls.enabled=true' \
+      --set 'global.tls.enableAutoEncrypt=true' \
      --set 'global.tls.caCert.secretName=foo' \
      --set 'global.secretsBackend.vault.enabled=true' \
      --set 'global.secretsBackend.vault.consulClientRole=foo' \
@@ -1202,7 +1664,7 @@ key2: value2' \
 #--------------------------------------------------------------------
 # Vault agent annotations
 
-@test "terminatingGateways/Deployment: no vault agent annotations defined by default" {
+@test "terminatingGateway/Deployment: no vault agent annotations defined by default" {
   cd `chart_dir`
   local actual=$(helm template \
      -s templates/terminating-gateways-deployment.yaml \
@@ -1214,17 +1676,18 @@ key2: value2' \
      --set 'global.tls.caCert.secretName=foo' \
      --set 'global.secretsBackend.vault.consulCARole=carole' \
      . | tee /dev/stderr |
-      yq -r '.spec.template.metadata.annotations | del(."consul.hashicorp.com/connect-inject") | del(."vault.hashicorp.com/agent-inject") | del(."vault.hashicorp.com/role") | del(."consul.hashicorp.com/gateway-consul-service-name") | del(."consul.hashicorp.com/gateway-kind")' | tee /dev/stderr)
+      yq -r '.spec.template.metadata.annotations | del(."consul.hashicorp.com/connect-inject") | del(."vault.hashicorp.com/agent-inject") | del(."vault.hashicorp.com/role")' | tee /dev/stderr)
   [ "${actual}" = "{}" ]
 }
 
-@test "terminatingGateways/Deployment: vault agent annotations can be set" {
+@test "terminatingGateway/Deployment: vault agent annotations can be set" {
   cd `chart_dir`
   local actual=$(helm template \
      -s templates/terminating-gateways-deployment.yaml \
      --set 'terminatingGateways.enabled=true' \
      --set 'connectInject.enabled=true' \
      --set 'global.tls.enabled=true' \
+      --set 'global.tls.enableAutoEncrypt=true' \
      --set 'global.secretsBackend.vault.enabled=true' \
      --set 'global.secretsBackend.vault.consulClientRole=test' \
      --set 'global.secretsBackend.vault.consulServerRole=foo' \
@@ -1236,228 +1699,6 @@ key2: value2' \
   [ "${actual}" = "bar" ]
 }
 
-#--------------------------------------------------------------------
-# global.cloud
-
-@test "terminatingGateways/Deployment: fails when global.cloud.enabled is true and global.cloud.clientId.secretName is not set but global.cloud.clientSecret.secretName and global.cloud.resourceId.secretName is set" {
-  cd `chart_dir`
-  run helm template \
-      -s templates/terminating-gateways-deployment.yaml \
-      --set 'terminatingGateways.enabled=true' \
-      --set 'global.cloud.enabled=true' \
-      --set 'global.cloud.clientSecret.secretName=client-id-name' \
-      --set 'global.cloud.clientSecret.secretKey=client-id-key' \
-      --set 'global.cloud.resourceId.secretName=client-resource-id-name' \
-      --set 'global.cloud.resourceId.secretKey=client-resource-id-key' \
-      .
-  [ "$status" -eq 1 ]
-  [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]]
-}
-
-@test "terminatingGateways/Deployment: fails when global.cloud.enabled is true and global.cloud.clientSecret.secretName is not set but global.cloud.clientId.secretName and global.cloud.resourceId.secretName is set" {
-  cd `chart_dir`
-  run helm template \
-      -s templates/terminating-gateways-deployment.yaml \
-      --set 'terminatingGateways.enabled=true' \
-      --set 'global.cloud.enabled=true' \
-      --set 'global.cloud.clientId.secretName=client-id-name' \
-      --set 'global.cloud.clientId.secretKey=client-id-key' \
-      --set 'global.cloud.resourceId.secretName=resource-id-name' \
-      --set 'global.cloud.resourceId.secretKey=resource-id-key' \
-      .
-  [ "$status" -eq 1 ]
-  [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]]
-}
-
-@test "terminatingGateways/Deployment: fails when global.cloud.enabled is true and global.cloud.resourceId.secretName is not set but global.cloud.clientId.secretName and global.cloud.clientSecret.secretName is set" {
-  cd `chart_dir`
-  run helm template \
-      -s templates/terminating-gateways-deployment.yaml \
-      --set 'terminatingGateways.enabled=true' \
-      --set 'global.cloud.enabled=true' \
-      --set 'global.cloud.clientId.secretName=client-id-name' \
-      --set 'global.cloud.clientId.secretKey=client-id-key' \
-      --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \
-      --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \
-      .
-  [ "$status" -eq 1 ]
-  [[ "$output" =~ "When global.cloud.enabled is true, global.cloud.resourceId.secretName, global.cloud.clientId.secretName, and global.cloud.clientSecret.secretName must also be set." ]]
-}
-
-@test "terminatingGateways/Deployment: fails when global.cloud.resourceId.secretName is set but global.cloud.resourceId.secretKey is not set." {
-  cd `chart_dir`
-  run helm template \
-      -s templates/terminating-gateways-deployment.yaml \
-      --set 'terminatingGateways.enabled=true' \
-      --set 'global.cloud.enabled=true' \
-      --set 'global.cloud.clientId.secretName=client-id-name' \
-      --set 'global.cloud.clientId.secretKey=client-id-key' \
-      --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \
-      --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \
-      --set 'global.cloud.resourceId.secretName=resource-id-name' \
-      .
-  [ "$status" -eq 1 ]
-  [[ "$output" =~ "When either global.cloud.resourceId.secretName or global.cloud.resourceId.secretKey is defined, both must be set." ]]
-}
-
-@test "terminatingGateways/Deployment: fails when global.cloud.authURL.secretName is set but global.cloud.authURL.secretKey is not set." {
-  cd `chart_dir`
-  run helm template \
-      -s templates/terminating-gateways-deployment.yaml \
-      --set 'terminatingGateways.enabled=true' \
-      --set 'global.cloud.enabled=true' \
-      --set 'global.cloud.clientId.secretName=client-id-name' \
-      --set 'global.cloud.clientId.secretKey=client-id-key' \
-      --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \
-      --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \
-      --set 'global.cloud.resourceId.secretName=resource-id-name' \
-      --set 'global.cloud.resourceId.secretKey=resource-id-key' \
-      --set 'global.cloud.authUrl.secretName=auth-url-name' \
-      .
-  [ "$status" -eq 1 ]
-
-  [[ "$output" =~ "When either global.cloud.authUrl.secretName or global.cloud.authUrl.secretKey is defined, both must be set." ]]
-}
-
-@test "terminatingGateways/Deployment: fails when global.cloud.authURL.secretKey is set but global.cloud.authURL.secretName is not set." {
-  cd `chart_dir`
-  run helm template \
-      -s templates/terminating-gateways-deployment.yaml \
-      --set 'terminatingGateways.enabled=true' \
-      --set 'global.cloud.enabled=true' \
-      --set 'global.cloud.clientId.secretName=client-id-name' \
-      --set 'global.cloud.clientId.secretKey=client-id-key' \
-      --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \
-      --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \
-      --set 'global.cloud.resourceId.secretName=resource-id-name' \
-      --set 'global.cloud.resourceId.secretKey=resource-id-key' \
-      --set 'global.cloud.authUrl.secretKey=auth-url-key' \
-      .
-  [ "$status" -eq 1 ]
-
-  [[ "$output" =~ "When either global.cloud.authUrl.secretName or global.cloud.authUrl.secretKey is defined, both must be set." ]]
-}
-
-@test "terminatingGateways/Deployment: fails when global.cloud.apiHost.secretName is set but global.cloud.apiHost.secretKey is not set." {
-  cd `chart_dir`
-  run helm template \
-      -s templates/terminating-gateways-deployment.yaml \
-      --set 'terminatingGateways.enabled=true' \
-      --set 'global.cloud.enabled=true' \
-      --set 'global.cloud.clientId.secretName=client-id-name' \
-      --set 'global.cloud.clientId.secretKey=client-id-key' \
-      --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \
-      --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \
-      --set 'global.cloud.resourceId.secretName=resource-id-name' \
-      --set 'global.cloud.resourceId.secretKey=resource-id-key' \
-      --set 'global.cloud.apiHost.secretName=auth-url-name' \
-      .
-  [ "$status" -eq 1 ]
-
-  [[ "$output" =~ "When either global.cloud.apiHost.secretName or global.cloud.apiHost.secretKey is defined, both must be set." ]]
-}
-
-@test "terminatingGateways/Deployment: fails when global.cloud.apiHost.secretKey is set but global.cloud.apiHost.secretName is not set." {
-  cd `chart_dir`
-  run helm template \
-      -s templates/mesh-gateway-deployment.yaml \
-      --set 'connectInject.enabled=true' \
-      --set 'meshGateway.enabled=true' \
-      --set 'global.tls.enabled=true' \
-      --set 'global.tls.enableAutoEncrypt=true' \
-      --set 'global.cloud.enabled=true' \
-      --set 'global.cloud.clientId.secretName=client-id-name' \
-      --set 'global.cloud.clientId.secretKey=client-id-key' \
-      --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \
-      --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \
-      --set 'global.cloud.resourceId.secretName=resource-id-name' \
-      --set 'global.cloud.resourceId.secretKey=resource-id-key' \
-      --set 'global.cloud.apiHost.secretKey=auth-url-key' \
-      .
-  [ "$status" -eq 1 ]
-
-  [[ "$output" =~ "When either global.cloud.apiHost.secretName or global.cloud.apiHost.secretKey is defined, both must be set." ]]
-}
-
-@test "terminatingGateways/Deployment: fails when global.cloud.scadaAddress.secretName is set but global.cloud.scadaAddress.secretKey is not set." {
-  cd `chart_dir`
-  run helm template \
-      -s templates/terminating-gateways-deployment.yaml \
-      --set 'terminatingGateways.enabled=true' \
-      --set 'global.cloud.enabled=true' \
-      --set 'global.cloud.clientId.secretName=client-id-name' \
-      --set 'global.cloud.clientId.secretKey=client-id-key' \
-      --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \
-      --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \
-      --set 'global.cloud.resourceId.secretName=resource-id-name' \
-      --set 'global.cloud.resourceId.secretKey=resource-id-key' \
-      --set 'global.cloud.scadaAddress.secretName=scada-address-name' \
-      .
-  [ "$status" -eq 1 ]
-
-  [[ "$output" =~ "When either global.cloud.scadaAddress.secretName or global.cloud.scadaAddress.secretKey is defined, both must be set." ]]
-}
-
-@test "terminatingGateways/Deployment: fails when global.cloud.scadaAddress.secretKey is set but global.cloud.scadaAddress.secretName is not set." {
-  cd `chart_dir`
-  run helm template \
-      -s templates/terminating-gateways-deployment.yaml \
-      --set 'terminatingGateways.enabled=true' \
-      --set 'global.cloud.enabled=true' \
-      --set 'global.cloud.clientId.secretName=client-id-name' \
-      --set 'global.cloud.clientId.secretKey=client-id-key' \
-      --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \
-      --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \
-      --set 'global.cloud.resourceId.secretName=resource-id-name' \
-      --set 'global.cloud.resourceId.secretKey=resource-id-key' \
-      --set 'global.cloud.scadaAddress.secretKey=scada-address-key' \
-      .
-  [ "$status" -eq 1 ]
-
-  [[ "$output" =~ "When either global.cloud.scadaAddress.secretName or global.cloud.scadaAddress.secretKey is defined, both must be set." ]]
-}
-
-@test "terminatingGateways/Deployment: sets TLS server name if global.cloud.enabled is set" {
-  cd `chart_dir`
-  local actual=$(helm template \
-      -s templates/terminating-gateways-deployment.yaml \
-      --set 'terminatingGateways.enabled=true' \
-      --set 'global.tls.enabled=true' \
-      --set 'global.tls.enableAutoEncrypt=true' \
-      --set 'global.cloud.enabled=true' \
-      --set 'global.cloud.clientId.secretName=client-id-name' \
-      --set 'global.cloud.clientId.secretKey=client-id-key' \
-      --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \
-      --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \
-      --set 'global.cloud.resourceId.secretName=resource-id-name' \
-      --set 'global.cloud.resourceId.secretKey=resource-id-key' \
-      . | tee /dev/stderr |
-      yq '.spec.template.spec.containers[0].args | any(contains("-tls-server-name=server.dc1.consul"))' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
-}
-
-@test "terminatingGateways/Deployment: can provide a TLS server name for the sidecar-injector when global.cloud.enabled is set" {
-  cd `chart_dir`
-  local env=$(helm template \
-      -s templates/terminating-gateways-deployment.yaml \
-      --set 'terminatingGateways.enabled=true' \
-      --set 'global.tls.enabled=true' \
-      --set 'global.tls.enableAutoEncrypt=true' \
-      --set 'global.cloud.enabled=true' \
-      --set 'global.cloud.clientId.secretName=client-id-name' \
-      --set 'global.cloud.clientId.secretKey=client-id-key' \
-      --set 'global.cloud.clientSecret.secretName=client-secret-id-name' \
-      --set 'global.cloud.clientSecret.secretKey=client-secret-id-key' \
-      --set 'global.cloud.resourceId.secretName=resource-id-name' \
-      --set 'global.cloud.resourceId.secretKey=resource-id-key' \
-      . | tee /dev/stderr |
-      yq '.spec.template.spec.initContainers[0].env[]' | tee /dev/stderr)
-
-  local actual=$(echo "$env" |
-    jq -r '. | select( .name == "CONSUL_TLS_SERVER_NAME").value' | tee /dev/stderr)
-  [ "${actual}" = "server.dc1.consul" ]
-}
-
 #--------------------------------------------------------------------
 # extraLabels
diff --git a/charts/consul/test/unit/terminating-gateways-service.bats b/charts/consul/test/unit/terminating-gateways-service.bats
deleted file mode 100644
index d831e512e6..0000000000
--- a/charts/consul/test/unit/terminating-gateways-service.bats
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/usr/bin/env bats
-
-load _helpers
-
-@test "terminatingGateways/Service: disabled by default" {
-  cd `chart_dir`
-  assert_empty helm template \
-      -s templates/terminating-gateways-service.yaml \
-      .
-} - -@test "terminatingGateways/Service: enabled with terminatingGateways and connectInject enabled" { - cd `chart_dir` - local object=$(helm template \ - -s templates/terminating-gateways-service.yaml \ - --set 'terminatingGateways.enabled=true' \ - --set 'connectInject.enabled=true' \ - . | tee /dev/stderr | - yq -s '.[0]' | tee /dev/stderr) - - local actual=$(echo $object | yq '. | length > 0' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -#-------------------------------------------------------------------- -# multiple gateways - -@test "terminatingGateways/Service: multiple gateways" { - cd `chart_dir` - local object=$(helm template \ - -s templates/terminating-gateways-service.yaml \ - --set 'terminatingGateways.enabled=true' \ - --set 'connectInject.enabled=true' \ - --set 'terminatingGateways.gateways[0].name=gateway1' \ - --set 'terminatingGateways.gateways[1].name=gateway2' \ - . | tee /dev/stderr | - yq -s -r '.' | tee /dev/stderr) - - local actual=$(echo $object | yq -r '.[0].metadata.name' | tee /dev/stderr) - [ "${actual}" = "release-name-consul-gateway1" ] - - local actual=$(echo $object | yq -r '.[1].metadata.name' | tee /dev/stderr) - [ "${actual}" = "release-name-consul-gateway2" ] - - local actual=$(echo "$object" | - yq -r '.[2] | length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] -} - diff --git a/charts/consul/test/unit/webhook-cert-manager-clusterrole.bats b/charts/consul/test/unit/webhook-cert-manager-clusterrole.bats index 4d1a4abdd2..5f7a03c319 100644 --- a/charts/consul/test/unit/webhook-cert-manager-clusterrole.bats +++ b/charts/consul/test/unit/webhook-cert-manager-clusterrole.bats @@ -2,19 +2,38 @@ load _helpers -@test "webhookCertManager/ClusterRole: enabled by default" { +@test "webhookCertManager/ClusterRole: disabled by default" { + cd `chart_dir` + assert_empty helm template \ + -s templates/webhook-cert-manager-clusterrole.yaml \ + . +} + +@test "webhookCertManager/ClusterRole: enabled with controller.enabled=true and connectInject.enabled=false" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/webhook-cert-manager-clusterrole.yaml \ + --set 'controller.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "webhookCertManager/ClusterRole: enabled with connectInject.enabled=true and controller.enabled=false" { cd `chart_dir` local actual=$(helm template \ -s templates/webhook-cert-manager-clusterrole.yaml \ + --set 'connectInject.enabled=true' \ . | tee /dev/stderr | yq 'length > 0' | tee /dev/stderr) [ "${actual}" = "true" ] } -@test "webhookCertManager/ClusterRole: enabled with connectInject.enabled=true" { +@test "webhookCertManager/ClusterRole: enabled with connectInject.enabled=true and controller.enabled=true" { cd `chart_dir` local actual=$(helm template \ -s templates/webhook-cert-manager-clusterrole.yaml \ + --set 'controller.enabled=true' \ --set 'connectInject.enabled=true' \ . | tee /dev/stderr | yq 'length > 0' | tee /dev/stderr) @@ -28,7 +47,7 @@ load _helpers cd `chart_dir` local object=$(helm template \ -s templates/webhook-cert-manager-clusterrole.yaml \ - --set 'connectInject.enabled=true' \ + --set 'controller.enabled=true' \ . | tee /dev/stderr | yq -r '.rules[0]' | tee /dev/stderr) @@ -64,7 +83,7 @@ load _helpers cd `chart_dir` local object=$(helm template \ -s templates/webhook-cert-manager-clusterrole.yaml \ - --set 'connectInject.enabled=true' \ + --set 'controller.enabled=true' \ . 
| tee /dev/stderr | yq -r '.rules[1]' | tee /dev/stderr) @@ -91,7 +110,7 @@ load _helpers cd `chart_dir` local object=$(helm template \ -s templates/webhook-cert-manager-clusterrole.yaml \ - --set 'connectInject.enabled=true' \ + --set 'controller.enabled=true' \ . | tee /dev/stderr | yq -r '.rules[2]' | tee /dev/stderr) @@ -115,7 +134,7 @@ load _helpers cd `chart_dir` local object=$(helm template \ -s templates/webhook-cert-manager-clusterrole.yaml \ - --set 'connectInject.enabled=true' \ + --set 'controller.enabled=true' \ --set 'global.enablePodSecurityPolicies=true' \ . | tee /dev/stderr | yq -r '.rules[3]' | tee /dev/stderr) @@ -130,11 +149,11 @@ load _helpers #-------------------------------------------------------------------- # Vault -@test "webhookCertManager/ClusterRole: disabled when the following are configured - global.secretsBackend.vault.enabled, global.secretsBackend.vault.enabled, global.secretsBackend.vault.connectInjectRole, global.secretsBackend.vault.connectInject.tlsCert.secretName, and global.secretsBackend.vault.connectInject.caCert.secretName" { +@test "webhookCertManager/ClusterRole: disabled when the following are configured - global.secretsBackend.vault.enabled, global.secretsBackend.vault.enabled, global.secretsBackend.vault.connectInjectRole, global.secretsBackend.vault.connectInject.tlsCert.secretName, global.secretsBackend.vault.connectInject.caCert.secretName, global.secretsBackend.vault.controllerRole, global.secretsBackend.vault.controller.tlsCert.secretName, and .global.secretsBackend.vault.controller.caCert.secretName" { cd `chart_dir` assert_empty helm template \ -s templates/webhook-cert-manager-clusterrole.yaml \ - --set 'connectInject.enabled=true' \ + --set 'controller.enabled=true' \ --set 'global.secretsBackend.vault.enabled=true' \ --set 'global.secretsBackend.vault.consulClientRole=test' \ --set 'global.secretsBackend.vault.consulServerRole=foo' \ @@ -142,6 +161,9 @@ load _helpers --set 'global.secretsBackend.vault.connectInjectRole=inject-ca-role' \ --set 'global.secretsBackend.vault.connectInject.tlsCert.secretName=pki/issue/connect-webhook-cert-dc1' \ --set 'global.secretsBackend.vault.connectInject.caCert.secretName=pki/issue/connect-webhook-cert-dc1' \ + --set 'global.secretsBackend.vault.controllerRole=test' \ + --set 'global.secretsBackend.vault.controller.caCert.secretName=foo/ca' \ + --set 'global.secretsBackend.vault.controller.tlsCert.secretName=foo/tls' \ --set 'global.secretsBackend.vault.consulClientRole=foo' \ --set 'global.secretsBackend.vault.consulServerRole=bar' \ --set 'global.secretsBackend.vault.consulCARole=test2' \ diff --git a/charts/consul/test/unit/webhook-cert-manager-clusterrolebinding.bats b/charts/consul/test/unit/webhook-cert-manager-clusterrolebinding.bats index 2e507d279d..ffabf41ee7 100644 --- a/charts/consul/test/unit/webhook-cert-manager-clusterrolebinding.bats +++ b/charts/consul/test/unit/webhook-cert-manager-clusterrolebinding.bats @@ -2,19 +2,38 @@ load _helpers -@test "webhookCertManager/ClusterRoleBinding: enabled by default" { +@test "webhookCertManager/ClusterRoleBinding: disabled by default" { + cd `chart_dir` + assert_empty helm template \ + -s templates/webhook-cert-manager-clusterrolebinding.yaml \ + . +} + +@test "webhookCertManager/ClusterRoleBinding: enabled with controller.enabled=true and connectInject.enabled=false" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/webhook-cert-manager-clusterrolebinding.yaml \ + --set 'controller.enabled=true' \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "webhookCertManager/ClusterRoleBinding: enabled with connectInject.enabled=true and controller.enabled=false" { cd `chart_dir` local actual=$(helm template \ -s templates/webhook-cert-manager-clusterrolebinding.yaml \ + --set 'connectInject.enabled=true' \ . | tee /dev/stderr | yq 'length > 0' | tee /dev/stderr) [ "${actual}" = "true" ] } -@test "webhookCertManager/ClusterRoleBinding: enabled with connectInject.enabled=true" { +@test "webhookCertManager/ClusterRoleBinding: enabled with connectInject.enabled=true and controller.enabled=true" { cd `chart_dir` local actual=$(helm template \ -s templates/webhook-cert-manager-clusterrolebinding.yaml \ + --set 'controller.enabled=true' \ --set 'connectInject.enabled=true' \ . | tee /dev/stderr | yq 'length > 0' | tee /dev/stderr) @@ -24,11 +43,11 @@ load _helpers #-------------------------------------------------------------------- # Vault -@test "webhookCertManager/ClusterRoleBinding: disabled when the following are configured - global.secretsBackend.vault.enabled, global.secretsBackend.vault.enabled, global.secretsBackend.vault.connectInjectRole, global.secretsBackend.vault.connectInject.tlsCert.secretName, and global.secretsBackend.vault.connectInject.caCert.secretName" { +@test "webhookCertManager/ClusterRoleBinding: disabled when the following are configured - global.secretsBackend.vault.enabled, global.secretsBackend.vault.enabled, global.secretsBackend.vault.connectInjectRole, global.secretsBackend.vault.connectInject.tlsCert.secretName, global.secretsBackend.vault.connectInject.caCert.secretName, global.secretsBackend.vault.controllerRole, global.secretsBackend.vault.controller.tlsCert.secretName, and .global.secretsBackend.vault.controller.caCert.secretName" { cd `chart_dir` assert_empty helm template \ -s templates/webhook-cert-manager-clusterrolebinding.yaml \ - --set 'connectInject.enabled=true' \ + --set 'controller.enabled=true' \ --set 'global.secretsBackend.vault.enabled=true' \ --set 'global.secretsBackend.vault.consulClientRole=test' \ --set 'global.secretsBackend.vault.consulServerRole=foo' \ @@ -36,6 +55,9 @@ load _helpers --set 'global.secretsBackend.vault.connectInjectRole=inject-ca-role' \ --set 'global.secretsBackend.vault.connectInject.tlsCert.secretName=pki/issue/connect-webhook-cert-dc1' \ --set 'global.secretsBackend.vault.connectInject.caCert.secretName=pki/issue/connect-webhook-cert-dc1' \ + --set 'global.secretsBackend.vault.controllerRole=test' \ + --set 'global.secretsBackend.vault.controller.caCert.secretName=foo/ca' \ + --set 'global.secretsBackend.vault.controller.tlsCert.secretName=foo/tls' \ --set 'global.secretsBackend.vault.consulClientRole=foo' \ --set 'global.secretsBackend.vault.consulServerRole=bar' \ --set 'global.secretsBackend.vault.consulCARole=test2' \ diff --git a/charts/consul/test/unit/webhook-cert-manager-configmap.bats b/charts/consul/test/unit/webhook-cert-manager-configmap.bats index 196da220d4..7d7262b9af 100644 --- a/charts/consul/test/unit/webhook-cert-manager-configmap.bats +++ b/charts/consul/test/unit/webhook-cert-manager-configmap.bats @@ -2,33 +2,104 @@ load _helpers -@test "webhookCertManager/Configmap: enabled by default" { +@test "webhookCertManager/Configmap: disabled by default" { + cd `chart_dir` + assert_empty helm template \ + -s templates/webhook-cert-manager-configmap.yaml \ + . 
+} + +@test "webhookCertManager/Configmap: enabled with controller.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/webhook-cert-manager-configmap.yaml \ + --set 'controller.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "webhookCertManager/Configmap: enabled with connectInject.enabled=true and controller.enabled=false" { cd `chart_dir` local actual=$(helm template \ -s templates/webhook-cert-manager-configmap.yaml \ + --set 'connectInject.enabled=true' \ . | tee /dev/stderr | yq 'length > 0' | tee /dev/stderr) [ "${actual}" = "true" ] } -@test "webhookCertManager/Configmap: enabled with connectInject.enabled=true" { +@test "webhookCertManager/Configmap: enabled with connectInject.enabled=true and controller.enabled=true" { cd `chart_dir` local actual=$(helm template \ -s templates/webhook-cert-manager-configmap.yaml \ + --set 'controller.enabled=true' \ --set 'connectInject.enabled=true' \ . | tee /dev/stderr | yq 'length > 0' | tee /dev/stderr) [ "${actual}" = "true" ] } +@test "webhookCertManager/Configmap: configuration has only controller webhook with controller.enabled=true" { + cd `chart_dir` + local cfg=$(helm template \ + -s templates/webhook-cert-manager-configmap.yaml \ + --set 'controller.enabled=true' \ + --set 'connectInject.enabled=false' \ + . | tee /dev/stderr | + yq -r '.data["webhook-config.json"]' | tee /dev/stderr) + + local actual=$(echo $cfg | jq '. | length == 1') + [ "${actual}" = "true" ] + + local actual=$(echo $cfg | jq '.[0].name | contains("controller")') + [ "${actual}" = "true" ] +} + +@test "webhookCertManager/Configmap: configuration has only connect-inject webhook with connectInject.enabled=true" { + cd `chart_dir` + local cfg=$(helm template \ + -s templates/webhook-cert-manager-configmap.yaml \ + --set 'controller.enabled=false' \ + --set 'connectInject.enabled=true' \ + . | tee /dev/stderr | + yq -r '.data["webhook-config.json"]' | tee /dev/stderr) + + local actual=$(echo $cfg | jq '. | length == 1') + [ "${actual}" = "true" ] + + local actual=$(echo $cfg | jq '.[0].name | contains("controller")') + [ "${actual}" = "false" ] +} + +@test "webhookCertManager/Configmap: configuration contains both controller and connectInject webhook with connectInject.enabled=true and controller.enabled=true" { + cd `chart_dir` + local cfg=$(helm template \ + -s templates/webhook-cert-manager-configmap.yaml \ + --set 'controller.enabled=true' \ + --set 'connectInject.enabled=true' \ + . | tee /dev/stderr | + yq -r '.data["webhook-config.json"]' | tee /dev/stderr) + + + local actual=$(echo $cfg | jq '.
| length == 2') + [ "${actual}" = "true" ] + + local actual=$(echo $cfg | jq '.[0].name | contains("connect-injector")') + [ "${actual}" = "true" ] + + local actual=$(echo $cfg | jq '.[1].name | contains("controller")') + [ "${actual}" = "true" ] +} + #-------------------------------------------------------------------- # Vault -@test "webhookCertManager/Configmap: disabled when the following are configured - global.secretsBackend.vault.enabled, global.secretsBackend.vault.enabled, global.secretsBackend.vault.connectInjectRole, global.secretsBackend.vault.connectInject.tlsCert.secretName, and global.secretsBackend.vault.connectInject.caCert.secretName" { +@test "webhookCertManager/Configmap: disabled when the following are configured - global.secretsBackend.vault.enabled, global.secretsBackend.vault.enabled, global.secretsBackend.vault.connectInjectRole, global.secretsBackend.vault.connectInject.tlsCert.secretName, global.secretsBackend.vault.connectInject.caCert.secretName, global.secretsBackend.vault.controllerRole, global.secretsBackend.vault.controller.tlsCert.secretName, and .global.secretsBackend.vault.controller.caCert.secretName" { cd `chart_dir` assert_empty helm template \ -s templates/webhook-cert-manager-configmap.yaml \ - --set 'connectInject.enabled=true' \ + --set 'controller.enabled=true' \ --set 'global.secretsBackend.vault.enabled=true' \ --set 'global.secretsBackend.vault.consulClientRole=test' \ --set 'global.secretsBackend.vault.consulServerRole=foo' \ @@ -36,6 +107,9 @@ load _helpers --set 'global.secretsBackend.vault.connectInjectRole=inject-ca-role' \ --set 'global.secretsBackend.vault.connectInject.tlsCert.secretName=pki/issue/connect-webhook-cert-dc1' \ --set 'global.secretsBackend.vault.connectInject.caCert.secretName=pki/issue/connect-webhook-cert-dc1' \ + --set 'global.secretsBackend.vault.controllerRole=test' \ + --set 'global.secretsBackend.vault.controller.caCert.secretName=foo/ca' \ + --set 'global.secretsBackend.vault.controller.tlsCert.secretName=foo/tls' \ --set 'global.secretsBackend.vault.consulClientRole=foo' \ --set 'global.secretsBackend.vault.consulServerRole=bar' \ --set 'global.secretsBackend.vault.consulCARole=test2' \ diff --git a/charts/consul/test/unit/webhook-cert-manager-deployment.bats b/charts/consul/test/unit/webhook-cert-manager-deployment.bats index 7d1a028d20..cda0b25d68 100644 --- a/charts/consul/test/unit/webhook-cert-manager-deployment.bats +++ b/charts/consul/test/unit/webhook-cert-manager-deployment.bats @@ -2,16 +2,24 @@ load _helpers -@test "webhookCertManager/Deployment: enabled by default" { +@test "webhookCertManager/Deployment: disabled by default" { + cd `chart_dir` + assert_empty helm template \ + -s templates/webhook-cert-manager-deployment.yaml \ + . +} + +@test "webhookCertManager/Deployment: enabled with controller.enabled=true and connectInject.enabled=false" { cd `chart_dir` local actual=$(helm template \ -s templates/webhook-cert-manager-deployment.yaml \ + --set 'controller.enabled=true' \ . 
| tee /dev/stderr | yq 'length > 0' | tee /dev/stderr) [ "${actual}" = "true" ] } -@test "webhookCertManager/Deployment: enabled with connectInject.enabled=true" { +@test "webhookCertManager/Deployment: enabled with connectInject.enabled=true and controller.enabled=false" { cd `chart_dir` local actual=$(helm template \ -s templates/webhook-cert-manager-deployment.yaml \ @@ -21,10 +29,22 @@ load _helpers [ "${actual}" = "true" ] } +@test "webhookCertManager/Deployment: enabled with connectInject.enabled=true and controller.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/webhook-cert-manager-deployment.yaml \ + --set 'controller.enabled=true' \ + --set 'connectInject.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + @test "webhookCertManager/Deployment: no tolerations by default" { cd `chart_dir` local actual=$(helm template \ -s templates/webhook-cert-manager-deployment.yaml \ + --set 'controller.enabled=true' \ --set 'connectInject.enabled=true' \ . | tee /dev/stderr | yq -r '.spec.template.spec.tolerations' | tee /dev/stderr) @@ -35,6 +55,7 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/webhook-cert-manager-deployment.yaml \ + --set 'controller.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'webhookCertManager.tolerations=- key: value' \ . | tee /dev/stderr | @@ -46,6 +67,7 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/webhook-cert-manager-deployment.yaml \ + --set 'controller.enabled=true' \ --set 'connectInject.enabled=true' \ . | tee /dev/stderr | yq -r '.spec.template.spec.nodeSelector' | tee /dev/stderr) @@ -56,6 +78,7 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/webhook-cert-manager-deployment.yaml \ + --set 'controller.enabled=true' \ --set 'connectInject.enabled=true' \ --set 'webhookCertManager.nodeSelector=- key: value' \ . 
| tee /dev/stderr | @@ -66,11 +89,11 @@ load _helpers #-------------------------------------------------------------------- # Vault -@test "webhookCertManager/Deployment: disabled when the following are configured - global.secretsBackend.vault.enabled, global.secretsBackend.vault.enabled, global.secretsBackend.vault.connectInjectRole, global.secretsBackend.vault.connectInject.tlsCert.secretName, and global.secretsBackend.vault.connectInject.caCert.secretName" { +@test "webhookCertManager/Deployment: disabled when the following are configured - global.secretsBackend.vault.enabled, global.secretsBackend.vault.enabled, global.secretsBackend.vault.connectInjectRole, global.secretsBackend.vault.connectInject.tlsCert.secretName, global.secretsBackend.vault.connectInject.caCert.secretName, global.secretsBackend.vault.controllerRole, global.secretsBackend.vault.controller.tlsCert.secretName, and .global.secretsBackend.vault.controller.caCert.secretName" { cd `chart_dir` assert_empty helm template \ -s templates/webhook-cert-manager-deployment.yaml \ - --set 'connectInject.enabled=true' \ + --set 'controller.enabled=true' \ --set 'global.secretsBackend.vault.enabled=true' \ --set 'global.secretsBackend.vault.consulClientRole=test' \ --set 'global.secretsBackend.vault.consulServerRole=foo' \ @@ -78,6 +101,9 @@ load _helpers --set 'global.secretsBackend.vault.connectInjectRole=inject-ca-role' \ --set 'global.secretsBackend.vault.connectInject.tlsCert.secretName=pki/issue/connect-webhook-cert-dc1' \ --set 'global.secretsBackend.vault.connectInject.caCert.secretName=pki/issue/connect-webhook-cert-dc1' \ + --set 'global.secretsBackend.vault.controllerRole=test' \ + --set 'global.secretsBackend.vault.controller.caCert.secretName=foo/ca' \ + --set 'global.secretsBackend.vault.controller.tlsCert.secretName=foo/tls' \ --set 'global.secretsBackend.vault.consulClientRole=foo' \ --set 'global.secretsBackend.vault.consulServerRole=bar' \ --set 'global.secretsBackend.vault.consulCARole=test2' \ @@ -91,6 +117,8 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/webhook-cert-manager-deployment.yaml \ + --set 'controller.enabled=true' \ + --set 'connectInject.enabled=true' \ . | tee /dev/stderr | yq -r '.spec.template.metadata.labels | del(."app") | del(."chart") | del(."release") | del(."component") | del(."heritage")' | tee /dev/stderr) [ "${actual}" = "{}" ] @@ -100,6 +128,8 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/webhook-cert-manager-deployment.yaml \ + --set 'controller.enabled=true' \ + --set 'connectInject.enabled=true' \ --set 'global.extraLabels.foo=bar' \ . | tee /dev/stderr) local actualBar=$(echo "${actual}" | yq -r '.metadata.labels.foo' | tee /dev/stderr) @@ -112,6 +142,8 @@ load _helpers cd `chart_dir` local actual=$(helm template \ -s templates/webhook-cert-manager-deployment.yaml \ + --set 'controller.enabled=true' \ + --set 'connectInject.enabled=true' \ --set 'global.extraLabels.foo=bar' \ --set 'global.extraLabels.baz=qux' \ . 
| tee /dev/stderr) diff --git a/charts/consul/test/unit/webhook-cert-manager-podsecuritypolicy.bats b/charts/consul/test/unit/webhook-cert-manager-podsecuritypolicy.bats index d8e16f867c..00bb55a2d4 100644 --- a/charts/consul/test/unit/webhook-cert-manager-podsecuritypolicy.bats +++ b/charts/consul/test/unit/webhook-cert-manager-podsecuritypolicy.bats @@ -2,25 +2,33 @@ load _helpers -@test "webhookCertManager/PodSecurityPolicy: disabled by default with connect disabled" { +@test "webhookCertManager/PodSecurityPolicy: disabled by default" { cd `chart_dir` assert_empty helm template \ -s templates/webhook-cert-manager-podsecuritypolicy.yaml \ - --set 'connectInject.enabled=false' \ - --set 'global.enablePodSecurityPolicies=true' \ . } -@test "webhookCertManager/PodSecurityPolicy: disabled by default with PSP disabled" { +@test "webhookCertManager/PodSecurityPolicy: disabled by default with controller enabled" { cd `chart_dir` assert_empty helm template \ -s templates/webhook-cert-manager-podsecuritypolicy.yaml \ - --set 'connectInject.enabled=true' \ - --set 'global.enablePodSecurityPolicies=false' \ + --set 'controller.enabled=true' \ . } -@test "webhookCertManager/PodSecurityPolicy: enabled with connectInject.enabled=true and global.enablePodSecurityPolicies=true" { +@test "webhookCertManager/PodSecurityPolicy: enabled with controller.enabled=true, connectInject.enabled=false and global.enablePodSecurityPolicies=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/webhook-cert-manager-podsecuritypolicy.yaml \ + --set 'controller.enabled=true' \ + --set 'global.enablePodSecurityPolicies=true' \ + . | tee /dev/stderr | + yq -s 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + +@test "webhookCertManager/PodSecurityPolicy: enabled with connectInject.enabled=true, controller.enabled=false and global.enablePodSecurityPolicies=true" { cd `chart_dir` local actual=$(helm template \ -s templates/webhook-cert-manager-podsecuritypolicy.yaml \ @@ -31,16 +39,27 @@ load _helpers [ "${actual}" = "true" ] } +@test "webhookCertManager/PodSecurityPolicy: enabled with connectInject.enabled=true, controller.enabled=true and global.enablePodSecurityPolicies=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/webhook-cert-manager-podsecuritypolicy.yaml \ + --set 'controller.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.enablePodSecurityPolicies=true' \ + . 
| tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} #-------------------------------------------------------------------- # Vault -@test "webhookCertManager/PodSecurityPolicy: disabled when the following are configured - global.secretsBackend.vault.enabled, global.secretsBackend.vault.enabled, global.secretsBackend.vault.connectInjectRole, global.secretsBackend.vault.connectInject.tlsCert.secretName, and global.secretsBackend.vault.connectInject.caCert.secretName" { +@test "webhookCertManager/PodSecurityPolicy: disabled when the following are configured - global.secretsBackend.vault.enabled, global.secretsBackend.vault.enabled, global.secretsBackend.vault.connectInjectRole, global.secretsBackend.vault.connectInject.tlsCert.secretName, global.secretsBackend.vault.connectInject.caCert.secretName, global.secretsBackend.vault.controllerRole, global.secretsBackend.vault.controller.tlsCert.secretName, and .global.secretsBackend.vault.controller.caCert.secretName" { cd `chart_dir` assert_empty helm template \ -s templates/webhook-cert-manager-podsecuritypolicy.yaml \ --set 'global.enablePodSecurityPolicies=true' \ - --set 'connectInject.enabled=true' \ + --set 'controller.enabled=true' \ --set 'global.secretsBackend.vault.enabled=true' \ --set 'global.secretsBackend.vault.consulClientRole=test' \ --set 'global.secretsBackend.vault.consulServerRole=foo' \ @@ -48,6 +67,9 @@ load _helpers --set 'global.secretsBackend.vault.connectInjectRole=inject-ca-role' \ --set 'global.secretsBackend.vault.connectInject.tlsCert.secretName=pki/issue/connect-webhook-cert-dc1' \ --set 'global.secretsBackend.vault.connectInject.caCert.secretName=pki/issue/connect-webhook-cert-dc1' \ + --set 'global.secretsBackend.vault.controllerRole=test' \ + --set 'global.secretsBackend.vault.controller.caCert.secretName=foo/ca' \ + --set 'global.secretsBackend.vault.controller.tlsCert.secretName=foo/tls' \ --set 'global.secretsBackend.vault.consulClientRole=foo' \ --set 'global.secretsBackend.vault.consulServerRole=bar' \ --set 'global.secretsBackend.vault.consulCARole=test2' \ diff --git a/charts/consul/test/unit/webhook-cert-manager-serviceaccount.bats b/charts/consul/test/unit/webhook-cert-manager-serviceaccount.bats index f420e7319c..e4307c9409 100644 --- a/charts/consul/test/unit/webhook-cert-manager-serviceaccount.bats +++ b/charts/consul/test/unit/webhook-cert-manager-serviceaccount.bats @@ -2,16 +2,24 @@ load _helpers -@test "webhookCertManager/ServiceAccount: enabled by default" { +@test "webhookCertManager/ServiceAccount: disabled by default" { + cd `chart_dir` + assert_empty helm template \ + -s templates/webhook-cert-manager-serviceaccount.yaml \ + . +} + +@test "webhookCertManager/ServiceAccount: enabled with controller.enabled=true and connectInject.enabled=false" { cd `chart_dir` local actual=$(helm template \ -s templates/webhook-cert-manager-serviceaccount.yaml \ + --set 'controller.enabled=true' \ . 
| tee /dev/stderr | yq 'length > 0' | tee /dev/stderr) [ "${actual}" = "true" ] } -@test "webhookCertManager/ServiceAccount: enabled with connectInject.enabled=true" { +@test "webhookCertManager/ServiceAccount: enabled with connectInject.enabled=true and controller.enabled=false" { cd `chart_dir` local actual=$(helm template \ -s templates/webhook-cert-manager-serviceaccount.yaml \ @@ -21,6 +29,17 @@ load _helpers [ "${actual}" = "true" ] } +@test "webhookCertManager/ServiceAccount: enabled with connectInject.enabled=true and controller.enabled=true" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/webhook-cert-manager-serviceaccount.yaml \ + --set 'controller.enabled=true' \ + --set 'connectInject.enabled=true' \ + . | tee /dev/stderr | + yq 'length > 0' | tee /dev/stderr) + [ "${actual}" = "true" ] +} + #-------------------------------------------------------------------- # global.imagePullSecrets @@ -28,7 +47,7 @@ load _helpers cd `chart_dir` local object=$(helm template \ -s templates/webhook-cert-manager-serviceaccount.yaml \ - --set 'connectInject.enabled=true' \ + --set 'controller.enabled=true' \ --set 'global.imagePullSecrets[0].name=my-secret' \ --set 'global.imagePullSecrets[1].name=my-secret2' \ . | tee /dev/stderr) @@ -45,11 +64,11 @@ load _helpers #-------------------------------------------------------------------- # Vault -@test "webhookCertManager/ServiceAccount: disabled when the following are configured - global.secretsBackend.vault.enabled, global.secretsBackend.vault.connectInjectRole, global.secretsBackend.vault.connectInject.tlsCert.secretName, and global.secretsBackend.vault.connectInject.caCert.secretName" { +@test "webhookCertManager/ServiceAccount: disabled when the following are configured - global.secretsBackend.vault.enabled, global.secretsBackend.vault.connectInjectRole, global.secretsBackend.vault.connectInject.tlsCert.secretName, global.secretsBackend.vault.connectInject.caCert.secretName, global.secretsBackend.vault.controllerRole, global.secretsBackend.vault.controller.tlsCert.secretName, and .global.secretsBackend.vault.controller.caCert.secretName" { cd `chart_dir` assert_empty helm template \ -s templates/webhook-cert-manager-serviceaccount.yaml \ - --set 'connectInject.enabled=true' \ + --set 'controller.enabled=true' \ --set 'global.secretsBackend.vault.enabled=true' \ --set 'global.secretsBackend.vault.consulClientRole=test' \ --set 'global.secretsBackend.vault.consulServerRole=foo' \ @@ -57,6 +76,9 @@ load _helpers --set 'global.secretsBackend.vault.connectInjectRole=inject-ca-role' \ --set 'global.secretsBackend.vault.connectInject.tlsCert.secretName=pki/issue/connect-webhook-cert-dc1' \ --set 'global.secretsBackend.vault.connectInject.caCert.secretName=pki/issue/connect-webhook-cert-dc1' \ + --set 'global.secretsBackend.vault.controllerRole=test' \ + --set 'global.secretsBackend.vault.controller.caCert.secretName=foo/ca' \ + --set 'global.secretsBackend.vault.controller.tlsCert.secretName=foo/tls' \ --set 'global.secretsBackend.vault.consulClientRole=foo' \ --set 'global.secretsBackend.vault.consulServerRole=bar' \ --set 'global.secretsBackend.vault.consulCARole=test2' \ diff --git a/charts/consul/values.yaml b/charts/consul/values.yaml index 63c2a63cbf..ed49bad0d6 100644 --- a/charts/consul/values.yaml +++ b/charts/consul/values.yaml @@ -29,11 +29,27 @@ global: # Consul into Kubernetes will have, e.g. `service-name.service.consul`. domain: consul - # Configures the Cluster Peering feature. 
Requires Consul v1.14+ and Consul-K8s v1.0.0+. + # [Experimental] Configures the Cluster Peering feature. Requires Consul v1.13+ and Consul-K8s v0.45+. peering: # If true, the Helm chart enables Cluster Peering for the cluster. This option enables peering controllers and # allows use of the PeeringAcceptor and PeeringDialer CRDs for establishing service mesh peerings. enabled: false + tokenGeneration: + serverAddresses: + # Source can be set to "","consul" or "static". + # + # "" is the default source. If servers are enabled, it will check if `server.exposeService` is enabled, and read + # the addresses from that service to use as the peering token server addresses. If using admin partitions and + # only Consul client agents are enabled, the addresses in `externalServers.hosts` and `externalServers.grpcPort` + # will be used. + # + # "consul" will use the Consul advertise addresses in the peering token. + # + # "static" will use the addresses specified in `global.peering.tokenGeneration.serverAddresses.static`. + source: "" + # Static addresses must be formatted "hostname|ip:port" where the port is the Consul server(s)' grpc port. + # @type: array + static: [] # [Enterprise Only] Enabling `adminPartitions` allows creation of Admin Partitions in Kubernetes clusters. # It additionally indicates that you are running Consul Enterprise v1.11+ with a valid Consul Enterprise @@ -50,6 +66,36 @@ global: # Must be "default" in the server cluster ie the Kubernetes cluster that the Consul server pods are deployed onto. name: "default" + # Partition service properties. + service: + type: LoadBalancer + # Optionally set the nodePort value of the partition service if using a NodePort service. + # If not set and using a NodePort service, Kubernetes will automatically assign + # a port. + nodePort: + + # RPC node port + # @type: integer + rpc: null + + # Serf node port + # @type: integer + serf: null + + # HTTPS node port + # @type: integer + https: null + + # Annotations to apply to the partition service. + # + # ```yaml + # annotations: | + # "annotation-key": "annotation-value" + # ``` + # + # @type: string + annotations: null + # The name (and tag) of the Consul Docker image for clients and servers. # This can be overridden per component. This should be pinned to a specific # version tag, otherwise you may inadvertently upgrade your Consul version. @@ -63,7 +109,7 @@ global: # image: "hashicorp/consul-enterprise:1.10.0-ent" # ``` # @default: hashicorp/consul: - image: "hashicorp/consul:1.14.4" + image: "hashicorp/consul:1.13.6" # Array of objects containing image pull secret names that will be applied to each service account. # This can be used to reference image pull secrets if using a custom consul or consul-k8s-control-plane Docker image. @@ -77,13 +123,13 @@ global: # - name: pull-secret-name-2 # ``` # @type: array - imagePullSecrets: [ ] + imagePullSecrets: [] # The name (and tag) of the consul-k8s-control-plane Docker # image that is used for functionality such as catalog sync. # This can be overridden per component. # @default: hashicorp/consul-k8s-control-plane: - imageK8S: docker.mirror.hashicorp.services/hashicorppreview/consul-k8s-control-plane:1.1.0-dev + imageK8S: hashicorp/consul-k8s-control-plane:0.49.5-dev # The name of the datacenter that the agents should # register as. This can't be changed once the Consul cluster is up and running @@ -138,6 +184,17 @@ global: # and check the name of `metadata.name`. 
consulClientRole: "" + # [Enterprise Only] The Vault role for the Consul client snapshot agent. + # The role must be connected to the Consul client snapshot agent's service account. + # The role must also have a policy with read capabilities for the snapshot agent config + # defined by the `client.snapshotAgent.configSecret.secretName` value. + # To discover the service account name of the Consul client, run + # ```shell-session + # $ helm template --show-only templates/client-snapshot-agent-serviceaccount.yaml --set client.snapshotAgent.enabled=true hashicorp/consul + # ``` + # and check the name of `metadata.name`. + consulSnapshotAgentRole: "" + # A Vault role for the Consul `server-acl-init` job, which manages setting ACLs so that clients and components can obtain ACL tokens. # The role must be connected to the `server-acl-init` job's service account. # The role must also have a policy with read and write capabilities for the bootstrap, replication or partition tokens @@ -160,9 +217,15 @@ global: # and check the name of `metadata.name`. adminPartitionsRole: "" + # The Vault role to read Consul controller's webhook's + # CA and issue a certificate and private key. + # A Vault policy must be created which grants issue capabilities to + # `global.secretsBackend.vault.controller.tlsCert.secretName`. + controllerRole: "" + # The Vault role to read Consul connect-injector webhook's CA # and issue a certificate and private key. - # A Vault policy must be created which grants issue capabilities to + # A Vault policy must be created which grants issue capabilities to # `global.secretsBackend.vault.connectInject.tlsCert.secretName`. connectInjectRole: "" @@ -171,7 +234,7 @@ global: # will be used only against the `pki/cert/ca` endpoint which is unauthenticated. A policy must be created which grants # read capabilities to `global.tls.caCert.secretName`, which is usually `pki/cert/ca`. consulCARole: "" - + # This value defines additional annotations for # Vault agent on any pods where it'll be running. # This should be formatted as a multi-line string. @@ -235,17 +298,36 @@ global: additionalConfig: | {} + controller: + # Configuration to the Vault Secret that Kubernetes uses on + # Kubernetes CRD creation, deletion, and update, to get TLS certificates + # used issued from vault to send webhooks to the controller. + tlsCert: + # The Vault secret path that issues TLS certificates for controller + # webhooks. + # @type: string + secretName: null + + # Configuration to the Vault Secret that Kubernetes uses on + # Kubernetes CRD creation, deletion, and update, to get CA certificates + # used issued from vault to send webhooks to the controller. + caCert: + # The Vault secret path that contains the CA certificate for controller + # webhooks. + # @type: string + secretName: null + connectInject: - # Configuration to the Vault Secret that Kubernetes uses on + # Configuration to the Vault Secret that Kubernetes uses on # Kubernetes pod creation, deletion, and update, to get CA certificates # used issued from vault to send webhooks to the ConnectInject. - caCert: + caCert: # The Vault secret path that contains the CA certificate for # Connect Inject webhooks. # @type: string secretName: null - - # Configuration to the Vault Secret that Kubernetes uses on + + # Configuration to the Vault Secret that Kubernetes uses on # Kubernetes pod creation, deletion, and update, to get TLS certificates # used issued from vault to send webhooks to the ConnectInject. 
tlsCert: @@ -291,7 +373,7 @@ global: # See https://www.consul.io/docs/agent/config/cli-flags#_recursor for more details. # If this is an empty array (the default), then Consul DNS will only resolve queries for the Consul top level domain (by default `.consul`). # @type: array - recursors: [ ] + recursors: [] # Enables TLS (https://learn.hashicorp.com/tutorials/consul/tls-encryption-secure) # across the cluster to verify authenticity of the Consul servers and clients. @@ -300,7 +382,6 @@ global: # If true, the Helm chart will enable TLS for Consul # servers and clients and all consul-k8s-control-plane components, as well as generate certificate # authority (optional) and server and client certificates. - # This setting is required for [Cluster Peering](https://developer.hashicorp.com/consul/docs/connect/cluster-peering/k8s). enabled: false # If true, turns on the auto-encrypt feature on clients and servers. @@ -312,13 +393,13 @@ global: # in the server certificate. This is useful when you need to access the # Consul server(s) externally, for example, if you're using the UI. # @type: array - serverAdditionalDNSSANs: [ ] + serverAdditionalDNSSANs: [] # A list of additional IP addresses to set as Subject Alternative Names (SANs) # in the server certificate. This is useful when you need to access the # Consul server(s) externally, for example, if you're using the UI. # @type: array - serverAdditionalIPSANs: [ ] + serverAdditionalIPSANs: [] # If true, `verify_outgoing`, `verify_server_hostname`, # and `verify_incoming` for internal RPC communication will be set to `true` for Consul servers and clients. @@ -494,7 +575,7 @@ global: # A list of addresses of the primary mesh gateways in the form `:`. # (e.g. ["1.1.1.1:443", "2.3.4.5:443"] # @type: array - primaryGateways: [ ] + primaryGateways: [] # If you are setting `global.federation.enabled` to true and are in a secondary datacenter, # set `k8sAuthMethodHost` to the address of the Kubernetes API server of the secondary datacenter. @@ -539,10 +620,33 @@ global: # @type: boolean enableGatewayMetrics: true - # The name (and tag) of the consul-dataplane Docker image used for the + # For connect-injected pods, the consul sidecar is responsible for metrics merging. For ingress/mesh/terminating + # gateways, it additionally ensures the Consul services are always registered with their local Consul client. + # @type: map + consulSidecarContainer: + # Set default resources for consul sidecar. If null, that resource won't + # be set. + # These settings can be overridden on a per-pod basis via these annotations: + # + # - `consul.hashicorp.com/consul-sidecar-cpu-limit` + # - `consul.hashicorp.com/consul-sidecar-cpu-request` + # - `consul.hashicorp.com/consul-sidecar-memory-limit` + # - `consul.hashicorp.com/consul-sidecar-memory-request` + # @recurse: false + # @type: map + resources: + requests: + memory: "25Mi" + cpu: "20m" + limits: + memory: "50Mi" + cpu: "20m" + + # The name (and tag) of the Envoy Docker image used for the # connect-injected sidecar proxies and mesh, terminating, and ingress gateways. - # @default: hashicorp/consul-dataplane: - imageConsulDataplane: "hashicorp/consul-dataplane:1.0.1" + # See https://www.consul.io/docs/connect/proxies/envoy for full compatibility matrix between Consul and Envoy. + # @default: envoyproxy/envoy-alpine: + imageEnvoy: "envoyproxy/envoy:v1.23.1" # Configuration for running this Helm chart on the Red Hat OpenShift platform. # This Helm chart currently supports OpenShift v4.x+. 
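The hunk above reverts the data plane to the Envoy sidecar image (`global.imageEnvoy`) and restores the `consulSidecarContainer` resource defaults. A minimal values override pinning those restored defaults might look like the following sketch; the file name `values-override.yaml` is hypothetical, and the values simply mirror the chart defaults shown above:

```yaml
# values-override.yaml (hypothetical name): pins the restored Envoy data-plane defaults.
global:
  # Envoy sidecar/gateway image; check the Consul/Envoy compatibility matrix
  # (https://www.consul.io/docs/connect/proxies/envoy) before changing the tag.
  imageEnvoy: "envoyproxy/envoy:v1.23.1"
  consulSidecarContainer:
    resources:
      requests:
        memory: "25Mi"
        cpu: "20m"
      limits:
        memory: "50Mi"
        cpu: "20m"
```

Per-pod overrides remain available through the `consul.hashicorp.com/consul-sidecar-*` annotations listed in the comment above.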
@@ -551,77 +655,10 @@ global: # its components on OpenShift. enabled: false - # The time in seconds that the consul API client will wait for a response from + # The time in seconds that the consul API client waits for a response from # the API before cancelling the request. consulAPITimeout: 5s - # Enables installing an HCP Consul self-managed cluster. - # Requires Consul v1.14+. - cloud: - # If true, the Helm chart will enable the installation of an HCP Consul - # self-managed cluster. - enabled: false - - # The name of the Kubernetes secret that holds the HCP resource id. - # This is required when global.cloud.enabled is true. - resourceId: - # The name of the Kubernetes secret that holds the resource id. - # @type: string - secretName: null - # The key within the Kubernetes secret that holds the resource id. - # @type: string - secretKey: null - - # The name of the Kubernetes secret that holds the HCP cloud client id. - # This is required when global.cloud.enabled is true. - clientId: - # The name of the Kubernetes secret that holds the client id. - # @type: string - secretName: null - # The key within the Kubernetes secret that holds the client id. - # @type: string - secretKey: null - - # The name of the Kubernetes secret that holds the HCP cloud client secret. - # This is required when global.cloud.enabled is true. - clientSecret: - # The name of the Kubernetes secret that holds the client secret. - # @type: string - secretName: null - # The key within the Kubernetes secret that holds the client secret. - # @type: string - secretKey: null - - # The name of the Kubernetes secret that holds the HCP cloud client id. - # This is optional when global.cloud.enabled is true. - apiHost: - # The name of the Kubernetes secret that holds the api hostname. - # @type: string - secretName: null - # The key within the Kubernetes secret that holds the api hostname. - # @type: string - secretKey: null - - # The name of the Kubernetes secret that holds the HCP cloud authorization url. - # This is optional when global.cloud.enabled is true. - authUrl: - # The name of the Kubernetes secret that holds the authorization url. - # @type: string - secretName: null - # The key within the Kubernetes secret that holds the authorization url. - # @type: string - secretKey: null - - # The name of the Kubernetes secret that holds the HCP cloud scada address. - # This is optional when global.cloud.enabled is true. - scadaAddress: - # The name of the Kubernetes secret that holds the scada address. - # @type: string - secretName: null - # The key within the Kubernetes secret that holds the scada address. - # @type: string - secretKey: null - # Extra labels to attach to all pods, deployments, daemonsets, statefulsets, and jobs. This should be a YAML map. # # Example: @@ -655,7 +692,7 @@ server: # The number of server agents to run. This determines the fault tolerance of # the cluster. Please see the deployment table (https://consul.io/docs/internals/consensus#deployment-table) # for more information. - replicas: 1 + replicas: 3 # The number of servers that are expected to be running. # It defaults to server.replicas. @@ -694,7 +731,7 @@ server: # # Vault Secrets backend: # If you are using Vault as a secrets backend, a Vault Policy must be created which allows `["create", "update"]` - # capabilities on the PKI issuing endpoint, which is usually of the form `pki/issue/consul-server`. + # capabilities on the PKI issuing endpoint, which is usually of the form `pki/issue/consul-server`. 
# Please see the following guide for steps to generate a compatible certificate: # https://learn.hashicorp.com/tutorials/consul/vault-pki-consul-secure-tls # Note: when using TLS, both the `server.serverCert` and `global.tls.caCert` which points to the CA endpoint of this PKI engine @@ -734,13 +771,13 @@ server: # The StorageClass to use for the servers' StatefulSet storage. It must be # able to be dynamically provisioned if you want the storage - # to be automatically created. For example, to use + # to be automatically created. For example, to use # local(https://kubernetes.io/docs/concepts/storage/storage-classes/#local) # storage classes, the PersistentVolumeClaims would need to be manually created. # A `null` value will use the Kubernetes cluster's default StorageClass. If a default # StorageClass does not exist, you will need to create one. - # Refer to the [Read/Write Tuning](https://www.consul.io/docs/install/performance#read-write-tuning) - # section of the Server Performance Requirements documentation for considerations + # Refer to the [Read/Write Tuning](https://developer.hashicorp.com/consul/docs/install/performance#read-write-tuning) + # section of the Server Performance Requirements documentation for considerations # around choosing a performant storage class. # # ~> **Note:** The [Reference Architecture](https://learn.hashicorp.com/tutorials/consul/reference-architecture#hardware-sizing-for-consul-servers) @@ -893,7 +930,7 @@ server: # with `-config-dir`. This defaults to false. # # @type: array - extraVolumes: [ ] + extraVolumes: [] # A list of sidecar containers. # Example: @@ -906,7 +943,7 @@ server: # - ... # ``` # @type: array - extraContainers: [ ] + extraContainers: [] # This value defines the affinity (https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity) # for server pods. It defaults to allowing only a single server pod on each node, which @@ -1060,57 +1097,7 @@ server: # feature, in case kubernetes cluster is behind egress http proxies. Additionally, # it could be used to configure custom consul parameters. # @type: map - extraEnvironmentVars: { } - - # [Enterprise Only] Values for setting up and running snapshot agents - # (https://consul.io/commands/snapshot/agent) - # within the Consul clusters. They run as a sidecar with Consul servers. - snapshotAgent: - # If true, the chart will install resources necessary to run the snapshot agent. - enabled: false - - # Interval at which to perform snapshots. - # See https://www.consul.io/commands/snapshot/agent#interval - # @type: string - interval: 1h - - # A Kubernetes or Vault secret that should be manually created to contain the entire - # config to be used on the snapshot agent. - # This is the preferred method of configuration since there are usually storage - # credentials present. Please see Snapshot agent config (https://consul.io/commands/snapshot/agent#config-file-options) - # for details. - configSecret: - # The name of the Kubernetes secret or Vault secret path that holds the snapshot agent config. - # @type: string - secretName: null - # The key within the Kubernetes secret or Vault secret key that holds the snapshot agent config. - # @type: string - secretKey: null - - # The resource settings for snapshot agent pods. - # @recurse: false - # @type: map - resources: - requests: - memory: "50Mi" - cpu: "50m" - limits: - memory: "50Mi" - cpu: "50m" - - # Optional PEM-encoded CA certificate that will be added to the trusted system CAs. 
- # Useful if using an S3-compatible storage exposing a self-signed certificate. - # - # Example: - # - # ```yaml - # caCert: | - # -----BEGIN CERTIFICATE----- - # MIIC7jCCApSgAwIBAgIRAIq2zQEVexqxvtxP6J0bXAwwCgYIKoZIzj0EAwIwgbkx - # ... - # ``` - # @type: string - caCert: null + extraEnvironmentVars: {} # Configuration for Consul servers when the servers are running outside of Kubernetes. # When running external servers, configuring these values is recommended @@ -1123,21 +1110,20 @@ externalServers: # An array of external Consul server hosts that are used to make # HTTPS connections from the components in this Helm chart. - # Valid values include an IP, a DNS name, or an [exec=](https://github.com/hashicorp/go-netaddrs) string. + # Valid values include IPs, DNS names, or Cloud auto-join string. # The port must be provided separately below. - # Note: This slice can only contain a single element. - # Note: If enabling clients, `client.join` must also be set to the hosts that should be + # Note: `client.join` must also be set to the hosts that should be # used to join the cluster. In most cases, the `client.join` values # should be the same, however, they may be different if you # wish to use separate hosts for the HTTPS connections. # @type: array - hosts: [ ] + hosts: [] # The HTTPS port of the Consul servers. httpsPort: 8501 # The GRPC port of the Consul servers. - grpcPort: 8502 + grpcPort: 8503 # The server name to use as the SNI host header when connecting with HTTPS. # @type: string @@ -1167,17 +1153,14 @@ externalServers: # @type: string k8sAuthMethodHost: null - # If true, setting this prevents the consul-dataplane and consul-k8s components from watching the Consul servers for changes. This is - # useful for situations where Consul servers are behind a load balancer. - skipServerWatch: false - # Values that configure running a Consul client on Kubernetes nodes. client: # If true, the chart will install all # the resources necessary for a Consul client on every Kubernetes node. This _does not_ require # `server.enabled`, since the agents can be configured to join an external cluster. + # @default: global.enabled # @type: boolean - enabled: false + enabled: "-" # The name of the Docker image (including any tag) for the containers # running Consul client agents. @@ -1329,7 +1312,7 @@ client: # with `-config-dir`. This defaults to false. # # @type: array - extraVolumes: [ ] + extraVolumes: [] # A list of sidecar containers. # Example: @@ -1342,7 +1325,7 @@ client: # - ... # ``` # @type: array - extraContainers: [ ] + extraContainers: [] # Toleration Settings for Client pods # This should be a multi-line string matching the Toleration array @@ -1420,7 +1403,7 @@ client: # feature, in case kubernetes cluster is behind egress http proxies. Additionally, # it could be used to configure custom consul parameters. # @type: map - extraEnvironmentVars: { } + extraEnvironmentVars: {} # This value defines the Pod DNS policy (https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy) # for client pods to use. @@ -1450,6 +1433,73 @@ client: # @type: string updateStrategy: null + # [Enterprise Only] Values for setting up and running snapshot agents + # (https://consul.io/commands/snapshot/agent) + # within the Consul clusters. They are required to be co-located with Consul clients, + # so will inherit the clients' nodeSelector, tolerations and affinity. + snapshotAgent: + # If true, the chart will install resources necessary to run the snapshot agent. 
+ enabled: false + + # The number of snapshot agents to run. + replicas: 2 + + # Interval at which to perform snapshots. + # See https://www.consul.io/commands/snapshot/agent#interval + # @type: string + interval: 1h + + # A Kubernetes or Vault secret that should be manually created to contain the entire + # config to be used on the snapshot agent. + # This is the preferred method of configuration since there are usually storage + # credentials present. Please see Snapshot agent config (https://consul.io/commands/snapshot/agent#config-file-options) + # for details. + configSecret: + # The name of the Kubernetes secret or Vault secret path that holds the snapshot agent config. + # @type: string + secretName: null + # The key within the Kubernetes secret or Vault secret key that holds the snapshot agent config. + # @type: string + secretKey: null + + serviceAccount: + # This value defines additional annotations for the snapshot agent service account. This should be formatted as a + # multi-line string. + # + # ```yaml + # annotations: | + # "sample/annotation1": "foo" + # "sample/annotation2": "bar" + # ``` + # + # @type: string + annotations: null + + # The resource settings for snapshot agent pods. + # @recurse: false + # @type: map + resources: + requests: + memory: "50Mi" + cpu: "50m" + limits: + memory: "50Mi" + cpu: "50m" + + # Optional PEM-encoded CA certificate that will be added to the trusted system CAs. + # Useful if using an S3-compatible storage exposing a self-signed certificate. + # + # Example: + # + # ```yaml + # caCert: | + # -----BEGIN CERTIFICATE----- + # MIIC7jCCApSgAwIBAgIRAIq2zQEVexqxvtxP6J0bXAwwCgYIKoZIzj0EAwIwgbkx + # ... + # ``` + # @type: string + caCert: null + # Configuration for DNS configuration within the Kubernetes cluster. # This creates a service that routes to all agents (client or server) # for serving DNS requests. This DOES NOT automatically configure kube-dns @@ -1464,7 +1514,7 @@ dns: # for default DNS resolution. The DNS lookups fall back to the nameserver IPs # listed in /etc/resolv.conf if not found in Consul. # @type: boolean - enableRedirection: "-" + enableRedirection: false # Used to control the type of service created. For # example, setting this to "LoadBalancer" will create an external load @@ -1577,7 +1627,7 @@ ui: # ``` # # @type: array - hosts: [ ] + hosts: [] # tls is a list of hosts and secret name in an Ingress # which tells the Ingress controller to secure the channel. @@ -1589,7 +1639,7 @@ ui: # secretName: testsecret-tls # ``` # @type: array - tls: [ ] + tls: [] # Annotations to apply to the UI ingress. # @@ -1679,7 +1729,7 @@ syncCatalog: # # Note: `k8sDenyNamespaces` takes precedence over values defined here. # @type: array - k8sAllowNamespaces: [ "*" ] + k8sAllowNamespaces: ["*"] # List of k8s namespaces that should not have their # services synced. This list takes precedence over `k8sAllowNamespaces`. @@ -1689,7 +1739,7 @@ syncCatalog: # `["namespace1", "namespace2"]`, then all k8s namespaces besides `namespace1` # and `namespace2` will be synced. # @type: array - k8sDenyNamespaces: [ "kube-system", "kube-public" ] + k8sDenyNamespaces: ["kube-system", "kube-public"] # [DEPRECATED] Use k8sAllowNamespaces and k8sDenyNamespaces instead. For # backwards compatibility, if both this and the allow/deny lists are set, @@ -1715,10 +1765,10 @@ syncCatalog: # already exist, it will be created. Turning this on overrides the # `consulDestinationNamespace` setting. # `addK8SNamespaceSuffix` may no longer be needed if enabling this option. 
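As a sketch of the mirroring behavior described here, assuming the chart's enclosing `consulNamespaces` block and an illustrative prefix:

```yaml
syncCatalog:
  enabled: true
  consulNamespaces:
    mirroringK8S: true
    # With this prefix, services in k8s namespace "team-a" would sync
    # into the Consul namespace "k8s-team-a".
    mirroringK8SPrefix: "k8s-"
```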
-    # If mirroring is enabled, avoid creating any Consul resources in the following
-    # Kubernetes namespaces, as Consul currently reserves these namespaces for
-    # system use: "system", "universal", "operator", "root".
-    mirroringK8S: true
+    # If mirroring is enabled, avoid creating any Consul resources in the following
+    # Kubernetes namespaces, as Consul currently reserves these namespaces for
+    # system use: "system", "universal", "operator", "root".
+    mirroringK8S: false

     # If `mirroringK8S` is set to true, `mirroringK8SPrefix` allows each Consul namespace
     # to be given a prefix. For example, if `mirroringK8SPrefix` is set to "k8s-", a
@@ -1864,15 +1914,15 @@ syncCatalog:
   #
   # @type: string
   annotations: null
-
+
 # Configures the automatic Connect sidecar injector.
 connectInject:
   # True if you want to enable connect injection. Set to "-" to inherit from
   # global.enabled.
-  enabled: true
+  enabled: false

   # The number of deployment replicas.
-  replicas: 1
+  replicas: 2

   # Image for consul-k8s-control-plane that contains the injector.
   # @type: string
@@ -1903,8 +1953,8 @@ connectInject:

   # This configures the PodDisruptionBudget (https://kubernetes.io/docs/tasks/run-application/configure-pdb/)
   # for the service mesh sidecar injector.
-  disruptionBudget:
-    # This will enable/disable registering a PodDisruptionBudget for the
+  disruptionBudget:
+    # This will enable/disable registering a PodDisruptionBudget for the
     # service mesh sidecar injector. If this is enabled, it will only register the budget so long as
     # the service mesh is enabled.
     enabled: true
@@ -1917,27 +1967,17 @@ connectInject:
     # @type: integer
     maxUnavailable: null

-    # The minimum number of available pods.
-    # Takes precedence over maxUnavailable if set.
-    # @type: integer
-    minAvailable: null
-
   # Configures the consul-cni plugin for Consul service mesh services
   cni:
-    # If true, then all traffic redirection setup uses the consul-cni plugin.
+    # If true, then all traffic redirection setup uses the consul-cni plugin.
     # Requires connectInject.enabled to also be true.
     # @type: boolean
     enabled: false

     # Log level for the installer and plugin. Overrides global.logLevel
     # @type: string
-    logLevel: null
-
-    # Set the namespace to install the CNI plugin into. Overrides global namespace settings for CNI resources.
-    # Ex: "kube-system"
-    # @type: string
-    namespace: null
-
+    logLevel: null
+
     # Location on the kubernetes node where the CNI plugin is installed. Should be an absolute path and start with a '/'.
     # Example on GKE:
     #
@@ -1954,15 +1994,15 @@ connectInject:

     # If the multus CNI plugin is enabled with consul-cni. When enabled, consul-cni will not be installed as a chained
     # CNI plugin. Instead, a NetworkAttachmentDefinition CustomResourceDefinition (CRD) will be created in the helm
     # release namespace. Following multus plugin standards, an annotation is required in order for the consul-cni plugin
-    # to be executed and for your service to be added to the Consul Service Mesh.
+    # to be executed and for your service to be added to the Consul Service Mesh.
     #
     # Add the annotation `'k8s.v1.cni.cncf.io/networks': '[{ "name":"consul-cni","namespace": "consul" }]'` to your pod
     # to use the default installed NetworkAttachmentDefinition CRD.
     #
     # Please refer to the [Multus Quickstart Guide](https://github.com/k8snetworkplumbingwg/multus-cni/blob/master/docs/quickstart.md)
-    # for more information about using multus.
+    # for more information about using multus.
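A hypothetical multus setup combines the chart values below with the pod annotation quoted above; only the annotation value is taken from the text, the rest is a sketch:

```yaml
# Chart side: enable the CNI plugin and multus mode.
connectInject:
  cni:
    enabled: true
    multus: true
# Pod side (shown as comments since it lives in the workload manifest):
# metadata:
#   annotations:
#     'k8s.v1.cni.cncf.io/networks': '[{ "name":"consul-cni","namespace": "consul" }]'
```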
# @type: string - multus: false + multus: false # The resource settings for CNI installer daemonset. # @recurse: false @@ -2007,20 +2047,6 @@ connectInject: # @type: string updateStrategy: null - consulNode: - # meta specifies an arbitrary metadata key/value pair to associate with the node. - # - # Example: - # - # ```yaml - # meta: - # cluster: test-cluster - # persistent: true - # ``` - # - # @type: map - meta: null - # Configures metrics for Consul Connect services. All values are overridable # via annotations on a per-pod basis. @@ -2030,18 +2056,18 @@ connectInject: # add a listener on the Envoy sidecar to expose metrics. The exposed # metrics will depend on whether metrics merging is enabled: # - If metrics merging is enabled: - # the consul-dataplane will run a merged metrics server + # the Consul sidecar will run a merged metrics server # combining Envoy sidecar and Connect service metrics, # i.e. if your service exposes its own Prometheus metrics. # - If metrics merging is disabled: # the listener will just expose Envoy sidecar metrics. # This will inherit from `global.metrics.enabled`. defaultEnabled: "-" - # Configures the consul-dataplane to run a merged metrics server + # Configures the Consul sidecar to run a merged metrics server # to combine and serve both Envoy and Connect service metrics. # This feature is available only in Consul v1.10.0 or greater. defaultEnableMerging: false - # Configures the port at which the consul-dataplane will listen on to return + # Configures the port at which the Consul sidecar will listen on to return # combined metrics. This port only needs to be changed if it conflicts with # the application's ports. defaultMergedMetricsPort: 20100 @@ -2069,19 +2095,6 @@ connectInject: # Optional priorityClassName. priorityClassName: "" - # Extra labels to attach to the connect inject pods. This should be a YAML map. - # - # Example: - # - # ```yaml - # extraLabels: - # labelKey: label-value - # anotherLabelKey: another-label-value - # ``` - # - # @type: map - extraLabels: null - # This value defines additional annotations for # connect inject pods. This should be formatted as a multi-line string. # @@ -2093,7 +2106,7 @@ connectInject: # # @type: string annotations: null - + # The Docker image for Consul to use when performing Connect injection. # Defaults to global.image. # @type: string @@ -2135,7 +2148,7 @@ connectInject: # This setting can be safely disabled by setting to "Ignore". failurePolicy: "Fail" - # Selector for restricting the webhook to only specific namespaces. + # Selector for restricting the webhook to only specific namespaces. # Use with `connectInject.default: true` to automatically inject all pods in namespaces that match the selector. This should be set to a multiline string. # See https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-namespaceselector # for more details. @@ -2174,7 +2187,7 @@ connectInject: # `namespaceSelector` takes precedence over both since it is applied first. # `kube-system` and `kube-public` are never injected, even if included here. # @type: array - k8sAllowNamespaces: [ "*" ] + k8sAllowNamespaces: ["*"] # List of k8s namespaces that should not allow Connect # sidecar injection. This list takes precedence over `k8sAllowNamespaces`. @@ -2187,7 +2200,7 @@ connectInject: # Note: `namespaceSelector` takes precedence over this since it is applied first. # `kube-system` and `kube-public` are never injected. 
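For illustration, one way the allow and deny lists described above can be combined; the namespace names here are made up:

```yaml
connectInject:
  # Allow injection everywhere...
  k8sAllowNamespaces: ["*"]
  # ...except in these namespaces (deny takes precedence over allow).
  k8sDenyNamespaces: ["legacy", "monitoring"]
```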
# @type: array - k8sDenyNamespaces: [ ] + k8sDenyNamespaces: [] # [Enterprise Only] These settings manage the connect injector's interaction with # Consul namespaces (requires consul-ent v1.7+). @@ -2202,10 +2215,10 @@ connectInject: # of the same name as their k8s namespace, optionally prefixed if # `mirroringK8SPrefix` is set below. If the Consul namespace does not # already exist, it will be created. Turning this on overrides the - # `consulDestinationNamespace` setting. If mirroring is enabled, avoid creating any Consul - # resources in the following Kubernetes namespaces, as Consul currently reserves these - # namespaces for system use: "system", "universal", "operator", "root". - mirroringK8S: true + # `consulDestinationNamespace` setting. If mirroring is enabled, avoid creating any Consul + # resources in the following Kubernetes namespaces, as Consul currently reserves these + # namespaces for system use: "system", "universal", "operator", "root". + mirroringK8S: false # If `mirroringK8S` is set to true, `mirroringK8SPrefix` allows each Consul namespace # to be given a prefix. For example, if `mirroringK8SPrefix` is set to "k8s-", a @@ -2318,16 +2331,93 @@ connectInject: memory: "150Mi" cpu: "50m" -# [Mesh Gateways](https://developer.hashicorp.com/consul/docs/connect/gateways/mesh-gateway) enable Consul Connect to work across Consul datacenters. +# Controller handles config entry custom resources. +# Requires consul >= 1.8.4. +# ServiceIntentions require consul 1.9+. +controller: + # Enables the controller for managing custom resources. + enabled: false + + # The number of deployment replicas. + replicas: 1 + + # Log verbosity level. One of "debug", "info", "warn", or "error". + # @type: string + logLevel: "" + + serviceAccount: + # This value defines additional annotations for the controller service account. This should be formatted as a + # multi-line string. + # + # ```yaml + # annotations: | + # "sample/annotation1": "foo" + # "sample/annotation2": "bar" + # ``` + # + # @type: string + annotations: null + + # The resource settings for controller pods. + # @recurse: false + # @type: map + resources: + limits: + cpu: 100m + memory: 50Mi + requests: + cpu: 100m + memory: 50Mi + + # Optional YAML string to specify a nodeSelector config. + # @type: string + nodeSelector: null + + # Optional YAML string to specify tolerations. + # @type: string + tolerations: null + + # Affinity Settings + # This should be a multi-line string matching the affinity object + # @type: string + affinity: null + + # Optional priorityClassName. + priorityClassName: "" + + # Refers to a Kubernetes secret that you have created that contains + # an ACL token for your Consul cluster which grants the controller process the correct + # permissions. This is only needed if you are managing ACLs yourself (i.e. not using + # `global.acls.manageSystemACLs`). + # + # If running Consul OSS, requires permissions: + # ```hcl + # operator = "write" + # service_prefix "" { + # policy = "write" + # intentions = "write" + # } + # ``` + # If running Consul Enterprise, talk to your account manager for assistance. + aclToken: + # The name of the Vault secret that holds the ACL token. + # @type: string + secretName: null + # The key within the Vault secret that holds the ACL token. + # @type: string + secretKey: null + +# Mesh Gateways enable Consul Connect to work across Consul datacenters. 
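Tying together the controller block added above: assuming a token carrying the quoted HCL policy has already been stored in a Kubernetes secret, wiring it in would look roughly like this (secret name and key are illustrative):

```yaml
controller:
  enabled: true
  aclToken:
    secretName: consul-controller-acl-token
    secretKey: token
```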
meshGateway: - # If [mesh gateways](https://developer.hashicorp.com/consul/docs/connect/gateways/mesh-gateway) are enabled, a Deployment will be created that runs + # If mesh gateways are enabled, a Deployment will be created that runs # gateways and Consul Connect will be configured to use gateways. - # This setting is required for [Cluster Peering](https://developer.hashicorp.com/consul/docs/connect/cluster-peering/k8s). - # Requirements: consul 1.6.0+ if using `global.acls.manageSystemACLs``. + # See https://www.consul.io/docs/connect/mesh_gateway.html + # Requirements: consul 1.6.0+ if using + # global.acls.manageSystemACLs. enabled: false # Number of replicas for the Deployment. - replicas: 1 + replicas: 2 # What gets registered as WAN address for the gateway. wanAddress: @@ -2370,6 +2460,9 @@ meshGateway: # The service option configures the Service that fronts the Gateway Deployment. service: + # Whether to create a Service or not. + enabled: true + # Type of service, ex. LoadBalancer, ClusterIP. type: LoadBalancer @@ -2450,6 +2543,18 @@ meshGateway: memory: "100Mi" cpu: "100m" + # The resource settings for the `copy-consul-bin` init container. + # @recurse: false + # @type: map + initCopyConsulContainer: + resources: + requests: + memory: "25Mi" + cpu: "50m" + limits: + memory: "150Mi" + cpu: "50m" + # The resource settings for the `service-init` init container. # @recurse: false # @type: map @@ -2462,26 +2567,18 @@ meshGateway: memory: "50Mi" cpu: "50m" - # This value defines the affinity (https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity) - # for mesh gateway pods. It defaults to `null` thereby allowing multiple gateway pods on each node. But if one would prefer - # a mode which minimizes risk of the cluster becoming unusable if a node is lost, set this value - # to the value in the example below. - # - # Example: - # - # ```yaml - # affinity: | - # podAntiAffinity: - # requiredDuringSchedulingIgnoredDuringExecution: - # - labelSelector: - # matchLabels: - # app: {{ template "consul.name" . }} - # release: "{{ .Release.Name }}" - # component: mesh-gateway - # topologyKey: kubernetes.io/hostname - # ``` - # @type: string - affinity: null + # By default, we set an anti-affinity so that two gateway pods won't be + # on the same node. NOTE: Gateways require that Consul client agents are + # also running on the nodes alongside each gateway pod. + affinity: | + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app: {{ template "consul.name" . }} + release: "{{ .Release.Name }}" + component: mesh-gateway + topologyKey: kubernetes.io/hostname # Optional YAML string to specify tolerations. # @type: string @@ -2545,7 +2642,7 @@ ingressGateways: # for a specific gateway. defaults: # Number of replicas for each ingress gateway defined. - replicas: 1 + replicas: 2 # The service options configure the Service that fronts the gateway Deployment. service: @@ -2563,10 +2660,10 @@ ingressGateways: # @default: [{port: 8080, port: 8443}] # @recurse: false ports: - - port: 8080 - nodePort: null - - port: 8443 - nodePort: null + - port: 8080 + nodePort: null + - port: 8443 + nodePort: null # Annotations to apply to the ingress gateway service. 
Annotations defined # here will be applied to all ingress gateway services in addition to any @@ -2609,26 +2706,30 @@ ingressGateways: memory: "100Mi" cpu: "100m" - # This value defines the affinity (https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity) - # for ingress gateway pods. It defaults to `null` thereby allowing multiple gateway pods on each node. But if one would prefer - # a mode which minimizes risk of the cluster becoming unusable if a node is lost, set this value - # to the value in the example below. - # - # Example: - # - # ```yaml - # affinity: | - # podAntiAffinity: - # requiredDuringSchedulingIgnoredDuringExecution: - # - labelSelector: - # matchLabels: - # app: {{ template "consul.name" . }} - # release: "{{ .Release.Name }}" - # component: ingress-gateway - # topologyKey: kubernetes.io/hostname - # ``` - # @type: string - affinity: null + # The resource settings for the `copy-consul-bin` init container. + # @recurse: false + # @type: map + initCopyConsulContainer: + resources: + requests: + memory: "25Mi" + cpu: "50m" + limits: + memory: "150Mi" + cpu: "50m" + + # By default, we set an anti-affinity so that two of the same gateway pods + # won't be on the same node. NOTE: Gateways require that Consul client agents are + # also running on the nodes alongside each gateway pod. + affinity: | + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app: {{ template "consul.name" . }} + release: "{{ .Release.Name }}" + component: ingress-gateway + topologyKey: kubernetes.io/hostname # Optional YAML string to specify tolerations. # @type: string @@ -2690,7 +2791,7 @@ ingressGateways: # case of annotations where both will be applied. # @type: array gateways: - - name: ingress-gateway + - name: ingress-gateway # Configuration options for terminating gateways. Default values for all # terminating gateways are defined in `terminatingGateways.defaults`. Any of @@ -2711,7 +2812,7 @@ terminatingGateways: # for a specific gateway. defaults: # Number of replicas for each terminating gateway defined. - replicas: 1 + replicas: 2 # A list of extra volumes to mount. These will be exposed to Consul in the path `/consul/userconfig//`. # @@ -2726,7 +2827,7 @@ terminatingGateways: # path: path # secret will now mount to /consul/userconfig/my-secret/path # ``` # @type: array - extraVolumes: [ ] + extraVolumes: [] # Resource limits for all terminating gateway pods # @recurse: false @@ -2739,26 +2840,30 @@ terminatingGateways: memory: "100Mi" cpu: "100m" - # This value defines the affinity (https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity) - # for terminating gateway pods. It defaults to `null` thereby allowing multiple gateway pods on each node. But if one would prefer - # a mode which minimizes risk of the cluster becoming unusable if a node is lost, set this value - # to the value in the example below. - # - # Example: - # - # ```yaml - # affinity: | - # podAntiAffinity: - # requiredDuringSchedulingIgnoredDuringExecution: - # - labelSelector: - # matchLabels: - # app: {{ template "consul.name" . }} - # release: "{{ .Release.Name }}" - # component: terminating-gateway - # topologyKey: kubernetes.io/hostname - # ``` - # @type: string - affinity: null + # The resource settings for the `copy-consul-bin` init container. 
+ # @recurse: false + # @type: map + initCopyConsulContainer: + resources: + requests: + memory: "25Mi" + cpu: "50m" + limits: + memory: "150Mi" + cpu: "50m" + + # By default, we set an anti-affinity so that two of the same gateway pods + # won't be on the same node. NOTE: Gateways require that Consul client agents are + # also running on the nodes alongside each gateway pod. + affinity: | + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app: {{ template "consul.name" . }} + release: "{{ .Release.Name }}" + component: terminating-gateway + topologyKey: kubernetes.io/hostname # Optional YAML string to specify tolerations. # @type: string @@ -2831,7 +2936,7 @@ terminatingGateways: # case of annotations where both will be applied. # @type: array gateways: - - name: terminating-gateway + - name: terminating-gateway # Configuration settings for the Consul API Gateway integration apiGateway: @@ -2839,16 +2944,9 @@ apiGateway: enabled: false # Image to use for the api-gateway-controller pods and gateway instances - # - # ~> **Note:** Using API Gateway <= 0.4 with external servers requires setting `client.enabled: true`. # @type: string image: null - # The name (and tag) of the Envoy Docker image used for the - # apiGateway. For other Consul compoenents, imageEnvoy has been replaced with Consul Dataplane. - # @default: envoyproxy/envoy: - imageEnvoy: "envoyproxy/envoy:v1.23.1" - # Override global log verbosity level for api-gateway-controller pods. One of "debug", "info", "warn", or "error". # @type: string logLevel: info diff --git a/charts/demo/.helmignore b/charts/demo/.helmignore deleted file mode 100644 index 0e8a0eb36f..0000000000 --- a/charts/demo/.helmignore +++ /dev/null @@ -1,23 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*.orig -*~ -# Various IDEs -.project -.idea/ -*.tmproj -.vscode/ diff --git a/charts/demo/Chart.yaml b/charts/demo/Chart.yaml deleted file mode 100644 index 82fc51d2df..0000000000 --- a/charts/demo/Chart.yaml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: v2 -name: consul-demo -description: A Helm chart for Consul demo app - -# A chart can be either an 'application' or a 'library' chart. -# -# Application charts are a collection of templates that can be packaged into versioned archives -# to be deployed. -# -# Library charts provide useful utilities or functions for the chart developer. They're included as -# a dependency of application charts to inject those utilities and functions into the rendering -# pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application - -# This is the chart version. This version number should be incremented each time you make changes -# to the chart and its templates, including the app version. -# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.0 - -# This is the version number of the application being deployed. This version number should be -# incremented each time you make changes to the application. Versions are not expected to -# follow Semantic Versioning. They should reflect the version the application is using. -# It is recommended to use it with quotes. 
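The two version fields in the Chart.yaml being removed follow the conventions its comments spell out; a bumped release might look like this (example values only):

```yaml
version: 0.2.0       # chart version: SemVer, bumped on any chart or template change
appVersion: "0.2.0"  # app version: quoted string, tracks the packaged application
```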
-appVersion: "0.1.0" diff --git a/charts/demo/templates/frontend.yaml b/charts/demo/templates/frontend.yaml deleted file mode 100644 index 38d466e87e..0000000000 --- a/charts/demo/templates/frontend.yaml +++ /dev/null @@ -1,61 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: frontend - labels: - app: frontend -spec: - type: ClusterIP - ports: - - port: 3000 - targetPort: 3000 - selector: - app: frontend ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: frontend -automountServiceAccountToken: true ---- -apiVersion: consul.hashicorp.com/v1alpha1 -kind: ServiceDefaults -metadata: - name: frontend -spec: - protocol: "http" ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: frontend -spec: - replicas: 1 - selector: - matchLabels: - service: frontend - app: frontend - template: - metadata: - labels: - service: frontend - app: frontend - annotations: - prometheus.io/scrape: "true" - prometheus.io/port: "9102" - consul.hashicorp.com/connect-inject: "true" - spec: - serviceAccountName: frontend - containers: - - name: frontend - image: hashicorpdemoapp/frontend:v1.0.3 - imagePullPolicy: Always - env: - - name: NEXT_PUBLIC_PUBLIC_API_URL - value: "/" - ports: - - containerPort: 3000 - # Added for debugging purposes - NOT RECOMMENDED - # securityContext: - # allowPrivilegeEscalation: false - # runAsUser: 0 diff --git a/charts/demo/templates/intentions.yaml b/charts/demo/templates/intentions.yaml deleted file mode 100644 index e0a0a0a5b1..0000000000 --- a/charts/demo/templates/intentions.yaml +++ /dev/null @@ -1,65 +0,0 @@ -apiVersion: consul.hashicorp.com/v1alpha1 -kind: ServiceIntentions -metadata: - name: public-api -spec: - sources: - - name: nginx - action: allow - destination: - name: public-api ---- -apiVersion: consul.hashicorp.com/v1alpha1 -kind: ServiceIntentions -metadata: - name: products-api -spec: - sources: - - name: public-api - action: allow - destination: - name: products-api ---- -apiVersion: consul.hashicorp.com/v1alpha1 -kind: ServiceIntentions -metadata: - name: payments -spec: - sources: - - name: public-api - action: allow - destination: - name: payments ---- -apiVersion: consul.hashicorp.com/v1alpha1 -kind: ServiceIntentions -metadata: - name: postgres -spec: - sources: - - name: products-api - action: allow - destination: - name: postgres ---- -apiVersion: consul.hashicorp.com/v1alpha1 -kind: ServiceIntentions -metadata: - name: frontend -spec: - sources: - - name: nginx - action: allow - destination: - name: frontend ---- -apiVersion: consul.hashicorp.com/v1alpha1 -kind: ServiceIntentions -metadata: - name: deny-all -spec: - destination: - name: '*' - sources: - - name: '*' - action: deny diff --git a/charts/demo/templates/nginx.yaml b/charts/demo/templates/nginx.yaml deleted file mode 100644 index ebca16f2a0..0000000000 --- a/charts/demo/templates/nginx.yaml +++ /dev/null @@ -1,124 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: nginx -spec: - selector: - app: nginx - ports: - - name: http - protocol: TCP - port: 80 - targetPort: 80 ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: nginx -automountServiceAccountToken: true ---- -apiVersion: consul.hashicorp.com/v1alpha1 -kind: ServiceDefaults -metadata: - name: nginx -spec: - protocol: "http" ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: nginx-configmap -data: - config: | - - # /etc/nginx/conf.d/default.conf - proxy_cache_path /var/cache/nginx levels=1:2 keys_zone=STATIC:10m inactive=7d use_temp_path=off; - - upstream frontend_upstream { - server 
localhost:3000; - } - - server { - listen 80; - server_name localhost; - - server_tokens off; - - gzip on; - gzip_proxied any; - gzip_comp_level 4; - gzip_types text/css application/javascript image/svg+xml; - - proxy_http_version 1.1; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection 'upgrade'; - proxy_set_header Host $host; - proxy_cache_bypass $http_upgrade; - - location /_next/static { - proxy_cache STATIC; - proxy_pass http://frontend_upstream; - - # For testing cache - remove before deploying to production - # add_header X-Cache-Status $upstream_cache_status; - } - - location /static { - proxy_cache STATIC; - proxy_ignore_headers Cache-Control; - proxy_cache_valid 60m; - proxy_pass http://frontend_upstream; - - # For testing cache - remove before deploying to production - # add_header X-Cache-Status $upstream_cache_status; - } - - location / { - proxy_pass http://frontend_upstream; - } - - location /api { - proxy_pass http://localhost:8080; - } - } ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: nginx - labels: - app: nginx -spec: - replicas: 1 - selector: - matchLabels: - app: nginx - template: - metadata: - labels: - app: nginx - annotations: - consul.hashicorp.com/connect-inject: "true" - # BEGIN CONSUL-K8S MODIFICATION - consul.hashicorp.com/connect-service-upstreams: 'public-api:8080, frontend:3000' - # END CONSUL-K8S MODIFICATION - spec: - serviceAccountName: nginx - volumes: - - name: config - configMap: - name: nginx-configmap - items: - - key: config - path: default.conf - containers: - - name: nginx - image: nginx:alpine - imagePullPolicy: Always - ports: - - containerPort: 80 - volumeMounts: - - name: config - mountPath: /etc/nginx/conf.d - readOnly: true diff --git a/charts/demo/templates/payments.yaml b/charts/demo/templates/payments.yaml deleted file mode 100644 index 362a7ec1e1..0000000000 --- a/charts/demo/templates/payments.yaml +++ /dev/null @@ -1,52 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: payments -spec: - selector: - app: payments - ports: - - name: http - protocol: TCP - port: 1800 - targetPort: 8080 ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: payments -automountServiceAccountToken: true ---- -apiVersion: consul.hashicorp.com/v1alpha1 -kind: ServiceDefaults -metadata: - name: payments -spec: - protocol: "http" ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: payments - labels: - app: payments -spec: - replicas: 1 - selector: - matchLabels: - app: payments - template: - metadata: - labels: - app: payments - annotations: - consul.hashicorp.com/connect-inject: "true" - spec: - serviceAccountName: payments - containers: - - name: payments - image: hashicorpdemoapp/payments:v0.0.16 - imagePullPolicy: Always - ports: - - containerPort: 8080 diff --git a/charts/demo/templates/postgres.yaml b/charts/demo/templates/postgres.yaml deleted file mode 100644 index 5c7c903b7c..0000000000 --- a/charts/demo/templates/postgres.yaml +++ /dev/null @@ -1,68 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: postgres - labels: - app: postgres -spec: - ports: - - port: 5432 - targetPort: 5432 - protocol: TCP - selector: - app: postgres ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: postgres -automountServiceAccountToken: true ---- -apiVersion: consul.hashicorp.com/v1alpha1 -kind: ServiceDefaults -metadata: - name: postgres -spec: - protocol: "tcp" ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: postgres -spec: - replicas: 1 - selector: - matchLabels: - 
service: postgres - app: postgres - template: - metadata: - labels: - service: postgres - app: postgres - annotations: - prometheus.io/scrape: "true" - prometheus.io/port: "9102" - consul.hashicorp.com/connect-inject: "true" - spec: - serviceAccountName: postgres - containers: - - name: postgres - image: hashicorpdemoapp/product-api-db:v0.0.21 - imagePullPolicy: Always - ports: - - containerPort: 5432 - env: - - name: POSTGRES_DB - value: products - - name: POSTGRES_USER - value: postgres - - name: POSTGRES_PASSWORD - value: password - volumeMounts: - - mountPath: "/var/lib/postgresql/data" - name: "pgdata" - volumes: - - name: pgdata - emptyDir: {} diff --git a/charts/demo/templates/products-api.yaml b/charts/demo/templates/products-api.yaml deleted file mode 100644 index 4e2fc4bea8..0000000000 --- a/charts/demo/templates/products-api.yaml +++ /dev/null @@ -1,92 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: products-api -spec: - selector: - app: products-api - ports: - - name: http - protocol: TCP - port: 9090 - targetPort: 9090 ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: products-api -automountServiceAccountToken: true ---- -apiVersion: consul.hashicorp.com/v1alpha1 -kind: ServiceDefaults -metadata: - name: products-api -spec: - protocol: "http" ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: db-configmap -data: - config: | - { - "db_connection": "host=localhost port=5432 user=postgres password=password dbname=products sslmode=disable", - "bind_address": ":9090", - "metrics_address": ":9103" - } ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: products-api - labels: - app: products-api -spec: - replicas: 1 - selector: - matchLabels: - app: products-api - template: - metadata: - labels: - app: products-api - annotations: - prometheus.io/scrape: "true" - prometheus.io/port: "9102" - consul.hashicorp.com/connect-inject: "true" - # BEGIN CONSUL-K8S MODIFICATION - consul.hashicorp.com/connect-service-upstreams: 'postgres:5432' - # END CONSUL-K8S MODIFICATION - spec: - serviceAccountName: products-api - volumes: - - name: config - configMap: - name: db-configmap - items: - - key: config - path: conf.json - containers: - - name: products-api - image: hashicorpdemoapp/product-api:v0.0.21 - imagePullPolicy: Always - ports: - - containerPort: 9090 - - containerPort: 9103 - env: - - name: "CONFIG_FILE" - value: "/config/conf.json" - livenessProbe: - httpGet: - path: /health - port: 9090 - initialDelaySeconds: 15 - timeoutSeconds: 1 - periodSeconds: 10 - failureThreshold: 30 - volumeMounts: - - name: config - mountPath: /config - readOnly: true diff --git a/charts/demo/templates/public-api.yaml b/charts/demo/templates/public-api.yaml deleted file mode 100644 index 14d4369ff8..0000000000 --- a/charts/demo/templates/public-api.yaml +++ /dev/null @@ -1,86 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: public-api - labels: - app: public-api -spec: - type: ClusterIP - ports: - - port: 8080 - targetPort: 8080 - selector: - app: public-api ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: public-api -automountServiceAccountToken: true ---- -apiVersion: consul.hashicorp.com/v1alpha1 -kind: ServiceDefaults -metadata: - name: public-api -spec: - protocol: "http" ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: public-api -spec: - replicas: 1 - selector: - matchLabels: - service: public-api - app: public-api - template: - metadata: - labels: - service: public-api - app: public-api - annotations: - 
prometheus.io/scrape: "true" - prometheus.io/port: "9102" - consul.hashicorp.com/connect-inject: "true" - # BEGIN CONSUL-K8S MODIFICATION - consul.hashicorp.com/connect-service-upstreams: 'products-api:9090, payments:1800' - # END CONSUL-K8S MODIFICATION - spec: - serviceAccountName: public-api - containers: - - name: public-api - image: hashicorpdemoapp/public-api:v0.0.6 - imagePullPolicy: Always - ports: - - containerPort: 8080 - env: - - name: BIND_ADDRESS - value: ":8080" - - name: PRODUCT_API_URI - value: "http://localhost:9090" - - name: PAYMENT_API_URI - value: "http://localhost:1800" - - name: jaeger-agent - image: jaegertracing/jaeger-agent:latest - imagePullPolicy: IfNotPresent - ports: - - containerPort: 5775 - name: zk-compact-trft - protocol: UDP - - containerPort: 5778 - name: config-rest - protocol: TCP - - containerPort: 6831 - name: jg-compact-trft - protocol: UDP - - containerPort: 6832 - name: jg-binary-trft - protocol: UDP - - containerPort: 14271 - name: admin-http - protocol: TCP - args: - - --reporter.grpc.host-port=dns:///jaeger-collector-headless.default:14250 - - --reporter.type=grpc diff --git a/charts/demo/values.yaml b/charts/demo/values.yaml deleted file mode 100644 index 2dd99602c7..0000000000 --- a/charts/demo/values.yaml +++ /dev/null @@ -1 +0,0 @@ -# Default values for demo. diff --git a/charts/embed_chart.go b/charts/embed_chart.go index 29e7e9635e..6393508ebb 100644 --- a/charts/embed_chart.go +++ b/charts/embed_chart.go @@ -14,6 +14,3 @@ import "embed" // explicitly embedded. //go:embed consul/Chart.yaml consul/values.yaml consul/templates consul/templates/_helpers.tpl var ConsulHelmChart embed.FS - -//go:embed demo/Chart.yaml demo/values.yaml demo/templates -var DemoHelmChart embed.FS diff --git a/charts/go.mod b/charts/go.mod index f76282d756..9c001ddad8 100644 --- a/charts/go.mod +++ b/charts/go.mod @@ -1,3 +1,3 @@ module github.com/hashicorp/consul-k8s/charts -go 1.20 +go 1.18 diff --git a/cli/cmd/install/install.go b/cli/cmd/install/install.go index 7b5d5bb31c..61742cebbe 100644 --- a/cli/cmd/install/install.go +++ b/cli/cmd/install/install.go @@ -3,7 +3,6 @@ package install import ( "errors" "fmt" - "net/http" "os" "strings" "sync" @@ -15,12 +14,9 @@ import ( "github.com/hashicorp/consul-k8s/cli/common/terminal" "github.com/hashicorp/consul-k8s/cli/config" "github.com/hashicorp/consul-k8s/cli/helm" - "github.com/hashicorp/consul-k8s/cli/preset" "github.com/hashicorp/consul-k8s/cli/release" "github.com/hashicorp/consul-k8s/cli/validation" "github.com/posener/complete" - "golang.org/x/text/cases" - "golang.org/x/text/language" "helm.sh/helm/v3/pkg/action" helmCLI "helm.sh/helm/v3/pkg/cli" "helm.sh/helm/v3/pkg/cli/values" @@ -29,7 +25,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" _ "k8s.io/client-go/plugin/pkg/client/auth" - "k8s.io/utils/strings/slices" "sigs.k8s.io/yaml" ) @@ -61,11 +56,6 @@ const ( flagNameContext = "context" flagNameKubeconfig = "kubeconfig" - - flagNameHCPResourceID = "hcp-resource-id" - - flagNameDemo = "demo" - defaultDemo = false ) type Command struct { @@ -73,26 +63,20 @@ type Command struct { kubernetes kubernetes.Interface - helmActionsRunner helm.HelmActionsRunner - - httpClient *http.Client - set *flag.Sets - flagPreset string - flagNamespace string - flagDryRun bool - flagAutoApprove bool - flagValueFiles []string - flagSetStringValues []string - flagSetValues []string - flagFileValues []string - flagTimeout string - timeoutDuration time.Duration - flagVerbose bool - flagWait 
bool - flagDemo bool - flagNameHCPResourceID string + flagPreset string + flagNamespace string + flagDryRun bool + flagAutoApprove bool + flagValueFiles []string + flagSetStringValues []string + flagSetValues []string + flagFileValues []string + flagTimeout string + timeoutDuration time.Duration + flagVerbose bool + flagWait bool flagKubeConfig string flagKubeContext string @@ -102,6 +86,12 @@ type Command struct { } func (c *Command) init() { + // Store all the possible preset values in 'presetList'. Printed in the help message. + var presetList []string + for name := range config.Presets { + presetList = append(presetList, name) + } + c.set = flag.NewSets() f := c.set.NewSet("Command Options") f.BoolVar(&flag.BoolVar{ @@ -132,7 +122,7 @@ func (c *Command) init() { Name: flagNamePreset, Target: &c.flagPreset, Default: defaultPreset, - Usage: fmt.Sprintf("Use an installation preset, one of %s. Defaults to none", strings.Join(preset.Presets, ", ")), + Usage: fmt.Sprintf("Use an installation preset, one of %s. Defaults to none", strings.Join(presetList, ", ")), }) f.StringSliceVar(&flag.StringSliceVar{ Name: flagNameSetValues, @@ -169,19 +159,6 @@ func (c *Command) init() { Default: defaultWait, Usage: "Wait for Kubernetes resources in installation to be ready before exiting command.", }) - f.BoolVar(&flag.BoolVar{ - Name: flagNameDemo, - Target: &c.flagDemo, - Default: defaultDemo, - Usage: fmt.Sprintf("Install %s immediately after installing %s.", - common.ReleaseTypeConsulDemo, common.ReleaseTypeConsul), - }) - f.StringVar(&flag.StringVar{ - Name: flagNameHCPResourceID, - Target: &c.flagNameHCPResourceID, - Default: "", - Usage: "Set the HCP resource_id when using the 'cloud' preset.", - }) f = c.set.NewSet("Global Options") f.StringVar(&flag.StringVar{ @@ -204,9 +181,6 @@ func (c *Command) init() { // Run installs Consul into a Kubernetes cluster. func (c *Command) Run(args []string) int { c.once.Do(c.init) - if c.helmActionsRunner == nil { - c.helmActionsRunner = &helm.ActionRunner{} - } // The logger is initialized in main with the name cli. Here, we reset the name to install so log lines would be prefixed with install. c.Log.ResetNamed("install") @@ -269,11 +243,7 @@ func (c *Command) Run(args []string) int { c.UI.Output("Checking if Consul can be installed", terminal.WithHeaderStyle()) // Ensure there is not an existing Consul installation which would cause a conflict. - if found, name, ns, _ := c.helmActionsRunner.CheckForInstallations(&helm.CheckForInstallationsOptions{ - Settings: settings, - ReleaseName: common.DefaultReleaseName, - DebugLog: uiLogger, - }); found { + if name, ns, err := common.CheckForInstallations(settings, uiLogger); err == nil { c.UI.Output("Cannot install Consul. 
A Consul cluster is already installed in namespace %s with name %s.", ns, name, terminal.WithErrorStyle()) c.UI.Output("Use the command `consul-k8s uninstall` to uninstall Consul from the cluster.", terminal.WithInfoStyle()) return 1 @@ -287,38 +257,6 @@ func (c *Command) Run(args []string) int { } c.UI.Output("No existing Consul persistent volume claims found", terminal.WithSuccessStyle()) - release := release.Release{ - Name: common.DefaultReleaseName, - Namespace: c.flagNamespace, - } - - msg, err := c.checkForPreviousSecrets(release) - if err != nil { - c.UI.Output(err.Error(), terminal.WithErrorStyle()) - return 1 - } - c.UI.Output(msg, terminal.WithSuccessStyle()) - - if c.flagDemo { - c.UI.Output("Checking if %s can be installed", - cases.Title(language.English).String(common.ReleaseTypeConsulDemo), - terminal.WithHeaderStyle()) - - // Ensure there is not an existing Consul demo installation which would cause a conflict. - if found, name, ns, _ := c.helmActionsRunner.CheckForInstallations(&helm.CheckForInstallationsOptions{ - Settings: settings, - ReleaseName: common.ConsulDemoAppReleaseName, - DebugLog: uiLogger, - }); found { - c.UI.Output("Cannot install %s. A %s cluster is already installed in namespace %s with name %s.", - common.ReleaseTypeConsulDemo, common.ReleaseTypeConsulDemo, ns, name, terminal.WithErrorStyle()) - c.UI.Output("Use the command `consul-k8s uninstall` to uninstall the %s from the cluster.", - common.ReleaseTypeConsulDemo, terminal.WithInfoStyle()) - return 1 - } - c.UI.Output("No existing %s installations found.", common.ReleaseTypeConsulDemo, terminal.WithSuccessStyle()) - } - // Handle preset, value files, and set values logic. vals, err := c.mergeValuesFlagsWithPrecedence(settings) if err != nil { @@ -338,104 +276,104 @@ func (c *Command) Run(args []string) int { return 1 } - release.Configuration = helmVals - - // If an enterprise license secret was provided, check that the secret exists and that the enterprise Consul image is set. - if helmVals.Global.EnterpriseLicense.SecretName != "" { - if err := c.checkValidEnterprise(release.Configuration.Global.EnterpriseLicense.SecretName); err != nil { - c.UI.Output(err.Error(), terminal.WithErrorStyle()) - return 1 - } - c.UI.Output("Valid enterprise Consul secret found.", terminal.WithSuccessStyle()) + rel := release.Release{ + Name: "consul", + Namespace: c.flagNamespace, + Configuration: helmVals, } - err = c.installConsul(valuesYaml, vals, settings, uiLogger) + msg, err := c.checkForPreviousSecrets(rel) if err != nil { c.UI.Output(err.Error(), terminal.WithErrorStyle()) return 1 } + c.UI.Output(msg, terminal.WithSuccessStyle()) - if c.flagDemo { - timeout, err := time.ParseDuration(c.flagTimeout) - if err != nil { + // If an enterprise license secret was provided, check that the secret exists and that the enterprise Consul image is set. 
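The enterprise check below is exercised by configurations along these lines; the image tag and secret details are illustrative, not defaults:

```yaml
global:
  image: hashicorp/consul-enterprise:1.13.3-ent  # illustrative enterprise tag
  enterpriseLicense:
    secretName: consul-ent-license  # assumed to already exist in the cluster
    secretKey: key
```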
+ if helmVals.Global.EnterpriseLicense.SecretName != "" { + if err := c.checkValidEnterprise(rel.Configuration.Global.EnterpriseLicense.SecretName); err != nil { c.UI.Output(err.Error(), terminal.WithErrorStyle()) return 1 } - options := &helm.InstallOptions{ - ReleaseName: common.ConsulDemoAppReleaseName, - ReleaseType: common.ReleaseTypeConsulDemo, - Namespace: c.flagNamespace, - Values: make(map[string]interface{}), - Settings: settings, - EmbeddedChart: consulChart.DemoHelmChart, - ChartDirName: "demo", - UILogger: uiLogger, - DryRun: c.flagDryRun, - AutoApprove: c.flagAutoApprove, - Wait: c.flagWait, - Timeout: timeout, - UI: c.UI, - HelmActionsRunner: c.helmActionsRunner, - } - err = helm.InstallDemoApp(options) - if err != nil { - c.UI.Output(err.Error(), terminal.WithErrorStyle()) - return 1 + c.UI.Output("Valid enterprise Consul secret found.", terminal.WithSuccessStyle()) + } + + // Print out the installation summary. + if !c.flagAutoApprove { + c.UI.Output("Consul Installation Summary", terminal.WithHeaderStyle()) + c.UI.Output("Name: %s", common.DefaultReleaseName, terminal.WithInfoStyle()) + c.UI.Output("Namespace: %s", c.flagNamespace, terminal.WithInfoStyle()) + + if len(vals) == 0 { + c.UI.Output("\nNo overrides provided, using the default Helm values.", terminal.WithInfoStyle()) + } else { + c.UI.Output("\nHelm value overrides\n-------------------\n"+string(valuesYaml), terminal.WithInfoStyle()) } } + // Without informing the user, default global.name to consul if it hasn't been set already. We don't allow setting + // the release name, and since that is hardcoded to "consul", setting global.name to "consul" makes it so resources + // aren't double prefixed with "consul-consul-...". + vals = common.MergeMaps(config.Convert(config.GlobalNameConsul), vals) + if c.flagDryRun { c.UI.Output("Dry run complete. No changes were made to the Kubernetes cluster.\n"+ "Installation can proceed with this configuration.", terminal.WithInfoStyle()) + return 0 } - return 0 -} + if !c.flagAutoApprove { + confirmation, err := c.UI.Input(&terminal.Input{ + Prompt: "Proceed with installation? (y/N)", + Style: terminal.InfoStyle, + Secret: false, + }) -func (c *Command) installConsul(valuesYaml []byte, vals map[string]interface{}, settings *helmCLI.EnvSettings, uiLogger action.DebugLog) error { - // Print out the installation summary. - c.UI.Output("Consul Installation Summary", terminal.WithHeaderStyle()) - c.UI.Output("Name: %s", common.DefaultReleaseName, terminal.WithInfoStyle()) - c.UI.Output("Namespace: %s", c.flagNamespace, terminal.WithInfoStyle()) - - if len(vals) == 0 { - c.UI.Output("\nNo overrides provided, using the default Helm values.", terminal.WithInfoStyle()) - } else { - c.UI.Output("\nHelm value overrides\n--------------------\n"+string(valuesYaml), terminal.WithInfoStyle()) + if err != nil { + c.UI.Output(err.Error(), terminal.WithErrorStyle()) + return 1 + } + if common.Abort(confirmation) { + c.UI.Output("Install aborted. Use the command `consul-k8s install -help` to learn how to customize your installation.", + terminal.WithInfoStyle()) + return 1 + } } - // Without informing the user, default global.name to consul if it hasn't been set already. We don't allow setting - // the release name, and since that is hardcoded to "consul", setting global.name to "consul" makes it so resources - // aren't double prefixed with "consul-consul-...". 
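The merge on the following line effectively applies this implicit override, sketched here as the values it injects:

```yaml
global:
  name: consul  # avoids resources being double prefixed, e.g. "consul-consul-server-0"
```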
- vals = common.MergeMaps(config.ConvertToMap(config.GlobalNameConsul), vals) + c.UI.Output("Installing Consul", terminal.WithHeaderStyle()) - timeout, err := time.ParseDuration(c.flagTimeout) + // Setup action configuration for Helm Go SDK function calls. + actionConfig := new(action.Configuration) + actionConfig, err = helm.InitActionConfig(actionConfig, c.flagNamespace, settings, uiLogger) if err != nil { - return err + c.UI.Output(err.Error(), terminal.WithErrorStyle()) + return 1 } - installOptions := &helm.InstallOptions{ - ReleaseName: common.DefaultReleaseName, - ReleaseType: common.ReleaseTypeConsul, - Namespace: c.flagNamespace, - Values: vals, - Settings: settings, - EmbeddedChart: consulChart.ConsulHelmChart, - ChartDirName: common.TopLevelChartDirName, - UILogger: uiLogger, - DryRun: c.flagDryRun, - AutoApprove: c.flagAutoApprove, - Wait: c.flagWait, - Timeout: timeout, - UI: c.UI, - HelmActionsRunner: c.helmActionsRunner, - } - - err = helm.InstallHelmRelease(installOptions) + + // Setup the installation action. + install := action.NewInstall(actionConfig) + install.ReleaseName = common.DefaultReleaseName + install.Namespace = c.flagNamespace + install.CreateNamespace = true + install.Wait = c.flagWait + install.Timeout = c.timeoutDuration + + // Load the Helm chart. + chart, err := helm.LoadChart(consulChart.ConsulHelmChart, common.TopLevelChartDirName) if err != nil { - return err + c.UI.Output(err.Error(), terminal.WithErrorStyle()) + return 1 } + c.UI.Output("Downloaded charts", terminal.WithSuccessStyle()) - return nil + // Run the install. + if _, err = install.Run(chart, vals); err != nil { + c.UI.Output(err.Error(), terminal.WithErrorStyle()) + return 1 + } + + c.UI.Output("Consul installed in namespace %q.", c.flagNamespace, terminal.WithSuccessStyle()) + return 0 } // Help returns a description of the command and how it is used. @@ -467,8 +405,6 @@ func (c *Command) AutocompleteFlags() complete.Flags { fmt.Sprintf("-%s", flagNameWait): complete.PredictNothing, fmt.Sprintf("-%s", flagNameContext): complete.PredictNothing, fmt.Sprintf("-%s", flagNameKubeconfig): complete.PredictNothing, - fmt.Sprintf("-%s", flagNameDemo): complete.PredictNothing, - fmt.Sprintf("-%s", flagNameHCPResourceID): complete.PredictNothing, } } @@ -564,14 +500,7 @@ func (c *Command) mergeValuesFlagsWithPrecedence(settings *helmCLI.EnvSettings) } if c.flagPreset != defaultPreset { // Note the ordering of the function call, presets have lower precedence than set vals. - p, err := c.getPreset(c.flagPreset) - if err != nil { - return nil, fmt.Errorf("error getting preset provider: %s", err) - } - presetMap, err := p.GetValueMap() - if err != nil { - return nil, fmt.Errorf("error getting preset values: %s", err) - } + presetMap := config.Presets[c.flagPreset].(map[string]interface{}) vals = common.MergeMaps(presetMap, vals) } return vals, err @@ -588,28 +517,13 @@ func (c *Command) validateFlags(args []string) error { if len(c.flagValueFiles) != 0 && c.flagPreset != defaultPreset { return fmt.Errorf("cannot set both -%s and -%s", flagNameConfigFile, flagNamePreset) } - if ok := slices.Contains(preset.Presets, c.flagPreset); c.flagPreset != defaultPreset && !ok { + if _, ok := config.Presets[c.flagPreset]; c.flagPreset != defaultPreset && !ok { return fmt.Errorf("'%s' is not a valid preset", c.flagPreset) } if !common.IsValidLabel(c.flagNamespace) { return fmt.Errorf("'%s' is an invalid namespace. 
Namespaces follow the RFC 1123 label convention and must "+ "consist of a lower case alphanumeric character or '-' and must start/end with an alphanumeric character", c.flagNamespace) } - - if c.flagPreset == preset.PresetCloud { - clientID := os.Getenv(preset.EnvHCPClientID) - clientSecret := os.Getenv(preset.EnvHCPClientSecret) - if clientID == "" { - return fmt.Errorf("When '%s' is specified as the preset, the '%s' environment variable must also be set", preset.PresetCloud, preset.EnvHCPClientID) - } else if clientSecret == "" { - return fmt.Errorf("When '%s' is specified as the preset, the '%s' environment variable must also be set", preset.PresetCloud, preset.EnvHCPClientSecret) - } else if c.flagNameHCPResourceID == "" { - return fmt.Errorf("When '%s' is specified as the preset, the '%s' flag must also be provided", preset.PresetCloud, flagNameHCPResourceID) - } - } else if c.flagNameHCPResourceID != "" { - return fmt.Errorf("The '%s' flag can only be used with the '%s' preset", flagNameHCPResourceID, preset.PresetCloud) - } - duration, err := time.ParseDuration(c.flagTimeout) if err != nil { return fmt.Errorf("unable to parse -%s: %s", flagNameTimeout, err) @@ -638,22 +552,3 @@ func (c *Command) checkValidEnterprise(secretName string) error { } return nil } - -// getPreset is a factory function that, given a string, produces a struct that -// implements the Preset interface. If the string is not recognized an error is -// returned. -func (c *Command) getPreset(name string) (preset.Preset, error) { - hcpConfig := preset.GetHCPPresetFromEnv(c.flagNameHCPResourceID) - getPresetConfig := &preset.GetPresetConfig{ - Name: name, - CloudPreset: &preset.CloudPreset{ - KubernetesClient: c.kubernetes, - KubernetesNamespace: c.flagNamespace, - HCPConfig: hcpConfig, - UI: c.UI, - HTTPClient: c.httpClient, - Context: c.Ctx, - }, - } - return preset.GetPreset(getPresetConfig) -} diff --git a/cli/cmd/install/install_test.go b/cli/cmd/install/install_test.go index 07c04defef..a66febc336 100644 --- a/cli/cmd/install/install_test.go +++ b/cli/cmd/install/install_test.go @@ -1,41 +1,40 @@ package install import ( - "bytes" "context" - "errors" "flag" "fmt" - "io" "os" "testing" "github.com/hashicorp/consul-k8s/cli/common" cmnFlag "github.com/hashicorp/consul-k8s/cli/common/flag" - "github.com/hashicorp/consul-k8s/cli/common/terminal" "github.com/hashicorp/consul-k8s/cli/helm" - "github.com/hashicorp/consul-k8s/cli/preset" "github.com/hashicorp/consul-k8s/cli/release" "github.com/hashicorp/go-hclog" "github.com/posener/complete" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "helm.sh/helm/v3/pkg/action" - "helm.sh/helm/v3/pkg/chart" - helmRelease "helm.sh/helm/v3/pkg/release" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/fake" ) func TestCheckForPreviousPVCs(t *testing.T) { - c := getInitializedCommand(t, nil) + c := getInitializedCommand(t) c.kubernetes = fake.NewSimpleClientset() - - createPVC(t, "consul-server-test1", "default", c.kubernetes) - createPVC(t, "consul-server-test2", "default", c.kubernetes) - + pvc := &v1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "consul-server-test1", + }, + } + pvc2 := &v1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "consul-server-test2", + }, + } + c.kubernetes.CoreV1().PersistentVolumeClaims("default").Create(context.Background(), pvc, metav1.CreateOptions{}) + 
c.kubernetes.CoreV1().PersistentVolumeClaims("default").Create(context.Background(), pvc2, metav1.CreateOptions{}) err := c.checkForPreviousPVCs() require.Error(t, err) require.Equal(t, err.Error(), "found persistent volume claims from previous installations, delete before reinstalling: default/consul-server-test1,default/consul-server-test2") @@ -46,7 +45,12 @@ func TestCheckForPreviousPVCs(t *testing.T) { require.NoError(t, err) // Add a new irrelevant PVC and make sure the check continues to pass. - createPVC(t, "irrelevant-pvc", "default", c.kubernetes) + pvc = &v1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "irrelevant-pvc", + }, + } + c.kubernetes.CoreV1().PersistentVolumeClaims("default").Create(context.Background(), pvc, metav1.CreateOptions{}) err = c.checkForPreviousPVCs() require.NoError(t, err) } @@ -142,7 +146,7 @@ func TestCheckForPreviousSecrets(t *testing.T) { for name, tc := range cases { t.Run(name, func(t *testing.T) { - c := getInitializedCommand(t, nil) + c := getInitializedCommand(t) c.kubernetes = fake.NewSimpleClientset() c.kubernetes.CoreV1().Secrets("consul").Create(context.Background(), tc.secret, metav1.CreateOptions{}) @@ -190,7 +194,7 @@ func TestValidateFlags(t *testing.T) { } for _, testCase := range testCases { - c := getInitializedCommand(t, nil) + c := getInitializedCommand(t) t.Run(testCase.description, func(t *testing.T) { if err := c.validateFlags(testCase.input); err == nil { t.Errorf("Test case should have failed.") @@ -200,22 +204,16 @@ func TestValidateFlags(t *testing.T) { } // getInitializedCommand sets up a command struct for tests. -func getInitializedCommand(t *testing.T, buf io.Writer) *Command { +func getInitializedCommand(t *testing.T) *Command { t.Helper() log := hclog.New(&hclog.LoggerOptions{ Name: "cli", Level: hclog.Info, Output: os.Stdout, }) - var ui terminal.UI - if buf != nil { - ui = terminal.NewUI(context.Background(), buf) - } else { - ui = terminal.NewBasicUI(context.Background()) - } + baseCommand := &common.BaseCommand{ Log: log, - UI: ui, } c := &Command{ @@ -226,7 +224,7 @@ func getInitializedCommand(t *testing.T, buf io.Writer) *Command { } func TestCheckValidEnterprise(t *testing.T) { - c := getInitializedCommand(t, nil) + c := getInitializedCommand(t) c.kubernetes = fake.NewSimpleClientset() secret := &v1.Secret{ ObjectMeta: metav1.ObjectMeta{ @@ -240,7 +238,7 @@ func TestCheckValidEnterprise(t *testing.T) { } // Enterprise secret is valid. - createSecret(t, secret, "consul", c.kubernetes) + c.kubernetes.CoreV1().Secrets("consul").Create(context.Background(), secret, metav1.CreateOptions{}) err := c.checkValidEnterprise(secret.Name) require.NoError(t, err) @@ -258,7 +256,7 @@ func TestCheckValidEnterprise(t *testing.T) { func TestTaskCreateCommand_AutocompleteFlags(t *testing.T) { t.Parallel() - cmd := getInitializedCommand(t, nil) + cmd := getInitializedCommand(t) predictor := cmd.AutocompleteFlags() @@ -281,437 +279,7 @@ func TestTaskCreateCommand_AutocompleteFlags(t *testing.T) { } func TestTaskCreateCommand_AutocompleteArgs(t *testing.T) { - cmd := getInitializedCommand(t, nil) + cmd := getInitializedCommand(t) c := cmd.AutocompleteArgs() assert.Equal(t, complete.PredictNothing, c) } - -// TestValidateCloudPresets tests the validate flags function when passed the cloud preset. 
-func TestValidateCloudPresets(t *testing.T) { - testCases := []struct { - description string - input []string - preProcessingFunc func() - postProcessingFunc func() - expectError bool - }{ - { - "Should not error on cloud preset when HCP_CLIENT_ID and HCP_CLIENT_SECRET envvars are present and hcp-resource-id parameter is provided.", - []string{"-preset=cloud", "-hcp-resource-id=foobar"}, - func() { - os.Setenv("HCP_CLIENT_ID", "foo") - os.Setenv("HCP_CLIENT_SECRET", "bar") - }, - func() { - os.Unsetenv("HCP_CLIENT_ID") - os.Unsetenv("HCP_CLIENT_SECRET") - }, - false, - }, - { - "Should error on cloud preset when HCP_CLIENT_ID is not provided.", - []string{"-preset=cloud", "-hcp-resource-id=foobar"}, - func() { - os.Unsetenv("HCP_CLIENT_ID") - os.Setenv("HCP_CLIENT_SECRET", "bar") - }, - func() { - os.Unsetenv("HCP_CLIENT_ID") - os.Unsetenv("HCP_CLIENT_SECRET") - }, - true, - }, - { - "Should error on cloud preset when HCP_CLIENT_SECRET is not provided.", - []string{"-preset=cloud", "-hcp-resource-id=foobar"}, - func() { - os.Setenv("HCP_CLIENT_ID", "foo") - os.Unsetenv("HCP_CLIENT_SECRET") - }, - func() { - os.Unsetenv("HCP_CLIENT_ID") - os.Unsetenv("HCP_CLIENT_SECRET") - }, - true, - }, - { - "Should error on cloud preset when -hcp-resource-id flag is not provided.", - []string{"-preset=cloud"}, - func() { - os.Setenv("HCP_CLIENT_ID", "foo") - os.Setenv("HCP_CLIENT_SECRET", "bar") - }, - func() { - os.Unsetenv("HCP_CLIENT_ID") - os.Unsetenv("HCP_CLIENT_SECRET") - }, - true, - }, - { - "Should error when -hcp-resource-id flag is provided but cloud preset is not specified.", - []string{"-hcp-resource-id=foobar"}, - func() { - os.Setenv("HCP_CLIENT_ID", "foo") - os.Setenv("HCP_CLIENT_SECRET", "bar") - }, - func() { - os.Unsetenv("HCP_CLIENT_ID") - os.Unsetenv("HCP_CLIENT_SECRET") - }, - true, - }, - } - - for _, testCase := range testCases { - testCase.preProcessingFunc() - c := getInitializedCommand(t, nil) - t.Run(testCase.description, func(t *testing.T) { - err := c.validateFlags(testCase.input) - if testCase.expectError { - require.Error(t, err) - } else { - require.NoError(t, err) - } - }) - defer testCase.postProcessingFunc() - } -} - -func TestGetPreset(t *testing.T) { - testCases := []struct { - description string - presetName string - }{ - { - "'cloud' should return a CloudPreset'.", - preset.PresetCloud, - }, - { - "'quickstart' should return a QuickstartPreset'.", - preset.PresetQuickstart, - }, - { - "'secure' should return a SecurePreset'.", - preset.PresetSecure, - }, - } - - for _, tc := range testCases { - c := getInitializedCommand(t, nil) - t.Run(tc.description, func(t *testing.T) { - p, err := c.getPreset(tc.presetName) - require.NoError(t, err) - switch p.(type) { - case *preset.CloudPreset: - require.Equal(t, preset.PresetCloud, tc.presetName) - case *preset.QuickstartPreset: - require.Equal(t, preset.PresetQuickstart, tc.presetName) - case *preset.SecurePreset: - require.Equal(t, preset.PresetSecure, tc.presetName) - } - }) - } -} - -func TestInstall(t *testing.T) { - var k8s kubernetes.Interface - licenseSecretName := "consul-license" - cases := map[string]struct { - input []string - messages []string - helmActionsRunner *helm.MockActionRunner - preProcessingFunc func() - expectedReturnCode int - expectCheckedForConsulInstallations bool - expectCheckedForConsulDemoInstallations bool - expectConsulInstalled bool - expectConsulDemoInstalled bool - }{ - "install with no arguments returns success": { - input: []string{}, - messages: []string{ - "\n==> Checking if Consul 
can be installed\n ✓ No existing Consul installations found.\n ✓ No existing Consul persistent volume claims found\n ✓ No existing Consul secrets found.\n", - "\n==> Consul Installation Summary\n Name: consul\n Namespace: consul\n \n No overrides provided, using the default Helm values.\n", - "\n==> Installing Consul\n ✓ Downloaded charts.\n ✓ Consul installed in namespace \"consul\".\n", - }, - helmActionsRunner: &helm.MockActionRunner{}, - expectedReturnCode: 0, - expectCheckedForConsulInstallations: true, - expectCheckedForConsulDemoInstallations: false, - expectConsulInstalled: true, - expectConsulDemoInstalled: false, - }, - "install when consul installation errors returns error": { - input: []string{}, - messages: []string{ - "\n==> Checking if Consul can be installed\n ✓ No existing Consul installations found.\n ✓ No existing Consul persistent volume claims found\n ✓ No existing Consul secrets found.\n", - "\n==> Consul Installation Summary\n Name: consul\n Namespace: consul\n \n No overrides provided, using the default Helm values.\n", - "\n==> Installing Consul\n ✓ Downloaded charts.\n ! Helm returned an error.\n", - }, - helmActionsRunner: &helm.MockActionRunner{ - InstallFunc: func(install *action.Install, chrt *chart.Chart, vals map[string]interface{}) (*helmRelease.Release, error) { - return nil, errors.New("Helm returned an error.") - }, - }, - expectedReturnCode: 1, - expectCheckedForConsulInstallations: true, - expectCheckedForConsulDemoInstallations: false, - expectConsulInstalled: false, - expectConsulDemoInstalled: false, - }, - "install with no arguments when consul installation already exists returns error": { - input: []string{ - "--auto-approve", - }, - messages: []string{ - "\n==> Checking if Consul can be installed\n ! Cannot install Consul. A Consul cluster is already installed in namespace consul with name consul.\n Use the command `consul-k8s uninstall` to uninstall Consul from the cluster.\n", - }, - helmActionsRunner: &helm.MockActionRunner{ - CheckForInstallationsFunc: func(options *helm.CheckForInstallationsOptions) (bool, string, string, error) { - return true, "consul", "consul", nil - }, - }, - expectedReturnCode: 1, - expectCheckedForConsulInstallations: true, - expectCheckedForConsulDemoInstallations: false, - expectConsulInstalled: false, - expectConsulDemoInstalled: false, - }, - "install with no arguments when PVCs exist returns error": { - input: []string{}, - messages: []string{ - "\n==> Checking if Consul can be installed\n ✓ No existing Consul installations found.\n ! found persistent volume claims from previous installations, delete before reinstalling: consul/consul-server-test1\n", - }, - helmActionsRunner: &helm.MockActionRunner{}, - preProcessingFunc: func() { - createPVC(t, "consul-server-test1", "consul", k8s) - }, - expectedReturnCode: 1, - expectCheckedForConsulInstallations: true, - expectCheckedForConsulDemoInstallations: false, - expectConsulInstalled: false, - expectConsulDemoInstalled: false, - }, - "install with no arguments when secrets exist returns error": { - input: []string{ - "--auto-approve", - }, - messages: []string{ - "\n==> Checking if Consul can be installed\n ✓ No existing Consul installations found.\n ✓ No existing Consul persistent volume claims found\n ! 
Found Consul secrets, possibly from a previous installation.\nDelete existing Consul secrets from Kubernetes:\n\nkubectl delete secret consul-secret --namespace consul\n\n", - }, - helmActionsRunner: &helm.MockActionRunner{}, - preProcessingFunc: func() { - secret := &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "consul-secret", - Labels: map[string]string{common.CLILabelKey: common.CLILabelValue}, - }, - } - createSecret(t, secret, "consul", k8s) - }, - expectedReturnCode: 1, - expectCheckedForConsulInstallations: true, - expectCheckedForConsulDemoInstallations: false, - expectConsulInstalled: false, - expectConsulDemoInstalled: false, - }, - "enterprise install when license secret exists returns success": { - input: []string{ - "--set", fmt.Sprintf("global.enterpriseLicense.secretName=%s", licenseSecretName), - }, - messages: []string{ - "\n==> Checking if Consul can be installed\n ✓ No existing Consul installations found.\n ✓ No existing Consul persistent volume claims found\n ✓ No existing Consul secrets found.\n ✓ Valid enterprise Consul secret found.\n", - "\n==> Consul Installation Summary\n Name: consul\n Namespace: consul\n \n Helm value overrides\n --------------------\n global:\n enterpriseLicense:\n secretName: consul-license\n \n", - "\n==> Installing Consul\n ✓ Downloaded charts.\n ✓ Consul installed in namespace \"consul\".\n", - }, - helmActionsRunner: &helm.MockActionRunner{}, - preProcessingFunc: func() { - secret := &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: licenseSecretName, - }, - } - createSecret(t, secret, "consul", k8s) - }, - expectedReturnCode: 0, - expectCheckedForConsulInstallations: true, - expectCheckedForConsulDemoInstallations: false, - expectConsulInstalled: true, - expectConsulDemoInstalled: false, - }, - "enterprise install when license secret does not exist returns error": { - input: []string{ - "--set", fmt.Sprintf("global.enterpriseLicense.secretName=%s", licenseSecretName), - }, - messages: []string{ - "\n==> Checking if Consul can be installed\n ✓ No existing Consul installations found.\n ✓ No existing Consul persistent volume claims found\n ✓ No existing Consul secrets found.\n ! 
enterprise license secret \"consul-license\" is not found in the \"consul\" namespace; please make sure that the secret exists in the \"consul\" namespace\n"}, - helmActionsRunner: &helm.MockActionRunner{}, - expectedReturnCode: 1, - expectCheckedForConsulInstallations: true, - expectCheckedForConsulDemoInstallations: false, - expectConsulInstalled: false, - expectConsulDemoInstalled: false, - }, - "install for quickstart preset returns success": { - input: []string{ - "-preset", "quickstart", - }, - messages: []string{ - "\n==> Checking if Consul can be installed\n ✓ No existing Consul installations found.\n ✓ No existing Consul persistent volume claims found\n ✓ No existing Consul secrets found.\n", - "\n==> Consul Installation Summary\n Name: consul\n Namespace: consul\n \n Helm value overrides\n --------------------\n connectInject:\n enabled: true\n metrics:\n defaultEnableMerging: true\n defaultEnabled: true\n enableGatewayMetrics: true\n global:\n metrics:\n enableAgentMetrics: true\n enabled: true\n name: consul\n prometheus:\n enabled: true\n server:\n replicas: 1\n ui:\n enabled: true\n service:\n enabled: true\n \n", - "\n==> Installing Consul\n ✓ Downloaded charts.\n ✓ Consul installed in namespace \"consul\".\n", - }, - helmActionsRunner: &helm.MockActionRunner{}, - expectedReturnCode: 0, - expectCheckedForConsulInstallations: true, - expectCheckedForConsulDemoInstallations: false, - expectConsulInstalled: true, - expectConsulDemoInstalled: false, - }, - "install for secure preset returns success": { - input: []string{ - "-preset", "secure", - }, - messages: []string{ - "\n==> Checking if Consul can be installed\n ✓ No existing Consul installations found.\n ✓ No existing Consul persistent volume claims found\n ✓ No existing Consul secrets found.\n", - "\n==> Consul Installation Summary\n Name: consul\n Namespace: consul\n \n Helm value overrides\n --------------------\n connectInject:\n enabled: true\n global:\n acls:\n manageSystemACLs: true\n gossipEncryption:\n autoGenerate: true\n name: consul\n tls:\n enableAutoEncrypt: true\n enabled: true\n server:\n replicas: 1\n \n", - "\n==> Installing Consul\n ✓ Downloaded charts.\n ✓ Consul installed in namespace \"consul\".\n", - }, - helmActionsRunner: &helm.MockActionRunner{}, - expectedReturnCode: 0, - expectCheckedForConsulInstallations: true, - expectCheckedForConsulDemoInstallations: false, - expectConsulInstalled: true, - expectConsulDemoInstalled: false, - }, - "install with demo flag returns success": { - input: []string{ - "-demo", - }, - messages: []string{ - "\n==> Checking if Consul can be installed\n ✓ No existing Consul installations found.\n ✓ No existing Consul persistent volume claims found\n ✓ No existing Consul secrets found.\n", - "\n==> Checking if Consul Demo Application can be installed\n ✓ No existing Consul demo application installations found.\n", - "\n==> Consul Installation Summary\n Name: consul\n Namespace: consul\n \n No overrides provided, using the default Helm values.\n", - "\n==> Installing Consul\n ✓ Downloaded charts.\n ✓ Consul installed in namespace \"consul\".\n", - "\n==> Consul Demo Application Installation Summary\n Name: consul-demo\n Namespace: consul\n \n \n", - "\n==> Installing Consul demo application\n ✓ Downloaded charts.\n ✓ Consul demo application installed in namespace \"consul\".\n", - "\n==> Accessing Consul Demo Application UI\n kubectl port-forward service/nginx 8080:80 --namespace consul\n Browse to http://localhost:8080.\n", - }, - helmActionsRunner: 
&helm.MockActionRunner{}, - expectedReturnCode: 0, - expectCheckedForConsulInstallations: true, - expectCheckedForConsulDemoInstallations: true, - expectConsulInstalled: true, - expectConsulDemoInstalled: true, - }, - "install with demo flag when consul demo installation errors returns error": { - input: []string{ - "-demo", - }, - messages: []string{ - "\n==> Checking if Consul can be installed\n ✓ No existing Consul installations found.\n ✓ No existing Consul persistent volume claims found\n ✓ No existing Consul secrets found.\n", - "\n==> Checking if Consul Demo Application can be installed\n ✓ No existing Consul demo application installations found.\n", - "\n==> Consul Installation Summary\n Name: consul\n Namespace: consul\n \n No overrides provided, using the default Helm values.\n", - "\n==> Installing Consul\n ✓ Downloaded charts.\n ✓ Consul installed in namespace \"consul\".\n", - "\n==> Consul Demo Application Installation Summary\n Name: consul-demo\n Namespace: consul\n \n \n", - "\n==> Installing Consul demo application\n ✓ Downloaded charts.\n ! Helm returned an error.\n", - }, - helmActionsRunner: &helm.MockActionRunner{ - InstallFunc: func(install *action.Install, chrt *chart.Chart, vals map[string]interface{}) (*helmRelease.Release, error) { - if install.ReleaseName == "consul" { - return &helmRelease.Release{Name: install.ReleaseName}, nil - } - return nil, errors.New("Helm returned an error.") - }, - }, - expectedReturnCode: 1, - expectCheckedForConsulInstallations: true, - expectCheckedForConsulDemoInstallations: true, - expectConsulInstalled: true, - expectConsulDemoInstalled: false, - }, - "install with demo flag when demo is already installed returns error and does not install consul or the demo": { - input: []string{ - "-demo", - }, - messages: []string{ - "\n==> Checking if Consul can be installed\n ✓ No existing Consul installations found.\n ✓ No existing Consul persistent volume claims found\n ✓ No existing Consul secrets found.\n", - "\n==> Checking if Consul Demo Application can be installed\n ! Cannot install Consul demo application. A Consul demo application cluster is already installed in namespace consul-demo with name consul-demo.\n Use the command `consul-k8s uninstall` to uninstall the Consul demo application from the cluster.\n", - }, - helmActionsRunner: &helm.MockActionRunner{ - CheckForInstallationsFunc: func(options *helm.CheckForInstallationsOptions) (bool, string, string, error) { - if options.ReleaseName == "consul" { - return false, "", "", nil - } else { - return true, "consul-demo", "consul-demo", nil - } - }, - }, - expectedReturnCode: 1, - expectCheckedForConsulInstallations: true, - expectCheckedForConsulDemoInstallations: true, - expectConsulInstalled: false, - expectConsulDemoInstalled: false, - }, - "install with --dry-run flag returns success": { - input: []string{ - "--dry-run", - }, - messages: []string{ - "\n==> Performing dry run install. No changes will be made to the cluster.\n", - "\n==> Checking if Consul can be installed\n ✓ No existing Consul installations found.\n ✓ No existing Consul persistent volume claims found\n ✓ No existing Consul secrets found.\n", - "\n==> Consul Installation Summary\n Name: consul\n Namespace: consul\n \n No overrides provided, using the default Helm values.\n Dry run complete. 
No changes were made to the Kubernetes cluster.\n Installation can proceed with this configuration.\n", - }, - helmActionsRunner: &helm.MockActionRunner{}, - expectedReturnCode: 0, - expectCheckedForConsulInstallations: true, - expectCheckedForConsulDemoInstallations: false, - expectConsulInstalled: false, - expectConsulDemoInstalled: false, - }, - } - for name, tc := range cases { - t.Run(name, func(t *testing.T) { - buf := new(bytes.Buffer) - c := getInitializedCommand(t, buf) - k8s = fake.NewSimpleClientset() - c.kubernetes = k8s - mock := tc.helmActionsRunner - c.helmActionsRunner = mock - if tc.preProcessingFunc != nil { - tc.preProcessingFunc() - } - input := append([]string{ - "--auto-approve", - }, tc.input...) - returnCode := c.Run(input) - require.Equal(t, tc.expectedReturnCode, returnCode) - require.Equal(t, tc.expectCheckedForConsulInstallations, mock.CheckedForConsulInstallations) - require.Equal(t, tc.expectCheckedForConsulDemoInstallations, mock.CheckedForConsulDemoInstallations) - require.Equal(t, tc.expectConsulInstalled, mock.ConsulInstalled) - require.Equal(t, tc.expectConsulDemoInstalled, mock.ConsulDemoInstalled) - output := buf.String() - for _, msg := range tc.messages { - require.Contains(t, output, msg) - } - }) - } -} - -func createPVC(t *testing.T, name string, namespace string, k8s kubernetes.Interface) { - t.Helper() - - pvc := &v1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - } - _, err := k8s.CoreV1().PersistentVolumeClaims(namespace).Create(context.Background(), pvc, metav1.CreateOptions{}) - require.NoError(t, err) -} - -func createSecret(t *testing.T, secret *v1.Secret, namespace string, k8s kubernetes.Interface) { - t.Helper() - _, err := k8s.CoreV1().Secrets(namespace).Create(context.Background(), secret, metav1.CreateOptions{}) - require.NoError(t, err) -} diff --git a/cli/cmd/proxy/loglevel/command.go b/cli/cmd/proxy/loglevel/command.go deleted file mode 100644 index 4355997ed3..0000000000 --- a/cli/cmd/proxy/loglevel/command.go +++ /dev/null @@ -1,346 +0,0 @@ -package loglevel - -import ( - "context" - "errors" - "fmt" - "strings" - "sync" - - "github.com/posener/complete" - helmCLI "helm.sh/helm/v3/pkg/cli" - "k8s.io/apimachinery/pkg/api/validation" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - - "github.com/hashicorp/consul-k8s/cli/common" - "github.com/hashicorp/consul-k8s/cli/common/envoy" - "github.com/hashicorp/consul-k8s/cli/common/flag" - "github.com/hashicorp/consul-k8s/cli/common/terminal" -) - -const ( - defaultAdminPort = 19000 - flagNameNamespace = "namespace" - flagNameUpdateLevel = "update-level" - flagNameReset = "reset" - flagNameKubeConfig = "kubeconfig" - flagNameKubeContext = "context" -) - -var ErrIncorrectArgFormat = errors.New("Exactly one positional argument is required: ") - -type LoggerConfig map[string]string - -var levelToColor = map[string]string{ - "trace": terminal.Green, - "debug": terminal.HiWhite, - "info": terminal.Blue, - "warning": terminal.Yellow, - "error": terminal.Red, - "critical": terminal.Magenta, - "off": "", -} - -type LogLevelCommand struct { - *common.BaseCommand - - kubernetes kubernetes.Interface - set *flag.Sets - - // Command Flags - podName string - namespace string - level string - reset bool - kubeConfig string - kubeContext string - - once sync.Once - help string - restConfig *rest.Config - envoyLoggingCaller func(context.Context, common.PortForwarder, *envoy.LoggerParams) (map[string]string, error) -} - 
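[Editor's note] The envoyLoggingCaller field above is the testability seam for this command: the dependency is held as a function value, Run wires in the real envoy.CallLoggingEndpoint only when the field is nil, and tests assign a stub so no network or port-forward is needed. The same idea appears in helm.MockActionRunner's InstallFunc/CheckForInstallationsFunc. A minimal sketch of the pattern, with hypothetical names:

```go
package main

import "fmt"

// Fetcher holds its dependency as a function value so tests can swap it out.
type Fetcher struct {
	fetch func(addr string) (string, error) // nil means "use the real implementation"
}

func (f *Fetcher) Run(addr string) (string, error) {
	if f.fetch == nil {
		f.fetch = realFetch // lazy default, as Run does with CallLoggingEndpoint
	}
	return f.fetch(addr)
}

func realFetch(addr string) (string, error) {
	return "", fmt.Errorf("would dial %s over the network", addr)
}

func main() {
	// In a test, inject a stub instead of the real call.
	f := &Fetcher{fetch: func(string) (string, error) { return "stubbed", nil }}
	out, _ := f.Run("localhost:19000")
	fmt.Println(out) // stubbed
}
```
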
-func (l *LogLevelCommand) init() { - l.Log.ResetNamed("loglevel") - l.set = flag.NewSets() - f := l.set.NewSet("Command Options") - f.StringVar(&flag.StringVar{ - Name: flagNameNamespace, - Target: &l.namespace, - Usage: "The namespace where the target Pod can be found.", - Aliases: []string{"n"}, - }) - - f.StringVar(&flag.StringVar{ - Name: flagNameUpdateLevel, - Target: &l.level, - Usage: "Update the level for the logger. Can be either `-update-level warning` to change all loggers to warning, or a comma delineated list of loggers with level can be passed like `-update-level grpc:warning,http:info` to only modify specific loggers.", - Aliases: []string{"u"}, - }) - - f.BoolVar(&flag.BoolVar{ - Name: flagNameReset, - Target: &l.reset, - Usage: "Reset the log level for all loggers in a pod to the Envoy default (info).", - Aliases: []string{"r"}, - }) - - f = l.set.NewSet("Global Options") - f.StringVar(&flag.StringVar{ - Name: flagNameKubeConfig, - Aliases: []string{"c"}, - Target: &l.kubeConfig, - Usage: "Set the path to kubeconfig file.", - }) - f.StringVar(&flag.StringVar{ - Name: flagNameKubeContext, - Target: &l.kubeContext, - Usage: "Set the Kubernetes context to use.", - }) - - l.help = l.set.Help() -} - -func (l *LogLevelCommand) Run(args []string) int { - l.once.Do(l.init) - defer common.CloseWithError(l.BaseCommand) - - err := l.parseFlags(args) - if err != nil { - return l.logOutputAndDie(err) - } - err = l.validateFlags() - if err != nil { - return l.logOutputAndDie(err) - } - - // if we're resetting the default log level for envoy is info: https://www.envoyproxy.io/docs/envoy/latest/start/quick-start/run-envoy#debugging-envoy - if l.reset { - l.level = "info" - } - - if l.envoyLoggingCaller == nil { - l.envoyLoggingCaller = envoy.CallLoggingEndpoint - } - - err = l.initKubernetes() - if err != nil { - return l.logOutputAndDie(err) - } - - adminPorts, err := l.fetchAdminPorts() - if err != nil { - return l.logOutputAndDie(err) - } - - err = l.fetchOrSetLogLevels(adminPorts) - if err != nil { - return l.logOutputAndDie(err) - } - - return 0 -} - -func (l *LogLevelCommand) parseFlags(args []string) error { - if len(args) == 0 { - return ErrIncorrectArgFormat - } - - positional := []string{} - // Separate positional args from keyed args - for _, arg := range args { - if strings.HasPrefix(arg, "-") { - break - } - positional = append(positional, arg) - } - keyed := args[len(positional):] - - if len(positional) != 1 { - return ErrIncorrectArgFormat - } - - l.podName = positional[0] - - err := l.set.Parse(keyed) - if err != nil { - return err - } - - return nil -} - -func (l *LogLevelCommand) validateFlags() error { - if l.level != "" && l.reset { - return fmt.Errorf("cannot set log level to %q and reset to 'info' at the same time", l.level) - } - if l.namespace == "" { - return nil - } - - errs := validation.ValidateNamespaceName(l.namespace, false) - if len(errs) > 0 { - return fmt.Errorf("invalid namespace name passed for -namespace/-n: %v", strings.Join(errs, "; ")) - } - - return nil -} - -func (l *LogLevelCommand) initKubernetes() error { - settings := helmCLI.New() - var err error - - if l.kubeConfig != "" { - settings.KubeConfig = l.kubeConfig - } - - if l.kubeContext != "" { - settings.KubeContext = l.kubeContext - } - - if l.restConfig == nil { - l.restConfig, err = settings.RESTClientGetter().ToRESTConfig() - if err != nil { - return fmt.Errorf("error creating Kubernetes REST config %v", err) - } - - } - - if l.kubernetes == nil { - l.kubernetes, err = 
kubernetes.NewForConfig(l.restConfig) - if err != nil { - return fmt.Errorf("error creating Kubernetes client %v", err) - } - } - if l.namespace == "" { - l.namespace = settings.Namespace() - } - - return nil -} - -// fetchAdminPorts retrieves all admin ports for Envoy Proxies running in a pod given namespace. -func (l *LogLevelCommand) fetchAdminPorts() (map[string]int, error) { - adminPorts := make(map[string]int, 0) - pod, err := l.kubernetes.CoreV1().Pods(l.namespace).Get(l.Ctx, l.podName, metav1.GetOptions{}) - if err != nil { - return adminPorts, err - } - - connectService, isMultiport := pod.Annotations["consul.hashicorp.com/connect-service"] - - if !isMultiport { - // Return the default port configuration. - adminPorts[l.podName] = defaultAdminPort - return adminPorts, nil - } - - for idx, svc := range strings.Split(connectService, ",") { - adminPorts[svc] = defaultAdminPort + idx - } - - return adminPorts, nil -} - -func (l *LogLevelCommand) fetchOrSetLogLevels(adminPorts map[string]int) error { - loggers := make(map[string]LoggerConfig, 0) - - for name, port := range adminPorts { - pf := common.PortForward{ - Namespace: l.namespace, - PodName: l.podName, - RemotePort: port, - KubeClient: l.kubernetes, - RestConfig: l.restConfig, - } - params, err := parseParams(l.level) - if err != nil { - return err - } - logLevels, err := l.envoyLoggingCaller(l.Ctx, &pf, params) - if err != nil { - return err - } - loggers[name] = logLevels - } - - l.outputLevels(loggers) - return nil -} - -func parseParams(params string) (*envoy.LoggerParams, error) { - loggerParams := envoy.NewLoggerParams() - if len(params) == 0 { - return loggerParams, nil - } - - // contains global log level change - if !strings.Contains(params, ":") { - err := loggerParams.SetGlobalLoggerLevel(params) - if err != nil { - return nil, err - } - return loggerParams, nil - } - - // contains changes to at least 1 specific log level - loggerChanges := strings.Split(params, ",") - - for _, logger := range loggerChanges { - levelValues := strings.Split(logger, ":") - err := loggerParams.SetLoggerLevel(levelValues[0], levelValues[1]) - if err != nil { - return nil, err - } - } - return loggerParams, nil -} - -func (l *LogLevelCommand) outputLevels(logLevels map[string]LoggerConfig) { - l.UI.Output(fmt.Sprintf("Envoy log configuration for %s in namespace default:", l.podName)) - for n, levels := range logLevels { - l.UI.Output(fmt.Sprintf("Log Levels for %s", n), terminal.WithHeaderStyle()) - table := terminal.NewTable("Name", "Level") - for name, level := range levels { - table.AddRow([]string{name, level}, []string{"", levelToColor[level]}) - } - l.UI.Table(table) - l.UI.Output("") - } -} - -func (l *LogLevelCommand) Help() string { - l.once.Do(l.init) - return fmt.Sprintf("%s\n\nUsage: consul-k8s proxy log [flags]\n\n%s", l.Synopsis(), l.help) -} - -func (l *LogLevelCommand) Synopsis() string { - return "Inspect and Modify the Envoy Log configuration for a given Pod." -} - -// AutocompleteFlags returns a mapping of supported flags and autocomplete -// options for this command. The map key for the Flags map should be the -// complete flag such as "-foo" or "--foo". -func (l *LogLevelCommand) AutocompleteFlags() complete.Flags { - return complete.Flags{ - fmt.Sprintf("-%s", flagNameNamespace): complete.PredictNothing, - fmt.Sprintf("-%s", flagNameKubeConfig): complete.PredictFiles("*"), - fmt.Sprintf("-%s", flagNameKubeContext): complete.PredictNothing, - } -} - -// AutocompleteArgs returns the argument predictor for this command. 
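[Editor's note] On parseParams above: it accepts either a bare level ("warning") or comma-separated logger:level pairs ("grpc:warning,http:info"). One caveat in the removed code is that strings.Split(logger, ":") is indexed without a length check, so a malformed segment such as "grpc" inside a comma list would panic with an index out of range. A hedged sketch of the same split with validation (parsePairs is a hypothetical variant, not the shipped function):

```go
package main

import (
	"fmt"
	"strings"
)

// parsePairs validates each "logger:level" pair instead of assuming a
// well-formed split, rejecting segments with a missing name or level.
func parsePairs(params string) (map[string]string, error) {
	levels := make(map[string]string)
	for _, pair := range strings.Split(params, ",") {
		name, level, ok := strings.Cut(pair, ":")
		if !ok || name == "" || level == "" {
			return nil, fmt.Errorf("malformed logger level %q, want name:level", pair)
		}
		levels[name] = level
	}
	return levels, nil
}

func main() {
	fmt.Println(parsePairs("grpc:warning,http:info"))
}
```
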
-// Since argument completion is not supported, this will return -// complete.PredictNothing. -func (l *LogLevelCommand) AutocompleteArgs() complete.Predictor { - return complete.PredictNothing -} - -func (l *LogLevelCommand) logOutputAndDie(err error) int { - l.UI.Output(err.Error(), terminal.WithErrorStyle()) - l.UI.Output(fmt.Sprintf("\n%s", l.Help())) - return 1 -} diff --git a/cli/cmd/proxy/loglevel/command_test.go b/cli/cmd/proxy/loglevel/command_test.go deleted file mode 100644 index 1c5387fd5f..0000000000 --- a/cli/cmd/proxy/loglevel/command_test.go +++ /dev/null @@ -1,293 +0,0 @@ -package loglevel - -import ( - "bytes" - "context" - "fmt" - "io" - "os" - "regexp" - "testing" - - "github.com/stretchr/testify/require" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes/fake" - - "github.com/hashicorp/consul-k8s/cli/common" - "github.com/hashicorp/consul-k8s/cli/common/envoy" - "github.com/hashicorp/consul-k8s/cli/common/terminal" - "github.com/hashicorp/go-hclog" -) - -func TestFlagParsingFails(t *testing.T) { - t.Parallel() - testCases := map[string]struct { - args []string - out int - }{ - "No args": { - args: []string{}, - out: 1, - }, - "Multiple podnames passed": { - args: []string{"podname", "podname2"}, - out: 1, - }, - "Nonexistent flag passed, -foo bar": { - args: []string{"podName", "-foo", "bar"}, - out: 1, - }, - "Invalid argument passed, -namespace YOLO": { - args: []string{"podName", "-namespace", "YOLO"}, - out: 1, - }, - } - podName := "now-this-is-pod-racing" - fakePod := v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: podName, - Namespace: "default", - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - c := setupCommand(bytes.NewBuffer([]byte{})) - c.kubernetes = fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{fakePod}}) - c.envoyLoggingCaller = func(context.Context, common.PortForwarder, *envoy.LoggerParams) (map[string]string, error) { - return testLogConfig, nil - } - - out := c.Run(tc.args) - require.Equal(t, tc.out, out) - }) - } -} - -func TestFlagParsingSucceeds(t *testing.T) { - t.Parallel() - podName := "now-this-is-pod-racing" - testCases := map[string]struct { - args []string - podNamespace string - out int - }{ - "With single pod name": { - args: []string{podName}, - podNamespace: "default", - out: 0, - }, - "With single pod name and namespace": { - args: []string{podName, "-n", "another"}, - podNamespace: "another", - out: 0, - }, - "With single pod name and blanket level": { - args: []string{podName, "-u", "warning"}, - podNamespace: "default", - out: 0, - }, - "With single pod name and single level": { - args: []string{podName, "-u", "grpc:warning"}, - podNamespace: "default", - out: 0, - }, - "With single pod name and multiple levels": { - args: []string{podName, "-u", "grpc:warning,http:info"}, - podNamespace: "default", - out: 0, - }, - "With single pod name and blanket level full flag": { - args: []string{podName, "-update-level", "warning"}, - podNamespace: "default", - out: 0, - }, - "With single pod name and single level full flag": { - args: []string{podName, "-update-level", "grpc:warning"}, - podNamespace: "default", - out: 0, - }, - "With single pod name and multiple levels full flag": { - args: []string{podName, "-update-level", "grpc:warning,http:info"}, - podNamespace: "default", - out: 0, - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - fakePod := v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: podName, - 
Namespace: tc.podNamespace, - }, - } - - c := setupCommand(bytes.NewBuffer([]byte{})) - c.kubernetes = fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{fakePod}}) - c.envoyLoggingCaller = func(context.Context, common.PortForwarder, *envoy.LoggerParams) (map[string]string, error) { - return testLogConfig, nil - } - - out := c.Run(tc.args) - require.Equal(t, tc.out, out) - }) - } -} - -func TestOutputForGettingLogLevels(t *testing.T) { - t.Parallel() - podName := "now-this-is-pod-racing" - expectedHeader := fmt.Sprintf("Envoy log configuration for %s in namespace default:", podName) - fakePod := v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: podName, - Namespace: "default", - }, - } - - buf := bytes.NewBuffer([]byte{}) - c := setupCommand(buf) - newLogLevel := "warning" - config := make(map[string]string, len(testLogConfig)) - for logger := range testLogConfig { - config[logger] = newLogLevel - } - - c.envoyLoggingCaller = func(context.Context, common.PortForwarder, *envoy.LoggerParams) (map[string]string, error) { - return config, nil - } - c.kubernetes = fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{fakePod}}) - - args := []string{podName, "-u", newLogLevel} - out := c.Run(args) - require.Equal(t, 0, out) - - actual := buf.String() - - require.Regexp(t, expectedHeader, actual) - require.Regexp(t, "Log Levels for now-this-is-pod-racing", actual) - for logger, level := range config { - require.Regexp(t, regexp.MustCompile(logger+`.*`+level), actual) - } -} - -func TestOutputForSettingLogLevels(t *testing.T) { - t.Parallel() - podName := "now-this-is-pod-racing" - expectedHeader := fmt.Sprintf("Envoy log configuration for %s in namespace default:", podName) - fakePod := v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: podName, - Namespace: "default", - }, - } - - buf := bytes.NewBuffer([]byte{}) - c := setupCommand(buf) - c.envoyLoggingCaller = func(context.Context, common.PortForwarder, *envoy.LoggerParams) (map[string]string, error) { - return testLogConfig, nil - } - c.kubernetes = fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{fakePod}}) - - args := []string{podName, "-u", "warning"} - out := c.Run(args) - require.Equal(t, 0, out) - - actual := buf.String() - - require.Regexp(t, expectedHeader, actual) - require.Regexp(t, "Log Levels for now-this-is-pod-racing", actual) - for logger, level := range testLogConfig { - require.Regexp(t, regexp.MustCompile(logger+`.*`+level), actual) - } -} - -func TestHelp(t *testing.T) { - t.Parallel() - buf := bytes.NewBuffer([]byte{}) - c := setupCommand(buf) - expectedSynposis := "Inspect and Modify the Envoy Log configuration for a given Pod." 
- expectedUsage := `Usage: consul-k8s proxy log \[flags\]` - actual := c.Help() - require.Regexp(t, expectedSynposis, actual) - require.Regexp(t, expectedUsage, actual) -} - -func setupCommand(buf io.Writer) *LogLevelCommand { - log := hclog.New(&hclog.LoggerOptions{ - Name: "test", - Level: hclog.Debug, - Output: os.Stdout, - }) - - command := &LogLevelCommand{ - BaseCommand: &common.BaseCommand{ - Log: log, - UI: terminal.NewUI(context.Background(), buf), - }, - } - command.init() - return command -} - -var testLogConfig = map[string]string{ - "admin": "debug", - "alternate_protocols_cache": "debug", - "aws": "debug", - "assert": "debug", - "backtrace": "debug", - "cache_filter": "debug", - "client": "debug", - "config": "debug", - "connection": "debug", - "conn_handler": "debug", - "decompression": "debug", - "dns": "debug", - "dubbo": "debug", - "envoy_bug": "debug", - "ext_authz": "debug", - "ext_proc": "debug", - "rocketmq": "debug", - "file": "debug", - "filter": "debug", - "forward_proxy": "debug", - "grpc": "debug", - "happy_eyeballs": "debug", - "hc": "debug", - "health_checker": "debug", - "http": "debug", - "http2": "debug", - "hystrix": "debug", - "init": "debug", - "io": "debug", - "jwt": "debug", - "kafka": "debug", - "key_value_store": "debug", - "lua": "debug", - "main": "debug", - "matcher": "debug", - "misc": "debug", - "mongo": "debug", - "multi_connection": "debug", - "oauth2": "debug", - "quic": "debug", - "quic_stream": "debug", - "pool": "debug", - "rbac": "debug", - "rds": "debug", - "redis": "debug", - "router": "debug", - "runtime": "debug", - "stats": "debug", - "secret": "debug", - "tap": "debug", - "testing": "debug", - "thrift": "debug", - "tracing": "debug", - "upstream": "debug", - "udp": "debug", - "wasm": "debug", - "websocket": "debug", -} diff --git a/cli/cmd/proxy/read/command.go b/cli/cmd/proxy/read/command.go index e85d7166e9..ad2bb96303 100644 --- a/cli/cmd/proxy/read/command.go +++ b/cli/cmd/proxy/read/command.go @@ -7,6 +7,9 @@ import ( "strings" "sync" + "github.com/hashicorp/consul-k8s/cli/common" + "github.com/hashicorp/consul-k8s/cli/common/flag" + "github.com/hashicorp/consul-k8s/cli/common/terminal" "github.com/posener/complete" helmCLI "helm.sh/helm/v3/pkg/cli" "k8s.io/apimachinery/pkg/api/validation" @@ -14,11 +17,6 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/utils/strings/slices" - - "github.com/hashicorp/consul-k8s/cli/common" - "github.com/hashicorp/consul-k8s/cli/common/envoy" - "github.com/hashicorp/consul-k8s/cli/common/flag" - "github.com/hashicorp/consul-k8s/cli/common/terminal" ) // defaultAdminPort is the port where the Envoy admin API is exposed. 
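[Editor's note] The removed setupCommand above shows the output-capture pattern these CLI tests rely on: terminal.NewUI accepts any io.Writer, so handing it a bytes.Buffer lets assertions run against the rendered text. A generic sketch of the idea (WriterUI is a stand-in type, not the real terminal package):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// WriterUI is a stand-in for a UI that renders to an injected io.Writer,
// the way terminal.NewUI(ctx, buf) does in the deleted tests.
type WriterUI struct{ w io.Writer }

func (u WriterUI) Output(msg string) { fmt.Fprintln(u.w, msg) }

func main() {
	buf := new(bytes.Buffer)
	ui := WriterUI{w: buf}

	ui.Output("Log Levels for now-this-is-pod-racing")

	// A test would assert on the captured text, e.g. with require.Regexp.
	fmt.Println(strings.Contains(buf.String(), "pod-racing")) // true
}
```
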
@@ -69,7 +67,7 @@ type ReadCommand struct { flagKubeConfig string flagKubeContext string - fetchConfig func(context.Context, common.PortForwarder) (*envoy.EnvoyConfig, error) + fetchConfig func(context.Context, common.PortForwarder) (*EnvoyConfig, error) restConfig *rest.Config @@ -79,7 +77,7 @@ type ReadCommand struct { func (c *ReadCommand) init() { if c.fetchConfig == nil { - c.fetchConfig = envoy.FetchConfig + c.fetchConfig = FetchConfig } c.set = flag.NewSets() @@ -322,8 +320,8 @@ func (c *ReadCommand) fetchAdminPorts() (map[string]int, error) { return adminPorts, nil } -func (c *ReadCommand) fetchConfigs(adminPorts map[string]int) (map[string]*envoy.EnvoyConfig, error) { - configs := make(map[string]*envoy.EnvoyConfig, 0) +func (c *ReadCommand) fetchConfigs(adminPorts map[string]int) (map[string]*EnvoyConfig, error) { + configs := make(map[string]*EnvoyConfig, 0) for name, adminPort := range adminPorts { pf := common.PortForward{ @@ -345,7 +343,7 @@ func (c *ReadCommand) fetchConfigs(adminPorts map[string]int) (map[string]*envoy return configs, nil } -func (c *ReadCommand) outputConfigs(configs map[string]*envoy.EnvoyConfig) error { +func (c *ReadCommand) outputConfigs(configs map[string]*EnvoyConfig) error { switch c.flagOutput { case Table: return c.outputTables(configs) @@ -398,7 +396,7 @@ func (c *ReadCommand) filterWarnings() []string { return warnings } -func (c *ReadCommand) outputTables(configs map[string]*envoy.EnvoyConfig) error { +func (c *ReadCommand) outputTables(configs map[string]*EnvoyConfig) error { if c.flagFQDN != "" || c.flagAddress != "" || c.flagPort != -1 { c.UI.Output("Filters applied", terminal.WithHeaderStyle()) @@ -433,7 +431,7 @@ func (c *ReadCommand) outputTables(configs map[string]*envoy.EnvoyConfig) error return nil } -func (c *ReadCommand) outputJSON(configs map[string]*envoy.EnvoyConfig) error { +func (c *ReadCommand) outputJSON(configs map[string]*EnvoyConfig) error { cfgs := make(map[string]interface{}) for name, config := range configs { cfg := make(map[string]interface{}) @@ -469,11 +467,11 @@ func (c *ReadCommand) outputJSON(configs map[string]*envoy.EnvoyConfig) error { return nil } -func (c *ReadCommand) outputRaw(configs map[string]*envoy.EnvoyConfig) error { +func (c *ReadCommand) outputRaw(configs map[string]*EnvoyConfig) error { cfgs := make(map[string]interface{}, 0) for name, config := range configs { var cfg interface{} - if err := json.Unmarshal(config.RawCfg, &cfg); err != nil { + if err := json.Unmarshal(config.rawCfg, &cfg); err != nil { return err } @@ -490,7 +488,7 @@ func (c *ReadCommand) outputRaw(configs map[string]*envoy.EnvoyConfig) error { return nil } -func (c *ReadCommand) outputClustersTable(clusters []envoy.Cluster) { +func (c *ReadCommand) outputClustersTable(clusters []Cluster) { if !c.shouldPrintTable(c.flagClusters) { return } @@ -498,16 +496,14 @@ func (c *ReadCommand) outputClustersTable(clusters []envoy.Cluster) { c.UI.Output(fmt.Sprintf("Clusters (%d)", len(clusters)), terminal.WithHeaderStyle()) table := terminal.NewTable("Name", "FQDN", "Endpoints", "Type", "Last Updated") for _, cluster := range clusters { - table.AddRow([]string{ - cluster.Name, cluster.FullyQualifiedDomainName, strings.Join(cluster.Endpoints, ", "), - cluster.Type, cluster.LastUpdated, - }, []string{}) + table.AddRow([]string{cluster.Name, cluster.FullyQualifiedDomainName, strings.Join(cluster.Endpoints, ", "), + cluster.Type, cluster.LastUpdated}, []string{}) } c.UI.Table(table) c.UI.Output("") } -func (c *ReadCommand) 
outputEndpointsTable(endpoints []envoy.Endpoint) { +func (c *ReadCommand) outputEndpointsTable(endpoints []Endpoint) { if !c.shouldPrintTable(c.flagEndpoints) { return } @@ -516,7 +512,7 @@ func (c *ReadCommand) outputEndpointsTable(endpoints []envoy.Endpoint) { c.UI.Table(formatEndpoints(endpoints)) } -func (c *ReadCommand) outputListenersTable(listeners []envoy.Listener) { +func (c *ReadCommand) outputListenersTable(listeners []Listener) { if !c.shouldPrintTable(c.flagListeners) { return } @@ -525,7 +521,7 @@ func (c *ReadCommand) outputListenersTable(listeners []envoy.Listener) { c.UI.Table(formatListeners(listeners)) } -func (c *ReadCommand) outputRoutesTable(routes []envoy.Route) { +func (c *ReadCommand) outputRoutesTable(routes []Route) { if !c.shouldPrintTable(c.flagRoutes) { return } @@ -534,7 +530,7 @@ func (c *ReadCommand) outputRoutesTable(routes []envoy.Route) { c.UI.Table(formatRoutes(routes)) } -func (c *ReadCommand) outputSecretsTable(secrets []envoy.Secret) { +func (c *ReadCommand) outputSecretsTable(secrets []Secret) { if !c.shouldPrintTable(c.flagSecrets) { return } diff --git a/cli/cmd/proxy/read/command_test.go b/cli/cmd/proxy/read/command_test.go index 1e439bce91..27f19e7370 100644 --- a/cli/cmd/proxy/read/command_test.go +++ b/cli/cmd/proxy/read/command_test.go @@ -9,18 +9,16 @@ import ( "os" "testing" + "github.com/hashicorp/consul-k8s/cli/common" + cmnFlag "github.com/hashicorp/consul-k8s/cli/common/flag" + "github.com/hashicorp/consul-k8s/cli/common/terminal" + "github.com/hashicorp/go-hclog" "github.com/posener/complete" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/fake" - - "github.com/hashicorp/consul-k8s/cli/common" - "github.com/hashicorp/consul-k8s/cli/common/envoy" - cmnFlag "github.com/hashicorp/consul-k8s/cli/common/flag" - "github.com/hashicorp/consul-k8s/cli/common/terminal" - "github.com/hashicorp/go-hclog" ) func TestFlagParsing(t *testing.T) { @@ -67,48 +65,38 @@ func TestReadCommandOutput(t *testing.T) { // These regular expressions must be present in the output. 
expectedHeader := fmt.Sprintf("Envoy configuration for %s in namespace default:", podName) expected := map[string][]string{ - "-clusters": { - "==> Clusters \\(5\\)", + "-clusters": {"==> Clusters \\(5\\)", "Name.*FQDN.*Endpoints.*Type.*Last Updated", "local_agent.*192\\.168\\.79\\.187:8502.*STATIC.*2022-05-13T04:22:39\\.553Z", "local_app.*127\\.0\\.0\\.1:8080.*STATIC.*2022-05-13T04:22:39\\.655Z", "client.*client\\.default\\.dc1\\.internal\\.bc3815c2-1a0f-f3ff-a2e9-20d791f08d00\\.consul.*EDS", "frontend.*frontend\\.default\\.dc1\\.internal\\.bc3815c2-1a0f-f3ff-a2e9-20d791f08d00\\.consul", - "original-destination.*ORIGINAL_DST", - }, + "original-destination.*ORIGINAL_DST"}, - "-endpoints": { - "==> Endpoints \\(6\\)", + "-endpoints": {"==> Endpoints \\(6\\)", "Address:Port.*Cluster.*Weight.*Status", "192.168.79.187:8502.*local_agent.*1.00.*HEALTHY", "127.0.0.1:8080.*local_app.*1.00.*HEALTHY", "192.168.18.110:20000.*client.*1.00.*HEALTHY", "192.168.52.101:20000.*client.*1.00.*HEALTHY", "192.168.65.131:20000.*client.*1.00.*HEALTHY", - "192.168.63.120:20000.*frontend.*1.00.*HEALTHY", - }, + "192.168.63.120:20000.*frontend.*1.00.*HEALTHY"}, - "-listeners": { - "==> Listeners \\(2\\)", + "-listeners": {"==> Listeners \\(2\\)", "Name.*Address:Port.*Direction.*Filter Chain Match.*Filters.*Last Updated", "public_listener.*192\\.168\\.69\\.179:20000.*INBOUND.*Any.*\\* -> local_app/", "outbound_listener.*127.0.0.1:15001.*OUTBOUND.*10\\.100\\.134\\.173/32, 240\\.0\\.0\\.3/32.*TCP: -> client", "10\\.100\\.31\\.2/32, 240\\.0\\.0\\.5/32.*TCP: -> frontend", - "Any.*TCP: -> original-destination", - }, + "Any.*TCP: -> original-destination"}, - "-routes": { - "==> Routes \\(1\\)", + "-routes": {"==> Routes \\(1\\)", "Name.*Destination Cluster.*Last Updated", - "public_listener.*local_app/", - }, + "public_listener.*local_app/"}, - "-secrets": { - "==> Secrets \\(2\\)", + "-secrets": {"==> Secrets \\(2\\)", "Name.*Type.*Last Updated", "default.*Dynamic Active", - "ROOTCA.*Dynamic Warming", - }, + "ROOTCA.*Dynamic Warming"}, } cases := map[string][]string{ @@ -134,7 +122,7 @@ func TestReadCommandOutput(t *testing.T) { c.kubernetes = fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{fakePod}}) // A fetchConfig function that just returns the test Envoy config. - c.fetchConfig = func(context.Context, common.PortForwarder) (*envoy.EnvoyConfig, error) { + c.fetchConfig = func(context.Context, common.PortForwarder) (*EnvoyConfig, error) { return testEnvoyConfig, nil } @@ -242,7 +230,7 @@ func TestFilterWarnings(t *testing.T) { buf := new(bytes.Buffer) c := setupCommand(buf) c.kubernetes = fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{fakePod}}) - c.fetchConfig = func(context.Context, common.PortForwarder) (*envoy.EnvoyConfig, error) { + c.fetchConfig = func(context.Context, common.PortForwarder) (*EnvoyConfig, error) { return testEnvoyConfig, nil } @@ -307,73 +295,3 @@ func TestTaskCreateCommand_AutocompleteArgs(t *testing.T) { c := cmd.AutocompleteArgs() assert.Equal(t, complete.PredictNothing, c) } - -// testEnvoyConfig is what we expect the config at `test_config_dump.json` to be. 
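[Editor's note] Before the removed testEnvoyConfig fixture below: the expected map reshaped above pairs each output flag with regular expressions rather than exact strings, which keeps assertions stable against the variable whitespace of table padding. A small self-contained sketch of that loop (runCommand is a hypothetical stand-in that returns the captured CLI output):

```go
package read

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// runCommand is a hypothetical stand-in for wiring the command to a
// buffer-backed UI and returning everything it printed.
func runCommand(t *testing.T, flag string) string {
	t.Helper()
	return "==> Clusters (5)\nlocal_agent   STATIC"
}

func TestOutputSketch(t *testing.T) {
	// Flag -> regexps that must appear somewhere in the rendered table.
	expected := map[string][]string{
		"-clusters": {`==> Clusters \(5\)`, `local_agent.*STATIC`},
	}
	for flagName, regexps := range expected {
		out := runCommand(t, flagName)
		for _, re := range regexps {
			require.Regexp(t, re, out)
		}
	}
}
```
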
- -var testEnvoyConfig = &envoy.EnvoyConfig{ - Clusters: []envoy.Cluster{ - {Name: "local_agent", FullyQualifiedDomainName: "local_agent", Endpoints: []string{"192.168.79.187:8502"}, Type: "STATIC", LastUpdated: "2022-05-13T04:22:39.553Z"}, - - {Name: "client", FullyQualifiedDomainName: "client.default.dc1.internal.bc3815c2-1a0f-f3ff-a2e9-20d791f08d00.consul", Endpoints: []string{"192.168.18.110:20000", "192.168.52.101:20000", "192.168.65.131:20000"}, Type: "EDS", LastUpdated: "2022-08-10T12:30:32.326Z"}, - - {Name: "frontend", FullyQualifiedDomainName: "frontend.default.dc1.internal.bc3815c2-1a0f-f3ff-a2e9-20d791f08d00.consul", Endpoints: []string{"192.168.63.120:20000"}, Type: "EDS", LastUpdated: "2022-08-10T12:30:32.233Z"}, - - {Name: "local_app", FullyQualifiedDomainName: "local_app", Endpoints: []string{"127.0.0.1:8080"}, Type: "STATIC", LastUpdated: "2022-05-13T04:22:39.655Z"}, - - {Name: "original-destination", FullyQualifiedDomainName: "original-destination", Endpoints: []string{}, Type: "ORIGINAL_DST", LastUpdated: "2022-05-13T04:22:39.743Z"}, - }, - - Endpoints: []envoy.Endpoint{ - {Address: "192.168.79.187:8502", Cluster: "local_agent", Weight: 1, Status: "HEALTHY"}, - - {Address: "192.168.18.110:20000", Cluster: "client", Weight: 1, Status: "HEALTHY"}, - - {Address: "192.168.52.101:20000", Cluster: "client", Weight: 1, Status: "HEALTHY"}, - - {Address: "192.168.65.131:20000", Cluster: "client", Weight: 1, Status: "HEALTHY"}, - - {Address: "192.168.63.120:20000", Cluster: "frontend", Weight: 1, Status: "HEALTHY"}, - - {Address: "127.0.0.1:8080", Cluster: "local_app", Weight: 1, Status: "HEALTHY"}, - }, - - Listeners: []envoy.Listener{ - {Name: "public_listener", Address: "192.168.69.179:20000", FilterChain: []envoy.FilterChain{{Filters: []string{"HTTP: * -> local_app/"}, FilterChainMatch: "Any"}}, Direction: "INBOUND", LastUpdated: "2022-08-10T12:30:47.142Z"}, - - {Name: "outbound_listener", Address: "127.0.0.1:15001", FilterChain: []envoy.FilterChain{ - {Filters: []string{"TCP: -> client"}, FilterChainMatch: "10.100.134.173/32, 240.0.0.3/32"}, - - {Filters: []string{"TCP: -> frontend"}, FilterChainMatch: "10.100.31.2/32, 240.0.0.5/32"}, - - {Filters: []string{"TCP: -> original-destination"}, FilterChainMatch: "Any"}, - }, Direction: "OUTBOUND", LastUpdated: "2022-07-18T15:31:03.246Z"}, - }, - - Routes: []envoy.Route{ - { - Name: "public_listener", - - DestinationCluster: "local_app/", - - LastUpdated: "2022-08-10T12:30:47.141Z", - }, - }, - - Secrets: []envoy.Secret{ - { - Name: "default", - - Type: "Dynamic Active", - - LastUpdated: "2022-05-24T17:41:59.078Z", - }, - - { - Name: "ROOTCA", - - Type: "Dynamic Warming", - - LastUpdated: "2022-03-15T05:14:22.868Z", - }, - }, -} diff --git a/cli/common/envoy/http.go b/cli/cmd/proxy/read/config.go similarity index 91% rename from cli/common/envoy/http.go rename to cli/cmd/proxy/read/config.go index 88299d6724..e7e6bcad34 100644 --- a/cli/common/envoy/http.go +++ b/cli/cmd/proxy/read/config.go @@ -1,10 +1,8 @@ -package envoy +package read import ( - "bytes" "context" "encoding/json" - "errors" "fmt" "io" "net" @@ -14,13 +12,11 @@ import ( "github.com/hashicorp/consul-k8s/cli/common" ) -var ErrNoLoggersReturned = errors.New("No loggers were returned from Envoy") - // EnvoyConfig represents the configuration retrieved from a config dump at the // admin endpoint. It wraps the Envoy ConfigDump struct to give us convenient // access to the different sections of the config. 
type EnvoyConfig struct {
-	RawCfg    []byte
+	rawCfg    []byte
 	Clusters  []Cluster
 	Endpoints []Endpoint
 	Listeners []Listener
@@ -73,54 +69,6 @@ type Secret struct {
 	LastUpdated string
 }
 
-// CallLoggingEndpoint requests the logging endpoint from Envoy Admin Interface for a given port
-// This is used to both read and update the logging levels (the envoy admin interface uses the same endpoint for both)
-// more can be read about that endpoint https://www.envoyproxy.io/docs/envoy/latest/operations/admin#post--logging
-func CallLoggingEndpoint(ctx context.Context, portForward common.PortForwarder, params *LoggerParams) (map[string]string, error) {
-	endpoint, err := portForward.Open(ctx)
-	if err != nil {
-		return nil, err
-	}
-
-	defer portForward.Close()
-
-	// this endpoint does not support returning json, so we've gotta parse the plain text
-	response, err := http.Post(fmt.Sprintf("http://%s/logging%s", endpoint, params), "text/plain", bytes.NewBuffer([]byte{}))
-	if err != nil {
-		return nil, err
-	}
-
-	body, err := io.ReadAll(response.Body)
-	if err != nil {
-		return nil, fmt.Errorf("failed to reach envoy: %v", err)
-	}
-
-	if response.StatusCode >= 400 {
-		return nil, fmt.Errorf("call to envoy failed with status code: %d, and message: %s", response.StatusCode, body)
-	}
-
-	loggers := strings.Split(string(body), "\n")
-	if len(loggers) == 0 {
-		return nil, ErrNoLoggersReturned
-	}
-
-	logLevels := make(map[string]string)
-	var name string
-	var level string
-
-	// the first line here is just a header
-	for _, logger := range loggers[1:] {
-		if len(logger) == 0 {
-			continue
-		}
-		fmt.Sscanf(logger, "%s %s", &name, &level)
-		name = strings.TrimRight(name, ":")
-		logLevels[name] = level
-	}
-
-	return logLevels, nil
-}
-
 // FetchConfig opens a port forward to the Envoy admin API and fetches the
 // configuration from the config dump endpoint.
 func FetchConfig(ctx context.Context, portForward common.PortForwarder) (*EnvoyConfig, error) {
@@ -169,7 +117,7 @@ func FetchConfig(ctx context.Context, portForward common.PortForwarder) (*EnvoyC
 // JSON returns the original JSON Envoy config dump data which was used to create
 // the Config object.
 func (c *EnvoyConfig) JSON() []byte {
-	return c.RawCfg
+	return c.rawCfg
 }
 
 // UnmarshalJSON implements the json.Unmarshaler interface to unmarshal the raw
@@ -178,7 +126,7 @@ func (c *EnvoyConfig) JSON() []byte {
 	// Save the original config dump bytes for marshalling. We should treat this
 	// struct as immutable so this should be safe.
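[Editor's note] The comment above describes a useful trick: EnvoyConfig implements json.Unmarshaler and stashes the original bytes before decoding, so JSON() can return the config dump byte-for-byte. A compact sketch of the same pattern under hypothetical names:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Config keeps the raw JSON alongside the decoded fields, mirroring
// EnvoyConfig's rawCfg/JSON() pair.
type Config struct {
	raw  []byte
	Name string `json:"name"`
}

func (c *Config) UnmarshalJSON(b []byte) error {
	c.raw = b // save the original bytes for later round-tripping

	// Decode into an alias type so this method is not re-entered recursively.
	type alias Config
	var a alias
	if err := json.Unmarshal(b, &a); err != nil {
		return err
	}
	c.Name = a.Name
	return nil
}

func (c *Config) JSON() []byte { return c.raw }

func main() {
	var cfg Config
	_ = json.Unmarshal([]byte(`{"name":"envoy"}`), &cfg)
	fmt.Println(cfg.Name, string(cfg.JSON())) // envoy {"name":"envoy"}
}
```
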
- c.RawCfg = b + c.rawCfg = b var root root err := json.Unmarshal(b, &root) diff --git a/cli/common/envoy/http_test.go b/cli/cmd/proxy/read/config_test.go similarity index 93% rename from cli/common/envoy/http_test.go rename to cli/cmd/proxy/read/config_test.go index 291eb2c22b..6b0e425794 100644 --- a/cli/common/envoy/http_test.go +++ b/cli/cmd/proxy/read/config_test.go @@ -1,38 +1,21 @@ -package envoy +package read import ( "bytes" "context" + "embed" "encoding/json" "fmt" "net/http" "net/http/httptest" - "os" "strings" "testing" "github.com/stretchr/testify/require" ) -func TestCallLoggingEndpoint(t *testing.T) { - t.Parallel() - rawLogLevels, err := os.ReadFile("testdata/fetch_debug_levels.txt") - require.NoError(t, err) - mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Write(rawLogLevels) - })) - - defer mockServer.Close() - - mpf := &mockPortForwarder{ - openBehavior: func(ctx context.Context) (string, error) { - return strings.Replace(mockServer.URL, "http://", "", 1), nil - }, - } - logLevels, err := CallLoggingEndpoint(context.Background(), mpf, NewLoggerParams()) - require.NoError(t, err) - require.Equal(t, testLogConfig(), logLevels) -} +//go:embed test_config_dump.json test_clusters.json +var fs embed.FS const ( testConfigDump = "test_config_dump.json" @@ -52,7 +35,7 @@ func TestUnmarshaling(t *testing.T) { } func TestJSON(t *testing.T) { - raw, err := os.ReadFile(fmt.Sprintf("testdata/%s", testConfigDump)) + raw, err := fs.ReadFile(testConfigDump) require.NoError(t, err) expected := bytes.TrimSpace(raw) @@ -66,10 +49,10 @@ func TestJSON(t *testing.T) { } func TestFetchConfig(t *testing.T) { - configDump, err := os.ReadFile(fmt.Sprintf("testdata/%s", testConfigDump)) + configDump, err := fs.ReadFile(testConfigDump) require.NoError(t, err) - clusters, err := os.ReadFile(fmt.Sprintf("testdata/%s", testClusters)) + clusters, err := fs.ReadFile(testClusters) require.NoError(t, err) mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -463,11 +446,18 @@ func TestClusterParsingEndpoints(t *testing.T) { require.Equal(t, expected, actual) } +type mockPortForwarder struct { + openBehavior func(context.Context) (string, error) +} + +func (m *mockPortForwarder) Open(ctx context.Context) (string, error) { return m.openBehavior(ctx) } +func (m *mockPortForwarder) Close() {} + func rawEnvoyConfig(t *testing.T) []byte { - configDump, err := os.ReadFile(fmt.Sprintf("testdata/%s", testConfigDump)) + configDump, err := fs.ReadFile(testConfigDump) require.NoError(t, err) - clusters, err := os.ReadFile(fmt.Sprintf("testdata/%s", testClusters)) + clusters, err := fs.ReadFile(testClusters) require.NoError(t, err) return []byte(fmt.Sprintf("{\n\"config_dump\":%s,\n\"clusters\":%s}", string(configDump), string(clusters))) @@ -518,18 +508,3 @@ var testEnvoyConfig = &EnvoyConfig{ }, }, } - -type mockPortForwarder struct { - openBehavior func(context.Context) (string, error) -} - -func (m *mockPortForwarder) Open(ctx context.Context) (string, error) { return m.openBehavior(ctx) } -func (m *mockPortForwarder) Close() {} - -func testLogConfig() map[string]string { - cfg := make(map[string]string, len(EnvoyLoggers)) - for k := range EnvoyLoggers { - cfg[k] = "debug" - } - return cfg -} diff --git a/cli/common/envoy/types.go b/cli/cmd/proxy/read/envoy_types.go similarity index 99% rename from cli/common/envoy/types.go rename to cli/cmd/proxy/read/envoy_types.go index 505248e983..cc1ffcf7e2 100644 --- 
a/cli/common/envoy/types.go +++ b/cli/cmd/proxy/read/envoy_types.go @@ -1,4 +1,4 @@ -package envoy +package read /* Envoy Types These types are based on the JSON returned from the Envoy Config Dump API on the diff --git a/cli/cmd/proxy/read/filters.go b/cli/cmd/proxy/read/filters.go index ab3e21b4d8..dc65172f32 100644 --- a/cli/cmd/proxy/read/filters.go +++ b/cli/cmd/proxy/read/filters.go @@ -3,8 +3,6 @@ package read import ( "strconv" "strings" - - "github.com/hashicorp/consul-k8s/cli/common/envoy" ) // FilterClusters takes a slice of clusters along with parameters for filtering @@ -19,7 +17,7 @@ import ( // // The filters are applied in combination such that a cluster must adhere to // all of the filtering values which are passed in. -func FilterClusters(clusters []envoy.Cluster, fqdn, address string, port int) []envoy.Cluster { +func FilterClusters(clusters []Cluster, fqdn, address string, port int) []Cluster { // No filtering no-op. if fqdn == "" && address == "" && port == -1 { return clusters @@ -27,7 +25,7 @@ func FilterClusters(clusters []envoy.Cluster, fqdn, address string, port int) [] portStr := ":" + strconv.Itoa(port) - filtered := make([]envoy.Cluster, 0) + filtered := make([]Cluster, 0) for _, cluster := range clusters { if !strings.Contains(cluster.FullyQualifiedDomainName, fqdn) { continue @@ -60,14 +58,14 @@ func FilterClusters(clusters []envoy.Cluster, fqdn, address string, port int) [] // // The filters are applied in combination such that an endpoint must adhere to // all of the filtering values which are passed in. -func FilterEndpoints(endpoints []envoy.Endpoint, address string, port int) []envoy.Endpoint { +func FilterEndpoints(endpoints []Endpoint, address string, port int) []Endpoint { if address == "" && port == -1 { return endpoints } portStr := ":" + strconv.Itoa(port) - filtered := make([]envoy.Endpoint, 0) + filtered := make([]Endpoint, 0) for _, endpoint := range endpoints { if strings.Contains(endpoint.Address, address) && (port == -1 || strings.Contains(endpoint.Address, portStr)) { filtered = append(filtered, endpoint) @@ -87,14 +85,14 @@ func FilterEndpoints(endpoints []envoy.Endpoint, address string, port int) []env // // The filters are applied in combination such that an listener must adhere to // all of the filtering values which are passed in. 
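[Editor's note] As the doc comments in filters.go state, the filters are conjunctive: an item survives only if it matches every criterion that was supplied, with "" and -1 meaning "no constraint". A minimal, self-contained illustration of that AND semantics over plain address strings (filterAddrs is a hypothetical reduction of FilterEndpoints, not the shipped function):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// filterAddrs keeps an address only if it contains the address substring
// AND, when port != -1, the ":port" suffix.
func filterAddrs(addrs []string, address string, port int) []string {
	if address == "" && port == -1 {
		return addrs // no filtering requested
	}
	portStr := ":" + strconv.Itoa(port)
	filtered := make([]string, 0)
	for _, a := range addrs {
		if strings.Contains(a, address) && (port == -1 || strings.Contains(a, portStr)) {
			filtered = append(filtered, a)
		}
	}
	return filtered
}

func main() {
	addrs := []string{"192.168.79.187:8502", "127.0.0.1:8080", "192.168.31.201:20000"}
	fmt.Println(filterAddrs(addrs, "192.168", 20000)) // [192.168.31.201:20000]
}
```
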
-func FilterListeners(listeners []envoy.Listener, address string, port int) []envoy.Listener { +func FilterListeners(listeners []Listener, address string, port int) []Listener { if address == "" && port == -1 { return listeners } portStr := ":" + strconv.Itoa(port) - filtered := make([]envoy.Listener, 0) + filtered := make([]Listener, 0) for _, listener := range listeners { if strings.Contains(listener.Address, address) && (port == -1 || strings.Contains(listener.Address, portStr)) { filtered = append(filtered, listener) diff --git a/cli/cmd/proxy/read/filters_test.go b/cli/cmd/proxy/read/filters_test.go index 7340b6ae0d..48ff3a97da 100644 --- a/cli/cmd/proxy/read/filters_test.go +++ b/cli/cmd/proxy/read/filters_test.go @@ -4,12 +4,10 @@ import ( "testing" "github.com/stretchr/testify/require" - - "github.com/hashicorp/consul-k8s/cli/common/envoy" ) func TestFilterClusters(t *testing.T) { - given := []envoy.Cluster{ + given := []Cluster{ { FullyQualifiedDomainName: "local_agent", Endpoints: []string{"192.168.79.187:8502"}, @@ -44,13 +42,13 @@ func TestFilterClusters(t *testing.T) { fqdn string address string port int - expected []envoy.Cluster + expected []Cluster }{ "No filter": { fqdn: "", address: "", port: -1, - expected: []envoy.Cluster{ + expected: []Cluster{ { FullyQualifiedDomainName: "local_agent", Endpoints: []string{"192.168.79.187:8502"}, @@ -85,7 +83,7 @@ func TestFilterClusters(t *testing.T) { fqdn: "default", address: "", port: -1, - expected: []envoy.Cluster{ + expected: []Cluster{ { FullyQualifiedDomainName: "client.default.dc1.internal.bc3815c2-1a0f-f3ff-a2e9-20d791f08d00.consul", Endpoints: []string{}, @@ -104,7 +102,7 @@ func TestFilterClusters(t *testing.T) { fqdn: "", address: "127.0.", port: -1, - expected: []envoy.Cluster{ + expected: []Cluster{ { FullyQualifiedDomainName: "local_app", Endpoints: []string{"127.0.0.1:8080"}, @@ -119,7 +117,7 @@ func TestFilterClusters(t *testing.T) { fqdn: "", address: "", port: 8080, - expected: []envoy.Cluster{ + expected: []Cluster{ { FullyQualifiedDomainName: "local_app", Endpoints: []string{"127.0.0.1:8080"}, @@ -134,7 +132,7 @@ func TestFilterClusters(t *testing.T) { fqdn: "local", address: "127.0.0.1", port: -1, - expected: []envoy.Cluster{ + expected: []Cluster{ { FullyQualifiedDomainName: "local_app", Endpoints: []string{"127.0.0.1:8080"}, @@ -149,7 +147,7 @@ func TestFilterClusters(t *testing.T) { fqdn: "local", address: "", port: 8080, - expected: []envoy.Cluster{ + expected: []Cluster{ { FullyQualifiedDomainName: "local_app", Endpoints: []string{"127.0.0.1:8080"}, @@ -160,7 +158,7 @@ func TestFilterClusters(t *testing.T) { fqdn: "", address: "127.0.0.1", port: 8080, - expected: []envoy.Cluster{ + expected: []Cluster{ { FullyQualifiedDomainName: "local_app", Endpoints: []string{"127.0.0.1:8080"}, @@ -171,7 +169,7 @@ func TestFilterClusters(t *testing.T) { fqdn: "local", address: "192.168.79.187", port: 8502, - expected: []envoy.Cluster{ + expected: []Cluster{ { FullyQualifiedDomainName: "local_agent", Endpoints: []string{"192.168.79.187:8502"}, @@ -189,7 +187,7 @@ func TestFilterClusters(t *testing.T) { } func TestFilterEndpoints(t *testing.T) { - given := []envoy.Endpoint{ + given := []Endpoint{ { Address: "192.168.79.187:8502", }, @@ -210,12 +208,12 @@ func TestFilterEndpoints(t *testing.T) { cases := map[string]struct { address string port int - expected []envoy.Endpoint + expected []Endpoint }{ "No filter": { address: "", port: -1, - expected: []envoy.Endpoint{ + expected: []Endpoint{ { Address: "192.168.79.187:8502", 
}, @@ -236,7 +234,7 @@ func TestFilterEndpoints(t *testing.T) { "Filter address": { address: "127.0.0.1", port: -1, - expected: []envoy.Endpoint{ + expected: []Endpoint{ { Address: "127.0.0.1:8080", }, @@ -245,7 +243,7 @@ func TestFilterEndpoints(t *testing.T) { "Filter port": { address: "", port: 20000, - expected: []envoy.Endpoint{ + expected: []Endpoint{ { Address: "192.168.31.201:20000", }, @@ -260,7 +258,7 @@ func TestFilterEndpoints(t *testing.T) { "Filter address and port": { address: "235", port: 20000, - expected: []envoy.Endpoint{ + expected: []Endpoint{ { Address: "192.168.47.235:20000", }, @@ -277,7 +275,7 @@ func TestFilterEndpoints(t *testing.T) { } func TestFilterListeners(t *testing.T) { - given := []envoy.Listener{ + given := []Listener{ { Address: "192.168.69.179:20000", }, @@ -289,12 +287,12 @@ func TestFilterListeners(t *testing.T) { cases := map[string]struct { address string port int - expected []envoy.Listener + expected []Listener }{ "No filter": { address: "", port: -1, - expected: []envoy.Listener{ + expected: []Listener{ { Address: "192.168.69.179:20000", }, @@ -306,7 +304,7 @@ func TestFilterListeners(t *testing.T) { "Filter address": { address: "127.0.0.1", port: -1, - expected: []envoy.Listener{ + expected: []Listener{ { Address: "127.0.0.1:15001", }, @@ -315,7 +313,7 @@ func TestFilterListeners(t *testing.T) { "Filter port": { address: "", port: 20000, - expected: []envoy.Listener{ + expected: []Listener{ { Address: "192.168.69.179:20000", }, @@ -324,7 +322,7 @@ func TestFilterListeners(t *testing.T) { "Filter address and port": { address: "192.168.69.179", port: 20000, - expected: []envoy.Listener{ + expected: []Listener{ { Address: "192.168.69.179:20000", }, diff --git a/cli/cmd/proxy/read/format.go b/cli/cmd/proxy/read/format.go index 90137db6a5..97d5ada86a 100644 --- a/cli/cmd/proxy/read/format.go +++ b/cli/cmd/proxy/read/format.go @@ -4,23 +4,20 @@ import ( "fmt" "strings" - "github.com/hashicorp/consul-k8s/cli/common/envoy" "github.com/hashicorp/consul-k8s/cli/common/terminal" ) -func formatClusters(clusters []envoy.Cluster) *terminal.Table { +func formatClusters(clusters []Cluster) *terminal.Table { table := terminal.NewTable("Name", "FQDN", "Endpoints", "Type", "Last Updated") for _, cluster := range clusters { - table.AddRow([]string{ - cluster.Name, cluster.FullyQualifiedDomainName, strings.Join(cluster.Endpoints, ", "), - cluster.Type, cluster.LastUpdated, - }, []string{}) + table.AddRow([]string{cluster.Name, cluster.FullyQualifiedDomainName, strings.Join(cluster.Endpoints, ", "), + cluster.Type, cluster.LastUpdated}, []string{}) } return table } -func formatEndpoints(endpoints []envoy.Endpoint) *terminal.Table { +func formatEndpoints(endpoints []Endpoint) *terminal.Table { table := terminal.NewTable("Address:Port", "Cluster", "Weight", "Status") for _, endpoint := range endpoints { var statusColor string @@ -38,7 +35,7 @@ func formatEndpoints(endpoints []envoy.Endpoint) *terminal.Table { return table } -func formatListeners(listeners []envoy.Listener) *terminal.Table { +func formatListeners(listeners []Listener) *terminal.Table { table := terminal.NewTable("Name", "Address:Port", "Direction", "Filter Chain Match", "Filters", "Last Updated") for _, listener := range listeners { for index, filter := range listener.FilterChain { @@ -60,7 +57,7 @@ func formatListeners(listeners []envoy.Listener) *terminal.Table { return table } -func formatRoutes(routes []envoy.Route) *terminal.Table { +func formatRoutes(routes []Route) *terminal.Table { table := 
terminal.NewTable("Name", "Destination Cluster", "Last Updated") for _, route := range routes { table.AddRow([]string{route.Name, route.DestinationCluster, route.LastUpdated}, []string{}) @@ -69,7 +66,7 @@ func formatRoutes(routes []envoy.Route) *terminal.Table { return table } -func formatSecrets(secrets []envoy.Secret) *terminal.Table { +func formatSecrets(secrets []Secret) *terminal.Table { table := terminal.NewTable("Name", "Type", "Last Updated") for _, secret := range secrets { table.AddRow([]string{secret.Name, secret.Type, secret.LastUpdated}, []string{}) diff --git a/cli/cmd/proxy/read/format_test.go b/cli/cmd/proxy/read/format_test.go index 95c562a0dd..7d6f975d39 100644 --- a/cli/cmd/proxy/read/format_test.go +++ b/cli/cmd/proxy/read/format_test.go @@ -5,10 +5,8 @@ import ( "context" "testing" - "github.com/stretchr/testify/require" - - "github.com/hashicorp/consul-k8s/cli/common/envoy" "github.com/hashicorp/consul-k8s/cli/common/terminal" + "github.com/stretchr/testify/require" ) func TestFormatClusters(t *testing.T) { @@ -23,7 +21,7 @@ func TestFormatClusters(t *testing.T) { "server.*server.default.dc1.internal.bc3815c2-1a0f-f3ff-a2e9-20d791f08d00.consul.*EDS.*2022-06-09T00:39:12\\.754Z", } - given := []envoy.Cluster{ + given := []Cluster{ { Name: "local_agent", FullyQualifiedDomainName: "local_agent", @@ -99,7 +97,7 @@ func TestFormatEndpoints(t *testing.T) { "192.168.65.131:20000.*1.00.*HEALTHY", } - given := []envoy.Endpoint{ + given := []Endpoint{ { Address: "192.168.79.187:8502", Cluster: "local_agent", @@ -176,11 +174,11 @@ func TestFormatListeners(t *testing.T) { "Any.*-> original-destination", } - given := []envoy.Listener{ + given := []Listener{ { Name: "public_listener", Address: "192.168.69.179:20000", - FilterChain: []envoy.FilterChain{ + FilterChain: []FilterChain{ { FilterChainMatch: "Any", Filters: []string{"* -> local_app/"}, @@ -192,7 +190,7 @@ func TestFormatListeners(t *testing.T) { { Name: "outbound_listener", Address: "127.0.0.1:15001", - FilterChain: []envoy.FilterChain{ + FilterChain: []FilterChain{ { FilterChainMatch: "10.100.134.173/32, 240.0.0.3/32", Filters: []string{"-> client.default.dc1.internal.bc3815c2-1a0f-f3ff-a2e9-20d791f08d00.consul"}, @@ -247,7 +245,7 @@ func TestFormatRoutes(t *testing.T) { "server.*server\\.default\\.dc1\\.internal\\.bc3815c2-1a0f-f3ff-a2e9-20d791f08d00\\.consul/.*2022-05-24T17:41:59\\.078Z", } - given := []envoy.Route{ + given := []Route{ { Name: "public_listener", DestinationCluster: "local_app/", @@ -284,7 +282,7 @@ func TestFormatSecrets(t *testing.T) { "ROOTCA.*Dynamic Warming.*2022-03-15T05:14:22.868Z", } - given := []envoy.Secret{ + given := []Secret{ { Name: "default", Type: "Dynamic Active", diff --git a/cli/common/envoy/testdata/test_clusters.json b/cli/cmd/proxy/read/test_clusters.json similarity index 100% rename from cli/common/envoy/testdata/test_clusters.json rename to cli/cmd/proxy/read/test_clusters.json diff --git a/cli/common/envoy/testdata/test_config_dump.json b/cli/cmd/proxy/read/test_config_dump.json similarity index 100% rename from cli/common/envoy/testdata/test_config_dump.json rename to cli/cmd/proxy/read/test_config_dump.json diff --git a/cli/cmd/status/status.go b/cli/cmd/status/status.go index c2108cc631..19f5a52398 100644 --- a/cli/cmd/status/status.go +++ b/cli/cmd/status/status.go @@ -28,8 +28,6 @@ const ( type Command struct { *common.BaseCommand - helmActionsRunner helm.HelmActionsRunner - kubernetes kubernetes.Interface set *flag.Sets @@ -65,11 +63,10 @@ func (c *Command) init() { // Run 
checks the status of a Consul installation on Kubernetes. func (c *Command) Run(args []string) int { c.once.Do(c.init) - if c.helmActionsRunner == nil { - c.helmActionsRunner = &helm.ActionRunner{} - } + // The logger is initialized in main with the name cli. Here, we reset the name to status so that log lines are prefixed with status. c.Log.ResetNamed("status") + defer common.CloseWithError(c.BaseCommand) if err := c.set.Parse(args); err != nil { @@ -104,11 +101,7 @@ func (c *Command) Run(args []string) int { c.UI.Output("Consul Status Summary", terminal.WithHeaderStyle()) - _, releaseName, namespace, err := c.helmActionsRunner.CheckForInstallations(&helm.CheckForInstallationsOptions{ - Settings: settings, - ReleaseName: common.DefaultReleaseName, - DebugLog: uiLogger, - }) + releaseName, namespace, err := common.CheckForInstallations(settings, uiLogger) if err != nil { c.UI.Output(err.Error(), terminal.WithErrorStyle()) return 1 @@ -119,9 +112,18 @@ func (c *Command) Run(args []string) int { return 1 } - if err := c.checkConsulServers(namespace); err != nil { - c.UI.Output("Unable to check Kubernetes cluster for Consul servers: %v", err) + if s, err := c.checkConsulServers(namespace); err != nil { + c.UI.Output(err.Error(), terminal.WithErrorStyle()) return 1 + } else { + c.UI.Output(s, terminal.WithSuccessStyle()) + } + + if s, err := c.checkConsulClients(namespace); err != nil { + c.UI.Output(err.Error(), terminal.WithErrorStyle()) + return 1 + } else { + c.UI.Output(s, terminal.WithSuccessStyle()) } return 0 @@ -163,7 +165,7 @@ func (c *Command) checkHelmInstallation(settings *helmCLI.EnvSettings, uiLogger } statuser := action.NewStatus(statusConfig) - rel, err := c.helmActionsRunner.GetStatus(statuser, releaseName) + rel, err := statuser.Run(releaseName) if err != nil { return fmt.Errorf("couldn't check for installations: %s", err) } @@ -214,24 +216,43 @@ func validEvent(events []release.HookEvent) bool { return false } -// checkConsulServers prints the status of Consul servers if they -// are expected to be found in the Kubernetes cluster. It does not check for -// server status if they are not running within the Kubernetes cluster. -func (c *Command) checkConsulServers(namespace string) error { - servers, err := c.kubernetes.AppsV1().StatefulSets(namespace).List(c.Ctx, metav1.ListOptions{LabelSelector: "app=consul,chart=consul-helm,component=server"}) +// checkConsulServers uses the Kubernetes list function to report whether the Consul servers are healthy.
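// For orientation, a minimal, self-contained sketch of the same health check
// the function below performs; the package main wrapper, the in-cluster
// rest.Config wiring, and the hard-coded "default" namespace are illustrative
// assumptions, not part of this change:
//
//	package main
//
//	import (
//		"context"
//		"fmt"
//
//		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
//		"k8s.io/client-go/kubernetes"
//		"k8s.io/client-go/rest"
//	)
//
//	func main() {
//		cfg, _ := rest.InClusterConfig()
//		k8s, _ := kubernetes.NewForConfig(cfg)
//		sets, _ := k8s.AppsV1().StatefulSets("default").List(context.Background(),
//			metav1.ListOptions{LabelSelector: "app=consul,chart=consul-helm,component=server"})
//		// A server set is healthy when every desired replica reports ready.
//		for _, s := range sets.Items {
//			fmt.Printf("%s: %d/%d ready\n", s.Name, s.Status.ReadyReplicas, *s.Spec.Replicas)
//		}
//	}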
+func (c *Command) checkConsulServers(namespace string) (string, error) { + servers, err := c.kubernetes.AppsV1().StatefulSets(namespace).List(c.Ctx, + metav1.ListOptions{LabelSelector: "app=consul,chart=consul-helm,component=server"}) if err != nil { - return err + return "", err + } else if len(servers.Items) == 0 { + return "", errors.New("no server stateful set found") + } else if len(servers.Items) > 1 { + return "", errors.New("found multiple server stateful sets") } - if len(servers.Items) != 0 { - desiredServers, readyServers := int(*servers.Items[0].Spec.Replicas), int(servers.Items[0].Status.ReadyReplicas) - if readyServers < desiredServers { - c.UI.Output("Consul servers healthy %d/%d", readyServers, desiredServers, terminal.WithErrorStyle()) - } else { - c.UI.Output("Consul servers healthy %d/%d", readyServers, desiredServers) - } + + desiredReplicas := int(*servers.Items[0].Spec.Replicas) + readyReplicas := int(servers.Items[0].Status.ReadyReplicas) + if readyReplicas < desiredReplicas { + return "", fmt.Errorf("%d/%d Consul servers unhealthy", desiredReplicas-readyReplicas, desiredReplicas) } + return fmt.Sprintf("Consul servers healthy (%d/%d)", readyReplicas, desiredReplicas), nil +} - return nil +// checkConsulClients uses the Kubernetes list function to report whether the Consul clients are healthy. +func (c *Command) checkConsulClients(namespace string) (string, error) { + clients, err := c.kubernetes.AppsV1().DaemonSets(namespace).List(c.Ctx, + metav1.ListOptions{LabelSelector: "app=consul,chart=consul-helm"}) + if err != nil { + return "", err + } else if len(clients.Items) == 0 { + return "", errors.New("no client daemon set found") + } else if len(clients.Items) > 1 { + return "", errors.New("found multiple client daemon sets") + } + desiredReplicas := int(clients.Items[0].Status.DesiredNumberScheduled) + readyReplicas := int(clients.Items[0].Status.NumberReady) + if readyReplicas < desiredReplicas { + return "", fmt.Errorf("%d/%d Consul clients unhealthy", desiredReplicas-readyReplicas, desiredReplicas) + } + return fmt.Sprintf("Consul clients healthy (%d/%d)", readyReplicas, desiredReplicas), nil } // setupKubeClient to use for non Helm SDK calls to the Kubernetes API. The Helm SDK will use diff --git a/cli/cmd/status/status_test.go b/cli/cmd/status/status_test.go index 8666fd8493..b45ffef556 100644 --- a/cli/cmd/status/status_test.go +++ b/cli/cmd/status/status_test.go @@ -1,215 +1,197 @@ package status import ( - "bytes" "context" - "errors" "flag" "fmt" - "io" "os" "testing" "github.com/hashicorp/consul-k8s/cli/common" cmnFlag "github.com/hashicorp/consul-k8s/cli/common/flag" - "github.com/hashicorp/consul-k8s/cli/common/terminal" - "github.com/hashicorp/consul-k8s/cli/helm" "github.com/hashicorp/go-hclog" "github.com/posener/complete" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "helm.sh/helm/v3/pkg/action" - "helm.sh/helm/v3/pkg/chart" - helmRelease "helm.sh/helm/v3/pkg/release" - helmTime "helm.sh/helm/v3/pkg/time" appsv1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/fake" ) +// TestCheckConsulServers creates a fake stateful set and tests the checkConsulServers function.
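// A note on the fixture pattern used in the tests below: client-go's fake
// clientset can also be seeded with objects at construction time instead of
// through Create calls; a small sketch, assuming the imports in this file
// ("ss" stands in for a prebuilt *appsv1.StatefulSet like the ones built below):
//
//	c.kubernetes = fake.NewSimpleClientset(ss)
//	// equivalent to fake.NewSimpleClientset() followed by
//	// c.kubernetes.AppsV1().StatefulSets("default").Create(
//	//	context.Background(), ss, metav1.CreateOptions{})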
func TestCheckConsulServers(t *testing.T) { - namespace := "default" - cases := map[string]struct { - desired int - healthy int - }{ - "No servers": {0, 0}, - "3 servers expected, 1 healthy": {3, 1}, - "3 servers expected, 3 healthy": {3, 3}, + c := getInitializedCommand(t) + c.kubernetes = fake.NewSimpleClientset() + + // First check that having no stateful set causes an error. + _, err := c.checkConsulServers("default") + require.Error(t, err) + require.Contains(t, err.Error(), "no server stateful set found") + + // Next create a stateful set with 3 desired replicas and 3 ready replicas. + var replicas int32 = 3 + + ss := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "consul-server-test1", + Namespace: "default", + Labels: map[string]string{"app": "consul", "chart": "consul-helm", "component": "server"}, + }, + Spec: appsv1.StatefulSetSpec{ + Replicas: &replicas, + }, + Status: appsv1.StatefulSetStatus{ + Replicas: replicas, + ReadyReplicas: replicas, + }, } - for name, tc := range cases { - t.Run(name, func(t *testing.T) { - buf := new(bytes.Buffer) - c := getInitializedCommand(t, buf) - c.kubernetes = fake.NewSimpleClientset() - - // Deploy servers - err := createServers("consul-servers", namespace, int32(tc.desired), int32(tc.healthy), c.kubernetes) - require.NoError(t, err) - - // Verify that the correct server statuses are seen. - err = c.checkConsulServers(namespace) - require.NoError(t, err) - - actual := buf.String() - if tc.desired != 0 { - require.Contains(t, actual, fmt.Sprintf("Consul servers healthy %d/%d", tc.healthy, tc.desired)) - } - buf.Reset() - }) + c.kubernetes.AppsV1().StatefulSets("default").Create(context.Background(), ss, metav1.CreateOptions{}) + + // Now we run the checkConsulServers() function and it should succeed. + s, err := c.checkConsulServers("default") + require.NoError(t, err) + require.Equal(t, "Consul servers healthy (3/3)", s) + + // If you then create another stateful set, it should error. + ss2 := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "consul-server-test2", + Namespace: "default", + Labels: map[string]string{"app": "consul", "chart": "consul-helm", "component": "server"}, + }, + Spec: appsv1.StatefulSetSpec{ + Replicas: &replicas, + }, + Status: appsv1.StatefulSetStatus{ + Replicas: replicas, + ReadyReplicas: replicas, + }, } + c.kubernetes.AppsV1().StatefulSets("default").Create(context.Background(), ss2, metav1.CreateOptions{}) + + _, err = c.checkConsulServers("default") + require.Error(t, err) + require.Contains(t, err.Error(), "found multiple server stateful sets") + + // Clear out the client and now run a test where the stateful set isn't ready. + c.kubernetes = fake.NewSimpleClientset() + + ss3 := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "consul-server-test3", + Namespace: "default", + Labels: map[string]string{"app": "consul", "chart": "consul-helm", "component": "server"}, + }, + Spec: appsv1.StatefulSetSpec{ + Replicas: &replicas, + }, + Status: appsv1.StatefulSetStatus{ + Replicas: replicas, + ReadyReplicas: replicas - 1, // Let's just set one of the servers to unhealthy + }, + } + c.kubernetes.AppsV1().StatefulSets("default").Create(context.Background(), ss3, metav1.CreateOptions{}) + + _, err = c.checkConsulServers("default") + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("%d/%d Consul servers unhealthy", 1, replicas)) } -// TestStatus creates a fake stateful set and tests the checkConsulServers function.
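// For reference, the deleted TestStatus below followed Go's usual table-driven
// layout; its skeleton (fields abbreviated) was:
//
//	cases := map[string]struct {
//		input              []string
//		expectedReturnCode int
//	}{ /* ... */ }
//	for name, tc := range cases {
//		t.Run(name, func(t *testing.T) { /* ... */ })
//	}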
-func TestStatus(t *testing.T) { - nowTime := helmTime.Now() - timezone, _ := nowTime.Zone() - notImeStr := nowTime.Format("2006/01/02 15:04:05") + " " + timezone - cases := map[string]struct { - input []string - messages []string - preProcessingFunc func(k8s kubernetes.Interface) error - helmActionsRunner *helm.MockActionRunner - expectedReturnCode int - }{ - "status with servers returns success": { - input: []string{}, - messages: []string{ - fmt.Sprintf("\n==> Consul Status Summary\nName\tNamespace\tStatus\tChart Version\tAppVersion\tRevision\tLast Updated \n \t \tREADY \t1.0.0 \t \t0 \t%s\t\n", notImeStr), - "\n==> Config:\n {}\n \nConsul servers healthy 3/3\n", - }, - preProcessingFunc: func(k8s kubernetes.Interface) error { - return createServers("consul-server-test1", "consul", 3, 3, k8s) - }, - - helmActionsRunner: &helm.MockActionRunner{ - GetStatusFunc: func(status *action.Status, name string) (*helmRelease.Release, error) { - return &helmRelease.Release{ - Name: "consul", Namespace: "consul", - Info: &helmRelease.Info{LastDeployed: nowTime, Status: "READY"}, - Chart: &chart.Chart{ - Metadata: &chart.Metadata{ - Version: "1.0.0", - }, - }, - Config: make(map[string]interface{})}, nil - }, - }, - expectedReturnCode: 0, +// TestCheckConsulClients is very similar to TestCheckConsulServers() in structure. +func TestCheckConsulClients(t *testing.T) { + c := getInitializedCommand(t) + c.kubernetes = fake.NewSimpleClientset() + + // No client daemon set should cause an error. + _, err := c.checkConsulClients("default") + require.Error(t, err) + require.Contains(t, err.Error(), "no client daemon set found") + + // Next create a daemon set. + var desired int32 = 3 + + ds := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "consul-client-test1", + Namespace: "default", + Labels: map[string]string{"app": "consul", "chart": "consul-helm"}, }, - "status with pre-install and pre-upgrade hooks returns success and outputs hook status": { - input: []string{}, - messages: []string{ - fmt.Sprintf("\n==> Consul Status Summary\nName\tNamespace\tStatus\tChart Version\tAppVersion\tRevision\tLast Updated \n \t \tREADY \t1.0.0 \t \t0 \t%s\t\n", notImeStr), - "\n==> Config:\n {}\n \n", - "\n==> Status Of Helm Hooks:\npre-install-hook pre-install: Succeeded\npre-upgrade-hook pre-upgrade: Succeeded\nConsul servers healthy 3/3\n", - }, - preProcessingFunc: func(k8s kubernetes.Interface) error { - return createServers("consul-server-test1", "consul", 3, 3, k8s) - }, - - helmActionsRunner: &helm.MockActionRunner{ - GetStatusFunc: func(status *action.Status, name string) (*helmRelease.Release, error) { - return &helmRelease.Release{ - Name: "consul", Namespace: "consul", - Info: &helmRelease.Info{LastDeployed: nowTime, Status: "READY"}, - Chart: &chart.Chart{ - Metadata: &chart.Metadata{ - Version: "1.0.0", - }, - }, - Config: make(map[string]interface{}), - Hooks: []*helmRelease.Hook{ - { - Name: "pre-install-hook", - Kind: "pre-install", LastRun: helmRelease.HookExecution{ - Phase: helmRelease.HookPhaseSucceeded, - }, - Events: []helmRelease.HookEvent{ - "pre-install", - }, - }, - { - Name: "pre-upgrade-hook", - Kind: "pre-upgrade", LastRun: helmRelease.HookExecution{ - Phase: helmRelease.HookPhaseSucceeded, - }, - Events: []helmRelease.HookEvent{ - "pre-install", - }, - }, - { - Name: "post-delete-hook", - Kind: "post-delete", LastRun: helmRelease.HookExecution{ - Phase: helmRelease.HookPhaseSucceeded, - }, - Events: []helmRelease.HookEvent{ - "post-delete", - }, - }, - }}, nil - }, - }, - 
expectedReturnCode: 0, + Status: appsv1.DaemonSetStatus{ + DesiredNumberScheduled: desired, + NumberReady: desired, }, - "status with CheckForInstallations error returns ": { - input: []string{}, - messages: []string{ - "\n==> Consul Status Summary\n ! kaboom!\n", - }, - preProcessingFunc: func(k8s kubernetes.Interface) error { - return createServers("consul-server-test1", "consul", 3, 3, k8s) - }, - - helmActionsRunner: &helm.MockActionRunner{ - CheckForInstallationsFunc: func(options *helm.CheckForInstallationsOptions) (bool, string, string, error) { - return false, "", "", errors.New("kaboom!") - }, - }, - expectedReturnCode: 1, + } + + c.kubernetes.AppsV1().DaemonSets("default").Create(context.Background(), ds, metav1.CreateOptions{}) + + // Now run checkConsulClients() and make sure it succeeds. + s, err := c.checkConsulClients("default") + require.NoError(t, err) + require.Equal(t, "Consul clients healthy (3/3)", s) + + // Creating another daemon set should cause an error. + ds2 := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "consul-client-test2", + Namespace: "default", + Labels: map[string]string{"app": "consul", "chart": "consul-helm"}, }, - "status with GetStatus error returns ": { - input: []string{}, - messages: []string{ - "\n==> Consul Status Summary\n ! couldn't check for installations: kaboom!\n", - }, - preProcessingFunc: func(k8s kubernetes.Interface) error { - return createServers("consul-server-test1", "consul", 3, 3, k8s) - }, - - helmActionsRunner: &helm.MockActionRunner{ - GetStatusFunc: func(status *action.Status, name string) (*helmRelease.Release, error) { - return nil, errors.New("kaboom!") - }, - }, - expectedReturnCode: 1, + Status: appsv1.DaemonSetStatus{ + DesiredNumberScheduled: desired, + NumberReady: desired, }, } - for name, tc := range cases { - t.Run(name, func(t *testing.T) { - buf := new(bytes.Buffer) - c := getInitializedCommand(t, buf) - c.kubernetes = fake.NewSimpleClientset() - c.helmActionsRunner = tc.helmActionsRunner - if tc.preProcessingFunc != nil { - err := tc.preProcessingFunc(c.kubernetes) - require.NoError(t, err) - } - returnCode := c.Run([]string{}) - require.Equal(t, tc.expectedReturnCode, returnCode) - output := buf.String() - for _, msg := range tc.messages { - require.Contains(t, output, msg) - } - }) + c.kubernetes.AppsV1().DaemonSets("default").Create(context.Background(), ds2, metav1.CreateOptions{}) + + _, err = c.checkConsulClients("default") + require.Error(t, err) + require.Contains(t, err.Error(), "found multiple client daemon sets") + + // Clear out the client and run a test with fewer than desired daemon sets ready. + c.kubernetes = fake.NewSimpleClientset() + + ds3 := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "consul-client-test2", + Namespace: "default", + Labels: map[string]string{"app": "consul", "chart": "consul-helm"}, + }, + Status: appsv1.DaemonSetStatus{ + DesiredNumberScheduled: desired, + NumberReady: desired - 1, + }, + } + c.kubernetes.AppsV1().DaemonSets("default").Create(context.Background(), ds3, metav1.CreateOptions{}) + + _, err = c.checkConsulClients("default") + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("%d/%d Consul clients unhealthy", 1, desired)) +} + +// getInitializedCommand sets up a command struct for tests. 
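// If a test needs to assert on terminal output, the command's UI can be wired
// to a buffer the way the previous revision of this helper did; a sketch,
// assuming "bytes" and "context" are imported along with the cli's terminal
// package:
//
//	buf := new(bytes.Buffer)
//	baseCommand.UI = terminal.NewUI(context.Background(), buf)
//	// run the command, then assert with require.Contains(t, buf.String(), ...)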
+func getInitializedCommand(t *testing.T) *Command { + t.Helper() + log := hclog.New(&hclog.LoggerOptions{ + Name: "cli", + Level: hclog.Info, + Output: os.Stdout, + }) + + baseCommand := &common.BaseCommand{ + Log: log, } + + c := &Command{ + BaseCommand: baseCommand, + } + c.init() + return c } func TestTaskCreateCommand_AutocompleteFlags(t *testing.T) { t.Parallel() - cmd := getInitializedCommand(t, nil) + cmd := getInitializedCommand(t) predictor := cmd.AutocompleteFlags() @@ -232,52 +214,7 @@ func TestTaskCreateCommand_AutocompleteFlags(t *testing.T) { } func TestTaskCreateCommand_AutocompleteArgs(t *testing.T) { - cmd := getInitializedCommand(t, nil) + cmd := getInitializedCommand(t) c := cmd.AutocompleteArgs() assert.Equal(t, complete.PredictNothing, c) } - -// getInitializedCommand sets up a command struct for tests. -func getInitializedCommand(t *testing.T, buf io.Writer) *Command { - t.Helper() - log := hclog.New(&hclog.LoggerOptions{ - Name: "cli", - Level: hclog.Info, - Output: os.Stdout, - }) - var ui terminal.UI - if buf != nil { - ui = terminal.NewUI(context.Background(), buf) - } else { - ui = terminal.NewBasicUI(context.Background()) - } - baseCommand := &common.BaseCommand{ - Log: log, - UI: ui, - } - - c := &Command{ - BaseCommand: baseCommand, - } - c.init() - return c -} - -func createServers(name, namespace string, replicas, readyReplicas int32, k8s kubernetes.Interface) error { - servers := appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Labels: map[string]string{"app": "consul", "chart": "consul-helm", "component": "server"}, - }, - Spec: appsv1.StatefulSetSpec{ - Replicas: &replicas, - }, - Status: appsv1.StatefulSetStatus{ - Replicas: replicas, - ReadyReplicas: readyReplicas, - }, - } - _, err := k8s.AppsV1().StatefulSets(namespace).Create(context.Background(), &servers, metav1.CreateOptions{}) - return err -} diff --git a/cli/cmd/troubleshoot/command.go b/cli/cmd/troubleshoot/command.go deleted file mode 100644 index d37c66e998..0000000000 --- a/cli/cmd/troubleshoot/command.go +++ /dev/null @@ -1,26 +0,0 @@ -package troubleshoot - -import ( - "fmt" - - "github.com/hashicorp/consul-k8s/cli/common" - "github.com/mitchellh/cli" -) - -// TroubleshootCommand provides a synopsis for the troubleshoot subcommands (e.g. proxy, upstreams). -type TroubleshootCommand struct { - *common.BaseCommand -} - -// Run prints out information about the subcommands. -func (c *TroubleshootCommand) Run([]string) int { - return cli.RunResultHelp -} - -func (c *TroubleshootCommand) Help() string { - return fmt.Sprintf("%s\n\nUsage: consul-k8s troubleshoot ", c.Synopsis()) -} - -func (c *TroubleshootCommand) Synopsis() string { - return "Troubleshoot network and security configurations." 
-} diff --git a/cli/cmd/troubleshoot/proxy/proxy.go b/cli/cmd/troubleshoot/proxy/proxy.go deleted file mode 100644 index 624757fad3..0000000000 --- a/cli/cmd/troubleshoot/proxy/proxy.go +++ /dev/null @@ -1,282 +0,0 @@ -package proxy - -import ( - "fmt" - "net" - "strings" - "sync" - - "github.com/hashicorp/consul-k8s/cli/common" - "github.com/hashicorp/consul-k8s/cli/common/flag" - "github.com/hashicorp/consul-k8s/cli/common/terminal" - troubleshoot "github.com/hashicorp/consul/troubleshoot/proxy" - "github.com/posener/complete" - helmCLI "helm.sh/helm/v3/pkg/cli" - "k8s.io/apimachinery/pkg/api/validation" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" -) - -const ( - defaultAdminPort int = 19000 - flagNameKubeConfig = "kubeconfig" - flagNameKubeContext = "context" - flagNameNamespace = "namespace" - flagNamePod = "pod" - flagNameUpstreamEnvoyID = "upstream-envoy-id" - flagNameUpstreamIP = "upstream-ip" - DebugColor = "\033[0;36m%s\033[0m" -) - -type ProxyCommand struct { - *common.BaseCommand - - kubernetes kubernetes.Interface - - set *flag.Sets - - flagKubeConfig string - flagKubeContext string - flagNamespace string - - flagPod string - flagUpstreamEnvoyID string - flagUpstreamIP string - - restConfig *rest.Config - - once sync.Once - help string -} - -// init sets up flags and help text for the command. -func (c *ProxyCommand) init() { - c.set = flag.NewSets() - f := c.set.NewSet("Command Options") - - f.StringVar(&flag.StringVar{ - Name: flagNamePod, - Target: &c.flagPod, - Usage: "The pod to port-forward to.", - Aliases: []string{"p"}, - }) - - f.StringVar(&flag.StringVar{ - Name: flagNameUpstreamEnvoyID, - Target: &c.flagUpstreamEnvoyID, - Usage: "The envoy identifier of the upstream service that receives the communication. (explicit upstreams only)", - Aliases: []string{"id"}, - }) - - f.StringVar(&flag.StringVar{ - Name: flagNameUpstreamIP, - Target: &c.flagUpstreamIP, - Usage: "The IP address of the upstream service that receives the communication. (transparent proxy only)", - Aliases: []string{"ip"}, - }) - - f = c.set.NewSet("Global Options") - f.StringVar(&flag.StringVar{ - Name: flagNameKubeConfig, - Aliases: []string{"c"}, - Target: &c.flagKubeConfig, - Default: "", - Usage: "Set the path to kubeconfig file.", - }) - f.StringVar(&flag.StringVar{ - Name: flagNameKubeContext, - Target: &c.flagKubeContext, - Default: "", - Usage: "Set the Kubernetes context to use.", - }) - - f.StringVar(&flag.StringVar{ - Name: flagNameNamespace, - Target: &c.flagNamespace, - Usage: "The namespace the pod is in.", - Aliases: []string{"n"}, - }) - - c.help = c.set.Help() -} - -// Run executes the list command. -func (c *ProxyCommand) Run(args []string) int { - c.once.Do(c.init) - c.Log.ResetNamed("list") - defer common.CloseWithError(c.BaseCommand) - - // Parse the command line flags. - if err := c.set.Parse(args); err != nil { - c.UI.Output("Error parsing arguments: %v", err.Error(), terminal.WithErrorStyle()) - return 1 - } - - // Validate the command line flags. 
- if err := c.validateFlags(); err != nil { - c.UI.Output("Invalid argument: %v", err.Error(), terminal.WithErrorStyle()) - return 1 - } - - if c.kubernetes == nil { - if err := c.initKubernetes(); err != nil { - c.UI.Output("Error initializing Kubernetes client: %v", err.Error(), terminal.WithErrorStyle()) - return 1 - } - } - - if err := c.Troubleshoot(); err != nil { - c.UI.Output("Error running troubleshoot: %v", err.Error(), terminal.WithErrorStyle()) - return 1 - } - - return 0 -} - -// validateFlags ensures that the flags passed in by the can be used. -func (c *ProxyCommand) validateFlags() error { - - if (c.flagUpstreamEnvoyID == "" && c.flagUpstreamIP == "") || (c.flagUpstreamEnvoyID != "" && c.flagUpstreamIP != "") { - return fmt.Errorf("-upstream-envoy-id OR -upstream-ip is required.\n Please run `consul troubleshoot upstreams` to find the corresponding upstream.") - } - - if c.flagPod == "" { - return fmt.Errorf("-pod flag is required") - } - - if errs := validation.ValidateNamespaceName(c.flagNamespace, false); c.flagNamespace != "" && len(errs) > 0 { - return fmt.Errorf("invalid namespace name passed for -namespace/-n: %v", strings.Join(errs, "; ")) - } - - return nil -} - -// initKubernetes initializes the Kubernetes client. -func (c *ProxyCommand) initKubernetes() (err error) { - settings := helmCLI.New() - - if c.flagKubeConfig != "" { - settings.KubeConfig = c.flagKubeConfig - } - - if c.flagKubeContext != "" { - settings.KubeContext = c.flagKubeContext - } - - if c.restConfig == nil { - if c.restConfig, err = settings.RESTClientGetter().ToRESTConfig(); err != nil { - return fmt.Errorf("error creating Kubernetes REST config %v", err) - } - } - - if c.kubernetes == nil { - if c.kubernetes, err = kubernetes.NewForConfig(c.restConfig); err != nil { - return fmt.Errorf("error creating Kubernetes client %v", err) - } - } - - if c.flagNamespace == "" { - c.flagNamespace = settings.Namespace() - } - - return nil -} - -func (c *ProxyCommand) Troubleshoot() error { - pf := common.PortForward{ - Namespace: c.flagNamespace, - PodName: c.flagPod, - RemotePort: defaultAdminPort, - KubeClient: c.kubernetes, - RestConfig: c.restConfig, - } - - endpoint, err := pf.Open(c.Ctx) - if err != nil { - return err - } - defer pf.Close() - - adminAddr, adminPort, err := net.SplitHostPort(endpoint) - if err != nil { - return err - } - - adminAddrIP, err := net.ResolveIPAddr("ip", adminAddr) - if err != nil { - return err - } - - t, err := troubleshoot.NewTroubleshoot(adminAddrIP, adminPort) - if err != nil { - return err - } - - // err = t.GetEnvoyConfigDump() - // if err != nil { - // return err - // } - - messages, err := t.RunAllTests(c.flagUpstreamEnvoyID, c.flagUpstreamIP) - if err != nil { - return err - } - - c.UI.Output("Validation", terminal.WithHeaderStyle()) - for _, o := range messages { - if o.Success { - c.UI.Output(o.Message, terminal.WithSuccessStyle()) - } else { - c.UI.Output(o.Message, terminal.WithErrorStyle()) - for _, action := range o.PossibleActions { - c.UI.Output(fmt.Sprintf("-> %s", action), terminal.WithInfoStyle()) - } - } - } - - return nil -} - -// AutocompleteFlags returns a mapping of supported flags and autocomplete -// options for this command. The map key for the Flags map should be the -// complete flag such as "-foo" or "--foo". 
-func (c *ProxyCommand) AutocompleteFlags() complete.Flags { - return complete.Flags{ - fmt.Sprintf("-%s", flagNameNamespace): complete.PredictNothing, - fmt.Sprintf("-%s", flagNameKubeConfig): complete.PredictFiles("*"), - fmt.Sprintf("-%s", flagNameKubeContext): complete.PredictNothing, - } -} - -// AutocompleteArgs returns the argument predictor for this command. -// Since argument completion is not supported, this will return -// complete.PredictNothing. -func (c *ProxyCommand) AutocompleteArgs() complete.Predictor { - return complete.PredictNothing -} - -func (c *ProxyCommand) Synopsis() string { - return synopsis -} - -func (c *ProxyCommand) Help() string { - return help -} - -const ( - synopsis = "Troubleshoots service mesh issues." - help = ` -Usage: consul-k8s troubleshoot proxy [options] - - Connect to a pod with a proxy and troubleshoots service mesh communication issues. - - Requires a pod and upstream service SNI. - - Examples: - $ consul-k8s troubleshoot proxy -pod pod1 -upstream foo - - where 'pod1' is the pod running a consul proxy and 'foo' is the upstream envoy ID which - can be obtained by running: - $ consul-k8s troubleshoot upstreams [options] -` -) diff --git a/cli/cmd/troubleshoot/proxy/proxy_test.go b/cli/cmd/troubleshoot/proxy/proxy_test.go deleted file mode 100644 index 784cc7a136..0000000000 --- a/cli/cmd/troubleshoot/proxy/proxy_test.go +++ /dev/null @@ -1,72 +0,0 @@ -package proxy - -import ( - "bytes" - "context" - "io" - "os" - "testing" - - "github.com/hashicorp/consul-k8s/cli/common" - "github.com/hashicorp/consul-k8s/cli/common/terminal" - "github.com/hashicorp/go-hclog" - "github.com/stretchr/testify/require" - "k8s.io/client-go/kubernetes/fake" -) - -func TestFlagParsing(t *testing.T) { - cases := map[string]struct { - args []string - out int - }{ - "No args, should fail": { - args: []string{}, - out: 1, - }, - "Nonexistent flag passed, -foo bar, should fail": { - args: []string{"-foo", "bar"}, - out: 1, - }, - "Invalid argument passed, -namespace notaname, should fail": { - args: []string{"-namespace", "notaname"}, - out: 1, - }, - "Cannot pass both -upstream-envoy-id and -upstream-ip flags, should fail": { - args: []string{"-upstream-envoy-id", "1234", "-upstream-ip", "127.0.0.1"}, - out: 1, - }, - "Cannot pass empty -upstream-envoy-id and -upstream-ip flags, should fail": { - args: []string{"-upstream-envoy-id", "-upstream-ip"}, - out: 1, - }, - } - - for name, tc := range cases { - t.Run(name, func(t *testing.T) { - c := setupCommand(new(bytes.Buffer)) - c.kubernetes = fake.NewSimpleClientset() - out := c.Run(tc.args) - require.Equal(t, tc.out, out) - }) - } -} - -func setupCommand(buf io.Writer) *ProxyCommand { - // Log at a test level to standard out. 
- log := hclog.New(&hclog.LoggerOptions{ - Name: "test", - Level: hclog.Debug, - Output: os.Stdout, - }) - - // Setup and initialize the command struct - command := &ProxyCommand{ - BaseCommand: &common.BaseCommand{ - Log: log, - UI: terminal.NewUI(context.Background(), buf), - }, - } - command.init() - - return command -} diff --git a/cli/cmd/troubleshoot/upstreams/upstreams.go b/cli/cmd/troubleshoot/upstreams/upstreams.go deleted file mode 100644 index 7765b170a7..0000000000 --- a/cli/cmd/troubleshoot/upstreams/upstreams.go +++ /dev/null @@ -1,274 +0,0 @@ -package upstreams - -import ( - "fmt" - "net" - "sort" - "strconv" - "strings" - "sync" - - "github.com/hashicorp/consul-k8s/cli/common" - "github.com/hashicorp/consul-k8s/cli/common/flag" - "github.com/hashicorp/consul-k8s/cli/common/terminal" - troubleshoot "github.com/hashicorp/consul/troubleshoot/proxy" - "github.com/posener/complete" - helmCLI "helm.sh/helm/v3/pkg/cli" - "k8s.io/apimachinery/pkg/api/validation" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" -) - -const ( - defaultAdminPort int = 19000 - flagNameKubeConfig = "kubeconfig" - flagNameKubeContext = "context" - flagNameNamespace = "namespace" - flagNamePod = "pod" -) - -type UpstreamsCommand struct { - *common.BaseCommand - - kubernetes kubernetes.Interface - - set *flag.Sets - - flagKubeConfig string - flagKubeContext string - flagNamespace string - - flagPod string - - restConfig *rest.Config - - once sync.Once - help string -} - -// init sets up flags and help text for the command. -func (c *UpstreamsCommand) init() { - c.set = flag.NewSets() - f := c.set.NewSet("Command Options") - - f.StringVar(&flag.StringVar{ - Name: flagNamePod, - Target: &c.flagPod, - Usage: "The pod to port-forward to.", - Aliases: []string{"p"}, - }) - - f = c.set.NewSet("Global Options") - f.StringVar(&flag.StringVar{ - Name: flagNameKubeConfig, - Aliases: []string{"c"}, - Target: &c.flagKubeConfig, - Default: "", - Usage: "Set the path to kubeconfig file.", - }) - f.StringVar(&flag.StringVar{ - Name: flagNameKubeContext, - Target: &c.flagKubeContext, - Default: "", - Usage: "Set the Kubernetes context to use.", - }) - - f.StringVar(&flag.StringVar{ - Name: flagNameNamespace, - Target: &c.flagNamespace, - Usage: "The namespace the pod is in.", - Aliases: []string{"n"}, - }) - - c.help = c.set.Help() -} - -// Run executes the list command. -func (c *UpstreamsCommand) Run(args []string) int { - c.once.Do(c.init) - c.Log.ResetNamed("list") - defer common.CloseWithError(c.BaseCommand) - - // Parse the command line flags. - if err := c.set.Parse(args); err != nil { - c.UI.Output("Error parsing arguments: %v", err.Error(), terminal.WithErrorStyle()) - return 1 - } - - // Validate the command line flags. - if err := c.validateFlags(); err != nil { - c.UI.Output("Invalid argument: %v", err.Error(), terminal.WithErrorStyle()) - return 1 - } - - if c.kubernetes == nil { - if err := c.initKubernetes(); err != nil { - c.UI.Output("Error initializing Kubernetes client: %v", err.Error(), terminal.WithErrorStyle()) - return 1 - } - } - - if err := c.Troubleshoot(); err != nil { - c.UI.Output("Error running troubleshoot: %v", err.Error(), terminal.WithErrorStyle()) - return 1 - } - - return 0 -} - -// validateFlags ensures that the flags passed in by the can be used. 
-func (c *UpstreamsCommand) validateFlags() error { - - if c.flagPod == "" { - return fmt.Errorf("-pod flag is required") - } - - if errs := validation.ValidateNamespaceName(c.flagNamespace, false); c.flagNamespace != "" && len(errs) > 0 { - return fmt.Errorf("invalid namespace name passed for -namespace/-n: %v", strings.Join(errs, "; ")) - } - - return nil -} - -// initKubernetes initializes the Kubernetes client. -func (c *UpstreamsCommand) initKubernetes() (err error) { - settings := helmCLI.New() - - if c.flagKubeConfig != "" { - settings.KubeConfig = c.flagKubeConfig - } - - if c.flagKubeContext != "" { - settings.KubeContext = c.flagKubeContext - } - - if c.restConfig == nil { - if c.restConfig, err = settings.RESTClientGetter().ToRESTConfig(); err != nil { - return fmt.Errorf("error creating Kubernetes REST config %v", err) - } - } - - if c.kubernetes == nil { - if c.kubernetes, err = kubernetes.NewForConfig(c.restConfig); err != nil { - return fmt.Errorf("error creating Kubernetes client %v", err) - } - } - - if c.flagNamespace == "" { - c.flagNamespace = settings.Namespace() - } - - return nil -} - -func (c *UpstreamsCommand) Troubleshoot() error { - pf := common.PortForward{ - Namespace: c.flagNamespace, - PodName: c.flagPod, - RemotePort: defaultAdminPort, - KubeClient: c.kubernetes, - RestConfig: c.restConfig, - } - - endpoint, err := pf.Open(c.Ctx) - if err != nil { - return fmt.Errorf("error opening endpoint: %v", err) - } - defer pf.Close() - - adminAddr, adminPort, err := net.SplitHostPort(endpoint) - if err != nil { - return fmt.Errorf("error splitting hostport: %v", err) - } - - adminAddrIP, err := net.ResolveIPAddr("ip", adminAddr) - if err != nil { - return fmt.Errorf("error resolving ip address: %v", err) - } - - t, err := troubleshoot.NewTroubleshoot(adminAddrIP, adminPort) - if err != nil { - return fmt.Errorf("error creating new troubleshoot: %v", err) - } - - envoyIDs, upstreamIPs, err := t.GetUpstreams() - if err != nil { - return fmt.Errorf("error getting upstreams: %v", err) - } - - c.UI.Output(fmt.Sprintf("Upstreams (explicit upstreams only) (%v)", len(envoyIDs)), terminal.WithHeaderStyle()) - for _, e := range envoyIDs { - c.UI.Output(e) - } - - c.UI.Output(fmt.Sprintf("Upstream IPs (transparent proxy only) (%v)", len(upstreamIPs)), terminal.WithHeaderStyle()) - table := terminal.NewTable("IPs ", "Virtual ", "Cluster Names") - for _, u := range upstreamIPs { - table.AddRow([]string{formatIPs(u.IPs), strconv.FormatBool(u.IsVirtual), formatClusterNames(u.ClusterNames)}, []string{}) - } - c.UI.Table(table) - - c.UI.Output("\nIf you cannot find the upstream address or cluster for a transparent proxy upstream:", terminal.WithInfoStyle()) - c.UI.Output("-> Check intentions: Transparent proxy upstreams are configured based on intentions. Make sure you "+ - "have configured intentions to allow traffic to your upstream.", terminal.WithInfoStyle()) - c.UI.Output("-> To check that the right cluster is being dialed, run a DNS lookup "+ - "for the upstream you are dialing. For example, run `dig backend.svc.consul` to return the IP address for the `backend` service. If the address you get from that is missing "+ - "from the upstream IPs, it means that your proxy may be misconfigured.", terminal.WithInfoStyle()) - - return nil -} - -// AutocompleteFlags returns a mapping of supported flags and autocomplete -// options for this command. The map key for the Flags map should be the -// complete flag such as "-foo" or "--foo". 
-func (c *UpstreamsCommand) AutocompleteFlags() complete.Flags { - return complete.Flags{ - fmt.Sprintf("-%s", flagNameNamespace): complete.PredictNothing, - fmt.Sprintf("-%s", flagNameKubeConfig): complete.PredictFiles("*"), - fmt.Sprintf("-%s", flagNameKubeContext): complete.PredictNothing, - } -} - -// AutocompleteArgs returns the argument predictor for this command. -// Since argument completion is not supported, this will return -// complete.PredictNothing. -func (c *UpstreamsCommand) AutocompleteArgs() complete.Predictor { - return complete.PredictNothing -} - -func (c *UpstreamsCommand) Synopsis() string { - return synopsis -} - -func (c *UpstreamsCommand) Help() string { - return help -} - -func formatIPs(ips []string) string { - return strings.Join(ips, ", ") -} - -func formatClusterNames(names map[string]struct{}) string { - var out []string - for k := range names { - out = append(out, k) - } - sort.Strings(out) - return strings.Join(out, ", ") -} - -const ( - synopsis = "Connect to a pod with a proxy and gather upstream services." - help = ` -Usage: consul-k8s troubleshoot upstreams [options] - - Connect to a pod with a proxy and gather upstream services. - - Requires a pod. - - Examples: - $ consul-k8s troubleshoot upstreams -pod pod1 - - where 'pod1' is the pod running a consul proxy -` -) diff --git a/cli/cmd/troubleshoot/upstreams/upstreams_test.go b/cli/cmd/troubleshoot/upstreams/upstreams_test.go deleted file mode 100644 index f5ddefbd28..0000000000 --- a/cli/cmd/troubleshoot/upstreams/upstreams_test.go +++ /dev/null @@ -1,127 +0,0 @@ -package upstreams - -import ( - "bytes" - "context" - "io" - "os" - "testing" - - "github.com/hashicorp/consul-k8s/cli/common" - "github.com/hashicorp/consul-k8s/cli/common/terminal" - "github.com/hashicorp/go-hclog" - "github.com/stretchr/testify/require" - "k8s.io/client-go/kubernetes/fake" -) - -func TestFlagParsing(t *testing.T) { - cases := map[string]struct { - args []string - out int - }{ - "No args, should fail": { - args: []string{}, - out: 1, - }, - "Nonexistent flag passed, -foo bar, should fail": { - args: []string{"-foo", "bar"}, - out: 1, - }, - "Invalid argument passed, -namespace notaname, should fail": { - args: []string{"-namespace", "notaname"}, - out: 1, - }, - } - - for name, tc := range cases { - t.Run(name, func(t *testing.T) { - c := setupCommand(new(bytes.Buffer)) - c.kubernetes = fake.NewSimpleClientset() - out := c.Run(tc.args) - require.Equal(t, tc.out, out) - }) - } -} - -func TestFormatIPs(t *testing.T) { - t.Parallel() - - cases := []struct { - name string - actual []string - expected string - }{ - { - name: "single IPs", - actual: []string{"1.1.1.1"}, - expected: "1.1.1.1", - }, - - { - name: "several IPs", - actual: []string{"1.1.1.1", "2.2.2.2"}, - expected: "1.1.1.1, 2.2.2.2", - }, - } - for _, c := range cases { - t.Run(c.name, func(t *testing.T) { - got := formatIPs(c.actual) - if c.expected != got { - t.Errorf("expected %v, got %v", c.expected, got) - } - }) - } -} - -func TestFormatClusterNames(t *testing.T) { - cases := []struct { - name string - actual map[string]struct{} - expected string - }{ - { - name: "single cluster", - actual: map[string]struct{}{ - "cluster1": {}, - }, - expected: "cluster1", - }, - { - name: "several clusters", - actual: map[string]struct{}{ - "cluster1": {}, - "cluster2": {}, - "cluster3": {}, - }, - expected: "cluster1, cluster2, cluster3", - }, - } - for _, c := range cases { - t.Run(c.name, func(t *testing.T) { - got := formatClusterNames(c.actual) - if c.expected != got 
{ - t.Errorf("expected %v, got %v", c.expected, got) - } - }) - } -} - -func setupCommand(buf io.Writer) *UpstreamsCommand { - // Log at a test level to standard out. - log := hclog.New(&hclog.LoggerOptions{ - Name: "test", - Level: hclog.Debug, - Output: os.Stdout, - }) - - // Setup and initialize the command struct - command := &UpstreamsCommand{ - BaseCommand: &common.BaseCommand{ - Log: log, - UI: terminal.NewUI(context.Background(), buf), - }, - } - command.init() - - return command -} diff --git a/cli/cmd/uninstall/uninstall.go b/cli/cmd/uninstall/uninstall.go index 06bf4a19b3..07b945bf79 100644 --- a/cli/cmd/uninstall/uninstall.go +++ b/cli/cmd/uninstall/uninstall.go @@ -12,18 +12,9 @@ import ( "github.com/hashicorp/consul-k8s/cli/common/terminal" "github.com/hashicorp/consul-k8s/cli/helm" "github.com/posener/complete" - "golang.org/x/text/cases" - "golang.org/x/text/language" "helm.sh/helm/v3/pkg/action" helmCLI "helm.sh/helm/v3/pkg/cli" - apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - apiext "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" - k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" ) @@ -41,7 +32,7 @@ const ( defaultWipeData = false flagTimeout = "timeout" - defaultTimeout = 10 * time.Minute + defaultTimeout = "10m" flagContext = "context" flagKubeconfig = "kubeconfig" @@ -50,12 +41,7 @@ const ( type Command struct { *common.BaseCommand - helmActionsRunner helm.HelmActionsRunner - - // Configuration for interacting with Kubernetes. - k8sClient kubernetes.Interface - dynamicK8sClient dynamic.Interface - apiextK8sClient apiext.Interface + kubernetes kubernetes.Interface set *flag.Sets @@ -63,7 +49,8 @@ type Command struct { flagReleaseName string flagAutoApprove bool flagWipeData bool - flagTimeout time.Duration + flagTimeout string + timeoutDuration time.Duration flagKubeConfig string flagKubeContext string @@ -99,7 +86,7 @@ func (c *Command) init() { Default: defaultAnyReleaseName, Usage: "Name of the installation. This can be used to uninstall and/or delete the resources of a specific Helm release.", }) - f.DurationVar(&flag.DurationVar{ + f.StringVar(&flag.StringVar{ Name: flagTimeout, Target: &c.flagTimeout, Default: defaultTimeout, @@ -137,10 +124,6 @@ func (c *Command) Run(args []string) int { } }() - if c.helmActionsRunner == nil { - c.helmActionsRunner = &helm.ActionRunner{} - } - if err := c.set.Parse(args); err != nil { c.UI.Output(err.Error(), terminal.WithErrorStyle()) return 1 @@ -153,6 +136,12 @@ func (c *Command) Run(args []string) int { c.UI.Output("Can't set -wipe-data alone. Omit this flag to interactively uninstall, or use it with -auto-approve to wipe all data during the uninstall.", terminal.WithErrorStyle()) return 1 } + duration, err := time.ParseDuration(c.flagTimeout) + if err != nil { + c.UI.Output("unable to parse -%s: %s", flagTimeout, err, terminal.WithErrorStyle()) + return 1 + } + c.timeoutDuration = duration // helmCLI.New() will create a settings object which is used by the Helm Go SDK calls. 
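// A note on the -timeout handling above: the flag is now a plain string that
// is converted by hand. time.ParseDuration accepts Go duration syntax; a
// trivial sketch (nothing here is specific to this change):
//
//	d, err := time.ParseDuration("10m") // d == 10*time.Minute, err == nil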
settings := helmCLI.New() @@ -163,8 +152,20 @@ func (c *Command) Run(args []string) int { settings.KubeContext = c.flagKubeContext } - if err := c.initKubernetes(settings); err != nil { - c.UI.Output("Could not initialize Kubernetes client: %v", err, terminal.WithErrorStyle()) + // Set up the Kubernetes client to use for non Helm SDK calls to the Kubernetes API. + // The Helm SDK will use settings.RESTClientGetter for its calls as well, so this will + // use a consistent method to target the right cluster for both Helm SDK and non Helm SDK calls. + if c.kubernetes == nil { + restConfig, err := settings.RESTClientGetter().ToRESTConfig() + if err != nil { + c.UI.Output("retrieving Kubernetes auth: %v", err, terminal.WithErrorStyle()) + return 1 + } + c.kubernetes, err = kubernetes.NewForConfig(restConfig) + if err != nil { + c.UI.Output("initializing Kubernetes client: %v", err, terminal.WithErrorStyle()) + return 1 + } } // Setup logger to stream Helm library logs. @@ -173,53 +174,61 @@ func (c *Command) Run(args []string) int { c.UI.Output(logMsg, terminal.WithLibraryStyle()) } - actionConfig := new(action.Configuration) - actionConfig, err := helm.InitActionConfig(actionConfig, c.flagNamespace, settings, uiLogger) - if err != nil { - c.UI.Output(err.Error(), terminal.WithErrorStyle()) - return 1 - } + c.UI.Output("Existing Installation", terminal.WithHeaderStyle()) - c.UI.Output(fmt.Sprintf("Checking if %s can be uninstalled", common.ReleaseTypeConsulDemo), terminal.WithHeaderStyle()) - foundConsulDemo, foundDemoReleaseName, foundDemoReleaseNamespace, err := c.findExistingInstallation(&helm.CheckForInstallationsOptions{ - Settings: settings, - ReleaseName: common.ConsulDemoAppReleaseName, - DebugLog: uiLogger, - SkipErrorWhenNotFound: true, - }) + // Search for the Consul installation by calling `helm list`. The result depends on the namespace that has already been specified. + actionConfig := new(action.Configuration) + actionConfig, err = helm.InitActionConfig(actionConfig, c.flagNamespace, settings, uiLogger) if err != nil { c.UI.Output(err.Error(), terminal.WithErrorStyle()) return 1 - } else if !foundConsulDemo { - c.UI.Output(fmt.Sprintf("No existing %s installation found.", common.ReleaseTypeConsulDemo), terminal.WithInfoStyle()) } - found, foundReleaseName, foundReleaseNamespace, err := - c.findExistingInstallation(&helm.CheckForInstallationsOptions{ - Settings: settings, - ReleaseName: common.DefaultReleaseName, - DebugLog: uiLogger, - }) + found, foundReleaseName, foundReleaseNamespace, err := c.findExistingInstallation(settings, uiLogger) if err != nil { c.UI.Output(err.Error(), terminal.WithErrorStyle()) return 1 - } + } else { + c.UI.Output("Existing Consul installation found.", terminal.WithSuccessStyle()) + c.UI.Output("Consul Uninstall Summary", terminal.WithHeaderStyle()) + c.UI.Output("Name: %s", foundReleaseName, terminal.WithInfoStyle()) + c.UI.Output("Namespace: %s", foundReleaseNamespace, terminal.WithInfoStyle()) + + // Prompt for approval to uninstall Helm release. + if !c.flagAutoApprove { + confirmation, err := c.UI.Input(&terminal.Input{ + Prompt: "Proceed with uninstall? (y/N)", + Style: terminal.InfoStyle, + Secret: false, + }) + if err != nil { + c.UI.Output(err.Error(), terminal.WithErrorStyle()) + return 1 + } + if common.Abort(confirmation) { + c.UI.Output("Uninstall aborted.
To learn how to customize the uninstall, run:\nconsul-k8s uninstall --help", terminal.WithInfoStyle()) + return 1 + } + } - if foundConsulDemo { - err = c.uninstallHelmRelease(foundDemoReleaseName, foundDemoReleaseNamespace, common.ReleaseTypeConsulDemo, settings, uiLogger, actionConfig) + // Actually call out to `helm delete`. + actionConfig, err = helm.InitActionConfig(actionConfig, foundReleaseNamespace, settings, uiLogger) if err != nil { c.UI.Output(err.Error(), terminal.WithErrorStyle()) return 1 } - } - c.UI.Output("Checking if Consul can be uninstalled", terminal.WithHeaderStyle()) - if found { - err = c.uninstallHelmRelease(foundReleaseName, foundReleaseNamespace, common.ReleaseTypeConsul, settings, uiLogger, actionConfig) + uninstaller := action.NewUninstall(actionConfig) + uninstaller.Timeout = c.timeoutDuration + res, err := uninstaller.Run(foundReleaseName) if err != nil { - c.UI.Output(err.Error(), terminal.WithErrorStyle()) + c.UI.Output("unable to uninstall: %s", err, terminal.WithErrorStyle()) return 1 } + if res != nil && res.Info != "" { + c.UI.Output("Uninstall result: %s", res.Info, terminal.WithInfoStyle()) + } + c.UI.Output("Successfully uninstalled Consul Helm release", terminal.WithSuccessStyle()) } // If -auto-approve=true and -wipe-data=false, we should only uninstall the release, and skip deleting resources. @@ -310,248 +319,6 @@ func (c *Command) Run(args []string) int { return 0 } -// initKubernetes sets up the kubernetes clients to use for non Helm SDK calls to the Kubernetes API. -// The Helm SDK will use settings.RESTClientGetter for its calls as well, so this will -// use a consistent method to target the right cluster for both Helm SDK and non Helm SDK calls. -func (c *Command) initKubernetes(settings *helmCLI.EnvSettings) error { - restConfig, err := settings.RESTClientGetter().ToRESTConfig() - if err != nil { - return err - } - - if c.k8sClient == nil { - if c.k8sClient, err = kubernetes.NewForConfig(restConfig); err != nil { - return err - } - } - - if c.dynamicK8sClient == nil { - if c.dynamicK8sClient, err = dynamic.NewForConfig(restConfig); err != nil { - return err - } - } - - if c.apiextK8sClient == nil { - if c.apiextK8sClient, err = apiext.NewForConfig(restConfig); err != nil { - return err - } - } - - return nil -} - -func (c *Command) uninstallHelmRelease(releaseName, namespace, releaseType string, settings *helmCLI.EnvSettings, - uiLogger action.DebugLog, actionConfig *action.Configuration) error { - c.UI.Output(fmt.Sprintf("Existing %s installation found.", releaseType), terminal.WithSuccessStyle()) - c.UI.Output(fmt.Sprintf("%s Uninstall Summary", cases.Title(language.English).String(releaseType)), terminal.WithHeaderStyle()) - c.UI.Output("Name: %s", releaseName, terminal.WithInfoStyle()) - c.UI.Output("Namespace: %s", namespace, terminal.WithInfoStyle()) - - // Prompt for approval to uninstall Helm release. - // Actually call out to `helm delete`. - if !c.flagAutoApprove { - confirmation, err := c.UI.Input(&terminal.Input{ - Prompt: "Proceed with uninstall? (y/N)", - Style: terminal.InfoStyle, - Secret: false, - }) - if err != nil { - return err - } - if common.Abort(confirmation) { - c.UI.Output("Uninstall aborted. To learn how to customize the uninstall, run:\nconsul-k8s uninstall --help", terminal.WithInfoStyle()) - return nil - } - } - - // Delete any custom resources managed by Consul. If they cannot be deleted, - // patch the finalizers to be empty on each one. 
- if releaseType == common.ReleaseTypeConsul { - if err := c.removeCustomResources(uiLogger); err != nil { - c.UI.Output("Error removing custom resources: %v", err.Error(), terminal.WithErrorStyle()) - } - } - - actionConfig, err := helm.InitActionConfig(actionConfig, namespace, settings, uiLogger) - if err != nil { - return err - } - - uninstall := action.NewUninstall(actionConfig) - uninstall.Timeout = c.flagTimeout - - res, err := c.helmActionsRunner.Uninstall(uninstall, releaseName) - if err != nil { - return err - } - if res != nil && res.Info != "" { - c.UI.Output("Uninstall result: %s", res.Info, terminal.WithInfoStyle()) - return nil - } - - c.UI.Output(fmt.Sprintf("Successfully uninstalled %s Helm release.", releaseType), terminal.WithSuccessStyle()) - return nil -} - -// removeCustomResources fetches a list of custom resource defintions managed -// by Consul and attempts to delete every custom resource for each definition. -// If the resources cannot be deleted directly, the finalizers on each resource -// are patched to be an empty list, freeing them to be deleted by Kubernetes. -func (c *Command) removeCustomResources(uiLogger action.DebugLog) error { - uiLogger("Deleting custom resources managed by Consul") - - crds, err := c.fetchCustomResourceDefinitions() - if err != nil { - return fmt.Errorf("unable to fetch Custom Resource Definitions for Consul deployment: %v", err) - } - kindToResource := mapCRKindToResourceName(crds) - - crs, err := c.fetchCustomResources(crds) - if err != nil { - return err - } - - if err = c.deleteCustomResources(crs, kindToResource, uiLogger); err != nil { - return err - } - - err = backoff.Retry(func() error { - crs, err := c.fetchCustomResources(crds) - if err != nil { - return err - } - if len(crs) != 0 { - return common.NewDanglingResourceError(fmt.Sprintf("%d custom resources remain after deletion request", len(crs))) - } - return nil - }, backoff.WithMaxRetries(backoff.NewConstantBackOff(time.Second), 5)) - if !common.IsDanglingResourceError(err) { - return err - } - - // Custom resources could not be deleted directly, attempt to patch their finalizers to an empty array. - uiLogger("Patching finalizers on custom resources managed by Consul") - - crs, err = c.fetchCustomResources(crds) - if err != nil { - return err - } - - if err = c.patchCustomResources(crs, kindToResource, uiLogger); err != nil { - return err - } - - err = backoff.Retry(func() error { - crs, err := c.fetchCustomResources(crds) - if err != nil { - return err - } - if len(crs) != 0 { - return common.NewDanglingResourceError(fmt.Sprintf("%d custom resources remain after request to patch finalizers", len(crs))) - } - return nil - }, backoff.WithMaxRetries(backoff.NewConstantBackOff(time.Second), 5)) - if err != nil { - return fmt.Errorf("unable to remove all custom resources managed by Consul. %d custom resources remain and will need to be removed manually. %v", len(crs), err) - } - - return nil -} - -// fetchCustomResourceDefinitions fetches all Custom Resource Definitions managed by Consul. -func (c *Command) fetchCustomResourceDefinitions() (*apiextv1.CustomResourceDefinitionList, error) { - return c.apiextK8sClient.ApiextensionsV1().CustomResourceDefinitions().List(c.Ctx, metav1.ListOptions{ - LabelSelector: "app=consul", - }) -} - -// fetchCustomResources gets a list of all custom resources deployed in the -// cluster that are managed by Consul. 
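// The custom-resource cleanup being removed here cleared stuck finalizers with
// a JSON Patch through the dynamic client; the essential shape, as the deleted
// patchCustomResources further below shows ("gvr" and "cr" stand in for a
// resolved schema.GroupVersionResource and a fetched unstructured resource):
//
//	patch := []byte(`[{"op": "replace", "path": "/metadata/finalizers", "value": []}]`)
//	_, err := dynamicClient.Resource(gvr).Namespace(cr.GetNamespace()).
//		Patch(ctx, cr.GetName(), types.JSONPatchType, patch, metav1.PatchOptions{})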
-func (c *Command) fetchCustomResources(crds *apiextv1.CustomResourceDefinitionList) ([]unstructured.Unstructured, error) { - crs := make([]unstructured.Unstructured, 0) - for _, crd := range crds.Items { - for _, version := range crd.Spec.Versions { - target := schema.GroupVersionResource{ - Group: crd.Spec.Group, - Version: version.Name, - Resource: crd.Spec.Names.Plural, - } - - crList, err := c.dynamicK8sClient.Resource(target).List(c.Ctx, metav1.ListOptions{}) - if err != nil { - return nil, err - } - if crList != nil { - crs = append(crs, crList.Items...) - } - } - } - - return crs, nil -} - -// deleteCustomResources takes a list of unstructured custom resources and -// sends a request to each one to be deleted. -func (c *Command) deleteCustomResources(crs []unstructured.Unstructured, kindToResource map[string]string, uiLogger action.DebugLog) error { - for _, cr := range crs { - gv, err := schema.ParseGroupVersion(cr.GetAPIVersion()) - if err != nil { - return err - } - - target := schema.GroupVersionResource{ - Group: gv.Group, - Version: gv.Version, - Resource: kindToResource[cr.GetKind()], - } - - uiLogger(fmt.Sprintf("Starting delete for \"%s\" %s", cr.GetName(), cr.GetKind())) - err = c.dynamicK8sClient. - Resource(target). - Namespace(cr.GetNamespace()). - Delete(c.Ctx, cr.GetName(), metav1.DeleteOptions{}) - if err != nil && !k8serrors.IsNotFound(err) { - return err - } - } - - return nil -} - -// patchCustomResources takes a list of unstructured custom resources and -// sends a request to each one to patch its finalizers to an empty list. -func (c *Command) patchCustomResources(crs []unstructured.Unstructured, kindToResource map[string]string, uiLogger action.DebugLog) error { - finalizerPatch := []byte(`[{ - "op": "replace", - "path": "/metadata/finalizers", - "value": [] - }]`) - - for _, cr := range crs { - gv, err := schema.ParseGroupVersion(cr.GetAPIVersion()) - if err != nil { - return err - } - - target := schema.GroupVersionResource{ - Group: gv.Group, - Version: gv.Version, - Resource: kindToResource[cr.GetKind()], - } - - uiLogger(fmt.Sprintf("Patching finalizers for \"%s\" %s", cr.GetName(), cr.GetKind())) - _, err = c.dynamicK8sClient. - Resource(target). - Namespace(cr.GetNamespace()). - Patch(c.Ctx, cr.GetName(), types.JSONPatchType, finalizerPatch, metav1.PatchOptions{}) - if err != nil && !k8serrors.IsNotFound(err) { - return err - } - } - - return nil -} - func (c *Command) Help() string { c.once.Do(c.init) s := "Usage: consul-k8s uninstall [flags]" + "\n" + "Uninstall Consul with options to delete data and resources associated with Consul installation." 
+ "\n\n" + c.help @@ -584,18 +351,14 @@ func (c *Command) AutocompleteArgs() complete.Predictor { return complete.PredictNothing } -func (c *Command) findExistingInstallation(options *helm.CheckForInstallationsOptions) (bool, string, string, error) { - found, releaseName, namespace, err := c.helmActionsRunner.CheckForInstallations(options) +func (c *Command) findExistingInstallation(settings *helmCLI.EnvSettings, uiLogger action.DebugLog) (bool, string, string, error) { + releaseName, namespace, err := common.CheckForInstallations(settings, uiLogger) if err != nil { return false, "", "", err - } else if found && (c.flagNamespace == defaultAllNamespaces || c.flagNamespace == namespace) { + } else if c.flagNamespace == defaultAllNamespaces || c.flagNamespace == namespace { return true, releaseName, namespace, nil } else { - var notFoundError error - if !options.SkipErrorWhenNotFound { - notFoundError = fmt.Errorf("could not find %s installation in cluster", common.ReleaseTypeConsul) - } - return false, "", "", notFoundError + return false, "", "", fmt.Errorf("could not find consul installation in namespace %s", c.flagNamespace) } } @@ -603,7 +366,7 @@ func (c *Command) findExistingInstallation(options *helm.CheckForInstallationsOp func (c *Command) deletePVCs(foundReleaseName, foundReleaseNamespace string) error { var pvcNames []string pvcSelector := metav1.ListOptions{LabelSelector: fmt.Sprintf("release=%s", foundReleaseName)} - pvcs, err := c.k8sClient.CoreV1().PersistentVolumeClaims(foundReleaseNamespace).List(c.Ctx, pvcSelector) + pvcs, err := c.kubernetes.CoreV1().PersistentVolumeClaims(foundReleaseNamespace).List(c.Ctx, pvcSelector) if err != nil { return fmt.Errorf("deletePVCs: %s", err) } @@ -612,14 +375,14 @@ func (c *Command) deletePVCs(foundReleaseName, foundReleaseNamespace string) err return nil } for _, pvc := range pvcs.Items { - err := c.k8sClient.CoreV1().PersistentVolumeClaims(foundReleaseNamespace).Delete(c.Ctx, pvc.Name, metav1.DeleteOptions{}) + err := c.kubernetes.CoreV1().PersistentVolumeClaims(foundReleaseNamespace).Delete(c.Ctx, pvc.Name, metav1.DeleteOptions{}) if err != nil { return fmt.Errorf("deletePVCs: error deleting PVC %q: %s", pvc.Name, err) } pvcNames = append(pvcNames, pvc.Name) } err = backoff.Retry(func() error { - pvcs, err := c.k8sClient.CoreV1().PersistentVolumeClaims(foundReleaseNamespace).List(c.Ctx, pvcSelector) + pvcs, err := c.kubernetes.CoreV1().PersistentVolumeClaims(foundReleaseNamespace).List(c.Ctx, pvcSelector) if err != nil { return fmt.Errorf("deletePVCs: %s", err) } @@ -642,7 +405,7 @@ func (c *Command) deletePVCs(foundReleaseName, foundReleaseNamespace string) err // deleteSecrets deletes any secrets that have the label "managed-by" set to "consul-k8s". 
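deletePVCs, and every delete helper that follows it, repeats one pattern: list objects by the release label selector, delete each match, then verify nothing is left. A condensed client-go sketch of the list-and-delete half; the function and parameter names are illustrative:

```go
package cleanup

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// deleteLabeledPVCs removes every PersistentVolumeClaim in the namespace
// that carries the Helm release label, mirroring deletePVCs's selector.
func deleteLabeledPVCs(ctx context.Context, client kubernetes.Interface, namespace, release string) error {
	selector := metav1.ListOptions{LabelSelector: fmt.Sprintf("release=%s", release)}
	pvcs, err := client.CoreV1().PersistentVolumeClaims(namespace).List(ctx, selector)
	if err != nil {
		return err
	}
	for _, pvc := range pvcs.Items {
		if err := client.CoreV1().PersistentVolumeClaims(namespace).Delete(ctx, pvc.Name, metav1.DeleteOptions{}); err != nil {
			return fmt.Errorf("error deleting PVC %q: %s", pvc.Name, err)
		}
	}
	return nil
}
```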
func (c *Command) deleteSecrets(foundReleaseNamespace string) error { - secrets, err := c.k8sClient.CoreV1().Secrets(foundReleaseNamespace).List(c.Ctx, metav1.ListOptions{ + secrets, err := c.kubernetes.CoreV1().Secrets(foundReleaseNamespace).List(c.Ctx, metav1.ListOptions{ LabelSelector: common.CLILabelKey + "=" + common.CLILabelValue, }) if err != nil { @@ -654,7 +417,7 @@ func (c *Command) deleteSecrets(foundReleaseNamespace string) error { } var secretNames []string for _, secret := range secrets.Items { - err := c.k8sClient.CoreV1().Secrets(foundReleaseNamespace).Delete(c.Ctx, secret.Name, metav1.DeleteOptions{}) + err := c.kubernetes.CoreV1().Secrets(foundReleaseNamespace).Delete(c.Ctx, secret.Name, metav1.DeleteOptions{}) if err != nil { return fmt.Errorf("deleteSecrets: error deleting Secret %q: %s", secret.Name, err) } @@ -673,7 +436,7 @@ func (c *Command) deleteSecrets(foundReleaseNamespace string) error { func (c *Command) deleteServiceAccounts(foundReleaseName, foundReleaseNamespace string) error { var serviceAccountNames []string saSelector := metav1.ListOptions{LabelSelector: fmt.Sprintf("release=%s", foundReleaseName)} - sas, err := c.k8sClient.CoreV1().ServiceAccounts(foundReleaseNamespace).List(c.Ctx, saSelector) + sas, err := c.kubernetes.CoreV1().ServiceAccounts(foundReleaseNamespace).List(c.Ctx, saSelector) if err != nil { return fmt.Errorf("deleteServiceAccounts: %s", err) } @@ -682,7 +445,7 @@ func (c *Command) deleteServiceAccounts(foundReleaseName, foundReleaseNamespace return nil } for _, sa := range sas.Items { - err := c.k8sClient.CoreV1().ServiceAccounts(foundReleaseNamespace).Delete(c.Ctx, sa.Name, metav1.DeleteOptions{}) + err := c.kubernetes.CoreV1().ServiceAccounts(foundReleaseNamespace).Delete(c.Ctx, sa.Name, metav1.DeleteOptions{}) if err != nil { return fmt.Errorf("deleteServiceAccounts: error deleting ServiceAccount %q: %s", sa.Name, err) } @@ -701,7 +464,7 @@ func (c *Command) deleteServiceAccounts(foundReleaseName, foundReleaseNamespace func (c *Command) deleteRoles(foundReleaseName, foundReleaseNamespace string) error { var roleNames []string roleSelector := metav1.ListOptions{LabelSelector: fmt.Sprintf("release=%s", foundReleaseName)} - roles, err := c.k8sClient.RbacV1().Roles(foundReleaseNamespace).List(c.Ctx, roleSelector) + roles, err := c.kubernetes.RbacV1().Roles(foundReleaseNamespace).List(c.Ctx, roleSelector) if err != nil { return fmt.Errorf("deleteRoles: %s", err) } @@ -710,7 +473,7 @@ func (c *Command) deleteRoles(foundReleaseName, foundReleaseNamespace string) er return nil } for _, role := range roles.Items { - err := c.k8sClient.RbacV1().Roles(foundReleaseNamespace).Delete(c.Ctx, role.Name, metav1.DeleteOptions{}) + err := c.kubernetes.RbacV1().Roles(foundReleaseNamespace).Delete(c.Ctx, role.Name, metav1.DeleteOptions{}) if err != nil { return fmt.Errorf("deleteRoles: error deleting Role %q: %s", role.Name, err) } @@ -729,7 +492,7 @@ func (c *Command) deleteRoles(foundReleaseName, foundReleaseNamespace string) er func (c *Command) deleteRoleBindings(foundReleaseName, foundReleaseNamespace string) error { var rolebindingNames []string rolebindingSelector := metav1.ListOptions{LabelSelector: fmt.Sprintf("release=%s", foundReleaseName)} - rolebindings, err := c.k8sClient.RbacV1().RoleBindings(foundReleaseNamespace).List(c.Ctx, rolebindingSelector) + rolebindings, err := c.kubernetes.RbacV1().RoleBindings(foundReleaseNamespace).List(c.Ctx, rolebindingSelector) if err != nil { return fmt.Errorf("deleteRoleBindings: %s", err) } @@ -738,7 
+501,7 @@ func (c *Command) deleteRoleBindings(foundReleaseName, foundReleaseNamespace str return nil } for _, rolebinding := range rolebindings.Items { - err := c.k8sClient.RbacV1().RoleBindings(foundReleaseNamespace).Delete(c.Ctx, rolebinding.Name, metav1.DeleteOptions{}) + err := c.kubernetes.RbacV1().RoleBindings(foundReleaseNamespace).Delete(c.Ctx, rolebinding.Name, metav1.DeleteOptions{}) if err != nil { return fmt.Errorf("deleteRoleBindings: error deleting Role %q: %s", rolebinding.Name, err) } @@ -757,7 +520,7 @@ func (c *Command) deleteRoleBindings(foundReleaseName, foundReleaseNamespace str func (c *Command) deleteJobs(foundReleaseName, foundReleaseNamespace string) error { var jobNames []string jobSelector := metav1.ListOptions{LabelSelector: fmt.Sprintf("release=%s", foundReleaseName)} - jobs, err := c.k8sClient.BatchV1().Jobs(foundReleaseNamespace).List(c.Ctx, jobSelector) + jobs, err := c.kubernetes.BatchV1().Jobs(foundReleaseNamespace).List(c.Ctx, jobSelector) if err != nil { return fmt.Errorf("deleteJobs: %s", err) } @@ -766,7 +529,7 @@ func (c *Command) deleteJobs(foundReleaseName, foundReleaseNamespace string) err return nil } for _, job := range jobs.Items { - err := c.k8sClient.BatchV1().Jobs(foundReleaseNamespace).Delete(c.Ctx, job.Name, metav1.DeleteOptions{}) + err := c.kubernetes.BatchV1().Jobs(foundReleaseNamespace).Delete(c.Ctx, job.Name, metav1.DeleteOptions{}) if err != nil { return fmt.Errorf("deleteJobs: error deleting Job %q: %s", job.Name, err) } @@ -785,7 +548,7 @@ func (c *Command) deleteJobs(foundReleaseName, foundReleaseNamespace string) err func (c *Command) deleteClusterRoles(foundReleaseName string) error { var clusterRolesNames []string clusterRolesSelector := metav1.ListOptions{LabelSelector: fmt.Sprintf("release=%s", foundReleaseName)} - clusterRoles, err := c.k8sClient.RbacV1().ClusterRoles().List(c.Ctx, clusterRolesSelector) + clusterRoles, err := c.kubernetes.RbacV1().ClusterRoles().List(c.Ctx, clusterRolesSelector) if err != nil { return fmt.Errorf("deleteClusterRoles: %s", err) } @@ -794,7 +557,7 @@ func (c *Command) deleteClusterRoles(foundReleaseName string) error { return nil } for _, clusterRole := range clusterRoles.Items { - err := c.k8sClient.RbacV1().ClusterRoles().Delete(c.Ctx, clusterRole.Name, metav1.DeleteOptions{}) + err := c.kubernetes.RbacV1().ClusterRoles().Delete(c.Ctx, clusterRole.Name, metav1.DeleteOptions{}) if err != nil { return fmt.Errorf("deleteClusterRoles: error deleting cluster role %q: %s", clusterRole.Name, err) } @@ -813,7 +576,7 @@ func (c *Command) deleteClusterRoles(foundReleaseName string) error { func (c *Command) deleteClusterRoleBindings(foundReleaseName string) error { var clusterRoleBindingsNames []string clusterRoleBindingsSelector := metav1.ListOptions{LabelSelector: fmt.Sprintf("release=%s", foundReleaseName)} - clusterRoleBindings, err := c.k8sClient.RbacV1().ClusterRoleBindings().List(c.Ctx, clusterRoleBindingsSelector) + clusterRoleBindings, err := c.kubernetes.RbacV1().ClusterRoleBindings().List(c.Ctx, clusterRoleBindingsSelector) if err != nil { return fmt.Errorf("deleteClusterRoleBindings: %s", err) } @@ -822,7 +585,7 @@ func (c *Command) deleteClusterRoleBindings(foundReleaseName string) error { return nil } for _, clusterRoleBinding := range clusterRoleBindings.Items { - err := c.k8sClient.RbacV1().ClusterRoleBindings().Delete(c.Ctx, clusterRoleBinding.Name, metav1.DeleteOptions{}) + err := c.kubernetes.RbacV1().ClusterRoleBindings().Delete(c.Ctx, clusterRoleBinding.Name, 
metav1.DeleteOptions{}) if err != nil { return fmt.Errorf("deleteClusterRoleBindings: error deleting cluster role binding %q: %s", clusterRoleBinding.Name, err) } @@ -836,15 +599,3 @@ func (c *Command) deleteClusterRoleBindings(foundReleaseName string) error { } return nil } - -// mapCRKindToResourceName takes the list of custom resource definitions and -// creates a mapping from the "kind" of the CRD to its "resource" name. -// This is needed for the dynamic API which finds custom resources by their -// lowercase, plural resource name. (e.g. "ingressgateways" for "IngressGateway" kind). -func mapCRKindToResourceName(crds *apiextv1.CustomResourceDefinitionList) map[string]string { - kindToResourceName := make(map[string]string) - for _, crd := range crds.Items { - kindToResourceName[crd.Spec.Names.Kind] = crd.Spec.Names.Plural - } - return kindToResourceName -} diff --git a/cli/cmd/uninstall/uninstall_test.go b/cli/cmd/uninstall/uninstall_test.go index 2adc0960ab..8fa92e92b7 100644 --- a/cli/cmd/uninstall/uninstall_test.go +++ b/cli/cmd/uninstall/uninstall_test.go @@ -1,56 +1,29 @@ package uninstall import ( - "bytes" "context" - "errors" "flag" "fmt" - "io" "os" "testing" "github.com/hashicorp/consul-k8s/cli/common" cmnFlag "github.com/hashicorp/consul-k8s/cli/common/flag" "github.com/hashicorp/consul-k8s/cli/common/terminal" - "github.com/hashicorp/consul-k8s/cli/helm" "github.com/hashicorp/go-hclog" "github.com/posener/complete" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "helm.sh/helm/v3/pkg/action" - helmRelease "helm.sh/helm/v3/pkg/release" batchv1 "k8s.io/api/batch/v1" v1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" - apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - apiext "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" - apiextFake "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/dynamic" - dynamicFake "k8s.io/client-go/dynamic/fake" "k8s.io/client-go/kubernetes/fake" ) -var ( - serviceDefaultsGRV = schema.GroupVersionResource{ - Group: "consul.hashicorp.com", - Version: "v1alpha1", - Resource: "servicedefaults", - } - nonConsulGRV = schema.GroupVersionResource{ - Group: "example.com", - Version: "v1", - Resource: "examples", - } -) - func TestDeletePVCs(t *testing.T) { - c := getInitializedCommand(t, nil) - c.k8sClient = fake.NewSimpleClientset() + c := getInitializedCommand(t) + c.kubernetes = fake.NewSimpleClientset() pvc := &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "consul-server-test1", @@ -75,23 +48,23 @@ func TestDeletePVCs(t *testing.T) { }, }, } - _, err := c.k8sClient.CoreV1().PersistentVolumeClaims("default").Create(context.Background(), pvc, metav1.CreateOptions{}) + _, err := c.kubernetes.CoreV1().PersistentVolumeClaims("default").Create(context.Background(), pvc, metav1.CreateOptions{}) require.NoError(t, err) - _, err = c.k8sClient.CoreV1().PersistentVolumeClaims("default").Create(context.Background(), pvc2, metav1.CreateOptions{}) + _, err = c.kubernetes.CoreV1().PersistentVolumeClaims("default").Create(context.Background(), pvc2, metav1.CreateOptions{}) require.NoError(t, err) - _, err = c.k8sClient.CoreV1().PersistentVolumeClaims("default").Create(context.Background(), pvc3, metav1.CreateOptions{}) + _, err = 
c.kubernetes.CoreV1().PersistentVolumeClaims("default").Create(context.Background(), pvc3, metav1.CreateOptions{}) require.NoError(t, err) err = c.deletePVCs("consul", "default") require.NoError(t, err) - pvcs, err := c.k8sClient.CoreV1().PersistentVolumeClaims("default").List(context.Background(), metav1.ListOptions{}) + pvcs, err := c.kubernetes.CoreV1().PersistentVolumeClaims("default").List(context.Background(), metav1.ListOptions{}) require.NoError(t, err) require.Len(t, pvcs.Items, 1) require.Equal(t, pvcs.Items[0].Name, pvc3.Name) } func TestDeleteSecrets(t *testing.T) { - c := getInitializedCommand(t, nil) - c.k8sClient = fake.NewSimpleClientset() + c := getInitializedCommand(t) + c.kubernetes = fake.NewSimpleClientset() secret := &v1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: "consul-test-secret1", @@ -117,15 +90,15 @@ func TestDeleteSecrets(t *testing.T) { }, }, } - _, err := c.k8sClient.CoreV1().Secrets("default").Create(context.Background(), secret, metav1.CreateOptions{}) + _, err := c.kubernetes.CoreV1().Secrets("default").Create(context.Background(), secret, metav1.CreateOptions{}) require.NoError(t, err) - _, err = c.k8sClient.CoreV1().Secrets("default").Create(context.Background(), secret2, metav1.CreateOptions{}) + _, err = c.kubernetes.CoreV1().Secrets("default").Create(context.Background(), secret2, metav1.CreateOptions{}) require.NoError(t, err) - _, err = c.k8sClient.CoreV1().Secrets("default").Create(context.Background(), secret3, metav1.CreateOptions{}) + _, err = c.kubernetes.CoreV1().Secrets("default").Create(context.Background(), secret3, metav1.CreateOptions{}) require.NoError(t, err) err = c.deleteSecrets("default") require.NoError(t, err) - secrets, err := c.k8sClient.CoreV1().Secrets("default").List(context.Background(), metav1.ListOptions{}) + secrets, err := c.kubernetes.CoreV1().Secrets("default").List(context.Background(), metav1.ListOptions{}) require.NoError(t, err) // Only secret1 should have been deleted, secret2 and secret 3 persist since it doesn't have the label. 
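Each of these tests follows the same arrange-act-assert recipe around client-go's fake clientset: seed objects, run the helper, assert on what survives. A condensed sketch; the managed-by label is the one documented for deleteSecrets, though the production code reads it from common.CLILabelKey and common.CLILabelValue:

```go
package uninstall

import (
	"context"
	"testing"

	"github.com/stretchr/testify/require"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

// TestDeleteSecretsSketch shows the shared shape: seed the fake clientset,
// run the helper, then assert on what survives.
func TestDeleteSecretsSketch(t *testing.T) {
	c := getInitializedCommand(t)
	c.kubernetes = fake.NewSimpleClientset(&v1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "labeled-secret",
			Namespace: "default",
			Labels:    map[string]string{"managed-by": "consul-k8s"},
		},
	})

	require.NoError(t, c.deleteSecrets("default"))

	// The labeled secret should be gone after the helper runs.
	secrets, err := c.kubernetes.CoreV1().Secrets("default").List(context.Background(), metav1.ListOptions{})
	require.NoError(t, err)
	require.Len(t, secrets.Items, 0)
}
```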
@@ -133,8 +106,8 @@ func TestDeleteSecrets(t *testing.T) { } func TestDeleteServiceAccounts(t *testing.T) { - c := getInitializedCommand(t, nil) - c.k8sClient = fake.NewSimpleClientset() + c := getInitializedCommand(t) + c.kubernetes = fake.NewSimpleClientset() sa := &v1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ Name: "consul-test-sa1", @@ -159,23 +132,23 @@ func TestDeleteServiceAccounts(t *testing.T) { }, }, } - _, err := c.k8sClient.CoreV1().ServiceAccounts("default").Create(context.Background(), sa, metav1.CreateOptions{}) + _, err := c.kubernetes.CoreV1().ServiceAccounts("default").Create(context.Background(), sa, metav1.CreateOptions{}) require.NoError(t, err) - _, err = c.k8sClient.CoreV1().ServiceAccounts("default").Create(context.Background(), sa2, metav1.CreateOptions{}) + _, err = c.kubernetes.CoreV1().ServiceAccounts("default").Create(context.Background(), sa2, metav1.CreateOptions{}) require.NoError(t, err) - _, err = c.k8sClient.CoreV1().ServiceAccounts("default").Create(context.Background(), sa3, metav1.CreateOptions{}) + _, err = c.kubernetes.CoreV1().ServiceAccounts("default").Create(context.Background(), sa3, metav1.CreateOptions{}) require.NoError(t, err) err = c.deleteServiceAccounts("consul", "default") require.NoError(t, err) - sas, err := c.k8sClient.CoreV1().ServiceAccounts("default").List(context.Background(), metav1.ListOptions{}) + sas, err := c.kubernetes.CoreV1().ServiceAccounts("default").List(context.Background(), metav1.ListOptions{}) require.NoError(t, err) require.Len(t, sas.Items, 1) require.Equal(t, sas.Items[0].Name, sa3.Name) } func TestDeleteRoles(t *testing.T) { - c := getInitializedCommand(t, nil) - c.k8sClient = fake.NewSimpleClientset() + c := getInitializedCommand(t) + c.kubernetes = fake.NewSimpleClientset() role := &rbacv1.Role{ ObjectMeta: metav1.ObjectMeta{ Name: "consul-test-role1", @@ -200,23 +173,23 @@ func TestDeleteRoles(t *testing.T) { }, }, } - _, err := c.k8sClient.RbacV1().Roles("default").Create(context.Background(), role, metav1.CreateOptions{}) + _, err := c.kubernetes.RbacV1().Roles("default").Create(context.Background(), role, metav1.CreateOptions{}) require.NoError(t, err) - _, err = c.k8sClient.RbacV1().Roles("default").Create(context.Background(), role2, metav1.CreateOptions{}) + _, err = c.kubernetes.RbacV1().Roles("default").Create(context.Background(), role2, metav1.CreateOptions{}) require.NoError(t, err) - _, err = c.k8sClient.RbacV1().Roles("default").Create(context.Background(), role3, metav1.CreateOptions{}) + _, err = c.kubernetes.RbacV1().Roles("default").Create(context.Background(), role3, metav1.CreateOptions{}) require.NoError(t, err) err = c.deleteRoles("consul", "default") require.NoError(t, err) - roles, err := c.k8sClient.RbacV1().Roles("default").List(context.Background(), metav1.ListOptions{}) + roles, err := c.kubernetes.RbacV1().Roles("default").List(context.Background(), metav1.ListOptions{}) require.NoError(t, err) require.Len(t, roles.Items, 1) require.Equal(t, roles.Items[0].Name, role3.Name) } func TestDeleteRoleBindings(t *testing.T) { - c := getInitializedCommand(t, nil) - c.k8sClient = fake.NewSimpleClientset() + c := getInitializedCommand(t) + c.kubernetes = fake.NewSimpleClientset() rolebinding := &rbacv1.RoleBinding{ ObjectMeta: metav1.ObjectMeta{ Name: "consul-test-role1", @@ -241,23 +214,23 @@ func TestDeleteRoleBindings(t *testing.T) { }, }, } - _, err := c.k8sClient.RbacV1().RoleBindings("default").Create(context.Background(), rolebinding, metav1.CreateOptions{}) + _, err := 
c.kubernetes.RbacV1().RoleBindings("default").Create(context.Background(), rolebinding, metav1.CreateOptions{}) require.NoError(t, err) - _, err = c.k8sClient.RbacV1().RoleBindings("default").Create(context.Background(), rolebinding2, metav1.CreateOptions{}) + _, err = c.kubernetes.RbacV1().RoleBindings("default").Create(context.Background(), rolebinding2, metav1.CreateOptions{}) require.NoError(t, err) - _, err = c.k8sClient.RbacV1().RoleBindings("default").Create(context.Background(), rolebinding3, metav1.CreateOptions{}) + _, err = c.kubernetes.RbacV1().RoleBindings("default").Create(context.Background(), rolebinding3, metav1.CreateOptions{}) require.NoError(t, err) err = c.deleteRoleBindings("consul", "default") require.NoError(t, err) - rolebindings, err := c.k8sClient.RbacV1().RoleBindings("default").List(context.Background(), metav1.ListOptions{}) + rolebindings, err := c.kubernetes.RbacV1().RoleBindings("default").List(context.Background(), metav1.ListOptions{}) require.NoError(t, err) require.Len(t, rolebindings.Items, 1) require.Equal(t, rolebindings.Items[0].Name, rolebinding3.Name) } func TestDeleteJobs(t *testing.T) { - c := getInitializedCommand(t, nil) - c.k8sClient = fake.NewSimpleClientset() + c := getInitializedCommand(t) + c.kubernetes = fake.NewSimpleClientset() job := &batchv1.Job{ ObjectMeta: metav1.ObjectMeta{ Name: "consul-test-job1", @@ -282,23 +255,23 @@ func TestDeleteJobs(t *testing.T) { }, }, } - _, err := c.k8sClient.BatchV1().Jobs("default").Create(context.Background(), job, metav1.CreateOptions{}) + _, err := c.kubernetes.BatchV1().Jobs("default").Create(context.Background(), job, metav1.CreateOptions{}) require.NoError(t, err) - _, err = c.k8sClient.BatchV1().Jobs("default").Create(context.Background(), job2, metav1.CreateOptions{}) + _, err = c.kubernetes.BatchV1().Jobs("default").Create(context.Background(), job2, metav1.CreateOptions{}) require.NoError(t, err) - _, err = c.k8sClient.BatchV1().Jobs("default").Create(context.Background(), job3, metav1.CreateOptions{}) + _, err = c.kubernetes.BatchV1().Jobs("default").Create(context.Background(), job3, metav1.CreateOptions{}) require.NoError(t, err) err = c.deleteJobs("consul", "default") require.NoError(t, err) - jobs, err := c.k8sClient.BatchV1().Jobs("default").List(context.Background(), metav1.ListOptions{}) + jobs, err := c.kubernetes.BatchV1().Jobs("default").List(context.Background(), metav1.ListOptions{}) require.NoError(t, err) require.Len(t, jobs.Items, 1) require.Equal(t, jobs.Items[0].Name, job3.Name) } func TestDeleteClusterRoles(t *testing.T) { - c := getInitializedCommand(t, nil) - c.k8sClient = fake.NewSimpleClientset() + c := getInitializedCommand(t) + c.kubernetes = fake.NewSimpleClientset() clusterrole := &rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{ Name: "consul-test-clusterrole1", @@ -323,23 +296,23 @@ func TestDeleteClusterRoles(t *testing.T) { }, }, } - _, err := c.k8sClient.RbacV1().ClusterRoles().Create(context.Background(), clusterrole, metav1.CreateOptions{}) + _, err := c.kubernetes.RbacV1().ClusterRoles().Create(context.Background(), clusterrole, metav1.CreateOptions{}) require.NoError(t, err) - _, err = c.k8sClient.RbacV1().ClusterRoles().Create(context.Background(), clusterrole2, metav1.CreateOptions{}) + _, err = c.kubernetes.RbacV1().ClusterRoles().Create(context.Background(), clusterrole2, metav1.CreateOptions{}) require.NoError(t, err) - _, err = c.k8sClient.RbacV1().ClusterRoles().Create(context.Background(), clusterrole3, metav1.CreateOptions{}) + _, err = 
c.kubernetes.RbacV1().ClusterRoles().Create(context.Background(), clusterrole3, metav1.CreateOptions{}) require.NoError(t, err) err = c.deleteClusterRoles("consul") require.NoError(t, err) - clusterroles, err := c.k8sClient.RbacV1().ClusterRoles().List(context.Background(), metav1.ListOptions{}) + clusterroles, err := c.kubernetes.RbacV1().ClusterRoles().List(context.Background(), metav1.ListOptions{}) require.NoError(t, err) require.Len(t, clusterroles.Items, 1) require.Equal(t, clusterroles.Items[0].Name, clusterrole3.Name) } func TestDeleteClusterRoleBindings(t *testing.T) { - c := getInitializedCommand(t, nil) - c.k8sClient = fake.NewSimpleClientset() + c := getInitializedCommand(t) + c.kubernetes = fake.NewSimpleClientset() clusterrolebinding := &rbacv1.ClusterRoleBinding{ ObjectMeta: metav1.ObjectMeta{ Name: "consul-test-clusterrolebinding1", @@ -364,37 +337,32 @@ func TestDeleteClusterRoleBindings(t *testing.T) { }, }, } - _, err := c.k8sClient.RbacV1().ClusterRoleBindings().Create(context.Background(), clusterrolebinding, metav1.CreateOptions{}) + _, err := c.kubernetes.RbacV1().ClusterRoleBindings().Create(context.Background(), clusterrolebinding, metav1.CreateOptions{}) require.NoError(t, err) - _, err = c.k8sClient.RbacV1().ClusterRoleBindings().Create(context.Background(), clusterrolebinding2, metav1.CreateOptions{}) + _, err = c.kubernetes.RbacV1().ClusterRoleBindings().Create(context.Background(), clusterrolebinding2, metav1.CreateOptions{}) require.NoError(t, err) - _, err = c.k8sClient.RbacV1().ClusterRoleBindings().Create(context.Background(), clusterrolebinding3, metav1.CreateOptions{}) + _, err = c.kubernetes.RbacV1().ClusterRoleBindings().Create(context.Background(), clusterrolebinding3, metav1.CreateOptions{}) require.NoError(t, err) err = c.deleteClusterRoleBindings("consul") require.NoError(t, err) - clusterrolebindings, err := c.k8sClient.RbacV1().ClusterRoleBindings().List(context.Background(), metav1.ListOptions{}) + clusterrolebindings, err := c.kubernetes.RbacV1().ClusterRoleBindings().List(context.Background(), metav1.ListOptions{}) require.NoError(t, err) require.Len(t, clusterrolebindings.Items, 1) require.Equal(t, clusterrolebindings.Items[0].Name, clusterrolebinding3.Name) } // getInitializedCommand sets up a command struct for tests. 
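The io.Writer parameter being dropped in the next hunk existed so tests could capture CLI output by backing the terminal UI with a buffer. A sketch of that removed pattern; the Output message here is illustrative:

```go
package uninstall

import (
	"bytes"
	"testing"

	"github.com/stretchr/testify/require"
)

// TestOutputCaptureSketch backs the command's UI with a buffer so the test
// can assert against everything the command printed.
func TestOutputCaptureSketch(t *testing.T) {
	buf := new(bytes.Buffer)
	c := getInitializedCommand(t, buf) // the two-argument variant removed below

	c.UI.Output("Uninstall aborted.")
	require.Contains(t, buf.String(), "Uninstall aborted.")
}
```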
-func getInitializedCommand(t *testing.T, buf io.Writer) *Command { +func getInitializedCommand(t *testing.T) *Command { t.Helper() log := hclog.New(&hclog.LoggerOptions{ Name: "cli", Level: hclog.Info, Output: os.Stdout, }) - var ui terminal.UI - if buf != nil { - ui = terminal.NewUI(context.Background(), buf) - } else { - ui = terminal.NewBasicUI(context.Background()) - } + baseCommand := &common.BaseCommand{ Log: log, - UI: ui, + UI: terminal.NewBasicUI(context.TODO()), } c := &Command{ @@ -406,7 +374,7 @@ func getInitializedCommand(t *testing.T, buf io.Writer) *Command { func TestTaskCreateCommand_AutocompleteFlags(t *testing.T) { t.Parallel() - cmd := getInitializedCommand(t, nil) + cmd := getInitializedCommand(t) predictor := cmd.AutocompleteFlags() @@ -429,444 +397,7 @@ func TestTaskCreateCommand_AutocompleteFlags(t *testing.T) { } func TestTaskCreateCommand_AutocompleteArgs(t *testing.T) { - cmd := getInitializedCommand(t, nil) + cmd := getInitializedCommand(t) c := cmd.AutocompleteArgs() assert.Equal(t, complete.PredictNothing, c) } - -func TestFetchCustomResources(t *testing.T) { - cr := unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "consul.hashicorp.com/v1alpha1", - "kind": "ServiceDefaults", - "metadata": map[string]interface{}{ - "name": "server", - "namespace": "default", - }, - }, - } - nonConsulCR1 := unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "example.com/v1", - "kind": "Example", - "metadata": map[string]interface{}{ - "name": "example-resource", - "namespace": "default", - }, - }, - } - nonConsulCR2 := unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "example.com/v1", - "kind": "Example", - "metadata": map[string]interface{}{ - "name": "example-resource", - "namespace": "other", - }, - }, - } - - c := getInitializedCommand(t, nil) - c.k8sClient = fake.NewSimpleClientset(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "other"}}) - c.apiextK8sClient, c.dynamicK8sClient = createClientsWithCrds() - - _, err := c.dynamicK8sClient.Resource(serviceDefaultsGRV).Namespace("default").Create(context.Background(), &cr, metav1.CreateOptions{}) - require.NoError(t, err) - _, err = c.dynamicK8sClient.Resource(nonConsulGRV).Namespace("default").Create(context.Background(), &nonConsulCR1, metav1.CreateOptions{}) - require.NoError(t, err) - _, err = c.dynamicK8sClient.Resource(nonConsulGRV).Namespace("other").Create(context.Background(), &nonConsulCR2, metav1.CreateOptions{}) - require.NoError(t, err) - - crds, err := c.fetchCustomResourceDefinitions() - require.NoError(t, err) - - actual, err := c.fetchCustomResources(crds) - require.NoError(t, err) - require.Len(t, actual, 1) - require.Contains(t, actual, cr) - require.NotContains(t, actual, nonConsulCR1) - require.NotContains(t, actual, nonConsulCR2) -} - -func TestDeleteCustomResources(t *testing.T) { - cr := unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "consul.hashicorp.com/v1alpha1", - "kind": "ServiceDefaults", - "metadata": map[string]interface{}{ - "name": "server", - "namespace": "default", - }, - }, - } - - c := getInitializedCommand(t, nil) - c.apiextK8sClient, c.dynamicK8sClient = createClientsWithCrds() - - _, err := c.dynamicK8sClient.Resource(serviceDefaultsGRV).Namespace("default").Create(context.Background(), &cr, metav1.CreateOptions{}) - require.NoError(t, err) - - crds, err := c.fetchCustomResourceDefinitions() - require.NoError(t, err) - - actual, err := c.fetchCustomResources(crds) - 
require.NoError(t, err) - require.Len(t, actual, 1) - - err = c.deleteCustomResources([]unstructured.Unstructured{cr}, mapCRKindToResourceName(crds), fakeUILogger) - require.NoError(t, err) - - actual, err = c.fetchCustomResources(crds) - require.NoError(t, err) - require.Len(t, actual, 0) -} - -func TestPatchCustomResources(t *testing.T) { - cr := unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "consul.hashicorp.com/v1alpha1", - "kind": "ServiceDefaults", - "metadata": map[string]interface{}{ - "name": "server", - "namespace": "default", - }, - }, - } - cr.SetFinalizers([]string{"consul.hashicorp.com"}) - - c := getInitializedCommand(t, nil) - c.apiextK8sClient, c.dynamicK8sClient = createClientsWithCrds() - - _, err := c.dynamicK8sClient.Resource(serviceDefaultsGRV).Namespace("default").Create(context.Background(), &cr, metav1.CreateOptions{}) - require.NoError(t, err) - - crds, err := c.fetchCustomResourceDefinitions() - require.NoError(t, err) - - err = c.patchCustomResources([]unstructured.Unstructured{cr}, mapCRKindToResourceName(crds), fakeUILogger) - require.NoError(t, err) - - actual, err := c.fetchCustomResources(crds) - require.NoError(t, err) - require.Len(t, actual, 1) - require.Len(t, actual[0].GetFinalizers(), 0) -} - -func TestMapKindToResource(t *testing.T) { - crds := apiextv1.CustomResourceDefinitionList{ - Items: []apiextv1.CustomResourceDefinition{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "servicedefaults.consul.hashicorp.com", - Labels: map[string]string{ - "app": "consul", - }, - }, - Spec: apiextv1.CustomResourceDefinitionSpec{ - Group: "consul.hashicorp.com", - Names: apiextv1.CustomResourceDefinitionNames{ - Plural: "servicedefaults", - Kind: "ServiceDefaults", - }, - Scope: "Namespaced", - Versions: []apiextv1.CustomResourceDefinitionVersion{ - { - Name: "v1alpha1", - }, - }, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "examples.example.com", - }, - Spec: apiextv1.CustomResourceDefinitionSpec{ - Group: "example.com", - Names: apiextv1.CustomResourceDefinitionNames{ - Plural: "examples", - Kind: "Example", - }, - Scope: "Namespaced", - Versions: []apiextv1.CustomResourceDefinitionVersion{ - { - Name: "v1", - }, - }, - }, - }, - }, - } - - expected := map[string]string{ - "ServiceDefaults": "servicedefaults", - "Example": "examples", - } - - actual := mapCRKindToResourceName(&crds) - require.Equal(t, expected, actual) -} - -func TestUninstall(t *testing.T) { - cases := map[string]struct { - input []string - messages []string - helmActionsRunner *helm.MockActionRunner - preProcessingFunc func() - expectedReturnCode int - expectCheckedForConsulInstallations bool - expectCheckedForConsulDemoInstallations bool - expectConsulUninstalled bool - expectConsulDemoUninstalled bool - }{ - "uninstall when consul installation exists returns success": { - input: []string{}, - messages: []string{ - "\n==> Checking if Consul demo application can be uninstalled\n No existing Consul demo application installation found.\n", - "\n==> Checking if Consul can be uninstalled\n ✓ Existing Consul installation found.\n", - "\n==> Consul Uninstall Summary\n Name: consul\n Namespace: consul\n --> Deleting custom resources managed by Consul\n --> Starting delete for \"server\" ServiceDefaults\n ✓ Successfully uninstalled Consul Helm release.\n ✓ Skipping deleting PVCs, secrets, and service accounts.\n", - }, - helmActionsRunner: &helm.MockActionRunner{ - CheckForInstallationsFunc: func(options *helm.CheckForInstallationsOptions) (bool, string, string, 
error) { - if options.ReleaseName == "consul" { - return true, "consul", "consul", nil - } else { - return false, "", "", nil - } - }, - }, - expectedReturnCode: 0, - expectCheckedForConsulInstallations: true, - expectCheckedForConsulDemoInstallations: true, - expectConsulUninstalled: true, - expectConsulDemoUninstalled: false, - }, - "uninstall when consul installation does not exist returns error": { - input: []string{}, - messages: []string{ - "\n==> Checking if Consul demo application can be uninstalled\n No existing Consul demo application installation found.\n ! could not find Consul installation in cluster\n", - }, - helmActionsRunner: &helm.MockActionRunner{ - CheckForInstallationsFunc: func(options *helm.CheckForInstallationsOptions) (bool, string, string, error) { - if options.ReleaseName == "consul" { - return false, "", "", nil - } else { - return false, "", "", nil - } - }, - }, - expectedReturnCode: 1, - expectCheckedForConsulInstallations: true, - expectCheckedForConsulDemoInstallations: true, - expectConsulUninstalled: false, - expectConsulDemoUninstalled: false, - }, - "uninstall with -wipe-data flag processes other resource and returns success": { - input: []string{ - "-wipe-data", - }, - messages: []string{ - "\n==> Checking if Consul demo application can be uninstalled\n No existing Consul demo application installation found.\n", - "\n==> Checking if Consul can be uninstalled\n ✓ Existing Consul installation found.\n", - "\n==> Consul Uninstall Summary\n Name: consul\n Namespace: consul\n --> Deleting custom resources managed by Consul\n --> Starting delete for \"server\" ServiceDefaults\n ✓ Successfully uninstalled Consul Helm release.\n", - "\n==> Other Consul Resources\n Deleting data for installation: \n Name: consul\n Namespace consul\n ✓ No PVCs found.\n ✓ No Consul secrets found.\n ✓ No Consul service accounts found.\n ✓ No Consul roles found.\n ✓ No Consul rolebindings found.\n ✓ No Consul jobs found.\n ✓ No Consul cluster roles found.\n ✓ No Consul cluster role bindings found.\n", - }, - helmActionsRunner: &helm.MockActionRunner{ - CheckForInstallationsFunc: func(options *helm.CheckForInstallationsOptions) (bool, string, string, error) { - if options.ReleaseName == "consul" { - return true, "consul", "consul", nil - } else { - return false, "", "", nil - } - }, - }, - expectedReturnCode: 0, - expectCheckedForConsulInstallations: true, - expectCheckedForConsulDemoInstallations: true, - expectConsulUninstalled: true, - expectConsulDemoUninstalled: false, - }, - "uninstall when both consul and consul demo installations exist returns success": { - input: []string{}, - messages: []string{ - "\n==> Checking if Consul demo application can be uninstalled\n ✓ Existing Consul demo application installation found.\n", - "\n==> Consul Demo Application Uninstall Summary\n Name: consul-demo\n Namespace: consul-demo\n ✓ Successfully uninstalled Consul demo application Helm release.\n", - "\n==> Checking if Consul can be uninstalled\n ✓ Existing Consul installation found.\n", - "\n==> Consul Uninstall Summary\n Name: consul\n Namespace: consul\n --> Deleting custom resources managed by Consul\n --> Starting delete for \"server\" ServiceDefaults\n ✓ Successfully uninstalled Consul Helm release.\n ✓ Skipping deleting PVCs, secrets, and service accounts.\n", - }, - helmActionsRunner: &helm.MockActionRunner{ - CheckForInstallationsFunc: func(options *helm.CheckForInstallationsOptions) (bool, string, string, error) { - if options.ReleaseName == "consul" { - return true, "consul", 
"consul", nil - } else { - return true, "consul-demo", "consul-demo", nil - } - }, - }, - expectedReturnCode: 0, - expectCheckedForConsulInstallations: true, - expectCheckedForConsulDemoInstallations: true, - expectConsulUninstalled: true, - expectConsulDemoUninstalled: true, - }, - "uninstall when consul uninstall errors returns error": { - input: []string{}, - messages: []string{ - "\n==> Checking if Consul demo application can be uninstalled\n No existing Consul demo application installation found.\n", - "\n==> Checking if Consul can be uninstalled\n ✓ Existing Consul installation found.\n", - "\n==> Consul Uninstall Summary\n Name: consul\n Namespace: consul\n --> Deleting custom resources managed by Consul\n --> Starting delete for \"server\" ServiceDefaults\n ! Helm returned an error.\n", - }, - helmActionsRunner: &helm.MockActionRunner{ - CheckForInstallationsFunc: func(options *helm.CheckForInstallationsOptions) (bool, string, string, error) { - if options.ReleaseName == "consul" { - return true, "consul", "consul", nil - } else { - return false, "", "", nil - } - }, - UninstallFunc: func(uninstall *action.Uninstall, name string) (*helmRelease.UninstallReleaseResponse, error) { - return nil, errors.New("Helm returned an error.") - }, - }, - expectedReturnCode: 1, - expectCheckedForConsulInstallations: true, - expectCheckedForConsulDemoInstallations: true, - expectConsulUninstalled: false, - expectConsulDemoUninstalled: false, - }, - "uninstall when consul demo is installed consul demo uninstall errors returns error": { - input: []string{}, - messages: []string{ - "\n==> Checking if Consul demo application can be uninstalled\n ✓ Existing Consul demo application installation found.\n", - "\n==> Consul Demo Application Uninstall Summary\n Name: consul-demo\n Namespace: consul-demo\n ! Helm returned an error.\n", - }, - helmActionsRunner: &helm.MockActionRunner{ - CheckForInstallationsFunc: func(options *helm.CheckForInstallationsOptions) (bool, string, string, error) { - if options.ReleaseName == "consul" { - return true, "consul", "consul", nil - } else { - return true, "consul-demo", "consul-demo", nil - } - }, - UninstallFunc: func(uninstall *action.Uninstall, name string) (*helmRelease.UninstallReleaseResponse, error) { - if name == "consul" { - return &helmRelease.UninstallReleaseResponse{}, nil - } else { - return nil, errors.New("Helm returned an error.") - } - }, - }, - expectedReturnCode: 1, - expectCheckedForConsulInstallations: true, - expectCheckedForConsulDemoInstallations: true, - expectConsulUninstalled: false, - expectConsulDemoUninstalled: false, - }, - } - - cr := unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "consul.hashicorp.com/v1alpha1", - "kind": "ServiceDefaults", - "metadata": map[string]interface{}{ - "name": "server", - "namespace": "default", - }, - }, - } - cr.SetFinalizers([]string{"consul.hashicorp.com"}) - - for name, tc := range cases { - t.Run(name, func(t *testing.T) { - buf := new(bytes.Buffer) - c := getInitializedCommand(t, buf) - - c.k8sClient = fake.NewSimpleClientset() - - c.apiextK8sClient, c.dynamicK8sClient = createClientsWithCrds() - _, err := c.dynamicK8sClient.Resource(serviceDefaultsGRV).Namespace("default").Create(context.Background(), &cr, metav1.CreateOptions{}) - require.NoError(t, err) - - mock := tc.helmActionsRunner - c.helmActionsRunner = mock - - if tc.preProcessingFunc != nil { - tc.preProcessingFunc() - } - input := append([]string{ - "--auto-approve", - }, tc.input...) 
- returnCode := c.Run(input) - output := buf.String() - require.Equal(t, tc.expectedReturnCode, returnCode, output) - - require.Equal(t, tc.expectCheckedForConsulInstallations, mock.CheckedForConsulInstallations) - require.Equal(t, tc.expectCheckedForConsulDemoInstallations, mock.CheckedForConsulDemoInstallations) - require.Equal(t, tc.expectConsulUninstalled, mock.ConsulUninstalled) - require.Equal(t, tc.expectConsulDemoUninstalled, mock.ConsulDemoUninstalled) - for _, msg := range tc.messages { - require.Contains(t, output, msg) - } - - if tc.expectConsulUninstalled { - crds, err := c.fetchCustomResourceDefinitions() - require.NoError(t, err) - crs, err := c.fetchCustomResources(crds) - require.NoError(t, err) - require.Len(t, crs, 0) - } - }) - } -} - -func createClientsWithCrds() (apiext.Interface, dynamic.Interface) { - grvToListKind := map[schema.GroupVersionResource]string{ - serviceDefaultsGRV: "ServiceDefaultsList", - nonConsulGRV: "ExamplesList", - } - crds := apiextv1.CustomResourceDefinitionList{ - Items: []apiextv1.CustomResourceDefinition{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "servicedefaults.consul.hashicorp.com", - Labels: map[string]string{ - "app": "consul", - }, - }, - Spec: apiextv1.CustomResourceDefinitionSpec{ - Group: "consul.hashicorp.com", - Names: apiextv1.CustomResourceDefinitionNames{ - Plural: "servicedefaults", - Kind: "ServiceDefaults", - }, - Scope: "Namespaced", - Versions: []apiextv1.CustomResourceDefinitionVersion{ - { - Name: "v1alpha1", - }, - }, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "examples.example.com", - }, - Spec: apiextv1.CustomResourceDefinitionSpec{ - Group: "example.com", - Names: apiextv1.CustomResourceDefinitionNames{ - Plural: "examples", - Kind: "Example", - }, - Scope: "Namespaced", - Versions: []apiextv1.CustomResourceDefinitionVersion{ - { - Name: "v1", - }, - }, - }, - }, - }, - } - return apiextFake.NewSimpleClientset(&crds), dynamicFake.NewSimpleDynamicClientWithCustomListKinds(runtime.NewScheme(), grvToListKind) -} - -func fakeUILogger(s string, i ...interface{}) {} diff --git a/cli/cmd/upgrade/upgrade.go b/cli/cmd/upgrade/upgrade.go index 4c962c47b5..e1bb744ce1 100644 --- a/cli/cmd/upgrade/upgrade.go +++ b/cli/cmd/upgrade/upgrade.go @@ -3,7 +3,6 @@ package upgrade import ( "errors" "fmt" - "net/http" "os" "strings" "sync" @@ -15,14 +14,12 @@ import ( "github.com/hashicorp/consul-k8s/cli/common/terminal" "github.com/hashicorp/consul-k8s/cli/config" "github.com/hashicorp/consul-k8s/cli/helm" - "github.com/hashicorp/consul-k8s/cli/preset" "github.com/posener/complete" - + "helm.sh/helm/v3/pkg/action" helmCLI "helm.sh/helm/v3/pkg/cli" "helm.sh/helm/v3/pkg/cli/values" "helm.sh/helm/v3/pkg/getter" "k8s.io/client-go/kubernetes" - "k8s.io/utils/strings/slices" ) const ( @@ -51,39 +48,26 @@ const ( flagNameContext = "context" flagNameKubeconfig = "kubeconfig" - - flagNameDemo = "demo" - defaultDemo = false - - flagNameHCPResourceID = "hcp-resource-id" - - consulDemoChartPath = "demo" ) type Command struct { *common.BaseCommand - helmActionsRunner helm.HelmActionsRunner - kubernetes kubernetes.Interface - httpClient *http.Client - set *flag.Sets - flagPreset string - flagDryRun bool - flagAutoApprove bool - flagValueFiles []string - flagSetStringValues []string - flagSetValues []string - flagFileValues []string - flagTimeout string - timeoutDuration time.Duration - flagVerbose bool - flagWait bool - flagNameHCPResourceID string - flagDemo bool + flagPreset string + flagDryRun bool + flagAutoApprove bool + 
flagValueFiles []string + flagSetStringValues []string + flagSetValues []string + flagFileValues []string + flagTimeout string + timeoutDuration time.Duration + flagVerbose bool + flagWait bool flagKubeConfig string flagKubeContext string @@ -93,6 +77,12 @@ type Command struct { } func (c *Command) init() { + // Store all the possible preset values in 'presetList'. Printed in the help message. + var presetList []string + for name := range config.Presets { + presetList = append(presetList, name) + } + c.set = flag.NewSets() f := c.set.NewSet("Command Options") f.BoolVar(&flag.BoolVar{ @@ -117,7 +107,7 @@ func (c *Command) init() { Name: flagNamePreset, Target: &c.flagPreset, Default: defaultPreset, - Usage: fmt.Sprintf("Use an upgrade preset, one of %s. Defaults to none", strings.Join(preset.Presets, ", ")), + Usage: fmt.Sprintf("Use an upgrade preset, one of %s. Defaults to none", strings.Join(presetList, ", ")), }) f.StringSliceVar(&flag.StringSliceVar{ Name: flagNameSetValues, @@ -169,19 +159,6 @@ func (c *Command) init() { Default: "", Usage: "Set the Kubernetes context to use.", }) - f.StringVar(&flag.StringVar{ - Name: flagNameHCPResourceID, - Target: &c.flagNameHCPResourceID, - Default: "", - Usage: "Set the HCP resource_id when using the 'cloud' preset.", - }) - f.BoolVar(&flag.BoolVar{ - Name: flagNameDemo, - Target: &c.flagDemo, - Default: defaultDemo, - Usage: fmt.Sprintf("Install %s immediately after installing %s.", - common.ReleaseTypeConsulDemo, common.ReleaseTypeConsul), - }) c.help = c.set.Help() } @@ -192,10 +169,6 @@ func (c *Command) Run(args []string) int { defer common.CloseWithError(c.BaseCommand) - if c.helmActionsRunner == nil { - c.helmActionsRunner = &helm.ActionRunner{} - } - err := c.validateFlags(args) if err != nil { c.UI.Output(err.Error()) @@ -243,144 +216,99 @@ func (c *Command) Run(args []string) int { c.UI.Output("Checking if Consul can be upgraded", terminal.WithHeaderStyle()) uiLogger := c.createUILogger() - found, consulName, consulNamespace, err := c.helmActionsRunner.CheckForInstallations(&helm.CheckForInstallationsOptions{ - Settings: settings, - ReleaseName: common.DefaultReleaseName, - DebugLog: uiLogger, - }) - + name, namespace, err := common.CheckForInstallations(settings, uiLogger) if err != nil { - c.UI.Output(err.Error(), terminal.WithErrorStyle()) - return 1 - } - if !found { c.UI.Output("Cannot upgrade Consul. Existing Consul installation not found. Use the command `consul-k8s install` to install Consul.", terminal.WithErrorStyle()) return 1 - } else { - c.UI.Output("Existing %s installation found to be upgraded.", common.ReleaseTypeConsul, terminal.WithSuccessStyle()) - c.UI.Output("Name: %s\nNamespace: %s", consulName, consulNamespace, terminal.WithInfoStyle()) } + c.UI.Output("Existing Consul installation found to be upgraded.", terminal.WithSuccessStyle()) + c.UI.Output("Name: %s\nNamespace: %s", name, namespace, terminal.WithInfoStyle()) - c.UI.Output(fmt.Sprintf("Checking if %s can be upgraded", common.ReleaseTypeConsulDemo), terminal.WithHeaderStyle()) - // Ensure there is not an existing Consul demo installation which would cause a conflict. 
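CheckForInstallations, which both of these call sites wrap, amounts to a Helm list action filtered by release name. A hedged sketch of that idea with the upstream SDK, since the helper's real implementation lives in the repo's helm and common packages:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"helm.sh/helm/v3/pkg/action"
	helmCLI "helm.sh/helm/v3/pkg/cli"
)

// findRelease reports whether a release with the given name exists in any
// namespace, returning its namespace when found.
func findRelease(settings *helmCLI.EnvSettings, name string) (bool, string, error) {
	cfg := new(action.Configuration)
	if err := cfg.Init(settings.RESTClientGetter(), "", os.Getenv("HELM_DRIVER"), log.Printf); err != nil {
		return false, "", err
	}

	lister := action.NewList(cfg)
	lister.AllNamespaces = true // search every namespace, like the demo check here

	releases, err := lister.Run()
	if err != nil {
		return false, "", err
	}
	for _, rel := range releases {
		if rel.Name == name {
			return true, rel.Namespace, nil
		}
	}
	return false, "", nil
}

func main() {
	found, ns, err := findRelease(helmCLI.New(), "consul-demo")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(found, ns)
}
```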
- foundDemo, demoName, demoNamespace, _ := c.helmActionsRunner.CheckForInstallations(&helm.CheckForInstallationsOptions{ - Settings: settings, - ReleaseName: common.ConsulDemoAppReleaseName, - DebugLog: uiLogger, - }) - if foundDemo { - c.UI.Output("Existing %s installation found to be upgraded.", common.ReleaseTypeConsulDemo, terminal.WithSuccessStyle()) - c.UI.Output("Name: %s\nNamespace: %s", demoName, demoNamespace, terminal.WithInfoStyle()) - } else { - if c.flagDemo { - c.UI.Output("No existing %s installation found, but -demo flag provided. %s will be installed in namespace %s.", - common.ConsulDemoAppReleaseName, common.ConsulDemoAppReleaseName, consulNamespace, terminal.WithInfoStyle()) - } else { - c.UI.Output("No existing %s installation found.", common.ReleaseTypeConsulDemo, terminal.WithInfoStyle()) - } - } - - // Handle preset, value files, and set values logic. - chartValues, err := c.mergeValuesFlagsWithPrecedence(settings, consulNamespace) + chart, err := helm.LoadChart(consulChart.ConsulHelmChart, common.TopLevelChartDirName) if err != nil { c.UI.Output(err.Error(), terminal.WithErrorStyle()) return 1 } + c.UI.Output("Loaded charts", terminal.WithSuccessStyle()) - // Without informing the user, default global.name to consul if it hasn't been set already. We don't allow setting - // the release name, and since that is hardcoded to "consul", setting global.name to "consul" makes it so resources - // aren't double prefixed with "consul-consul-...". - chartValues = common.MergeMaps(config.ConvertToMap(config.GlobalNameConsul), chartValues) - - timeout, err := time.ParseDuration(c.flagTimeout) + currentChartValues, err := helm.FetchChartValues(namespace, name, settings, uiLogger) if err != nil { c.UI.Output(err.Error(), terminal.WithErrorStyle()) return 1 } - options := &helm.UpgradeOptions{ - ReleaseName: consulName, - ReleaseType: common.ReleaseTypeConsul, - ReleaseTypeName: common.ReleaseTypeConsul, - Namespace: consulNamespace, - Values: chartValues, - Settings: settings, - EmbeddedChart: consulChart.ConsulHelmChart, - ChartDirName: common.TopLevelChartDirName, - UILogger: uiLogger, - DryRun: c.flagDryRun, - AutoApprove: c.flagAutoApprove, - Wait: c.flagWait, - Timeout: timeout, - UI: c.UI, - HelmActionsRunner: c.helmActionsRunner, - } - err = helm.UpgradeHelmRelease(options) + // Handle preset, value files, and set values logic. + chartValues, err := c.mergeValuesFlagsWithPrecedence(settings) if err != nil { c.UI.Output(err.Error(), terminal.WithErrorStyle()) return 1 } - timeout, err = time.ParseDuration(c.flagTimeout) - if err != nil { - c.UI.Output(err.Error(), terminal.WithErrorStyle()) + // Without informing the user, default global.name to consul if it hasn't been set already. We don't allow setting + // the release name, and since that is hardcoded to "consul", setting global.name to "consul" makes it so resources + // aren't double prefixed with "consul-consul-...". + chartValues = common.MergeMaps(config.Convert(config.GlobalNameConsul), chartValues) + + // Print out the upgrade summary. 
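The summary printed next hinges on diffing the current and proposed value maps (common.Diff plus the printDiff helper added near the end of this file's diff). A standalone sketch of the same idea with go-cmp, which is an assumption here; the repo's common.Diff may be implemented differently:

```go
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

func main() {
	current := map[string]interface{}{
		"global": map[string]interface{}{"name": "consul", "tls": false},
	}
	upgraded := map[string]interface{}{
		"global": map[string]interface{}{"name": "consul", "tls": true},
	}

	// cmp.Diff marks removed values with "-" and added values with "+",
	// the same prefix convention printDiff colorizes.
	fmt.Println(cmp.Diff(current, upgraded))
}
```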
+ if err = c.printDiff(currentChartValues, chartValues); err != nil { + c.UI.Output("Could not print the difference between current and upgraded charts: %v", err, terminal.WithErrorStyle()) return 1 } - if foundDemo { - options := &helm.UpgradeOptions{ - ReleaseName: demoName, - ReleaseType: common.ReleaseTypeConsulDemo, - ReleaseTypeName: common.ConsulDemoAppReleaseName, - Namespace: demoNamespace, - Values: make(map[string]interface{}), - Settings: settings, - EmbeddedChart: consulChart.DemoHelmChart, - ChartDirName: consulDemoChartPath, - UILogger: uiLogger, - DryRun: c.flagDryRun, - AutoApprove: c.flagAutoApprove, - Wait: c.flagWait, - Timeout: timeout, - UI: c.UI, - HelmActionsRunner: c.helmActionsRunner, - } + // Check if the user is OK with the upgrade unless the auto approve or dry run flags are true. + if !c.flagAutoApprove && !c.flagDryRun { + confirmation, err := c.UI.Input(&terminal.Input{ + Prompt: "Proceed with upgrade? (y/N)", + Style: terminal.InfoStyle, + Secret: false, + }) - err = helm.UpgradeHelmRelease(options) if err != nil { c.UI.Output(err.Error(), terminal.WithErrorStyle()) return 1 } - } else if c.flagDemo { - - options := &helm.InstallOptions{ - ReleaseName: common.ConsulDemoAppReleaseName, - ReleaseType: common.ReleaseTypeConsulDemo, - Namespace: settings.Namespace(), - Values: make(map[string]interface{}), - Settings: settings, - EmbeddedChart: consulChart.DemoHelmChart, - ChartDirName: consulDemoChartPath, - UILogger: uiLogger, - DryRun: c.flagDryRun, - AutoApprove: c.flagAutoApprove, - Wait: c.flagWait, - Timeout: timeout, - UI: c.UI, - HelmActionsRunner: c.helmActionsRunner, - } - err = helm.InstallDemoApp(options) - if err != nil { - c.UI.Output(err.Error(), terminal.WithErrorStyle()) + if common.Abort(confirmation) { + c.UI.Output("Upgrade aborted. Use the command `consul-k8s upgrade -help` to learn how to customize your upgrade.", + terminal.WithInfoStyle()) return 1 } } + if !c.flagDryRun { + c.UI.Output("Upgrading Consul", terminal.WithHeaderStyle()) + } else { + c.UI.Output("Performing Dry Run Upgrade", terminal.WithHeaderStyle()) + } + + // Setup action configuration for Helm Go SDK function calls. + actionConfig := new(action.Configuration) + actionConfig, err = helm.InitActionConfig(actionConfig, namespace, settings, uiLogger) + if err != nil { + c.UI.Output(err.Error(), terminal.WithErrorStyle()) + return 1 + } + + // Setup the upgrade action. + upgrade := action.NewUpgrade(actionConfig) + upgrade.Namespace = namespace + upgrade.DryRun = c.flagDryRun + upgrade.Wait = c.flagWait + upgrade.Timeout = c.timeoutDuration + + // Run the upgrade. Note that the dry run config is passed into the upgrade action, so upgrade.Run is called even during a dry run. + _, err = upgrade.Run(common.DefaultReleaseName, chart, chartValues) + if err != nil { + c.UI.Output(err.Error(), terminal.WithErrorStyle()) + return 1 + } + if c.flagDryRun { c.UI.Output("Dry run complete. 
No changes were made to the Kubernetes cluster.\n"+ "Upgrade can proceed with this configuration.", terminal.WithInfoStyle()) return 0 } + + c.UI.Output("Consul upgraded in namespace %q.", namespace, terminal.WithSuccessStyle()) return 0 } @@ -401,8 +329,6 @@ func (c *Command) AutocompleteFlags() complete.Flags { fmt.Sprintf("-%s", flagNameWait): complete.PredictNothing, fmt.Sprintf("-%s", flagNameContext): complete.PredictNothing, fmt.Sprintf("-%s", flagNameKubeconfig): complete.PredictFiles("*"), - fmt.Sprintf("-%s", flagNameDemo): complete.PredictNothing, - fmt.Sprintf("-%s", flagNameHCPResourceID): complete.PredictNothing, } } @@ -424,7 +350,7 @@ func (c *Command) validateFlags(args []string) error { if len(c.flagValueFiles) != 0 && c.flagPreset != defaultPreset { return fmt.Errorf("cannot set both -%s and -%s", flagNameConfigFile, flagNamePreset) } - if ok := slices.Contains(preset.Presets, c.flagPreset); c.flagPreset != defaultPreset && !ok { + if _, ok := config.Presets[c.flagPreset]; c.flagPreset != defaultPreset && !ok { return fmt.Errorf("'%s' is not a valid preset", c.flagPreset) } if _, err := time.ParseDuration(c.flagTimeout); err != nil { @@ -438,20 +364,6 @@ func (c *Command) validateFlags(args []string) error { } } - if c.flagPreset == preset.PresetCloud { - clientID := os.Getenv(preset.EnvHCPClientID) - clientSecret := os.Getenv(preset.EnvHCPClientSecret) - if clientID == "" { - return fmt.Errorf("When '%s' is specified as the preset, the '%s' environment variable must also be set", preset.PresetCloud, preset.EnvHCPClientID) - } else if clientSecret == "" { - return fmt.Errorf("When '%s' is specified as the preset, the '%s' environment variable must also be set", preset.PresetCloud, preset.EnvHCPClientSecret) - } else if c.flagNameHCPResourceID == "" { - return fmt.Errorf("When '%s' is specified as the preset, the '%s' flag must also be provided", preset.PresetCloud, flagNameHCPResourceID) - } - } else if c.flagNameHCPResourceID != "" { - return fmt.Errorf("The '%s' flag can only be used with the '%s' preset", flagNameHCPResourceID, preset.PresetCloud) - } - return nil } @@ -464,7 +376,7 @@ func (c *Command) validateFlags(args []string) error { // 5. -set-file // For example, -set-file will override a value provided via -set. // Within each of these groups the rightmost flag value has the highest precedence. -func (c *Command) mergeValuesFlagsWithPrecedence(settings *helmCLI.EnvSettings, namespace string) (map[string]interface{}, error) { +func (c *Command) mergeValuesFlagsWithPrecedence(settings *helmCLI.EnvSettings) (map[string]interface{}, error) { p := getter.All(settings) v := &values.Options{ ValueFiles: c.flagValueFiles, @@ -478,14 +390,7 @@ func (c *Command) mergeValuesFlagsWithPrecedence(settings *helmCLI.EnvSettings, } if c.flagPreset != defaultPreset { // Note the ordering of the function call, presets have lower precedence than set vals. - p, err := c.getPreset(c.flagPreset, namespace) - if err != nil { - return nil, fmt.Errorf("error getting preset provider: %s", err) - } - presetMap, err := p.GetValueMap() - if err != nil { - return nil, fmt.Errorf("error getting preset values: %s", err) - } + presetMap := config.Presets[c.flagPreset].(map[string]interface{}) vals = common.MergeMaps(presetMap, vals) } return vals, err @@ -519,22 +424,24 @@ func (c *Command) createUILogger() func(string, ...interface{}) { } } -// getPreset is a factory function that, given a string, produces a struct that -// implements the Preset interface. 
diff --git a/cli/cmd/upgrade/upgrade_test.go b/cli/cmd/upgrade/upgrade_test.go
index d21b17febf..9b4636eb57 100644
--- a/cli/cmd/upgrade/upgrade_test.go
+++ b/cli/cmd/upgrade/upgrade_test.go
@@ -1,29 +1,16 @@
 package upgrade
 
 import (
-	"bytes"
-	"context"
-	"errors"
 	"flag"
 	"fmt"
-	"io"
 	"os"
 	"testing"
 
 	"github.com/hashicorp/consul-k8s/cli/common"
 	cmnFlag "github.com/hashicorp/consul-k8s/cli/common/flag"
-	"github.com/hashicorp/consul-k8s/cli/common/terminal"
-	"github.com/hashicorp/consul-k8s/cli/helm"
-	"github.com/hashicorp/consul-k8s/cli/preset"
 	"github.com/hashicorp/go-hclog"
 	"github.com/posener/complete"
 	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-	"helm.sh/helm/v3/pkg/action"
-	"helm.sh/helm/v3/pkg/chart"
-	helmRelease "helm.sh/helm/v3/pkg/release"
-	"k8s.io/client-go/kubernetes"
-	"k8s.io/client-go/kubernetes/fake"
 )
 
 // TestValidateFlags tests the validate flags function.
@@ -56,7 +43,7 @@ func TestValidateFlags(t *testing.T) {
 	}
 
 	for _, testCase := range testCases {
-		c := getInitializedCommand(t, nil)
+		c := getInitializedCommand(t)
 		t.Run(testCase.description, func(t *testing.T) {
 			if err := c.validateFlags(testCase.input); err == nil {
 				t.Errorf("Test case should have failed.")
@@ -66,22 +53,16 @@ func TestValidateFlags(t *testing.T) {
 	}
 }
 
 // getInitializedCommand sets up a command struct for tests.
-func getInitializedCommand(t *testing.T, buf io.Writer) *Command { +func getInitializedCommand(t *testing.T) *Command { t.Helper() log := hclog.New(&hclog.LoggerOptions{ Name: "cli", Level: hclog.Info, Output: os.Stdout, }) - var ui terminal.UI - if buf != nil { - ui = terminal.NewUI(context.Background(), buf) - } else { - ui = terminal.NewBasicUI(context.Background()) - } + baseCommand := &common.BaseCommand{ Log: log, - UI: ui, } c := &Command{ @@ -93,7 +74,7 @@ func getInitializedCommand(t *testing.T, buf io.Writer) *Command { func TestTaskCreateCommand_AutocompleteFlags(t *testing.T) { t.Parallel() - cmd := getInitializedCommand(t, nil) + cmd := getInitializedCommand(t) predictor := cmd.AutocompleteFlags() @@ -116,437 +97,7 @@ func TestTaskCreateCommand_AutocompleteFlags(t *testing.T) { } func TestTaskCreateCommand_AutocompleteArgs(t *testing.T) { - cmd := getInitializedCommand(t, nil) + cmd := getInitializedCommand(t) c := cmd.AutocompleteArgs() assert.Equal(t, complete.PredictNothing, c) } - -func TestGetPreset(t *testing.T) { - testCases := []struct { - description string - presetName string - }{ - { - "'cloud' should return a CloudPreset'.", - preset.PresetCloud, - }, - { - "'quickstart' should return a QuickstartPreset'.", - preset.PresetQuickstart, - }, - { - "'secure' should return a SecurePreset'.", - preset.PresetSecure, - }, - } - - for _, tc := range testCases { - c := getInitializedCommand(t, nil) - t.Run(tc.description, func(t *testing.T) { - p, err := c.getPreset(tc.presetName, "consul") - require.NoError(t, err) - switch p.(type) { - case *preset.CloudPreset: - require.Equal(t, preset.PresetCloud, tc.presetName) - case *preset.QuickstartPreset: - require.Equal(t, preset.PresetQuickstart, tc.presetName) - case *preset.SecurePreset: - require.Equal(t, preset.PresetSecure, tc.presetName) - } - }) - } -} - -// TestValidateCloudPresets tests the validate flags function when passed the cloud preset. 
-func TestValidateCloudPresets(t *testing.T) { - testCases := []struct { - description string - input []string - preProcessingFunc func() - postProcessingFunc func() - expectError bool - }{ - { - "Should not error on cloud preset when HCP_CLIENT_ID and HCP_CLIENT_SECRET envvars are present and hcp-resource-id parameter is provided.", - []string{"-preset=cloud", "-hcp-resource-id=foobar"}, - func() { - os.Setenv("HCP_CLIENT_ID", "foo") - os.Setenv("HCP_CLIENT_SECRET", "bar") - }, - func() { - os.Setenv("HCP_CLIENT_ID", "") - os.Setenv("HCP_CLIENT_SECRET", "") - }, - false, - }, - { - "Should error on cloud preset when HCP_CLIENT_ID is not provided.", - []string{"-preset=cloud", "-hcp-resource-id=foobar"}, - func() { - os.Unsetenv("HCP_CLIENT_ID") - os.Setenv("HCP_CLIENT_SECRET", "bar") - }, - func() { - os.Unsetenv("HCP_CLIENT_ID") - os.Unsetenv("HCP_CLIENT_SECRET") - }, - true, - }, - { - "Should error on cloud preset when HCP_CLIENT_SECRET is not provided.", - []string{"-preset=cloud", "-hcp-resource-id=foobar"}, - func() { - os.Setenv("HCP_CLIENT_ID", "foo") - os.Unsetenv("HCP_CLIENT_SECRET") - }, - func() { - os.Unsetenv("HCP_CLIENT_ID") - os.Unsetenv("HCP_CLIENT_SECRET") - }, - true, - }, - { - "Should error on cloud preset when -hcp-resource-id flag is not provided.", - []string{"-preset=cloud"}, - func() { - os.Setenv("HCP_CLIENT_ID", "foo") - os.Setenv("HCP_CLIENT_SECRET", "bar") - }, - func() { - os.Unsetenv("HCP_CLIENT_ID") - os.Unsetenv("HCP_CLIENT_SECRET") - }, - true, - }, - { - "Should error when -hcp-resource-id flag is provided but cloud preset is not specified.", - []string{"-hcp-resource-id=foobar"}, - func() { - os.Setenv("HCP_CLIENT_ID", "foo") - os.Setenv("HCP_CLIENT_SECRET", "bar") - }, - func() { - os.Unsetenv("HCP_CLIENT_ID") - os.Unsetenv("HCP_CLIENT_SECRET") - }, - true, - }, - } - - for _, testCase := range testCases { - testCase.preProcessingFunc() - c := getInitializedCommand(t, nil) - t.Run(testCase.description, func(t *testing.T) { - err := c.validateFlags(testCase.input) - if testCase.expectError && err == nil { - t.Errorf("Test case should have failed.") - } else if !testCase.expectError && err != nil { - t.Errorf("Test case should not have failed.") - } - }) - testCase.postProcessingFunc() - } -} - -func TestUpgrade(t *testing.T) { - var k8s kubernetes.Interface - cases := map[string]struct { - input []string - messages []string - helmActionsRunner *helm.MockActionRunner - preProcessingFunc func() - expectedReturnCode int - expectCheckedForConsulInstallations bool - expectCheckedForConsulDemoInstallations bool - expectConsulUpgraded bool - expectConsulDemoUpgraded bool - expectConsulDemoInstalled bool - }{ - "upgrade when consul installation exists returns success": { - input: []string{}, - messages: []string{ - "\n==> Checking if Consul can be upgraded\n ✓ Existing Consul installation found to be upgraded.\n Name: consul\n Namespace: consul\n", - "\n==> Checking if Consul demo application can be upgraded\n No existing Consul demo application installation found.\n", - "\n==> Consul Upgrade Summary\n ✓ Downloaded charts.\n \n Difference between user overrides for current and upgraded charts\n -----------------------------------------------------------------\n + global:\n + name: consul\n \n", - "\n==> Upgrading Consul\n ✓ Consul upgraded in namespace \"consul\".\n", - }, - helmActionsRunner: &helm.MockActionRunner{ - CheckForInstallationsFunc: func(options *helm.CheckForInstallationsOptions) (bool, string, string, error) { - if options.ReleaseName == 
"consul" { - return true, "consul", "consul", nil - } else { - return false, "", "", nil - } - }, - }, - expectedReturnCode: 0, - expectCheckedForConsulInstallations: true, - expectCheckedForConsulDemoInstallations: true, - expectConsulUpgraded: true, - expectConsulDemoUpgraded: false, - }, - "upgrade when consul installation does not exists returns error": { - input: []string{}, - messages: []string{ - "\n==> Checking if Consul can be upgraded\n ! Cannot upgrade Consul. Existing Consul installation not found. Use the command `consul-k8s install` to install Consul.\n", - }, - helmActionsRunner: &helm.MockActionRunner{ - CheckForInstallationsFunc: func(options *helm.CheckForInstallationsOptions) (bool, string, string, error) { - if options.ReleaseName == "consul" { - return false, "", "", nil - } else { - return false, "", "", nil - } - }, - }, - expectedReturnCode: 1, - expectCheckedForConsulInstallations: true, - expectCheckedForConsulDemoInstallations: false, - expectConsulUpgraded: false, - expectConsulDemoUpgraded: false, - }, - "upgrade when consul upgrade errors returns error": { - input: []string{}, - messages: []string{ - "\n==> Checking if Consul can be upgraded\n ✓ Existing Consul installation found to be upgraded.\n Name: consul\n Namespace: consul\n", - "\n==> Checking if Consul demo application can be upgraded\n No existing Consul demo application installation found.\n", - "\n==> Consul Upgrade Summary\n ✓ Downloaded charts.\n \n Difference between user overrides for current and upgraded charts\n -----------------------------------------------------------------\n + global:\n + name: consul\n \n\n==> Upgrading Consul\n ! Helm returned an error.\n", - }, - helmActionsRunner: &helm.MockActionRunner{ - CheckForInstallationsFunc: func(options *helm.CheckForInstallationsOptions) (bool, string, string, error) { - if options.ReleaseName == "consul" { - return true, "consul", "consul", nil - } else { - return false, "", "", nil - } - }, - UpgradeFunc: func(upgrade *action.Upgrade, name string, chart *chart.Chart, vals map[string]interface{}) (*helmRelease.Release, error) { - return nil, errors.New("Helm returned an error.") - }, - }, - expectedReturnCode: 1, - expectCheckedForConsulInstallations: true, - expectCheckedForConsulDemoInstallations: true, - expectConsulUpgraded: false, - expectConsulDemoUpgraded: false, - }, - "upgrade when demo flag provided but no demo installation exists installs demo and returns success": { - input: []string{ - "-demo", - }, - messages: []string{ - "\n==> Checking if Consul can be upgraded\n ✓ Existing Consul installation found to be upgraded.\n Name: consul\n Namespace: consul\n", - "\n==> Checking if Consul demo application can be upgraded\n No existing consul-demo installation found, but -demo flag provided. 
consul-demo will be installed in namespace consul.\n", - "\n==> Consul Upgrade Summary\n ✓ Downloaded charts.\n \n Difference between user overrides for current and upgraded charts\n -----------------------------------------------------------------\n + global:\n + name: consul\n \n", - "\n==> Upgrading Consul\n ✓ Consul upgraded in namespace \"consul\".\n", - "\n==> Consul Demo Application Installation Summary\n Name: consul-demo\n Namespace: consul\n \n \n", - "\n==> Installing Consul demo application\n ✓ Downloaded charts.\n ✓ Consul demo application installed in namespace \"consul\".\n", - "\n==> Accessing Consul Demo Application UI\n kubectl port-forward service/nginx 8080:80 --namespace consul\n Browse to http://localhost:8080.\n", - }, - helmActionsRunner: &helm.MockActionRunner{ - CheckForInstallationsFunc: func(options *helm.CheckForInstallationsOptions) (bool, string, string, error) { - if options.ReleaseName == "consul" { - return true, "consul", "consul", nil - } else { - return false, "", "", nil - } - }, - }, - expectedReturnCode: 0, - expectCheckedForConsulInstallations: true, - expectCheckedForConsulDemoInstallations: true, - expectConsulUpgraded: true, - expectConsulDemoUpgraded: false, - expectConsulDemoInstalled: true, - }, - "upgrade when demo flag provided and demo installation exists upgrades demo and returns success": { - input: []string{ - "-demo", - }, - messages: []string{ - "\n==> Checking if Consul can be upgraded\n ✓ Existing Consul installation found to be upgraded.\n Name: consul\n Namespace: consul\n", - "\n==> Checking if Consul demo application can be upgraded\n ✓ Existing Consul demo application installation found to be upgraded.\n Name: consul-demo\n Namespace: consul-demo\n", - "\n==> Consul Upgrade Summary\n ✓ Downloaded charts.\n \n Difference between user overrides for current and upgraded charts\n -----------------------------------------------------------------\n + global:\n + name: consul\n \n", - "\n==> Upgrading Consul\n ✓ Consul upgraded in namespace \"consul\".\n", - "\n==> Consul-Demo Upgrade Summary\n ✓ Downloaded charts.\n \n Difference between user overrides for current and upgraded charts\n -----------------------------------------------------------------\n \n", - "\n==> Upgrading consul-demo\n ✓ Consul-Demo upgraded in namespace \"consul-demo\".\n", - }, - helmActionsRunner: &helm.MockActionRunner{ - CheckForInstallationsFunc: func(options *helm.CheckForInstallationsOptions) (bool, string, string, error) { - if options.ReleaseName == "consul" { - return true, "consul", "consul", nil - } else { - return true, "consul-demo", "consul-demo", nil - } - }, - }, - expectedReturnCode: 0, - expectCheckedForConsulInstallations: true, - expectCheckedForConsulDemoInstallations: true, - expectConsulUpgraded: true, - expectConsulDemoUpgraded: true, - expectConsulDemoInstalled: false, - }, - "upgrade when demo flag not provided but demo installation exists upgrades demo and returns success": { - input: []string{}, - messages: []string{ - "\n==> Checking if Consul can be upgraded\n ✓ Existing Consul installation found to be upgraded.\n Name: consul\n Namespace: consul\n", - "\n==> Checking if Consul demo application can be upgraded\n ✓ Existing Consul demo application installation found to be upgraded.\n Name: consul-demo\n Namespace: consul-demo\n", - "\n==> Consul Upgrade Summary\n ✓ Downloaded charts.\n \n Difference between user overrides for current and upgraded charts\n -----------------------------------------------------------------\n + global:\n 
+ name: consul\n \n", - "\n==> Upgrading Consul\n ✓ Consul upgraded in namespace \"consul\".\n", - "\n==> Consul-Demo Upgrade Summary\n ✓ Downloaded charts.\n \n Difference between user overrides for current and upgraded charts\n -----------------------------------------------------------------\n \n", - "\n==> Upgrading consul-demo\n ✓ Consul-Demo upgraded in namespace \"consul-demo\".\n", - }, - helmActionsRunner: &helm.MockActionRunner{ - CheckForInstallationsFunc: func(options *helm.CheckForInstallationsOptions) (bool, string, string, error) { - if options.ReleaseName == "consul" { - return true, "consul", "consul", nil - } else { - return true, "consul-demo", "consul-demo", nil - } - }, - }, - expectedReturnCode: 0, - expectCheckedForConsulInstallations: true, - expectCheckedForConsulDemoInstallations: true, - expectConsulUpgraded: true, - expectConsulDemoUpgraded: true, - expectConsulDemoInstalled: false, - }, - "upgrade when demo upgrade errors returns error with consul being upgraded but demo not being upgraded": { - input: []string{}, - messages: []string{ - "\n==> Checking if Consul can be upgraded\n ✓ Existing Consul installation found to be upgraded.\n Name: consul\n Namespace: consul\n", - "\n==> Checking if Consul demo application can be upgraded\n ✓ Existing Consul demo application installation found to be upgraded.\n Name: consul-demo\n Namespace: consul-demo\n", - "\n==> Consul Upgrade Summary\n ✓ Downloaded charts.\n \n Difference between user overrides for current and upgraded charts\n -----------------------------------------------------------------\n + global:\n + name: consul\n \n", - "\n==> Upgrading Consul\n ✓ Consul upgraded in namespace \"consul\".\n", - "\n==> Consul-Demo Upgrade Summary\n ✓ Downloaded charts.\n \n Difference between user overrides for current and upgraded charts\n -----------------------------------------------------------------\n \n", - "\n==> Upgrading consul-demo\n ! 
Helm returned an error.\n", - }, - helmActionsRunner: &helm.MockActionRunner{ - CheckForInstallationsFunc: func(options *helm.CheckForInstallationsOptions) (bool, string, string, error) { - if options.ReleaseName == "consul" { - return true, "consul", "consul", nil - } else { - return true, "consul-demo", "consul-demo", nil - } - }, - UpgradeFunc: func(upgrade *action.Upgrade, name string, chart *chart.Chart, vals map[string]interface{}) (*helmRelease.Release, error) { - if name == "consul" { - return &helmRelease.Release{}, nil - } else { - return nil, errors.New("Helm returned an error.") - } - }, - }, - expectedReturnCode: 1, - expectCheckedForConsulInstallations: true, - expectCheckedForConsulDemoInstallations: true, - expectConsulUpgraded: true, - expectConsulDemoUpgraded: false, - }, - "upgrade with quickstart preset when consul installation exists returns success": { - input: []string{ - "-preset", "quickstart", - }, - messages: []string{ - "\n==> Checking if Consul can be upgraded\n ✓ Existing Consul installation found to be upgraded.\n Name: consul\n Namespace: consul\n", - "\n==> Checking if Consul demo application can be upgraded\n No existing Consul demo application installation found.\n", - "\n==> Consul Upgrade Summary\n ✓ Downloaded charts.\n \n Difference between user overrides for current and upgraded charts\n -----------------------------------------------------------------\n + connectInject:\n + enabled: true\n + metrics:\n + defaultEnableMerging: true\n + defaultEnabled: true\n + enableGatewayMetrics: true\n + global:\n + metrics:\n + enableAgentMetrics: true\n + enabled: true\n + name: consul\n + prometheus:\n + enabled: true\n + server:\n + replicas: 1\n + ui:\n + enabled: true\n + service:\n + enabled: true\n \n", - "\n==> Upgrading Consul\n ✓ Consul upgraded in namespace \"consul\".\n", - }, - helmActionsRunner: &helm.MockActionRunner{ - CheckForInstallationsFunc: func(options *helm.CheckForInstallationsOptions) (bool, string, string, error) { - if options.ReleaseName == "consul" { - return true, "consul", "consul", nil - } else { - return false, "", "", nil - } - }, - }, - expectedReturnCode: 0, - expectCheckedForConsulInstallations: true, - expectCheckedForConsulDemoInstallations: true, - expectConsulUpgraded: true, - expectConsulDemoUpgraded: false, - }, - "upgrade with secure preset when consul installation exists returns success": { - input: []string{ - "-preset", "secure", - }, - messages: []string{ - "\n==> Checking if Consul can be upgraded\n ✓ Existing Consul installation found to be upgraded.\n Name: consul\n Namespace: consul\n", - "\n==> Checking if Consul demo application can be upgraded\n No existing Consul demo application installation found.\n", - "\n==> Consul Upgrade Summary\n ✓ Downloaded charts.\n \n Difference between user overrides for current and upgraded charts\n -----------------------------------------------------------------\n + connectInject:\n + enabled: true\n + global:\n + acls:\n + manageSystemACLs: true\n + gossipEncryption:\n + autoGenerate: true\n + name: consul\n + tls:\n + enableAutoEncrypt: true\n + enabled: true\n + server:\n + replicas: 1\n \n", - "\n==> Upgrading Consul\n ✓ Consul upgraded in namespace \"consul\".\n", - }, - helmActionsRunner: &helm.MockActionRunner{ - CheckForInstallationsFunc: func(options *helm.CheckForInstallationsOptions) (bool, string, string, error) { - if options.ReleaseName == "consul" { - return true, "consul", "consul", nil - } else { - return false, "", "", nil - } - }, - }, - expectedReturnCode: 
0, - expectCheckedForConsulInstallations: true, - expectCheckedForConsulDemoInstallations: true, - expectConsulUpgraded: true, - expectConsulDemoUpgraded: false, - }, - "upgrade with --dry-run flag when consul installation exists returns success": { - input: []string{ - "--dry-run", - }, - messages: []string{ - " Performing dry run upgrade. No changes will be made to the cluster.\n", - "\n==> Checking if Consul can be upgraded\n ✓ Existing Consul installation found to be upgraded.\n Name: consul\n Namespace: consul\n", - "\n==> Checking if Consul demo application can be upgraded\n No existing Consul demo application installation found.\n", - "\n==> Consul Upgrade Summary\n ✓ Downloaded charts.\n \n Difference between user overrides for current and upgraded charts\n -----------------------------------------------------------------\n + global:\n + name: consul\n \n", - "\n==> Performing Dry Run Upgrade\n Dry run complete. No changes were made to the Kubernetes cluster.\n Upgrade can proceed with this configuration.\n", - }, - helmActionsRunner: &helm.MockActionRunner{ - CheckForInstallationsFunc: func(options *helm.CheckForInstallationsOptions) (bool, string, string, error) { - if options.ReleaseName == "consul" { - return true, "consul", "consul", nil - } else { - return false, "", "", nil - } - }, - }, - expectedReturnCode: 0, - expectCheckedForConsulInstallations: true, - expectCheckedForConsulDemoInstallations: true, - expectConsulUpgraded: false, - expectConsulDemoUpgraded: false, - }, - } - for name, tc := range cases { - t.Run(name, func(t *testing.T) { - buf := new(bytes.Buffer) - c := getInitializedCommand(t, buf) - k8s = fake.NewSimpleClientset() - c.kubernetes = k8s - mock := tc.helmActionsRunner - c.helmActionsRunner = mock - if tc.preProcessingFunc != nil { - tc.preProcessingFunc() - } - input := append([]string{ - "--auto-approve", - }, tc.input...) 
- returnCode := c.Run(input) - require.Equal(t, tc.expectedReturnCode, returnCode) - require.Equal(t, tc.expectCheckedForConsulInstallations, mock.CheckedForConsulInstallations) - require.Equal(t, tc.expectCheckedForConsulDemoInstallations, mock.CheckedForConsulDemoInstallations) - require.Equal(t, tc.expectConsulUpgraded, mock.ConsulUpgraded) - require.Equal(t, tc.expectConsulDemoUpgraded, mock.ConsulDemoUpgraded) - require.Equal(t, tc.expectConsulDemoInstalled, mock.ConsulDemoInstalled) - output := buf.String() - for _, msg := range tc.messages { - require.Contains(t, output, msg) - } - }) - } -} diff --git a/cli/commands.go b/cli/commands.go index fe4c47400e..d29b4a8ad7 100644 --- a/cli/commands.go +++ b/cli/commands.go @@ -6,12 +6,8 @@ import ( "github.com/hashicorp/consul-k8s/cli/cmd/install" "github.com/hashicorp/consul-k8s/cli/cmd/proxy" "github.com/hashicorp/consul-k8s/cli/cmd/proxy/list" - "github.com/hashicorp/consul-k8s/cli/cmd/proxy/loglevel" "github.com/hashicorp/consul-k8s/cli/cmd/proxy/read" "github.com/hashicorp/consul-k8s/cli/cmd/status" - "github.com/hashicorp/consul-k8s/cli/cmd/troubleshoot" - troubleshoot_proxy "github.com/hashicorp/consul-k8s/cli/cmd/troubleshoot/proxy" - "github.com/hashicorp/consul-k8s/cli/cmd/troubleshoot/upstreams" "github.com/hashicorp/consul-k8s/cli/cmd/uninstall" "github.com/hashicorp/consul-k8s/cli/cmd/upgrade" cmdversion "github.com/hashicorp/consul-k8s/cli/cmd/version" @@ -23,6 +19,7 @@ import ( ) func initializeCommands(ctx context.Context, log hclog.Logger) (*common.BaseCommand, map[string]cli.CommandFactory) { + baseCommand := &common.BaseCommand{ Ctx: ctx, Log: log, @@ -66,31 +63,11 @@ func initializeCommands(ctx context.Context, log hclog.Logger) (*common.BaseComm BaseCommand: baseCommand, }, nil }, - "proxy log": func() (cli.Command, error) { - return &loglevel.LogLevelCommand{ - BaseCommand: baseCommand, - }, nil - }, "proxy read": func() (cli.Command, error) { return &read.ReadCommand{ BaseCommand: baseCommand, }, nil }, - "troubleshoot": func() (cli.Command, error) { - return &troubleshoot.TroubleshootCommand{ - BaseCommand: baseCommand, - }, nil - }, - "troubleshoot proxy": func() (cli.Command, error) { - return &troubleshoot_proxy.ProxyCommand{ - BaseCommand: baseCommand, - }, nil - }, - "troubleshoot upstreams": func() (cli.Command, error) { - return &upstreams.UpstreamsCommand{ - BaseCommand: baseCommand, - }, nil - }, } return baseCommand, commands diff --git a/cli/common/envoy/logger_params.go b/cli/common/envoy/logger_params.go deleted file mode 100644 index b2eff623c0..0000000000 --- a/cli/common/envoy/logger_params.go +++ /dev/null @@ -1,166 +0,0 @@ -package envoy - -import ( - "fmt" - "strings" -) - -type logLevel struct { - name string - level string -} - -type LoggerParams struct { - globalLevel string - individualLevels []logLevel -} - -func NewLoggerParams() *LoggerParams { - return &LoggerParams{ - individualLevels: make([]logLevel, 0), - } -} - -func (l *LoggerParams) SetLoggerLevel(name, level string) error { - err := validateLoggerName(name) - if err != nil { - return err - } - err = validateLogLevel(level) - if err != nil { - return err - } - - l.individualLevels = append(l.individualLevels, logLevel{name: name, level: level}) - return nil -} - -func (l *LoggerParams) SetGlobalLoggerLevel(level string) error { - err := validateLogLevel(level) - if err != nil { - return err - } - l.globalLevel = level - return nil -} - -func validateLogLevel(level string) error { - if _, ok := envoyLevels[level]; !ok { - logLevels 
:= []string{} - for levelName := range envoyLevels { - logLevels = append(logLevels, levelName) - } - return fmt.Errorf("Unknown log level %s, available log levels are %q", level, strings.Join(logLevels, ", ")) - } - return nil -} - -func validateLoggerName(name string) error { - if _, ok := EnvoyLoggers[name]; !ok { - loggers := []string{} - for loggerName := range envoyLevels { - loggers = append(loggers, loggerName) - } - return fmt.Errorf("Unknown logger %s, available loggers are %q", name, strings.Join(loggers, ", ")) - - } - return nil -} - -func (l *LoggerParams) String() string { - switch { - // Global log level change is set - case l.globalLevel != "": - return fmt.Sprintf("?level=%s", l.globalLevel) - - // only one specific logger is changed - case len(l.individualLevels) == 1: - params := fmt.Sprintf("?%s=%s", l.individualLevels[0].name, l.individualLevels[0].level) - return params - - // multiple specific loggers are changed - case len(l.individualLevels) > 1: - logParams := make([]string, 0, len(l.individualLevels)) - for _, logger := range l.individualLevels { - logParams = append(logParams, fmt.Sprintf("%s:%s", logger.name, logger.level)) - } - - params := fmt.Sprintf("?paths=%s", strings.Join(logParams, ",")) - return params - default: - - // default path, this is hit if there are no params - return "" - } -} - -// trace debug info warning error critical off. -var envoyLevels = map[string]struct{}{ - "trace": {}, - "debug": {}, - "info": {}, - "warning": {}, - "error": {}, - "critical": {}, - "off": {}, -} - -var EnvoyLoggers = map[string]struct{}{ - "admin": {}, - "alternate_protocols_cache": {}, - "aws": {}, - "assert": {}, - "backtrace": {}, - "cache_filter": {}, - "client": {}, - "config": {}, - "connection": {}, - "conn_handler": {}, - "decompression": {}, - "dns": {}, - "dubbo": {}, - "envoy_bug": {}, - "ext_authz": {}, - "ext_proc": {}, - "rocketmq": {}, - "file": {}, - "filter": {}, - "forward_proxy": {}, - "grpc": {}, - "happy_eyeballs": {}, - "hc": {}, - "health_checker": {}, - "http": {}, - "http2": {}, - "hystrix": {}, - "init": {}, - "io": {}, - "jwt": {}, - "kafka": {}, - "key_value_store": {}, - "lua": {}, - "main": {}, - "matcher": {}, - "misc": {}, - "mongo": {}, - "multi_connection": {}, - "oauth2": {}, - "quic": {}, - "quic_stream": {}, - "pool": {}, - "rbac": {}, - "rds": {}, - "redis": {}, - "router": {}, - "runtime": {}, - "stats": {}, - "secret": {}, - "tap": {}, - "testing": {}, - "thrift": {}, - "tracing": {}, - "upstream": {}, - "udp": {}, - "wasm": {}, - "websocket": {}, -} diff --git a/cli/common/envoy/logger_params_test.go b/cli/common/envoy/logger_params_test.go deleted file mode 100644 index 26be9f69a6..0000000000 --- a/cli/common/envoy/logger_params_test.go +++ /dev/null @@ -1,172 +0,0 @@ -package envoy - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestSetLoggerLevelSucceeds(t *testing.T) { - t.Parallel() - testCases := map[string]struct { - levelsToSet [][]string - expectedIndividualLevels []logLevel - }{ - "single log level change trace": { - levelsToSet: [][]string{ - {"admin", "trace"}, - }, - expectedIndividualLevels: []logLevel{ - {name: "admin", level: "trace"}, - }, - }, - "single log level change debug": { - levelsToSet: [][]string{ - {"admin", "debug"}, - }, - expectedIndividualLevels: []logLevel{ - {name: "admin", level: "debug"}, - }, - }, - "single log level change info": { - levelsToSet: [][]string{ - {"admin", "info"}, - }, - expectedIndividualLevels: []logLevel{ - {name: "admin", level: "info"}, 
- }, - }, - "single log level change warning": { - levelsToSet: [][]string{ - {"admin", "warning"}, - }, - expectedIndividualLevels: []logLevel{ - {name: "admin", level: "warning"}, - }, - }, - "single log level change error": { - levelsToSet: [][]string{ - {"admin", "error"}, - }, - expectedIndividualLevels: []logLevel{ - {name: "admin", level: "error"}, - }, - }, - "single log level change critical": { - levelsToSet: [][]string{ - {"admin", "critical"}, - }, - expectedIndividualLevels: []logLevel{ - {name: "admin", level: "critical"}, - }, - }, - "single log level change off": { - levelsToSet: [][]string{ - {"admin", "off"}, - }, - expectedIndividualLevels: []logLevel{ - {name: "admin", level: "off"}, - }, - }, - "multiple log level change": { - levelsToSet: [][]string{ - {"admin", "info"}, - {"grpc", "debug"}, - }, - expectedIndividualLevels: []logLevel{ - {name: "admin", level: "info"}, - {name: "grpc", level: "debug"}, - }, - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - loggerParams := NewLoggerParams() - for _, loggerLevel := range tc.levelsToSet { - logger, level := loggerLevel[0], loggerLevel[1] - err := loggerParams.SetLoggerLevel(logger, level) - require.NoError(t, err) - } - require.Equal(t, loggerParams.individualLevels, tc.expectedIndividualLevels) - }) - } -} - -func TestSetLoggerLevelFails(t *testing.T) { - t.Parallel() - testCases := map[string]struct { - loggerName string - loggerLevel string - }{ - "invalid logger name": { - loggerName: "this is not the logger you're looking for", - loggerLevel: "info", - }, - "invalid logger level": { - loggerName: "grpc", - loggerLevel: "this is also incorrect", - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - loggerParams := NewLoggerParams() - err := loggerParams.SetLoggerLevel(tc.loggerName, tc.loggerLevel) - require.Error(t, err) - }) - } -} - -func TestSetGlobalLoggerLevel(t *testing.T) { - t.Parallel() - for level := range envoyLevels { - loggerParams := NewLoggerParams() - err := loggerParams.SetGlobalLoggerLevel(level) - require.NoError(t, err) - } -} - -func TestSetGlobalLoggerLevelFails(t *testing.T) { - t.Parallel() - loggerParams := NewLoggerParams() - err := loggerParams.SetGlobalLoggerLevel("not a valid level") - require.Error(t, err) -} - -func TestString(t *testing.T) { - t.Parallel() - testCases := map[string]struct { - subject *LoggerParams - expectedOutput string - }{ - "when global level is set": { - subject: &LoggerParams{globalLevel: "warn"}, - expectedOutput: "?level=warn", - }, - "when one specific log level is set": { - subject: &LoggerParams{ - individualLevels: []logLevel{ - {name: "grpc", level: "warn"}, - }, - }, - expectedOutput: "?grpc=warn", - }, - "when multiple specific log levels are set": { - subject: &LoggerParams{ - individualLevels: []logLevel{ - {name: "grpc", level: "warn"}, - {name: "http", level: "info"}, - }, - }, - expectedOutput: "?paths=grpc:warn,http:info", - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - actual := tc.subject.String() - require.Equal(t, actual, tc.expectedOutput) - }) - } -} diff --git a/cli/common/envoy/testdata/fetch_debug_levels.txt b/cli/common/envoy/testdata/fetch_debug_levels.txt deleted file mode 100644 index 6b059dc1aa..0000000000 --- a/cli/common/envoy/testdata/fetch_debug_levels.txt +++ /dev/null @@ -1,59 +0,0 @@ -active loggers: - admin: debug - alternate_protocols_cache: debug - aws: debug - assert: debug - backtrace: debug - cache_filter: debug - client: 
debug - config: debug - connection: debug - conn_handler: debug - decompression: debug - dns: debug - dubbo: debug - envoy_bug: debug - ext_authz: debug - ext_proc: debug - rocketmq: debug - file: debug - filter: debug - forward_proxy: debug - grpc: debug - happy_eyeballs: debug - hc: debug - health_checker: debug - http: debug - http2: debug - hystrix: debug - init: debug - io: debug - jwt: debug - kafka: debug - key_value_store: debug - lua: debug - main: debug - matcher: debug - misc: debug - mongo: debug - multi_connection: debug - oauth2: debug - quic: debug - quic_stream: debug - pool: debug - rbac: debug - rds: debug - redis: debug - router: debug - runtime: debug - stats: debug - secret: debug - tap: debug - testing: debug - thrift: debug - tracing: debug - upstream: debug - udp: debug - wasm: debug - websocket: debug - diff --git a/cli/common/error.go b/cli/common/error.go deleted file mode 100644 index 3d8e3deb51..0000000000 --- a/cli/common/error.go +++ /dev/null @@ -1,25 +0,0 @@ -package common - -// DanglingResourceError should be used when a request was made to remove -// a resource and the resource still remains after enough time has elapsed -// that it should have been removed by Kubernetes. -type DanglingResourceError struct { - message string -} - -// NewDanglingResourceError returns a new instance of DanglingResourceError with -// the given message. -func NewDanglingResourceError(message string) *DanglingResourceError { - return &DanglingResourceError{message} -} - -// Error returns a string representation of the dangling resource error. -func (d *DanglingResourceError) Error() string { - return d.message -} - -// IsDanglingResourceError returns true if the error passed in is of type DanglingResourceError. -func IsDanglingResourceError(err error) bool { - _, ok := err.(*DanglingResourceError) - return ok -} diff --git a/cli/common/terminal/basic.go b/cli/common/terminal/basic.go index e8866415bc..e8411b9aec 100644 --- a/cli/common/terminal/basic.go +++ b/cli/common/terminal/basic.go @@ -79,7 +79,7 @@ func (ui *basicUI) Interactive() bool { return isatty.IsTerminal(os.Stdin.Fd()) } -// Output prints the given message using the formatting options passed in. +// Output implements UI. func (ui *basicUI) Output(msg string, raw ...interface{}) { msg, style, w := ui.parse(msg, raw...) @@ -115,6 +115,7 @@ func (ui *basicUI) Output(msg string, raw ...interface{}) { msg = strings.Join(lines, "\n") } + // Write it fmt.Fprintln(w, msg) } diff --git a/cli/common/terminal/table.go b/cli/common/terminal/table.go index ac9a762609..67278931a4 100644 --- a/cli/common/terminal/table.go +++ b/cli/common/terminal/table.go @@ -5,21 +5,15 @@ import ( ) const ( - Yellow = "yellow" - Green = "green" - Red = "red" - Blue = "blue" - Magenta = "magenta" - HiWhite = "hiwhite" + Yellow = "yellow" + Green = "green" + Red = "red" ) var colorMapping = map[string]int{ - Green: tablewriter.FgGreenColor, - Yellow: tablewriter.FgYellowColor, - Red: tablewriter.FgRedColor, - Blue: tablewriter.FgBlueColor, - Magenta: tablewriter.FgMagentaColor, - HiWhite: tablewriter.FgHiWhiteColor, + Green: tablewriter.FgGreenColor, + Yellow: tablewriter.FgYellowColor, + Red: tablewriter.FgRedColor, } // Passed to UI.Table to provide a nicely formatted table. 
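The table.go hunk above trims colorMapping down to the three colors the CLI's tables still use. A hedged sketch of how such a mapping can feed tablewriter's per-cell coloring, assuming only tablewriter's public API; the header and row data are invented for illustration:

package main

import (
	"os"

	"github.com/olekukonko/tablewriter"
)

// colorMapping mirrors the reduced mapping in terminal/table.go:
// only green, yellow, and red remain after this change.
var colorMapping = map[string]int{
	"green":  tablewriter.FgGreenColor,
	"yellow": tablewriter.FgYellowColor,
	"red":    tablewriter.FgRedColor,
}

func main() {
	table := tablewriter.NewWriter(os.Stdout)
	table.SetHeader([]string{"Name", "Status"})

	// Rich appends a row with per-cell colors; here the status cell's
	// color is looked up by name, roughly as a table helper might do.
	table.Rich(
		[]string{"consul-server-0", "Running"},
		[]tablewriter.Colors{{}, {colorMapping["green"]}},
	)
	table.Render()
}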
diff --git a/cli/common/terminal/ui.go b/cli/common/terminal/ui.go index dde5d532ba..b90d05b197 100644 --- a/cli/common/terminal/ui.go +++ b/cli/common/terminal/ui.go @@ -7,36 +7,6 @@ import ( "github.com/fatih/color" ) -const ( - HeaderStyle = "header" - ErrorStyle = "error" - ErrorBoldStyle = "error-bold" - WarningStyle = "warning" - WarningBoldStyle = "warning-bold" - InfoStyle = "info" - LibraryStyle = "library" - SuccessStyle = "success" - SuccessBoldStyle = "success-bold" - DiffUnchangedStyle = "diff-unchanged" - DiffAddedStyle = "diff-added" - DiffRemovedStyle = "diff-removed" -) - -var ( - colorHeader = color.New(color.Bold) - colorInfo = color.New() - colorError = color.New(color.FgRed) - colorErrorBold = color.New(color.FgRed, color.Bold) - colorLibrary = color.New(color.FgCyan) - colorSuccess = color.New(color.FgGreen) - colorSuccessBold = color.New(color.FgGreen, color.Bold) - colorWarning = color.New(color.FgYellow) - colorWarningBold = color.New(color.FgYellow, color.Bold) - colorDiffUnchanged = color.New() - colorDiffAdded = color.New(color.FgGreen) - colorDiffRemoved = color.New(color.FgRed) -) - // ErrNonInteractive is returned when Input is called on a non-Interactive UI. var ErrNonInteractive = errors.New("noninteractive UI doesn't support this operation") @@ -95,6 +65,21 @@ type Input struct { Secret bool } +const ( + HeaderStyle = "header" + ErrorStyle = "error" + ErrorBoldStyle = "error-bold" + WarningStyle = "warning" + WarningBoldStyle = "warning-bold" + InfoStyle = "info" + LibraryStyle = "library" + SuccessStyle = "success" + SuccessBoldStyle = "success-bold" + DiffUnchangedStyle = "diff-unchanged" + DiffAddedStyle = "diff-added" + DiffRemovedStyle = "diff-removed" +) + type config struct { // Writer is where the message will be written to. Writer io.Writer @@ -182,3 +167,18 @@ func WithStyle(style string) Option { func WithWriter(w io.Writer) Option { return func(c *config) { c.Writer = w } } + +var ( + colorHeader = color.New(color.Bold) + colorInfo = color.New() + colorError = color.New(color.FgRed) + colorErrorBold = color.New(color.FgRed, color.Bold) + colorLibrary = color.New(color.FgCyan) + colorSuccess = color.New(color.FgGreen) + colorSuccessBold = color.New(color.FgGreen, color.Bold) + colorWarning = color.New(color.FgYellow) + colorWarningBold = color.New(color.FgYellow, color.Bold) + colorDiffUnchanged = color.New() + colorDiffAdded = color.New(color.FgGreen) + colorDiffRemoved = color.New(color.FgRed) +) diff --git a/cli/common/utils.go b/cli/common/utils.go index b2e9714a9d..e03238bfb0 100644 --- a/cli/common/utils.go +++ b/cli/common/utils.go @@ -1,17 +1,19 @@ package common import ( + "errors" + "fmt" "os" "strings" + + "helm.sh/helm/v3/pkg/action" + helmCLI "helm.sh/helm/v3/pkg/cli" ) const ( - DefaultReleaseName = "consul" - DefaultReleaseNamespace = "consul" - ConsulDemoAppReleaseName = "consul-demo" - TopLevelChartDirName = "consul" - ReleaseTypeConsul = "Consul" - ReleaseTypeConsulDemo = "Consul demo application" + DefaultReleaseName = "consul" + DefaultReleaseNamespace = "consul" + TopLevelChartDirName = "consul" // CLILabelKey and CLILabelValue are added to each secret on creation so the CLI knows // which key to delete on an uninstall. 
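The terminal/ui.go hunk above reorders the style constants and color variables but leaves the package's functional-options design for Output intact. As a standalone illustration of that pattern (the names below are simplified stand-ins, not the package's actual internals):

package main

import (
	"fmt"
	"io"
	"os"
)

// config and Option mirror the shape of the functional-options pattern that
// terminal/ui.go uses: each Option mutates a config before the write happens.
type config struct {
	writer io.Writer
	style  string
}

type Option func(*config)

func WithStyle(style string) Option { return func(c *config) { c.style = style } }
func WithWriter(w io.Writer) Option { return func(c *config) { c.writer = w } }

// output applies the options over defaults, then writes the styled message.
func output(msg string, opts ...Option) {
	cfg := config{writer: os.Stdout, style: "info"}
	for _, opt := range opts {
		opt(&cfg)
	}
	fmt.Fprintf(cfg.writer, "[%s] %s\n", cfg.style, msg)
}

func main() {
	output("Upgrade aborted.", WithStyle("error"), WithWriter(os.Stderr))
}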
@@ -25,6 +27,32 @@ func Abort(raw string) bool {
 	return !(strings.ToLower(confirmation) == "y" || strings.ToLower(confirmation) == "yes")
 }
 
+// CheckForInstallations uses the helm Go SDK to find helm releases in all namespaces where the chart name is
+// "consul", and returns the release name and namespace if found, or an error if not found.
+func CheckForInstallations(settings *helmCLI.EnvSettings, uiLogger action.DebugLog) (string, string, error) {
+	// Need a specific action config to call helm list, where namespace is NOT specified.
+	listConfig := new(action.Configuration)
+	if err := listConfig.Init(settings.RESTClientGetter(), "",
+		os.Getenv("HELM_DRIVER"), uiLogger); err != nil {
+		return "", "", fmt.Errorf("couldn't initialize helm config: %s", err)
+	}
+
+	lister := action.NewList(listConfig)
+	lister.AllNamespaces = true
+	lister.StateMask = action.ListAll
+	res, err := lister.Run()
+	if err != nil {
+		return "", "", fmt.Errorf("couldn't check for installations: %s", err)
+	}
+
+	for _, rel := range res {
+		if rel.Chart.Metadata.Name == "consul" {
+			return rel.Name, rel.Namespace, nil
+		}
+	}
+	return "", "", errors.New("couldn't find consul installation")
+}
+
 // MergeMaps merges two maps giving b precedent.
 // @source: https://github.com/helm/helm/blob/main/pkg/cli/values/options.go
 func MergeMaps(a, b map[string]interface{}) map[string]interface{} {
diff --git a/cli/config/config.go b/cli/config/config.go
deleted file mode 100644
index d964bc3b5c..0000000000
--- a/cli/config/config.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package config
-
-import "sigs.k8s.io/yaml"
-
-// GlobalNameConsul is used to set the global name of an install to consul.
-const GlobalNameConsul = `
-global:
-  name: consul
-`
-
-// ConvertToMap is a helper function that converts a YAML string to a map.
-func ConvertToMap(s string) map[string]interface{} {
-	var m map[string]interface{}
-	_ = yaml.Unmarshal([]byte(s), &m)
-	return m
-}
diff --git a/cli/config/presets.go b/cli/config/presets.go
new file mode 100644
index 0000000000..06b91ce8ce
--- /dev/null
+++ b/cli/config/presets.go
@@ -0,0 +1,71 @@
+package config
+
+import "sigs.k8s.io/yaml"
+
+const (
+	PresetDemo   = "demo"
+	PresetSecure = "secure"
+)
+
+// Presets is a map of pre-configured helm values.
+var Presets = map[string]interface{}{
+	PresetDemo:   Convert(demo),
+	PresetSecure: Convert(secure),
+}
+
+// demo is a preset of common values for setting up Consul.
+const demo = `
+global:
+  name: consul
+  metrics:
+    enabled: true
+    enableAgentMetrics: true
+connectInject:
+  enabled: true
+  metrics:
+    defaultEnabled: true
+    defaultEnableMerging: true
+    enableGatewayMetrics: true
+server:
+  replicas: 1
+controller:
+  enabled: true
+ui:
+  enabled: true
+  service:
+    enabled: true
+prometheus:
+  enabled: true
+`
+
+// secure is a preset of common values for setting up Consul in a secure manner.
+const secure = `
+global:
+  name: consul
+  gossipEncryption:
+    autoGenerate: true
+  tls:
+    enabled: true
+    enableAutoEncrypt: true
+  acls:
+    manageSystemACLs: true
+server:
+  replicas: 1
+connectInject:
+  enabled: true
+controller:
+  enabled: true
+`
+
+// GlobalNameConsul is used to set the global name of an install to consul.
+const GlobalNameConsul = `
+global:
+  name: consul
+`
+
+// Convert is a helper function that converts a YAML string to a map.
+func Convert(s string) map[string]interface{} { + var m map[string]interface{} + _ = yaml.Unmarshal([]byte(s), &m) + return m +} diff --git a/cli/go.mod b/cli/go.mod index ae69aa5a64..cd95397863 100644 --- a/cli/go.mod +++ b/cli/go.mod @@ -1,6 +1,6 @@ module github.com/hashicorp/consul-k8s/cli -go 1.20 +go 1.18 require ( github.com/bgentry/speakeasy v0.1.0 @@ -8,19 +8,15 @@ require ( github.com/fatih/color v1.13.0 github.com/google/go-cmp v0.5.8 github.com/hashicorp/consul-k8s/charts v0.0.0-00010101000000-000000000000 - github.com/hashicorp/consul/troubleshoot v0.0.0-20230217154305-8dab825c3640 - github.com/hashicorp/go-hclog v1.2.1 - github.com/hashicorp/hcp-sdk-go v0.23.1-0.20220921131124-49168300a7dc + github.com/hashicorp/go-hclog v0.16.2 github.com/kr/text v0.2.0 - github.com/mattn/go-isatty v0.0.16 + github.com/mattn/go-isatty v0.0.14 github.com/mitchellh/cli v1.1.2 github.com/olekukonko/tablewriter v0.0.5 - github.com/posener/complete v1.2.3 - github.com/stretchr/testify v1.8.0 - golang.org/x/text v0.5.0 + github.com/posener/complete v1.1.1 + github.com/stretchr/testify v1.7.2 helm.sh/helm/v3 v3.9.4 k8s.io/api v0.25.0 - k8s.io/apiextensions-apiserver v0.25.0 k8s.io/apimachinery v0.25.0 k8s.io/cli-runtime v0.24.3 k8s.io/client-go v0.25.0 @@ -28,8 +24,6 @@ require ( sigs.k8s.io/yaml v1.3.0 ) -require go.opentelemetry.io/proto/otlp v0.11.0 // indirect - require ( cloud.google.com/go v0.99.0 // indirect github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect @@ -49,17 +43,14 @@ require ( github.com/Masterminds/squirrel v1.5.3 // indirect github.com/PuerkitoBio/purell v1.1.1 // indirect github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect - github.com/armon/go-metrics v0.3.10 // indirect - github.com/armon/go-radix v1.0.0 // indirect - github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect + github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310 // indirect + github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/census-instrumentation/opencensus-proto v0.3.0 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 // indirect - github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc // indirect github.com/containerd/containerd v1.6.6 // indirect github.com/cyphar/filepath-securejoin v0.2.3 // indirect - github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/davecgh/go-spew v1.1.1 // indirect github.com/docker/cli v20.10.17+incompatible // indirect github.com/docker/distribution v2.8.1+incompatible // indirect github.com/docker/docker v20.10.17+incompatible // indirect @@ -68,25 +59,15 @@ require ( github.com/docker/go-metrics v0.0.1 // indirect github.com/docker/go-units v0.4.0 // indirect github.com/emicklei/go-restful/v3 v3.8.0 // indirect - github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1 // indirect - github.com/envoyproxy/protoc-gen-validate v0.9.1 // indirect github.com/evanphx/json-patch v4.12.0+incompatible // indirect github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect github.com/fsnotify/fsnotify v1.5.4 // indirect github.com/go-errors/errors v1.0.1 // indirect - github.com/go-gorp/gorp/v3 v3.1.0 // indirect + github.com/go-gorp/gorp/v3 v3.0.2 // indirect github.com/go-logr/logr v1.2.3 // indirect - github.com/go-openapi/analysis v0.21.2 // indirect - 
github.com/go-openapi/errors v0.20.2 // indirect github.com/go-openapi/jsonpointer v0.19.5 // indirect - github.com/go-openapi/jsonreference v0.19.6 // indirect - github.com/go-openapi/loads v0.21.1 // indirect - github.com/go-openapi/runtime v0.24.1 // indirect - github.com/go-openapi/spec v0.20.4 // indirect - github.com/go-openapi/strfmt v0.21.3 // indirect - github.com/go-openapi/swag v0.21.1 // indirect - github.com/go-openapi/validate v0.21.0 // indirect - github.com/go-ozzo/ozzo-validation v3.6.0+incompatible // indirect + github.com/go-openapi/jsonreference v0.19.5 // indirect + github.com/go-openapi/swag v0.19.14 // indirect github.com/gobwas/glob v0.2.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.2.0 // indirect @@ -95,22 +76,14 @@ require ( github.com/google/gnostic v0.5.7-v3refs // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect - github.com/google/uuid v1.3.0 // indirect + github.com/google/uuid v1.2.0 // indirect github.com/gorilla/mux v1.8.0 // indirect github.com/gosuri/uitable v0.0.4 // indirect github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect - github.com/hashicorp/consul/api v1.10.1-0.20230209203402-db2bd404bf72 // indirect - github.com/hashicorp/consul/envoyextensions v0.0.0-20230210154717-4f2ce606547b // indirect github.com/hashicorp/errwrap v1.1.0 // indirect - github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - github.com/hashicorp/go-immutable-radix v1.3.1 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/hashicorp/go-rootcerts v1.0.2 // indirect - github.com/hashicorp/go-version v1.2.1 // indirect - github.com/hashicorp/golang-lru v0.5.4 // indirect - github.com/hashicorp/serf v0.10.1 // indirect github.com/huandu/xstrings v1.3.2 // indirect - github.com/imdario/mergo v0.3.13 // indirect + github.com/imdario/mergo v0.3.12 // indirect github.com/inconshreveable/mousetrap v1.0.0 // indirect github.com/jmoiron/sqlx v1.3.5 // indirect github.com/josharian/intern v1.0.0 // indirect @@ -118,17 +91,14 @@ require ( github.com/klauspost/compress v1.13.6 // indirect github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect - github.com/lib/pq v1.10.7 // indirect + github.com/lib/pq v1.10.6 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect - github.com/mailru/easyjson v0.7.7 // indirect - github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mailru/easyjson v0.7.6 // indirect + github.com/mattn/go-colorable v0.1.12 // indirect github.com/mattn/go-runewidth v0.0.9 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect - github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect github.com/mitchellh/copystructure v1.2.0 // indirect - github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect - github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/moby/locker v1.0.1 // indirect github.com/moby/spdystream v0.2.0 // indirect @@ -138,14 +108,12 @@ require ( github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect github.com/morikuni/aec v1.0.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/oklog/ulid v1.3.1 // indirect 
github.com/onsi/ginkgo v1.16.5 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect - github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_golang v1.12.2 // indirect github.com/prometheus/client_model v0.2.0 // indirect github.com/prometheus/common v0.32.1 // indirect @@ -154,7 +122,6 @@ require ( github.com/russross/blackfriday v1.5.2 // indirect github.com/shopspring/decimal v1.2.0 // indirect github.com/sirupsen/logrus v1.8.1 // indirect - github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect github.com/spf13/cast v1.4.1 // indirect github.com/spf13/cobra v1.4.0 // indirect github.com/spf13/pflag v1.0.5 // indirect @@ -162,22 +129,23 @@ require ( github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonschema v1.2.0 // indirect github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca // indirect - go.mongodb.org/mongo-driver v1.11.1 // indirect - go.starlark.net v0.0.0-20230128213706-3f75dec8e403 // indirect - golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d // indirect - golang.org/x/net v0.4.0 // indirect - golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1 // indirect + go.starlark.net v0.0.0-20200707032745-474f21a9602d // indirect + golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e // indirect + golang.org/x/net v0.0.0-20220722155237-a158d28d115b // indirect + golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect - golang.org/x/sys v0.3.0 // indirect - golang.org/x/term v0.3.0 // indirect + golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f // indirect + golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect + golang.org/x/text v0.3.7 // indirect golang.org/x/time v0.0.0-20220609170525-579cf78fd858 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20220921223823-23cae91e6737 // indirect - google.golang.org/grpc v1.49.0 // indirect - google.golang.org/protobuf v1.28.1 // indirect + google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 // indirect + google.golang.org/grpc v1.47.0 // indirect + google.golang.org/protobuf v1.28.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/apiextensions-apiserver v0.25.0 // indirect k8s.io/apiserver v0.25.0 // indirect k8s.io/component-base v0.25.0 // indirect k8s.io/klog/v2 v2.70.1 // indirect diff --git a/cli/go.sum b/cli/go.sum index 6582dc76ac..531cb6f202 100644 --- a/cli/go.sum +++ b/cli/go.sum @@ -71,7 +71,6 @@ github.com/BurntSushi/toml v1.0.0 h1:dtDWrepsVPfW9H/4y7dDgFc2MBUSeJhlaDtK13CxFlU github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= -github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd 
h1:sjQovDkwrZp8u+gxLtPgKGjk5hCxuy2hrRejBTA9xFU=
github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E=
github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
@@ -104,17 +103,13 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
-github.com/armon/go-metrics v0.3.10 h1:FR+drcQStOe+32sYyJYyZ7FIdgoGGBnwLl+flodp8Uo=
-github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
+github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310 h1:BUAU3CGlLvorLI26FmByPp2eC2qla6E1Tw+scpcg/to=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
-github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI=
-github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
+github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef h1:46PFijGLmAjMPwCCCo7Jf0W6f9slllCkkv7vyc1yOSg=
github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
-github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d h1:Byv0BzEl3/e6D5CLfI0j/7hiIEtvGVFPCZ7Ei2oq8iQ=
-github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@@ -132,8 +127,6 @@ github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0 h1:nvj0OLI3YqYXe
github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4=
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/census-instrumentation/opencensus-proto v0.3.0 h1:t/LhUZLVitR1Ow2YOnduCsavhwFUklBMoGVYUCqmCqk=
-github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
@@ -143,20 +136,15 @@ github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
-github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
-github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc h1:PYXxkRUBGUMa5xgMVMDl62vEklZvKpVaxQeN9ie7Hfk=
-github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/containerd/cgroups v1.0.3 h1:ADZftAkglvCiD44c77s5YmMqaP2pzVCFZvBmAlBdAP4=
github.com/containerd/containerd v1.6.6 h1:xJNPhbrmz8xAMDNoVjHy9YHtWwEQNS+CDkcIRh7t8Y0=
github.com/containerd/containerd v1.6.6/go.mod h1:ZoP1geJldzCVY3Tonoz7b1IXk8rIX0Nltt5QE4OMNk0=
@@ -171,9 +159,8 @@ github.com/cyphar/filepath-securejoin v0.2.3 h1:YX6ebbZCZP7VkM3scTTokDgBL2TY741X
github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
github.com/danieljoos/wincred v1.1.0/go.mod h1:XYlo+eRTsVA9aHGp7NGjFkPla4m+DCL7hqDjlFjiygg=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
-github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE=
github.com/denisenkom/go-mssqldb v0.9.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
github.com/distribution/distribution/v3 v3.0.0-20220526142353-ffbd94cbe269 h1:hbCT8ZPPMqefiAWD2ZKjn7ypokIGViTvBBg/ExLSdCk=
@@ -207,12 +194,8 @@ github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5y
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
-github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
-github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1 h1:xvqufLtNVwAhN8NMyWklVgxnWohi+wtMGQMhtxexlm0=
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/envoyproxy/protoc-gen-validate v0.9.1 h1:PS7VIOgmSVhWUEeZwTe7z7zouA22Cr590PzXKbZHOVY=
-github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w=
github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84=
github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
@@ -220,7 +203,6 @@ github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwC
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4=
github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
-github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8SPQ=
@@ -239,9 +221,8 @@ github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-gorp/gorp/v3 v3.0.2 h1:ULqJXIekoqMx29FI5ekXXFoH1dT2Vc8UhnRzBg+Emz4=
github.com/go-gorp/gorp/v3 v3.0.2/go.mod h1:BJ3q1ejpV8cVALtcXvXaXyTOlMmJhWDxTmncaR6rwBY=
-github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs=
-github.com/go-gorp/gorp/v3 v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
@@ -254,76 +235,27 @@ github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbV
github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro=
-github.com/go-openapi/analysis v0.21.2 h1:hXFrOYFHUAMQdu6zwAiKKJHJQ8kqZs1ux/ru1P1wLJU=
-github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY=
-github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
-github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
-github.com/go-openapi/errors v0.20.2 h1:dxy7PGTqEh94zj2E3h1cUmQQWiM1+aeCROfAr02EmK8=
-github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
+github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM=
github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
-github.com/go-openapi/jsonreference v0.19.6 h1:UBIxjkht+AWIgYzCDSv2GN+E/togfwXUJFRTWhl2Jjs=
-github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns=
-github.com/go-openapi/loads v0.21.1 h1:Wb3nVZpdEzDTcly8S4HMkey6fjARRzb7iEaySimlDW0=
-github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g=
-github.com/go-openapi/runtime v0.24.1 h1:Sml5cgQKGYQHF+M7yYSHaH1eOjvTykrddTE/KtQVjqo=
-github.com/go-openapi/runtime v0.24.1/go.mod h1:AKurw9fNre+h3ELZfk6ILsfvPN+bvvlaU/M9q/r9hpk=
-github.com/go-openapi/spec v0.20.4 h1:O8hJrt0UMnhHcluhIdUgCLRWyM2x7QkBXRvOs7m+O1M=
-github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I=
-github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg=
-github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k=
-github.com/go-openapi/strfmt v0.21.2/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k=
-github.com/go-openapi/strfmt v0.21.3 h1:xwhj5X6CjXEZZHMWy1zKJxvW9AfHC9pkyUjLvHtKG7o=
-github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng=
github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
-github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
-github.com/go-openapi/swag v0.21.1 h1:wm0rhTb5z7qpJRHBdPOMuY4QjVUMbF6/kwoYeRAOrKU=
-github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
-github.com/go-openapi/validate v0.21.0 h1:+Wqk39yKOhfpLqNLEC0/eViCkzM5FVXVqrvt526+wcI=
-github.com/go-openapi/validate v0.21.0/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg=
-github.com/go-ozzo/ozzo-validation v3.6.0+incompatible h1:msy24VGS42fKO9K1vLz82/GeYW1cILu7Nuuj1N3BBkE=
-github.com/go-ozzo/ozzo-validation v3.6.0+incompatible/go.mod h1:gsEKFIVnabGBt6mXmxK0MoFy+cZoTJY6mu5Ll3LVLBU=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
-github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
-github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY=
-github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg=
-github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
-github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
-github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs=
-github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
-github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
-github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk=
-github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28=
-github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo=
-github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk=
-github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw=
-github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360=
-github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg=
-github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE=
-github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8=
github.com/gobuffalo/logger v1.0.6 h1:nnZNpxYo0zx+Aj9RfMPBm+x9zAU2OayFh/xrAWi34HU=
github.com/gobuffalo/logger v1.0.6/go.mod h1:J31TBEHR1QLV2683OXTAItYIg8pv2JMHnF/quuAbMjs=
-github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
-github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
-github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
-github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
github.com/gobuffalo/packd v1.0.1 h1:U2wXfRr4E9DH8IdsDLlRFwTZTK7hLfq9qT/QHXGVe/0=
github.com/gobuffalo/packd v1.0.1/go.mod h1:PP2POP3p3RXGz7Jh6eYEf93S7vA2za6xM7QT85L4+VY=
-github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ=
-github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0=
github.com/gobuffalo/packr/v2 v2.8.3 h1:xE1yzvnO56cUC0sTpKR3DIbxZgB54AftTFMhB2XEWlY=
github.com/gobuffalo/packr/v2 v2.8.3/go.mod h1:0SahksCVcx4IMnigTjiFuyldmTrdTctXsOdiU5KwbKc=
-github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw=
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
@@ -368,7 +300,6 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS
github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk=
github.com/gomodule/redigo v1.8.2 h1:H5XSIre1MB5NbPYFp+i1NBbb5qN1W8Y8YAQoAYbkm8k=
@@ -419,8 +350,8 @@ github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaU
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs=
+github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
@@ -437,61 +368,31 @@ github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWet
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
-github.com/hashicorp/consul/api v1.10.1-0.20230209203402-db2bd404bf72 h1:O+z5m5kNtu6NHBMwMsRb1S0P7giqNu5vBBeCzgiAesg=
-github.com/hashicorp/consul/api v1.10.1-0.20230209203402-db2bd404bf72/go.mod h1:c1u8FzGHcavbEtRW/p1YditvfMgn4QsKNgz2rnCDF7c=
-github.com/hashicorp/consul/envoyextensions v0.0.0-20230210154717-4f2ce606547b h1:T+El0UxZP7h2mGL+EPBJejS4gKM/w0KAYOSpTs7hrbY=
-github.com/hashicorp/consul/envoyextensions v0.0.0-20230210154717-4f2ce606547b/go.mod h1:oJKG0zAMtq6ZmZNYQyeKh6kIJmi01rZSZDSgnjzZ15w=
github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
-github.com/hashicorp/consul/sdk v0.13.0 h1:lce3nFlpv8humJL8rNrrGHYSKc3q+Kxfeg3Ii1m6ZWU=
-github.com/hashicorp/consul/troubleshoot v0.0.0-20230217154305-8dab825c3640 h1:P81kThpSzUW2oERDMrLsiZE3OuilLo3/EQhtVQW5M+8=
-github.com/hashicorp/consul/troubleshoot v0.0.0-20230217154305-8dab825c3640/go.mod h1:rskvju2tK8XvHYTAILHjO7lpV1/uViHs3Q3mg9Rkwlg=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
-github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
-github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
-github.com/hashicorp/go-hclog v1.2.1 h1:YQsLlGDJgwhXFpucSPyVbCBviQtjlHv3jLTlp8YmtEw=
-github.com/hashicorp/go-hclog v1.2.1/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
+github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs=
+github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
-github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
-github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
-github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
-github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
-github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
-github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
-github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
-github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc=
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE=
-github.com/hashicorp/go-version v1.2.1 h1:zEfKbn2+PDgroKdiOzqiE8rsmLqU2uwi5PB5pBJ3TkI=
-github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
-github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-github.com/hashicorp/hcp-sdk-go v0.23.1-0.20220921131124-49168300a7dc h1:on26TCKYnX7JzZCtwkR/LWHSqMu40PoZ6h/0e6Pq8ug=
-github.com/hashicorp/hcp-sdk-go v0.23.1-0.20220921131124-49168300a7dc/go.mod h1:/9UoDY2FYYA8lFaKBb2HmM/jKYZGANmf65q9QRc/cVw=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
-github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc=
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
-github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM=
-github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
-github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
-github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
github.com/huandu/xstrings v1.3.2 h1:L18LIDzqlW6xN2rEkpdV8+oL/IXWJ1APd+vsdYy4Wdw=
@@ -500,20 +401,18 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
-github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg=
+github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
+github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g=
github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ=
-github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
@@ -523,8 +422,6 @@ github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/X
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
-github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
-github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
github.com/karrick/godirwalk v1.16.1 h1:DynhcF+bztK8gooS0+NDJFrdNZjJ3gzVzC545UNA9iw=
github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
@@ -532,15 +429,14 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kortschak/utter v1.0.1/go.mod h1:vSmSjbyrlKjjsL71193LmzBOKgwePk9DH6uFaWHIInc=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
@@ -551,8 +447,8 @@ github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhR
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
-github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw=
-github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/lib/pq v1.10.6 h1:jbk+ZieJ0D7EVGJYpL9QTz7/YW6UHbmdnZWYyK5cdBs=
+github.com/lib/pq v1.10.6/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc=
@@ -560,56 +456,43 @@ github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPK
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
+github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA=
github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
-github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
-github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/markbates/errx v1.1.0 h1:QDFeR+UP95dO12JgW+tgi2UVfo0V8YBHiUIOaeBPiEI=
github.com/markbates/errx v1.1.0/go.mod h1:PLa46Oex9KNbVDZhKel8v1OT7hD5JZ2eI7AHhA0wswc=
-github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
github.com/markbates/oncer v1.0.0 h1:E83IaVAHygyndzPimgUYJjbshhDTALZyXxvk9FOlQRY=
github.com/markbates/oncer v1.0.0/go.mod h1:Z59JA581E9GP6w96jai+TGqafHPW+cPfRxz2aSZ0mcI=
github.com/markbates/safe v1.0.1 h1:yjZkbvRM6IzKj9tlu/zMJLS0n/V351OZWRnF3QfaUxI=
github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
-github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40=
github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
-github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
-github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
-github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
+github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
-github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ=
-github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-oci8 v0.1.1/go.mod h1:wjDx6Xm9q7dFtHJvIlrI99JytznLw5wQ4R+9mNXJwGI=
github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
+github.com/mattn/go-sqlite3 v1.14.6 h1:dNPt6NO46WmLVt2DLNpwczCmdV5boIZ6g/tlDrlRUbg=
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
-github.com/mattn/go-sqlite3 v1.14.15 h1:vfoHhTN1af61xCRSWzFIWzx2YskyMTwHLrExkBOjvxI=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
-github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
-github.com/miekg/dns v1.1.41 h1:WMszZWJG0XmzbK9FEmzH2TVcqYzFesusSIB41b8KHxY=
-github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
-github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
github.com/mitchellh/cli v1.1.2 h1:PvH+lL2B7IQ101xQL63Of8yFS2y+aDlsFcsqNc+u/Kw=
github.com/mitchellh/cli v1.1.2/go.mod h1:6iaV0fGdElS6dPBx0EApTxHrcWvmJphyh2n8YBLPPZ4=
-github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ=
-github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw=
github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
-github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0=
@@ -618,11 +501,7 @@ github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS4
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
-github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
@@ -642,7 +521,6 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0=
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4=
-github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
@@ -655,8 +533,6 @@ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLA
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
-github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
-github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
@@ -675,12 +551,7 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 h1:rc3tiVYb5z54aKaDfakKn0dDjIyPpTtszkjuMzyt7ec=
github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
-github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
-github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
-github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
-github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE=
github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
@@ -691,18 +562,15 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
-github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/posener/complete v1.1.1 h1:ccV59UEOTzVDnDUEFdT95ZzHVZ+5+158q8+SJb2QV5w=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
-github.com/posener/complete v1.2.3 h1:NP0eAhjcjImqslEwo/1hq7gpajME0fTLTezBKDqfXqo=
-github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
+github.com/poy/onpar v0.0.0-20190519213022-ee068f8ea4d1 h1:oL4IBbcqwhhNWh31bjOX8C/OCy0zs9906d/VUru+bqg=
github.com/poy/onpar v0.0.0-20190519213022-ee068f8ea4d1/go.mod h1:nSbFQvMj97ZyhFRSJYtut+msi4sOY6zJDGCdSc+/rZU=
-github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
-github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
@@ -715,7 +583,6 @@ github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
-github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4=
@@ -723,16 +590,12 @@ github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
-github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
-github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8=
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
github.com/rubenv/sql-migrate v1.1.1 h1:haR5Hn8hbW9/SpAICrXoZqXnywS7Q5WijwkQENPeNWY=
github.com/rubenv/sql-migrate v1.1.1/go.mod h1:/7TZymwxN8VWumcIxw1jjHEcR1djpdkMHQPT4FWdnbQ=
@@ -741,7 +604,6 @@ github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
-github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
@@ -749,14 +611,10 @@ github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXY
github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA=
-github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
@@ -765,7 +623,6 @@ github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA=
github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk=
github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q=
github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g=
@@ -778,28 +635,17 @@ github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
-github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4=
-github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s=
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
-github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
-github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
-github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=
-github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
-github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
-github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
-github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs=
-github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g=
-github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM=
-github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
@@ -808,7 +654,6 @@ github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca h1:1CFlNzQhALwjS9mBAUkycX616GzgsuYUOCHA5+HSlXI=
github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
-github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@@ -819,16 +664,11 @@ github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1
github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43 h1:+lm10QQTNSBd8DVTNGHx7o/IKu9HYDvLMffDhbyLccI=
github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50 h1:hlE8//ciYMztlGpl/VA+Zm1AcTPHYkHJPbHqE6WJUXE=
github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f h1:ERexzlUfuTvpE74urLSbIQW0Z/6hF9t8U4NsJLaioAY=
+github.com/ziutek/mymysql v1.5.4 h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs=
github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0=
go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
-go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg=
-go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng=
-go.mongodb.org/mongo-driver v1.8.3/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY=
-go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8=
-go.mongodb.org/mongo-driver v1.11.1 h1:QP0znIRTuL0jf1oBQoAoM0C6ZJfBK4kx0Uumtv1A7w8=
-go.mongodb.org/mongo-driver v1.11.1/go.mod h1:s7p5vEtfbeR1gYi6pnj3c3/urpbLv2T5Sfd6Rp2HBB8=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
@@ -848,11 +688,9 @@ go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi
go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE=
go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
-go.opentelemetry.io/proto/otlp v0.11.0 h1:cLDgIBTf4lLOlztkhzAEdQsJ4Lj+i5Wc9k6Nn0K1VyU=
-go.opentelemetry.io/proto/otlp v0.11.0/go.mod h1:QpEjXPrNQzrFDZgoTo49dgHR9RYRSrg3NAKnUGl9YpQ=
go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o=
-go.starlark.net v0.0.0-20230128213706-3f75dec8e403 h1:jPeC7Exc+m8OBJUlWbBLh0O5UZPM7yU5W4adnhhbG4U=
-go.starlark.net v0.0.0-20230128213706-3f75dec8e403/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds=
+go.starlark.net v0.0.0-20200707032745-474f21a9602d h1:uFqwFYlX7d5ZSp+IqhXxct0SybXrTzEBDvb2CkEhPBs=
+go.starlark.net v0.0.0-20200707032745-474f21a9602d/go.mod h1:f0znQkUKRrkk36XxWbGjMqQM8wGv/xHBVE2qc3B5oFU=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
@@ -862,23 +700,19 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d h1:sK3txAijHtOK88l68nt020reeT1ZdKLIYetKl95FzVY=
-golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e h1:T8NU3HyQ8ClP4SEE+KbFlg6n0NhuTsN4MyznaarGsZM=
+golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -933,7 +767,6 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -957,16 +790,14 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
-golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8=
-golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM=
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU=
-golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b h1:PxfKdU9lEEDYjdIzOtC4qFWgkU2rGHdKlKowJSMN9h0=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -983,14 +814,12 @@ golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg=
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1 h1:lxqLZaMad/dJHMFZH0NiNpiEZI/nhgWhe4wgzpE+MuA=
-golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -1009,23 +838,19 @@ golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1057,13 +882,11 @@ golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -1083,18 +906,11 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ=
-golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.3.0 h1:qoo4akIqOcDME5bhc/NgxUdovd6BSS2uMsVjB56q1xI=
-golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1103,9 +919,8 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM=
-golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -1119,19 +934,14 @@ golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
@@ -1281,8 +1091,8 @@ google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ6
google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20220921223823-23cae91e6737 h1:K1zaaMdYBXRyX+cwFnxj7M6zwDyumLQMZ5xqwGvjreQ=
-google.golang.org/genproto v0.0.0-20220921223823-23cae91e6737/go.mod h1:2r/26NEF3bFmT3eC3aZreahSal0C3Shl8Gi6vyDYqOQ=
+google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 h1:hrbNEivu7Zn1pxvHk6MBrq9iE22woVILTHqexqBxe6I=
+google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -1308,9 +1118,9 @@ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ
google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
-google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
-google.golang.org/grpc v1.49.0 h1:WTLtQzmQori5FUH25Pq4WT22oCsv8USpQ+F6rqtsmxw=
-google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
+google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
+google.golang.org/grpc v1.47.0 h1:9n77onPX5F3qfFCqjy9dhn8PbNQsIKeVU04J9G7umt8=
+google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
@@ -1325,8 +1135,8 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
-google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
+google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -1351,10 +1161,8 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
diff --git a/cli/helm/action.go b/cli/helm/action.go
index d71014c762..df8ba5fb07 100644
--- a/cli/helm/action.go
+++ b/cli/helm/action.go
@@ -1,14 +1,11 @@
 package helm

 import (
-  "embed"
   "fmt"
   "os"

   "helm.sh/helm/v3/pkg/action"
-  "helm.sh/helm/v3/pkg/chart"
   helmCLI "helm.sh/helm/v3/pkg/cli"
-  "helm.sh/helm/v3/pkg/release"
   "k8s.io/cli-runtime/pkg/genericclioptions"
 )

@@ -26,83 +23,3 @@ func InitActionConfig(actionConfig *action.Configuration, namespace string, sett
   }
   return actionConfig, nil
 }
-
-// HelmActionsRunner is a thin interface over existing Helm actions that normally
-// require a Kubernetes cluster. This interface allows us to mock it in tests
-// and get better coverage of CLI commands.
-type HelmActionsRunner interface {
-  // A thin wrapper around the Helm list function.
-  CheckForInstallations(options *CheckForInstallationsOptions) (bool, string, string, error)
-  // A thin wrapper around the Helm status function.
-  GetStatus(status *action.Status, name string) (*release.Release, error)
-  // A thin wrapper around the Helm install function.
-  Install(install *action.Install, chrt *chart.Chart, vals map[string]interface{}) (*release.Release, error)
-  // A thin wrapper around the LoadChart function in the consul-k8s CLI that reads the charts within the embedded file system.
-  LoadChart(chart embed.FS, chartDirName string) (*chart.Chart, error)
-  // A thin wrapper around the Helm uninstall function.
-  Uninstall(uninstall *action.Uninstall, name string) (*release.UninstallReleaseResponse, error)
-  // A thin wrapper around the Helm upgrade function.
- Upgrade(upgrade *action.Upgrade, name string, chart *chart.Chart, vals map[string]interface{}) (*release.Release, error) -} - -// ActionRunner is the implementation of HelmActionsRunner interface that -// truly calls Helm sdk functions and requires a real Kubernetes cluster. It -// is the non-mock implementation of HelmActionsRunner that is used in the CLI. -type ActionRunner struct{} - -func (h *ActionRunner) Uninstall(uninstall *action.Uninstall, name string) (*release.UninstallReleaseResponse, error) { - return uninstall.Run(name) -} - -func (h *ActionRunner) Install(install *action.Install, chrt *chart.Chart, vals map[string]interface{}) (*release.Release, error) { - return install.Run(chrt, vals) -} - -type CheckForInstallationsOptions struct { - Settings *helmCLI.EnvSettings - ReleaseName string - DebugLog action.DebugLog - SkipErrorWhenNotFound bool -} - -// CheckForInstallations uses the helm Go SDK to find helm releases in all namespaces where the chart name is -// "consul", and returns the release name and namespace if found, or an error if not found. -func (h *ActionRunner) CheckForInstallations(options *CheckForInstallationsOptions) (bool, string, string, error) { - // Need a specific action config to call helm list, where namespace is NOT specified. - listConfig := new(action.Configuration) - if err := listConfig.Init(options.Settings.RESTClientGetter(), "", - os.Getenv("HELM_DRIVER"), options.DebugLog); err != nil { - return false, "", "", fmt.Errorf("couldn't initialize helm config: %s", err) - } - - lister := action.NewList(listConfig) - lister.AllNamespaces = true - lister.StateMask = action.ListAll - res, err := lister.Run() - if err != nil { - return false, "", "", fmt.Errorf("couldn't check for installations: %s", err) - } - - for _, rel := range res { - if rel.Chart.Metadata.Name == options.ReleaseName { - return true, rel.Name, rel.Namespace, nil - } - } - var notFoundError error - if !options.SkipErrorWhenNotFound { - notFoundError = fmt.Errorf("couldn't find installation named '%s'", options.ReleaseName) - } - return false, "", "", notFoundError -} - -func (h *ActionRunner) GetStatus(status *action.Status, name string) (*release.Release, error) { - return status.Run(name) -} - -func (h *ActionRunner) Upgrade(upgrade *action.Upgrade, name string, chart *chart.Chart, vals map[string]interface{}) (*release.Release, error) { - return upgrade.Run(name, chart, vals) -} - -func (h *ActionRunner) LoadChart(chart embed.FS, chartDirName string) (*chart.Chart, error) { - return LoadChart(chart, chartDirName) -} diff --git a/cli/helm/chart.go b/cli/helm/chart.go index f679ca591d..1a91ee19d5 100644 --- a/cli/helm/chart.go +++ b/cli/helm/chart.go @@ -29,7 +29,7 @@ func LoadChart(chart embed.FS, chartDirName string) (*chart.Chart, error) { // FetchChartValues will attempt to fetch the values from the currently // installed Helm chart. 
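For context at this point in the diff: the hunk above removes CheckForInstallations along with the rest of the runner, and the hunk below rewires FetchChartValues to call the Helm status action directly. A hypothetical caller sketch for the removed helper, with field values borrowed from the deleted tests later in this diff (the surrounding code and error handling are illustrative, not code from this PR):

    runner := &helm.ActionRunner{}
    found, releaseName, releaseNamespace, err := runner.CheckForInstallations(&helm.CheckForInstallationsOptions{
        Settings:              helmCLI.New(),                             // Helm CLI environment
        ReleaseName:           "consul",                                  // chart name to look for
        DebugLog:              func(format string, v ...interface{}) {},  // silence Helm logging
        SkipErrorWhenNotFound: true,                                      // absence is not an error
    })
    if err == nil && found {
        fmt.Printf("found release %s in namespace %s\n", releaseName, releaseNamespace)
    }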
-func FetchChartValues(actionRunner HelmActionsRunner, namespace, name string, settings *helmCLI.EnvSettings, uiLogger action.DebugLog) (map[string]interface{}, error) {
+func FetchChartValues(namespace, name string, settings *helmCLI.EnvSettings, uiLogger action.DebugLog) (map[string]interface{}, error) {
   cfg := new(action.Configuration)
   cfg, err := InitActionConfig(cfg, namespace, settings, uiLogger)
   if err != nil {
@@ -37,7 +37,7 @@ func FetchChartValues(actionRunner HelmActionsRunner, namespace, name string, se
   }
   status := action.NewStatus(cfg)
-  release, err := actionRunner.GetStatus(status, name)
+  release, err := status.Run(name)
   if err != nil {
     return nil, err
   }
diff --git a/cli/helm/install.go b/cli/helm/install.go
deleted file mode 100644
index 1e0a83b8b8..0000000000
--- a/cli/helm/install.go
+++ /dev/null
@@ -1,141 +0,0 @@
-package helm
-
-import (
-  "embed"
-  "fmt"
-  "time"
-
-  "github.com/hashicorp/consul-k8s/cli/common"
-  "github.com/hashicorp/consul-k8s/cli/common/terminal"
-  "golang.org/x/text/cases"
-  "golang.org/x/text/language"
-  "helm.sh/helm/v3/pkg/action"
-  helmCLI "helm.sh/helm/v3/pkg/cli"
-)
-
-// InstallOptions is used when calling InstallHelmRelease.
-type InstallOptions struct {
-  // ReleaseName is the name of the Helm release to be installed.
-  ReleaseName string
-  // ReleaseType is the helm upgrade type - consul vs consul-demo.
-  ReleaseType string
-  // Namespace is the Kubernetes namespace where the release is to be
-  // installed.
-  Namespace string
-  // Values are the Helm chart values in map form.
-  Values map[string]interface{}
-  // Settings is the Helm CLI environment settings.
-  Settings *helmCLI.EnvSettings
-  // EmbeddedChart specifies the Consul or Consul Demo Helm chart that has
-  // been embedded into the consul-k8s CLI.
-  EmbeddedChart embed.FS
-  // ChartDirName is the top level directory name of the EmbeddedChart.
-  ChartDirName string
-  // UILogger is a DebugLog used to return messages from Helm to the UI.
-  UILogger action.DebugLog
-  // DryRun specifies whether the install/upgrade should actually modify the
-  // Kubernetes cluster.
-  DryRun bool
-  // AutoApprove will bypass any terminal prompts with an automatic yes.
-  AutoApprove bool
-  // Wait specifies whether the Helm install should wait until all pods
-  // are ready.
-  Wait bool
-  // Timeout is the duration that Helm will wait for the command to complete
-  // before it throws an error.
-  Timeout time.Duration
-  // UI is the terminal output representation that is used to prompt the user
-  // and output messages.
-  UI terminal.UI
-  // HelmActionsRunner is a thin interface around Helm actions for install,
-  // upgrade, and uninstall.
-  HelmActionsRunner HelmActionsRunner
-}
-
-// InstallDemoApp will perform the following actions:
-// - Print out the installation summary.
-// - Setup action configuration for Helm Go SDK function calls.
-// - Setup the installation action.
-// - Load the Helm chart.
-// - Run the install.
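The options struct and step list above are exercised end-to-end by the deleted install_test.go further down. A minimal sketch of driving InstallDemoApp without a cluster, mirroring that test (values taken from it; MockActionRunner is the test double removed later in this diff):

    buf := new(bytes.Buffer)
    options := &helm.InstallOptions{
        HelmActionsRunner: &helm.MockActionRunner{}, // no Kubernetes required
        UI:                terminal.NewUI(context.Background(), buf),
        UILogger:          func(format string, v ...interface{}) {},
        ReleaseName:       "consul-release",
        ReleaseType:       common.ReleaseTypeConsul,
        Namespace:         "consul-namespace",
        Settings:          helmCLI.New(),
        AutoApprove:       true, // skip the interactive prompt
    }
    err := helm.InstallDemoApp(options) // buf now holds the summary output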
-func InstallDemoApp(options *InstallOptions) error {
-  options.UI.Output(fmt.Sprintf("%s Installation Summary",
-    cases.Title(language.English).String(common.ReleaseTypeConsulDemo)),
-    terminal.WithHeaderStyle())
-  options.UI.Output("Name: %s", common.ConsulDemoAppReleaseName, terminal.WithInfoStyle())
-  options.UI.Output("Namespace: %s", options.Settings.Namespace(), terminal.WithInfoStyle())
-  options.UI.Output("\n", terminal.WithInfoStyle())
-
-  err := InstallHelmRelease(options)
-  if err != nil {
-    return err
-  }
-
-  options.UI.Output("Accessing %s UI", cases.Title(language.English).String(common.ReleaseTypeConsulDemo), terminal.WithHeaderStyle())
-  port := "8080"
-  portForwardCmd := fmt.Sprintf("kubectl port-forward service/nginx %s:80", port)
-  if options.Settings.Namespace() != "default" {
-    portForwardCmd += fmt.Sprintf(" --namespace %s", options.Settings.Namespace())
-  }
-  options.UI.Output(portForwardCmd, terminal.WithInfoStyle())
-  options.UI.Output("Browse to http://localhost:%s.", port, terminal.WithInfoStyle())
-  return nil
-}
-
-// InstallHelmRelease handles downloading the embedded helm chart, loading the
-// values and running the Helm install command.
-func InstallHelmRelease(options *InstallOptions) error {
-  if options.DryRun {
-    return nil
-  }
-
-  if !options.AutoApprove {
-    confirmation, err := options.UI.Input(&terminal.Input{
-      Prompt: "Proceed with installation? (Y/n)",
-      Style:  terminal.InfoStyle,
-      Secret: false,
-    })
-
-    if err != nil {
-      return err
-    }
-    // The install will proceed if the user presses enter or responds with "y"/"yes" (case-insensitive).
-    if confirmation != "" && common.Abort(confirmation) {
-      options.UI.Output("Install aborted. Use the command `consul-k8s install -help` to learn how to customize your installation.",
-        terminal.WithInfoStyle())
-      return err
-    }
-  }
-
-  options.UI.Output("Installing %s", options.ReleaseType, terminal.WithHeaderStyle())
-
-  // Setup action configuration for Helm Go SDK function calls.
-  actionConfig := new(action.Configuration)
-  actionConfig, err := InitActionConfig(actionConfig, options.Namespace, options.Settings, options.UILogger)
-  if err != nil {
-    return err
-  }
-
-  // Setup the installation action.
-  install := action.NewInstall(actionConfig)
-  install.ReleaseName = options.ReleaseName
-  install.Namespace = options.Namespace
-  install.CreateNamespace = true
-  install.Wait = options.Wait
-  install.Timeout = options.Timeout
-
-  // Load the Helm chart.
-  chart, err := options.HelmActionsRunner.LoadChart(options.EmbeddedChart, options.ChartDirName)
-  if err != nil {
-    return err
-  }
-  options.UI.Output("Downloaded charts.", terminal.WithSuccessStyle())
-
-  // Run the install.
- if _, err = options.HelmActionsRunner.Install(install, chart, options.Values); err != nil { - return err - } - - options.UI.Output("%s installed in namespace %q.", options.ReleaseType, options.Namespace, terminal.WithSuccessStyle()) - return nil -} diff --git a/cli/helm/install_test.go b/cli/helm/install_test.go deleted file mode 100644 index 6458a06e54..0000000000 --- a/cli/helm/install_test.go +++ /dev/null @@ -1,82 +0,0 @@ -package helm - -import ( - "bytes" - "context" - "embed" - "errors" - "testing" - - "github.com/hashicorp/consul-k8s/cli/common" - "github.com/hashicorp/consul-k8s/cli/common/terminal" - "github.com/stretchr/testify/require" - "helm.sh/helm/v3/pkg/action" - "helm.sh/helm/v3/pkg/chart" - helmCLI "helm.sh/helm/v3/pkg/cli" - "helm.sh/helm/v3/pkg/release" -) - -func TestInstallDemoApp(t *testing.T) { - cases := map[string]struct { - messages []string - helmActionsRunner *MockActionRunner - expectError bool - }{ - "basic success": { - messages: []string{ - "\n==> Consul Demo Application Installation Summary\n Name: consul-demo\n Namespace: default\n \n \n", - "\n==> Installing Consul\n ✓ Downloaded charts.\n ✓ Consul installed in namespace \"consul-namespace\".\n", - "\n==> Accessing Consul Demo Application UI\n kubectl port-forward service/nginx 8080:80 --namespace consul-namespace\n Browse to http://localhost:8080.\n", - }, - helmActionsRunner: &MockActionRunner{}, - }, - "failure because LoadChart returns failure": { - messages: []string{ - "\n==> Consul Demo Application Installation Summary\n Name: consul-demo\n Namespace: default\n \n \n\n==> Installing Consul\n", - }, - helmActionsRunner: &MockActionRunner{ - LoadChartFunc: func(chrt embed.FS, chartDirName string) (*chart.Chart, error) { - return nil, errors.New("sad trombone!") - }, - }, - expectError: true, - }, - "failure because Install returns failure": { - messages: []string{ - "\n==> Consul Demo Application Installation Summary\n Name: consul-demo\n Namespace: default\n \n \n\n==> Installing Consul\n", - }, - helmActionsRunner: &MockActionRunner{ - InstallFunc: func(install *action.Install, chrt *chart.Chart, vals map[string]interface{}) (*release.Release, error) { - return nil, errors.New("sad trombone!") - }, - }, - expectError: true, - }, - } - for name, tc := range cases { - t.Run(name, func(t *testing.T) { - buf := new(bytes.Buffer) - mock := tc.helmActionsRunner - options := &InstallOptions{ - HelmActionsRunner: mock, - UI: terminal.NewUI(context.Background(), buf), - UILogger: func(format string, v ...interface{}) {}, - ReleaseName: "consul-release", - ReleaseType: common.ReleaseTypeConsul, - Namespace: "consul-namespace", - Settings: helmCLI.New(), - AutoApprove: true, - } - err := InstallDemoApp(options) - if tc.expectError { - require.Error(t, err) - } else { - require.NoError(t, err) - } - output := buf.String() - for _, msg := range tc.messages { - require.Contains(t, output, msg) - } - }) - } -} diff --git a/cli/helm/mock.go b/cli/helm/mock.go deleted file mode 100644 index 05d3b6edb4..0000000000 --- a/cli/helm/mock.go +++ /dev/null @@ -1,136 +0,0 @@ -package helm - -import ( - "embed" - - "github.com/hashicorp/consul-k8s/cli/common" - - "helm.sh/helm/v3/pkg/action" - "helm.sh/helm/v3/pkg/chart" - "helm.sh/helm/v3/pkg/release" -) - -type MockActionRunner struct { - CheckForInstallationsFunc func(options *CheckForInstallationsOptions) (bool, string, string, error) - GetStatusFunc func(status *action.Status, name string) (*release.Release, error) - InstallFunc func(install *action.Install, chrt 
*chart.Chart, vals map[string]interface{}) (*release.Release, error) - LoadChartFunc func(chrt embed.FS, chartDirName string) (*chart.Chart, error) - UninstallFunc func(uninstall *action.Uninstall, name string) (*release.UninstallReleaseResponse, error) - UpgradeFunc func(upgrade *action.Upgrade, name string, chart *chart.Chart, vals map[string]interface{}) (*release.Release, error) - CheckedForConsulInstallations bool - CheckedForConsulDemoInstallations bool - GotStatusConsulRelease bool - GotStatusConsulDemoRelease bool - ConsulInstalled bool - ConsulUninstalled bool - ConsulUpgraded bool - ConsulDemoInstalled bool - ConsulDemoUninstalled bool - ConsulDemoUpgraded bool -} - -func (m *MockActionRunner) Install(install *action.Install, chrt *chart.Chart, vals map[string]interface{}) (*release.Release, error) { - var installFunc func(install *action.Install, chrt *chart.Chart, vals map[string]interface{}) (*release.Release, error) - if m.InstallFunc == nil { - installFunc = func(install *action.Install, chrt *chart.Chart, vals map[string]interface{}) (*release.Release, error) { - return &release.Release{}, nil - } - } else { - installFunc = m.InstallFunc - } - - release, err := installFunc(install, chrt, vals) - if err == nil { - if install.ReleaseName == common.DefaultReleaseName { - m.ConsulInstalled = true - } else if install.ReleaseName == common.ConsulDemoAppReleaseName { - m.ConsulDemoInstalled = true - } - } - return release, err -} - -func (m *MockActionRunner) Uninstall(uninstall *action.Uninstall, name string) (*release.UninstallReleaseResponse, error) { - var uninstallFunc func(uninstall *action.Uninstall, name string) (*release.UninstallReleaseResponse, error) - - if m.UninstallFunc == nil { - uninstallFunc = func(uninstall *action.Uninstall, name string) (*release.UninstallReleaseResponse, error) { - return &release.UninstallReleaseResponse{}, nil - } - } else { - uninstallFunc = m.UninstallFunc - } - - release, err := uninstallFunc(uninstall, name) - if err == nil { - if name == common.DefaultReleaseName { - m.ConsulUninstalled = true - } else if name == common.ConsulDemoAppReleaseName { - m.ConsulDemoUninstalled = true - } - } - return release, err -} - -func (m *MockActionRunner) CheckForInstallations(options *CheckForInstallationsOptions) (bool, string, string, error) { - if options.ReleaseName == common.DefaultReleaseName { - m.CheckedForConsulInstallations = true - } else if options.ReleaseName == common.ConsulDemoAppReleaseName { - m.CheckedForConsulDemoInstallations = true - } - - if m.CheckForInstallationsFunc == nil { - return false, "", "", nil - } - return m.CheckForInstallationsFunc(options) -} - -func (m *MockActionRunner) GetStatus(status *action.Status, name string) (*release.Release, error) { - if name == common.DefaultReleaseName { - m.GotStatusConsulRelease = true - } else if name == common.ConsulDemoAppReleaseName { - m.GotStatusConsulDemoRelease = true - } - - if m.GetStatusFunc == nil { - return &release.Release{}, nil - } - return m.GetStatusFunc(status, name) -} - -func (m *MockActionRunner) Upgrade(upgrade *action.Upgrade, name string, chrt *chart.Chart, vals map[string]interface{}) (*release.Release, error) { - var upgradeFunc func(upgrade *action.Upgrade, name string, chrt *chart.Chart, vals map[string]interface{}) (*release.Release, error) - - if m.UpgradeFunc == nil { - upgradeFunc = func(upgrade *action.Upgrade, name string, chrt *chart.Chart, vals map[string]interface{}) (*release.Release, error) { - return &release.Release{}, nil - } - } else { - 
upgradeFunc = m.UpgradeFunc
-  }
-
-  release, err := upgradeFunc(upgrade, name, chrt, vals)
-  if err == nil {
-    if name == common.DefaultReleaseName {
-      m.ConsulUpgraded = true
-    } else if name == common.ConsulDemoAppReleaseName {
-      m.ConsulDemoUpgraded = true
-    }
-  }
-  return release, err
-}
-
-func (m *MockActionRunner) LoadChart(chrt embed.FS, chartDirName string) (*chart.Chart, error) {
-  var loadChartFunc func(chrt embed.FS, chartDirName string) (*chart.Chart, error)
-
-  if m.LoadChartFunc == nil {
-    loadChartFunc = func(chrt embed.FS, chartDirName string) (*chart.Chart, error) {
-      return &chart.Chart{}, nil
-    }
-  } else {
-    loadChartFunc = m.LoadChartFunc
-  }
-
-  release, err := loadChartFunc(chrt, chartDirName)
-  return release, err
-}
diff --git a/cli/helm/upgrade.go b/cli/helm/upgrade.go
deleted file mode 100644
index e9d4545652..0000000000
--- a/cli/helm/upgrade.go
+++ /dev/null
@@ -1,150 +0,0 @@
-package helm
-
-import (
-  "embed"
-  "strings"
-  "time"
-
-  "github.com/hashicorp/consul-k8s/cli/common"
-  "github.com/hashicorp/consul-k8s/cli/common/terminal"
-  "golang.org/x/text/cases"
-  "golang.org/x/text/language"
-  "helm.sh/helm/v3/pkg/action"
-  helmCLI "helm.sh/helm/v3/pkg/cli"
-)
-
-// UpgradeOptions is used when calling UpgradeHelmRelease.
-type UpgradeOptions struct {
-  // ReleaseName is the name of the installed Helm release to upgrade.
-  ReleaseName string
-  // ReleaseType is the helm upgrade type - consul vs consul-demo.
-  ReleaseType string
-  // ReleaseTypeName is a user friendly version of ReleaseType. The values
-  // are consul and consul demo application.
-  ReleaseTypeName string
-  // Namespace is the Kubernetes namespace where the release is installed.
-  Namespace string
-  // Values are the Helm chart values in map form.
-  Values map[string]interface{}
-  // Settings is the Helm CLI environment settings.
-  Settings *helmCLI.EnvSettings
-  // EmbeddedChart specifies the Consul or Consul Demo Helm chart that has
-  // been embedded into the consul-k8s CLI.
-  EmbeddedChart embed.FS
-  // ChartDirName is the top level directory name of the EmbeddedChart.
-  ChartDirName string
-  // UILogger is a DebugLog used to return messages from Helm to the UI.
-  UILogger action.DebugLog
-  // DryRun specifies whether the upgrade should actually modify the
-  // Kubernetes cluster.
-  DryRun bool
-  // AutoApprove will bypass any terminal prompts with an automatic yes.
-  AutoApprove bool
-  // Wait specifies whether the Helm install should wait until all pods
-  // are ready.
-  Wait bool
-  // Timeout is the duration that Helm will wait for the command to complete
-  // before it throws an error.
-  Timeout time.Duration
-  // UI is the terminal output representation that is used to prompt the user
-  // and output messages.
-  UI terminal.UI
-  // HelmActionsRunner is a thin interface around Helm actions for install,
-  // upgrade, and uninstall.
-  HelmActionsRunner HelmActionsRunner
-}
-
-// UpgradeHelmRelease handles downloading the embedded helm chart, loading the
-// values, showing the diff between new and installed values, and running the
-// Helm upgrade command.
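Before the upgrade flow resumes below, note the shape shared by every MockActionRunner method in the hunk above: use the test-supplied hook if one is set, otherwise fall back to a stub that succeeds, then record the outcome for later assertions. A standalone sketch of that pattern (illustrative only, not code from this diff):

    // mockInstaller reproduces the hook-or-default pattern in miniature.
    type mockInstaller struct {
        InstallFunc func() error // optional per-test override
        Installed   bool         // side effect recorded for assertions
    }

    func (m *mockInstaller) Install() error {
        f := m.InstallFunc
        if f == nil {
            f = func() error { return nil } // default stub: succeed
        }
        if err := f(); err != nil {
            return err // failure injected by the test
        }
        m.Installed = true // only record success
        return nil
    }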
-func UpgradeHelmRelease(options *UpgradeOptions) error {
-  options.UI.Output("%s Upgrade Summary", cases.Title(language.English).String(options.ReleaseTypeName), terminal.WithHeaderStyle())
-
-  chart, err := options.HelmActionsRunner.LoadChart(options.EmbeddedChart, options.ChartDirName)
-  if err != nil {
-    return err
-  }
-  options.UI.Output("Downloaded charts.", terminal.WithSuccessStyle())
-
-  currentChartValues, err := FetchChartValues(options.HelmActionsRunner,
-    options.Namespace, options.ReleaseName, options.Settings, options.UILogger)
-  if err != nil {
-    return err
-  }
-
-  // Print out the upgrade summary.
-  if err = printDiff(currentChartValues, options.Values, options.UI); err != nil {
-    options.UI.Output("Could not print the difference between current and upgraded charts: %v", err, terminal.WithErrorStyle())
-    return err
-  }
-
-  // Check if the user is OK with the upgrade unless the auto approve or dry run flags are true.
-  if !options.AutoApprove && !options.DryRun {
-    confirmation, err := options.UI.Input(&terminal.Input{
-      Prompt: "Proceed with upgrade? (Y/n)",
-      Style:  terminal.InfoStyle,
-      Secret: false,
-    })
-
-    if err != nil {
-      return err
-    }
-    // The upgrade will proceed if the user presses enter or responds with "y"/"yes" (case-insensitive).
-    if confirmation != "" && common.Abort(confirmation) {
-      options.UI.Output("Upgrade aborted. Use the command `consul-k8s upgrade -help` to learn how to customize your upgrade.",
-        terminal.WithInfoStyle())
-      return err
-    }
-  }
-
-  if !options.DryRun {
-    options.UI.Output("Upgrading %s", options.ReleaseTypeName, terminal.WithHeaderStyle())
-  } else {
-    options.UI.Output("Performing Dry Run Upgrade", terminal.WithHeaderStyle())
-    return nil
-  }
-
-  // Setup action configuration for Helm Go SDK function calls.
-  actionConfig := new(action.Configuration)
-  actionConfig, err = InitActionConfig(actionConfig, options.Namespace, options.Settings, options.UILogger)
-  if err != nil {
-    return err
-  }
-
-  // Setup the upgrade action.
-  upgrade := action.NewUpgrade(actionConfig)
-  upgrade.Namespace = options.Namespace
-  upgrade.DryRun = options.DryRun
-  upgrade.Wait = options.Wait
-  upgrade.Timeout = options.Timeout
-
-  // Run the upgrade. Note that the dry run case returned early above, so upgrade.Run is only reached for a real upgrade.
-  _, err = options.HelmActionsRunner.Upgrade(upgrade, options.ReleaseName, chart, options.Values)
-  if err != nil {
-    return err
-  }
-  options.UI.Output("%s upgraded in namespace %q.", cases.Title(language.English).String(options.ReleaseTypeName), options.Namespace, terminal.WithSuccessStyle())
-  return nil
-}
-
-// printDiff marshals both maps to YAML and prints the diff between the two.
-func printDiff(old, new map[string]interface{}, ui terminal.UI) error { - diff, err := common.Diff(old, new) - if err != nil { - return err - } - - ui.Output("\nDifference between user overrides for current and upgraded charts"+ - "\n-----------------------------------------------------------------", terminal.WithInfoStyle()) - for _, line := range strings.Split(diff, "\n") { - if strings.HasPrefix(line, "+") { - ui.Output(line, terminal.WithDiffAddedStyle()) - } else if strings.HasPrefix(line, "-") { - ui.Output(line, terminal.WithDiffRemovedStyle()) - } else { - ui.Output(line, terminal.WithDiffUnchangedStyle()) - } - } - - return nil -} diff --git a/cli/helm/upgrade_test.go b/cli/helm/upgrade_test.go deleted file mode 100644 index 4ae09bb36f..0000000000 --- a/cli/helm/upgrade_test.go +++ /dev/null @@ -1,117 +0,0 @@ -package helm - -import ( - "bytes" - "context" - "embed" - "errors" - "testing" - - "github.com/hashicorp/consul-k8s/cli/common" - "github.com/hashicorp/consul-k8s/cli/common/terminal" - "github.com/stretchr/testify/require" - "helm.sh/helm/v3/pkg/action" - "helm.sh/helm/v3/pkg/chart" - helmCLI "helm.sh/helm/v3/pkg/cli" - "helm.sh/helm/v3/pkg/release" -) - -func TestUpgrade(t *testing.T) { - buf := new(bytes.Buffer) - mock := &MockActionRunner{ - CheckForInstallationsFunc: func(options *CheckForInstallationsOptions) (bool, string, string, error) { - if options.ReleaseName == "consul" { - return false, "", "", nil - } else { - return true, "consul-demo", "consul-demo", nil - } - }, - } - - options := &UpgradeOptions{ - HelmActionsRunner: mock, - UI: terminal.NewUI(context.Background(), buf), - UILogger: func(format string, v ...interface{}) {}, - ReleaseName: "consul-release", - ReleaseType: common.ReleaseTypeConsul, - Namespace: "consul-namespace", - Settings: helmCLI.New(), - AutoApprove: true, - } - - expectedMessages := []string{ - "\n==> Upgrade Summary\n ✓ Downloaded charts.\n \n Difference between user overrides for current and upgraded charts\n -----------------------------------------------------------------\n \n", - "\n==> Upgrading \n ✓ upgraded in namespace \"consul-namespace\".\n", - } - err := UpgradeHelmRelease(options) - require.NoError(t, err) - output := buf.String() - for _, msg := range expectedMessages { - require.Contains(t, output, msg) - } -} - -func TestUpgradeHelmRelease(t *testing.T) { - cases := map[string]struct { - messages []string - helmActionsRunner *MockActionRunner - expectError bool - }{ - "basic success": { - messages: []string{ - "\n==> Consul Upgrade Summary\n ✓ Downloaded charts.\n \n Difference between user overrides for current and upgraded charts\n -----------------------------------------------------------------\n \n", - "\n==> Upgrading Consul\n ✓ Consul upgraded in namespace \"consul-namespace\".\n", - }, - helmActionsRunner: &MockActionRunner{}, - }, - "failure because LoadChart returns failure": { - messages: []string{ - "\n==> Consul Upgrade Summary\n", - }, - helmActionsRunner: &MockActionRunner{ - LoadChartFunc: func(chrt embed.FS, chartDirName string) (*chart.Chart, error) { - return nil, errors.New("sad trombone!") - }, - }, - expectError: true, - }, - "failure because Upgrade returns failure": { - messages: []string{ - "\n==> Consul Upgrade Summary\n", - }, - helmActionsRunner: &MockActionRunner{ - UpgradeFunc: func(upgrade *action.Upgrade, name string, chart *chart.Chart, vals map[string]interface{}) (*release.Release, error) { - return nil, errors.New("sad trombone!") - }, - }, - expectError: true, - }, - } - for name, 
tc := range cases { - t.Run(name, func(t *testing.T) { - buf := new(bytes.Buffer) - mock := tc.helmActionsRunner - options := &UpgradeOptions{ - HelmActionsRunner: mock, - UI: terminal.NewUI(context.Background(), buf), - UILogger: func(format string, v ...interface{}) {}, - ReleaseName: "consul-release", - ReleaseType: common.ReleaseTypeConsul, - ReleaseTypeName: common.ReleaseTypeConsul, - Namespace: "consul-namespace", - Settings: helmCLI.New(), - AutoApprove: true, - } - err := UpgradeHelmRelease(options) - if tc.expectError { - require.Error(t, err) - } else { - require.NoError(t, err) - } - output := buf.String() - for _, msg := range tc.messages { - require.Contains(t, output, msg) - } - }) - } -} diff --git a/cli/helm/values.go b/cli/helm/values.go index e7042f85e1..669c67da25 100644 --- a/cli/helm/values.go +++ b/cli/helm/values.go @@ -147,33 +147,38 @@ type Resources struct { Limits Limits `yaml:"limits"` } +type ConsulSidecarContainer struct { + Resources Resources `yaml:"resources"` +} + type Openshift struct { Enabled bool `yaml:"enabled"` } type Global struct { - Enabled bool `yaml:"enabled"` - LogLevel string `yaml:"logLevel"` - LogJSON bool `yaml:"logJSON"` - Name interface{} `yaml:"name"` - Domain string `yaml:"domain"` - AdminPartitions AdminPartitions `yaml:"adminPartitions"` - Image string `yaml:"image"` - ImagePullSecrets []interface{} `yaml:"imagePullSecrets"` - ImageK8S string `yaml:"imageK8S"` - Datacenter string `yaml:"datacenter"` - EnablePodSecurityPolicies bool `yaml:"enablePodSecurityPolicies"` - SecretsBackend SecretsBackend `yaml:"secretsBackend"` - GossipEncryption GossipEncryption `yaml:"gossipEncryption"` - Recursors []interface{} `yaml:"recursors"` - TLS TLS `yaml:"tls"` - EnableConsulNamespaces bool `yaml:"enableConsulNamespaces"` - Acls Acls `yaml:"acls"` - EnterpriseLicense EnterpriseLicense `yaml:"enterpriseLicense"` - Federation Federation `yaml:"federation"` - Metrics GlobalMetrics `yaml:"metrics"` - ImageEnvoy string `yaml:"imageEnvoy"` - Openshift Openshift `yaml:"openshift"` + Enabled bool `yaml:"enabled"` + LogLevel string `yaml:"logLevel"` + LogJSON bool `yaml:"logJSON"` + Name interface{} `yaml:"name"` + Domain string `yaml:"domain"` + AdminPartitions AdminPartitions `yaml:"adminPartitions"` + Image string `yaml:"image"` + ImagePullSecrets []interface{} `yaml:"imagePullSecrets"` + ImageK8S string `yaml:"imageK8S"` + Datacenter string `yaml:"datacenter"` + EnablePodSecurityPolicies bool `yaml:"enablePodSecurityPolicies"` + SecretsBackend SecretsBackend `yaml:"secretsBackend"` + GossipEncryption GossipEncryption `yaml:"gossipEncryption"` + Recursors []interface{} `yaml:"recursors"` + TLS TLS `yaml:"tls"` + EnableConsulNamespaces bool `yaml:"enableConsulNamespaces"` + Acls Acls `yaml:"acls"` + EnterpriseLicense EnterpriseLicense `yaml:"enterpriseLicense"` + Federation Federation `yaml:"federation"` + Metrics GlobalMetrics `yaml:"metrics"` + ConsulSidecarContainer ConsulSidecarContainer `yaml:"consulSidecarContainer"` + ImageEnvoy string `yaml:"imageEnvoy"` + Openshift Openshift `yaml:"openshift"` } type ServerCert struct { @@ -480,6 +485,10 @@ type WanAddress struct { Static string `yaml:"static"` } +type InitCopyConsulContainer struct { + Resources Resources `yaml:"resources"` +} + type InitServiceInitContainer struct { Resources Resources `yaml:"resources"` } @@ -496,6 +505,7 @@ type MeshGateway struct { HostPort interface{} `yaml:"hostPort"` ServiceAccount ServiceAccount `yaml:"serviceAccount"` Resources Resources `yaml:"resources"` + 
InitCopyConsulContainer InitCopyConsulContainer `yaml:"initCopyConsulContainer"` InitServiceInitContainer InitServiceInitContainer `yaml:"initServiceInitContainer"` Affinity string `yaml:"affinity"` Tolerations interface{} `yaml:"tolerations"` @@ -517,17 +527,18 @@ type DefaultsService struct { } type IngressGatewayDefaults struct { - Replicas int `yaml:"replicas"` - Service DefaultsService `yaml:"service"` - ServiceAccount ServiceAccount `yaml:"serviceAccount"` - Resources Resources `yaml:"resources"` - Affinity string `yaml:"affinity"` - Tolerations interface{} `yaml:"tolerations"` - NodeSelector interface{} `yaml:"nodeSelector"` - PriorityClassName string `yaml:"priorityClassName"` - TerminationGracePeriodSeconds int `yaml:"terminationGracePeriodSeconds"` - Annotations interface{} `yaml:"annotations"` - ConsulNamespace string `yaml:"consulNamespace"` + Replicas int `yaml:"replicas"` + Service DefaultsService `yaml:"service"` + ServiceAccount ServiceAccount `yaml:"serviceAccount"` + Resources Resources `yaml:"resources"` + InitCopyConsulContainer InitCopyConsulContainer `yaml:"initCopyConsulContainer"` + Affinity string `yaml:"affinity"` + Tolerations interface{} `yaml:"tolerations"` + NodeSelector interface{} `yaml:"nodeSelector"` + PriorityClassName string `yaml:"priorityClassName"` + TerminationGracePeriodSeconds int `yaml:"terminationGracePeriodSeconds"` + Annotations interface{} `yaml:"annotations"` + ConsulNamespace string `yaml:"consulNamespace"` } type Gateways struct { @@ -541,16 +552,17 @@ type IngressGateways struct { } type Defaults struct { - Replicas int `yaml:"replicas"` - ExtraVolumes []interface{} `yaml:"extraVolumes"` - Resources Resources `yaml:"resources"` - Affinity string `yaml:"affinity"` - Tolerations interface{} `yaml:"tolerations"` - NodeSelector interface{} `yaml:"nodeSelector"` - PriorityClassName string `yaml:"priorityClassName"` - Annotations interface{} `yaml:"annotations"` - ServiceAccount ServiceAccount `yaml:"serviceAccount"` - ConsulNamespace string `yaml:"consulNamespace"` + Replicas int `yaml:"replicas"` + ExtraVolumes []interface{} `yaml:"extraVolumes"` + Resources Resources `yaml:"resources"` + InitCopyConsulContainer InitCopyConsulContainer `yaml:"initCopyConsulContainer"` + Affinity string `yaml:"affinity"` + Tolerations interface{} `yaml:"tolerations"` + NodeSelector interface{} `yaml:"nodeSelector"` + PriorityClassName string `yaml:"priorityClassName"` + Annotations interface{} `yaml:"annotations"` + ServiceAccount ServiceAccount `yaml:"serviceAccount"` + ConsulNamespace string `yaml:"consulNamespace"` } type TerminatingGateways struct { diff --git a/cli/preset/cloud_preset.go b/cli/preset/cloud_preset.go deleted file mode 100644 index 95219cb378..0000000000 --- a/cli/preset/cloud_preset.go +++ /dev/null @@ -1,431 +0,0 @@ -package preset - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - - "github.com/hashicorp/consul-k8s/cli/common" - "github.com/hashicorp/consul-k8s/cli/common/terminal" - "github.com/hashicorp/consul-k8s/cli/config" - "github.com/hashicorp/hcp-sdk-go/clients/cloud-global-network-manager-service/preview/2022-02-15/models" - "github.com/hashicorp/hcp-sdk-go/httpclient" - "github.com/hashicorp/hcp-sdk-go/resource" - - hcpgnm "github.com/hashicorp/hcp-sdk-go/clients/cloud-global-network-manager-service/preview/2022-02-15/client/global_network_manager_service" - corev1 "k8s.io/api/core/v1" - k8serrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" 
-)
-
-const (
-  secretNameHCPClientID     = "consul-hcp-client-id"
-  secretKeyHCPClientID      = "client-id"
-  secretNameHCPClientSecret = "consul-hcp-client-secret"
-  secretKeyHCPClientSecret  = "client-secret"
-  secretNameHCPResourceID   = "consul-hcp-resource-id"
-  secretKeyHCPResourceID    = "resource-id"
-  secretNameHCPAPIHostname  = "consul-hcp-api-host"
-  secretKeyHCPAPIHostname   = "api-hostname"
-  secretNameHCPAuthURL      = "consul-hcp-auth-url"
-  secretKeyHCPAuthURL       = "auth-url"
-  secretNameHCPScadaAddress = "consul-hcp-scada-address"
-  secretKeyHCPScadaAddress  = "scada-address"
-  secretNameGossipKey       = "consul-gossip-key"
-  secretKeyGossipKey        = "key"
-  secretNameBootstrapToken  = "consul-bootstrap-token"
-  secretKeyBootstrapToken   = "token"
-  secretNameServerCA        = "consul-server-ca"
-  secretNameServerCert      = "consul-server-cert"
-)
-
-// CloudBootstrapConfig represents the response fetched from the agent
-// bootstrap config endpoint in HCP.
-type CloudBootstrapConfig struct {
-  BootstrapResponse *models.HashicorpCloudGlobalNetworkManager20220215AgentBootstrapResponse
-  ConsulConfig      ConsulConfig
-  HCPConfig         HCPConfig
-}
-
-// HCPConfig represents the resource-id, client-id, and client-secret
-// provided by the user in order to make a call to fetch the agent bootstrap
-// config data from the endpoint in HCP.
-type HCPConfig struct {
-  ResourceID   string
-  ClientID     string
-  ClientSecret string
-  AuthURL      string
-  APIHostname  string
-  ScadaAddress string
-}
-
-// ConsulConfig represents 'cluster.consul_config' in the response
-// fetched from the agent bootstrap config endpoint in HCP.
-type ConsulConfig struct {
-  ACL ACL `json:"acl"`
-}
-
-// ACL represents 'cluster.consul_config.acl' in the response
-// fetched from the agent bootstrap config endpoint in HCP.
-type ACL struct {
-  Tokens Tokens `json:"tokens"`
-}
-
-// Tokens represents 'cluster.consul_config.acl.tokens' in the
-// response fetched from the agent bootstrap config endpoint in HCP.
-type Tokens struct {
-  Agent             string `json:"agent"`
-  InitialManagement string `json:"initial_management"`
-}
-
-// CloudPreset struct is an implementation of the Preset interface that is used
-// to fetch agent bootstrap config from HCP, save it to secrets, and provide a
-// Helm values map that is used during installation.
-type CloudPreset struct {
-  HCPConfig           *HCPConfig
-  KubernetesClient    kubernetes.Interface
-  KubernetesNamespace string
-  UI                  terminal.UI
-  SkipSavingSecrets   bool
-  Context             context.Context
-  HTTPClient          *http.Client
-}
-
-// GetValueMap must fetch configuration from HCP, save various secrets from
-// the response, and map the secret names into the returned value map.
-func (c *CloudPreset) GetValueMap() (map[string]interface{}, error) {
-  bootstrapConfig, err := c.fetchAgentBootstrapConfig()
-  if err != nil {
-    return nil, err
-  }
-
-  if !c.SkipSavingSecrets {
-    err = c.saveSecretsFromBootstrapConfig(bootstrapConfig)
-    if err != nil {
-      return nil, err
-    }
-  }
-
-  return c.getHelmConfigWithMapSecretNames(bootstrapConfig), nil
-}
-
-// fetchAgentBootstrapConfig uses the resource-id, client-id, and client-secret
-// to call the agent bootstrap config endpoint and parse the response into a
-// CloudBootstrapConfig struct.
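The function below starts by splitting the user-supplied resource ID. A sketch of that step in isolation, using the resource ID from the deleted tests further down (the exact field mapping is inferred from how clusterResource is used below, so treat it as an assumption):

    clusterResource, err := resource.FromString(
        "organization/ccbdd191-5dc3-4a73-9e05-6ac30ca67992/project/36019e0d-ed59-4df6-9990-05bb7fc793b6/hashicorp.consul.global-network-manager.cluster/prod-on-prem")
    if err != nil {
        return nil, err
    }
    // clusterResource.Organization -> the organization UUID
    // clusterResource.Project      -> the project UUID
    // clusterResource.ID           -> "prod-on-prem", the cluster name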
-func (c *CloudPreset) fetchAgentBootstrapConfig() (*CloudBootstrapConfig, error) {
-  c.UI.Output("Fetching Consul cluster configuration from HCP", terminal.WithHeaderStyle())
-  httpClientCfg := httpclient.Config{}
-  clientRuntime, err := httpclient.New(httpClientCfg)
-  if err != nil {
-    return nil, err
-  }
-
-  hcpgnmClient := hcpgnm.New(clientRuntime, nil)
-  clusterResource, err := resource.FromString(c.HCPConfig.ResourceID)
-  if err != nil {
-    return nil, err
-  }
-
-  params := hcpgnm.NewAgentBootstrapConfigParamsWithContext(c.Context).
-    WithID(clusterResource.ID).
-    WithLocationOrganizationID(clusterResource.Organization).
-    WithLocationProjectID(clusterResource.Project).
-    WithHTTPClient(c.HTTPClient)
-
-  resp, err := hcpgnmClient.AgentBootstrapConfig(params, nil)
-  if err != nil {
-    return nil, err
-  }
-
-  bootstrapConfig := resp.GetPayload()
-  c.UI.Output("HCP configuration successfully fetched.", terminal.WithSuccessStyle())
-
-  return c.parseBootstrapConfigResponse(bootstrapConfig)
-}
-
-// parseBootstrapConfigResponse unmarshals the bootstrap config response
-// and also sets the HCPConfig values to return a CloudBootstrapConfig struct.
-func (c *CloudPreset) parseBootstrapConfigResponse(bootstrapResponse *models.HashicorpCloudGlobalNetworkManager20220215AgentBootstrapResponse) (*CloudBootstrapConfig, error) {
-  var cbc CloudBootstrapConfig
-  var consulConfig ConsulConfig
-  err := json.Unmarshal([]byte(bootstrapResponse.Bootstrap.ConsulConfig), &consulConfig)
-  if err != nil {
-    return nil, err
-  }
-  cbc.ConsulConfig = consulConfig
-  cbc.HCPConfig = *c.HCPConfig
-  cbc.BootstrapResponse = bootstrapResponse
-
-  return &cbc, nil
-}
-
-func getOptionalSecretFromHCPConfig(hcpConfigValue, valuesConfigKey, secretName, secretKey string) string {
-  if hcpConfigValue != "" {
-    // Need to make sure the below has strict spaces and no tabs
-    return fmt.Sprintf(`%s:
-      secretName: %s
-      secretKey: %s
-    `, valuesConfigKey, secretName, secretKey)
-  }
-  return ""
-}
-
-// getHelmConfigWithMapSecretNames takes the secret names where agent bootstrap
-// config values have been saved, maps them into the Helm values template for
-// the cloud preset, and returns the value map.
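For illustration, what getOptionalSecretFromHCPConfig (above) renders when the optional value is present, using the constants defined at the top of this file (indentation approximate; the snippet is spliced into the global.cloud block of the values template built below, which is why it must use spaces, never tabs):

    snippet := getOptionalSecretFromHCPConfig(
        "https://foo.bar", "apiHost", secretNameHCPAPIHostname, secretKeyHCPAPIHostname)
    // snippet now reads, ready for splicing into the YAML template:
    //
    // apiHost:
    //   secretName: consul-hcp-api-host
    //   secretKey: api-hostname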
-func (c *CloudPreset) getHelmConfigWithMapSecretNames(cfg *CloudBootstrapConfig) map[string]interface{} {
-  apiHostCfg := getOptionalSecretFromHCPConfig(cfg.HCPConfig.APIHostname, "apiHost", secretNameHCPAPIHostname, secretKeyHCPAPIHostname)
-  authURLCfg := getOptionalSecretFromHCPConfig(cfg.HCPConfig.AuthURL, "authUrl", secretNameHCPAuthURL, secretKeyHCPAuthURL)
-  scadaAddressCfg := getOptionalSecretFromHCPConfig(cfg.HCPConfig.ScadaAddress, "scadaAddress", secretNameHCPScadaAddress, secretKeyHCPScadaAddress)
-
-  // Need to make sure the below has strict spaces and no tabs
-  values := fmt.Sprintf(`
-global:
-  datacenter: %s
-  tls:
-    enabled: true
-    enableAutoEncrypt: true
-    caCert:
-      secretName: %s
-      secretKey: %s
-  gossipEncryption:
-    secretName: %s
-    secretKey: %s
-  acls:
-    manageSystemACLs: true
-    bootstrapToken:
-      secretName: %s
-      secretKey: %s
-  cloud:
-    enabled: true
-    resourceId:
-      secretName: %s
-      secretKey: %s
-    clientId:
-      secretName: %s
-      secretKey: %s
-    clientSecret:
-      secretName: %s
-      secretKey: %s
-    %s
-    %s
-    %s
-server:
-  replicas: %d
-  affinity: null
-  serverCert:
-    secretName: %s
-connectInject:
-  enabled: true
-controller:
-  enabled: true
-`, cfg.BootstrapResponse.Cluster.ID, secretNameServerCA, corev1.TLSCertKey,
-    secretNameGossipKey, secretKeyGossipKey, secretNameBootstrapToken,
-    secretKeyBootstrapToken,
-    secretNameHCPResourceID, secretKeyHCPResourceID,
-    secretNameHCPClientID, secretKeyHCPClientID,
-    secretNameHCPClientSecret, secretKeyHCPClientSecret,
-    apiHostCfg, authURLCfg, scadaAddressCfg,
-    cfg.BootstrapResponse.Cluster.BootstrapExpect, secretNameServerCert)
-  valuesMap := config.ConvertToMap(values)
-  return valuesMap
-}
-
-// saveSecretsFromBootstrapConfig takes the following items from the
-// agent bootstrap config from HCP and saves them into known secret names and
-// keys:
-// - HCP config resource-id.
-// - HCP client-id.
-// - HCP client-secret.
-// - HCP auth URL (optional)
-// - HCP api hostname (optional)
-// - HCP scada address (optional)
-// - ACL bootstrap token.
-// - gossip encryption key.
-// - server tls cert and key.
-// - server CA cert.
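The secret names and keys in the list above come from the constants at the top of this file, so other tooling can read the stored values back with plain client-go. A hedged sketch (the "consul" namespace matches the deleted tests; client is any kubernetes.Interface, and the surrounding error handling is illustrative):

    secret, err := client.CoreV1().Secrets("consul").Get(
        context.Background(), secretNameBootstrapToken, metav1.GetOptions{})
    if err != nil {
        return err
    }
    token := string(secret.Data[secretKeyBootstrapToken]) // ACL bootstrap token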
-func (c *CloudPreset) saveSecretsFromBootstrapConfig(config *CloudBootstrapConfig) error {
-  // create namespace
-  if err := c.createNamespaceIfNotExists(); err != nil {
-    return err
-  }
-
-  // HCP resource id
-  if config.HCPConfig.ResourceID != "" {
-    data := map[string][]byte{
-      secretKeyHCPResourceID: []byte(config.HCPConfig.ResourceID),
-    }
-    if err := c.saveSecret(secretNameHCPResourceID, data, corev1.SecretTypeOpaque); err != nil {
-      return err
-    }
-    c.UI.Output(fmt.Sprintf("HCP resource id saved in '%s' secret in namespace '%s'.",
-      secretKeyHCPResourceID, c.KubernetesNamespace), terminal.WithSuccessStyle())
-  }
-
-  // HCP client id
-  if config.HCPConfig.ClientID != "" {
-    data := map[string][]byte{
-      secretKeyHCPClientID: []byte(config.HCPConfig.ClientID),
-    }
-    if err := c.saveSecret(secretNameHCPClientID, data, corev1.SecretTypeOpaque); err != nil {
-      return err
-    }
-    c.UI.Output(fmt.Sprintf("HCP client id saved in '%s' secret in namespace '%s'.",
-      secretKeyHCPClientID, c.KubernetesNamespace), terminal.WithSuccessStyle())
-  }
-
-  // HCP client secret
-  if config.HCPConfig.ClientSecret != "" {
-    data := map[string][]byte{
-      secretKeyHCPClientSecret: []byte(config.HCPConfig.ClientSecret),
-    }
-    if err := c.saveSecret(secretNameHCPClientSecret, data, corev1.SecretTypeOpaque); err != nil {
-      return err
-    }
-    c.UI.Output(fmt.Sprintf("HCP client secret saved in '%s' secret in namespace '%s'.",
-      secretKeyHCPClientSecret, c.KubernetesNamespace), terminal.WithSuccessStyle())
-  }
-
-  // bootstrap token
-  if config.ConsulConfig.ACL.Tokens.InitialManagement != "" {
-    data := map[string][]byte{
-      secretKeyBootstrapToken: []byte(config.ConsulConfig.ACL.Tokens.InitialManagement),
-    }
-    if err := c.saveSecret(secretNameBootstrapToken, data, corev1.SecretTypeOpaque); err != nil {
-      return err
-    }
-    c.UI.Output(fmt.Sprintf("ACL bootstrap token saved as '%s' key in '%s' secret in namespace '%s'.",
-      secretKeyBootstrapToken, secretNameBootstrapToken, c.KubernetesNamespace), terminal.WithSuccessStyle())
-  }
-
-  // gossip key
-  if config.BootstrapResponse.Bootstrap.GossipKey != "" {
-    data := map[string][]byte{
-      secretKeyGossipKey: []byte(config.BootstrapResponse.Bootstrap.GossipKey),
-    }
-    if err := c.saveSecret(secretNameGossipKey, data, corev1.SecretTypeOpaque); err != nil {
-      return err
-    }
-    c.UI.Output(fmt.Sprintf("Gossip encryption key saved as '%s' key in '%s' secret in namespace '%s'.",
-      secretKeyGossipKey, secretNameGossipKey, c.KubernetesNamespace), terminal.WithSuccessStyle())
-  }
-
-  // server cert secret
-  if config.BootstrapResponse.Bootstrap.ServerTLS.Cert != "" {
-    data := map[string][]byte{
-      corev1.TLSCertKey:       []byte(config.BootstrapResponse.Bootstrap.ServerTLS.Cert),
-      corev1.TLSPrivateKeyKey: []byte(config.BootstrapResponse.Bootstrap.ServerTLS.PrivateKey),
-    }
-    if err := c.saveSecret(secretNameServerCert, data, corev1.SecretTypeTLS); err != nil {
-      return err
-    }
-    c.UI.Output(fmt.Sprintf("Server TLS cert and key saved as '%s' and '%s' keys in '%s' secret in namespace '%s'.",
-      corev1.TLSCertKey, corev1.TLSPrivateKeyKey, secretNameServerCert, c.KubernetesNamespace), terminal.WithSuccessStyle())
-  }
-
-  // server CA
-  if len(config.BootstrapResponse.Bootstrap.ServerTLS.CertificateAuthorities) > 0 &&
-    config.BootstrapResponse.Bootstrap.ServerTLS.CertificateAuthorities[0] != "" {
-    data := map[string][]byte{
-      corev1.TLSCertKey: []byte(config.BootstrapResponse.Bootstrap.ServerTLS.CertificateAuthorities[0]),
-    }
-    if err := c.saveSecret(secretNameServerCA, data,
corev1.SecretTypeOpaque); err != nil {
-      return err
-    }
-    c.UI.Output(fmt.Sprintf("Server TLS CA saved as '%s' key in '%s' secret in namespace '%s'.",
-      corev1.TLSCertKey, secretNameServerCA, c.KubernetesNamespace), terminal.WithSuccessStyle())
-  }
-  // Optional secrets
-  // HCP auth url
-  if config.HCPConfig.AuthURL != "" {
-    data := map[string][]byte{
-      secretKeyHCPAuthURL: []byte(config.HCPConfig.AuthURL),
-    }
-    if err := c.saveSecret(secretNameHCPAuthURL, data, corev1.SecretTypeOpaque); err != nil {
-      return err
-    }
-    c.UI.Output(fmt.Sprintf("HCP auth url saved as '%s' key in '%s' secret in namespace '%s'.",
-      secretKeyHCPAuthURL, secretNameHCPAuthURL, c.KubernetesNamespace), terminal.WithSuccessStyle())
-  }
-
-  // HCP api hostname
-  if config.HCPConfig.APIHostname != "" {
-    data := map[string][]byte{
-      secretKeyHCPAPIHostname: []byte(config.HCPConfig.APIHostname),
-    }
-    if err := c.saveSecret(secretNameHCPAPIHostname, data, corev1.SecretTypeOpaque); err != nil {
-      return err
-    }
-    c.UI.Output(fmt.Sprintf("HCP api hostname saved as '%s' key in '%s' secret in namespace '%s'.",
-      secretKeyHCPAPIHostname, secretNameHCPAPIHostname, c.KubernetesNamespace), terminal.WithSuccessStyle())
-  }
-
-  // HCP scada address
-  if config.HCPConfig.ScadaAddress != "" {
-    data := map[string][]byte{
-      secretKeyHCPScadaAddress: []byte(config.HCPConfig.ScadaAddress),
-    }
-    if err := c.saveSecret(secretNameHCPScadaAddress, data, corev1.SecretTypeOpaque); err != nil {
-      return err
-    }
-    c.UI.Output(fmt.Sprintf("HCP scada address saved as '%s' key in '%s' secret in namespace '%s'.",
-      secretKeyHCPScadaAddress, secretNameHCPScadaAddress, c.KubernetesNamespace), terminal.WithSuccessStyle())
-  }
-
-  return nil
-}
-
-// createNamespaceIfNotExists checks whether a given namespace exists and
-// creates it if it does not. This function is needed to ensure a namespace
-// exists before HCP config secrets are saved.
-func (c *CloudPreset) createNamespaceIfNotExists() error {
-  c.UI.Output(fmt.Sprintf("Checking if %s namespace needs to be created", c.KubernetesNamespace), terminal.WithHeaderStyle())
-  // Create k8s namespace if it doesn't exist.
-  _, err := c.KubernetesClient.CoreV1().Namespaces().Get(context.Background(), c.KubernetesNamespace, metav1.GetOptions{})
-  if k8serrors.IsNotFound(err) {
-    _, err = c.KubernetesClient.CoreV1().Namespaces().Create(context.Background(), &corev1.Namespace{
-      ObjectMeta: metav1.ObjectMeta{
-        Name: c.KubernetesNamespace,
-      },
-    }, metav1.CreateOptions{})
-    if err != nil {
-      return err
-    }
-    c.UI.Output(fmt.Sprintf("Namespace '%s' has been created.", c.KubernetesNamespace), terminal.WithSuccessStyle())
-
-  } else if err != nil {
-    return err
-  } else {
-    c.UI.Output(fmt.Sprintf("Namespace '%s' already exists.", c.KubernetesNamespace), terminal.WithSuccessStyle())
-  }
-  return nil
-}
-
-// saveSecret saves the given key-value pairs into a given secret in a given
-// namespace. It is the generic function that helps save all of the specific
-// cloud preset secrets.
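One behavior worth calling out before the function body: saveSecret is create-only, so a second call for the same name returns an error instead of updating in place. A sketch against the fake clientset the deleted tests import (k8s.io/client-go/kubernetes/fake; the gossip key is the one from those tests):

    client := fake.NewSimpleClientset()
    p := &CloudPreset{KubernetesClient: client, KubernetesNamespace: "consul"}
    data := map[string][]byte{secretKeyGossipKey: []byte("Wa6/XFAnYy0f9iqVH2iiG+yore3CqHSemUy4AIVTa/w=")}

    err := p.saveSecret(secretNameGossipKey, data, corev1.SecretTypeOpaque)
    // err is nil: the secret was created, labeled as CLI-managed.
    err = p.saveSecret(secretNameGossipKey, data, corev1.SecretTypeOpaque)
    // err is non-nil: "'consul-gossip-key' secret in 'consul' namespace already exists".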
-func (c *CloudPreset) saveSecret(secretName string, kvps map[string][]byte, secretType corev1.SecretType) error { - _, err := c.KubernetesClient.CoreV1().Secrets(c.KubernetesNamespace).Get(context.Background(), secretName, metav1.GetOptions{}) - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: secretName, - Namespace: c.KubernetesNamespace, - Labels: map[string]string{common.CLILabelKey: common.CLILabelValue}, - }, - Data: kvps, - Type: secretType, - } - if k8serrors.IsNotFound(err) { - _, err = c.KubernetesClient.CoreV1().Secrets(c.KubernetesNamespace).Create(context.Background(), secret, metav1.CreateOptions{}) - if err != nil { - return err - } - } else if err != nil { - return err - } else { - return fmt.Errorf("'%s' secret in '%s' namespace already exists", secretName, c.KubernetesNamespace) - } - return nil -} diff --git a/cli/preset/cloud_preset_test.go b/cli/preset/cloud_preset_test.go deleted file mode 100644 index 946e1ca158..0000000000 --- a/cli/preset/cloud_preset_test.go +++ /dev/null @@ -1,701 +0,0 @@ -package preset - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/http/httptest" - "net/url" - "os" - "testing" - - "github.com/hashicorp/consul-k8s/cli/common" - "github.com/hashicorp/consul-k8s/cli/common/terminal" - "github.com/hashicorp/hcp-sdk-go/clients/cloud-global-network-manager-service/preview/2022-02-15/models" - "github.com/stretchr/testify/require" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/kubernetes/fake" - "sigs.k8s.io/yaml" -) - -const ( - hcpClientID = "RAxJflDbxDXw8kLY6jWmwqMz3kVe7NnL" - hcpClientSecret = "1fNzurLatQPLPwf7jnD4fRtU9f5nH31RKBHayy08uQ6P-6nwI1rFZjMXb4m3cCKH" - hcpResourceID = "organization/ccbdd191-5dc3-4a73-9e05-6ac30ca67992/project/36019e0d-ed59-4df6-9990-05bb7fc793b6/hashicorp.consul.global-network-manager.cluster/prod-on-prem" - expectedSecretNameHCPClientId = "consul-hcp-client-id" - expectedSecretNameHCPClientSecret = "consul-hcp-client-secret" - expectedSecretNameHCPResourceId = "consul-hcp-resource-id" - expectedSecretNameHCPAuthURL = "consul-hcp-auth-url" - expectedSecretNameHCPApiHostname = "consul-hcp-api-host" - expectedSecretNameHCPScadaAddress = "consul-hcp-scada-address" - expectedSecretNameGossipKey = "consul-gossip-key" - expectedSecretNameBootstrap = "consul-bootstrap-token" - expectedSecretNameServerCA = "consul-server-ca" - expectedSecretNameServerCert = "consul-server-cert" - namespace = "consul" - validResponse = ` -{ - "cluster": - { - "id": "dc1", - "bootstrap_expect" : 3 - }, - "bootstrap": - { - "gossip_key": "Wa6/XFAnYy0f9iqVH2iiG+yore3CqHSemUy4AIVTa/w=", - "server_tls": { - "certificate_authorities": [ - "-----BEGIN 
CERTIFICATE-----\nMIIC6TCCAo+gAwIBAgIQA3pUmJcy9uw8MNIDZPiaZjAKBggqhkjOPQQDAjCBtzEL\nMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRYwFAYDVQQHEw1TYW4gRnJhbmNpc2Nv\nMRowGAYDVQQJExExMDEgU2Vjb25kIFN0cmVldDEOMAwGA1UEERMFOTQxMDUxFzAV\nBgNVBAoTDkhhc2hpQ29ycCBJbmMuMT4wPAYDVQQDEzVDb25zdWwgQWdlbnQgQ0Eg\nNDYyMjg2MDAxNTk3NzI1NDMzMTgxNDQ4OTAzODMyNjg5NzI1NDAeFw0yMjAzMjkx\nMTEyNDNaFw0yNzAzMjgxMTEyNDNaMIG3MQswCQYDVQQGEwJVUzELMAkGA1UECBMC\nQ0ExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xGjAYBgNVBAkTETEwMSBTZWNvbmQg\nU3RyZWV0MQ4wDAYDVQQREwU5NDEwNTEXMBUGA1UEChMOSGFzaGlDb3JwIEluYy4x\nPjA8BgNVBAMTNUNvbnN1bCBBZ2VudCBDQSA0NjIyODYwMDE1OTc3MjU0MzMxODE0\nNDg5MDM4MzI2ODk3MjU0MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAERs73JA+K\n9xMorTz6fA5x8Dmin6l8pNgka3/Ye3SFWJD/0lKFTXEX7Li8+hXG31WMLdXgoWHS\nkL1HoLboV8hEAKN7MHkwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8w\nKQYDVR0OBCIEICst9kpfDK0LtEbUghWf4ahjpzd7Mlh07OLT/e38PKDmMCsGA1Ud\nIwQkMCKAICst9kpfDK0LtEbUghWf4ahjpzd7Mlh07OLT/e38PKDmMAoGCCqGSM49\nBAMCA0gAMEUCIQCuk/n49np4m76jTFLk2zeiSi7UfubMeS2BD4bkMt6v/wIgbO0R\npTqCOYQr3cji1EpEQca95VCZ26lBEjqLQF3osGc=\n-----END CERTIFICATE-----\n" - ], - "private_key": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIA+DFWCFz+SujFCuWM3GpoTLPX8igerwMw+8efNbx7a+oAoGCCqGSM49\nAwEHoUQDQgAE7LdWJpna88mohlnuTyGJ+WZ3P6BCxGqBRWNJn3+JEoHhmaifx7Sq\nWLMCEB1UNbH5Z1esaS4h33Gb0pyyiCy19A==\n-----END EC PRIVATE KEY-----\n", - "cert": "-----BEGIN CERTIFICATE-----\nMIICmzCCAkGgAwIBAgIRAKZ77a2h+plK2yXFsW0kfgAwCgYIKoZIzj0EAwIwgbcx\nCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNj\nbzEaMBgGA1UECRMRMTAxIFNlY29uZCBTdHJlZXQxDjAMBgNVBBETBTk0MTA1MRcw\nFQYDVQQKEw5IYXNoaUNvcnAgSW5jLjE+MDwGA1UEAxM1Q29uc3VsIEFnZW50IENB\nIDQ2MjI4NjAwMTU5NzcyNTQzMzE4MTQ0ODkwMzgzMjY4OTcyNTQwHhcNMjIwMzI5\nMTExMjUwWhcNMjMwMzI5MTExMjUwWjAcMRowGAYDVQQDExFzZXJ2ZXIuZGMxLmNv\nbnN1bDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABOy3ViaZ2vPJqIZZ7k8hiflm\ndz+gQsRqgUVjSZ9/iRKB4Zmon8e0qlizAhAdVDWx+WdXrGkuId9xm9KcsogstfSj\ngccwgcQwDgYDVR0PAQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEF\nBQcDAjAMBgNVHRMBAf8EAjAAMCkGA1UdDgQiBCDaH9x1CRRqM5BYCMKBnAFyZjQq\nSY9IcJnhZUZIIJHU4jArBgNVHSMEJDAigCArLfZKXwytC7RG1IIVn+GoY6c3ezJY\ndOzi0/3t/Dyg5jAtBgNVHREEJjAkghFzZXJ2ZXIuZGMxLmNvbnN1bIIJbG9jYWxo\nb3N0hwR/AAABMAoGCCqGSM49BAMCA0gAMEUCIQCOxQHGF2483Cdd9nXcqAoOcxYP\nIqNP/WM03qyERyYNNQIgbtFBLIAgrhdXdjEvHMjU5ceHSwle/K0p0OTSIwSk8xI=\n-----END CERTIFICATE-----\n" - }, - "consul_config": "{\"acl\":{\"default_policy\":\"deny\",\"enable_token_persistence\":true,\"enabled\":true,\"tokens\":{\"agent\":\"74044c72-03c8-42b0-b57f-728bb22ca7fb\",\"initial_management\":\"74044c72-03c8-42b0-b57f-728bb22ca7fb\"}},\"auto_encrypt\":{\"allow_tls\":true},\"bootstrap_expect\":1,\"encrypt\":\"yUPhgtteok1/bHoVIoRnJMfOrKrb1TDDyWJRh9rlUjg=\",\"encrypt_verify_incoming\":true,\"encrypt_verify_outgoing\":true,\"ports\":{\"http\":-1,\"https\":8501},\"retry_join\":[],\"verify_incoming\":true,\"verify_outgoing\":true,\"verify_server_hostname\":true}" - } -}` -) - -var validBootstrapReponse *models.HashicorpCloudGlobalNetworkManager20220215AgentBootstrapResponse = &models.HashicorpCloudGlobalNetworkManager20220215AgentBootstrapResponse{ - Bootstrap: &models.HashicorpCloudGlobalNetworkManager20220215ClusterBootstrap{ - ID: "dc1", - GossipKey: "Wa6/XFAnYy0f9iqVH2iiG+yore3CqHSemUy4AIVTa/w=", - BootstrapExpect: 3, - ServerTLS: &models.HashicorpCloudGlobalNetworkManager20220215ServerTLS{ - CertificateAuthorities: []string{"-----BEGIN 
CERTIFICATE-----\nMIIC6TCCAo+gAwIBAgIQA3pUmJcy9uw8MNIDZPiaZjAKBggqhkjOPQQDAjCBtzEL\nMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRYwFAYDVQQHEw1TYW4gRnJhbmNpc2Nv\nMRowGAYDVQQJExExMDEgU2Vjb25kIFN0cmVldDEOMAwGA1UEERMFOTQxMDUxFzAV\nBgNVBAoTDkhhc2hpQ29ycCBJbmMuMT4wPAYDVQQDEzVDb25zdWwgQWdlbnQgQ0Eg\nNDYyMjg2MDAxNTk3NzI1NDMzMTgxNDQ4OTAzODMyNjg5NzI1NDAeFw0yMjAzMjkx\nMTEyNDNaFw0yNzAzMjgxMTEyNDNaMIG3MQswCQYDVQQGEwJVUzELMAkGA1UECBMC\nQ0ExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xGjAYBgNVBAkTETEwMSBTZWNvbmQg\nU3RyZWV0MQ4wDAYDVQQREwU5NDEwNTEXMBUGA1UEChMOSGFzaGlDb3JwIEluYy4x\nPjA8BgNVBAMTNUNvbnN1bCBBZ2VudCBDQSA0NjIyODYwMDE1OTc3MjU0MzMxODE0\nNDg5MDM4MzI2ODk3MjU0MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAERs73JA+K\n9xMorTz6fA5x8Dmin6l8pNgka3/Ye3SFWJD/0lKFTXEX7Li8+hXG31WMLdXgoWHS\nkL1HoLboV8hEAKN7MHkwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8w\nKQYDVR0OBCIEICst9kpfDK0LtEbUghWf4ahjpzd7Mlh07OLT/e38PKDmMCsGA1Ud\nIwQkMCKAICst9kpfDK0LtEbUghWf4ahjpzd7Mlh07OLT/e38PKDmMAoGCCqGSM49\nBAMCA0gAMEUCIQCuk/n49np4m76jTFLk2zeiSi7UfubMeS2BD4bkMt6v/wIgbO0R\npTqCOYQr3cji1EpEQca95VCZ26lBEjqLQF3osGc=\n-----END CERTIFICATE-----\n"}, - PrivateKey: "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIA+DFWCFz+SujFCuWM3GpoTLPX8igerwMw+8efNbx7a+oAoGCCqGSM49\nAwEHoUQDQgAE7LdWJpna88mohlnuTyGJ+WZ3P6BCxGqBRWNJn3+JEoHhmaifx7Sq\nWLMCEB1UNbH5Z1esaS4h33Gb0pyyiCy19A==\n-----END EC PRIVATE KEY-----\n", - Cert: "-----BEGIN CERTIFICATE-----\nMIICmzCCAkGgAwIBAgIRAKZ77a2h+plK2yXFsW0kfgAwCgYIKoZIzj0EAwIwgbcx\nCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNj\nbzEaMBgGA1UECRMRMTAxIFNlY29uZCBTdHJlZXQxDjAMBgNVBBETBTk0MTA1MRcw\nFQYDVQQKEw5IYXNoaUNvcnAgSW5jLjE+MDwGA1UEAxM1Q29uc3VsIEFnZW50IENB\nIDQ2MjI4NjAwMTU5NzcyNTQzMzE4MTQ0ODkwMzgzMjY4OTcyNTQwHhcNMjIwMzI5\nMTExMjUwWhcNMjMwMzI5MTExMjUwWjAcMRowGAYDVQQDExFzZXJ2ZXIuZGMxLmNv\nbnN1bDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABOy3ViaZ2vPJqIZZ7k8hiflm\ndz+gQsRqgUVjSZ9/iRKB4Zmon8e0qlizAhAdVDWx+WdXrGkuId9xm9KcsogstfSj\ngccwgcQwDgYDVR0PAQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEF\nBQcDAjAMBgNVHRMBAf8EAjAAMCkGA1UdDgQiBCDaH9x1CRRqM5BYCMKBnAFyZjQq\nSY9IcJnhZUZIIJHU4jArBgNVHSMEJDAigCArLfZKXwytC7RG1IIVn+GoY6c3ezJY\ndOzi0/3t/Dyg5jAtBgNVHREEJjAkghFzZXJ2ZXIuZGMxLmNvbnN1bIIJbG9jYWxo\nb3N0hwR/AAABMAoGCCqGSM49BAMCA0gAMEUCIQCOxQHGF2483Cdd9nXcqAoOcxYP\nIqNP/WM03qyERyYNNQIgbtFBLIAgrhdXdjEvHMjU5ceHSwle/K0p0OTSIwSk8xI=\n-----END CERTIFICATE-----\n"}, - ConsulConfig: "{\"acl\":{\"default_policy\":\"deny\",\"enable_token_persistence\":true,\"enabled\":true,\"tokens\":{\"agent\":\"74044c72-03c8-42b0-b57f-728bb22ca7fb\",\"initial_management\":\"74044c72-03c8-42b0-b57f-728bb22ca7fb\"}},\"auto_encrypt\":{\"allow_tls\":true},\"bootstrap_expect\":1,\"encrypt\":\"yUPhgtteok1/bHoVIoRnJMfOrKrb1TDDyWJRh9rlUjg=\",\"encrypt_verify_incoming\":true,\"encrypt_verify_outgoing\":true,\"ports\":{\"http\":-1,\"https\":8501},\"retry_join\":[],\"verify_incoming\":true,\"verify_outgoing\":true,\"verify_server_hostname\":true}", - }, - Cluster: &models.HashicorpCloudGlobalNetworkManager20220215Cluster{ - ID: "dc1", - BootstrapExpect: 3, - }, -} - -var hcpConfig *HCPConfig = &HCPConfig{ - ResourceID: hcpResourceID, - ClientID: hcpClientID, - ClientSecret: hcpClientSecret, - AuthURL: "https://foobar", - APIHostname: "https://foo.bar", - ScadaAddress: "10.10.10.10", -} - -var validBootstrapConfig *CloudBootstrapConfig = &CloudBootstrapConfig{ - HCPConfig: *hcpConfig, - ConsulConfig: ConsulConfig{ - ACL: ACL{ - Tokens: Tokens{ - Agent: "74044c72-03c8-42b0-b57f-728bb22ca7fb", - InitialManagement: "74044c72-03c8-42b0-b57f-728bb22ca7fb", - }, - }, - }, - BootstrapResponse: 
validBootstrapReponse, -} - -func TestGetValueMap(t *testing.T) { - // Create fake k8s. - k8s := fake.NewSimpleClientset() - namespace := "consul" - - // Start the mock HCP server. - hcpMockServer := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("content-type", "application/json") - if r != nil && r.URL.Path == "/global-network-manager/2022-02-15/organizations/ccbdd191-5dc3-4a73-9e05-6ac30ca67992/projects/36019e0d-ed59-4df6-9990-05bb7fc793b6/clusters/prod-on-prem/agent/bootstrap_config" && - r.Method == "GET" { - w.Write([]byte(validResponse)) - } else { - w.Write([]byte(` - { - "access_token": "dummy-token" - } - `)) - } - })) - hcpMockServer.StartTLS() - t.Cleanup(hcpMockServer.Close) - mockServerURL, err := url.Parse(hcpMockServer.URL) - require.NoError(t, err) - os.Setenv("HCP_AUTH_URL", hcpMockServer.URL) - os.Setenv("HCP_API_HOST", mockServerURL.Host) - os.Setenv("HCP_CLIENT_ID", "fGY34fkOxcQmpkcygQmGHQZkEcLDhBde") - os.Setenv("HCP_CLIENT_SECRET", "8EWngREObMe90HNDN6oQv3YKQlRtVkg-28AgZylz1en0DHwyiE2pYCbwi61oF8dr") - bsConfig := getDeepCopyOfValidBootstrapConfig() - bsConfig.HCPConfig.APIHostname = mockServerURL.Host - bsConfig.HCPConfig.AuthURL = hcpMockServer.URL - - testCases := []struct { - description string - installer *CloudPreset - postProcessingFunc func() - requireCheck func() - }{ - { - "Should save secrets when SkipSavingSecrets is false.", - &CloudPreset{ - HCPConfig: &bsConfig.HCPConfig, - KubernetesClient: k8s, - KubernetesNamespace: namespace, - UI: terminal.NewBasicUI(context.Background()), - HTTPClient: hcpMockServer.Client(), - Context: context.Background(), - }, - func() { - deleteSecrets(k8s) - }, - func() { - checkAllSecretsWereSaved(t, k8s, bsConfig) - }, - }, - { - "Should not save secrets when SkipSavingSecrets is true.", - &CloudPreset{ - HCPConfig: &bsConfig.HCPConfig, - KubernetesClient: k8s, - KubernetesNamespace: namespace, - UI: terminal.NewBasicUI(context.Background()), - SkipSavingSecrets: true, - HTTPClient: hcpMockServer.Client(), - Context: context.Background(), - }, - func() { - deleteSecrets(k8s) - }, - func() { - checkAllSecretsWereSaved(t, k8s, bsConfig) - }, - }, - { - "Should not save api-hostname, scada-address, or auth-url keys as empty strings if they are not configured.", - &CloudPreset{ - HCPConfig: &HCPConfig{ - ResourceID: hcpResourceID, - ClientID: hcpClientID, - ClientSecret: hcpClientSecret, - }, - KubernetesClient: k8s, - KubernetesNamespace: namespace, - UI: terminal.NewBasicUI(context.Background()), - SkipSavingSecrets: false, - HTTPClient: hcpMockServer.Client(), - Context: context.Background(), - }, - func() { - deleteSecrets(k8s) - }, - func() { - // Check the hcp resource id secret is as expected. - ensureSecretKeyValueMatchesExpected(t, k8s, secretNameHCPResourceID, secretKeyHCPResourceID, - bsConfig.HCPConfig.ResourceID, corev1.SecretTypeOpaque) - - // Check the hcp client id secret is as expected. - ensureSecretKeyValueMatchesExpected(t, k8s, secretNameHCPClientID, secretKeyHCPClientID, - bsConfig.HCPConfig.ClientID, corev1.SecretTypeOpaque) - - // Check the hcp client secret secret is as expected. - ensureSecretKeyValueMatchesExpected(t, k8s, secretNameHCPClientSecret, secretKeyHCPClientSecret, - bsConfig.HCPConfig.ClientSecret, corev1.SecretTypeOpaque) - - // Check the bootstrap token secret is as expected. 
- ensureSecretKeyValueMatchesExpected(t, k8s, secretNameBootstrapToken, secretKeyBootstrapToken, - bsConfig.ConsulConfig.ACL.Tokens.InitialManagement, corev1.SecretTypeOpaque) - - // Check the gossip key secret is as expected. - ensureSecretKeyValueMatchesExpected(t, k8s, secretNameGossipKey, secretKeyGossipKey, - bsConfig.BootstrapResponse.Bootstrap.GossipKey, corev1.SecretTypeOpaque) - - // Check the server cert secret is as expected. - ensureSecretKeyValueMatchesExpected(t, k8s, secretNameServerCert, corev1.TLSCertKey, - bsConfig.BootstrapResponse.Bootstrap.ServerTLS.Cert, corev1.SecretTypeTLS) - ensureSecretKeyValueMatchesExpected(t, k8s, secretNameServerCert, corev1.TLSPrivateKeyKey, - bsConfig.BootstrapResponse.Bootstrap.ServerTLS.PrivateKey, corev1.SecretTypeTLS) - - // Check the server CA secret is as expected. - ensureSecretKeyValueMatchesExpected(t, k8s, secretNameServerCA, corev1.TLSCertKey, - bsConfig.BootstrapResponse.Bootstrap.ServerTLS.CertificateAuthorities[0], corev1.SecretTypeOpaque) - - // Check that HCP scada address, auth url, and api hostname are not saved - hcpAuthURLSecret, _ := k8s.CoreV1().Secrets(namespace).Get(context.Background(), secretNameHCPAuthURL, metav1.GetOptions{}) - require.Nil(t, hcpAuthURLSecret) - hcpApiHostnameSecret, _ := k8s.CoreV1().Secrets(namespace).Get(context.Background(), secretNameHCPAPIHostname, metav1.GetOptions{}) - require.Nil(t, hcpApiHostnameSecret) - hcpScadaAddress, _ := k8s.CoreV1().Secrets(namespace).Get(context.Background(), secretNameHCPScadaAddress, metav1.GetOptions{}) - require.Nil(t, hcpScadaAddress) - }, - }, - } - - for _, tc := range testCases { - t.Run(tc.description, func(t *testing.T) { - config, err := tc.installer.GetValueMap() - require.NoError(t, err) - require.NotNil(t, config) - if tc.installer.SkipSavingSecrets { - checkSecretsWereNotSaved(k8s) - } else { - tc.requireCheck() - } - tc.postProcessingFunc() - }) - } - os.Unsetenv("HCP_AUTH_URL") - os.Unsetenv("HCP_API_HOST") - os.Unsetenv("HCP_CLIENT_ID") - os.Unsetenv("HCP_CLIENT_SECRET") -} - -// TestParseBootstrapConfigResponse tests that response string from agent bootstrap -// config endpoint can be converted into CloudBootstrapConfig bootstrap object. -func TestParseBootstrapConfigResponse(t *testing.T) { - testCases := []struct { - description string - input string - expectedConfig *CloudBootstrapConfig - }{ - { - "Should properly parse a valid response.", - validResponse, - validBootstrapConfig, - }, - } - - cloudPreset := &CloudPreset{ - HCPConfig: hcpConfig, - KubernetesNamespace: namespace, - UI: terminal.NewBasicUI(context.Background()), - } - for _, tc := range testCases { - t.Run(tc.description, func(t *testing.T) { - config, err := cloudPreset.parseBootstrapConfigResponse(validBootstrapReponse) - require.NoError(t, err) - require.Equal(t, tc.expectedConfig, config) - }) - } -} - -func TestSaveSecretsFromBootstrapConfig(t *testing.T) { - t.Parallel() - - // Create fake k8s. 
- k8s := fake.NewSimpleClientset() - - testCases := []struct { - description string - expectsError bool - expectedErrorMessage string - preProcessingFunc func() - postProcessingFunc func() - }{ - { - "Properly saves secrets with a full bootstrapConfig.", - false, - "", - func() {}, - func() { - deleteSecrets(k8s) - }, - }, - { - "Errors when hcp client id secret already exists", - true, - fmt.Sprintf("'%s' secret in '%s' namespace already exists", expectedSecretNameHCPClientId, namespace), - func() { - savePlaceholderSecret(expectedSecretNameHCPClientId, k8s) - }, - func() { - deleteSecrets(k8s) - }, - }, - { - "Errors when hcp client secret secret already exists", - true, - fmt.Sprintf("'%s' secret in '%s' namespace already exists", expectedSecretNameHCPClientSecret, namespace), - func() { - savePlaceholderSecret(expectedSecretNameHCPClientSecret, k8s) - }, - func() { - deleteSecrets(k8s) - }, - }, - { - "Errors when hcp resource id secret already exists", - true, - fmt.Sprintf("'%s' secret in '%s' namespace already exists", expectedSecretNameHCPResourceId, namespace), - func() { - savePlaceholderSecret(expectedSecretNameHCPResourceId, k8s) - }, - func() { - deleteSecrets(k8s) - }, - }, - { - "Errors when hcp auth url secret already exists", - true, - fmt.Sprintf("'%s' secret in '%s' namespace already exists", expectedSecretNameHCPAuthURL, namespace), - func() { - savePlaceholderSecret(expectedSecretNameHCPAuthURL, k8s) - }, - func() { - deleteSecrets(k8s) - }, - }, - { - "Errors when hcp api hostname secret already exists", - true, - fmt.Sprintf("'%s' secret in '%s' namespace already exists", expectedSecretNameHCPApiHostname, namespace), - func() { - savePlaceholderSecret(expectedSecretNameHCPApiHostname, k8s) - }, - func() { - deleteSecrets(k8s) - }, - }, - { - "Errors when hcp scada address secret already exists", - true, - fmt.Sprintf("'%s' secret in '%s' namespace already exists", expectedSecretNameHCPScadaAddress, namespace), - func() { - savePlaceholderSecret(expectedSecretNameHCPScadaAddress, k8s) - }, - func() { - deleteSecrets(k8s) - }, - }, - { - "Errors when bootstrap token secret already exists", - true, - fmt.Sprintf("'%s' secret in '%s' namespace already exists", expectedSecretNameBootstrap, namespace), - func() { - savePlaceholderSecret(expectedSecretNameBootstrap, k8s) - }, - func() { - deleteSecrets(k8s) - }, - }, - { - "Errors when gossip key secret already exists", - true, - fmt.Sprintf("'%s' secret in '%s' namespace already exists", expectedSecretNameGossipKey, namespace), - func() { - savePlaceholderSecret(expectedSecretNameGossipKey, k8s) - }, - func() { - deleteSecrets(k8s) - }, - }, - { - "Errors when server cert secret already exists", - true, - fmt.Sprintf("'%s' secret in '%s' namespace already exists", expectedSecretNameServerCert, namespace), - func() { - savePlaceholderSecret(expectedSecretNameServerCert, k8s) - }, - func() { - deleteSecrets(k8s) - }, - }, - { - "Errors when server CA secret already exists", - true, - fmt.Sprintf("'%s' secret in '%s' namespace already exists", expectedSecretNameServerCA, namespace), - func() { - savePlaceholderSecret(expectedSecretNameServerCA, k8s) - }, - func() { - deleteSecrets(k8s) - }, - }, - } - cloudPreset := &CloudPreset{ - HCPConfig: hcpConfig, - KubernetesClient: k8s, - KubernetesNamespace: namespace, - UI: terminal.NewBasicUI(context.Background()), - } - - for _, tc := range testCases { - t.Run(tc.description, func(t *testing.T) { - tc.preProcessingFunc() - err := 
cloudPreset.saveSecretsFromBootstrapConfig(validBootstrapConfig) - if tc.expectsError && err != nil { - require.Equal(t, tc.expectedErrorMessage, err.Error()) - - } else { - require.NoError(t, err) - require.Equal(t, expectedSecretNameBootstrap, secretNameBootstrapToken) - require.Equal(t, expectedSecretNameGossipKey, secretNameGossipKey) - require.Equal(t, expectedSecretNameHCPClientId, secretNameHCPClientID) - require.Equal(t, expectedSecretNameHCPClientSecret, secretNameHCPClientSecret) - require.Equal(t, expectedSecretNameHCPResourceId, secretNameHCPResourceID) - require.Equal(t, expectedSecretNameServerCA, secretNameServerCA) - require.Equal(t, expectedSecretNameServerCert, secretNameServerCert) - - checkAllSecretsWereSaved(t, k8s, validBootstrapConfig) - - } - tc.postProcessingFunc() - }) - } - -} - -func TestGetHelmConfigWithMapSecretNames(t *testing.T) { - t.Parallel() - - const expectedFull = `connectInject: - enabled: true -controller: - enabled: true -global: - acls: - bootstrapToken: - secretKey: token - secretName: consul-bootstrap-token - manageSystemACLs: true - cloud: - apiHost: - secretKey: api-hostname - secretName: consul-hcp-api-host - authUrl: - secretKey: auth-url - secretName: consul-hcp-auth-url - clientId: - secretKey: client-id - secretName: consul-hcp-client-id - clientSecret: - secretKey: client-secret - secretName: consul-hcp-client-secret - enabled: true - resourceId: - secretKey: resource-id - secretName: consul-hcp-resource-id - scadaAddress: - secretKey: scada-address - secretName: consul-hcp-scada-address - datacenter: dc1 - gossipEncryption: - secretKey: key - secretName: consul-gossip-key - tls: - caCert: - secretKey: tls.crt - secretName: consul-server-ca - enableAutoEncrypt: true - enabled: true -server: - affinity: null - replicas: 3 - serverCert: - secretName: consul-server-cert -` - - const expectedWithoutOptional = `connectInject: - enabled: true -controller: - enabled: true -global: - acls: - bootstrapToken: - secretKey: token - secretName: consul-bootstrap-token - manageSystemACLs: true - cloud: - clientId: - secretKey: client-id - secretName: consul-hcp-client-id - clientSecret: - secretKey: client-secret - secretName: consul-hcp-client-secret - enabled: true - resourceId: - secretKey: resource-id - secretName: consul-hcp-resource-id - datacenter: dc1 - gossipEncryption: - secretKey: key - secretName: consul-gossip-key - tls: - caCert: - secretKey: tls.crt - secretName: consul-server-ca - enableAutoEncrypt: true - enabled: true -server: - affinity: null - replicas: 3 - serverCert: - secretName: consul-server-cert -` - - cloudPreset := &CloudPreset{} - - testCases := []struct { - description string - config *CloudBootstrapConfig - expectedYaml string - }{ - {"Config including optional parameters", - &CloudBootstrapConfig{ - BootstrapResponse: &models.HashicorpCloudGlobalNetworkManager20220215AgentBootstrapResponse{ - Cluster: &models.HashicorpCloudGlobalNetworkManager20220215Cluster{ - BootstrapExpect: 3, - ID: "dc1", - }, - }, - HCPConfig: HCPConfig{ - ResourceID: "consul-hcp-resource-id", - ClientID: "consul-hcp-client-id", - ClientSecret: "consul-hcp-client-secret", - AuthURL: "consul-hcp-auth-url", - APIHostname: "consul-hcp-api-host", - ScadaAddress: "consul-hcp-scada-address", - }, - }, - expectedFull, - }, - {"Config without optional parameters", - &CloudBootstrapConfig{ - BootstrapResponse: &models.HashicorpCloudGlobalNetworkManager20220215AgentBootstrapResponse{ - Cluster: &models.HashicorpCloudGlobalNetworkManager20220215Cluster{ - 
BootstrapExpect: 3, - ID: "dc1", - }, - }, - HCPConfig: HCPConfig{ - ResourceID: "consul-hcp-resource-id", - ClientID: "consul-hcp-client-id", - ClientSecret: "consul-hcp-client-secret", - }, - }, - expectedWithoutOptional, - }, - } - for _, tc := range testCases { - t.Run(tc.description, func(t *testing.T) { - cloudHelmValues := cloudPreset.getHelmConfigWithMapSecretNames(tc.config) - require.NotNil(t, cloudHelmValues) - valuesYaml, err := yaml.Marshal(cloudHelmValues) - yml := string(valuesYaml) - require.NoError(t, err) - require.Equal(t, tc.expectedYaml, yml) - }) - } - -} - -func savePlaceholderSecret(secretName string, k8sClient kubernetes.Interface) { - data := map[string][]byte{} - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: secretName, - Namespace: namespace, - Labels: map[string]string{common.CLILabelKey: common.CLILabelValue}, - }, - Data: data, - Type: corev1.SecretTypeOpaque, - } - k8sClient.CoreV1().Secrets(namespace).Create(context.Background(), secret, metav1.CreateOptions{}) -} - -func deleteSecrets(k8sClient kubernetes.Interface) { - k8sClient.CoreV1().Secrets(namespace).Delete(context.Background(), expectedSecretNameHCPClientId, metav1.DeleteOptions{}) - k8sClient.CoreV1().Secrets(namespace).Delete(context.Background(), expectedSecretNameHCPClientSecret, metav1.DeleteOptions{}) - k8sClient.CoreV1().Secrets(namespace).Delete(context.Background(), expectedSecretNameHCPResourceId, metav1.DeleteOptions{}) - k8sClient.CoreV1().Secrets(namespace).Delete(context.Background(), expectedSecretNameHCPAuthURL, metav1.DeleteOptions{}) - k8sClient.CoreV1().Secrets(namespace).Delete(context.Background(), expectedSecretNameHCPApiHostname, metav1.DeleteOptions{}) - k8sClient.CoreV1().Secrets(namespace).Delete(context.Background(), expectedSecretNameHCPScadaAddress, metav1.DeleteOptions{}) - k8sClient.CoreV1().Secrets(namespace).Delete(context.Background(), expectedSecretNameBootstrap, metav1.DeleteOptions{}) - k8sClient.CoreV1().Secrets(namespace).Delete(context.Background(), expectedSecretNameGossipKey, metav1.DeleteOptions{}) - k8sClient.CoreV1().Secrets(namespace).Delete(context.Background(), expectedSecretNameServerCert, metav1.DeleteOptions{}) - k8sClient.CoreV1().Secrets(namespace).Delete(context.Background(), expectedSecretNameServerCA, metav1.DeleteOptions{}) -} - -func checkAllSecretsWereSaved(t require.TestingT, k8s kubernetes.Interface, expectedConfig *CloudBootstrapConfig) { - - // Check that namespace is created - _, err := k8s.CoreV1().Namespaces().Get(context.Background(), namespace, metav1.GetOptions{}) - require.NoError(t, err) - - // Check the hcp resource id secret is as expected. - ensureSecretKeyValueMatchesExpected(t, k8s, secretNameHCPResourceID, secretKeyHCPResourceID, - expectedConfig.HCPConfig.ResourceID, corev1.SecretTypeOpaque) - - // Check the hcp client id secret is as expected. - ensureSecretKeyValueMatchesExpected(t, k8s, secretNameHCPClientID, secretKeyHCPClientID, - expectedConfig.HCPConfig.ClientID, corev1.SecretTypeOpaque) - - // Check the hcp client secret secret is as expected. - ensureSecretKeyValueMatchesExpected(t, k8s, secretNameHCPClientSecret, secretKeyHCPClientSecret, - expectedConfig.HCPConfig.ClientSecret, corev1.SecretTypeOpaque) - - // Check the hcp auth URL secret is as expected. - ensureSecretKeyValueMatchesExpected(t, k8s, secretNameHCPAuthURL, secretKeyHCPAuthURL, - expectedConfig.HCPConfig.AuthURL, corev1.SecretTypeOpaque) - - // Check the hcp api hostname secret is as expected. 
- ensureSecretKeyValueMatchesExpected(t, k8s, secretNameHCPAPIHostname, secretKeyHCPAPIHostname, - expectedConfig.HCPConfig.APIHostname, corev1.SecretTypeOpaque) - - // Check the hcp scada address secret is as expected. - ensureSecretKeyValueMatchesExpected(t, k8s, secretNameHCPScadaAddress, secretKeyHCPScadaAddress, - expectedConfig.HCPConfig.ScadaAddress, corev1.SecretTypeOpaque) - - // Check the bootstrap token secret is as expected. - ensureSecretKeyValueMatchesExpected(t, k8s, secretNameBootstrapToken, secretKeyBootstrapToken, - expectedConfig.ConsulConfig.ACL.Tokens.InitialManagement, corev1.SecretTypeOpaque) - - // Check the gossip key secret is as expected. - ensureSecretKeyValueMatchesExpected(t, k8s, secretNameGossipKey, secretKeyGossipKey, - expectedConfig.BootstrapResponse.Bootstrap.GossipKey, corev1.SecretTypeOpaque) - - // Check the server cert secret is as expected. - ensureSecretKeyValueMatchesExpected(t, k8s, secretNameServerCert, corev1.TLSCertKey, - expectedConfig.BootstrapResponse.Bootstrap.ServerTLS.Cert, corev1.SecretTypeTLS) - ensureSecretKeyValueMatchesExpected(t, k8s, secretNameServerCert, corev1.TLSPrivateKeyKey, - expectedConfig.BootstrapResponse.Bootstrap.ServerTLS.PrivateKey, corev1.SecretTypeTLS) - - // Check the server CA secret is as expected. - ensureSecretKeyValueMatchesExpected(t, k8s, secretNameServerCA, corev1.TLSCertKey, - expectedConfig.BootstrapResponse.Bootstrap.ServerTLS.CertificateAuthorities[0], corev1.SecretTypeOpaque) -} - -func ensureSecretKeyValueMatchesExpected(t require.TestingT, k8s kubernetes.Interface, - secretName, secretKey, - expectedValue string, expectedSecretType corev1.SecretType) { - secret, err := k8s.CoreV1().Secrets(namespace).Get(context.Background(), secretName, metav1.GetOptions{}) - require.NoError(t, err) - require.Equal(t, expectedValue, string(secret.Data[secretKey])) - require.Equal(t, expectedSecretType, secret.Type) - require.Equal(t, common.CLILabelValue, secret.Labels[common.CLILabelKey]) -} - -func checkSecretsWereNotSaved(k8s kubernetes.Interface) bool { - ns, _ := k8s.CoreV1().Namespaces().Get(context.Background(), namespace, metav1.GetOptions{}) - hcpClientIdSecret, _ := k8s.CoreV1().Secrets(namespace).Get(context.Background(), secretNameHCPClientID, metav1.GetOptions{}) - hcpClientSecretSecret, _ := k8s.CoreV1().Secrets(namespace).Get(context.Background(), secretNameHCPClientSecret, metav1.GetOptions{}) - hcpResourceIdSecret, _ := k8s.CoreV1().Secrets(namespace).Get(context.Background(), secretNameHCPResourceID, metav1.GetOptions{}) - bootstrapSecret, _ := k8s.CoreV1().Secrets(namespace).Get(context.Background(), secretNameBootstrapToken, metav1.GetOptions{}) - gossipKeySecret, _ := k8s.CoreV1().Secrets(namespace).Get(context.Background(), secretNameGossipKey, metav1.GetOptions{}) - serverCertSecret, _ := k8s.CoreV1().Secrets(namespace).Get(context.Background(), secretNameServerCert, metav1.GetOptions{}) - serverCASecret, _ := k8s.CoreV1().Secrets(namespace).Get(context.Background(), secretNameServerCA, metav1.GetOptions{}) - return ns == nil && hcpClientIdSecret == nil && hcpClientSecretSecret == nil && - hcpResourceIdSecret == nil && bootstrapSecret == nil && - gossipKeySecret == nil && serverCASecret == nil && serverCertSecret == nil -} - -func getDeepCopyOfValidBootstrapConfig() *CloudBootstrapConfig { - data, err := json.Marshal(validBootstrapConfig) - if err != nil { - panic(err) - } - - var copy *CloudBootstrapConfig - if err := json.Unmarshal(data, &copy); err != nil { - panic(err) - } - return copy -} 
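[Editor's note] The deleted getDeepCopyOfValidBootstrapConfig helper above deep-copies the shared fixture by round-tripping it through encoding/json, so each test case can mutate its own copy without corrupting the package-level validBootstrapConfig. A minimal, self-contained sketch of that pattern follows; the Config type here is a hypothetical stand-in (not a type from this repo), and the trick only works for values whose fields survive a JSON round trip, i.e. exported and JSON-serializable.

package main

import (
	"encoding/json"
	"fmt"
)

// Config is a hypothetical stand-in for a fixture struct such as
// CloudBootstrapConfig.
type Config struct {
	Name   string            `json:"name"`
	Tokens map[string]string `json:"tokens"`
}

// deepCopy clones src by marshaling it to JSON and unmarshaling into a
// fresh value, so nested maps and slices are not shared with the original.
func deepCopy(src *Config) (*Config, error) {
	data, err := json.Marshal(src)
	if err != nil {
		return nil, err
	}
	var dst Config
	if err := json.Unmarshal(data, &dst); err != nil {
		return nil, err
	}
	return &dst, nil
}

func main() {
	orig := &Config{Name: "dc1", Tokens: map[string]string{"agent": "abc"}}
	clone, err := deepCopy(orig)
	if err != nil {
		panic(err)
	}
	clone.Tokens["agent"] = "xyz" // mutating the clone leaves orig untouched
	fmt.Println(orig.Tokens["agent"], clone.Tokens["agent"]) // abc xyz
}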
diff --git a/cli/preset/demo.go b/cli/preset/demo.go deleted file mode 100644 index ee1b100114..0000000000 --- a/cli/preset/demo.go +++ /dev/null @@ -1,41 +0,0 @@ -package preset - -import "github.com/hashicorp/consul-k8s/cli/config" - -// DemoPreset struct is an implementation of the Preset interface that provides -// a Helm values map that is used during installation and represents -// the demo configuration for Consul on Kubernetes. -type DemoPreset struct{} - -// GetValueMap returns the Helm value map representing the demo -// configuration for Consul on Kubernetes. It does the following: -// - server replicas equal to 1. -// - enables the service mesh. -// - enables the ui. -// - enables metrics. -// - enables Prometheus. -func (i *DemoPreset) GetValueMap() (map[string]interface{}, error) { - values := ` -global: - name: consul - metrics: - enabled: true - enableAgentMetrics: true -connectInject: - enabled: true - metrics: - defaultEnabled: true - defaultEnableMerging: true - enableGatewayMetrics: true -server: - replicas: 1 -ui: - enabled: true - service: - enabled: true -prometheus: - enabled: true -` - - return config.ConvertToMap(values), nil -} diff --git a/cli/preset/preset.go b/cli/preset/preset.go deleted file mode 100644 index 2eb2c94bc4..0000000000 --- a/cli/preset/preset.go +++ /dev/null @@ -1,84 +0,0 @@ -package preset - -import ( - "fmt" - "os" -) - -const ( - PresetSecure = "secure" - PresetQuickstart = "quickstart" - PresetCloud = "cloud" - - EnvHCPClientID = "HCP_CLIENT_ID" - EnvHCPClientSecret = "HCP_CLIENT_SECRET" - EnvHCPAuthURL = "HCP_AUTH_URL" - EnvHCPAPIHost = "HCP_API_HOST" - EnvHCPScadaAddress = "HCP_SCADA_ADDRESS" -) - -// Presets is a list of all the available presets for use with CLI's install -// and uninstall commands. -var Presets = []string{PresetCloud, PresetQuickstart, PresetSecure} - -// Preset is the interface that each preset must implement. The demo and -// secure presets merely return a pre-configured value map. The cloud preset -// must fetch configuration from HCP, save various secrets from the response, -// and map the secret names into the value map. -type Preset interface { - GetValueMap() (map[string]interface{}, error) -} - -type GetPresetConfig struct { - Name string - CloudPreset *CloudPreset -} - -// GetPreset is a factory function that, given a configuration, produces a -// struct that implements the Preset interface based on the name in the -// configuration. If the name is not recognized, an error is returned. This -// helper function is utilized by both the cli install and upgrade commands. 
-func GetPreset(config *GetPresetConfig) (Preset, error) { - switch config.Name { - case PresetCloud: - return config.CloudPreset, nil - case PresetQuickstart: - return &QuickstartPreset{}, nil - case PresetSecure: - return &SecurePreset{}, nil - } - return nil, fmt.Errorf("'%s' is not a valid preset", config.Name) -} - -func GetHCPPresetFromEnv(resourceID string) *HCPConfig { - hcpConfig := &HCPConfig{ - ResourceID: resourceID, - } - - // Read clientID from environment - if clientID, ok := os.LookupEnv(EnvHCPClientID); ok { - hcpConfig.ClientID = clientID - } - - // Read clientSecret from environment - if clientSecret, ok := os.LookupEnv(EnvHCPClientSecret); ok { - hcpConfig.ClientSecret = clientSecret - } - - // Read authURL from environment - if authURL, ok := os.LookupEnv(EnvHCPAuthURL); ok { - hcpConfig.AuthURL = authURL - } - - // Read apiHost from environment - if apiHost, ok := os.LookupEnv(EnvHCPAPIHost); ok { - hcpConfig.APIHostname = apiHost - } - - // Read scadaAddress from environment - if scadaAddress, ok := os.LookupEnv(EnvHCPScadaAddress); ok { - hcpConfig.ScadaAddress = scadaAddress - } - - return hcpConfig -} diff --git a/cli/preset/preset_test.go b/cli/preset/preset_test.go deleted file mode 100644 index c39c11e80f..0000000000 --- a/cli/preset/preset_test.go +++ /dev/null @@ -1,78 +0,0 @@ -package preset - -import ( - "os" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestGetHCPPresetFromEnv(t *testing.T) { - const ( - scadaAddress = "scada-address" - clientID = "client-id" - clientSecret = "client-secret" - apiHost = "api-host" - authURL = "auth-url" - resourceID = "resource-id" - ) - - testCases := []struct { - description string - resourceID string - preProcessingFunc func() - postProcessingFunc func() - expectedPreset *HCPConfig - }{ - { - "Should populate properties in addition to resourceID on HCPConfig when environment variables are set.", - resourceID, - func() { - os.Setenv(EnvHCPClientID, clientID) - os.Setenv(EnvHCPClientSecret, clientSecret) - os.Setenv(EnvHCPAPIHost, apiHost) - os.Setenv(EnvHCPAuthURL, authURL) - os.Setenv(EnvHCPScadaAddress, scadaAddress) - }, - func() { - os.Unsetenv(EnvHCPClientID) - os.Unsetenv(EnvHCPClientSecret) - os.Unsetenv(EnvHCPAPIHost) - os.Unsetenv(EnvHCPAuthURL) - os.Unsetenv(EnvHCPScadaAddress) - }, - &HCPConfig{ - ResourceID: resourceID, - ClientID: clientID, - ClientSecret: clientSecret, - AuthURL: authURL, - APIHostname: apiHost, - ScadaAddress: scadaAddress, - }, - }, - { - "Should only populate resourceID on HCPConfig when environment variables are not set.", - resourceID, - func() { - os.Unsetenv(EnvHCPClientID) - os.Unsetenv(EnvHCPClientSecret) - os.Unsetenv(EnvHCPAPIHost) - os.Unsetenv(EnvHCPAuthURL) - os.Unsetenv(EnvHCPScadaAddress) - }, - func() {}, - &HCPConfig{ - ResourceID: resourceID, - }, - }, - } - - for _, testCase := range testCases { - testCase.preProcessingFunc() - defer testCase.postProcessingFunc() - t.Run(testCase.description, func(t *testing.T) { - hcpPreset := GetHCPPresetFromEnv(testCase.resourceID) - require.Equal(t, testCase.expectedPreset, hcpPreset) - }) - } -} diff --git a/cli/preset/quickstart.go b/cli/preset/quickstart.go deleted file mode 100644 index 823a60e312..0000000000 --- a/cli/preset/quickstart.go +++ /dev/null @@ -1,41 +0,0 @@ -package preset - -import "github.com/hashicorp/consul-k8s/cli/config" - -// QuickstartPreset struct is an implementation of the Preset interface that provides -// a Helm values map that is used during installation and represents -// the 
quickstart configuration for Consul on Kubernetes. -type QuickstartPreset struct{} - -// GetValueMap returns the Helm value map representing the quickstart -// configuration for Consul on Kubernetes. It does the following: -// - server replicas equal to 1. -// - enables the service mesh. -// - enables the ui. -// - enables metrics. -// - enables Prometheus. -func (i *QuickstartPreset) GetValueMap() (map[string]interface{}, error) { - values := ` -global: - name: consul - metrics: - enabled: true - enableAgentMetrics: true -connectInject: - enabled: true - metrics: - defaultEnabled: true - defaultEnableMerging: true - enableGatewayMetrics: true -server: - replicas: 1 -ui: - enabled: true - service: - enabled: true -prometheus: - enabled: true -` - - return config.ConvertToMap(values), nil -} diff --git a/cli/preset/secure.go b/cli/preset/secure.go deleted file mode 100644 index 6fccc956a6..0000000000 --- a/cli/preset/secure.go +++ /dev/null @@ -1,35 +0,0 @@ -package preset - -import "github.com/hashicorp/consul-k8s/cli/config" - -// SecurePreset struct is an implementation of the Preset interface that provides -// a Helm values map that is used during installation and represents -// the secure configuration for Consul on Kubernetes. -type SecurePreset struct{} - -// GetValueMap returns the Helm value map representing the secure -// configuration for Consul on Kubernetes. It does the following: -// - server replicas equal to 1. -// - enables the service mesh. -// - enables tls. -// - enables gossip encryption. -// - enables ACLs. -func (i *SecurePreset) GetValueMap() (map[string]interface{}, error) { - values := ` -global: - name: consul - gossipEncryption: - autoGenerate: true - tls: - enabled: true - enableAutoEncrypt: true - acls: - manageSystemACLs: true -server: - replicas: 1 -connectInject: - enabled: true -` - - return config.ConvertToMap(values), nil -} diff --git a/cli/version/version.go b/cli/version/version.go index 933f072f35..f89582cd9d 100644 --- a/cli/version/version.go +++ b/cli/version/version.go @@ -14,7 +14,7 @@ var ( // // Version must conform to the format expected by // github.com/hashicorp/go-version for tests to work. - Version = "1.1.0" + Version = "0.49.5" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release diff --git a/control-plane/Dockerfile b/control-plane/Dockerfile index 2989712a5f..5a418b5bf4 100644 --- a/control-plane/Dockerfile +++ b/control-plane/Dockerfile @@ -11,14 +11,10 @@ # # =================================== -# go-discover builds the discover binary (which we don't currently publish -# either). -FROM golang:1.19.2-alpine as go-discover -RUN CGO_ENABLED=0 go install github.com/hashicorp/go-discover/cmd/discover@49f60c093101c9c5f6b04d5b1c80164251a761a6 - # dev copies the binary from a local build # ----------------------------------- # BIN_NAME is a requirement in the hashicorp docker github action + FROM alpine:3.16 AS dev # NAME and VERSION are the name of the software in releases.hashicorp.com @@ -41,13 +37,12 @@ LABEL name=${BIN_NAME} \ ENV BIN_NAME=${BIN_NAME} ENV VERSION=${VERSION} -RUN apk add --no-cache ca-certificates libcap openssl su-exec iputils libc6-compat iptables +RUN apk add --no-cache ca-certificates curl libcap openssl su-exec iputils libc6-compat iptables # Create a non-root user to run the software. 
RUN addgroup ${BIN_NAME} && \ adduser -S -G ${BIN_NAME} 100 -COPY --from=go-discover /go/bin/discover /bin/ COPY pkg/bin/linux_${TARGETARCH}/${BIN_NAME} /bin COPY cni/pkg/bin/linux_${TARGETARCH}/${CNI_BIN_NAME} /bin @@ -99,7 +94,6 @@ ARG TARGETARCH RUN addgroup ${BIN_NAME} && \ adduser -S -G ${BIN_NAME} 100 -COPY --from=go-discover /go/bin/discover /bin/ COPY dist/${TARGETOS}/${TARGETARCH}/${BIN_NAME} /bin/ COPY dist/cni/${TARGETOS}/${TARGETARCH}/${CNI_BIN_NAME} /bin/ @@ -150,7 +144,7 @@ ARG TARGETOS ARG TARGETARCH # Copy license for Red Hat certification. -COPY LICENSE /licenses/mozilla.txt +COPY LICENSE.md /licenses/mozilla.txt RUN microdnf install -y ca-certificates libcap openssl shadow-utils iptables @@ -161,7 +155,6 @@ RUN groupadd --gid 1000 ${BIN_NAME} && \ adduser --uid 100 --system -g ${BIN_NAME} ${BIN_NAME} && \ usermod -a -G root ${BIN_NAME} -COPY --from=go-discover /go/bin/discover /bin/ COPY dist/${TARGETOS}/${TARGETARCH}/${BIN_NAME} /bin/ COPY dist/cni/${TARGETOS}/${TARGETARCH}/${CNI_BIN_NAME} /bin/ diff --git a/control-plane/Makefile b/control-plane/Makefile index 4c3cbac971..b6d6594c3d 100644 --- a/control-plane/Makefile +++ b/control-plane/Makefile @@ -22,8 +22,29 @@ CI_DEV_DOCKER_NAMESPACE?=hashicorpdev CI_DEV_DOCKER_IMAGE_NAME?=consul-k8s-control-plane CI_DEV_DOCKER_WORKDIR?=. CONSUL_K8S_IMAGE_VERSION?=latest + +# Helm Test Image +CI_DEV_HELM_TEST_IMAGE?=consul-helm-test +# Represent the latest supported version for this branch +# Increment this when building a new version container +TEST_IMAGE_VERSION=0.12.3 +HELM_TEST_WORKDIR=../charts/consul/test/docker + ################ +# Make target for building and pushing the helm test container +# used to run various pipeline tests (including GKE/AKS/EKS). This container +# provides the necessary dependencies for running on our cloud targets. +ci.dev-helm-test-docker: + @echo "Building helm test Development container - $(CI_DEV_HELM_TEST_IMAGE)" + @echo $(DOCKER_PASS) | docker login -u="$(DOCKER_USER)" --password-stdin + @docker buildx create --use && docker buildx build -t '$(CI_DEV_DOCKER_NAMESPACE)/$(CI_DEV_HELM_TEST_IMAGE):$(TEST_IMAGE_VERSION)' \ + --platform linux/amd64,linux/arm64 \ + --push \ + --label COMMIT_SHA=$(GIT_COMMIT) \ + $(HELM_TEST_WORKDIR) -f $(HELM_TEST_WORKDIR)/Test.dockerfile + @echo "Pushed dev image to: $(CI_DEV_DOCKER_NAMESPACE)/$(CI_DEV_HELM_TEST_IMAGE):$(TEST_IMAGE_VERSION)" + # TODO: Remove this ci.dev-docker target once we move the acceptance tests to Github Actions. # In CircleCI, the linux binary will be attached from a previous step at pkg/bin/linux_amd64/. This make target # should only run in CI and not locally. 
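[Editor's note] The demo, quickstart, and secure presets removed above all build their Helm overrides the same way: an inline YAML values string handed to config.ConvertToMap. ConvertToMap's body is not part of this diff, so the sketch below is only a guess at its contract, using sigs.k8s.io/yaml (already imported elsewhere in this changeset). Note the presets return the map alongside a nil error, so the real helper presumably handles parse failures internally rather than returning them as this standalone version does.

package main

import (
	"fmt"

	"sigs.k8s.io/yaml"
)

// convertToMap turns an inline Helm values YAML string into the
// map[string]interface{} form that the Helm SDK merges with chart defaults.
func convertToMap(values string) (map[string]interface{}, error) {
	m := map[string]interface{}{}
	// sigs.k8s.io/yaml converts YAML to JSON before unmarshaling, so the
	// resulting map holds JSON-compatible types (string, float64, bool, ...).
	if err := yaml.Unmarshal([]byte(values), &m); err != nil {
		return nil, err
	}
	return m, nil
}

func main() {
	values := `
global:
  name: consul
server:
  replicas: 1
`
	m, err := convertToMap(values)
	if err != nil {
		panic(err)
	}
	fmt.Println(m["global"], m["server"]) // map[name:consul] map[replicas:1]
}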
diff --git a/control-plane/api/v1alpha1/exportedservices_webhook.go b/control-plane/api/v1alpha1/exportedservices_webhook.go index 5a3d2cb2f1..d80062e958 100644 --- a/control-plane/api/v1alpha1/exportedservices_webhook.go +++ b/control-plane/api/v1alpha1/exportedservices_webhook.go @@ -7,6 +7,7 @@ import ( "github.com/go-logr/logr" "github.com/hashicorp/consul-k8s/control-plane/api/common" + capi "github.com/hashicorp/consul/api" admissionv1 "k8s.io/api/admission/v1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" @@ -16,9 +17,10 @@ import ( type ExportedServicesWebhook struct { client.Client - Logger logr.Logger - decoder *admission.Decoder - ConsulMeta common.ConsulMeta + ConsulClient *capi.Client + Logger logr.Logger + decoder *admission.Decoder + ConsulMeta common.ConsulMeta } // NOTE: The path value in the below line is the path to the webhook. diff --git a/control-plane/api/v1alpha1/exportedservices_webhook_test.go b/control-plane/api/v1alpha1/exportedservices_webhook_test.go index 6548c131f7..a1af17f7e8 100644 --- a/control-plane/api/v1alpha1/exportedservices_webhook_test.go +++ b/control-plane/api/v1alpha1/exportedservices_webhook_test.go @@ -176,10 +176,11 @@ func TestValidateExportedServices(t *testing.T) { require.NoError(t, err) validator := &ExportedServicesWebhook{ - Client: client, - Logger: logrtest.TestLogger{T: t}, - decoder: decoder, - ConsulMeta: c.consulMeta, + Client: client, + ConsulClient: nil, + Logger: logrtest.TestLogger{T: t}, + decoder: decoder, + ConsulMeta: c.consulMeta, } response := validator.Handle(ctx, admission.Request{ AdmissionRequest: admissionv1.AdmissionRequest{ diff --git a/control-plane/api/v1alpha1/ingressgateway_webhook.go b/control-plane/api/v1alpha1/ingressgateway_webhook.go index 7f8ba37558..8dcc2fa9ee 100644 --- a/control-plane/api/v1alpha1/ingressgateway_webhook.go +++ b/control-plane/api/v1alpha1/ingressgateway_webhook.go @@ -6,6 +6,7 @@ import ( "github.com/go-logr/logr" "github.com/hashicorp/consul-k8s/control-plane/api/common" + capi "github.com/hashicorp/consul/api" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" ) @@ -13,7 +14,8 @@ import ( // +kubebuilder:object:generate=false type IngressGatewayWebhook struct { - Logger logr.Logger + ConsulClient *capi.Client + Logger logr.Logger // ConsulMeta contains metadata specific to the Consul installation. ConsulMeta common.ConsulMeta diff --git a/control-plane/api/v1alpha1/mesh_types.go b/control-plane/api/v1alpha1/mesh_types.go index 502e567829..7053db67bc 100644 --- a/control-plane/api/v1alpha1/mesh_types.go +++ b/control-plane/api/v1alpha1/mesh_types.go @@ -52,8 +52,6 @@ type MeshSpec struct { TLS *MeshTLSConfig `json:"tls,omitempty"` // HTTP defines the HTTP configuration for the service mesh. HTTP *MeshHTTPConfig `json:"http,omitempty"` - // Peering defines the peering configuration for the service mesh. - Peering *PeeringMeshConfig `json:"peering,omitempty"` } // TransparentProxyMeshConfig controls configuration specific to proxies in "transparent" mode. Added in v1.10.0. @@ -77,15 +75,6 @@ type MeshHTTPConfig struct { SanitizeXForwardedClientCert bool `json:"sanitizeXForwardedClientCert"` } -type PeeringMeshConfig struct { - // PeerThroughMeshGateways determines whether peering traffic between - // control planes should flow through mesh gateways. If enabled, - // Consul servers will advertise mesh gateway addresses as their own. 
- // Additionally, mesh gateways will configure themselves to expose - // the local servers using a peering-specific SNI. - PeerThroughMeshGateways bool `json:"peerThroughMeshGateways,omitempty"` -} - type MeshDirectionalTLSConfig struct { // TLSMinVersion sets the default minimum TLS version supported. // One of `TLS_AUTO`, `TLSv1_0`, `TLSv1_1`, `TLSv1_2`, or `TLSv1_3`. @@ -192,7 +181,6 @@ func (in *Mesh) ToConsul(datacenter string) capi.ConfigEntry { TransparentProxy: in.Spec.TransparentProxy.toConsul(), TLS: in.Spec.TLS.toConsul(), HTTP: in.Spec.HTTP.toConsul(), - Peering: in.Spec.Peering.toConsul(), Meta: meta(datacenter), } } @@ -206,12 +194,11 @@ func (in *Mesh) MatchesConsul(candidate capi.ConfigEntry) bool { return cmp.Equal(in.ToConsul(""), configEntry, cmpopts.IgnoreFields(capi.MeshConfigEntry{}, "Partition", "Namespace", "Meta", "ModifyIndex", "CreateIndex"), cmpopts.IgnoreUnexported(), cmpopts.EquateEmpty()) } -func (in *Mesh) Validate(consulMeta common.ConsulMeta) error { +func (in *Mesh) Validate(_ common.ConsulMeta) error { var errs field.ErrorList path := field.NewPath("spec") errs = append(errs, in.Spec.TLS.validate(path.Child("tls"))...) - errs = append(errs, in.Spec.Peering.validate(path.Child("peering"), consulMeta.PartitionsEnabled, consulMeta.Partition)...) if len(errs) > 0 { return apierrors.NewInvalid( @@ -279,28 +266,6 @@ func (in *MeshDirectionalTLSConfig) toConsul() *capi.MeshDirectionalTLSConfig { } } -func (in *PeeringMeshConfig) toConsul() *capi.PeeringMeshConfig { - if in == nil { - return nil - } - return &capi.PeeringMeshConfig{PeerThroughMeshGateways: in.PeerThroughMeshGateways} -} - -func (in *PeeringMeshConfig) validate(path *field.Path, partitionsEnabled bool, partition string) field.ErrorList { - if in == nil { - return nil - } - - var errs field.ErrorList - - if partitionsEnabled && in.PeerThroughMeshGateways && partition != common.DefaultConsulPartition { - errs = append(errs, field.Forbidden(path.Child("peerThroughMeshGateways"), - "\"peerThroughMeshGateways\" is only valid in the \"default\" partition")) - } - - return errs -} - // DefaultNamespaceFields has no behaviour here as meshes have no namespace specific fields. 
func (in *Mesh) DefaultNamespaceFields(_ common.ConsulMeta) { } diff --git a/control-plane/api/v1alpha1/mesh_types_test.go b/control-plane/api/v1alpha1/mesh_types_test.go index 392c38d354..99de86d1fd 100644 --- a/control-plane/api/v1alpha1/mesh_types_test.go +++ b/control-plane/api/v1alpha1/mesh_types_test.go @@ -60,9 +60,6 @@ func TestMesh_MatchesConsul(t *testing.T) { HTTP: &MeshHTTPConfig{ SanitizeXForwardedClientCert: true, }, - Peering: &PeeringMeshConfig{ - PeerThroughMeshGateways: true, - }, }, }, Theirs: &capi.MeshConfigEntry{ @@ -84,9 +81,6 @@ func TestMesh_MatchesConsul(t *testing.T) { HTTP: &capi.MeshHTTPConfig{ SanitizeXForwardedClientCert: true, }, - Peering: &capi.PeeringMeshConfig{ - PeerThroughMeshGateways: true, - }, CreateIndex: 1, ModifyIndex: 2, Meta: map[string]string{ @@ -160,9 +154,6 @@ func TestMesh_ToConsul(t *testing.T) { HTTP: &MeshHTTPConfig{ SanitizeXForwardedClientCert: true, }, - Peering: &PeeringMeshConfig{ - PeerThroughMeshGateways: true, - }, }, }, Exp: &capi.MeshConfigEntry{ @@ -184,9 +175,6 @@ func TestMesh_ToConsul(t *testing.T) { HTTP: &capi.MeshHTTPConfig{ SanitizeXForwardedClientCert: true, }, - Peering: &capi.PeeringMeshConfig{ - PeerThroughMeshGateways: true, - }, Namespace: "", Meta: map[string]string{ common.SourceKey: common.SourceValue, @@ -209,7 +197,6 @@ func TestMesh_Validate(t *testing.T) { cases := map[string]struct { input *Mesh expectedErrMsgs []string - consulMeta common.ConsulMeta }{ "tls.incoming.minTLSVersion invalid": { input: &Mesh{ @@ -309,53 +296,6 @@ func TestMesh_Validate(t *testing.T) { }, }, }, - "peering.peerThroughMeshGateways in invalid partition": { - input: &Mesh{ - ObjectMeta: metav1.ObjectMeta{ - Name: "name", - }, - Spec: MeshSpec{ - Peering: &PeeringMeshConfig{ - PeerThroughMeshGateways: true, - }, - }, - }, - consulMeta: common.ConsulMeta{ - Partition: "blurg", - PartitionsEnabled: true, - }, - expectedErrMsgs: []string{ - `spec.peering.peerThroughMeshGateways: Forbidden: "peerThroughMeshGateways" is only valid in the "default" partition`, - }, - }, - "peering.peerThroughMeshGateways valid partition": { - input: &Mesh{ - ObjectMeta: metav1.ObjectMeta{ - Name: "name", - }, - Spec: MeshSpec{ - Peering: &PeeringMeshConfig{ - PeerThroughMeshGateways: true, - }, - }, - }, - consulMeta: common.ConsulMeta{ - Partition: "default", - PartitionsEnabled: true, - }, - }, - "peering.peerThroughMeshGateways valid with no partitions": { - input: &Mesh{ - ObjectMeta: metav1.ObjectMeta{ - Name: "name", - }, - Spec: MeshSpec{ - Peering: &PeeringMeshConfig{ - PeerThroughMeshGateways: true, - }, - }, - }, - }, "multiple errors": { input: &Mesh{ ObjectMeta: metav1.ObjectMeta{ @@ -372,28 +312,20 @@ func TestMesh_Validate(t *testing.T) { TLSMaxVersion: "bar", }, }, - Peering: &PeeringMeshConfig{ - PeerThroughMeshGateways: true, - }, }, }, - consulMeta: common.ConsulMeta{ - Partition: "blurg", - PartitionsEnabled: true, - }, expectedErrMsgs: []string{ `spec.tls.incoming.tlsMinVersion: Invalid value: "foo": must be one of "TLS_AUTO", "TLSv1_0", "TLSv1_1", "TLSv1_2", "TLSv1_3", ""`, `spec.tls.incoming.tlsMaxVersion: Invalid value: "bar": must be one of "TLS_AUTO", "TLSv1_0", "TLSv1_1", "TLSv1_2", "TLSv1_3", ""`, `spec.tls.outgoing.tlsMinVersion: Invalid value: "foo": must be one of "TLS_AUTO", "TLSv1_0", "TLSv1_1", "TLSv1_2", "TLSv1_3", ""`, `spec.tls.outgoing.tlsMaxVersion: Invalid value: "bar": must be one of "TLS_AUTO", "TLSv1_0", "TLSv1_1", "TLSv1_2", "TLSv1_3", ""`, - `spec.peering.peerThroughMeshGateways: Forbidden: 
"peerThroughMeshGateways" is only valid in the "default" partition`, }, }, } for name, testCase := range cases { t.Run(name, func(t *testing.T) { - err := testCase.input.Validate(testCase.consulMeta) + err := testCase.input.Validate(common.ConsulMeta{}) if len(testCase.expectedErrMsgs) != 0 { require.Error(t, err) for _, s := range testCase.expectedErrMsgs { diff --git a/control-plane/api/v1alpha1/mesh_webhook.go b/control-plane/api/v1alpha1/mesh_webhook.go index 5c714c4e5f..d28cfc193c 100644 --- a/control-plane/api/v1alpha1/mesh_webhook.go +++ b/control-plane/api/v1alpha1/mesh_webhook.go @@ -7,6 +7,7 @@ import ( "github.com/go-logr/logr" "github.com/hashicorp/consul-k8s/control-plane/api/common" + capi "github.com/hashicorp/consul/api" admissionv1 "k8s.io/api/admission/v1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" @@ -16,12 +17,9 @@ import ( type MeshWebhook struct { client.Client - Logger logr.Logger - - // ConsulMeta contains metadata specific to the Consul installation. - ConsulMeta common.ConsulMeta - - decoder *admission.Decoder + ConsulClient *capi.Client + Logger logr.Logger + decoder *admission.Decoder } // NOTE: The path value in the below line is the path to the webhook. @@ -61,19 +59,7 @@ func (v *MeshWebhook) Handle(ctx context.Context, req admission.Request) admissi } } - return common.ValidateConfigEntry(ctx, req, v.Logger, v, &mesh, v.ConsulMeta) -} - -func (v *MeshWebhook) List(ctx context.Context) ([]common.ConfigEntryResource, error) { - var meshList MeshList - if err := v.Client.List(ctx, &meshList); err != nil { - return nil, err - } - var entries []common.ConfigEntryResource - for _, item := range meshList.Items { - entries = append(entries, common.ConfigEntryResource(&item)) - } - return entries, nil + return admission.Allowed(fmt.Sprintf("valid %s request", mesh.KubeKind())) } func (v *MeshWebhook) InjectDecoder(d *admission.Decoder) error { diff --git a/control-plane/api/v1alpha1/mesh_webhook_test.go b/control-plane/api/v1alpha1/mesh_webhook_test.go index 55b0c3a77d..633ec55497 100644 --- a/control-plane/api/v1alpha1/mesh_webhook_test.go +++ b/control-plane/api/v1alpha1/mesh_webhook_test.go @@ -63,23 +63,6 @@ func TestValidateMesh(t *testing.T) { expAllow: false, expErrMessage: "mesh resource name must be \"mesh\"", }, - "validation rejects": { - existingResources: nil, - newResource: &Mesh{ - ObjectMeta: metav1.ObjectMeta{ - Name: common.Mesh, - }, - Spec: MeshSpec{ - TLS: &MeshTLSConfig{ - Incoming: &MeshDirectionalTLSConfig{ - TLSMinVersion: "foo", - }, - }, - }, - }, - expAllow: false, - expErrMessage: "mesh.consul.hashicorp.com \"mesh\" is invalid: spec.tls.incoming.tlsMinVersion: Invalid value: \"foo\": must be one of \"TLS_AUTO\", \"TLSv1_0\", \"TLSv1_1\", \"TLSv1_2\", \"TLSv1_3\", \"\"", - }, } for name, c := range cases { t.Run(name, func(t *testing.T) { @@ -93,9 +76,10 @@ func TestValidateMesh(t *testing.T) { require.NoError(t, err) validator := &MeshWebhook{ - Client: client, - Logger: logrtest.TestLogger{T: t}, - decoder: decoder, + Client: client, + ConsulClient: nil, + Logger: logrtest.TestLogger{T: t}, + decoder: decoder, } response := validator.Handle(ctx, admission.Request{ AdmissionRequest: admissionv1.AdmissionRequest{ diff --git a/control-plane/api/v1alpha1/peeringacceptor_webhook.go b/control-plane/api/v1alpha1/peeringacceptor_webhook.go index 60367c1384..728bd205ee 100644 --- a/control-plane/api/v1alpha1/peeringacceptor_webhook.go +++ 
b/control-plane/api/v1alpha1/peeringacceptor_webhook.go @@ -6,6 +6,7 @@ import ( "net/http" "github.com/go-logr/logr" + capi "github.com/hashicorp/consul/api" admissionv1 "k8s.io/api/admission/v1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" @@ -15,8 +16,10 @@ import ( type PeeringAcceptorWebhook struct { client.Client - Logger logr.Logger - decoder *admission.Decoder + ConsulClient *capi.Client + Logger logr.Logger + decoder *admission.Decoder + //ConsulMeta common.ConsulMeta } // NOTE: The path value in the below line is the path to the webhook. diff --git a/control-plane/api/v1alpha1/peeringacceptor_webhook_test.go b/control-plane/api/v1alpha1/peeringacceptor_webhook_test.go index a65966881a..26ed3e2150 100644 --- a/control-plane/api/v1alpha1/peeringacceptor_webhook_test.go +++ b/control-plane/api/v1alpha1/peeringacceptor_webhook_test.go @@ -134,9 +134,10 @@ func TestValidatePeeringAcceptor(t *testing.T) { require.NoError(t, err) validator := &PeeringAcceptorWebhook{ - Client: client, - Logger: logrtest.TestLogger{T: t}, - decoder: decoder, + Client: client, + ConsulClient: nil, + Logger: logrtest.TestLogger{T: t}, + decoder: decoder, } response := validator.Handle(ctx, admission.Request{ AdmissionRequest: admissionv1.AdmissionRequest{ diff --git a/control-plane/api/v1alpha1/peeringdialer_webhook.go b/control-plane/api/v1alpha1/peeringdialer_webhook.go index fc0b1c38f6..587f998155 100644 --- a/control-plane/api/v1alpha1/peeringdialer_webhook.go +++ b/control-plane/api/v1alpha1/peeringdialer_webhook.go @@ -6,6 +6,7 @@ import ( "net/http" "github.com/go-logr/logr" + capi "github.com/hashicorp/consul/api" admissionv1 "k8s.io/api/admission/v1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" @@ -15,8 +16,9 @@ import ( type PeeringDialerWebhook struct { client.Client - Logger logr.Logger - decoder *admission.Decoder + ConsulClient *capi.Client + Logger logr.Logger + decoder *admission.Decoder } // NOTE: The path value in the below line is the path to the webhook. diff --git a/control-plane/api/v1alpha1/peeringdialer_webhook_test.go b/control-plane/api/v1alpha1/peeringdialer_webhook_test.go index e8b206e3e6..abdca4f417 100644 --- a/control-plane/api/v1alpha1/peeringdialer_webhook_test.go +++ b/control-plane/api/v1alpha1/peeringdialer_webhook_test.go @@ -134,9 +134,10 @@ func TestValidatePeeringDialer(t *testing.T) { require.NoError(t, err) validator := &PeeringDialerWebhook{ - Client: client, - Logger: logrtest.TestLogger{T: t}, - decoder: decoder, + Client: client, + ConsulClient: nil, + Logger: logrtest.TestLogger{T: t}, + decoder: decoder, } response := validator.Handle(ctx, admission.Request{ AdmissionRequest: admissionv1.AdmissionRequest{ diff --git a/control-plane/api/v1alpha1/proxydefaults_types.go b/control-plane/api/v1alpha1/proxydefaults_types.go index 543d8f7ae4..215ec708ff 100644 --- a/control-plane/api/v1alpha1/proxydefaults_types.go +++ b/control-plane/api/v1alpha1/proxydefaults_types.go @@ -75,10 +75,6 @@ type ProxyDefaultsSpec struct { MeshGateway MeshGateway `json:"meshGateway,omitempty"` // Expose controls the default expose path configuration for Envoy. Expose Expose `json:"expose,omitempty"` - // AccessLogs controls all envoy instances' access logging configuration. - AccessLogs *AccessLogs `json:"accessLogs,omitempty"` - // EnvoyExtensions are a list of extensions to modify Envoy proxy configuration. 
- EnvoyExtensions EnvoyExtensions `json:"envoyExtensions,omitempty"` } func (in *ProxyDefaults) GetObjectMeta() metav1.ObjectMeta { @@ -169,8 +165,6 @@ func (in *ProxyDefaults) ToConsul(datacenter string) capi.ConfigEntry { Expose: in.Spec.Expose.toConsul(), Config: consulConfig, TransparentProxy: in.Spec.TransparentProxy.toConsul(), - AccessLogs: in.Spec.AccessLogs.toConsul(), - EnvoyExtensions: in.Spec.EnvoyExtensions.toConsul(), Meta: meta(datacenter), } } @@ -201,12 +195,7 @@ func (in *ProxyDefaults) Validate(_ common.ConsulMeta) error { if err := in.validateConfig(path.Child("config")); err != nil { allErrs = append(allErrs, err) } - if err := in.Spec.AccessLogs.validate(path.Child("accessLogs")); err != nil { - allErrs = append(allErrs, err) - } allErrs = append(allErrs, in.Spec.Expose.validate(path.Child("expose"))...) - allErrs = append(allErrs, in.Spec.EnvoyExtensions.validate(path.Child("envoyExtensions"))...) - if len(allErrs) > 0 { return apierrors.NewInvalid( schema.GroupKind{Group: ConsulHashicorpGroup, Kind: ProxyDefaultsKubeKind}, @@ -244,93 +233,7 @@ func (in *ProxyDefaults) validateConfig(path *field.Path) *field.Error { } var outConfig map[string]interface{} if err := json.Unmarshal(in.Spec.Config, &outConfig); err != nil { - return field.Invalid(path, string(in.Spec.Config), fmt.Sprintf(`must be valid map value: %s`, err)) + return field.Invalid(path, in.Spec.Config, fmt.Sprintf(`must be valid map value: %s`, err)) } return nil } - -// LogSinkType represents the destination for Envoy access logs. -// One of "file", "stderr", or "stdout". -type LogSinkType string - -const ( - DefaultLogSinkType LogSinkType = "" - FileLogSinkType LogSinkType = "file" - StdErrLogSinkType LogSinkType = "stderr" - StdOutLogSinkType LogSinkType = "stdout" -) - -// AccessLogs describes the access logging configuration for all Envoy proxies in the mesh. -type AccessLogs struct { - // Enabled turns on all access logging - Enabled bool `json:"enabled,omitempty"` - - // DisableListenerLogs turns off just listener logs for connections rejected by Envoy because they don't - // have a matching listener filter. - DisableListenerLogs bool `json:"disableListenerLogs,omitempty"` - - // Type selects the output for logs: - // one of "file", "stderr", or "stdout". - Type LogSinkType `json:"type,omitempty"` - - // Path is the output file to write logs for file-type logging - Path string `json:"path,omitempty"` - - // JSONFormat is a JSON-formatted string of an Envoy access log format dictionary. - // See for more info on formatting: https://www.envoyproxy.io/docs/envoy/latest/configuration/observability/access_log/usage#format-dictionaries - // Defining both JSONFormat and TextFormat is invalid. - JSONFormat string `json:"jsonFormat,omitempty"` - - // TextFormat is a representation of the Envoy access log format. - // See for more info on formatting: https://www.envoyproxy.io/docs/envoy/latest/configuration/observability/access_log/usage#format-strings - // Defining both JSONFormat and TextFormat is invalid. 
- TextFormat string `json:"textFormat,omitempty"` -} - -func (in *AccessLogs) validate(path *field.Path) *field.Error { - if in == nil { - return nil - } - - switch in.Type { - case DefaultLogSinkType, StdErrLogSinkType, StdOutLogSinkType: - // OK - case FileLogSinkType: - if in.Path == "" { - return field.Invalid(path.Child("path"), in.Path, "path must be specified when using file type access logs") - } - default: - return field.Invalid(path.Child("type"), in.Type, "invalid access log type (must be one of \"stdout\", \"stderr\", \"file\"") - } - - if in.JSONFormat != "" && in.TextFormat != "" { - return field.Invalid(path.Child("textFormat"), in.TextFormat, "cannot specify both access log jsonFormat and textFormat") - } - - if in.Type != FileLogSinkType && in.Path != "" { - return field.Invalid(path.Child("path"), in.Path, "path is only valid for file type access logs") - } - - if in.JSONFormat != "" { - msg := json.RawMessage{} - if err := json.Unmarshal([]byte(in.JSONFormat), &msg); err != nil { - return field.Invalid(path.Child("jsonFormat"), in.JSONFormat, "invalid access log json") - } - } - - return nil -} - -func (in *AccessLogs) toConsul() *capi.AccessLogsConfig { - if in == nil { - return nil - } - return &capi.AccessLogsConfig{ - Enabled: in.Enabled, - DisableListenerLogs: in.DisableListenerLogs, - JSONFormat: in.JSONFormat, - Path: in.Path, - TextFormat: in.TextFormat, - Type: capi.LogSinkType(in.Type), - } -} diff --git a/control-plane/api/v1alpha1/proxydefaults_types_test.go b/control-plane/api/v1alpha1/proxydefaults_types_test.go index 225068c136..2950a3a36e 100644 --- a/control-plane/api/v1alpha1/proxydefaults_types_test.go +++ b/control-plane/api/v1alpha1/proxydefaults_types_test.go @@ -71,25 +71,6 @@ func TestProxyDefaults_MatchesConsul(t *testing.T) { OutboundListenerPort: 1000, DialedDirectly: true, }, - AccessLogs: &AccessLogs{ - Enabled: true, - DisableListenerLogs: true, - Type: FileLogSinkType, - Path: "/var/log/envoy.logs", - TextFormat: "ITS WORKING %START_TIME%", - }, - EnvoyExtensions: EnvoyExtensions{ - EnvoyExtension{ - Name: "aws_request_signing", - Arguments: json.RawMessage(`{"AWSServiceName": "s3", "Region": "us-west-2"}`), - Required: false, - }, - EnvoyExtension{ - Name: "zipkin", - Arguments: json.RawMessage(`{"ClusterName": "zipkin_cluster", "Port": "9411", "CollectorEndpoint":"/api/v2/spans"}`), - Required: true, - }, - }, }, }, Theirs: &capi.ProxyConfigEntry{ @@ -122,32 +103,6 @@ func TestProxyDefaults_MatchesConsul(t *testing.T) { OutboundListenerPort: 1000, DialedDirectly: true, }, - AccessLogs: &capi.AccessLogsConfig{ - Enabled: true, - DisableListenerLogs: true, - Type: capi.FileLogSinkType, - Path: "/var/log/envoy.logs", - TextFormat: "ITS WORKING %START_TIME%", - }, - EnvoyExtensions: []capi.EnvoyExtension{ - { - Name: "aws_request_signing", - Arguments: map[string]interface{}{ - "AWSServiceName": "s3", - "Region": "us-west-2", - }, - Required: false, - }, - { - Name: "zipkin", - Arguments: map[string]interface{}{ - "ClusterName": "zipkin_cluster", - "Port": "9411", - "CollectorEndpoint": "/api/v2/spans", - }, - Required: true, - }, - }, }, Matches: true, }, @@ -281,25 +236,6 @@ func TestProxyDefaults_ToConsul(t *testing.T) { OutboundListenerPort: 1000, DialedDirectly: true, }, - AccessLogs: &AccessLogs{ - Enabled: true, - DisableListenerLogs: true, - Type: FileLogSinkType, - Path: "/var/log/envoy.logs", - TextFormat: "ITS WORKING %START_TIME%", - }, - EnvoyExtensions: EnvoyExtensions{ - EnvoyExtension{ - Name: "aws_request_signing", - 
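// Aside: for reference, the deleted AccessLogs validate method above enforced
// four rules. A self-contained sketch that mirrors them, with plain strings
// standing in for the removed AccessLogs/LogSinkType types; the function name
// is hypothetical.
package main

import (
	"encoding/json"
	"errors"
)

func checkAccessLogs(sinkType, path, jsonFormat, textFormat string) error {
	switch sinkType {
	case "", "stdout", "stderr": // default and stream sinks need no path
	case "file":
		if path == "" {
			return errors.New("path must be specified when using file type access logs")
		}
	default:
		return errors.New(`invalid access log type (must be one of "stdout", "stderr", "file")`)
	}
	if jsonFormat != "" && textFormat != "" {
		return errors.New("cannot specify both access log jsonFormat and textFormat")
	}
	if sinkType != "file" && path != "" {
		return errors.New("path is only valid for file type access logs")
	}
	if jsonFormat != "" {
		var msg json.RawMessage
		if err := json.Unmarshal([]byte(jsonFormat), &msg); err != nil {
			return errors.New("invalid access log json")
		}
	}
	return nil
}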
Arguments: json.RawMessage(`{"AWSServiceName": "s3", "Region": "us-west-2"}`), - Required: false, - }, - EnvoyExtension{ - Name: "zipkin", - Arguments: json.RawMessage(`{"ClusterName": "zipkin_cluster", "Port": "9411", "CollectorEndpoint":"/api/v2/spans"}`), - Required: true, - }, - }, }, }, Exp: &capi.ProxyConfigEntry{ @@ -333,32 +269,6 @@ func TestProxyDefaults_ToConsul(t *testing.T) { OutboundListenerPort: 1000, DialedDirectly: true, }, - AccessLogs: &capi.AccessLogsConfig{ - Enabled: true, - DisableListenerLogs: true, - Type: capi.FileLogSinkType, - Path: "/var/log/envoy.logs", - TextFormat: "ITS WORKING %START_TIME%", - }, - EnvoyExtensions: []capi.EnvoyExtension{ - { - Name: "aws_request_signing", - Arguments: map[string]interface{}{ - "AWSServiceName": "s3", - "Region": "us-west-2", - }, - Required: false, - }, - { - Name: "zipkin", - Arguments: map[string]interface{}{ - "ClusterName": "zipkin_cluster", - "Port": "9411", - "CollectorEndpoint": "/api/v2/spans", - }, - Required: true, - }, - }, Meta: map[string]string{ common.SourceKey: common.SourceValue, common.DatacenterKey: "datacenter", @@ -383,30 +293,8 @@ func TestProxyDefaults_Validate(t *testing.T) { input *ProxyDefaults expectedErrMsg string }{ - "valid envoyExtension": { - input: &ProxyDefaults{ - ObjectMeta: metav1.ObjectMeta{ - Name: "global", - }, - Spec: ProxyDefaultsSpec{ - EnvoyExtensions: EnvoyExtensions{ - EnvoyExtension{ - Name: "aws_request_signing", - Arguments: json.RawMessage(`{"AWSServiceName": "s3", "Region": "us-west-2"}`), - Required: false, - }, - EnvoyExtension{ - Name: "zipkin", - Arguments: json.RawMessage(`{"ClusterName": "zipkin_cluster", "Port": "9411", "CollectorEndpoint":"/api/v2/spans"}`), - Required: true, - }, - }, - }, - }, - expectedErrMsg: "", - }, "meshgateway.mode": { - input: &ProxyDefaults{ + &ProxyDefaults{ ObjectMeta: metav1.ObjectMeta{ Name: "global", }, @@ -416,10 +304,10 @@ func TestProxyDefaults_Validate(t *testing.T) { }, }, }, - expectedErrMsg: `proxydefaults.consul.hashicorp.com "global" is invalid: spec.meshGateway.mode: Invalid value: "foobar": must be one of "remote", "local", "none", ""`, + `proxydefaults.consul.hashicorp.com "global" is invalid: spec.meshGateway.mode: Invalid value: "foobar": must be one of "remote", "local", "none", ""`, }, "expose.paths[].protocol": { - input: &ProxyDefaults{ + &ProxyDefaults{ ObjectMeta: metav1.ObjectMeta{ Name: "global", }, @@ -434,10 +322,10 @@ func TestProxyDefaults_Validate(t *testing.T) { }, }, }, - expectedErrMsg: `proxydefaults.consul.hashicorp.com "global" is invalid: spec.expose.paths[0].protocol: Invalid value: "invalid-protocol": must be one of "http", "http2"`, + `proxydefaults.consul.hashicorp.com "global" is invalid: spec.expose.paths[0].protocol: Invalid value: "invalid-protocol": must be one of "http", "http2"`, }, "expose.paths[].path": { - input: &ProxyDefaults{ + &ProxyDefaults{ ObjectMeta: metav1.ObjectMeta{ Name: "global", }, @@ -452,10 +340,10 @@ func TestProxyDefaults_Validate(t *testing.T) { }, }, }, - expectedErrMsg: `proxydefaults.consul.hashicorp.com "global" is invalid: spec.expose.paths[0].path: Invalid value: "invalid-path": must begin with a '/'`, + `proxydefaults.consul.hashicorp.com "global" is invalid: spec.expose.paths[0].path: Invalid value: "invalid-path": must begin with a '/'`, }, "transparentProxy.outboundListenerPort": { - input: &ProxyDefaults{ + &ProxyDefaults{ ObjectMeta: metav1.ObjectMeta{ Name: "global", }, @@ -465,10 +353,10 @@ func TestProxyDefaults_Validate(t *testing.T) { }, }, }, - 
expectedErrMsg: "proxydefaults.consul.hashicorp.com \"global\" is invalid: spec.transparentProxy.outboundListenerPort: Invalid value: 1000: use the annotation `consul.hashicorp.com/transparent-proxy-outbound-listener-port` to configure the Outbound Listener Port", + "proxydefaults.consul.hashicorp.com \"global\" is invalid: spec.transparentProxy.outboundListenerPort: Invalid value: 1000: use the annotation `consul.hashicorp.com/transparent-proxy-outbound-listener-port` to configure the Outbound Listener Port", }, "mode": { - input: &ProxyDefaults{ + &ProxyDefaults{ ObjectMeta: metav1.ObjectMeta{ Name: "global", }, @@ -476,137 +364,10 @@ func TestProxyDefaults_Validate(t *testing.T) { Mode: proxyModeRef("transparent"), }, }, - expectedErrMsg: "proxydefaults.consul.hashicorp.com \"global\" is invalid: spec.mode: Invalid value: \"transparent\": use the annotation `consul.hashicorp.com/transparent-proxy` to configure the Transparent Proxy Mode", - }, - "accessLogs.type": { - input: &ProxyDefaults{ - ObjectMeta: metav1.ObjectMeta{ - Name: "global", - }, - Spec: ProxyDefaultsSpec{ - AccessLogs: &AccessLogs{ - Type: "foo", - }, - }, - }, - expectedErrMsg: "proxydefaults.consul.hashicorp.com \"global\" is invalid: spec.accessLogs.type: Invalid value: \"foo\": invalid access log type (must be one of \"stdout\", \"stderr\", \"file\"", - }, - "accessLogs.path missing": { - input: &ProxyDefaults{ - ObjectMeta: metav1.ObjectMeta{ - Name: "global", - }, - Spec: ProxyDefaultsSpec{ - AccessLogs: &AccessLogs{ - Type: "file", - }, - }, - }, - expectedErrMsg: "proxydefaults.consul.hashicorp.com \"global\" is invalid: spec.accessLogs.path: Invalid value: \"\": path must be specified when using file type access logs", - }, - "accessLogs.path for wrong type": { - input: &ProxyDefaults{ - ObjectMeta: metav1.ObjectMeta{ - Name: "global", - }, - Spec: ProxyDefaultsSpec{ - AccessLogs: &AccessLogs{ - Path: "/var/log/envoy.logs", - }, - }, - }, - expectedErrMsg: "proxydefaults.consul.hashicorp.com \"global\" is invalid: spec.accessLogs.path: Invalid value: \"/var/log/envoy.logs\": path is only valid for file type access logs", - }, - "accessLogs.jsonFormat": { - input: &ProxyDefaults{ - ObjectMeta: metav1.ObjectMeta{ - Name: "global", - }, - Spec: ProxyDefaultsSpec{ - AccessLogs: &AccessLogs{ - JSONFormat: "{ \"start_time\": \"%START_TIME\"", // intentionally missing the closing brace - }, - }, - }, - expectedErrMsg: "proxydefaults.consul.hashicorp.com \"global\" is invalid: spec.accessLogs.jsonFormat: Invalid value: \"{ \\\"start_time\\\": \\\"%START_TIME\\\"\": invalid access log json", - }, - "accessLogs.textFormat": { - input: &ProxyDefaults{ - ObjectMeta: metav1.ObjectMeta{ - Name: "global", - }, - Spec: ProxyDefaultsSpec{ - AccessLogs: &AccessLogs{ - JSONFormat: "{ \"start_time\": \"%START_TIME\" }", - TextFormat: "MY START TIME %START_TIME", - }, - }, - }, - expectedErrMsg: "proxydefaults.consul.hashicorp.com \"global\" is invalid: spec.accessLogs.textFormat: Invalid value: \"MY START TIME %START_TIME\": cannot specify both access log jsonFormat and textFormat", - }, - "envoyExtension.arguments single empty": { - input: &ProxyDefaults{ - ObjectMeta: metav1.ObjectMeta{ - Name: "global", - }, - Spec: ProxyDefaultsSpec{ - EnvoyExtensions: EnvoyExtensions{ - EnvoyExtension{ - Name: "aws_request_signing", - Arguments: json.RawMessage(`{"AWSServiceName": "s3", "Region": "us-west-2"}`), - Required: false, - }, - EnvoyExtension{ - Name: "zipkin", - Arguments: nil, - Required: true, - }, - }, - }, - }, - 
expectedErrMsg: `proxydefaults.consul.hashicorp.com "global" is invalid: spec.envoyExtensions.envoyExtension[1].arguments: Required value: arguments must be defined`, - }, - "envoyExtension.arguments multi empty": { - input: &ProxyDefaults{ - ObjectMeta: metav1.ObjectMeta{ - Name: "global", - }, - Spec: ProxyDefaultsSpec{ - EnvoyExtensions: EnvoyExtensions{ - EnvoyExtension{ - Name: "aws_request_signing", - Arguments: nil, - Required: false, - }, - EnvoyExtension{ - Name: "zipkin", - Arguments: nil, - Required: true, - }, - }, - }, - }, - expectedErrMsg: `proxydefaults.consul.hashicorp.com "global" is invalid: [spec.envoyExtensions.envoyExtension[0].arguments: Required value: arguments must be defined, spec.envoyExtensions.envoyExtension[1].arguments: Required value: arguments must be defined]`, - }, - "envoyExtension.arguments invalid json": { - input: &ProxyDefaults{ - ObjectMeta: metav1.ObjectMeta{ - Name: "global", - }, - Spec: ProxyDefaultsSpec{ - EnvoyExtensions: EnvoyExtensions{ - EnvoyExtension{ - Name: "aws_request_signing", - Arguments: json.RawMessage(`{"SOME_INVALID_JSON"}`), - Required: false, - }, - }, - }, - }, - expectedErrMsg: `proxydefaults.consul.hashicorp.com "global" is invalid: spec.envoyExtensions.envoyExtension[0].arguments: Invalid value: "{\"SOME_INVALID_JSON\"}": must be valid map value: invalid character '}' after object key`, + "proxydefaults.consul.hashicorp.com \"global\" is invalid: spec.mode: Invalid value: \"transparent\": use the annotation `consul.hashicorp.com/transparent-proxy` to configure the Transparent Proxy Mode", }, "multi-error": { - input: &ProxyDefaults{ + &ProxyDefaults{ ObjectMeta: metav1.ObjectMeta{ Name: "global", }, @@ -625,14 +386,10 @@ func TestProxyDefaults_Validate(t *testing.T) { TransparentProxy: &TransparentProxy{ OutboundListenerPort: 1000, }, - AccessLogs: &AccessLogs{ - JSONFormat: "{ \"start_time\": \"%START_TIME\" }", - TextFormat: "MY START TIME %START_TIME", - }, Mode: proxyModeRef("transparent"), }, }, - expectedErrMsg: "proxydefaults.consul.hashicorp.com \"global\" is invalid: [spec.meshGateway.mode: Invalid value: \"invalid-mode\": must be one of \"remote\", \"local\", \"none\", \"\", spec.transparentProxy.outboundListenerPort: Invalid value: 1000: use the annotation `consul.hashicorp.com/transparent-proxy-outbound-listener-port` to configure the Outbound Listener Port, spec.mode: Invalid value: \"transparent\": use the annotation `consul.hashicorp.com/transparent-proxy` to configure the Transparent Proxy Mode, spec.accessLogs.textFormat: Invalid value: \"MY START TIME %START_TIME\": cannot specify both access log jsonFormat and textFormat, spec.expose.paths[0].path: Invalid value: \"invalid-path\": must begin with a '/', spec.expose.paths[0].protocol: Invalid value: \"invalid-protocol\": must be one of \"http\", \"http2\"]", + "proxydefaults.consul.hashicorp.com \"global\" is invalid: [spec.meshGateway.mode: Invalid value: \"invalid-mode\": must be one of \"remote\", \"local\", \"none\", \"\", spec.transparentProxy.outboundListenerPort: Invalid value: 1000: use the annotation `consul.hashicorp.com/transparent-proxy-outbound-listener-port` to configure the Outbound Listener Port, spec.mode: Invalid value: \"transparent\": use the annotation `consul.hashicorp.com/transparent-proxy` to configure the Transparent Proxy Mode, spec.expose.paths[0].path: Invalid value: \"invalid-path\": must begin with a '/', spec.expose.paths[0].protocol: Invalid value: \"invalid-protocol\": must be one of \"http\", \"http2\"]", }, } for name, 
testCase := range cases { diff --git a/control-plane/api/v1alpha1/proxydefaults_webhook.go b/control-plane/api/v1alpha1/proxydefaults_webhook.go index 3873516074..4e221e0130 100644 --- a/control-plane/api/v1alpha1/proxydefaults_webhook.go +++ b/control-plane/api/v1alpha1/proxydefaults_webhook.go @@ -7,6 +7,7 @@ import ( "github.com/go-logr/logr" "github.com/hashicorp/consul-k8s/control-plane/api/common" + capi "github.com/hashicorp/consul/api" admissionv1 "k8s.io/api/admission/v1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" @@ -16,9 +17,10 @@ import ( type ProxyDefaultsWebhook struct { client.Client - Logger logr.Logger - decoder *admission.Decoder - ConsulMeta common.ConsulMeta + ConsulClient *capi.Client + Logger logr.Logger + decoder *admission.Decoder + ConsulMeta common.ConsulMeta } // NOTE: The path value in the below line is the path to the webhook. diff --git a/control-plane/api/v1alpha1/proxydefaults_webhook_test.go b/control-plane/api/v1alpha1/proxydefaults_webhook_test.go index 3136540089..a728ddc130 100644 --- a/control-plane/api/v1alpha1/proxydefaults_webhook_test.go +++ b/control-plane/api/v1alpha1/proxydefaults_webhook_test.go @@ -46,7 +46,7 @@ func TestValidateProxyDefault(t *testing.T) { }, expAllow: false, // This error message is because the value "1" is valid JSON but is an invalid map - expErrMessage: "proxydefaults.consul.hashicorp.com \"global\" is invalid: spec.config: Invalid value: \"1\": must be valid map value: json: cannot unmarshal number into Go value of type map[string]interface {}", + expErrMessage: "proxydefaults.consul.hashicorp.com \"global\" is invalid: spec.config: Invalid value: json.RawMessage{0x31}: must be valid map value: json: cannot unmarshal number into Go value of type map[string]interface {}", }, "proxy default exists": { existingResources: []runtime.Object{&ProxyDefaults{ @@ -118,9 +118,10 @@ func TestValidateProxyDefault(t *testing.T) { require.NoError(t, err) validator := &ProxyDefaultsWebhook{ - Client: client, - Logger: logrtest.TestLogger{T: t}, - decoder: decoder, + Client: client, + ConsulClient: nil, + Logger: logrtest.TestLogger{T: t}, + decoder: decoder, } response := validator.Handle(ctx, admission.Request{ AdmissionRequest: admissionv1.AdmissionRequest{ diff --git a/control-plane/api/v1alpha1/servicedefaults_types.go b/control-plane/api/v1alpha1/servicedefaults_types.go index 06da8b1d2c..41a536e855 100644 --- a/control-plane/api/v1alpha1/servicedefaults_types.go +++ b/control-plane/api/v1alpha1/servicedefaults_types.go @@ -2,11 +2,9 @@ package v1alpha1 import ( "fmt" - "net" - "strings" - "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" + "github.com/hashicorp/consul-k8s/control-plane/api/common" capi "github.com/hashicorp/consul/api" "github.com/miekg/dns" corev1 "k8s.io/api/core/v1" @@ -14,8 +12,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/validation/field" - - "github.com/hashicorp/consul-k8s/control-plane/api/common" + "net" + "strings" ) const ( @@ -89,19 +87,13 @@ type ServiceDefaultsSpec struct { // MaxInboundConnections is the maximum number of concurrent inbound connections to // each service instance. Defaults to 0 (using consul's default) if not set. 
 MaxInboundConnections int `json:"maxInboundConnections,omitempty"`
- // LocalConnectTimeoutMs is the number of milliseconds allowed to make connections to the local application
+ // The number of milliseconds allowed to make connections to the local application
 // instance before timing out. Defaults to 5000.
 LocalConnectTimeoutMs int `json:"localConnectTimeoutMs,omitempty"`
- // LocalRequestTimeoutMs is the timeout for HTTP requests to the local application instance in milliseconds.
+ // In milliseconds, the timeout for HTTP requests to the local application instance.
 // Applies to HTTP-based protocols only. If not specified, inherits the Envoy default for
 // route timeouts (15s).
 LocalRequestTimeoutMs int `json:"localRequestTimeoutMs,omitempty"`
- // BalanceInboundConnections sets the strategy for allocating inbound connections to the service across
- // proxy threads. The only supported value is exact_balance. By default, no connection balancing is used.
- // Refer to the Envoy Connection Balance config for details.
- BalanceInboundConnections string `json:"balanceInboundConnections,omitempty"`
- // EnvoyExtensions are a list of extensions to modify Envoy proxy configuration.
- EnvoyExtensions EnvoyExtensions `json:"envoyExtensions,omitempty"`
 }

 type Upstreams struct {
@@ -114,14 +106,12 @@ type Upstreams struct {
 }

 type Upstream struct {
- // Name is only accepted within service ServiceDefaultsSpec.UpstreamConfig.Overrides config entry.
+ // Name is only accepted within a service-defaults config entry.
 Name string `json:"name,omitempty"`
- // Namespace is only accepted within service ServiceDefaultsSpec.UpstreamConfig.Overrides config entry.
+ // Namespace is only accepted within a service-defaults config entry.
 Namespace string `json:"namespace,omitempty"`
- // Partition is only accepted within service ServiceDefaultsSpec.UpstreamConfig.Overrides config entry.
+ // Partition is only accepted within a service-defaults config entry.
 Partition string `json:"partition,omitempty"`
- // Peer is only accepted within service ServiceDefaultsSpec.UpstreamConfig.Overrides config entry.
- Peer string `json:"peer,omitempty"`
 // EnvoyListenerJSON is a complete override ("escape hatch") for the upstream's
 // listener.
 // Note: This escape hatch is NOT compatible with the discovery chain and
@@ -269,21 +259,19 @@ func (in *ServiceDefaults) SyncedConditionStatus() corev1.ConditionStatus {
 // ToConsul converts the entry into its Consul equivalent struct.
 func (in *ServiceDefaults) ToConsul(datacenter string) capi.ConfigEntry {
 return &capi.ServiceConfigEntry{
- Kind: in.ConsulKind(),
- Name: in.ConsulName(),
- Protocol: in.Spec.Protocol,
- MeshGateway: in.Spec.MeshGateway.toConsul(),
- Expose: in.Spec.Expose.toConsul(),
- ExternalSNI: in.Spec.ExternalSNI,
- TransparentProxy: in.Spec.TransparentProxy.toConsul(),
- UpstreamConfig: in.Spec.UpstreamConfig.toConsul(),
- Destination: in.Spec.Destination.toConsul(),
- Meta: meta(datacenter),
- MaxInboundConnections: in.Spec.MaxInboundConnections,
- LocalConnectTimeoutMs: in.Spec.LocalConnectTimeoutMs,
- LocalRequestTimeoutMs: in.Spec.LocalRequestTimeoutMs,
- BalanceInboundConnections: in.Spec.BalanceInboundConnections,
- EnvoyExtensions: in.Spec.EnvoyExtensions.toConsul(),
+ Kind: in.ConsulKind(),
+ Name: in.ConsulName(),
+ Protocol: in.Spec.Protocol,
+ MeshGateway: in.Spec.MeshGateway.toConsul(),
+ Expose: in.Spec.Expose.toConsul(),
+ ExternalSNI: in.Spec.ExternalSNI,
+ TransparentProxy: in.Spec.TransparentProxy.toConsul(),
+ UpstreamConfig: in.Spec.UpstreamConfig.toConsul(),
+ Destination: in.Spec.Destination.toConsul(),
+ Meta: meta(datacenter),
+ MaxInboundConnections: in.Spec.MaxInboundConnections,
+ LocalConnectTimeoutMs: in.Spec.LocalConnectTimeoutMs,
+ LocalRequestTimeoutMs: in.Spec.LocalRequestTimeoutMs,
 }
 }

@@ -322,13 +310,8 @@ func (in *ServiceDefaults) Validate(consulMeta common.ConsulMeta) error {
 allErrs = append(allErrs, field.Invalid(path.Child("localRequestTimeoutMs"), in.Spec.LocalRequestTimeoutMs, "LocalRequestTimeoutMs must be > 0"))
 }
- if in.Spec.BalanceInboundConnections != "" && in.Spec.BalanceInboundConnections != "exact_balance" {
- allErrs = append(allErrs, field.Invalid(path.Child("balanceInboundConnections"), in.Spec.BalanceInboundConnections, "BalanceInboundConnections must be an empty string or exact_balance"))
- }
-
 allErrs = append(allErrs, in.Spec.UpstreamConfig.validate(path.Child("upstreamConfig"), consulMeta.PartitionsEnabled)...)
 allErrs = append(allErrs, in.Spec.Expose.validate(path.Child("expose"))...)
- allErrs = append(allErrs, in.Spec.EnvoyExtensions.validate(path.Child("envoyExtensions"))...)
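// Aside: a sketch of a ServiceDefaultsSpec built from the fields kept by this
// revert; all values are illustrative. Per the upstream validation in the
// next hunk, Defaults entries must leave Name empty while Overrides entries
// must set it.
package main

import "github.com/hashicorp/consul-k8s/control-plane/api/v1alpha1"

func exampleServiceDefaultsSpec() v1alpha1.ServiceDefaultsSpec {
	return v1alpha1.ServiceDefaultsSpec{
		Protocol:              "http",
		MaxInboundConnections: 1024, // 0 falls back to Consul's default
		LocalConnectTimeoutMs: 2000, // dial timeout to the local app; defaults to 5000
		LocalRequestTimeoutMs: 0,    // HTTP only; 0 inherits Envoy's 15s route timeout
		UpstreamConfig: &v1alpha1.Upstreams{
			Defaults:  &v1alpha1.Upstream{Protocol: "http", ConnectTimeoutMs: 500},
			Overrides: []*v1alpha1.Upstream{{Name: "billing", Protocol: "grpc"}},
		},
	}
}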
if len(allErrs) > 0 { return apierrors.NewInvalid( @@ -377,25 +360,10 @@ func (in *Upstream) validate(path *field.Path, kind string, partitionsEnabled bo if in.Name != "" { errs = append(errs, field.Invalid(path.Child("name"), in.Name, "upstream.name for a default upstream must be \"\"")) } - if in.Namespace != "" { - errs = append(errs, field.Invalid(path.Child("namespace"), in.Namespace, "upstream.namespace for a default upstream must be \"\"")) - } - if in.Partition != "" { - errs = append(errs, field.Invalid(path.Child("partition"), in.Partition, "upstream.partition for a default upstream must be \"\"")) - } - if in.Peer != "" { - errs = append(errs, field.Invalid(path.Child("peer"), in.Peer, "upstream.peer for a default upstream must be \"\"")) - } } else if kind == overrideUpstream { if in.Name == "" { errs = append(errs, field.Invalid(path.Child("name"), in.Name, "upstream.name for an override upstream cannot be \"\"")) } - if in.Namespace != "" && in.Peer != "" { - errs = append(errs, field.Invalid(path, in, "both namespace and peer cannot be specified.")) - } - if in.Partition != "" && in.Peer != "" { - errs = append(errs, field.Invalid(path, in, "both partition and peer cannot be specified.")) - } } if !partitionsEnabled && in.Partition != "" { errs = append(errs, field.Invalid(path.Child("partition"), in.Partition, "Consul Enterprise Admin Partitions must be enabled to set upstream.partition")) @@ -414,7 +382,6 @@ func (in *Upstream) toConsul() *capi.UpstreamConfig { Name: in.Name, Namespace: in.Namespace, Partition: in.Partition, - Peer: in.Peer, EnvoyListenerJSON: in.EnvoyListenerJSON, EnvoyClusterJSON: in.EnvoyClusterJSON, Protocol: in.Protocol, diff --git a/control-plane/api/v1alpha1/servicedefaults_types_test.go b/control-plane/api/v1alpha1/servicedefaults_types_test.go index 33ec6d2f40..e7fdae2575 100644 --- a/control-plane/api/v1alpha1/servicedefaults_types_test.go +++ b/control-plane/api/v1alpha1/servicedefaults_types_test.go @@ -1,17 +1,15 @@ package v1alpha1 import ( - "encoding/json" "testing" "time" + "github.com/hashicorp/consul-k8s/control-plane/api/common" capi "github.com/hashicorp/consul/api" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/pointer" - - "github.com/hashicorp/consul-k8s/control-plane/api/common" ) func TestServiceDefaults_ToConsul(t *testing.T) { @@ -143,19 +141,6 @@ func TestServiceDefaults_ToConsul(t *testing.T) { }, }, }, - BalanceInboundConnections: "exact_balance", - EnvoyExtensions: EnvoyExtensions{ - EnvoyExtension{ - Name: "aws_request_signing", - Arguments: json.RawMessage(`{"AWSServiceName": "s3", "Region": "us-west-2"}`), - Required: false, - }, - EnvoyExtension{ - Name: "zipkin", - Arguments: json.RawMessage(`{"ClusterName": "zipkin_cluster", "Port": "9411", "CollectorEndpoint":"/api/v2/spans"}`), - Required: true, - }, - }, Destination: &ServiceDefaultsDestination{ Addresses: []string{"api.google.com"}, Port: 443, @@ -264,26 +249,6 @@ func TestServiceDefaults_ToConsul(t *testing.T) { }, }, }, - BalanceInboundConnections: "exact_balance", - EnvoyExtensions: []capi.EnvoyExtension{ - { - Name: "aws_request_signing", - Arguments: map[string]interface{}{ - "AWSServiceName": "s3", - "Region": "us-west-2", - }, - Required: false, - }, - { - Name: "zipkin", - Arguments: map[string]interface{}{ - "ClusterName": "zipkin_cluster", - "Port": "9411", - "CollectorEndpoint": "/api/v2/spans", - }, - Required: true, - }, - }, Destination: &capi.DestinationConfig{ Addresses: 
[]string{"api.google.com"}, Port: 443, @@ -437,19 +402,6 @@ func TestServiceDefaults_MatchesConsul(t *testing.T) { }, }, }, - BalanceInboundConnections: "exact_balance", - EnvoyExtensions: EnvoyExtensions{ - EnvoyExtension{ - Name: "aws_request_signing", - Arguments: json.RawMessage(`{"AWSServiceName": "s3", "Region": "us-west-2"}`), - Required: false, - }, - EnvoyExtension{ - Name: "zipkin", - Arguments: json.RawMessage(`{"ClusterName": "zipkin_cluster", "Port": "9411", "CollectorEndpoint":"/api/v2/spans"}`), - Required: true, - }, - }, Destination: &ServiceDefaultsDestination{ Addresses: []string{"api.google.com"}, Port: 443, @@ -551,26 +503,6 @@ func TestServiceDefaults_MatchesConsul(t *testing.T) { }, }, }, - BalanceInboundConnections: "exact_balance", - EnvoyExtensions: []capi.EnvoyExtension{ - { - Name: "aws_request_signing", - Arguments: map[string]interface{}{ - "AWSServiceName": "s3", - "Region": "us-west-2", - }, - Required: false, - }, - { - Name: "zipkin", - Arguments: map[string]interface{}{ - "ClusterName": "zipkin_cluster", - "Port": "9411", - "CollectorEndpoint": "/api/v2/spans", - }, - Required: true, - }, - }, Destination: &capi.DestinationConfig{ Addresses: []string{"api.google.com"}, Port: 443, @@ -706,39 +638,6 @@ func TestServiceDefaults_Validate(t *testing.T) { }, expectedErrMsg: "", }, - "valid - balanceInboundConnections": { - input: &ServiceDefaults{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-service", - }, - Spec: ServiceDefaultsSpec{ - BalanceInboundConnections: "exact_balance", - }, - }, - expectedErrMsg: "", - }, - "valid - envoyExtension": { - input: &ServiceDefaults{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-service", - }, - Spec: ServiceDefaultsSpec{ - EnvoyExtensions: EnvoyExtensions{ - EnvoyExtension{ - Name: "aws_request_signing", - Arguments: json.RawMessage(`{"AWSServiceName": "s3", "Region": "us-west-2"}`), - Required: false, - }, - EnvoyExtension{ - Name: "zipkin", - Arguments: json.RawMessage(`{"ClusterName": "zipkin_cluster", "Port": "9411", "CollectorEndpoint":"/api/v2/spans"}`), - Required: true, - }, - }, - }, - }, - expectedErrMsg: "", - }, "protocol": { input: &ServiceDefaults{ ObjectMeta: metav1.ObjectMeta{ @@ -855,21 +754,6 @@ func TestServiceDefaults_Validate(t *testing.T) { }, expectedErrMsg: `servicedefaults.consul.hashicorp.com "my-service" is invalid: spec.upstreamConfig.defaults.name: Invalid value: "foobar": upstream.name for a default upstream must be ""`, }, - "upstreamConfig.defaults.namespace": { - input: &ServiceDefaults{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-service", - }, - Spec: ServiceDefaultsSpec{ - UpstreamConfig: &Upstreams{ - Defaults: &Upstream{ - Namespace: "foobar", - }, - }, - }, - }, - expectedErrMsg: `servicedefaults.consul.hashicorp.com "my-service" is invalid: spec.upstreamConfig.defaults.namespace: Invalid value: "foobar": upstream.namespace for a default upstream must be ""`, - }, "upstreamConfig.defaults.partition": { input: &ServiceDefaults{ ObjectMeta: metav1.ObjectMeta{ @@ -884,22 +768,7 @@ func TestServiceDefaults_Validate(t *testing.T) { }, }, partitionsEnabled: false, - expectedErrMsg: `servicedefaults.consul.hashicorp.com "my-service" is invalid: [spec.upstreamConfig.defaults.partition: Invalid value: "upstream": upstream.partition for a default upstream must be "", spec.upstreamConfig.defaults.partition: Invalid value: "upstream": Consul Enterprise Admin Partitions must be enabled to set upstream.partition]`, - }, - "upstreamConfig.defaults.peer": { - input: &ServiceDefaults{ - ObjectMeta: 
metav1.ObjectMeta{ - Name: "my-service", - }, - Spec: ServiceDefaultsSpec{ - UpstreamConfig: &Upstreams{ - Defaults: &Upstream{ - Peer: "foobar", - }, - }, - }, - }, - expectedErrMsg: `servicedefaults.consul.hashicorp.com "my-service" is invalid: spec.upstreamConfig.defaults.peer: Invalid value: "foobar": upstream.peer for a default upstream must be ""`, + expectedErrMsg: `servicedefaults.consul.hashicorp.com "my-service" is invalid: spec.upstreamConfig.defaults.partition: Invalid value: "upstream": Consul Enterprise Admin Partitions must be enabled to set upstream.partition`, }, "upstreamConfig.overrides.meshGateway": { input: &ServiceDefaults{ @@ -956,44 +825,6 @@ func TestServiceDefaults_Validate(t *testing.T) { }, expectedErrMsg: `servicedefaults.consul.hashicorp.com "my-service" is invalid: spec.upstreamConfig.overrides[0].partition: Invalid value: "upstream": Consul Enterprise Admin Partitions must be enabled to set upstream.partition`, }, - "upstreamConfig.overrides.partition and namespace": { - input: &ServiceDefaults{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-service", - }, - Spec: ServiceDefaultsSpec{ - UpstreamConfig: &Upstreams{ - Overrides: []*Upstream{ - { - Name: "service", - Namespace: "namespace", - Peer: "peer", - }, - }, - }, - }, - }, - expectedErrMsg: `servicedefaults.consul.hashicorp.com "my-service" is invalid: spec.upstreamConfig.overrides[0]: Invalid value: v1alpha1.Upstream{Name:"service", Namespace:"namespace", Partition:"", Peer:"peer", EnvoyListenerJSON:"", EnvoyClusterJSON:"", Protocol:"", ConnectTimeoutMs:0, Limits:(*v1alpha1.UpstreamLimits)(nil), PassiveHealthCheck:(*v1alpha1.PassiveHealthCheck)(nil), MeshGateway:v1alpha1.MeshGateway{Mode:""}}: both namespace and peer cannot be specified.`, - }, - "upstreamConfig.overrides.partition and peer": { - input: &ServiceDefaults{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-service", - }, - Spec: ServiceDefaultsSpec{ - UpstreamConfig: &Upstreams{ - Overrides: []*Upstream{ - { - Name: "service", - Partition: "upstream", - Peer: "peer", - }, - }, - }, - }, - }, - expectedErrMsg: `servicedefaults.consul.hashicorp.com "my-service" is invalid: [spec.upstreamConfig.overrides[0]: Invalid value: v1alpha1.Upstream{Name:"service", Namespace:"", Partition:"upstream", Peer:"peer", EnvoyListenerJSON:"", EnvoyClusterJSON:"", Protocol:"", ConnectTimeoutMs:0, Limits:(*v1alpha1.UpstreamLimits)(nil), PassiveHealthCheck:(*v1alpha1.PassiveHealthCheck)(nil), MeshGateway:v1alpha1.MeshGateway{Mode:""}}: both partition and peer cannot be specified., spec.upstreamConfig.overrides[0].partition: Invalid value: "upstream": Consul Enterprise Admin Partitions must be enabled to set upstream.partition]`, - }, "multi-error": { input: &ServiceDefaults{ ObjectMeta: metav1.ObjectMeta{ @@ -1084,111 +915,6 @@ func TestServiceDefaults_Validate(t *testing.T) { }, expectedErrMsg: `servicedefaults.consul.hashicorp.com "my-service" is invalid: spec.destination.port: Invalid value: 0x0: invalid port number`, }, - "MaxInboundConnections (invalid value)": { - input: &ServiceDefaults{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-service", - }, - Spec: ServiceDefaultsSpec{ - MaxInboundConnections: -1, - }, - }, - expectedErrMsg: `servicedefaults.consul.hashicorp.com "my-service" is invalid: spec.maxinboundconnections: Invalid value: -1: MaxInboundConnections must be > 0`, - }, - "LocalConnectTimeoutMs (invalid value)": { - input: &ServiceDefaults{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-service", - }, - Spec: ServiceDefaultsSpec{ - LocalConnectTimeoutMs: 
-1, - }, - }, - expectedErrMsg: `servicedefaults.consul.hashicorp.com "my-service" is invalid: spec.localConnectTimeoutMs: Invalid value: -1: LocalConnectTimeoutMs must be > 0`, - }, - "LocalRequestTimeoutMs (invalid value)": { - input: &ServiceDefaults{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-service", - }, - Spec: ServiceDefaultsSpec{ - LocalRequestTimeoutMs: -1, - }, - }, - expectedErrMsg: `servicedefaults.consul.hashicorp.com "my-service" is invalid: spec.localRequestTimeoutMs: Invalid value: -1: LocalRequestTimeoutMs must be > 0`, - }, - "balanceInboundConnections (invalid value)": { - input: &ServiceDefaults{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-service", - }, - Spec: ServiceDefaultsSpec{ - BalanceInboundConnections: "not_exact_balance", - }, - }, - expectedErrMsg: `servicedefaults.consul.hashicorp.com "my-service" is invalid: spec.balanceInboundConnections: Invalid value: "not_exact_balance": BalanceInboundConnections must be an empty string or exact_balance`, - }, - "envoyExtension.arguments (single empty)": { - input: &ServiceDefaults{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-service", - }, - Spec: ServiceDefaultsSpec{ - EnvoyExtensions: EnvoyExtensions{ - EnvoyExtension{ - Name: "aws_request_signing", - Arguments: json.RawMessage(`{"AWSServiceName": "s3", "Region": "us-west-2"}`), - Required: false, - }, - EnvoyExtension{ - Name: "zipkin", - Arguments: nil, - Required: true, - }, - }, - }, - }, - expectedErrMsg: `servicedefaults.consul.hashicorp.com "my-service" is invalid: spec.envoyExtensions.envoyExtension[1].arguments: Required value: arguments must be defined`, - }, - "envoyExtension.arguments (multi empty)": { - input: &ServiceDefaults{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-service", - }, - Spec: ServiceDefaultsSpec{ - EnvoyExtensions: EnvoyExtensions{ - EnvoyExtension{ - Name: "aws_request_signing", - Arguments: nil, - Required: false, - }, - EnvoyExtension{ - Name: "aws_request_signing", - Arguments: nil, - Required: false, - }, - }, - }, - }, - expectedErrMsg: `servicedefaults.consul.hashicorp.com "my-service" is invalid: [spec.envoyExtensions.envoyExtension[0].arguments: Required value: arguments must be defined, spec.envoyExtensions.envoyExtension[1].arguments: Required value: arguments must be defined]`, - }, - "envoyExtension.arguments (invalid json)": { - input: &ServiceDefaults{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-service", - }, - Spec: ServiceDefaultsSpec{ - EnvoyExtensions: EnvoyExtensions{ - EnvoyExtension{ - Name: "aws_request_signing", - Arguments: json.RawMessage(`{"SOME_INVALID_JSON"}`), - Required: false, - }, - }, - }, - }, - expectedErrMsg: `servicedefaults.consul.hashicorp.com "my-service" is invalid: spec.envoyExtensions.envoyExtension[0].arguments: Invalid value: "{\"SOME_INVALID_JSON\"}": must be valid map value: invalid character '}' after object key`, - }, } for name, testCase := range cases { diff --git a/control-plane/api/v1alpha1/servicedefaults_webhook.go b/control-plane/api/v1alpha1/servicedefaults_webhook.go index f79e68bcde..a196a6d941 100644 --- a/control-plane/api/v1alpha1/servicedefaults_webhook.go +++ b/control-plane/api/v1alpha1/servicedefaults_webhook.go @@ -6,6 +6,7 @@ import ( "github.com/go-logr/logr" "github.com/hashicorp/consul-k8s/control-plane/api/common" + capi "github.com/hashicorp/consul/api" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" ) @@ -13,7 +14,8 @@ import ( // +kubebuilder:object:generate=false type ServiceDefaultsWebhook struct { - 
Logger logr.Logger + ConsulClient *capi.Client + Logger logr.Logger // ConsulMeta contains metadata specific to the Consul installation. ConsulMeta common.ConsulMeta diff --git a/control-plane/api/v1alpha1/serviceintentions_webhook.go b/control-plane/api/v1alpha1/serviceintentions_webhook.go index ddc6488690..0287ddfeb8 100644 --- a/control-plane/api/v1alpha1/serviceintentions_webhook.go +++ b/control-plane/api/v1alpha1/serviceintentions_webhook.go @@ -8,6 +8,7 @@ import ( "github.com/go-logr/logr" "github.com/hashicorp/consul-k8s/control-plane/api/common" + capi "github.com/hashicorp/consul/api" admissionv1 "k8s.io/api/admission/v1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" @@ -17,9 +18,10 @@ import ( type ServiceIntentionsWebhook struct { client.Client - Logger logr.Logger - decoder *admission.Decoder - ConsulMeta common.ConsulMeta + ConsulClient *capi.Client + Logger logr.Logger + decoder *admission.Decoder + ConsulMeta common.ConsulMeta } // NOTE: The path value in the below line is the path to the webhook. diff --git a/control-plane/api/v1alpha1/serviceintentions_webhook_test.go b/control-plane/api/v1alpha1/serviceintentions_webhook_test.go index e6095e8351..17b1881c5a 100644 --- a/control-plane/api/v1alpha1/serviceintentions_webhook_test.go +++ b/control-plane/api/v1alpha1/serviceintentions_webhook_test.go @@ -249,9 +249,10 @@ func TestHandle_ServiceIntentions_Create(t *testing.T) { require.NoError(t, err) validator := &ServiceIntentionsWebhook{ - Client: client, - Logger: logrtest.TestLogger{T: t}, - decoder: decoder, + Client: client, + ConsulClient: nil, + Logger: logrtest.TestLogger{T: t}, + decoder: decoder, ConsulMeta: common.ConsulMeta{ NamespacesEnabled: true, Mirroring: c.mirror, @@ -438,9 +439,10 @@ func TestHandle_ServiceIntentions_Update(t *testing.T) { require.NoError(t, err) validator := &ServiceIntentionsWebhook{ - Client: client, - Logger: logrtest.TestLogger{T: t}, - decoder: decoder, + Client: client, + ConsulClient: nil, + Logger: logrtest.TestLogger{T: t}, + decoder: decoder, ConsulMeta: common.ConsulMeta{ NamespacesEnabled: true, Mirroring: c.mirror, @@ -598,9 +600,10 @@ func TestHandle_ServiceIntentions_Patches(t *testing.T) { require.NoError(t, err) validator := &ServiceIntentionsWebhook{ - Client: client, - Logger: logrtest.TestLogger{T: t}, - decoder: decoder, + Client: client, + ConsulClient: nil, + Logger: logrtest.TestLogger{T: t}, + decoder: decoder, ConsulMeta: common.ConsulMeta{ NamespacesEnabled: namespacesEnabled, Mirroring: true, diff --git a/control-plane/api/v1alpha1/serviceresolver_types.go b/control-plane/api/v1alpha1/serviceresolver_types.go index 4fc637b35f..2d5fc1e8c8 100644 --- a/control-plane/api/v1alpha1/serviceresolver_types.go +++ b/control-plane/api/v1alpha1/serviceresolver_types.go @@ -91,9 +91,6 @@ type ServiceResolverRedirect struct { // Datacenter is the datacenter to resolve the service from instead of the // current one. Datacenter string `json:"datacenter,omitempty"` - // Peer is the name of the cluster peer to resolve the service from instead - // of the current one. - Peer string `json:"peer,omitempty"` } type ServiceResolverSubsetMap map[string]ServiceResolverSubset @@ -126,23 +123,6 @@ type ServiceResolverFailover struct { Namespace string `json:"namespace,omitempty"` // Datacenters is a fixed list of datacenters to try during failover. Datacenters []string `json:"datacenters,omitempty"` - // Targets specifies a fixed list of failover targets to try during failover. 
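// Aside: the Targets field and the per-target struct are removed just below,
// so a failover entry must set at least one of the remaining fields; the
// validate method later in this hunk enforces exactly that. An illustrative
// fragment that passes the check (the "*" wildcard key and the values are
// assumptions, not taken from this diff):
package main

import "github.com/hashicorp/consul-k8s/control-plane/api/v1alpha1"

func exampleFailover() v1alpha1.ServiceResolverFailoverMap {
	return v1alpha1.ServiceResolverFailoverMap{
		// An entry with every field empty fails with: "service, serviceSubset,
		// namespace and datacenters cannot all be empty at once".
		"*": {
			Service:     "backup",
			Datacenters: []string{"dc2", "dc3"},
		},
	}
}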
- Targets []ServiceResolverFailoverTarget `json:"targets,omitempty"` -} - -type ServiceResolverFailoverTarget struct { - // Service specifies the name of the service to try during failover. - Service string `json:"service,omitempty"` - // ServiceSubset specifies the service subset to try during failover. - ServiceSubset string `json:"serviceSubset,omitempty"` - // Partition specifies the partition to try during failover. - Partition string `json:"partition,omitempty"` - // Namespace specifies the namespace to try during failover. - Namespace string `json:"namespace,omitempty"` - // Datacenter specifies the datacenter to try during failover. - Datacenter string `json:"datacenter,omitempty"` - // Peer specifies the name of the cluster peer to try during failover. - Peer string `json:"peer,omitempty"` } type LoadBalancer struct { @@ -367,8 +347,6 @@ func (in *ServiceResolverRedirect) toConsul() *capi.ServiceResolverRedirect { ServiceSubset: in.ServiceSubset, Namespace: in.Namespace, Datacenter: in.Datacenter, - Partition: in.Partition, - Peer: in.Peer, } } @@ -384,28 +362,11 @@ func (in ServiceResolverFailoverMap) toConsul() map[string]capi.ServiceResolverF } func (in ServiceResolverFailover) toConsul() capi.ServiceResolverFailover { - var targets []capi.ServiceResolverFailoverTarget - for _, target := range in.Targets { - targets = append(targets, target.toConsul()) - } - return capi.ServiceResolverFailover{ Service: in.Service, ServiceSubset: in.ServiceSubset, Namespace: in.Namespace, Datacenters: in.Datacenters, - Targets: targets, - } -} - -func (in ServiceResolverFailoverTarget) toConsul() capi.ServiceResolverFailoverTarget { - return capi.ServiceResolverFailoverTarget{ - Service: in.Service, - ServiceSubset: in.ServiceSubset, - Namespace: in.Namespace, - Partition: in.Partition, - Datacenter: in.Datacenter, - Peer: in.Peer, } } @@ -503,16 +464,12 @@ func (in *ServiceResolver) validateEnterprise(consulMeta common.ConsulMeta) fiel return errs } -func (in *ServiceResolverFailover) isEmpty() bool { - return in.Service == "" && in.ServiceSubset == "" && in.Namespace == "" && len(in.Datacenters) == 0 && len(in.Targets) == 0 -} - func (in *ServiceResolverFailover) validate(path *field.Path) *field.Error { - if in.isEmpty() { + if in.Service == "" && in.ServiceSubset == "" && in.Namespace == "" && len(in.Datacenters) == 0 { // NOTE: We're passing "{}" here as our value because we know that the // error is we have an empty object. 
return field.Invalid(path, "{}", - "service, serviceSubset, namespace, datacenters, and targets cannot all be empty at once") + "service, serviceSubset, namespace and datacenters cannot all be empty at once") } return nil } diff --git a/control-plane/api/v1alpha1/serviceresolver_types_test.go b/control-plane/api/v1alpha1/serviceresolver_types_test.go index fd4fc25a60..44b838cc50 100644 --- a/control-plane/api/v1alpha1/serviceresolver_types_test.go +++ b/control-plane/api/v1alpha1/serviceresolver_types_test.go @@ -59,7 +59,6 @@ func TestServiceResolver_MatchesConsul(t *testing.T) { ServiceSubset: "redirect_subset", Namespace: "redirect_namespace", Datacenter: "redirect_datacenter", - Peer: "redirect_peer", }, Failover: map[string]ServiceResolverFailover{ "failover1": { @@ -74,12 +73,6 @@ func TestServiceResolver_MatchesConsul(t *testing.T) { Namespace: "failover_namespace2", Datacenters: []string{"failover2_dc1", "failover2_dc2"}, }, - "failover3": { - Targets: []ServiceResolverFailoverTarget{ - {Peer: "failover_peer3"}, - {Partition: "failover_partition3", Namespace: "failover_namespace3"}, - }, - }, }, ConnectTimeout: metav1.Duration{Duration: 1 * time.Second}, LoadBalancer: &LoadBalancer{ @@ -126,7 +119,6 @@ func TestServiceResolver_MatchesConsul(t *testing.T) { ServiceSubset: "redirect_subset", Namespace: "redirect_namespace", Datacenter: "redirect_datacenter", - Peer: "redirect_peer", }, Failover: map[string]capi.ServiceResolverFailover{ "failover1": { @@ -141,12 +133,6 @@ func TestServiceResolver_MatchesConsul(t *testing.T) { Namespace: "failover_namespace2", Datacenters: []string{"failover2_dc1", "failover2_dc2"}, }, - "failover3": { - Targets: []capi.ServiceResolverFailoverTarget{ - {Peer: "failover_peer3"}, - {Partition: "failover_partition3", Namespace: "failover_namespace3"}, - }, - }, }, ConnectTimeout: 1 * time.Second, LoadBalancer: &capi.LoadBalancer{ @@ -242,7 +228,6 @@ func TestServiceResolver_ToConsul(t *testing.T) { ServiceSubset: "redirect_subset", Namespace: "redirect_namespace", Datacenter: "redirect_datacenter", - Partition: "redirect_partition", }, Failover: map[string]ServiceResolverFailover{ "failover1": { @@ -257,12 +242,6 @@ func TestServiceResolver_ToConsul(t *testing.T) { Namespace: "failover_namespace2", Datacenters: []string{"failover2_dc1", "failover2_dc2"}, }, - "failover3": { - Targets: []ServiceResolverFailoverTarget{ - {Peer: "failover_peer3"}, - {Partition: "failover_partition3", Namespace: "failover_namespace3"}, - }, - }, }, ConnectTimeout: metav1.Duration{Duration: 1 * time.Second}, LoadBalancer: &LoadBalancer{ @@ -309,7 +288,6 @@ func TestServiceResolver_ToConsul(t *testing.T) { ServiceSubset: "redirect_subset", Namespace: "redirect_namespace", Datacenter: "redirect_datacenter", - Partition: "redirect_partition", }, Failover: map[string]capi.ServiceResolverFailover{ "failover1": { @@ -324,12 +302,6 @@ func TestServiceResolver_ToConsul(t *testing.T) { Namespace: "failover_namespace2", Datacenters: []string{"failover2_dc1", "failover2_dc2"}, }, - "failover3": { - Targets: []capi.ServiceResolverFailoverTarget{ - {Peer: "failover_peer3"}, - {Partition: "failover_partition3", Namespace: "failover_namespace3"}, - }, - }, }, ConnectTimeout: 1 * time.Second, LoadBalancer: &capi.LoadBalancer{ @@ -595,8 +567,8 @@ func TestServiceResolver_Validate(t *testing.T) { }, namespacesEnabled: false, expectedErrMsgs: []string{ - "spec.failover[failA]: Invalid value: \"{}\": service, serviceSubset, namespace, datacenters, and targets cannot all be empty at once", - 
"spec.failover[failB]: Invalid value: \"{}\": service, serviceSubset, namespace, datacenters, and targets cannot all be empty at once", + "spec.failover[failA]: Invalid value: \"{}\": service, serviceSubset, namespace and datacenters cannot all be empty at once", + "spec.failover[failB]: Invalid value: \"{}\": service, serviceSubset, namespace and datacenters cannot all be empty at once", }, }, "hashPolicy.field invalid": { diff --git a/control-plane/api/v1alpha1/serviceresolver_webhook.go b/control-plane/api/v1alpha1/serviceresolver_webhook.go index ca5f9d9482..1af2fa0383 100644 --- a/control-plane/api/v1alpha1/serviceresolver_webhook.go +++ b/control-plane/api/v1alpha1/serviceresolver_webhook.go @@ -6,6 +6,7 @@ import ( "github.com/go-logr/logr" "github.com/hashicorp/consul-k8s/control-plane/api/common" + capi "github.com/hashicorp/consul/api" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" ) @@ -13,7 +14,8 @@ import ( // +kubebuilder:object:generate=false type ServiceResolverWebhook struct { - Logger logr.Logger + ConsulClient *capi.Client + Logger logr.Logger // ConsulMeta contains metadata specific to the Consul installation. ConsulMeta common.ConsulMeta diff --git a/control-plane/api/v1alpha1/servicerouter_webhook.go b/control-plane/api/v1alpha1/servicerouter_webhook.go index f6837fcf7b..03644432e6 100644 --- a/control-plane/api/v1alpha1/servicerouter_webhook.go +++ b/control-plane/api/v1alpha1/servicerouter_webhook.go @@ -6,6 +6,7 @@ import ( "github.com/go-logr/logr" "github.com/hashicorp/consul-k8s/control-plane/api/common" + capi "github.com/hashicorp/consul/api" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" ) @@ -13,7 +14,8 @@ import ( // +kubebuilder:object:generate=false type ServiceRouterWebhook struct { - Logger logr.Logger + ConsulClient *capi.Client + Logger logr.Logger // ConsulMeta contains metadata specific to the Consul installation. ConsulMeta common.ConsulMeta diff --git a/control-plane/api/v1alpha1/servicesplitter_webhook.go b/control-plane/api/v1alpha1/servicesplitter_webhook.go index c0020c88b8..f90c49f45a 100644 --- a/control-plane/api/v1alpha1/servicesplitter_webhook.go +++ b/control-plane/api/v1alpha1/servicesplitter_webhook.go @@ -6,6 +6,7 @@ import ( "github.com/go-logr/logr" "github.com/hashicorp/consul-k8s/control-plane/api/common" + capi "github.com/hashicorp/consul/api" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" ) @@ -13,7 +14,8 @@ import ( // +kubebuilder:object:generate=false type ServiceSplitterWebhook struct { - Logger logr.Logger + ConsulClient *capi.Client + Logger logr.Logger // ConsulMeta contains metadata specific to the Consul installation. ConsulMeta common.ConsulMeta diff --git a/control-plane/api/v1alpha1/shared_types.go b/control-plane/api/v1alpha1/shared_types.go index edcaba5a46..9b884cf476 100644 --- a/control-plane/api/v1alpha1/shared_types.go +++ b/control-plane/api/v1alpha1/shared_types.go @@ -1,7 +1,6 @@ package v1alpha1 import ( - "encoding/json" "fmt" "strings" @@ -80,19 +79,6 @@ type HTTPHeaderModifiers struct { Remove []string `json:"remove,omitempty"` } -// EnvoyExtension has configuration for an extension that patches Envoy resources. 
-type EnvoyExtension struct { - Name string `json:"name,omitempty"` - Required bool `json:"required,omitempty"` - // +kubebuilder:validation:Type=object - // +kubebuilder:validation:Schemaless - // +kubebuilder:pruning:PreserveUnknownFields - Arguments json.RawMessage `json:"arguments,omitempty"` -} - -// EnvoyExtensions represents a list of the EnvoyExtension configuration. -type EnvoyExtensions []EnvoyExtension - func (in MeshGateway) toConsul() capi.MeshGatewayConfig { mode := capi.MeshGatewayMode(in.Mode) switch mode { @@ -190,58 +176,6 @@ func (in *HTTPHeaderModifiers) toConsul() *capi.HTTPHeaderModifiers { } } -func (in EnvoyExtensions) toConsul() []capi.EnvoyExtension { - if in == nil { - return nil - } - - outConfig := make([]capi.EnvoyExtension, 0) - - for _, e := range in { - consulExtension := capi.EnvoyExtension{ - Name: e.Name, - Required: e.Required, - } - - // We already validate that arguments is present - var args map[string]interface{} - _ = json.Unmarshal(e.Arguments, &args) - consulExtension.Arguments = args - outConfig = append(outConfig, consulExtension) - } - - return outConfig -} - -func (in EnvoyExtensions) validate(path *field.Path) field.ErrorList { - if len(in) == 0 { - return nil - } - - var errs field.ErrorList - for i, e := range in { - if err := e.validate(path.Child("envoyExtension").Index(i)); err != nil { - errs = append(errs, err) - } - } - - return errs -} - -func (in EnvoyExtension) validate(path *field.Path) *field.Error { - // Validate that the arguments are not nil - if in.Arguments == nil { - err := field.Required(path.Child("arguments"), "arguments must be defined") - return err - } - // Validate that the arguments are valid json - var outConfig map[string]interface{} - if err := json.Unmarshal(in.Arguments, &outConfig); err != nil { - return field.Invalid(path.Child("arguments"), string(in.Arguments), fmt.Sprintf(`must be valid map value: %s`, err)) - } - return nil -} - func notInSliceMessage(slice []string) string { return fmt.Sprintf(`must be one of "%s"`, strings.Join(slice, `", "`)) } diff --git a/control-plane/api/v1alpha1/terminatinggateway_webhook.go b/control-plane/api/v1alpha1/terminatinggateway_webhook.go index b0427b87ca..2d3367fcaa 100644 --- a/control-plane/api/v1alpha1/terminatinggateway_webhook.go +++ b/control-plane/api/v1alpha1/terminatinggateway_webhook.go @@ -6,6 +6,7 @@ import ( "github.com/go-logr/logr" "github.com/hashicorp/consul-k8s/control-plane/api/common" + capi "github.com/hashicorp/consul/api" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" ) @@ -13,7 +14,8 @@ import ( // +kubebuilder:object:generate=false type TerminatingGatewayWebhook struct { - Logger logr.Logger + ConsulClient *capi.Client + Logger logr.Logger // ConsulMeta contains metadata specific to the Consul installation. ConsulMeta common.ConsulMeta diff --git a/control-plane/api/v1alpha1/zz_generated.deepcopy.go b/control-plane/api/v1alpha1/zz_generated.deepcopy.go index d12db29d14..0ad0cbf254 100644 --- a/control-plane/api/v1alpha1/zz_generated.deepcopy.go +++ b/control-plane/api/v1alpha1/zz_generated.deepcopy.go @@ -10,21 +10,6 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AccessLogs) DeepCopyInto(out *AccessLogs) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessLogs. 
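// Aside: the deleted EnvoyExtensions.toConsul above could ignore the
// json.Unmarshal error because validate had already required Arguments to be
// present and to parse as a JSON object. A compact sketch of that
// RawMessage-to-map conversion; names and values are illustrative.
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	args := json.RawMessage(`{"AWSServiceName": "s3", "Region": "us-west-2"}`)
	var m map[string]interface{}
	if err := json.Unmarshal(args, &m); err != nil {
		panic(err) // validate() would have rejected this input earlier
	}
	fmt.Println(m["AWSServiceName"], m["Region"]) // s3 us-west-2
}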
-func (in *AccessLogs) DeepCopy() *AccessLogs { - if in == nil { - return nil - } - out := new(AccessLogs) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Condition) DeepCopyInto(out *Condition) { *out = *in @@ -78,47 +63,6 @@ func (in *CookieConfig) DeepCopy() *CookieConfig { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EnvoyExtension) DeepCopyInto(out *EnvoyExtension) { - *out = *in - if in.Arguments != nil { - in, out := &in.Arguments, &out.Arguments - *out = make(json.RawMessage, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvoyExtension. -func (in *EnvoyExtension) DeepCopy() *EnvoyExtension { - if in == nil { - return nil - } - out := new(EnvoyExtension) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in EnvoyExtensions) DeepCopyInto(out *EnvoyExtensions) { - { - in := &in - *out = make(EnvoyExtensions, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvoyExtensions. -func (in EnvoyExtensions) DeepCopy() EnvoyExtensions { - if in == nil { - return nil - } - out := new(EnvoyExtensions) - in.DeepCopyInto(out) - return *out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ExportedService) DeepCopyInto(out *ExportedService) { *out = *in @@ -853,11 +797,6 @@ func (in *MeshSpec) DeepCopyInto(out *MeshSpec) { *out = new(MeshHTTPConfig) **out = **in } - if in.Peering != nil { - in, out := &in.Peering, &out.Peering - *out = new(PeeringMeshConfig) - **out = **in - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshSpec. @@ -1166,21 +1105,6 @@ func (in *PeeringDialerStatus) DeepCopy() *PeeringDialerStatus { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PeeringMeshConfig) DeepCopyInto(out *PeeringMeshConfig) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PeeringMeshConfig. -func (in *PeeringMeshConfig) DeepCopy() *PeeringMeshConfig { - if in == nil { - return nil - } - out := new(PeeringMeshConfig) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ProxyDefaults) DeepCopyInto(out *ProxyDefaults) { *out = *in @@ -1260,18 +1184,6 @@ func (in *ProxyDefaultsSpec) DeepCopyInto(out *ProxyDefaultsSpec) { } out.MeshGateway = in.MeshGateway in.Expose.DeepCopyInto(&out.Expose) - if in.AccessLogs != nil { - in, out := &in.AccessLogs, &out.AccessLogs - *out = new(AccessLogs) - **out = **in - } - if in.EnvoyExtensions != nil { - in, out := &in.EnvoyExtensions, &out.EnvoyExtensions - *out = make(EnvoyExtensions, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyDefaultsSpec. 
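// Aside: the generated EnvoyExtension DeepCopyInto removed above copies the
// Arguments byte slice instead of assigning it, because json.RawMessage is a
// []byte and plain assignment would alias the backing array. A runnable
// sketch of the difference:
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	src := json.RawMessage(`{"a":1}`)

	shallow := src // aliases src's bytes

	deep := make(json.RawMessage, len(src))
	copy(deep, src) // what the generated DeepCopyInto did

	src[2] = 'X'                 // mutate the original
	fmt.Println(string(shallow)) // {"X":1} -- sees the mutation
	fmt.Println(string(deep))    // {"a":1} -- isolated copy
}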
@@ -1449,13 +1361,6 @@ func (in *ServiceDefaultsSpec) DeepCopyInto(out *ServiceDefaultsSpec) { *out = new(ServiceDefaultsDestination) (*in).DeepCopyInto(*out) } - if in.EnvoyExtensions != nil { - in, out := &in.EnvoyExtensions, &out.EnvoyExtensions - *out = make(EnvoyExtensions, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceDefaultsSpec. @@ -1589,11 +1494,6 @@ func (in *ServiceResolverFailover) DeepCopyInto(out *ServiceResolverFailover) { *out = make([]string, len(*in)) copy(*out, *in) } - if in.Targets != nil { - in, out := &in.Targets, &out.Targets - *out = make([]ServiceResolverFailoverTarget, len(*in)) - copy(*out, *in) - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceResolverFailover. @@ -1627,21 +1527,6 @@ func (in ServiceResolverFailoverMap) DeepCopy() ServiceResolverFailoverMap { return *out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceResolverFailoverTarget) DeepCopyInto(out *ServiceResolverFailoverTarget) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceResolverFailoverTarget. -func (in *ServiceResolverFailoverTarget) DeepCopy() *ServiceResolverFailoverTarget { - if in == nil { - return nil - } - out := new(ServiceResolverFailoverTarget) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ServiceResolverList) DeepCopyInto(out *ServiceResolverList) { *out = *in diff --git a/control-plane/build-support/functions/10-util.sh b/control-plane/build-support/functions/10-util.sh index b807d35397..90cfd9660e 100644 --- a/control-plane/build-support/functions/10-util.sh +++ b/control-plane/build-support/functions/10-util.sh @@ -643,7 +643,6 @@ function update_version_helm { # $1 - Path to the directory where the root of the Helm chart is # $2 - Version string # $3 - PreRelease version (if unset will become an empty string) - # $4 - Image base path # # Returns: # 0 - success @@ -670,10 +669,11 @@ function update_version_helm { then full_version="$2-$3" fi + local image_k8s="hashicorp\/consul-k8s-control-plane:$full_version" - sed_i ${SED_EXT} -e "s/(imageK8S:.*\/consul-k8s-control-plane:)[^\"]*/imageK8S: $4${full_version}/g" "${vfile}" + sed_i ${SED_EXT} -e "s/(imageK8S:[[:space:]]*hashicorp\/consul-k8s-control-plane:)[^\"]*/\1${full_version}/g" "${vfile}" sed_i ${SED_EXT} -e "s/(version:[[:space:]]*)[^\"]*/\1${full_version}/g" "${cfile}" - sed_i ${SED_EXT} -e "s/(image:.*\/consul-k8s-control-plane:)[^\"]*/image: $4${full_version}/g" "${cfile}" + sed_i ${SED_EXT} -e "s/(image:[[:space:]]*hashicorp\/consul-k8s-control-plane:)[^\"]*/\1${full_version}/g" "${cfile}" if test -z "$3" then @@ -778,7 +778,6 @@ function set_version { # $2 - The version of the release # $3 - The release date # $4 - The pre-release version - # $5 - The helm docker image base path # # # Returns: @@ -815,7 +814,7 @@ function set_version { fi status_stage "==> Updating Helm chart versions with version info: ${vers} "$4"" - if ! update_version_helm "${sdir}/charts/consul" "${vers}" "$4" "$5" + if ! 
update_version_helm "${sdir}/charts/consul" "${vers}" "$4" then unset_changelog_version "${sdir}" return 1 @@ -864,7 +863,7 @@ function prepare_release { # 0 - success # * - error echo "release version: " $1 $2 $3 $4 - set_version "$1" "$2" "$3" "$4" "hashicorp\/consul-k8s-control-plane:" + set_version "$1" "$2" "$3" "$4" set_changelog "$1" "$2" "$3" "$4" } @@ -885,7 +884,7 @@ function prepare_dev { local sdir="$1" set_changelog "$1" "$2" "$3" "$5" - set_version "$1" "$4" "$3" "dev" "docker.mirror.hashicorp.services\/hashicorppreview\/consul-k8s-control-plane:" + set_version "$1" "$4" "$3" "dev" status_stage "==> Adding new UNRELEASED label in CHANGELOG.md" add_unreleased_to_changelog "${sdir}" || return 1 diff --git a/control-plane/build-support/scripts/terraformfmtcheck.sh b/control-plane/build-support/scripts/terraformfmtcheck.sh deleted file mode 100755 index 0f962a1c1b..0000000000 --- a/control-plane/build-support/scripts/terraformfmtcheck.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env bash - -# Check terraform fmt -echo "==> Checking that code complies with terraform fmt requirements..." -tffmt_files=$(terraform fmt -check -recursive "$1") -if [[ -n ${tffmt_files} ]]; then - echo 'terraform fmt needs to be run on the following files:' - echo "${tffmt_files}" - echo "You can use the command: \`make terraform-fmt\` to reformat all terraform code." - exit 1 -fi - -echo "==> Check code compile completed successfully" -exit 0 \ No newline at end of file diff --git a/control-plane/catalog/to-consul/consul_node_services_client.go b/control-plane/catalog/to-consul/consul_node_services_client.go new file mode 100644 index 0000000000..1e3d01003f --- /dev/null +++ b/control-plane/catalog/to-consul/consul_node_services_client.go @@ -0,0 +1,114 @@ +package catalog + +import ( + "fmt" + + "github.com/hashicorp/consul/api" +) + +// ConsulService is service registered in Consul. +type ConsulService struct { + // Namespace is the Consul namespace the service is registered in. + // If namespaces are disabled this will always be the empty string even + // though the namespace is technically "default". + Namespace string + // Name is the name of the service in Consul. + Name string +} + +// ConsulNodeServicesClient is used to query for node services. +type ConsulNodeServicesClient interface { + // NodeServices returns consul services with the corresponding tag + // registered to the Consul node with nodeName. opts is used as the + // query options in the API call to consul. It returns the list of services + // (not service instances) and the query meta from the API call. + NodeServices(tag string, nodeName string, opts api.QueryOptions) ([]ConsulService, *api.QueryMeta, error) +} + +// PreNamespacesNodeServicesClient implements ConsulNodeServicesClient +// for Consul < 1.7 which does not support namespaces. +type PreNamespacesNodeServicesClient struct { + Client *api.Client +} + +// NodeServices returns Consul services tagged with +// tag registered on nodeName using a Consul API that is supported in +// Consul versions before 1.7. Consul versions after 1.7 still support +// this API but the API is not namespace-aware. +func (s *PreNamespacesNodeServicesClient) NodeServices( + tag string, + nodeName string, + opts api.QueryOptions) ([]ConsulService, *api.QueryMeta, error) { + // NOTE: We're not using tag filtering here so we can support Consul + // < 1.5. 
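+	// (Sketch-level note, not from the original change: Catalog().Node
+	// returns every service instance registered on the node, so the tag
+	// match below happens client-side.)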
+	node, meta, err := s.Client.Catalog().Node(nodeName, &opts)
+	if err != nil {
+		return nil, nil, err
+	}
+	if node == nil {
+		return nil, meta, nil
+	}
+
+	var svcs []ConsulService
+	// seenServices is used to ensure the svcs list is unique.
+	seenServices := make(map[string]bool)
+	for _, svcInstance := range node.Services {
+		svcName := svcInstance.Service
+		if _, ok := seenServices[svcName]; ok {
+			continue
+		}
+		for _, svcTag := range svcInstance.Tags {
+			if svcTag == tag {
+				if _, ok := seenServices[svcName]; !ok {
+					svcs = append(svcs, ConsulService{
+						// If namespaces are not enabled we use empty
+						// string.
+						Namespace: "",
+						Name:      svcName,
+					})
+					seenServices[svcName] = true
+				}
+				break
+			}
+		}
+	}
+	return svcs, meta, nil
+}
+
+// NamespacesNodeServicesClient implements ConsulNodeServicesClient
+// for Consul >= 1.7 which supports namespaces.
+type NamespacesNodeServicesClient struct {
+	Client *api.Client
+}
+
+// NodeServices returns Consul services tagged with
+// tag registered on nodeName using a Consul API that is supported in
+// Consul versions >= 1.7. If opts.Namespace is set to
+// "*", services from all namespaces will be returned.
+func (s *NamespacesNodeServicesClient) NodeServices(
+	tag string,
+	nodeName string,
+	opts api.QueryOptions) ([]ConsulService, *api.QueryMeta, error) {
+	opts.Filter = fmt.Sprintf("\"%s\" in Tags", tag)
+	nodeCatalog, meta, err := s.Client.Catalog().NodeServiceList(nodeName, &opts)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var svcs []ConsulService
+	// seenSvcs is used to ensure the svcs list is unique. Its keys are
+	// namespace/name.
+	seenSvcs := make(map[string]bool)
+	for _, svcInstance := range nodeCatalog.Services {
+		svcName := svcInstance.Service
+		key := fmt.Sprintf("%s/%s", svcInstance.Namespace, svcName)
+		if _, ok := seenSvcs[key]; !ok {
+			svcs = append(svcs, ConsulService{
+				Namespace: svcInstance.Namespace,
+				Name:      svcName,
+			})
+			seenSvcs[key] = true
+		}
+	}
+	return svcs, meta, nil
+}
diff --git a/control-plane/catalog/to-consul/consul_node_services_client_ent_test.go b/control-plane/catalog/to-consul/consul_node_services_client_ent_test.go
new file mode 100644
index 0000000000..ac570948f5
--- /dev/null
+++ b/control-plane/catalog/to-consul/consul_node_services_client_ent_test.go
@@ -0,0 +1,362 @@
+//go:build enterprise
+
+package catalog
+
+import (
+	"testing"
+
+	"github.com/hashicorp/consul/api"
+	"github.com/hashicorp/consul/sdk/testutil"
+	"github.com/stretchr/testify/require"
+)
+
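The two NodeServices implementations above hide the version split behind one interface. As a sketch only (this helper is hypothetical and not part of the change), selecting between them would look like:

package catalog

import "github.com/hashicorp/consul/api"

// newNodeServicesClient is illustrative: choose the namespace-aware
// client on Consul >= 1.7, otherwise fall back to the client that
// matches tags against the plain node catalog.
func newNodeServicesClient(client *api.Client, enableNamespaces bool) ConsulNodeServicesClient {
	if enableNamespaces {
		// Server-side filtering via Catalog().NodeServiceList + Filter.
		return &NamespacesNodeServicesClient{Client: client}
	}
	// Catalog().Node plus the client-side tag loop shown above.
	return &PreNamespacesNodeServicesClient{Client: client}
}

+// Test the Consul 1.7 client against Consul Enterprise.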
+func TestNamespacesNodeServicesClient_NodeServices(t *testing.T) { + t.Parallel() + cases := map[string]struct { + ConsulServices []api.CatalogRegistration + Exp []ConsulService + }{ + "no services": { + ConsulServices: nil, + Exp: nil, + }, + "no services on k8s node": { + ConsulServices: []api.CatalogRegistration{ + { + Node: "not-k8s", + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-id", + Service: "svc", + }, + }, + }, + Exp: nil, + }, + "service with k8s tag on different node": { + ConsulServices: []api.CatalogRegistration{ + { + Node: "not-k8s", + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-id", + Service: "svc", + Tags: []string{"k8s"}, + }, + }, + }, + Exp: nil, + }, + "service on k8s node without any tags": { + ConsulServices: []api.CatalogRegistration{ + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-id", + Service: "svc", + Tags: nil, + }, + }, + }, + Exp: nil, + }, + "service on k8s node without k8s tag": { + ConsulServices: []api.CatalogRegistration{ + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-id", + Service: "svc", + Tags: []string{"not-k8s", "foo"}, + }, + }, + }, + Exp: nil, + }, + "service on k8s node with k8s tag": { + ConsulServices: []api.CatalogRegistration{ + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-id", + Service: "svc", + Tags: []string{"k8s"}, + }, + }, + }, + Exp: []ConsulService{ + { + Namespace: "default", + Name: "svc", + }, + }, + }, + "multiple services": { + ConsulServices: []api.CatalogRegistration{ + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc1-id", + Service: "svc1", + Tags: []string{"k8s"}, + }, + }, + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc2-id2", + Service: "svc2", + Tags: []string{"k8s"}, + }, + }, + }, + Exp: []ConsulService{ + { + Namespace: "default", + Name: "svc1", + }, + { + Namespace: "default", + Name: "svc2", + }, + }, + }, + "multiple service instances": { + ConsulServices: []api.CatalogRegistration{ + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-id1", + Service: "svc", + Tags: []string{"k8s"}, + }, + }, + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-id2", + Service: "svc", + Tags: []string{"k8s"}, + }, + }, + }, + Exp: []ConsulService{ + { + Namespace: "default", + Name: "svc", + }, + }, + }, + "services across multiple namespaces": { + ConsulServices: []api.CatalogRegistration{ + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-id1", + Service: "svc", + Tags: []string{"k8s"}, + }, + }, + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-ns-id", + Service: "svc-ns", + Tags: []string{"k8s"}, + Namespace: "ns", + }, + }, + }, + Exp: []ConsulService{ + { + Namespace: "default", + Name: "svc", + }, + { + Namespace: "ns", + Name: "svc-ns", + }, + }, + }, + "services with same name across multiple namespaces": { + ConsulServices: []api.CatalogRegistration{ + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-id", + Service: "svc", + Tags: []string{"k8s"}, + }, + }, + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-id", + Service: "svc", + Tags: []string{"k8s"}, + Namespace: 
"ns", + }, + }, + }, + Exp: []ConsulService{ + { + Namespace: "default", + Name: "svc", + }, + { + Namespace: "ns", + Name: "svc", + }, + }, + }, + "multiple services across multiple namespaces": { + ConsulServices: []api.CatalogRegistration{ + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-id1", + Service: "svc", + Tags: []string{"k8s"}, + }, + }, + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-id2", + Service: "svc", + Tags: []string{"k8s"}, + }, + }, + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-id1", + Service: "svc", + Tags: []string{"k8s"}, + Namespace: "ns", + }, + }, + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc-id2", + Service: "svc", + Tags: []string{"k8s"}, + Namespace: "ns", + }, + }, + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc2-id1", + Service: "svc2", + Tags: []string{"k8s"}, + }, + }, + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc2-id2", + Service: "svc2", + Tags: []string{"k8s"}, + }, + }, + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc2-id1", + Service: "svc2", + Tags: []string{"k8s"}, + Namespace: "ns", + }, + }, + { + Node: ConsulSyncNodeName, + Address: "127.0.0.1", + Service: &api.AgentService{ + ID: "svc2-id2", + Service: "svc2", + Tags: []string{"k8s"}, + Namespace: "ns", + }, + }, + }, + Exp: []ConsulService{ + { + Namespace: "default", + Name: "svc", + }, + { + Namespace: "default", + Name: "svc2", + }, + { + Namespace: "ns", + Name: "svc", + }, + { + Namespace: "ns", + Name: "svc2", + }, + }, + }, + } + + for name, c := range cases { + if name != "multiple services across multiple namespaces" { + continue + } + t.Run(name, func(tt *testing.T) { + require := require.New(tt) + svr, err := testutil.NewTestServerConfigT(tt, nil) + require.NoError(err) + defer svr.Stop() + + consulClient, err := api.NewClient(&api.Config{ + Address: svr.HTTPAddr, + }) + require.NoError(err) + for _, registration := range c.ConsulServices { + if registration.Service.Namespace != "" && registration.Service.Namespace != "default" { + _, _, err = consulClient.Namespaces().Create(&api.Namespace{ + Name: registration.Service.Namespace, + }, nil) + require.NoError(err) + } + _, err = consulClient.Catalog().Register(®istration, nil) + require.NoError(err) + } + + client := NamespacesNodeServicesClient{ + Client: consulClient, + } + svcs, _, err := client.NodeServices("k8s", ConsulSyncNodeName, api.QueryOptions{ + Namespace: "*", + }) + require.NoError(err) + require.Len(svcs, len(c.Exp)) + for _, expSvc := range c.Exp { + require.Contains(svcs, expSvc) + } + }) + } +} diff --git a/control-plane/catalog/to-consul/consul_node_services_client_test.go b/control-plane/catalog/to-consul/consul_node_services_client_test.go new file mode 100644 index 0000000000..83354e640a --- /dev/null +++ b/control-plane/catalog/to-consul/consul_node_services_client_test.go @@ -0,0 +1,184 @@ +package catalog + +import ( + "testing" + + "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/sdk/testutil" + "github.com/stretchr/testify/require" +) + +func TestPreNamespacesNodeServicesClient_NodeServices(t *testing.T) { + t.Parallel() + cases := map[string]struct { + ConsulServices []api.CatalogRegistration + Exp []ConsulService + }{ + "no services": { + 
+			ConsulServices: nil,
+			Exp:            nil,
+		},
+		"no services on k8s node": {
+			ConsulServices: []api.CatalogRegistration{
+				{
+					Node:    "not-k8s",
+					Address: "127.0.0.1",
+					Service: &api.AgentService{
+						ID:      "svc-id",
+						Service: "svc",
+					},
+				},
+			},
+			Exp: nil,
+		},
+		"service with k8s tag on different node": {
+			ConsulServices: []api.CatalogRegistration{
+				{
+					Node:    "not-k8s",
+					Address: "127.0.0.1",
+					Service: &api.AgentService{
+						ID:      "svc-id",
+						Service: "svc",
+						Tags:    []string{"k8s"},
+					},
+				},
+			},
+			Exp: nil,
+		},
+		"service on k8s node without any tags": {
+			ConsulServices: []api.CatalogRegistration{
+				{
+					Node:    ConsulSyncNodeName,
+					Address: "127.0.0.1",
+					Service: &api.AgentService{
+						ID:      "svc-id",
+						Service: "svc",
+						Tags:    nil,
+					},
+				},
+			},
+			Exp: nil,
+		},
+		"service on k8s node without k8s tag": {
+			ConsulServices: []api.CatalogRegistration{
+				{
+					Node:    ConsulSyncNodeName,
+					Address: "127.0.0.1",
+					Service: &api.AgentService{
+						ID:      "svc-id",
+						Service: "svc",
+						Tags:    []string{"not-k8s", "foo"},
+					},
+				},
+			},
+			Exp: nil,
+		},
+		"service on k8s node with k8s tag": {
+			ConsulServices: []api.CatalogRegistration{
+				{
+					Node:    ConsulSyncNodeName,
+					Address: "127.0.0.1",
+					Service: &api.AgentService{
+						ID:      "svc-id",
+						Service: "svc",
+						Tags:    []string{"k8s"},
+					},
+				},
+			},
+			Exp: []ConsulService{
+				{
+					Namespace: "",
+					Name:      "svc",
+				},
+			},
+		},
+		"multiple services": {
+			ConsulServices: []api.CatalogRegistration{
+				{
+					Node:    ConsulSyncNodeName,
+					Address: "127.0.0.1",
+					Service: &api.AgentService{
+						ID:      "svc1-id",
+						Service: "svc1",
+						Tags:    []string{"k8s"},
+					},
+				},
+				{
+					Node:    ConsulSyncNodeName,
+					Address: "127.0.0.1",
+					Service: &api.AgentService{
+						ID:      "svc2-id2",
+						Service: "svc2",
+						Tags:    []string{"k8s"},
+					},
+				},
+			},
+			Exp: []ConsulService{
+				{
+					Namespace: "",
+					Name:      "svc1",
+				},
+				{
+					Namespace: "",
+					Name:      "svc2",
+				},
+			},
+		},
+		"multiple service instances": {
+			ConsulServices: []api.CatalogRegistration{
+				{
+					Node:    ConsulSyncNodeName,
+					Address: "127.0.0.1",
+					Service: &api.AgentService{
+						ID:      "svc-id1",
+						Service: "svc",
+						Tags:    []string{"k8s"},
+					},
+				},
+				{
+					Node:    ConsulSyncNodeName,
+					Address: "127.0.0.1",
+					Service: &api.AgentService{
+						ID:      "svc-id2",
+						Service: "svc",
+						Tags:    []string{"k8s"},
+					},
+				},
+			},
+			Exp: []ConsulService{
+				{
+					Namespace: "",
+					Name:      "svc",
+				},
+			},
+		},
+	}
+
+	for name, c := range cases {
+		t.Run(name, func(tt *testing.T) {
+			require := require.New(tt)
+			svr, err := testutil.NewTestServerConfigT(tt, nil)
+			require.NoError(err)
+			defer svr.Stop()
+
+			consulClient, err := api.NewClient(&api.Config{
+				Address: svr.HTTPAddr,
+			})
+			require.NoError(err)
+			for _, registration := range c.ConsulServices {
+				_, err = consulClient.Catalog().Register(&registration, nil)
+				require.NoError(err)
+			}
+
+			client := PreNamespacesNodeServicesClient{
+				Client: consulClient,
+			}
+			svcs, _, err := client.NodeServices("k8s", ConsulSyncNodeName, api.QueryOptions{})
+			require.NoError(err)
+			require.Len(svcs, len(c.Exp))
+			for _, expSvc := range c.Exp {
+				require.Contains(svcs, expSvc)
+			}
+		})
+	}
+}
diff --git a/control-plane/catalog/to-consul/resource.go b/control-plane/catalog/to-consul/resource.go
index 09d8aa6c5d..a8ba6d20bd 100644
--- a/control-plane/catalog/to-consul/resource.go
+++ b/control-plane/catalog/to-consul/resource.go
@@ -499,7 +499,7 @@ func (t *ServiceResource) generateRegistrations(key string) {
 			continue
 		}
 
-		if _, ok = seen[addr]; ok {
+		if _, ok := seen[addr]; ok {
 			continue
 		}
 		seen[addr] = struct{}{}
diff --git 
a/control-plane/catalog/to-consul/syncer.go b/control-plane/catalog/to-consul/syncer.go index 19e0aaca6f..2e2edad61a 100644 --- a/control-plane/catalog/to-consul/syncer.go +++ b/control-plane/catalog/to-consul/syncer.go @@ -2,13 +2,11 @@ package catalog import ( "context" - "fmt" "sync" "time" "github.com/cenkalti/backoff" mapset "github.com/deckarep/golang-set" - "github.com/hashicorp/consul-k8s/control-plane/consul" "github.com/hashicorp/consul-k8s/control-plane/namespaces" "github.com/hashicorp/consul/api" "github.com/hashicorp/go-hclog" @@ -38,12 +36,8 @@ type Syncer interface { // services and ensures the local set of registrations represents the // source of truth, overwriting any external changes to the services. type ConsulSyncer struct { - // ConsulClientConfig is the config for the Consul API client. - ConsulClientConfig *consul.Config - // ConsulServerConnMgr is the watcher for the Consul server addresses. - ConsulServerConnMgr consul.ServerConnectionManager - - Log hclog.Logger + Client *api.Client + Log hclog.Logger // EnableNamespaces indicates that a user is running Consul Enterprise // with version 1.7+ which is namespace aware. It enables Consul namespaces, @@ -75,6 +69,10 @@ type ConsulSyncer struct { // The Consul node name to register services with. ConsulNodeName string + // ConsulNodeServicesClient is used to list services for a node. We use a + // separate client for this API call that handles older version of Consul. + ConsulNodeServicesClient ConsulNodeServicesClient + lock sync.Mutex once sync.Once @@ -174,7 +172,6 @@ func (s *ConsulSyncer) watchReapableServices(ctx context.Context) { AllowStale: true, WaitIndex: 1, WaitTime: 1 * time.Minute, - Filter: fmt.Sprintf("\"%s\" in Tags", s.ConsulK8STag), } if s.EnableNamespaces { @@ -186,17 +183,11 @@ func (s *ConsulSyncer) watchReapableServices(ctx context.Context) { minWait := s.SyncPeriod / 4 minWaitCh := time.After(0) for { - // Create a new consul client. - consulClient, err := consul.NewClientFromConnMgr(s.ConsulClientConfig, s.ConsulServerConnMgr) - if err != nil { - s.Log.Error("failed to create Consul API client", "err", err) - return - } - - var services *api.CatalogNodeServiceList + var services []ConsulService var meta *api.QueryMeta - err = backoff.Retry(func() error { - services, meta, err = consulClient.Catalog().NodeServiceList(s.ConsulNodeName, opts) + err := backoff.Retry(func() error { + var err error + services, meta, err = s.ConsulNodeServicesClient.NodeServices(s.ConsulK8STag, s.ConsulNodeName, *opts) return err }, backoff.WithContext(backoff.NewExponentialBackOff(), ctx)) @@ -226,30 +217,25 @@ func (s *ConsulSyncer) watchReapableServices(ctx context.Context) { s.lock.Lock() // Go through the service array and find services that should be reaped - for _, service := range services.Services { + for _, service := range services { // Check that the namespace exists in the valid service names map // before checking whether it contains the service - svcNs := service.Namespace - if !s.EnableNamespaces { - // Set namespace to empty when namespaces are not enabled. - svcNs = "" - } - if _, ok := s.serviceNames[svcNs]; ok { + if _, ok := s.serviceNames[service.Namespace]; ok { // We only care if we don't know about this service at all. 
- if s.serviceNames[svcNs].Contains(service.Service) { + if s.serviceNames[service.Namespace].Contains(service.Name) { s.Log.Debug("[watchReapableServices] serviceNames contains service", - "namespace", svcNs, - "service-name", service.Service) + "namespace", service.Namespace, + "service-name", service.Name) continue } } s.Log.Info("invalid service found, scheduling for delete", - "service-name", service.Service, "service-id", service.ID, "service-consul-namespace", svcNs) - if err = s.scheduleReapServiceLocked(service.Service, svcNs); err != nil { + "service-name", service.Name, "service-consul-namespace", service.Namespace) + if err := s.scheduleReapServiceLocked(service.Name, service.Namespace); err != nil { s.Log.Info("error querying service for delete", - "service-name", service.Service, - "service-consul-namespace", svcNs, + "service-name", service.Name, + "service-consul-namespace", service.Namespace, "err", err) } } @@ -283,16 +269,11 @@ func (s *ConsulSyncer) watchService(ctx context.Context, name, namespace string) queryOpts.Namespace = namespace } - // Create a new consul client. - consulClient, err := consul.NewClientFromConnMgr(s.ConsulClientConfig, s.ConsulServerConnMgr) - if err != nil { - s.Log.Error("failed to create Consul API client; will retry", "err", err) - continue - } // Wait for service changes var services []*api.CatalogService - err = backoff.Retry(func() error { - services, _, err = consulClient.Catalog().Service(name, s.ConsulK8STag, queryOpts) + err := backoff.Retry(func() error { + var err error + services, _, err = s.Client.Catalog().Service(name, s.ConsulK8STag, queryOpts) return err }, backoff.WithContext(backoff.NewExponentialBackOff(), ctx)) if err != nil { @@ -344,15 +325,8 @@ func (s *ConsulSyncer) scheduleReapServiceLocked(name, namespace string) error { opts.Namespace = namespace } - // Create a new consul client. - consulClient, err := consul.NewClientFromConnMgr(s.ConsulClientConfig, s.ConsulServerConnMgr) - if err != nil { - s.Log.Error("failed to create Consul API client", "err", err) - return err - } - // Only consider services that are tagged from k8s - services, _, err := consulClient.Catalog().Service(name, s.ConsulK8STag, &opts) + services, _, err := s.Client.Catalog().Service(name, s.ConsulK8STag, &opts) if err != nil { return err } @@ -383,13 +357,6 @@ func (s *ConsulSyncer) syncFull(ctx context.Context) { s.lock.Lock() defer s.lock.Unlock() - // Create a new consul client. - consulClient, err := consul.NewClientFromConnMgr(s.ConsulClientConfig, s.ConsulServerConnMgr) - if err != nil { - s.Log.Error("failed to create Consul API client", "err", err) - return - } - s.Log.Info("registering services") // Update the service watchers @@ -424,13 +391,13 @@ func (s *ConsulSyncer) syncFull(ctx context.Context) { } } - // Do all deregistrations first. 
+ // Do all deregistrations first for _, r := range s.deregs { s.Log.Info("deregistering service", "node-name", r.Node, "service-id", r.ServiceID, "service-consul-namespace", r.Namespace) - _, err = consulClient.Catalog().Deregister(r, nil) + _, err := s.Client.Catalog().Deregister(r, nil) if err != nil { s.Log.Warn("error deregistering service", "node-name", r.Node, @@ -448,7 +415,7 @@ func (s *ConsulSyncer) syncFull(ctx context.Context) { for _, services := range s.namespaces { for _, r := range services { if s.EnableNamespaces { - _, err = namespaces.EnsureExists(consulClient, r.Service.Namespace, s.CrossNamespaceACLPolicy) + _, err := namespaces.EnsureExists(s.Client, r.Service.Namespace, s.CrossNamespaceACLPolicy) if err != nil { s.Log.Warn("error checking and creating Consul namespace", "node-name", r.Node, @@ -459,8 +426,8 @@ func (s *ConsulSyncer) syncFull(ctx context.Context) { } } - // Register the service. - _, err = consulClient.Catalog().Register(r, nil) + // Register the service + _, err := s.Client.Catalog().Register(r, nil) if err != nil { s.Log.Warn("error registering service", "node-name", r.Node, diff --git a/control-plane/catalog/to-consul/syncer_ent_test.go b/control-plane/catalog/to-consul/syncer_ent_test.go index fbe2cbd494..2cc206f908 100644 --- a/control-plane/catalog/to-consul/syncer_ent_test.go +++ b/control-plane/catalog/to-consul/syncer_ent_test.go @@ -5,8 +5,8 @@ package catalog import ( "testing" - "github.com/hashicorp/consul-k8s/control-plane/helper/test" "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/stretchr/testify/require" ) @@ -15,11 +15,20 @@ import ( func TestConsulSyncer_ConsulNamespaces(t *testing.T) { t.Parallel() - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - client := testClient.APIClient + a, err := testutil.NewTestServerConfigT(t, nil) + require.NoError(t, err) + defer a.Stop() + + client, err := api.NewClient(&api.Config{ + Address: a.HTTPAddr, + }) + require.NoError(t, err) - s, closer := testConsulSyncerWithConfig(testClient, func(s *ConsulSyncer) { + s, closer := testConsulSyncerWithConfig(client, func(s *ConsulSyncer) { s.EnableNamespaces = true + s.ConsulNodeServicesClient = &NamespacesNodeServicesClient{ + Client: client, + } }) defer closer() @@ -57,11 +66,20 @@ func TestConsulSyncer_ConsulNamespaces(t *testing.T) { func TestConsulSyncer_ReapConsulNamespace(t *testing.T) { t.Parallel() - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - client := testClient.APIClient + a, err := testutil.NewTestServerConfigT(t, nil) + require.NoError(t, err) + defer a.Stop() - s, closer := testConsulSyncerWithConfig(testClient, func(s *ConsulSyncer) { + client, err := api.NewClient(&api.Config{ + Address: a.HTTPAddr, + }) + require.NoError(t, err) + + s, closer := testConsulSyncerWithConfig(client, func(s *ConsulSyncer) { s.EnableNamespaces = true + s.ConsulNodeServicesClient = &NamespacesNodeServicesClient{ + Client: client, + } }) defer closer() @@ -117,11 +135,18 @@ func TestConsulSyncer_ReapConsulNamespace(t *testing.T) { func TestConsulSyncer_reapServiceInstanceNamespacesEnabled(t *testing.T) { t.Parallel() - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - client := testClient.APIClient + a, err := testutil.NewTestServerConfigT(t, nil) + require.NoError(t, err) + defer a.Stop() - s, closer := testConsulSyncerWithConfig(testClient, func(s *ConsulSyncer) { + client, err := api.NewClient(&api.Config{ + Address: 
a.HTTPAddr, + }) + s, closer := testConsulSyncerWithConfig(client, func(s *ConsulSyncer) { s.EnableNamespaces = true + s.ConsulNodeServicesClient = &NamespacesNodeServicesClient{ + Client: client, + } }) defer closer() @@ -132,7 +157,7 @@ func TestConsulSyncer_reapServiceInstanceNamespacesEnabled(t *testing.T) { }) // Create an invalid instance service directly in Consul. - _, _, err := client.Namespaces().Create(&api.Namespace{ + _, _, err = client.Namespaces().Create(&api.Namespace{ Name: "foo", }, nil) require.NoError(t, err) diff --git a/control-plane/catalog/to-consul/syncer_test.go b/control-plane/catalog/to-consul/syncer_test.go index d8d9b0f402..f42f6fee46 100644 --- a/control-plane/catalog/to-consul/syncer_test.go +++ b/control-plane/catalog/to-consul/syncer_test.go @@ -5,14 +5,11 @@ import ( "fmt" "net/http" "net/http/httptest" - "net/url" - "strconv" "testing" "time" - "github.com/hashicorp/consul-k8s/control-plane/consul" - "github.com/hashicorp/consul-k8s/control-plane/helper/test" "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/hashicorp/go-hclog" "github.com/stretchr/testify/require" @@ -26,13 +23,19 @@ const ( func TestConsulSyncer_register(t *testing.T) { t.Parallel() + require := require.New(t) // Set up server, client, syncer - // Create test consulServer server. - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - client := testClient.APIClient + a, err := testutil.NewTestServerConfigT(t, nil) + require.NoError(err) + defer a.Stop() - s, closer := testConsulSyncer(testClient) + client, err := api.NewClient(&api.Config{ + Address: a.HTTPAddr, + }) + require.NoError(err) + + s, closer := testConsulSyncer(client) defer closer() // Sync @@ -54,9 +57,9 @@ func TestConsulSyncer_register(t *testing.T) { }) // Verify the settings - require.Equal(t, "k8s-sync", service.Node) - require.Equal(t, "bar", service.ServiceName) - require.Equal(t, "127.0.0.1", service.Address) + require.Equal("k8s-sync", service.Node) + require.Equal("bar", service.ServiceName) + require.Equal("127.0.0.1", service.Address) } // Test that the syncer reaps individual invalid service instances. 
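Throughout these syncer and source test hunks, the revert replaces the mock connection-manager harness with the same inline setup: start a throwaway Consul test server, then point a plain api.Client at its HTTP address. Condensed into a hypothetical helper (the diff itself repeats this in each test):

package catalog

import (
	"testing"

	"github.com/hashicorp/consul/api"
	"github.com/hashicorp/consul/sdk/testutil"
	"github.com/stretchr/testify/require"
)

// newTestServerClient is a hypothetical condensation of the setup the
// reverted tests inline; it is not part of this change.
func newTestServerClient(t *testing.T) *api.Client {
	svr, err := testutil.NewTestServerConfigT(t, nil)
	require.NoError(t, err)
	t.Cleanup(func() { _ = svr.Stop() })

	client, err := api.NewClient(&api.Config{Address: svr.HTTPAddr})
	require.NoError(t, err)
	return client
}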
@@ -66,11 +69,19 @@ func TestConsulSyncer_reapServiceInstance(t *testing.T) { for _, node := range []string{ConsulSyncNodeName, "test-node"} { name := fmt.Sprintf("consul node name: %s", node) t.Run(name, func(t *testing.T) { + require := require.New(t) + // Set up server, client, syncer - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - client := testClient.APIClient + a, err := testutil.NewTestServerConfigT(t, nil) + require.NoError(err) + defer a.Stop() - s, closer := testConsulSyncer(testClient) + client, err := api.NewClient(&api.Config{ + Address: a.HTTPAddr, + }) + require.NoError(err) + + s, closer := testConsulSyncer(client) defer closer() // Sync @@ -80,7 +91,7 @@ func TestConsulSyncer_reapServiceInstance(t *testing.T) { // Wait for the first service retry.Run(t, func(r *retry.R) { - services, _, err := client.Catalog().Service("bar", s.ConsulK8STag, nil) + services, _, err := client.Catalog().Service("bar", "", nil) if err != nil { r.Fatalf("err: %s", err) } @@ -92,14 +103,13 @@ func TestConsulSyncer_reapServiceInstance(t *testing.T) { // Create an invalid service directly in Consul svc := testRegistration(node, "bar", "default") svc.Service.ID = serviceID(node, "bar2") - fmt.Println("invalid service id", svc.Service.ID) - _, err := client.Catalog().Register(svc, nil) - require.NoError(t, err) + _, err = client.Catalog().Register(svc, nil) + require.NoError(err) // Valid service should exist var service *api.CatalogService retry.Run(t, func(r *retry.R) { - services, _, err := client.Catalog().Service("bar", s.ConsulK8STag, nil) + services, _, err := client.Catalog().Service("bar", "", nil) if err != nil { r.Fatalf("err: %s", err) } @@ -110,10 +120,10 @@ func TestConsulSyncer_reapServiceInstance(t *testing.T) { }) // Verify the settings - require.Equal(t, serviceID(node, "bar"), service.ServiceID) - require.Equal(t, node, service.Node) - require.Equal(t, "bar", service.ServiceName) - require.Equal(t, "127.0.0.1", service.Address) + require.Equal(serviceID(node, "bar"), service.ServiceID) + require.Equal(node, service.Node) + require.Equal("bar", service.ServiceName) + require.Equal("127.0.0.1", service.Address) }) } } @@ -126,10 +136,17 @@ func TestConsulSyncer_reapService(t *testing.T) { sourceK8sNamespaceAnnotations := []string{"", "other", "default"} for _, k8sNS := range sourceK8sNamespaceAnnotations { t.Run(k8sNS, func(tt *testing.T) { - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - client := testClient.APIClient + // Set up server, client, syncer + a, err := testutil.NewTestServerConfigT(tt, nil) + require.NoError(tt, err) + defer a.Stop() - s, closer := testConsulSyncer(testClient) + client, err := api.NewClient(&api.Config{ + Address: a.HTTPAddr, + }) + require.NoError(tt, err) + + s, closer := testConsulSyncer(client) defer closer() // Run the sync with a test service @@ -141,7 +158,7 @@ func TestConsulSyncer_reapService(t *testing.T) { // expect it to be deleted. 
svc := testRegistration(ConsulSyncNodeName, "baz", "default") svc.Service.Meta[ConsulK8SNS] = k8sNS - _, err := client.Catalog().Register(svc, nil) + _, err = client.Catalog().Register(svc, nil) require.NoError(tt, err) retry.Run(tt, func(r *retry.R) { @@ -168,9 +185,14 @@ func TestConsulSyncer_reapService(t *testing.T) { func TestConsulSyncer_noReapingUntilInitialSync(t *testing.T) { t.Parallel() - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - client := testClient.APIClient - s, closer := testConsulSyncerWithConfig(testClient, func(s *ConsulSyncer) { + a, err := testutil.NewTestServerConfigT(t, nil) + require.NoError(t, err) + defer a.Stop() + client, err := api.NewClient(&api.Config{ + Address: a.HTTPAddr, + }) + require.NoError(t, err) + s, closer := testConsulSyncerWithConfig(client, func(s *ConsulSyncer) { // Set the sync period to 5ms so we know it will have run at least once // after we wait 100ms. s.SyncPeriod = 5 * time.Millisecond @@ -181,7 +203,7 @@ func TestConsulSyncer_noReapingUntilInitialSync(t *testing.T) { // synthetic sync node and has the sync-associated tag, we expect // it to be deleted but not until the initial sync is performed. svc := testRegistration(ConsulSyncNodeName, "baz", "default") - _, err := client.Catalog().Register(svc, nil) + _, err = client.Catalog().Register(svc, nil) require.NoError(t, err) // We wait until the syncer has had the time to delete the service. @@ -198,7 +220,7 @@ func TestConsulSyncer_noReapingUntilInitialSync(t *testing.T) { s.Sync(nil) // The service should get deleted. retry.Run(t, func(r *retry.R) { - bazInstances, _, err = client.Catalog().Service("baz", "", nil) + bazInstances, _, err := client.Catalog().Service("baz", "", nil) require.NoError(r, err) require.Len(r, bazInstances, 0) }) @@ -222,19 +244,12 @@ func TestConsulSyncer_stopsGracefully(t *testing.T) { })) defer consulServer.Close() - parsedURL, err := url.Parse(consulServer.URL) - require.NoError(t, err) - - port, err := strconv.Atoi(parsedURL.Port()) - require.NoError(t, err) - - testClient := &test.TestServerClient{ - Cfg: &consul.Config{APIClientConfig: &api.Config{}, HTTPPort: port}, - Watcher: test.MockConnMgrForIPAndPort(parsedURL.Host, port), - } - // Start the syncer. - s, closer := testConsulSyncer(testClient) + client, err := api.NewClient(&api.Config{ + Address: consulServer.URL, + }) + require.NoError(t, err) + s, closer := testConsulSyncer(client) // Sync s.Sync([]*api.CatalogRegistration{ @@ -267,21 +282,23 @@ func testRegistration(node, service, k8sSrcNamespace string) *api.CatalogRegistr } } -func testConsulSyncer(testClient *test.TestServerClient) (*ConsulSyncer, func()) { - return testConsulSyncerWithConfig(testClient, func(syncer *ConsulSyncer) {}) +func testConsulSyncer(client *api.Client) (*ConsulSyncer, func()) { + return testConsulSyncerWithConfig(client, func(syncer *ConsulSyncer) {}) } // testConsulSyncerWithConfig starts a consul syncer that can be configured // prior to starting via the configurator method. 
-func testConsulSyncerWithConfig(testClient *test.TestServerClient, configurator func(*ConsulSyncer)) (*ConsulSyncer, func()) { +func testConsulSyncerWithConfig(client *api.Client, configurator func(*ConsulSyncer)) (*ConsulSyncer, func()) { s := &ConsulSyncer{ - ConsulClientConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, - Log: hclog.Default(), - SyncPeriod: 200 * time.Millisecond, - ServicePollPeriod: 50 * time.Millisecond, - ConsulK8STag: TestConsulK8STag, - ConsulNodeName: ConsulSyncNodeName, + Client: client, + Log: hclog.Default(), + SyncPeriod: 200 * time.Millisecond, + ServicePollPeriod: 50 * time.Millisecond, + ConsulK8STag: TestConsulK8STag, + ConsulNodeName: ConsulSyncNodeName, + ConsulNodeServicesClient: &PreNamespacesNodeServicesClient{ + Client: client, + }, } configurator(s) s.init() diff --git a/control-plane/catalog/to-k8s/source.go b/control-plane/catalog/to-k8s/source.go index 5a384e760a..410dbc60da 100644 --- a/control-plane/catalog/to-k8s/source.go +++ b/control-plane/catalog/to-k8s/source.go @@ -6,7 +6,6 @@ import ( "time" "github.com/cenkalti/backoff" - "github.com/hashicorp/consul-k8s/control-plane/consul" "github.com/hashicorp/consul/api" "github.com/hashicorp/go-hclog" ) @@ -14,15 +13,12 @@ import ( // Source is the source for the sync that watches Consul services and // updates a Sink whenever the set of services to register changes. type Source struct { - // ConsulClientConfig is the config for the Consul API client. - ConsulClientConfig *consul.Config - // ConsulServerConnMgr is the watcher for the Consul server addresses. - ConsulServerConnMgr consul.ServerConnectionManager - Domain string // Consul DNS domain - Sink Sink // Sink is the sink to update with services - Prefix string // Prefix is a prefix to prepend to services - Log hclog.Logger // Logger - ConsulK8STag string // The tag value for services registered + Client *api.Client // Consul API client + Domain string // Consul DNS domain + Sink Sink // Sink is the sink to update with services + Prefix string // Prefix is a prefix to prepend to services + Log hclog.Logger // Logger + ConsulK8STag string // The tag value for services registered } // Run is the long-running runloop for watching Consul services and @@ -34,17 +30,12 @@ func (s *Source) Run(ctx context.Context) { WaitTime: 1 * time.Minute, }).WithContext(ctx) for { - consulClient, err := consul.NewClientFromConnMgr(s.ConsulClientConfig, s.ConsulServerConnMgr) - if err != nil { - s.Log.Error("failed to create Consul API client", "err", err) - return - } - // Get all services with tags. 
var serviceMap map[string][]string var meta *api.QueryMeta - err = backoff.Retry(func() error { - serviceMap, meta, err = consulClient.Catalog().Services(opts) + err := backoff.Retry(func() error { + var err error + serviceMap, meta, err = s.Client.Catalog().Services(opts) return err }, backoff.WithContext(backoff.NewExponentialBackOff(), ctx)) diff --git a/control-plane/catalog/to-k8s/source_test.go b/control-plane/catalog/to-k8s/source_test.go index ca00a1e954..d3ed4a8a26 100644 --- a/control-plane/catalog/to-k8s/source_test.go +++ b/control-plane/catalog/to-k8s/source_test.go @@ -6,9 +6,8 @@ import ( "testing" toconsul "github.com/hashicorp/consul-k8s/control-plane/catalog/to-consul" - "github.com/hashicorp/consul-k8s/control-plane/consul" - "github.com/hashicorp/consul-k8s/control-plane/helper/test" "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/hashicorp/go-hclog" "github.com/stretchr/testify/require" @@ -17,20 +16,27 @@ import ( // Test that the source works with services registered before hand. func TestSource_initServices(t *testing.T) { t.Parallel() + require := require.New(t) // Set up server, client - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - client := testClient.APIClient + a, err := testutil.NewTestServerConfigT(t, nil) + require.NoError(err) + defer a.Stop() + + client, err := api.NewClient(&api.Config{ + Address: a.HTTPAddr, + }) + require.NoError(err) // Create services before the source is running - _, err := client.Catalog().Register(testRegistration("hostA", "svcA", nil), nil) - require.NoError(t, err) + _, err = client.Catalog().Register(testRegistration("hostA", "svcA", nil), nil) + require.NoError(err) _, err = client.Catalog().Register(testRegistration("hostB", "svcA", nil), nil) - require.NoError(t, err) + require.NoError(err) _, err = client.Catalog().Register(testRegistration("hostB", "svcB", nil), nil) - require.NoError(t, err) + require.NoError(err) - _, sink, closer := testSource(testClient.Cfg, testClient.Watcher) + _, sink, closer := testSource(client) defer closer() var actual map[string]string @@ -48,29 +54,36 @@ func TestSource_initServices(t *testing.T) { "svcA": "svcA.service.test", "svcB": "svcB.service.test", } - require.Equal(t, expected, actual) + require.Equal(expected, actual) } // Test that we can specify a prefix to prepend to all destination services. 
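The source.go hunk above reverts Source.Run to a single long-lived api.Client, but the polling strategy is unchanged: Catalog().Services is a blocking query, re-armed each pass with the index from the previous response. A standalone sketch of that loop (hypothetical watchServices helper, with simplified error handling in place of the cenkalti/backoff retry the real code uses):

package main

import (
	"log"
	"time"

	"github.com/hashicorp/consul/api"
)

// watchServices is a simplified stand-in for Source.Run: it long-polls
// the catalog and reports each time the service map changes.
func watchServices(client *api.Client) {
	opts := &api.QueryOptions{WaitIndex: 1, WaitTime: 1 * time.Minute}
	for {
		services, meta, err := client.Catalog().Services(opts)
		if err != nil {
			time.Sleep(time.Second) // real code retries with backoff
			continue
		}
		// Reusing the returned index parks the next call server-side
		// until the catalog actually changes.
		opts.WaitIndex = meta.LastIndex
		log.Printf("catalog changed: %d services", len(services))
	}
}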
func TestSource_prefix(t *testing.T) { t.Parallel() + require := require.New(t) // Set up server, client - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - client := testClient.APIClient + a, err := testutil.NewTestServerConfigT(t, nil) + require.NoError(err) + defer a.Stop() - _, sink, closer := testSourceWithConfig(testClient.Cfg, testClient.Watcher, func(s *Source) { + client, err := api.NewClient(&api.Config{ + Address: a.HTTPAddr, + }) + require.NoError(err) + + _, sink, closer := testSourceWithConfig(client, func(s *Source) { s.Prefix = "foo-" }) defer closer() // Create services before the source is running - _, err := client.Catalog().Register(testRegistration("hostA", "svcA", nil), nil) - require.NoError(t, err) + _, err = client.Catalog().Register(testRegistration("hostA", "svcA", nil), nil) + require.NoError(err) _, err = client.Catalog().Register(testRegistration("hostB", "svcA", nil), nil) - require.NoError(t, err) + require.NoError(err) _, err = client.Catalog().Register(testRegistration("hostB", "svcB", nil), nil) - require.NoError(t, err) + require.NoError(err) var actual map[string]string retry.Run(t, func(r *retry.R) { @@ -87,26 +100,33 @@ func TestSource_prefix(t *testing.T) { "foo-svcA": "svcA.service.test", "foo-svcB": "svcB.service.test", } - require.Equal(t, expected, actual) + require.Equal(expected, actual) } // Test that the source ignores K8S services. func TestSource_ignoreK8S(t *testing.T) { t.Parallel() + require := require.New(t) // Set up server, client - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - client := testClient.APIClient + a, err := testutil.NewTestServerConfigT(t, nil) + require.NoError(err) + defer a.Stop() + + client, err := api.NewClient(&api.Config{ + Address: a.HTTPAddr, + }) + require.NoError(err) // Create services before the source is running - _, err := client.Catalog().Register(testRegistration("hostA", "svcA", nil), nil) - require.NoError(t, err) + _, err = client.Catalog().Register(testRegistration("hostA", "svcA", nil), nil) + require.NoError(err) _, err = client.Catalog().Register(testRegistration("hostB", "svcA", nil), nil) - require.NoError(t, err) + require.NoError(err) _, err = client.Catalog().Register(testRegistration("hostB", "svcB", []string{toconsul.TestConsulK8STag}), nil) - require.NoError(t, err) + require.NoError(err) - _, sink, closer := testSource(testClient.Cfg, testClient.Watcher) + _, sink, closer := testSource(client) defer closer() var actual map[string]string @@ -123,27 +143,34 @@ func TestSource_ignoreK8S(t *testing.T) { "consul": "consul.service.test", "svcA": "svcA.service.test", } - require.Equal(t, expected, actual) + require.Equal(expected, actual) } // Test that the source deletes services properly. 
func TestSource_deleteService(t *testing.T) { // Unable to be run in parallel with other tests that // check for the existence of `consul.service.test` + require := require.New(t) // Set up server, client - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - client := testClient.APIClient + a, err := testutil.NewTestServerConfigT(t, nil) + require.NoError(err) + defer a.Stop() + + client, err := api.NewClient(&api.Config{ + Address: a.HTTPAddr, + }) + require.NoError(err) // Create services before the source is running - _, err := client.Catalog().Register(testRegistration("hostA", "svcA", nil), nil) - require.NoError(t, err) + _, err = client.Catalog().Register(testRegistration("hostA", "svcA", nil), nil) + require.NoError(err) _, err = client.Catalog().Register(testRegistration("hostB", "svcA", nil), nil) - require.NoError(t, err) + require.NoError(err) _, err = client.Catalog().Register(testRegistration("hostB", "svcB", nil), nil) - require.NoError(t, err) + require.NoError(err) - _, sink, closer := testSource(testClient.Cfg, testClient.Watcher) + _, sink, closer := testSource(client) defer closer() var actual map[string]string @@ -159,7 +186,7 @@ func TestSource_deleteService(t *testing.T) { // Delete the service _, err = client.Catalog().Deregister(&api.CatalogDeregistration{ Node: "hostB", ServiceID: "svcB"}, nil) - require.NoError(t, err) + require.NoError(err) retry.Run(t, func(r *retry.R) { sink.Lock() @@ -176,7 +203,7 @@ func TestSource_deleteService(t *testing.T) { "consul": "consul.service.test", "svcA": "svcA.service.test", } - require.Equal(t, expected, actual) + require.Equal(expected, actual) } // Test that the source deletes services properly. This case tests @@ -184,20 +211,27 @@ func TestSource_deleteService(t *testing.T) { // anything. func TestSource_deleteServiceInstance(t *testing.T) { t.Parallel() + require := require.New(t) // Set up server, client - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - client := testClient.APIClient + a, err := testutil.NewTestServerConfigT(t, nil) + require.NoError(err) + defer a.Stop() + + client, err := api.NewClient(&api.Config{ + Address: a.HTTPAddr, + }) + require.NoError(err) // Create services before the source is running - _, err := client.Catalog().Register(testRegistration("hostA", "svcA", nil), nil) - require.NoError(t, err) + _, err = client.Catalog().Register(testRegistration("hostA", "svcA", nil), nil) + require.NoError(err) _, err = client.Catalog().Register(testRegistration("hostB", "svcA", nil), nil) - require.NoError(t, err) + require.NoError(err) _, err = client.Catalog().Register(testRegistration("hostB", "svcB", nil), nil) - require.NoError(t, err) + require.NoError(err) - _, sink, closer := testSource(testClient.Cfg, testClient.Watcher) + _, sink, closer := testSource(client) defer closer() var actual map[string]string @@ -213,7 +247,7 @@ func TestSource_deleteServiceInstance(t *testing.T) { // Delete the service _, err = client.Catalog().Deregister(&api.CatalogDeregistration{ Node: "hostB", ServiceID: "svcA"}, nil) - require.NoError(t, err) + require.NoError(err) retry.Run(t, func(r *retry.R) { sink.Lock() @@ -238,21 +272,20 @@ func testRegistration(node, service string, tags []string) *api.CatalogRegistrat } // testSource creates a Source and Sink for testing. 
-func testSource(clientCfg *consul.Config, connMgr consul.ServerConnectionManager) (*Source, *TestSink, func()) { - return testSourceWithConfig(clientCfg, connMgr, func(source *Source) {}) +func testSource(client *api.Client) (*Source, *TestSink, func()) { + return testSourceWithConfig(client, func(source *Source) {}) } // testSourceWithConfig starts a Source that can be configured // prior to starting via the configurator method. -func testSourceWithConfig(clientCfg *consul.Config, connMgr consul.ServerConnectionManager, configurator func(*Source)) (*Source, *TestSink, func()) { +func testSourceWithConfig(client *api.Client, configurator func(*Source)) (*Source, *TestSink, func()) { sink := &TestSink{} s := &Source{ - ConsulClientConfig: clientCfg, - ConsulServerConnMgr: connMgr, - Domain: "test", - Sink: sink, - Log: hclog.Default(), - ConsulK8STag: toconsul.TestConsulK8STag, + Client: client, + Domain: "test", + Sink: sink, + Log: hclog.Default(), + ConsulK8STag: toconsul.TestConsulK8STag, } configurator(s) diff --git a/control-plane/cni/go.mod b/control-plane/cni/go.mod index c7820438cb..660b720d43 100644 --- a/control-plane/cni/go.mod +++ b/control-plane/cni/go.mod @@ -1,9 +1,10 @@ module github.com/hashicorp/consul-k8s/control-plane/cni require ( + github.com/cenkalti/backoff v2.1.1+incompatible github.com/containernetworking/cni v1.1.1 github.com/containernetworking/plugins v1.1.1 - github.com/hashicorp/consul/sdk v0.13.0 + github.com/hashicorp/consul/sdk v0.9.0 github.com/hashicorp/go-hclog v0.16.1 github.com/stretchr/testify v1.7.1 k8s.io/api v0.22.2 @@ -30,6 +31,7 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/spf13/pflag v1.0.5 // indirect + github.com/stretchr/objx v0.1.0 // indirect golang.org/x/net v0.0.0-20211209124913-491a49abca63 // indirect golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d // indirect golang.org/x/sys v0.0.0-20220412211240-33da011f77ad // indirect @@ -48,6 +50,6 @@ require ( sigs.k8s.io/yaml v1.2.0 // indirect ) -replace github.com/hashicorp/consul/sdk => github.com/hashicorp/consul/sdk v0.4.1-0.20221021205723-cc843c4be892 +replace github.com/hashicorp/consul/sdk v0.9.0 => github.com/hashicorp/consul/sdk v0.4.1-0.20220531155537-364758ef2f50 -go 1.20 +go 1.18 diff --git a/control-plane/cni/go.sum b/control-plane/cni/go.sum index 1188cc5dd4..03309565a1 100644 --- a/control-plane/cni/go.sum +++ b/control-plane/cni/go.sum @@ -34,6 +34,8 @@ github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb0 github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/cenkalti/backoff v2.1.1+incompatible h1:tKJnvO2kl0zmb/jA5UKAt4VoEVw1qxKWjE/Bpp46npY= +github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= @@ -62,7 +64,6 @@ github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGE github.com/form3tech-oss/jwt-go 
v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -131,8 +132,8 @@ github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9 github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/hashicorp/consul/sdk v0.4.1-0.20221021205723-cc843c4be892 h1:jw0NwPmNPr5CxAU04hACdj61JSaJBKZ0FdBo+kwfNp4= -github.com/hashicorp/consul/sdk v0.4.1-0.20221021205723-cc843c4be892/go.mod h1:yPkX5Q6CsxTFMjQQDJwzeNmUUF5NUGGbrDsv9wTb8cw= +github.com/hashicorp/consul/sdk v0.4.1-0.20220531155537-364758ef2f50 h1:GwbRRT+QxMRbYI608FGwTfcZ0iOVLX69B2ePjpQoyXw= +github.com/hashicorp/consul/sdk v0.4.1-0.20220531155537-364758ef2f50/go.mod h1:yPkX5Q6CsxTFMjQQDJwzeNmUUF5NUGGbrDsv9wTb8cw= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.16.1 h1:IVQwpTGNRRIHafnTs2dQLIk4ENtneRIEEJWOVDqz99o= @@ -183,7 +184,6 @@ github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -211,11 +211,13 @@ github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzu github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0 
h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= @@ -456,7 +458,6 @@ gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/control-plane/cni/main.go b/control-plane/cni/main.go index 7b05b5c6cd..24c473a853 100644 --- a/control-plane/cni/main.go +++ b/control-plane/cni/main.go @@ -23,7 +23,7 @@ import ( ) const ( - // These annotations are duplicated from control-plane/connect-inject/annotations_and_labels.go in + // These annotations are duplicated from control-plane/connect-inject/annotations.go in // order to prevent pulling in dependencies. // keyInjectStatus is the key of the annotation that is added to diff --git a/control-plane/commands.go b/control-plane/commands.go index ec3b7ca612..4b0afa6731 100644 --- a/control-plane/commands.go +++ b/control-plane/commands.go @@ -6,6 +6,8 @@ import ( cmdACLInit "github.com/hashicorp/consul-k8s/control-plane/subcommand/acl-init" cmdConnectInit "github.com/hashicorp/consul-k8s/control-plane/subcommand/connect-init" cmdConsulLogout "github.com/hashicorp/consul-k8s/control-plane/subcommand/consul-logout" + cmdConsulSidecar "github.com/hashicorp/consul-k8s/control-plane/subcommand/consul-sidecar" + cmdController "github.com/hashicorp/consul-k8s/control-plane/subcommand/controller" cmdCreateFederationSecret "github.com/hashicorp/consul-k8s/control-plane/subcommand/create-federation-secret" cmdDeleteCompletedJob "github.com/hashicorp/consul-k8s/control-plane/subcommand/delete-completed-job" cmdGetConsulClientCA "github.com/hashicorp/consul-k8s/control-plane/subcommand/get-consul-client-ca" @@ -14,6 +16,7 @@ import ( cmdInstallCNI "github.com/hashicorp/consul-k8s/control-plane/subcommand/install-cni" cmdPartitionInit "github.com/hashicorp/consul-k8s/control-plane/subcommand/partition-init" cmdServerACLInit "github.com/hashicorp/consul-k8s/control-plane/subcommand/server-acl-init" + cmdServiceAddress "github.com/hashicorp/consul-k8s/control-plane/subcommand/service-address" cmdSyncCatalog "github.com/hashicorp/consul-k8s/control-plane/subcommand/sync-catalog" cmdTLSInit "github.com/hashicorp/consul-k8s/control-plane/subcommand/tls-init" cmdVersion "github.com/hashicorp/consul-k8s/control-plane/subcommand/version" @@ -41,6 +44,10 @@ func init() { return &cmdInjectConnect.Command{UI: ui}, nil }, + "consul-sidecar": func() (cli.Command, error) { + return &cmdConsulSidecar.Command{UI: ui}, nil + }, + "consul-logout": func() (cli.Command, error) { return &cmdConsulLogout.Command{UI: ui}, nil }, @@ -61,6 +68,10 @@ func init() { return &cmdDeleteCompletedJob.Command{UI: ui}, nil }, + "service-address": func() (cli.Command, error) { + return &cmdServiceAddress.Command{UI: ui}, 
nil + }, + "get-consul-client-ca": func() (cli.Command, error) { return &cmdGetConsulClientCA.Command{UI: ui}, nil }, @@ -73,6 +84,10 @@ func init() { return &cmdCreateFederationSecret.Command{UI: ui}, nil }, + "controller": func() (cli.Command, error) { + return &cmdController.Command{UI: ui}, nil + }, + "webhook-cert-manager": func() (cli.Command, error) { return &webhookCertManager.Command{UI: ui}, nil }, diff --git a/control-plane/config/crd/bases/consul.hashicorp.com_meshes.yaml b/control-plane/config/crd/bases/consul.hashicorp.com_meshes.yaml index 7ad173afbf..54137f9cc1 100644 --- a/control-plane/config/crd/bases/consul.hashicorp.com_meshes.yaml +++ b/control-plane/config/crd/bases/consul.hashicorp.com_meshes.yaml @@ -56,18 +56,6 @@ spec: required: - sanitizeXForwardedClientCert type: object - peering: - description: Peering defines the peering configuration for the service - mesh. - properties: - peerThroughMeshGateways: - description: PeerThroughMeshGateways determines whether peering - traffic between control planes should flow through mesh gateways. - If enabled, Consul servers will advertise mesh gateway addresses - as their own. Additionally, mesh gateways will configure themselves - to expose the local servers using a peering-specific SNI. - type: boolean - type: object tls: description: TLS defines the TLS configuration for the service mesh. properties: diff --git a/control-plane/config/crd/bases/consul.hashicorp.com_proxydefaults.yaml b/control-plane/config/crd/bases/consul.hashicorp.com_proxydefaults.yaml index 2563cbcf77..6b9628cd74 100644 --- a/control-plane/config/crd/bases/consul.hashicorp.com_proxydefaults.yaml +++ b/control-plane/config/crd/bases/consul.hashicorp.com_proxydefaults.yaml @@ -50,60 +50,12 @@ spec: spec: description: ProxyDefaultsSpec defines the desired state of ProxyDefaults. properties: - accessLogs: - description: AccessLogs controls all envoy instances' access logging - configuration. - properties: - disableListenerLogs: - description: DisableListenerLogs turns off just listener logs - for connections rejected by Envoy because they don't have a - matching listener filter. - type: boolean - enabled: - description: Enabled turns on all access logging - type: boolean - jsonFormat: - description: 'JSONFormat is a JSON-formatted string of an Envoy - access log format dictionary. See for more info on formatting: - https://www.envoyproxy.io/docs/envoy/latest/configuration/observability/access_log/usage#format-dictionaries - Defining JSONFormat and TextFormat is invalid.' - type: string - path: - description: Path is the output file to write logs for file-type - logging - type: string - textFormat: - description: 'TextFormat is a representation of Envoy access logs - format. See for more info on formatting: https://www.envoyproxy.io/docs/envoy/latest/configuration/observability/access_log/usage#format-strings - Defining JSONFormat and TextFormat is invalid.' - type: string - type: - description: Type selects the output for logs one of "file", "stderr". - "stdout" - type: string - type: object config: description: Config is an arbitrary map of configuration values used by Connect proxies. Any values that your proxy allows can be configured globally here. Supports JSON config values. See https://www.consul.io/docs/connect/proxies/envoy#configuration-formatting type: object x-kubernetes-preserve-unknown-fields: true - envoyExtensions: - description: EnvoyExtensions are a list of extensions to modify Envoy - proxy configuration. 
- items: - description: EnvoyExtension has configuration for an extension that - patches Envoy resources. - properties: - arguments: - type: object - x-kubernetes-preserve-unknown-fields: true - name: - type: string - required: - type: boolean - type: object - type: array expose: description: Expose controls the default expose path configuration for Envoy. diff --git a/control-plane/config/crd/bases/consul.hashicorp.com_servicedefaults.yaml b/control-plane/config/crd/bases/consul.hashicorp.com_servicedefaults.yaml index 4f335a923d..944f494f98 100644 --- a/control-plane/config/crd/bases/consul.hashicorp.com_servicedefaults.yaml +++ b/control-plane/config/crd/bases/consul.hashicorp.com_servicedefaults.yaml @@ -50,12 +50,6 @@ spec: spec: description: ServiceDefaultsSpec defines the desired state of ServiceDefaults. properties: - balanceInboundConnections: - description: BalanceInboundConnections sets the strategy for allocating - inbound connections to the service across proxy threads. The only - supported value is exact_balance. By default, no connection balancing - is used. Refer to the Envoy Connection Balance config for details. - type: string destination: description: Destination is an address(es)/port combination that represents an endpoint outside the mesh. This is only valid when the mesh is @@ -75,22 +69,6 @@ spec: format: int32 type: integer type: object - envoyExtensions: - description: EnvoyExtensions are a list of extensions to modify Envoy - proxy configuration. - items: - description: EnvoyExtension has configuration for an extension that - patches Envoy resources. - properties: - arguments: - type: object - x-kubernetes-preserve-unknown-fields: true - name: - type: string - required: - type: boolean - type: object - type: array expose: description: Expose controls the default expose path configuration for Envoy. @@ -129,15 +107,15 @@ spec: with an external system. type: string localConnectTimeoutMs: - description: LocalConnectTimeoutMs is the number of milliseconds allowed - to make connections to the local application instance before timing - out. Defaults to 5000. + description: The number of milliseconds allowed to make connections + to the local application instance before timing out. Defaults to + 5000. type: integer localRequestTimeoutMs: - description: LocalRequestTimeoutMs is the timeout for HTTP requests - to the local application instance in milliseconds. Applies to HTTP-based - protocols only. If not specified, inherits the Envoy default for - route timeouts (15s). + description: In milliseconds, the timeout for HTTP requests to the + local application instance. Applies to HTTP-based protocols only. + If not specified, inherits the Envoy default for route timeouts + (15s). type: integer maxInboundConnections: description: MaxInboundConnections is the maximum number of concurrent @@ -251,15 +229,15 @@ spec: type: string type: object name: - description: Name is only accepted within service ServiceDefaultsSpec.UpstreamConfig.Overrides + description: Name is only accepted within a service-defaults config entry. type: string namespace: - description: Namespace is only accepted within service ServiceDefaultsSpec.UpstreamConfig.Overrides + description: Namespace is only accepted within a service-defaults config entry. type: string partition: - description: Partition is only accepted within service ServiceDefaultsSpec.UpstreamConfig.Overrides + description: Partition is only accepted within a service-defaults config entry. 
type: string passiveHealthCheck: @@ -284,10 +262,6 @@ spec: format: int32 type: integer type: object - peer: - description: Peer is only accepted within service ServiceDefaultsSpec.UpstreamConfig.Overrides - config entry. - type: string protocol: description: Protocol describes the upstream's service protocol. Valid values are "tcp", "http" and "grpc". Anything else @@ -354,15 +328,15 @@ spec: type: string type: object name: - description: Name is only accepted within service ServiceDefaultsSpec.UpstreamConfig.Overrides + description: Name is only accepted within a service-defaults config entry. type: string namespace: - description: Namespace is only accepted within service ServiceDefaultsSpec.UpstreamConfig.Overrides + description: Namespace is only accepted within a service-defaults config entry. type: string partition: - description: Partition is only accepted within service ServiceDefaultsSpec.UpstreamConfig.Overrides + description: Partition is only accepted within a service-defaults config entry. type: string passiveHealthCheck: @@ -389,10 +363,6 @@ spec: format: int32 type: integer type: object - peer: - description: Peer is only accepted within service ServiceDefaultsSpec.UpstreamConfig.Overrides - config entry. - type: string protocol: description: Protocol describes the upstream's service protocol. Valid values are "tcp", "http" and "grpc". Anything else diff --git a/control-plane/config/crd/bases/consul.hashicorp.com_serviceresolvers.yaml b/control-plane/config/crd/bases/consul.hashicorp.com_serviceresolvers.yaml index a84fc0bd88..1793f36e28 100644 --- a/control-plane/config/crd/bases/consul.hashicorp.com_serviceresolvers.yaml +++ b/control-plane/config/crd/bases/consul.hashicorp.com_serviceresolvers.yaml @@ -81,37 +81,6 @@ spec: service to resolve as the failover group of instances. If empty the default subset for the requested service is used. type: string - targets: - description: Targets specifies a fixed list of failover targets - to try during failover. - items: - properties: - datacenter: - description: Datacenter specifies the datacenter to try - during failover. - type: string - namespace: - description: Namespace specifies the namespace to try - during failover. - type: string - partition: - description: Partition specifies the partition to try - during failover. - type: string - peer: - description: Peer specifies the name of the cluster peer - to try during failover. - type: string - service: - description: Service specifies the name of the service - to try during failover. - type: string - serviceSubset: - description: ServiceSubset specifies the service subset - to try during failover. - type: string - type: object - type: array type: object description: Failover controls when and how to reroute traffic to an alternate pool of service instances. The map is keyed by the @@ -221,10 +190,6 @@ spec: service from instead of the current partition. If empty the current partition is assumed. type: string - peer: - description: Peer is the name of the cluster peer to resolve the - service from instead of the current one. - type: string service: description: Service is a service to resolve instead of the current service. 
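The Name, Namespace, and Partition descriptions in the service-defaults hunks above all make the same point: those fields are only accepted inside an upstream override. A minimal sketch of the equivalent config entry written through the Go API follows, assuming the github.com/hashicorp/consul/api client; the "web" and "backend" service names are hypothetical.

package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	entry := &api.ServiceConfigEntry{
		Kind:     api.ServiceDefaults,
		Name:     "web",
		Protocol: "http",
		UpstreamConfig: &api.UpstreamConfiguration{
			// Defaults apply to every upstream of "web"; Name is not set here.
			Defaults: &api.UpstreamConfig{Protocol: "tcp"},
			// Overrides target a single upstream by Name (plus Namespace and
			// Partition in Enterprise), the only place those fields are accepted.
			Overrides: []*api.UpstreamConfig{
				{Name: "backend", Protocol: "grpc"},
			},
		},
	}
	if _, _, err := client.ConfigEntries().Set(entry, nil); err != nil {
		log.Fatal(err)
	}
}

The CRDs in this diff project that same config-entry shape into Kubernetes, which is why the acceptance rules read identically in both places.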
diff --git a/control-plane/connect-inject/annotations.go b/control-plane/connect-inject/annotations.go new file mode 100644 index 0000000000..e63a7b1935 --- /dev/null +++ b/control-plane/connect-inject/annotations.go @@ -0,0 +1,195 @@ +package connectinject + +const ( + // keyInjectStatus is the key of the annotation that is added to + // a pod after an injection is done. + keyInjectStatus = "consul.hashicorp.com/connect-inject-status" + + // keyTransparentProxyStatus is the key of the annotation that is added to + // a pod when transparent proxy is done. + keyTransparentProxyStatus = "consul.hashicorp.com/transparent-proxy-status" + + // keyManagedBy is the key of the label that is added to pods managed + // by the Endpoints controller. This is to support upgrading from consul-k8s + // without Endpoints controller to consul-k8s with Endpoints controller + // without disrupting services managed the old way. + keyManagedBy = "consul.hashicorp.com/connect-inject-managed-by" + + // annotationInject is the key of the annotation that controls whether + // injection is explicitly enabled or disabled for a pod. This should + // be set to a truthy or falsy value, as parseable by strconv.ParseBool. + annotationInject = "consul.hashicorp.com/connect-inject" + + // annotationInjectMountVolumes is the key of the annotation that controls whether + // the data volume that connect inject uses to store data including the Consul ACL token + // is mounted to other containers in the pod. It is a comma-separated list of container names + // to mount the volume on. It will be mounted at the path `/consul/connect-inject`. + annotationInjectMountVolumes = "consul.hashicorp.com/connect-inject-mount-volume" + + // annotationService is the name of the service to proxy. + // This defaults to the name of the Kubernetes service associated with the pod. + annotationService = "consul.hashicorp.com/connect-service" + + // annotationKubernetesService is the name of the Kubernetes service to register. + // This allows a pod to specify what Kubernetes service should trigger a Consul + // service registration in the case of multiple services referencing a deployment. + annotationKubernetesService = "consul.hashicorp.com/kubernetes-service" + + // annotationPort is the name or value of the port to proxy incoming + // connections to. + annotationPort = "consul.hashicorp.com/connect-service-port" + + // annotationProtocol contains the protocol that should be used for + // the service that is being injected. Valid values are "http", "http2", + // "grpc" and "tcp". + // + // Deprecated: This annotation is no longer supported. + annotationProtocol = "consul.hashicorp.com/connect-service-protocol" + + // annotationUpstreams is a list of upstreams to register with the + // proxy in the format of `<service-name>:<port>,...`. The + // service name should map to a Consul service name and the local port + // is the local port in the pod that the listener will bind to. It can + // be a named port. + annotationUpstreams = "consul.hashicorp.com/connect-service-upstreams" + + // annotationTags is a list of tags to register with the service; + // this is specified as a comma-separated list, e.g. abc,123. + annotationTags = "consul.hashicorp.com/service-tags" + + // annotationConnectTags is a list of tags to register with the service; + // this is specified as a comma-separated list, e.g. abc,123. + // + // Deprecated: 'consul.hashicorp.com/service-tags' is the new annotation + // and should be used instead.
We made this change because the tagging is + not specific to connect as both the connect proxy *and* the Consul + service that gets registered are tagged. + annotationConnectTags = "consul.hashicorp.com/connect-service-tags" + + // annotationMeta is a list of metadata key/value pairs to add to the service + // registration. This is specified in the format `<key>:<value>` + // e.g. consul.hashicorp.com/service-meta-foo:bar. + annotationMeta = "consul.hashicorp.com/service-meta-" + + // annotationSyncPeriod controls the -sync-period flag passed to the + // consul-k8s consul-sidecar command. This flag controls how often the + // service is synced (i.e. re-registered) with the local agent. + // + // Deprecated: This annotation is no longer supported. + annotationSyncPeriod = "consul.hashicorp.com/connect-sync-period" + + // annotationUseProxyHealthCheck creates a readiness listener on the sidecar proxy and + // queries this instead of the application health check for the status of the application. + // Enable this only if the application does not support health checks. + annotationUseProxyHealthCheck = "consul.hashicorp.com/use-proxy-health-check" + + // annotations for sidecar proxy resource limits. + annotationSidecarProxyCPULimit = "consul.hashicorp.com/sidecar-proxy-cpu-limit" + annotationSidecarProxyCPURequest = "consul.hashicorp.com/sidecar-proxy-cpu-request" + annotationSidecarProxyMemoryLimit = "consul.hashicorp.com/sidecar-proxy-memory-limit" + annotationSidecarProxyMemoryRequest = "consul.hashicorp.com/sidecar-proxy-memory-request" + + // annotations for consul sidecar resource limits. + annotationConsulSidecarCPULimit = "consul.hashicorp.com/consul-sidecar-cpu-limit" + annotationConsulSidecarCPURequest = "consul.hashicorp.com/consul-sidecar-cpu-request" + annotationConsulSidecarMemoryLimit = "consul.hashicorp.com/consul-sidecar-memory-limit" + annotationConsulSidecarMemoryRequest = "consul.hashicorp.com/consul-sidecar-memory-request" + + // annotations for sidecar volumes. + annotationConsulSidecarUserVolume = "consul.hashicorp.com/consul-sidecar-user-volume" + annotationConsulSidecarUserVolumeMount = "consul.hashicorp.com/consul-sidecar-user-volume-mount" + + // annotations for sidecar concurrency. + annotationEnvoyProxyConcurrency = "consul.hashicorp.com/consul-envoy-proxy-concurrency" + + // annotations for metrics to configure where Prometheus scrapes + // metrics from, whether to run a merged metrics endpoint on the consul + // sidecar, and configure the connect service metrics. + annotationEnableMetrics = "consul.hashicorp.com/enable-metrics" + annotationEnableMetricsMerging = "consul.hashicorp.com/enable-metrics-merging" + annotationMergedMetricsPort = "consul.hashicorp.com/merged-metrics-port" + annotationPrometheusScrapePort = "consul.hashicorp.com/prometheus-scrape-port" + annotationPrometheusScrapePath = "consul.hashicorp.com/prometheus-scrape-path" + annotationServiceMetricsPort = "consul.hashicorp.com/service-metrics-port" + annotationServiceMetricsPath = "consul.hashicorp.com/service-metrics-path" + + // annotations for configuring TLS for Prometheus. + annotationPrometheusCAFile = "consul.hashicorp.com/prometheus-ca-file" + annotationPrometheusCAPath = "consul.hashicorp.com/prometheus-ca-path" + annotationPrometheusCertFile = "consul.hashicorp.com/prometheus-cert-file" + annotationPrometheusKeyFile = "consul.hashicorp.com/prometheus-key-file" + + // annotationEnvoyExtraArgs is a space-separated list of arguments to be passed to the + // envoy binary.
See list of args here: https://www.envoyproxy.io/docs/envoy/latest/operations/cli + // e.g. consul.hashicorp.com/envoy-extra-args: "--log-level debug --disable-hot-restart" + // The arguments passed in via this annotation will take precedence over arguments + // passed via the -envoy-extra-args flag. + annotationEnvoyExtraArgs = "consul.hashicorp.com/envoy-extra-args" + + // annotationConsulNamespace is the Consul namespace the service is registered into. + annotationConsulNamespace = "consul.hashicorp.com/consul-namespace" + + // keyConsulDNS enables or disables Consul DNS for a given pod. It can also be set as a label + // on a namespace to define the default behaviour for connect-injected pods which do not otherwise override this setting + // with their own annotation. + // This annotation/label takes a boolean value (true/false). + keyConsulDNS = "consul.hashicorp.com/consul-dns" + + // keyTransparentProxy enables or disables transparent proxy for a given pod. It can also be set as a label + // on a namespace to define the default behaviour for connect-injected pods which do not otherwise override this setting + // with their own annotation. + // This annotation/label takes a boolean value (true/false). + keyTransparentProxy = "consul.hashicorp.com/transparent-proxy" + + // annotationTProxyExcludeInboundPorts is a comma-separated list of inbound ports to exclude from traffic redirection. + annotationTProxyExcludeInboundPorts = "consul.hashicorp.com/transparent-proxy-exclude-inbound-ports" + + // annotationTProxyExcludeOutboundPorts is a comma-separated list of outbound ports to exclude from traffic redirection. + annotationTProxyExcludeOutboundPorts = "consul.hashicorp.com/transparent-proxy-exclude-outbound-ports" + + // annotationTProxyExcludeOutboundCIDRs is a comma-separated list of outbound CIDRs to exclude from traffic redirection. + annotationTProxyExcludeOutboundCIDRs = "consul.hashicorp.com/transparent-proxy-exclude-outbound-cidrs" + + // annotationTProxyExcludeUIDs is a comma-separated list of additional user IDs to exclude from traffic redirection. + annotationTProxyExcludeUIDs = "consul.hashicorp.com/transparent-proxy-exclude-uids" + + // annotationTransparentProxyOverwriteProbes controls whether the Kubernetes probes should be overwritten + // to point to the Envoy proxy when running in Transparent Proxy mode. + annotationTransparentProxyOverwriteProbes = "consul.hashicorp.com/transparent-proxy-overwrite-probes" + + // annotationRedirectTraffic stores iptables.Config information so that the CNI plugin can use it to apply + // iptables rules. + annotationRedirectTraffic = "consul.hashicorp.com/redirect-traffic-config" + + // annotationOriginalPod is the value of the pod before being overwritten by the consul + // webhook/meshWebhook. + annotationOriginalPod = "consul.hashicorp.com/original-pod" + + // annotationPeeringVersion is the version of the peering resource and can be utilized + // to explicitly perform the peering operation again. + annotationPeeringVersion = "consul.hashicorp.com/peering-version" + + // labelServiceIgnore is a label that can be added to a service to prevent it from being + // registered with Consul. + labelServiceIgnore = "consul.hashicorp.com/service-ignore" + + // labelPeeringToken is a label that can be added to a secret to allow it to be watched + // by the peering controllers. + labelPeeringToken = "consul.hashicorp.com/peering-token" + + // injected is used as the annotation value for keyInjectStatus and annotationInjected.
+ injected = "injected" + + // enabled is used as the annotation value for keyTransparentProxyStatus. + enabled = "enabled" + + // managedByValue is the value for keyManagedBy. + managedByValue = "consul-k8s-endpoints-controller" +) + +// Annotations used by Prometheus. +const ( + annotationPrometheusScrape = "prometheus.io/scrape" + annotationPrometheusPath = "prometheus.io/path" + annotationPrometheusPort = "prometheus.io/port" +) diff --git a/control-plane/connect-inject/common/common.go b/control-plane/connect-inject/common/common.go deleted file mode 100644 index 9611797c9f..0000000000 --- a/control-plane/connect-inject/common/common.go +++ /dev/null @@ -1,58 +0,0 @@ -package common - -import ( - "fmt" - "strconv" - "strings" - - "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" - corev1 "k8s.io/api/core/v1" -) - -// PortValue returns the port of the container for the string value passed -// in as an argument on the provided pod. -func PortValue(pod corev1.Pod, value string) (int32, error) { - value = strings.Split(value, ",")[0] - // First search for the named port. - for _, c := range pod.Spec.Containers { - for _, p := range c.Ports { - if p.Name == value { - return p.ContainerPort, nil - } - } - } - - // Named port not found, return the parsed value. - raw, err := strconv.ParseInt(value, 0, 32) - return int32(raw), err -} - -// TransparentProxyEnabled returns true if transparent proxy should be enabled for this pod. -// It returns an error when the annotation value cannot be parsed by strconv.ParseBool or if we are unable -// to read the pod's namespace label when it exists. -func TransparentProxyEnabled(namespace corev1.Namespace, pod corev1.Pod, globalEnabled bool) (bool, error) { - // First check to see if the pod annotation exists to override the namespace or global settings. - if raw, ok := pod.Annotations[constants.KeyTransparentProxy]; ok { - return strconv.ParseBool(raw) - } - // Next see if the namespace has been defaulted. - if raw, ok := namespace.Labels[constants.KeyTransparentProxy]; ok { - return strconv.ParseBool(raw) - } - // Else fall back to the global default. - return globalEnabled, nil - } - -// ShouldOverwriteProbes returns true if we need to overwrite readiness/liveness probes for this pod. -// It returns an error when the annotation value cannot be parsed by strconv.ParseBool.
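// A usage sketch for the annotation formats documented in annotations.go
// above (pod and service names are hypothetical; assumes only the standard
// k8s.io/api/core/v1 and k8s.io/apimachinery meta/v1 types):
//
//	pod := corev1.Pod{
//		ObjectMeta: metav1.ObjectMeta{
//			Annotations: map[string]string{
//				"consul.hashicorp.com/connect-inject":       "true",
//				"consul.hashicorp.com/connect-service":      "web",
//				"consul.hashicorp.com/connect-service-port": "8080",
//				// upstreams use `<service-name>:<port>`; the port may be named.
//				"consul.hashicorp.com/connect-service-upstreams": "backend:1234",
//				"consul.hashicorp.com/service-tags":              "v1,canary",
//				// metadata keys use the `service-meta-` annotation prefix.
//				"consul.hashicorp.com/service-meta-team": "platform",
//			},
//		},
//	}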
-func ShouldOverwriteProbes(pod corev1.Pod, globalOverwrite bool) (bool, error) { - if raw, ok := pod.Annotations[constants.AnnotationTransparentProxyOverwriteProbes]; ok { - return strconv.ParseBool(raw) - } - - return globalOverwrite, nil -} - -func ConsulNodeNameFromK8sNode(nodeName string) string { - return fmt.Sprintf("%s-virtual", nodeName) -} diff --git a/control-plane/connect-inject/common/common_test.go b/control-plane/connect-inject/common/common_test.go deleted file mode 100644 index e43ccf8255..0000000000 --- a/control-plane/connect-inject/common/common_test.go +++ /dev/null @@ -1,92 +0,0 @@ -package common - -import ( - "testing" - - "github.com/stretchr/testify/require" - corev1 "k8s.io/api/core/v1" -) - -func TestPortValue(t *testing.T) { - cases := []struct { - Name string - Pod *corev1.Pod - Value string - Expected int32 - Err string - }{ - { - "empty", - &corev1.Pod{}, - "", - 0, - "strconv.ParseInt: parsing \"\": invalid syntax", - }, - - { - "basic pod, with ports", - &corev1.Pod{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "web", - Ports: []corev1.ContainerPort{ - { - Name: "http", - ContainerPort: 8080, - }, - }, - }, - - { - Name: "web-side", - }, - }, - }, - }, - "http", - int32(8080), - "", - }, - - { - "basic pod, with unnamed ports", - &corev1.Pod{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "web", - Ports: []corev1.ContainerPort{ - { - ContainerPort: 8080, - }, - }, - }, - - { - Name: "web-side", - }, - }, - }, - }, - "8080", - int32(8080), - "", - }, - } - - for _, tt := range cases { - t.Run(tt.Name, func(t *testing.T) { - port, err := PortValue(*tt.Pod, tt.Value) - if (tt.Err != "") != (err != nil) { - t.Fatalf("actual: %v, expected err: %v", err, tt.Err) - } - if tt.Err != "" { - require.Contains(t, err.Error(), tt.Err) - return - } - - require.Equal(t, tt.Expected, port) - }) - } -} diff --git a/control-plane/connect-inject/constants/annotations_and_labels.go b/control-plane/connect-inject/constants/annotations_and_labels.go deleted file mode 100644 index 637e028202..0000000000 --- a/control-plane/connect-inject/constants/annotations_and_labels.go +++ /dev/null @@ -1,200 +0,0 @@ -package constants - -const ( - // KeyInjectStatus is the key of the annotation that is added to - // a pod after an injection is done. - KeyInjectStatus = "consul.hashicorp.com/connect-inject-status" - - // KeyTransparentProxyStatus is the key of the annotation that is added to - // a pod when transparent proxy is done. - KeyTransparentProxyStatus = "consul.hashicorp.com/transparent-proxy-status" - - // KeyManagedBy is the key of the label that is added to pods managed - // by the Endpoints controller. This is to support upgrading from consul-k8s - // without Endpoints controller to consul-k8s with Endpoints controller - // without disrupting services managed the old way. - KeyManagedBy = "consul.hashicorp.com/connect-inject-managed-by" - - // AnnotationInject is the key of the annotation that controls whether - // injection is explicitly enabled or disabled for a pod. This should - // be set to a truthy or falsy value, as parseable by strconv.ParseBool. - AnnotationInject = "consul.hashicorp.com/connect-inject" - - // AnnotationGatewayKind is the key of the annotation that indicates pods - // that represent Consul Connect Gateways. This should be set to a - // value that is either "mesh", "ingress" or "terminating". 
- AnnotationGatewayKind = "consul.hashicorp.com/gateway-kind" - - // AnnotationGatewayConsulServiceName is the key of the annotation whose value - // is the service name with which the mesh gateway is registered. - AnnotationGatewayConsulServiceName = "consul.hashicorp.com/gateway-consul-service-name" - - // AnnotationMeshGatewayContainerPort is the key of the annotation whose value is - // used as the port and also registered as the LAN port when the mesh-gateway - // service is registered. - AnnotationMeshGatewayContainerPort = "consul.hashicorp.com/mesh-gateway-container-port" - - // AnnotationGatewayWANSource is the key of the annotation that determines which - // source to use to determine the wan address and wan port for the mesh-gateway - // service registration. - AnnotationGatewayWANSource = "consul.hashicorp.com/gateway-wan-address-source" - - // AnnotationGatewayWANAddress is the key of the annotation that when the source - // of the mesh-gateway is 'Static', is the value of the WAN address for the gateway. - AnnotationGatewayWANAddress = "consul.hashicorp.com/gateway-wan-address-static" - - // AnnotationGatewayWANPort is the key of the annotation whose value is the - // WAN port for the mesh-gateway service registration. - AnnotationGatewayWANPort = "consul.hashicorp.com/gateway-wan-port" - - // AnnotationGatewayNamespace is the key of the annotation that indicates the - // Consul namespace where a Terminating or Ingress Gateway pod is deployed. - AnnotationGatewayNamespace = "consul.hashicorp.com/gateway-namespace" - - // AnnotationInjectMountVolumes is the key of the annotation that controls whether - // the data volume that connect inject uses to store data including the Consul ACL token - // is mounted to other containers in the pod. It is a comma-separated list of container names - // to mount the volume on. It will be mounted at the path `/consul/connect-inject`. - AnnotationInjectMountVolumes = "consul.hashicorp.com/connect-inject-mount-volume" - - // AnnotationService is the name of the service to proxy. - // This defaults to the name of the Kubernetes service associated with the pod. - AnnotationService = "consul.hashicorp.com/connect-service" - - // AnnotationKubernetesService is the name of the Kubernetes service to register. - // This allows a pod to specify what Kubernetes service should trigger a Consul - // service registration in the case of multiple services referencing a deployment. - AnnotationKubernetesService = "consul.hashicorp.com/kubernetes-service" - - // AnnotationPort is the name or value of the port to proxy incoming - // connections to. - AnnotationPort = "consul.hashicorp.com/connect-service-port" - - // AnnotationUpstreams is a list of upstreams to register with the - // proxy in the format of `<service-name>:<port>,...`. The - // service name should map to a Consul service namd and the local port - // is the local port in the pod that the listener will bind to. It can - // be a named port. - AnnotationUpstreams = "consul.hashicorp.com/connect-service-upstreams" - - // AnnotationTags is a list of tags to register with the service - // this is specified as a comma separated list e.g. abc,123. - AnnotationTags = "consul.hashicorp.com/service-tags" - - // AnnotationMeta is a list of metadata key/value pairs to add to the service - // registration. This is specified in the format `<key>:<value>` - // e.g. consul.hashicorp.com/service-meta-foo:bar.
- AnnotationMeta = "consul.hashicorp.com/service-meta-" - - // AnnotationUseProxyHealthCheck creates a readiness listener on the sidecar proxy and - // queries this instead of the application health check for the status of the application. - // Enable this only if the application does not support health checks. - AnnotationUseProxyHealthCheck = "consul.hashicorp.com/use-proxy-health-check" - - // annotations for sidecar proxy resource limits. - AnnotationSidecarProxyCPULimit = "consul.hashicorp.com/sidecar-proxy-cpu-limit" - AnnotationSidecarProxyCPURequest = "consul.hashicorp.com/sidecar-proxy-cpu-request" - AnnotationSidecarProxyMemoryLimit = "consul.hashicorp.com/sidecar-proxy-memory-limit" - AnnotationSidecarProxyMemoryRequest = "consul.hashicorp.com/sidecar-proxy-memory-request" - - // annotations for sidecar volumes. - AnnotationConsulSidecarUserVolume = "consul.hashicorp.com/consul-sidecar-user-volume" - AnnotationConsulSidecarUserVolumeMount = "consul.hashicorp.com/consul-sidecar-user-volume-mount" - - // annotations for sidecar concurrency. - AnnotationEnvoyProxyConcurrency = "consul.hashicorp.com/consul-envoy-proxy-concurrency" - - // annotations for metrics to configure where Prometheus scrapes - // metrics from, whether to run a merged metrics endpoint on the consul - // sidecar, and configure the connect service metrics. - AnnotationEnableMetrics = "consul.hashicorp.com/enable-metrics" - AnnotationEnableMetricsMerging = "consul.hashicorp.com/enable-metrics-merging" - AnnotationMergedMetricsPort = "consul.hashicorp.com/merged-metrics-port" - AnnotationPrometheusScrapePort = "consul.hashicorp.com/prometheus-scrape-port" - AnnotationPrometheusScrapePath = "consul.hashicorp.com/prometheus-scrape-path" - AnnotationServiceMetricsPort = "consul.hashicorp.com/service-metrics-port" - AnnotationServiceMetricsPath = "consul.hashicorp.com/service-metrics-path" - - // annotations for configuring TLS for Prometheus. - AnnotationPrometheusCAFile = "consul.hashicorp.com/prometheus-ca-file" - AnnotationPrometheusCAPath = "consul.hashicorp.com/prometheus-ca-path" - AnnotationPrometheusCertFile = "consul.hashicorp.com/prometheus-cert-file" - AnnotationPrometheusKeyFile = "consul.hashicorp.com/prometheus-key-file" - - // AnnotationEnvoyExtraArgs is a space-separated list of arguments to be passed to the - // envoy binary. See list of args here: https://www.envoyproxy.io/docs/envoy/latest/operations/cli - // e.g. consul.hashicorp.com/envoy-extra-args: "--log-level debug --disable-hot-restart" - // The arguments passed in via this annotation will take precendence over arguments - // passed via the -envoy-extra-args flag. - AnnotationEnvoyExtraArgs = "consul.hashicorp.com/envoy-extra-args" - - // AnnotationConsulNamespace is the Consul namespace the service is registered into. - AnnotationConsulNamespace = "consul.hashicorp.com/consul-namespace" - - // KeyConsulDNS enables or disables Consul DNS for a given pod. It can also be set as a label - // on a namespace to define the default behaviour for connect-injected pods which do not otherwise override this setting - // with their own annotation. - // This annotation/label takes a boolean value (true/false). - KeyConsulDNS = "consul.hashicorp.com/consul-dns" - - // KeyTransparentProxy enables or disables transparent proxy for a given pod. It can also be set as a label - // on a namespace to define the default behaviour for connect-injected pods which do not otherwise override this setting - // with their own annotation. 
- // This annotation/label takes a boolean value (true/false). - KeyTransparentProxy = "consul.hashicorp.com/transparent-proxy" - - // AnnotationTProxyExcludeInboundPorts is a comma-separated list of inbound ports to exclude from traffic redirection. - AnnotationTProxyExcludeInboundPorts = "consul.hashicorp.com/transparent-proxy-exclude-inbound-ports" - - // AnnotationTProxyExcludeOutboundPorts is a comma-separated list of outbound ports to exclude from traffic redirection. - AnnotationTProxyExcludeOutboundPorts = "consul.hashicorp.com/transparent-proxy-exclude-outbound-ports" - - // AnnotationTProxyExcludeOutboundCIDRs is a comma-separated list of outbound CIDRs to exclude from traffic redirection. - AnnotationTProxyExcludeOutboundCIDRs = "consul.hashicorp.com/transparent-proxy-exclude-outbound-cidrs" - - // AnnotationTProxyExcludeUIDs is a comma-separated list of additional user IDs to exclude from traffic redirection. - AnnotationTProxyExcludeUIDs = "consul.hashicorp.com/transparent-proxy-exclude-uids" - - // AnnotationTransparentProxyOverwriteProbes controls whether the Kubernetes probes should be overwritten - // to point to the Envoy proxy when running in Transparent Proxy mode. - AnnotationTransparentProxyOverwriteProbes = "consul.hashicorp.com/transparent-proxy-overwrite-probes" - - // AnnotationRedirectTraffic stores iptables.Config information so that the CNI plugin can use it to apply - // iptables rules. - AnnotationRedirectTraffic = "consul.hashicorp.com/redirect-traffic-config" - - // AnnotationOriginalPod is the value of the pod before being overwritten by the consul - // webhook/meshWebhook. - AnnotationOriginalPod = "consul.hashicorp.com/original-pod" - - // AnnotationPeeringVersion is the version of the peering resource and can be utilized - // to explicitly perform the peering operation again. - AnnotationPeeringVersion = "consul.hashicorp.com/peering-version" - - // AnnotationConsulK8sVersion is the current version of this binary. - AnnotationConsulK8sVersion = "consul.hashicorp.com/connect-k8s-version" - - // LabelServiceIgnore is a label that can be added to a service to prevent it from being - // registered with Consul. - LabelServiceIgnore = "consul.hashicorp.com/service-ignore" - - // LabelPeeringToken is a label that can be added to a secret to allow it to be watched - // by the peering controllers. - LabelPeeringToken = "consul.hashicorp.com/peering-token" - - // Injected is used as the annotation value for keyInjectStatus and annotationInjected. - Injected = "injected" - - // Enabled is used as the annotation value for keyTransparentProxyStatus. - Enabled = "enabled" - - // ManagedByValue is the value for keyManagedBy. - ManagedByValue = "consul-k8s-endpoints-controller" -) - -// Annotations used by Prometheus. -const ( - AnnotationPrometheusScrape = "prometheus.io/scrape" - AnnotationPrometheusPath = "prometheus.io/path" - AnnotationPrometheusPort = "prometheus.io/port" -) diff --git a/control-plane/connect-inject/constants/constants.go b/control-plane/connect-inject/constants/constants.go deleted file mode 100644 index e371677629..0000000000 --- a/control-plane/connect-inject/constants/constants.go +++ /dev/null @@ -1,18 +0,0 @@ -package constants - -const ( - // ConsulCAFile is the location of the Consul CA file inside the injected pod. - ConsulCAFile = "/consul/connect-inject/consul-ca.pem" - - // ProxyDefaultInboundPort is the default inbound port for the proxy. 
- ProxyDefaultInboundPort = 20000 - - // ProxyDefaultHealthPort is the default HTTP health check port for the proxy. - ProxyDefaultHealthPort = 21000 - - // MetaKeyKubeNS is the meta key name for Kubernetes namespace used for the Consul services. - MetaKeyKubeNS = "k8s-namespace" - - // MetaKeyPodName is the meta key name for Kubernetes pod name used for the Consul services. - MetaKeyPodName = "pod-name" -) diff --git a/control-plane/connect-inject/consul_sidecar.go b/control-plane/connect-inject/consul_sidecar.go new file mode 100644 index 0000000000..a19eebb5ef --- /dev/null +++ b/control-plane/connect-inject/consul_sidecar.go @@ -0,0 +1,115 @@ +package connectinject + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" +) + +// consulSidecar starts the consul-sidecar command to only run +// the metrics merging server when the metrics merging feature is enabled. +// It always disables service registration because for connect we no longer +// need to keep services registered as this is handled in the endpoints-controller. +func (w *MeshWebhook) consulSidecar(pod corev1.Pod) (corev1.Container, error) { + metricsPorts, err := w.MetricsConfig.mergedMetricsServerConfiguration(pod) + if err != nil { + return corev1.Container{}, err + } + + resources, err := w.consulSidecarResources(pod) + if err != nil { + return corev1.Container{}, err + } + + command := []string{ + "consul-k8s-control-plane", + "consul-sidecar", + "-enable-service-registration=false", + "-enable-metrics-merging=true", + fmt.Sprintf("-merged-metrics-port=%s", metricsPorts.mergedPort), + fmt.Sprintf("-service-metrics-port=%s", metricsPorts.servicePort), + fmt.Sprintf("-service-metrics-path=%s", metricsPorts.servicePath), + fmt.Sprintf("-log-level=%s", w.LogLevel), + fmt.Sprintf("-log-json=%t", w.LogJSON), + } + + return corev1.Container{ + Name: "consul-sidecar", + Image: w.ImageConsulK8S, + VolumeMounts: []corev1.VolumeMount{ + { + Name: volumeName, + MountPath: "/consul/connect-inject", + }, + }, + Command: command, + Resources: resources, + }, nil +} + +func (w *MeshWebhook) consulSidecarResources(pod corev1.Pod) (corev1.ResourceRequirements, error) { + resources := corev1.ResourceRequirements{ + Limits: corev1.ResourceList{}, + Requests: corev1.ResourceList{}, + } + // zeroQuantity is used for comparison to see if a quantity was explicitly + // set. + var zeroQuantity resource.Quantity + + // NOTE: We only want to set the limit/request if the default or annotation + // was explicitly set. If it's not explicitly set, it will be the zero value + // which would show up in the pod spec as being explicitly set to zero if we + // set that key, e.g. "cpu" to zero. + // We want it to not show up in the pod spec at all if it's not explicitly + // set so that users aren't wondering why it's set to 0 when they didn't specify + // a request/limit. If they have explicitly set it to 0 then it will be set + // to 0 in the pod spec because we're doing a comparison to the zero-valued + // struct. + + // CPU Limit.
+ if anno, ok := pod.Annotations[annotationConsulSidecarCPULimit]; ok { + cpuLimit, err := resource.ParseQuantity(anno) + if err != nil { + return corev1.ResourceRequirements{}, fmt.Errorf("parsing annotation %s:%q: %s", annotationConsulSidecarCPULimit, anno, err) + } + resources.Limits[corev1.ResourceCPU] = cpuLimit + } else if w.DefaultConsulSidecarResources.Limits[corev1.ResourceCPU] != zeroQuantity { + resources.Limits[corev1.ResourceCPU] = w.DefaultConsulSidecarResources.Limits[corev1.ResourceCPU] + } + + // CPU Request. + if anno, ok := pod.Annotations[annotationConsulSidecarCPURequest]; ok { + cpuRequest, err := resource.ParseQuantity(anno) + if err != nil { + return corev1.ResourceRequirements{}, fmt.Errorf("parsing annotation %s:%q: %s", annotationConsulSidecarCPURequest, anno, err) + } + resources.Requests[corev1.ResourceCPU] = cpuRequest + } else if w.DefaultConsulSidecarResources.Requests[corev1.ResourceCPU] != zeroQuantity { + resources.Requests[corev1.ResourceCPU] = w.DefaultConsulSidecarResources.Requests[corev1.ResourceCPU] + } + + // Memory Limit. + if anno, ok := pod.Annotations[annotationConsulSidecarMemoryLimit]; ok { + memoryLimit, err := resource.ParseQuantity(anno) + if err != nil { + return corev1.ResourceRequirements{}, fmt.Errorf("parsing annotation %s:%q: %s", annotationConsulSidecarMemoryLimit, anno, err) + } + resources.Limits[corev1.ResourceMemory] = memoryLimit + } else if w.DefaultConsulSidecarResources.Limits[corev1.ResourceMemory] != zeroQuantity { + resources.Limits[corev1.ResourceMemory] = w.DefaultConsulSidecarResources.Limits[corev1.ResourceMemory] + } + + // Memory Request. + if anno, ok := pod.Annotations[annotationConsulSidecarMemoryRequest]; ok { + memoryRequest, err := resource.ParseQuantity(anno) + if err != nil { + return corev1.ResourceRequirements{}, fmt.Errorf("parsing annotation %s:%q: %s", annotationConsulSidecarMemoryRequest, anno, err) + } + resources.Requests[corev1.ResourceMemory] = memoryRequest + } else if w.DefaultConsulSidecarResources.Requests[corev1.ResourceMemory] != zeroQuantity { + resources.Requests[corev1.ResourceMemory] = w.DefaultConsulSidecarResources.Requests[corev1.ResourceMemory] + } + + return resources, nil +} diff --git a/control-plane/connect-inject/consul_sidecar_test.go b/control-plane/connect-inject/consul_sidecar_test.go new file mode 100644 index 0000000000..bafaad104a --- /dev/null +++ b/control-plane/connect-inject/consul_sidecar_test.go @@ -0,0 +1,343 @@ +package connectinject + +import ( + "testing" + + logrtest "github.com/go-logr/logr/testing" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Test that if the conditions for running a merged metrics server are true, +// that we pass the metrics flags to consul sidecar. 
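// A standalone sketch of the zero-quantity check used in consulSidecarResources
// above (assuming k8s.io/apimachinery): indexing a ResourceList with a missing
// key returns the zero Quantity, so comparing against a zero-valued Quantity
// distinguishes "explicitly configured" from "never configured".
//
//	var zero resource.Quantity
//	defaults := corev1.ResourceList{} // nothing configured
//	limits := corev1.ResourceList{}
//	if defaults[corev1.ResourceCPU] != zero { // false here: key is absent
//		limits[corev1.ResourceCPU] = defaults[corev1.ResourceCPU]
//	}
//	// limits stays empty instead of rendering as an explicit "cpu: 0".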
+func TestConsulSidecar_MetricsFlags(t *testing.T) { + meshWebhook := MeshWebhook{ + Log: logrtest.TestLogger{T: t}, + ImageConsulK8S: "hashicorp/consul-k8s:9.9.9", + MetricsConfig: MetricsConfig{ + DefaultEnableMetrics: true, + DefaultEnableMetricsMerging: true, + }, + } + container, err := meshWebhook.consulSidecar(corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + annotationMergedMetricsPort: "20100", + annotationServiceMetricsPort: "8080", + annotationServiceMetricsPath: "/metrics", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + }, + }, + }, + }) + + require.NoError(t, err) + require.Contains(t, container.Command, "-enable-metrics-merging=true") + require.Contains(t, container.Command, "-merged-metrics-port=20100") + require.Contains(t, container.Command, "-service-metrics-port=8080") + require.Contains(t, container.Command, "-service-metrics-path=/metrics") +} + +func TestHandlerConsulSidecar_Resources(t *testing.T) { + mem1 := resource.MustParse("100Mi") + mem2 := resource.MustParse("200Mi") + cpu1 := resource.MustParse("100m") + cpu2 := resource.MustParse("200m") + zero := resource.MustParse("0") + + cases := map[string]struct { + meshWebhook MeshWebhook + annotations map[string]string + expResources corev1.ResourceRequirements + expErr string + }{ + "no defaults, no annotations": { + meshWebhook: MeshWebhook{ + Log: logrtest.TestLogger{T: t}, + ImageConsulK8S: "hashicorp/consul-k8s:9.9.9", + MetricsConfig: MetricsConfig{ + DefaultEnableMetrics: true, + DefaultEnableMetricsMerging: true, + }, + }, + annotations: map[string]string{ + annotationMergedMetricsPort: "20100", + annotationServiceMetricsPort: "8080", + annotationServiceMetricsPath: "/metrics", + }, + expResources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{}, + Requests: corev1.ResourceList{}, + }, + }, + "all defaults, no annotations": { + meshWebhook: MeshWebhook{ + Log: logrtest.TestLogger{T: t}, + ImageConsulK8S: "hashicorp/consul-k8s:9.9.9", + MetricsConfig: MetricsConfig{ + DefaultEnableMetrics: true, + DefaultEnableMetricsMerging: true, + }, + DefaultConsulSidecarResources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: cpu1, + corev1.ResourceMemory: mem1, + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: cpu2, + corev1.ResourceMemory: mem2, + }, + }, + }, + annotations: map[string]string{ + annotationMergedMetricsPort: "20100", + annotationServiceMetricsPort: "8080", + annotationServiceMetricsPath: "/metrics", + }, + expResources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: cpu2, + corev1.ResourceMemory: mem2, + }, + Requests: corev1.ResourceList{ + corev1.ResourceCPU: cpu1, + corev1.ResourceMemory: mem1, + }, + }, + }, + "no defaults, all annotations": { + meshWebhook: MeshWebhook{ + Log: logrtest.TestLogger{T: t}, + ImageConsulK8S: "hashicorp/consul-k8s:9.9.9", + MetricsConfig: MetricsConfig{ + DefaultEnableMetrics: true, + DefaultEnableMetricsMerging: true, + }, + }, + annotations: map[string]string{ + annotationMergedMetricsPort: "20100", + annotationServiceMetricsPort: "8080", + annotationServiceMetricsPath: "/metrics", + annotationConsulSidecarCPURequest: "100m", + annotationConsulSidecarMemoryRequest: "100Mi", + annotationConsulSidecarCPULimit: "200m", + annotationConsulSidecarMemoryLimit: "200Mi", + }, + expResources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: cpu2, + corev1.ResourceMemory: mem2, + }, 
+ Requests: corev1.ResourceList{ + corev1.ResourceCPU: cpu1, + corev1.ResourceMemory: mem1, + }, + }, + }, + "annotations override defaults": { + meshWebhook: MeshWebhook{ + Log: logrtest.TestLogger{T: t}, + ImageConsulK8S: "hashicorp/consul-k8s:9.9.9", + MetricsConfig: MetricsConfig{ + DefaultEnableMetrics: true, + DefaultEnableMetricsMerging: true, + }, + DefaultConsulSidecarResources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: zero, + corev1.ResourceMemory: zero, + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: zero, + corev1.ResourceMemory: zero, + }, + }, + }, + annotations: map[string]string{ + annotationMergedMetricsPort: "20100", + annotationServiceMetricsPort: "8080", + annotationServiceMetricsPath: "/metrics", + annotationConsulSidecarCPURequest: "100m", + annotationConsulSidecarMemoryRequest: "100Mi", + annotationConsulSidecarCPULimit: "200m", + annotationConsulSidecarMemoryLimit: "200Mi", + }, + expResources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: cpu2, + corev1.ResourceMemory: mem2, + }, + Requests: corev1.ResourceList{ + corev1.ResourceCPU: cpu1, + corev1.ResourceMemory: mem1, + }, + }, + }, + "defaults set to zero, no annotations": { + meshWebhook: MeshWebhook{ + Log: logrtest.TestLogger{T: t}, + ImageConsulK8S: "hashicorp/consul-k8s:9.9.9", + MetricsConfig: MetricsConfig{ + DefaultEnableMetrics: true, + DefaultEnableMetricsMerging: true, + }, + DefaultConsulSidecarResources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: zero, + corev1.ResourceMemory: zero, + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: zero, + corev1.ResourceMemory: zero, + }, + }, + }, + annotations: map[string]string{ + annotationMergedMetricsPort: "20100", + annotationServiceMetricsPort: "8080", + annotationServiceMetricsPath: "/metrics", + }, + expResources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: zero, + corev1.ResourceMemory: zero, + }, + Requests: corev1.ResourceList{ + corev1.ResourceCPU: zero, + corev1.ResourceMemory: zero, + }, + }, + }, + "annotations set to 0": { + meshWebhook: MeshWebhook{ + Log: logrtest.TestLogger{T: t}, + ImageConsulK8S: "hashicorp/consul-k8s:9.9.9", + MetricsConfig: MetricsConfig{ + DefaultEnableMetrics: true, + DefaultEnableMetricsMerging: true, + }, + }, + annotations: map[string]string{ + annotationMergedMetricsPort: "20100", + annotationServiceMetricsPort: "8080", + annotationServiceMetricsPath: "/metrics", + annotationConsulSidecarCPURequest: "0", + annotationConsulSidecarMemoryRequest: "0", + annotationConsulSidecarCPULimit: "0", + annotationConsulSidecarMemoryLimit: "0", + }, + expResources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: zero, + corev1.ResourceMemory: zero, + }, + Requests: corev1.ResourceList{ + corev1.ResourceCPU: zero, + corev1.ResourceMemory: zero, + }, + }, + }, + "invalid cpu request": { + meshWebhook: MeshWebhook{ + Log: logrtest.TestLogger{T: t}, + ImageConsulK8S: "hashicorp/consul-k8s:9.9.9", + MetricsConfig: MetricsConfig{ + DefaultEnableMetrics: true, + DefaultEnableMetricsMerging: true, + }, + }, + annotations: map[string]string{ + annotationMergedMetricsPort: "20100", + annotationServiceMetricsPort: "8080", + annotationServiceMetricsPath: "/metrics", + annotationConsulSidecarCPURequest: "invalid", + }, + expErr: "parsing annotation consul.hashicorp.com/consul-sidecar-cpu-request:\"invalid\": quantities must match the 
regular expression", + }, + "invalid cpu limit": { + meshWebhook: MeshWebhook{ + Log: logrtest.TestLogger{T: t}, + ImageConsulK8S: "hashicorp/consul-k8s:9.9.9", + MetricsConfig: MetricsConfig{ + DefaultEnableMetrics: true, + DefaultEnableMetricsMerging: true, + }, + }, + annotations: map[string]string{ + annotationMergedMetricsPort: "20100", + annotationServiceMetricsPort: "8080", + annotationServiceMetricsPath: "/metrics", + annotationConsulSidecarCPULimit: "invalid", + }, + expErr: "parsing annotation consul.hashicorp.com/consul-sidecar-cpu-limit:\"invalid\": quantities must match the regular expression", + }, + "invalid memory request": { + meshWebhook: MeshWebhook{ + Log: logrtest.TestLogger{T: t}, + ImageConsulK8S: "hashicorp/consul-k8s:9.9.9", + MetricsConfig: MetricsConfig{ + DefaultEnableMetrics: true, + DefaultEnableMetricsMerging: true, + }, + }, + annotations: map[string]string{ + annotationMergedMetricsPort: "20100", + annotationServiceMetricsPort: "8080", + annotationServiceMetricsPath: "/metrics", + annotationConsulSidecarMemoryRequest: "invalid", + }, + expErr: "parsing annotation consul.hashicorp.com/consul-sidecar-memory-request:\"invalid\": quantities must match the regular expression", + }, + "invalid memory limit": { + meshWebhook: MeshWebhook{ + Log: logrtest.TestLogger{T: t}, + ImageConsulK8S: "hashicorp/consul-k8s:9.9.9", + MetricsConfig: MetricsConfig{ + DefaultEnableMetrics: true, + DefaultEnableMetricsMerging: true, + }, + }, + annotations: map[string]string{ + annotationMergedMetricsPort: "20100", + annotationServiceMetricsPort: "8080", + annotationServiceMetricsPath: "/metrics", + annotationConsulSidecarMemoryLimit: "invalid", + }, + expErr: "parsing annotation consul.hashicorp.com/consul-sidecar-memory-limit:\"invalid\": quantities must match the regular expression", + }, + } + + for name, c := range cases { + t.Run(name, func(tt *testing.T) { + require := require.New(tt) + pod := corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: c.annotations, + }, + + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + }, + }, + }, + } + container, err := c.meshWebhook.consulSidecar(pod) + if c.expErr != "" { + require.NotNil(err) + require.Contains(err.Error(), c.expErr) + } else { + require.NoError(err) + require.Equal(c.expResources, container.Resources) + } + }) + } +} diff --git a/control-plane/connect-inject/webhook/container_env.go b/control-plane/connect-inject/container_env.go similarity index 71% rename from control-plane/connect-inject/webhook/container_env.go rename to control-plane/connect-inject/container_env.go index 7c65dcd77c..4ad2b156cd 100644 --- a/control-plane/connect-inject/webhook/container_env.go +++ b/control-plane/connect-inject/container_env.go @@ -1,17 +1,15 @@ -package webhook +package connectinject import ( "fmt" "strconv" "strings" - "github.com/hashicorp/consul-k8s/control-plane/connect-inject/common" - "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" corev1 "k8s.io/api/core/v1" ) func (w *MeshWebhook) containerEnvVars(pod corev1.Pod) []corev1.EnvVar { - raw, ok := pod.Annotations[constants.AnnotationUpstreams] + raw, ok := pod.Annotations[annotationUpstreams] if !ok || raw == "" { return []corev1.EnvVar{} } @@ -19,7 +17,7 @@ func (w *MeshWebhook) containerEnvVars(pod corev1.Pod) []corev1.EnvVar { var result []corev1.EnvVar for _, raw := range strings.Split(raw, ",") { parts := strings.SplitN(raw, ":", 3) - port, _ := common.PortValue(pod, strings.TrimSpace(parts[1])) + port, _ := 
portValue(pod, strings.TrimSpace(parts[1])) if port > 0 { name := strings.TrimSpace(parts[0]) name = strings.ToUpper(strings.Replace(name, "-", "_", -1)) diff --git a/control-plane/connect-inject/webhook/container_env_test.go b/control-plane/connect-inject/container_env_test.go similarity index 82% rename from control-plane/connect-inject/webhook/container_env_test.go rename to control-plane/connect-inject/container_env_test.go index 62200e7bbd..cb29b6e742 100644 --- a/control-plane/connect-inject/webhook/container_env_test.go +++ b/control-plane/connect-inject/container_env_test.go @@ -1,9 +1,8 @@ -package webhook +package connectinject import ( "testing" - "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -33,8 +32,8 @@ func TestContainerEnvVars(t *testing.T) { envVars := w.containerEnvVars(corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - constants.AnnotationService: "foo", - constants.AnnotationUpstreams: tt.Upstream, + annotationService: "foo", + annotationUpstreams: tt.Upstream, }, }, }) diff --git a/control-plane/connect-inject/container_init.go b/control-plane/connect-inject/container_init.go new file mode 100644 index 0000000000..47dabbbe0b --- /dev/null +++ b/control-plane/connect-inject/container_init.go @@ -0,0 +1,526 @@ +package connectinject + +import ( + "bytes" + "fmt" + "os" + "strconv" + "strings" + "text/template" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/utils/pointer" +) + +const ( + InjectInitCopyContainerName = "copy-consul-bin" + InjectInitContainerName = "consul-connect-inject-init" + rootUserAndGroupID = 0 + envoyUserAndGroupID = 5995 + initContainersUserAndGroupID = 5996 + netAdminCapability = "NET_ADMIN" + dnsServiceHostEnvSuffix = "DNS_SERVICE_HOST" +) + +type initContainerCommandData struct { + ServiceName string + ServiceAccountName string + AuthMethod string + // ConsulPartition is the Consul admin partition to register the service + // and proxy in. An empty string indicates partitions are not + // enabled in Consul (necessary for OSS). + ConsulPartition string + // ConsulNamespace is the Consul namespace to register the service + // and proxy in. An empty string indicates namespaces are not + // enabled in Consul (necessary for OSS). + ConsulNamespace string + NamespaceMirroringEnabled bool + + // The PEM-encoded CA certificate to use when + // communicating with Consul clients. + ConsulCACert string + // EnableMetrics adds a listener to Envoy where Prometheus will scrape + // metrics from. + EnableMetrics bool + // PrometheusScrapePath configures the path on the listener on Envoy where + // Prometheus will scrape metrics from. + PrometheusScrapePath string + // PrometheusBackendPort configures where the listener on Envoy will point to. + PrometheusBackendPort string + // The file paths to use for configuring TLS on the Prometheus metrics endpoint. + PrometheusCAFile string + PrometheusCAPath string + PrometheusCertFile string + PrometheusKeyFile string + // EnvoyUID is the Linux user id that will be used when tproxy is enabled. + EnvoyUID int + + // EnableProxyHealthChecks configures a readiness endpoint on the envoy sidecar. + EnableProxyHealthChecks bool + // EnvoyHealthCheckPort is the port on which the readiness endpoint is configured + // on the envoy sidecar.
+ EnvoyHealthCheckPort int + + // EnableTransparentProxy configures this init container to run in transparent proxy mode, + // i.e. run the consul connect redirect-traffic command and add the required privileges to the + // container to do that. + EnableTransparentProxy bool + + // EnableCNI configures this init container to skip the redirect-traffic command as traffic + // redirection is handled by the CNI plugin on pod creation. + EnableCNI bool + + // TProxyExcludeInboundPorts is a list of inbound ports to exclude from traffic redirection via + // the consul connect redirect-traffic command. + TProxyExcludeInboundPorts []string + + // TProxyExcludeOutboundPorts is a list of outbound ports to exclude from traffic redirection via + // the consul connect redirect-traffic command. + TProxyExcludeOutboundPorts []string + + // TProxyExcludeOutboundCIDRs is a list of outbound CIDRs to exclude from traffic redirection via + // the consul connect redirect-traffic command. + TProxyExcludeOutboundCIDRs []string + + // TProxyExcludeUIDs is a list of additional user IDs to exclude from traffic redirection via + // the consul connect redirect-traffic command. + TProxyExcludeUIDs []string + + // ConsulDNSClusterIP is the IP of the Consul DNS Service. + ConsulDNSClusterIP string + + // MultiPort determines whether this is a multi port Pod, which configures the init container to be specific to one + // of the services on the multi port Pod. + MultiPort bool + + // EnvoyAdminPort configures the admin port of the Envoy sidecar. This will be unique per service in a multi port + // Pod. + EnvoyAdminPort int + + // BearerTokenFile configures where the service account token can be found. This will be unique per service in a + // multi port Pod. + BearerTokenFile string + + // ConsulAPITimeout is the duration that the consul API client will + // wait for a response from the API before cancelling the request. + ConsulAPITimeout time.Duration +} + +// initCopyContainer returns the init container spec for the copy container which places +// the consul binary into the shared volume. +func (w *MeshWebhook) initCopyContainer() corev1.Container { + // Copy the Consul binary from the image to the shared volume. + cmd := "cp /bin/consul /consul/connect-inject/consul" + container := corev1.Container{ + Name: InjectInitCopyContainerName, + Image: w.ImageConsul, + Resources: w.InitContainerResources, + VolumeMounts: []corev1.VolumeMount{ + { + Name: volumeName, + MountPath: "/consul/connect-inject", + }, + }, + Command: []string{"/bin/sh", "-ec", cmd}, + } + // If running on OpenShift, don't set the security context and instead let OpenShift set a random user/group for us. + if !w.EnableOpenShift { + container.SecurityContext = &corev1.SecurityContext{ + // Set RunAsUser because the default user for the consul container is root and we want to run non-root. + RunAsUser: pointer.Int64(initContainersUserAndGroupID), + RunAsGroup: pointer.Int64(initContainersUserAndGroupID), + RunAsNonRoot: pointer.Bool(true), + ReadOnlyRootFilesystem: pointer.Bool(true), + } + } + return container +} + +// containerInit returns the init container spec for connect-init that polls for the service and the connect proxy service to be registered +// so that it can save the proxy service id to the shared volume and bootstrap Envoy with the proxy-id. +func (w *MeshWebhook) containerInit(namespace corev1.Namespace, pod corev1.Pod, mpi multiPortInfo) (corev1.Container, error) { + // Check if tproxy is enabled on this pod.
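// Resolution order, as in the removed common.TransparentProxyEnabled helper
// above: the pod's consul.hashicorp.com/transparent-proxy annotation wins,
// then the namespace label of the same name, then the webhook's global
// EnableTransparentProxy setting; an unparseable boolean surfaces as an error.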
+ tproxyEnabled, err := transparentProxyEnabled(namespace, pod, w.EnableTransparentProxy) + if err != nil { + return corev1.Container{}, err + } + + dnsEnabled, err := consulDNSEnabled(namespace, pod, w.EnableConsulDNS) + if err != nil { + return corev1.Container{}, err + } + + var consulDNSClusterIP string + if dnsEnabled { + // If Consul DNS is enabled, we find the environment variable that has the value + // of the ClusterIP of the Consul DNS Service. constructDNSServiceHostName returns + // the name of the env variable whose value is the ClusterIP of the Consul DNS Service. + consulDNSClusterIP = os.Getenv(w.constructDNSServiceHostName()) + if consulDNSClusterIP == "" { + return corev1.Container{}, fmt.Errorf("environment variable %s is not found", w.constructDNSServiceHostName()) + } + } + + multiPort := mpi.serviceName != "" + + data := initContainerCommandData{ + AuthMethod: w.AuthMethod, + ConsulPartition: w.ConsulPartition, + ConsulNamespace: w.consulNamespace(namespace.Name), + NamespaceMirroringEnabled: w.EnableK8SNSMirroring, + ConsulCACert: w.ConsulCACert, + EnableTransparentProxy: tproxyEnabled, + EnableCNI: w.EnableCNI, + EnableProxyHealthChecks: useProxyHealthCheck(pod), + EnvoyHealthCheckPort: proxyDefaultHealthPort + mpi.serviceIndex, + TProxyExcludeInboundPorts: splitCommaSeparatedItemsFromAnnotation(annotationTProxyExcludeInboundPorts, pod), + TProxyExcludeOutboundPorts: splitCommaSeparatedItemsFromAnnotation(annotationTProxyExcludeOutboundPorts, pod), + TProxyExcludeOutboundCIDRs: splitCommaSeparatedItemsFromAnnotation(annotationTProxyExcludeOutboundCIDRs, pod), + TProxyExcludeUIDs: splitCommaSeparatedItemsFromAnnotation(annotationTProxyExcludeUIDs, pod), + ConsulDNSClusterIP: consulDNSClusterIP, + EnvoyUID: envoyUserAndGroupID, + MultiPort: multiPort, + EnvoyAdminPort: 19000 + mpi.serviceIndex, + ConsulAPITimeout: w.ConsulAPITimeout, + } + + // Create expected volume mounts + volMounts := []corev1.VolumeMount{ + { + Name: volumeName, + MountPath: "/consul/connect-inject", + }, + } + + if multiPort { + data.ServiceName = mpi.serviceName + } else { + data.ServiceName = pod.Annotations[annotationService] + } + if w.AuthMethod != "" { + if multiPort { + // If multi port then we require that the service account name + // matches the service name. + data.ServiceAccountName = mpi.serviceName + } else { + data.ServiceAccountName = pod.Spec.ServiceAccountName + } + // Extract the service account token's volume mount + saTokenVolumeMount, bearerTokenFile, err := findServiceAccountVolumeMount(pod, multiPort, mpi.serviceName) + if err != nil { + return corev1.Container{}, err + } + data.BearerTokenFile = bearerTokenFile + + // Append to volume mounts + volMounts = append(volMounts, saTokenVolumeMount) + } + + // This determines how to configure the consul connect envoy command: what + // metrics backend to use and what path to expose on the + // envoy_prometheus_bind_addr listener for scraping. + metricsServer, err := w.MetricsConfig.shouldRunMergedMetricsServer(pod) + if err != nil { + return corev1.Container{}, err + } + if metricsServer { + prometheusScrapePath := w.MetricsConfig.prometheusScrapePath(pod) + mergedMetricsPort, err := w.MetricsConfig.mergedMetricsPort(pod) + if err != nil { + return corev1.Container{}, err + } + data.PrometheusScrapePath = prometheusScrapePath + data.PrometheusBackendPort = mergedMetricsPort + } + // Pull the TLS config from the relevant annotations. 
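+	// Each value is optional on its own, but the validation below requires a CA
+	// (file or path), a cert file, and a key file as soon as any one of them is
+	// set, so the metrics endpoint is never served with a partial TLS config.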
+ if raw, ok := pod.Annotations[annotationPrometheusCAFile]; ok && raw != "" { + data.PrometheusCAFile = raw + } + if raw, ok := pod.Annotations[annotationPrometheusCAPath]; ok && raw != "" { + data.PrometheusCAPath = raw + } + if raw, ok := pod.Annotations[annotationPrometheusCertFile]; ok && raw != "" { + data.PrometheusCertFile = raw + } + if raw, ok := pod.Annotations[annotationPrometheusKeyFile]; ok && raw != "" { + data.PrometheusKeyFile = raw + } + + // Validate required Prometheus TLS config is present if set. + if data.PrometheusCertFile != "" || data.PrometheusKeyFile != "" || data.PrometheusCAFile != "" || data.PrometheusCAPath != "" { + if data.PrometheusCAFile == "" && data.PrometheusCAPath == "" { + return corev1.Container{}, fmt.Errorf("Must set one of %q or %q when providing prometheus TLS config", annotationPrometheusCAFile, annotationPrometheusCAPath) + } + if data.PrometheusCertFile == "" { + return corev1.Container{}, fmt.Errorf("Must set %q when providing prometheus TLS config", annotationPrometheusCertFile) + } + if data.PrometheusKeyFile == "" { + return corev1.Container{}, fmt.Errorf("Must set %q when providing prometheus TLS config", annotationPrometheusKeyFile) + } + } + + // Render the command + var buf bytes.Buffer + tpl := template.Must(template.New("root").Parse(strings.TrimSpace( + initContainerCommandTpl))) + err = tpl.Execute(&buf, &data) + if err != nil { + return corev1.Container{}, err + } + + initContainerName := InjectInitContainerName + if multiPort { + initContainerName = fmt.Sprintf("%s-%s", InjectInitContainerName, mpi.serviceName) + } + container := corev1.Container{ + Name: initContainerName, + Image: w.ImageConsulK8S, + Env: []corev1.EnvVar{ + { + Name: "HOST_IP", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{FieldPath: "status.hostIP"}, + }, + }, + { + Name: "POD_IP", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{FieldPath: "status.podIP"}, + }, + }, + { + Name: "POD_NAME", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.name"}, + }, + }, + { + Name: "POD_NAMESPACE", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.namespace"}, + }, + }, + }, + Resources: w.InitContainerResources, + VolumeMounts: volMounts, + Command: []string{"/bin/sh", "-ec", buf.String()}, + } + + if tproxyEnabled { + // Running consul connect redirect-traffic with iptables + // requires both being a root user and having NET_ADMIN capability. + if !w.EnableCNI { + container.SecurityContext = &corev1.SecurityContext{ + RunAsUser: pointer.Int64(rootUserAndGroupID), + RunAsGroup: pointer.Int64(rootUserAndGroupID), + // RunAsNonRoot overrides any setting in the Pod so that we can still run as root here as required. 
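+				// Privileged together with the NET_ADMIN capability is what lets this
+				// init container rewrite the pod's iptables rules; the CNI branch below
+				// drops all capabilities and runs unprivileged instead.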
+				RunAsNonRoot: pointer.Bool(false),
+				Privileged:   pointer.Bool(true),
+				Capabilities: &corev1.Capabilities{
+					Add: []corev1.Capability{netAdminCapability},
+				},
+			}
+		} else {
+			container.SecurityContext = &corev1.SecurityContext{
+				RunAsUser:    pointer.Int64(initContainersUserAndGroupID),
+				RunAsGroup:   pointer.Int64(initContainersUserAndGroupID),
+				RunAsNonRoot: pointer.Bool(true),
+				Privileged:   pointer.Bool(false),
+				Capabilities: &corev1.Capabilities{
+					Drop: []corev1.Capability{"ALL"},
+				},
+			}
+		}
+	}
+
+	return container, nil
+}
+
+// constructDNSServiceHostName uses the resource prefix and the DNS Service hostname suffix to construct the
+// key of the env variable whose value is the cluster IP of the Consul DNS Service.
+// It translates "resource-prefix" into "RESOURCE_PREFIX_DNS_SERVICE_HOST".
+func (w *MeshWebhook) constructDNSServiceHostName() string {
+	upcaseResourcePrefix := strings.ToUpper(w.ResourcePrefix)
+	upcaseResourcePrefixWithUnderscores := strings.ReplaceAll(upcaseResourcePrefix, "-", "_")
+	return strings.Join([]string{upcaseResourcePrefixWithUnderscores, dnsServiceHostEnvSuffix}, "_")
+}
+
+// transparentProxyEnabled returns true if transparent proxy should be enabled for this pod.
+// It returns an error when the annotation value cannot be parsed by strconv.ParseBool or if we are unable
+// to read the pod's namespace label when it exists.
+func transparentProxyEnabled(namespace corev1.Namespace, pod corev1.Pod, globalEnabled bool) (bool, error) {
+	// First check to see if the pod annotation exists to override the namespace or global settings.
+	if raw, ok := pod.Annotations[keyTransparentProxy]; ok {
+		return strconv.ParseBool(raw)
+	}
+	// Next see if the namespace has been defaulted.
+	if raw, ok := namespace.Labels[keyTransparentProxy]; ok {
+		return strconv.ParseBool(raw)
+	}
+	// Else fall back to the global default.
+	return globalEnabled, nil
+}
+
+// consulDNSEnabled returns true if Consul DNS should be enabled for this pod.
+// It returns an error when the annotation value cannot be parsed by strconv.ParseBool or if we are unable
+// to read the pod's namespace label when it exists.
+func consulDNSEnabled(namespace corev1.Namespace, pod corev1.Pod, globalEnabled bool) (bool, error) {
+	// First check to see if the pod annotation exists to override the namespace or global settings.
+	if raw, ok := pod.Annotations[keyConsulDNS]; ok {
+		return strconv.ParseBool(raw)
+	}
+	// Next see if the namespace has been defaulted.
+	if raw, ok := namespace.Labels[keyConsulDNS]; ok {
+		return strconv.ParseBool(raw)
+	}
+	// Else fall back to the global default.
+	return globalEnabled, nil
+}
+
+// splitCommaSeparatedItemsFromAnnotation takes an annotation and a pod
+// and returns the comma-separated value of the annotation as a list of strings.
+func splitCommaSeparatedItemsFromAnnotation(annotation string, pod corev1.Pod) []string {
+	var items []string
+	if raw, ok := pod.Annotations[annotation]; ok {
+		items = append(items, strings.Split(raw, ",")...)
+	}
+
+	return items
+}
+
+// initContainerCommandTpl is the template for the command executed by
+// the init container.
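+// It renders a shell script with up to three stages: consul-k8s connect-init
+// (polling for service registration and, with an auth method, ACL login),
+// consul connect envoy -bootstrap (writing the Envoy bootstrap config to the
+// shared volume), and, when transparent proxy is enabled without the CNI
+// plugin, consul connect redirect-traffic.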
+const initContainerCommandTpl = `
+{{- if .ConsulCACert}}
+export CONSUL_HTTP_ADDR="https://${HOST_IP}:8501"
+export CONSUL_GRPC_ADDR="https://${HOST_IP}:8502"
+export CONSUL_CACERT=/consul/connect-inject/consul-ca.pem
+cat <<EOF >/consul/connect-inject/consul-ca.pem
+{{ .ConsulCACert }}
+EOF
+{{- else}}
+export CONSUL_HTTP_ADDR="${HOST_IP}:8500"
+export CONSUL_GRPC_ADDR="${HOST_IP}:8502"
+{{- end}}
+consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \
+  -consul-api-timeout={{ .ConsulAPITimeout }} \
+  {{- if .AuthMethod }}
+  -acl-auth-method="{{ .AuthMethod }}" \
+  -service-account-name="{{ .ServiceAccountName }}" \
+  -service-name="{{ .ServiceName }}" \
+  -bearer-token-file={{ .BearerTokenFile }} \
+  {{- if .MultiPort }}
+  -acl-token-sink=/consul/connect-inject/acl-token-{{ .ServiceName }} \
+  {{- end }}
+  {{- if .ConsulNamespace }}
+  {{- if .NamespaceMirroringEnabled }}
+  {{- /* If namespace mirroring is enabled, the auth method is
+         defined in the default namespace */}}
+  -auth-method-namespace="default" \
+  {{- else }}
+  -auth-method-namespace="{{ .ConsulNamespace }}" \
+  {{- end }}
+  {{- end }}
+  {{- end }}
+  {{- if .MultiPort }}
+  -multiport=true \
+  -proxy-id-file=/consul/connect-inject/proxyid-{{ .ServiceName }} \
+  {{- if not .AuthMethod }}
+  -service-name="{{ .ServiceName }}" \
+  {{- end }}
+  {{- end }}
+  {{- if .ConsulPartition }}
+  -partition="{{ .ConsulPartition }}" \
+  {{- end }}
+  {{- if .ConsulNamespace }}
+  -consul-service-namespace="{{ .ConsulNamespace }}" \
+  {{- end }}

+# Generate the envoy bootstrap code
+/consul/connect-inject/consul connect envoy \
+  {{- if .MultiPort }}
+  -proxy-id="$(cat /consul/connect-inject/proxyid-{{.ServiceName}})" \
+  {{- else }}
+  -proxy-id="$(cat /consul/connect-inject/proxyid)" \
+  {{- end }}
+  {{- if .EnableProxyHealthChecks }}
+  -envoy-ready-bind-address="${POD_IP}" \
+  -envoy-ready-bind-port={{ .EnvoyHealthCheckPort }} \
+  {{- end }}
+  {{- if .PrometheusScrapePath }}
+  -prometheus-scrape-path="{{ .PrometheusScrapePath }}" \
+  {{- end }}
+  {{- if .PrometheusBackendPort }}
+  -prometheus-backend-port="{{ .PrometheusBackendPort }}" \
+  {{- end }}
+  {{- if .PrometheusCAFile }}
+  -prometheus-ca-file="{{ .PrometheusCAFile }}" \
+  {{- end }}
+  {{- if .PrometheusCAPath }}
+  -prometheus-ca-path="{{ .PrometheusCAPath }}" \
+  {{- end }}
+  {{- if .PrometheusCertFile }}
+  -prometheus-cert-file="{{ .PrometheusCertFile }}" \
+  {{- end }}
+  {{- if .PrometheusKeyFile }}
+  -prometheus-key-file="{{ .PrometheusKeyFile }}" \
+  {{- end }}
+  {{- if .AuthMethod }}
+  {{- if .MultiPort }}
+  -token-file="/consul/connect-inject/acl-token-{{ .ServiceName }}" \
+  {{- else }}
+  -token-file="/consul/connect-inject/acl-token" \
+  {{- end }}
+  {{- end }}
+  {{- if .ConsulPartition }}
+  -partition="{{ .ConsulPartition }}" \
+  {{- end }}
+  {{- if .ConsulNamespace }}
+  -namespace="{{ .ConsulNamespace }}" \
+  {{- end }}
+  {{- if .MultiPort }}
+  -admin-bind=127.0.0.1:{{ .EnvoyAdminPort }} \
+  {{- end }}
+  -bootstrap > {{ if .MultiPort }}/consul/connect-inject/envoy-bootstrap-{{.ServiceName}}.yaml{{ else }}/consul/connect-inject/envoy-bootstrap.yaml{{ end }}


+{{- if .EnableTransparentProxy }}
+{{- if not .EnableCNI }}
+{{- /* The newline below is intentional to allow extra space
+       in the rendered template between this and the previous commands. */}}

+# Apply traffic redirection rules.
+/consul/connect-inject/consul connect redirect-traffic \ + {{- if .AuthMethod }} + -token-file="/consul/connect-inject/acl-token" \ + {{- end }} + {{- if .ConsulPartition }} + -partition="{{ .ConsulPartition }}" \ + {{- end }} + {{- if .ConsulNamespace }} + -namespace="{{ .ConsulNamespace }}" \ + {{- end }} + {{- if .ConsulDNSClusterIP }} + -consul-dns-ip="{{ .ConsulDNSClusterIP }}" \ + {{- end }} + {{- range .TProxyExcludeInboundPorts }} + -exclude-inbound-port="{{ . }}" \ + {{- end }} + {{- range .TProxyExcludeOutboundPorts }} + -exclude-outbound-port="{{ . }}" \ + {{- end }} + {{- if .EnableProxyHealthChecks }} + -exclude-inbound-port={{ .EnvoyHealthCheckPort }} \ + {{- end }} + {{- range .TProxyExcludeOutboundCIDRs }} + -exclude-outbound-cidr="{{ . }}" \ + {{- end }} + {{- range .TProxyExcludeUIDs }} + -exclude-uid="{{ . }}" \ + {{- end }} + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -proxy-uid={{ .EnvoyUID }} +{{- end }} +{{- end }} +` diff --git a/control-plane/connect-inject/container_init_test.go b/control-plane/connect-inject/container_init_test.go new file mode 100644 index 0000000000..f4ab3eed47 --- /dev/null +++ b/control-plane/connect-inject/container_init_test.go @@ -0,0 +1,1325 @@ +package connectinject + +import ( + "fmt" + "os" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/pointer" +) + +const k8sNamespace = "k8snamespace" + +func TestHandlerContainerInit(t *testing.T) { + minimal := func() *corev1.Pod { + return &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "test-namespace", + Annotations: map[string]string{ + annotationService: "foo", + }, + }, + + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + }, + { + Name: "web-side", + }, + }, + }, + Status: corev1.PodStatus{ + HostIP: "1.1.1.1", + PodIP: "2.2.2.2", + }, + } + } + + cases := []struct { + Name string + Pod func(*corev1.Pod) *corev1.Pod + Webhook MeshWebhook + Cmd string // Strings.Contains test + CmdNot string // Not contains + ErrStr string // Error contains + }{ + // The first test checks the whole template. Subsequent tests check + // the parts that change. 
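+		// Expected commands are asserted with strings.Contains (and NotContains
+		// for CmdNot) on the joined container command rather than full equality,
+		// so each fixture only pins down the fragment it cares about.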
+ { + "Whole template by default", + func(pod *corev1.Pod) *corev1.Pod { + pod.Annotations[annotationService] = "web" + return pod + }, + MeshWebhook{}, + `/bin/sh -ec +export CONSUL_HTTP_ADDR="${HOST_IP}:8500" +export CONSUL_GRPC_ADDR="${HOST_IP}:8502" +consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ + -consul-api-timeout=0s \ + +# Generate the envoy bootstrap code +/consul/connect-inject/consul connect envoy \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -bootstrap > /consul/connect-inject/envoy-bootstrap.yaml`, + "", + "", + }, + { + "Proxy Health Checks", + func(pod *corev1.Pod) *corev1.Pod { + pod.Annotations[annotationService] = "web" + pod.Annotations[annotationUseProxyHealthCheck] = "true" + return pod + }, + MeshWebhook{}, + `/bin/sh -ec +export CONSUL_HTTP_ADDR="${HOST_IP}:8500" +export CONSUL_GRPC_ADDR="${HOST_IP}:8502" +consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ + -consul-api-timeout=0s \ + +# Generate the envoy bootstrap code +/consul/connect-inject/consul connect envoy \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -envoy-ready-bind-address="${POD_IP}" \ + -envoy-ready-bind-port=21000 \ + -bootstrap > /consul/connect-inject/envoy-bootstrap.yaml`, + "", + "", + }, + { + "When auth method is set -service-account-name and -service-name are passed in", + func(pod *corev1.Pod) *corev1.Pod { + pod.Annotations[annotationService] = "web" + pod.Spec.ServiceAccountName = "a-service-account-name" + pod.Spec.Containers[0].VolumeMounts = []corev1.VolumeMount{ + { + Name: "sa", + MountPath: "/var/run/secrets/kubernetes.io/serviceaccount", + }, + } + return pod + }, + MeshWebhook{ + AuthMethod: "an-auth-method", + ConsulAPITimeout: 5 * time.Second, + }, + `/bin/sh -ec +export CONSUL_HTTP_ADDR="${HOST_IP}:8500" +export CONSUL_GRPC_ADDR="${HOST_IP}:8502" +consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ + -consul-api-timeout=5s \ + -acl-auth-method="an-auth-method" \ + -service-account-name="a-service-account-name" \ + -service-name="web" \ +`, + "", + "", + }, + { + "When running the merged metrics server, configures consul connect envoy command", + func(pod *corev1.Pod) *corev1.Pod { + // The annotations to enable metrics, enable merging, and + // service metrics port make the condition to run the merged + // metrics server true. When that is the case, + // prometheusScrapePath and mergedMetricsPort should get + // rendered as -prometheus-scrape-path and + // -prometheus-backend-port to the consul connect envoy command. 
+ pod.Annotations[annotationService] = "web" + pod.Annotations[annotationEnableMetrics] = "true" + pod.Annotations[annotationEnableMetricsMerging] = "true" + pod.Annotations[annotationMergedMetricsPort] = "20100" + pod.Annotations[annotationServiceMetricsPort] = "1234" + pod.Annotations[annotationPrometheusScrapePort] = "22222" + pod.Annotations[annotationPrometheusScrapePath] = "/scrape-path" + pod.Annotations[annotationPrometheusCAFile] = "/certs/ca.crt" + pod.Annotations[annotationPrometheusCAPath] = "/certs/ca/" + pod.Annotations[annotationPrometheusCertFile] = "/certs/server.crt" + pod.Annotations[annotationPrometheusKeyFile] = "/certs/key.pem" + return pod + }, + MeshWebhook{ + ConsulAPITimeout: 5 * time.Second, + }, + `# Generate the envoy bootstrap code +/consul/connect-inject/consul connect envoy \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -prometheus-scrape-path="/scrape-path" \ + -prometheus-backend-port="20100" \ + -prometheus-ca-file="/certs/ca.crt" \ + -prometheus-ca-path="/certs/ca/" \ + -prometheus-cert-file="/certs/server.crt" \ + -prometheus-key-file="/certs/key.pem" \ + -bootstrap > /consul/connect-inject/envoy-bootstrap.yaml`, + "", + "", + }, + { + "When providing Prometheus TLS config, missing CA gives an error", + func(pod *corev1.Pod) *corev1.Pod { + pod.Annotations[annotationService] = "web" + pod.Annotations[annotationEnableMetrics] = "true" + pod.Annotations[annotationEnableMetricsMerging] = "true" + pod.Annotations[annotationMergedMetricsPort] = "20100" + pod.Annotations[annotationPrometheusScrapePort] = "22222" + pod.Annotations[annotationPrometheusScrapePath] = "/scrape-path" + pod.Annotations[annotationPrometheusCertFile] = "/certs/server.crt" + pod.Annotations[annotationPrometheusKeyFile] = "/certs/key.pem" + return pod + }, + MeshWebhook{ + ConsulAPITimeout: 5 * time.Second, + }, + "", + "", + fmt.Sprintf("Must set one of %q or %q", annotationPrometheusCAFile, annotationPrometheusCAPath), + }, + { + "When providing Prometheus TLS config, missing cert gives an error", + func(pod *corev1.Pod) *corev1.Pod { + pod.Annotations[annotationService] = "web" + pod.Annotations[annotationEnableMetrics] = "true" + pod.Annotations[annotationEnableMetricsMerging] = "true" + pod.Annotations[annotationMergedMetricsPort] = "20100" + pod.Annotations[annotationPrometheusScrapePort] = "22222" + pod.Annotations[annotationPrometheusScrapePath] = "/scrape-path" + pod.Annotations[annotationPrometheusCAFile] = "/certs/ca.crt" + pod.Annotations[annotationPrometheusKeyFile] = "/certs/key.pem" + return pod + }, + MeshWebhook{ + ConsulAPITimeout: 5 * time.Second, + }, + "", + "", + fmt.Sprintf("Must set %q", annotationPrometheusCertFile), + }, + { + "When providing Prometheus TLS config, missing key gives an error", + func(pod *corev1.Pod) *corev1.Pod { + pod.Annotations[annotationService] = "web" + pod.Annotations[annotationEnableMetrics] = "true" + pod.Annotations[annotationEnableMetricsMerging] = "true" + pod.Annotations[annotationMergedMetricsPort] = "20100" + pod.Annotations[annotationPrometheusScrapePort] = "22222" + pod.Annotations[annotationPrometheusScrapePath] = "/scrape-path" + pod.Annotations[annotationPrometheusCAPath] = "/certs/ca/" + pod.Annotations[annotationPrometheusCertFile] = "/certs/server.crt" + return pod + }, + MeshWebhook{ + ConsulAPITimeout: 5 * time.Second, + }, + "", + "", + fmt.Sprintf("Must set %q", annotationPrometheusKeyFile), + }, + } + + for _, tt := range cases { + t.Run(tt.Name, func(t *testing.T) { + require := require.New(t) + + h := 
tt.Webhook + pod := *tt.Pod(minimal()) + container, err := h.containerInit(testNS, pod, multiPortInfo{}) + if tt.ErrStr == "" { + require.NoError(err) + } else { + require.Contains(err.Error(), tt.ErrStr) + } + actual := strings.Join(container.Command, " ") + require.Contains(actual, tt.Cmd) + if tt.CmdNot != "" { + require.NotContains(actual, tt.CmdNot) + } + }) + } +} + +func TestHandlerContainerInit_transparentProxy(t *testing.T) { + cases := map[string]struct { + globalEnabled bool + cniEnabled bool + annotations map[string]string + expectedContainsCmd string + expectedNotContainsCmd string + namespaceLabel map[string]string + }{ + "enabled globally, ns not set, annotation not provided, cni disabled": { + true, + false, + nil, + `/consul/connect-inject/consul connect redirect-traffic \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -proxy-uid=5995`, + "", + nil, + }, + "enabled globally, ns not set, annotation is false, cni disabled": { + true, + false, + map[string]string{keyTransparentProxy: "false"}, + "", + `/consul/connect-inject/consul connect redirect-traffic \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -proxy-uid=5995`, + nil, + }, + "enabled globally, ns not set, annotation is true, cni disabled": { + true, + false, + map[string]string{keyTransparentProxy: "true"}, + `/consul/connect-inject/consul connect redirect-traffic \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -proxy-uid=5995`, + "", + nil, + }, + "disabled globally, ns not set, annotation not provided, cni disabled": { + false, + false, + nil, + "", + `/consul/connect-inject/consul connect redirect-traffic \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -proxy-uid=5995`, + nil, + }, + "disabled globally, ns not set, annotation is false, cni disabled": { + false, + false, + map[string]string{keyTransparentProxy: "false"}, + "", + `/consul/connect-inject/consul connect redirect-traffic \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -proxy-uid=5995`, + nil, + }, + "disabled globally, ns not set, annotation is true, cni disabled": { + false, + false, + map[string]string{keyTransparentProxy: "true"}, + `/consul/connect-inject/consul connect redirect-traffic \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -proxy-uid=5995`, + "", + nil, + }, + "exclude-inbound-ports, ns is not set, annotation is provided, cni disabled": { + true, + false, + map[string]string{ + keyTransparentProxy: "true", + annotationTProxyExcludeInboundPorts: "9090,9091", + }, + `/consul/connect-inject/consul connect redirect-traffic \ + -exclude-inbound-port="9090" \ + -exclude-inbound-port="9091" \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -proxy-uid=5995`, + "", + nil, + }, + "exclude-outbound-ports, ns is not set, annotation is provided, cni disabled": { + true, + false, + map[string]string{ + keyTransparentProxy: "true", + annotationTProxyExcludeOutboundPorts: "9090,9091", + }, + `/consul/connect-inject/consul connect redirect-traffic \ + -exclude-outbound-port="9090" \ + -exclude-outbound-port="9091" \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -proxy-uid=5995`, + "", + nil, + }, + "exclude-outbound-cidrs annotation is provided, cni disabled": { + true, + false, + map[string]string{ + keyTransparentProxy: "true", + annotationTProxyExcludeOutboundCIDRs: "1.1.1.1,2.2.2.2/24", + }, + `/consul/connect-inject/consul connect redirect-traffic \ + -exclude-outbound-cidr="1.1.1.1" \ + -exclude-outbound-cidr="2.2.2.2/24" \ + -proxy-id="$(cat 
/consul/connect-inject/proxyid)" \ + -proxy-uid=5995`, + "", + nil, + }, + "exclude-uids annotation is provided, ns is not set, cni disabled": { + true, + false, + map[string]string{ + keyTransparentProxy: "true", + annotationTProxyExcludeUIDs: "6000,7000", + }, + `/consul/connect-inject/consul connect redirect-traffic \ + -exclude-uid="6000" \ + -exclude-uid="7000" \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -proxy-uid=5995`, + "", + nil, + }, + "disabled globally, ns enabled, annotation not set, cni disabled": { + false, + false, + nil, + `/consul/connect-inject/consul connect redirect-traffic \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -proxy-uid=5995`, + "", + map[string]string{keyTransparentProxy: "true"}, + }, + "enabled globally, ns disabled, annotation not set, cni disabled": { + true, + false, + nil, + "", + `/consul/connect-inject/consul connect redirect-traffic \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -proxy-uid=5995`, + map[string]string{keyTransparentProxy: "false"}, + }, + "disabled globally, ns enabled, annotation not set, cni enabled": { + false, + true, + nil, + "", + `/consul/connect-inject/consul connect redirect-traffic \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -proxy-uid=5995`, + map[string]string{keyTransparentProxy: "true"}, + }, + "enabled globally, ns not set, annotation not set, cni enabled": { + true, + true, + nil, + "", + `/consul/connect-inject/consul connect redirect-traffic \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -proxy-uid=5995`, + nil, + }, + "enabled globally, ns not set, annotation is true, cni disabled, proxy health checks": { + true, + false, + map[string]string{keyTransparentProxy: "true", annotationUseProxyHealthCheck: "true"}, + `/consul/connect-inject/consul connect redirect-traffic \ + -exclude-inbound-port=21000 \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -proxy-uid=5995`, + "", + nil, + }, + } + for name, c := range cases { + t.Run(name, func(t *testing.T) { + w := MeshWebhook{ + EnableTransparentProxy: c.globalEnabled, + ConsulAPITimeout: 5 * time.Second, + EnableCNI: c.cniEnabled, + } + pod := minimal() + pod.Annotations = c.annotations + + expectedSecurityContext := &corev1.SecurityContext{} + if !c.cniEnabled { + expectedSecurityContext.RunAsUser = pointer.Int64(0) + expectedSecurityContext.RunAsGroup = pointer.Int64(0) + expectedSecurityContext.RunAsNonRoot = pointer.Bool(false) + expectedSecurityContext.Privileged = pointer.Bool(true) + expectedSecurityContext.Capabilities = &corev1.Capabilities{ + Add: []corev1.Capability{netAdminCapability}, + } + } else { + + expectedSecurityContext.RunAsUser = pointer.Int64(initContainersUserAndGroupID) + expectedSecurityContext.RunAsGroup = pointer.Int64(initContainersUserAndGroupID) + expectedSecurityContext.RunAsNonRoot = pointer.Bool(true) + expectedSecurityContext.Privileged = pointer.Bool(false) + expectedSecurityContext.Capabilities = &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + } + } + ns := testNS + ns.Labels = c.namespaceLabel + container, err := w.containerInit(ns, *pod, multiPortInfo{}) + require.NoError(t, err) + actualCmd := strings.Join(container.Command, " ") + + if c.expectedContainsCmd != "" { + require.Equal(t, expectedSecurityContext, container.SecurityContext) + require.Contains(t, actualCmd, c.expectedContainsCmd) + } else { + if !c.cniEnabled { + require.Nil(t, container.SecurityContext) + } else { + require.Equal(t, expectedSecurityContext, container.SecurityContext) + } + 
require.NotContains(t, actualCmd, c.expectedNotContainsCmd) + } + }) + } +} + +func TestHandlerContainerInit_consulDNS(t *testing.T) { + cases := map[string]struct { + globalEnabled bool + annotations map[string]string + expectedContainsCmd string + namespaceLabel map[string]string + }{ + "enabled globally, ns not set, annotation not provided": { + globalEnabled: true, + expectedContainsCmd: `/consul/connect-inject/consul connect redirect-traffic \ + -consul-dns-ip="10.0.34.16" \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -proxy-uid=5995`, + }, + "enabled globally, ns not set, annotation is false": { + globalEnabled: true, + annotations: map[string]string{keyConsulDNS: "false"}, + expectedContainsCmd: `/consul/connect-inject/consul connect redirect-traffic \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -proxy-uid=5995`, + }, + "enabled globally, ns not set, annotation is true": { + globalEnabled: true, + annotations: map[string]string{keyConsulDNS: "true"}, + expectedContainsCmd: `/consul/connect-inject/consul connect redirect-traffic \ + -consul-dns-ip="10.0.34.16" \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -proxy-uid=5995`, + }, + "disabled globally, ns not set, annotation not provided": { + expectedContainsCmd: `/consul/connect-inject/consul connect redirect-traffic \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -proxy-uid=5995`, + }, + "disabled globally, ns not set, annotation is false": { + annotations: map[string]string{keyConsulDNS: "false"}, + expectedContainsCmd: `/consul/connect-inject/consul connect redirect-traffic \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -proxy-uid=5995`, + }, + "disabled globally, ns not set, annotation is true": { + annotations: map[string]string{keyConsulDNS: "true"}, + expectedContainsCmd: `/consul/connect-inject/consul connect redirect-traffic \ + -consul-dns-ip="10.0.34.16" \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -proxy-uid=5995`, + }, + "disabled globally, ns enabled, annotation not set": { + expectedContainsCmd: `/consul/connect-inject/consul connect redirect-traffic \ + -consul-dns-ip="10.0.34.16" \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -proxy-uid=5995`, + namespaceLabel: map[string]string{keyConsulDNS: "true"}, + }, + "enabled globally, ns disabled, annotation not set": { + globalEnabled: true, + expectedContainsCmd: `/consul/connect-inject/consul connect redirect-traffic \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -proxy-uid=5995`, + namespaceLabel: map[string]string{keyConsulDNS: "false"}, + }, + } + for name, c := range cases { + t.Run(name, func(t *testing.T) { + w := MeshWebhook{ + EnableConsulDNS: c.globalEnabled, + EnableTransparentProxy: true, + ResourcePrefix: "consul-consul", + ConsulAPITimeout: 5 * time.Second, + } + os.Setenv("CONSUL_CONSUL_DNS_SERVICE_HOST", "10.0.34.16") + defer os.Unsetenv("CONSUL_CONSUL_DNS_SERVICE_HOST") + + pod := minimal() + pod.Annotations = c.annotations + + ns := testNS + ns.Labels = c.namespaceLabel + container, err := w.containerInit(ns, *pod, multiPortInfo{}) + require.NoError(t, err) + actualCmd := strings.Join(container.Command, " ") + + require.Contains(t, actualCmd, c.expectedContainsCmd) + }) + } +} + +func TestHandler_constructDNSServiceHostName(t *testing.T) { + cases := []struct { + prefix string + result string + }{ + { + prefix: "consul-consul", + result: "CONSUL_CONSUL_DNS_SERVICE_HOST", + }, + { + prefix: "release", + result: "RELEASE_DNS_SERVICE_HOST", + }, + { + prefix: 
"consul-dc1", + result: "CONSUL_DC1_DNS_SERVICE_HOST", + }, + } + + for _, c := range cases { + t.Run(c.prefix, func(t *testing.T) { + w := MeshWebhook{ResourcePrefix: c.prefix, ConsulAPITimeout: 5 * time.Second} + require.Equal(t, c.result, w.constructDNSServiceHostName()) + }) + } +} + +func TestHandlerContainerInit_namespacesAndPartitionsEnabled(t *testing.T) { + minimal := func() *corev1.Pod { + return &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + annotationService: "foo", + }, + }, + + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + }, + { + Name: "web-side", + }, + { + Name: "auth-method-secret", + VolumeMounts: []corev1.VolumeMount{ + { + Name: "service-account-secret", + MountPath: "/var/run/secrets/kubernetes.io/serviceaccount", + }, + }, + }, + }, + ServiceAccountName: "web", + }, + } + } + + cases := []struct { + Name string + Pod func(*corev1.Pod) *corev1.Pod + Webhook MeshWebhook + Cmd string // Strings.Contains test + }{ + { + "whole template, default namespace, no partition", + func(pod *corev1.Pod) *corev1.Pod { + pod.Annotations[annotationService] = "web" + return pod + }, + MeshWebhook{ + EnableNamespaces: true, + ConsulDestinationNamespace: "default", + ConsulPartition: "", + ConsulAPITimeout: 5 * time.Second, + }, + `/bin/sh -ec +export CONSUL_HTTP_ADDR="${HOST_IP}:8500" +export CONSUL_GRPC_ADDR="${HOST_IP}:8502" +consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ + -consul-api-timeout=5s \ + -consul-service-namespace="default" \ + +# Generate the envoy bootstrap code +/consul/connect-inject/consul connect envoy \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -namespace="default" \ + -bootstrap > /consul/connect-inject/envoy-bootstrap.yaml`, + }, + { + "whole template, default namespace, default partition", + func(pod *corev1.Pod) *corev1.Pod { + pod.Annotations[annotationService] = "web" + return pod + }, + MeshWebhook{ + EnableNamespaces: true, + ConsulDestinationNamespace: "default", + ConsulPartition: "default", + ConsulAPITimeout: 5 * time.Second, + }, + `/bin/sh -ec +export CONSUL_HTTP_ADDR="${HOST_IP}:8500" +export CONSUL_GRPC_ADDR="${HOST_IP}:8502" +consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ + -consul-api-timeout=5s \ + -partition="default" \ + -consul-service-namespace="default" \ + +# Generate the envoy bootstrap code +/consul/connect-inject/consul connect envoy \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -partition="default" \ + -namespace="default" \ + -bootstrap > /consul/connect-inject/envoy-bootstrap.yaml`, + }, + { + "whole template, non-default namespace, no partition", + func(pod *corev1.Pod) *corev1.Pod { + pod.Annotations[annotationService] = "web" + return pod + }, + MeshWebhook{ + EnableNamespaces: true, + ConsulDestinationNamespace: "non-default", + ConsulPartition: "", + ConsulAPITimeout: 5 * time.Second, + }, + `/bin/sh -ec +export CONSUL_HTTP_ADDR="${HOST_IP}:8500" +export CONSUL_GRPC_ADDR="${HOST_IP}:8502" +consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ + -consul-api-timeout=5s \ + -consul-service-namespace="non-default" \ + +# Generate the envoy bootstrap code +/consul/connect-inject/consul connect envoy \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -namespace="non-default" \ + -bootstrap > /consul/connect-inject/envoy-bootstrap.yaml`, + }, + { + "whole template, non-default namespace, non-default 
partition", + func(pod *corev1.Pod) *corev1.Pod { + pod.Annotations[annotationService] = "web" + return pod + }, + MeshWebhook{ + EnableNamespaces: true, + ConsulDestinationNamespace: "non-default", + ConsulPartition: "non-default-part", + ConsulAPITimeout: 5 * time.Second, + }, + `/bin/sh -ec +export CONSUL_HTTP_ADDR="${HOST_IP}:8500" +export CONSUL_GRPC_ADDR="${HOST_IP}:8502" +consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ + -consul-api-timeout=5s \ + -partition="non-default-part" \ + -consul-service-namespace="non-default" \ + +# Generate the envoy bootstrap code +/consul/connect-inject/consul connect envoy \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -partition="non-default-part" \ + -namespace="non-default" \ + -bootstrap > /consul/connect-inject/envoy-bootstrap.yaml`, + }, + { + "Whole template, auth method, non-default namespace, mirroring disabled, default partition", + func(pod *corev1.Pod) *corev1.Pod { + pod.Annotations[annotationService] = "" + return pod + }, + MeshWebhook{ + AuthMethod: "auth-method", + EnableNamespaces: true, + ConsulDestinationNamespace: "non-default", + ConsulPartition: "default", + ConsulAPITimeout: 5 * time.Second, + }, + `/bin/sh -ec +export CONSUL_HTTP_ADDR="${HOST_IP}:8500" +export CONSUL_GRPC_ADDR="${HOST_IP}:8502" +consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ + -consul-api-timeout=5s \ + -acl-auth-method="auth-method" \ + -service-account-name="web" \ + -service-name="" \ + -bearer-token-file=/var/run/secrets/kubernetes.io/serviceaccount/token \ + -auth-method-namespace="non-default" \ + -partition="default" \ + -consul-service-namespace="non-default" \ + +# Generate the envoy bootstrap code +/consul/connect-inject/consul connect envoy \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -token-file="/consul/connect-inject/acl-token" \ + -partition="default" \ + -namespace="non-default" \ + -bootstrap > /consul/connect-inject/envoy-bootstrap.yaml`, + }, + { + "Whole template, auth method, non-default namespace, mirroring enabled, non-default partition", + func(pod *corev1.Pod) *corev1.Pod { + pod.Annotations[annotationService] = "" + return pod + }, + MeshWebhook{ + AuthMethod: "auth-method", + EnableNamespaces: true, + ConsulDestinationNamespace: "non-default", // Overridden by mirroring + EnableK8SNSMirroring: true, + ConsulPartition: "non-default", + ConsulAPITimeout: 5 * time.Second, + }, + `/bin/sh -ec +export CONSUL_HTTP_ADDR="${HOST_IP}:8500" +export CONSUL_GRPC_ADDR="${HOST_IP}:8502" +consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ + -consul-api-timeout=5s \ + -acl-auth-method="auth-method" \ + -service-account-name="web" \ + -service-name="" \ + -bearer-token-file=/var/run/secrets/kubernetes.io/serviceaccount/token \ + -auth-method-namespace="default" \ + -partition="non-default" \ + -consul-service-namespace="k8snamespace" \ + +# Generate the envoy bootstrap code +/consul/connect-inject/consul connect envoy \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -token-file="/consul/connect-inject/acl-token" \ + -partition="non-default" \ + -namespace="k8snamespace" \ + -bootstrap > /consul/connect-inject/envoy-bootstrap.yaml`, + }, + { + "whole template, default namespace, tproxy enabled, no partition", + func(pod *corev1.Pod) *corev1.Pod { + pod.Annotations[annotationService] = "web" + return pod + }, + MeshWebhook{ + EnableNamespaces: true, + ConsulDestinationNamespace: 
"default", + ConsulPartition: "", + EnableTransparentProxy: true, + ConsulAPITimeout: 5 * time.Second, + }, + `/bin/sh -ec +export CONSUL_HTTP_ADDR="${HOST_IP}:8500" +export CONSUL_GRPC_ADDR="${HOST_IP}:8502" +consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ + -consul-api-timeout=5s \ + -consul-service-namespace="default" \ + +# Generate the envoy bootstrap code +/consul/connect-inject/consul connect envoy \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -namespace="default" \ + -bootstrap > /consul/connect-inject/envoy-bootstrap.yaml + +# Apply traffic redirection rules. +/consul/connect-inject/consul connect redirect-traffic \ + -namespace="default" \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -proxy-uid=5995`, + }, + { + "whole template, non-default namespace, tproxy enabled, default partition", + func(pod *corev1.Pod) *corev1.Pod { + pod.Annotations[annotationService] = "web" + return pod + }, + MeshWebhook{ + EnableNamespaces: true, + ConsulPartition: "default", + ConsulDestinationNamespace: "non-default", + EnableTransparentProxy: true, + ConsulAPITimeout: 5 * time.Second, + }, + `/bin/sh -ec +export CONSUL_HTTP_ADDR="${HOST_IP}:8500" +export CONSUL_GRPC_ADDR="${HOST_IP}:8502" +consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ + -consul-api-timeout=5s \ + -partition="default" \ + -consul-service-namespace="non-default" \ + +# Generate the envoy bootstrap code +/consul/connect-inject/consul connect envoy \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -partition="default" \ + -namespace="non-default" \ + -bootstrap > /consul/connect-inject/envoy-bootstrap.yaml + +# Apply traffic redirection rules. +/consul/connect-inject/consul connect redirect-traffic \ + -partition="default" \ + -namespace="non-default" \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -proxy-uid=5995`, + }, + + { + "Whole template, auth method, non-default namespace, mirroring enabled, tproxy enabled, non-default partition", + func(pod *corev1.Pod) *corev1.Pod { + pod.Annotations[annotationService] = "web" + return pod + }, + MeshWebhook{ + AuthMethod: "auth-method", + EnableNamespaces: true, + ConsulPartition: "non-default", + ConsulDestinationNamespace: "non-default", // Overridden by mirroring + EnableK8SNSMirroring: true, + EnableTransparentProxy: true, + ConsulAPITimeout: 5 * time.Second, + }, + `/bin/sh -ec +export CONSUL_HTTP_ADDR="${HOST_IP}:8500" +export CONSUL_GRPC_ADDR="${HOST_IP}:8502" +consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ + -consul-api-timeout=5s \ + -acl-auth-method="auth-method" \ + -service-account-name="web" \ + -service-name="web" \ + -bearer-token-file=/var/run/secrets/kubernetes.io/serviceaccount/token \ + -auth-method-namespace="default" \ + -partition="non-default" \ + -consul-service-namespace="k8snamespace" \ + +# Generate the envoy bootstrap code +/consul/connect-inject/consul connect envoy \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -token-file="/consul/connect-inject/acl-token" \ + -partition="non-default" \ + -namespace="k8snamespace" \ + -bootstrap > /consul/connect-inject/envoy-bootstrap.yaml + +# Apply traffic redirection rules. 
+/consul/connect-inject/consul connect redirect-traffic \ + -token-file="/consul/connect-inject/acl-token" \ + -partition="non-default" \ + -namespace="k8snamespace" \ + -proxy-id="$(cat /consul/connect-inject/proxyid)" \ + -proxy-uid=5995`, + }, + } + + for _, tt := range cases { + t.Run(tt.Name, func(t *testing.T) { + require := require.New(t) + + w := tt.Webhook + container, err := w.containerInit(testNS, *tt.Pod(minimal()), multiPortInfo{}) + require.NoError(err) + actual := strings.Join(container.Command, " ") + require.Equal(tt.Cmd, actual) + }) + } +} + +func TestHandlerContainerInit_Multiport(t *testing.T) { + minimal := func() *corev1.Pod { + return &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + annotationService: "web,web-admin", + }, + }, + + Spec: corev1.PodSpec{ + Volumes: []corev1.Volume{ + { + Name: "web-admin-service-account", + }, + }, + Containers: []corev1.Container{ + { + Name: "web", + }, + { + Name: "web-side", + }, + { + Name: "web-admin", + }, + { + Name: "web-admin-side", + }, + { + Name: "auth-method-secret", + VolumeMounts: []corev1.VolumeMount{ + { + Name: "service-account-secret", + MountPath: "/var/run/secrets/kubernetes.io/serviceaccount", + }, + }, + }, + }, + ServiceAccountName: "web", + }, + } + } + + cases := []struct { + Name string + Pod func(*corev1.Pod) *corev1.Pod + Webhook MeshWebhook + NumInitContainers int + MultiPortInfos []multiPortInfo + Cmd []string // Strings.Contains test + }{ + { + "Whole template, multiport", + func(pod *corev1.Pod) *corev1.Pod { + return pod + }, + MeshWebhook{ConsulAPITimeout: 5 * time.Second}, + 2, + []multiPortInfo{ + { + serviceIndex: 0, + serviceName: "web", + }, + { + serviceIndex: 1, + serviceName: "web-admin", + }, + }, + []string{ + `/bin/sh -ec +export CONSUL_HTTP_ADDR="${HOST_IP}:8500" +export CONSUL_GRPC_ADDR="${HOST_IP}:8502" +consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ + -consul-api-timeout=5s \ + -multiport=true \ + -proxy-id-file=/consul/connect-inject/proxyid-web \ + -service-name="web" \ + +# Generate the envoy bootstrap code +/consul/connect-inject/consul connect envoy \ + -proxy-id="$(cat /consul/connect-inject/proxyid-web)" \ + -admin-bind=127.0.0.1:19000 \ + -bootstrap > /consul/connect-inject/envoy-bootstrap-web.yaml`, + + `/bin/sh -ec +export CONSUL_HTTP_ADDR="${HOST_IP}:8500" +export CONSUL_GRPC_ADDR="${HOST_IP}:8502" +consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ + -consul-api-timeout=5s \ + -multiport=true \ + -proxy-id-file=/consul/connect-inject/proxyid-web-admin \ + -service-name="web-admin" \ + +# Generate the envoy bootstrap code +/consul/connect-inject/consul connect envoy \ + -proxy-id="$(cat /consul/connect-inject/proxyid-web-admin)" \ + -admin-bind=127.0.0.1:19001 \ + -bootstrap > /consul/connect-inject/envoy-bootstrap-web-admin.yaml`, + }, + }, + { + "Whole template, multiport, proxy health check", + func(pod *corev1.Pod) *corev1.Pod { + pod.Annotations[annotationUseProxyHealthCheck] = "true" + return pod + }, + MeshWebhook{ConsulAPITimeout: 5 * time.Second}, + 2, + []multiPortInfo{ + { + serviceIndex: 0, + serviceName: "web", + }, + { + serviceIndex: 1, + serviceName: "web-admin", + }, + }, + []string{ + `/bin/sh -ec +export CONSUL_HTTP_ADDR="${HOST_IP}:8500" +export CONSUL_GRPC_ADDR="${HOST_IP}:8502" +consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ + -consul-api-timeout=5s \ + -multiport=true \ + 
-proxy-id-file=/consul/connect-inject/proxyid-web \ + -service-name="web" \ + +# Generate the envoy bootstrap code +/consul/connect-inject/consul connect envoy \ + -proxy-id="$(cat /consul/connect-inject/proxyid-web)" \ + -envoy-ready-bind-address="${POD_IP}" \ + -envoy-ready-bind-port=21000 \ + -admin-bind=127.0.0.1:19000 \ + -bootstrap > /consul/connect-inject/envoy-bootstrap-web.yaml`, + + `/bin/sh -ec +export CONSUL_HTTP_ADDR="${HOST_IP}:8500" +export CONSUL_GRPC_ADDR="${HOST_IP}:8502" +consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ + -consul-api-timeout=5s \ + -multiport=true \ + -proxy-id-file=/consul/connect-inject/proxyid-web-admin \ + -service-name="web-admin" \ + +# Generate the envoy bootstrap code +/consul/connect-inject/consul connect envoy \ + -proxy-id="$(cat /consul/connect-inject/proxyid-web-admin)" \ + -envoy-ready-bind-address="${POD_IP}" \ + -envoy-ready-bind-port=21001 \ + -admin-bind=127.0.0.1:19001 \ + -bootstrap > /consul/connect-inject/envoy-bootstrap-web-admin.yaml`, + }, + }, + { + "Whole template, multiport, auth method", + func(pod *corev1.Pod) *corev1.Pod { + return pod + }, + MeshWebhook{ + AuthMethod: "auth-method", + ConsulAPITimeout: 5 * time.Second, + }, + 2, + []multiPortInfo{ + { + serviceIndex: 0, + serviceName: "web", + }, + { + serviceIndex: 1, + serviceName: "web-admin", + }, + }, + []string{ + `/bin/sh -ec +export CONSUL_HTTP_ADDR="${HOST_IP}:8500" +export CONSUL_GRPC_ADDR="${HOST_IP}:8502" +consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ + -consul-api-timeout=5s \ + -acl-auth-method="auth-method" \ + -service-account-name="web" \ + -service-name="web" \ + -bearer-token-file=/var/run/secrets/kubernetes.io/serviceaccount/token \ + -acl-token-sink=/consul/connect-inject/acl-token-web \ + -multiport=true \ + -proxy-id-file=/consul/connect-inject/proxyid-web \ + +# Generate the envoy bootstrap code +/consul/connect-inject/consul connect envoy \ + -proxy-id="$(cat /consul/connect-inject/proxyid-web)" \ + -token-file="/consul/connect-inject/acl-token-web" \ + -admin-bind=127.0.0.1:19000 \ + -bootstrap > /consul/connect-inject/envoy-bootstrap-web.yaml`, + + `/bin/sh -ec +export CONSUL_HTTP_ADDR="${HOST_IP}:8500" +export CONSUL_GRPC_ADDR="${HOST_IP}:8502" +consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ + -consul-api-timeout=5s \ + -acl-auth-method="auth-method" \ + -service-account-name="web-admin" \ + -service-name="web-admin" \ + -bearer-token-file=/consul/serviceaccount-web-admin/token \ + -acl-token-sink=/consul/connect-inject/acl-token-web-admin \ + -multiport=true \ + -proxy-id-file=/consul/connect-inject/proxyid-web-admin \ + +# Generate the envoy bootstrap code +/consul/connect-inject/consul connect envoy \ + -proxy-id="$(cat /consul/connect-inject/proxyid-web-admin)" \ + -token-file="/consul/connect-inject/acl-token-web-admin" \ + -admin-bind=127.0.0.1:19001 \ + -bootstrap > /consul/connect-inject/envoy-bootstrap-web-admin.yaml`, + }, + }, + } + + for _, tt := range cases { + t.Run(tt.Name, func(t *testing.T) { + require := require.New(t) + + w := tt.Webhook + for i := 0; i < tt.NumInitContainers; i++ { + container, err := w.containerInit(testNS, *tt.Pod(minimal()), tt.MultiPortInfos[i]) + require.NoError(err) + actual := strings.Join(container.Command, " ") + require.Equal(tt.Cmd[i], actual) + } + }) + } +} + +func TestHandlerContainerInit_authMethod(t *testing.T) { + require := require.New(t) + w := 
MeshWebhook{
+		AuthMethod:       "release-name-consul-k8s-auth-method",
+		ConsulAPITimeout: 5 * time.Second,
+	}
+	pod := &corev1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Annotations: map[string]string{
+				annotationService: "foo",
+			},
+		},
+
+		Spec: corev1.PodSpec{
+			Containers: []corev1.Container{
+				{
+					Name: "web",
+					VolumeMounts: []corev1.VolumeMount{
+						{
+							Name:      "default-token-podid",
+							ReadOnly:  true,
+							MountPath: "/var/run/secrets/kubernetes.io/serviceaccount",
+						},
+					},
+				},
+			},
+			ServiceAccountName: "foo",
+		},
+	}
+	container, err := w.containerInit(testNS, *pod, multiPortInfo{})
+	require.NoError(err)
+	actual := strings.Join(container.Command, " ")
+	require.Contains(actual, `
+consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \
+  -consul-api-timeout=5s \
+  -acl-auth-method="release-name-consul-k8s-auth-method"`)
+	require.Contains(actual, `
+# Generate the envoy bootstrap code
+/consul/connect-inject/consul connect envoy \
+  -proxy-id="$(cat /consul/connect-inject/proxyid)" \
+  -token-file="/consul/connect-inject/acl-token" \
+  -bootstrap > /consul/connect-inject/envoy-bootstrap.yaml`)
+}
+
+// If Consul CA cert is set,
+// Consul addresses should use HTTPS
+// and CA cert should be set as env variable.
+func TestHandlerContainerInit_WithTLS(t *testing.T) {
+	require := require.New(t)
+	w := MeshWebhook{
+		ConsulCACert:     "consul-ca-cert",
+		ConsulAPITimeout: 5 * time.Second,
+	}
+	pod := &corev1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Annotations: map[string]string{
+				annotationService: "foo",
+			},
+		},
+
+		Spec: corev1.PodSpec{
+			Containers: []corev1.Container{
+				{
+					Name: "web",
+				},
+			},
+		},
+	}
+	container, err := w.containerInit(testNS, *pod, multiPortInfo{})
+	require.NoError(err)
+	actual := strings.Join(container.Command, " ")
+	require.Contains(actual, `
+export CONSUL_HTTP_ADDR="https://${HOST_IP}:8501"
+export CONSUL_GRPC_ADDR="https://${HOST_IP}:8502"
+export CONSUL_CACERT=/consul/connect-inject/consul-ca.pem
+cat <<EOF >/consul/connect-inject/consul-ca.pem
+consul-ca-cert
+EOF`)
+	require.NotContains(actual, `
+export CONSUL_HTTP_ADDR="${HOST_IP}:8500"
+export CONSUL_GRPC_ADDR="${HOST_IP}:8502"`)
+}
+
+func TestHandlerContainerInit_Resources(t *testing.T) {
+	require := require.New(t)
+	w := MeshWebhook{
+		InitContainerResources: corev1.ResourceRequirements{
+			Requests: corev1.ResourceList{
+				corev1.ResourceCPU:    resource.MustParse("10m"),
+				corev1.ResourceMemory: resource.MustParse("10Mi"),
+			},
+			Limits: corev1.ResourceList{
+				corev1.ResourceCPU:    resource.MustParse("20m"),
+				corev1.ResourceMemory: resource.MustParse("25Mi"),
+			},
+		},
+		ConsulAPITimeout: 5 * time.Second,
+	}
+	pod := &corev1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Annotations: map[string]string{
+				annotationService: "foo",
+			},
+		},
+
+		Spec: corev1.PodSpec{
+			Containers: []corev1.Container{
+				{
+					Name: "web",
+				},
+			},
+		},
+	}
+	container, err := w.containerInit(testNS, *pod, multiPortInfo{})
+	require.NoError(err)
+	require.Equal(corev1.ResourceRequirements{
+		Limits: corev1.ResourceList{
+			corev1.ResourceCPU:    resource.MustParse("20m"),
+			corev1.ResourceMemory: resource.MustParse("25Mi"),
+		},
+		Requests: corev1.ResourceList{
+			corev1.ResourceCPU:    resource.MustParse("10m"),
+			corev1.ResourceMemory: resource.MustParse("10Mi"),
+		},
+	}, container.Resources)
+}
+
+// Test that the init copy container has the correct command and SecurityContext.
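+// On OpenShift the webhook leaves SecurityContext nil so OpenShift can assign
+// its own random user and group, while everywhere else it pins the dedicated
+// non-root init user/group (5996) with a read-only root filesystem.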
+func TestHandlerInitCopyContainer(t *testing.T) { + openShiftEnabledCases := []bool{false, true} + + for _, openShiftEnabled := range openShiftEnabledCases { + t.Run(fmt.Sprintf("openshift enabled: %t", openShiftEnabled), func(t *testing.T) { + w := MeshWebhook{EnableOpenShift: openShiftEnabled, ConsulAPITimeout: 5 * time.Second} + + container := w.initCopyContainer() + + if openShiftEnabled { + require.Nil(t, container.SecurityContext) + } else { + expectedSecurityContext := &corev1.SecurityContext{ + RunAsUser: pointer.Int64(initContainersUserAndGroupID), + RunAsGroup: pointer.Int64(initContainersUserAndGroupID), + RunAsNonRoot: pointer.Bool(true), + ReadOnlyRootFilesystem: pointer.Bool(true), + } + require.Equal(t, expectedSecurityContext, container.SecurityContext) + } + + actual := strings.Join(container.Command, " ") + require.Contains(t, actual, `cp /bin/consul /consul/connect-inject/consul`) + }) + } +} + +var testNS = corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: k8sNamespace, + }, +} diff --git a/control-plane/connect-inject/webhook/container_volume.go b/control-plane/connect-inject/container_volume.go similarity index 82% rename from control-plane/connect-inject/webhook/container_volume.go rename to control-plane/connect-inject/container_volume.go index b33567469b..53ba985f3e 100644 --- a/control-plane/connect-inject/webhook/container_volume.go +++ b/control-plane/connect-inject/container_volume.go @@ -1,4 +1,4 @@ -package webhook +package connectinject import ( corev1 "k8s.io/api/core/v1" @@ -14,7 +14,7 @@ func (w *MeshWebhook) containerVolume() corev1.Volume { return corev1.Volume{ Name: volumeName, VolumeSource: corev1.VolumeSource{ - EmptyDir: &corev1.EmptyDirVolumeSource{Medium: corev1.StorageMediumMemory}, + EmptyDir: &corev1.EmptyDirVolumeSource{}, }, } } diff --git a/control-plane/connect-inject/controllers/endpoints/consul_client_health_checks.go b/control-plane/connect-inject/controllers/endpoints/consul_client_health_checks.go deleted file mode 100644 index f54fb71d11..0000000000 --- a/control-plane/connect-inject/controllers/endpoints/consul_client_health_checks.go +++ /dev/null @@ -1,114 +0,0 @@ -package endpoints - -import ( - "fmt" - - "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" - "github.com/hashicorp/consul-k8s/control-plane/consul" - "github.com/hashicorp/consul-server-connection-manager/discovery" - "github.com/hashicorp/consul/api" - "github.com/hashicorp/go-version" - corev1 "k8s.io/api/core/v1" -) - -const minSupportedConsulDataplaneVersion = "v1.0.0-beta1" - -// isConsulDataplaneSupported returns true if the consul-k8s version on the pod supports -// consul-dataplane architecture of Consul. -func isConsulDataplaneSupported(pod corev1.Pod) bool { - if anno, ok := pod.Annotations[constants.AnnotationConsulK8sVersion]; ok { - consulK8sVersion, err := version.NewVersion(anno) - if err != nil { - // Only consul-k8s v1.0.0+ (including pre-release versions) have the version annotation. So it would be - // reasonable to default to supporting dataplane even if the version is malformed or invalid. 
- return true
- }
- consulDPSupportedVersion, err := version.NewVersion(minSupportedConsulDataplaneVersion)
- if err != nil {
- return false
- }
- if !consulK8sVersion.LessThan(consulDPSupportedVersion) {
- return true
- }
- }
- return false
-}
-
-func (r *Controller) consulClientCfgForNodeAgent(serverClient *api.Client, pod corev1.Pod, state discovery.State) (*api.Config, error) {
- ccCfg := &api.Config{
- Scheme: r.ConsulClientConfig.APIClientConfig.Scheme,
- }
-
- consulClientHttpPort := 8500
- if ccCfg.Scheme == "https" {
- consulClientHttpPort = 8501
- ccCfg.TLSConfig.CAFile = r.ConsulClientConfig.APIClientConfig.TLSConfig.CAFile
- }
- if r.consulClientHttpPort != 0 {
- consulClientHttpPort = r.consulClientHttpPort
- }
- ccCfg.Address = fmt.Sprintf("%s:%d", pod.Status.HostIP, consulClientHttpPort)
-
- ccCfg.Token = state.Token
-
- // Check if auto-encrypt is enabled. If it is, we need to retrieve and set a different CA for the Consul client.
- if r.EnableAutoEncrypt {
- // Get Connect CA.
- caRoots, _, err := serverClient.Agent().ConnectCARoots(nil)
- if err != nil {
- return nil, err
- }
- if caRoots == nil {
- return nil, fmt.Errorf("ca root list is nil")
- }
- if caRoots.Roots == nil {
- return nil, fmt.Errorf("ca roots is nil")
- }
- if len(caRoots.Roots) == 0 {
- return nil, fmt.Errorf("the list of root CAs is empty")
- }
-
- for _, root := range caRoots.Roots {
- if root.Active {
- ccCfg.TLSConfig.CAFile = ""
- ccCfg.TLSConfig.CAPem = []byte(root.RootCertPEM)
- break
- }
- }
- }
- if r.EnableConsulNamespaces {
- ccCfg.Namespace = r.consulNamespace(pod.Namespace)
- }
- return ccCfg, nil
-}
-
-func (r *Controller) updateHealthCheckOnConsulClient(consulClientCfg *api.Config, pod corev1.Pod, endpoints corev1.Endpoints, status string) error {
- consulClient, err := consul.NewClient(consulClientCfg, r.ConsulClientConfig.APITimeout)
- if err != nil {
- return err
- }
- filter := fmt.Sprintf(`Name == "Kubernetes Health Check" and ServiceID == %q`, serviceID(pod, endpoints))
- checks, err := consulClient.Agent().ChecksWithFilter(filter)
- if err != nil {
- return err
- }
- if len(checks) > 1 {
- return fmt.Errorf("more than one Kubernetes health check found")
- }
- if len(checks) == 0 {
- r.Log.Info("detected no health checks to update", "name", endpoints.Name, "ns", endpoints.Namespace, "service-id", serviceID(pod, endpoints))
- return nil
- }
- for checkID := range checks {
- output := "Kubernetes health checks passing"
- if status == api.HealthCritical {
- output = fmt.Sprintf(`Pod "%s/%s" is not ready`, pod.Namespace, pod.Name)
- }
- r.Log.Info("updating health check status", "name", endpoints.Name, "ns", endpoints.Namespace, "status", status)
- err = consulClient.Agent().UpdateTTL(checkID, output, status)
- if err != nil {
- return err
- }
- }
- return nil
-}
diff --git a/control-plane/connect-inject/controllers/endpoints/consul_client_health_checks_test.go b/control-plane/connect-inject/controllers/endpoints/consul_client_health_checks_test.go
deleted file mode 100644
index 189587106d..0000000000
--- a/control-plane/connect-inject/controllers/endpoints/consul_client_health_checks_test.go
+++ /dev/null
@@ -1,261 +0,0 @@
-package endpoints
-
-import (
- "testing"
-
- logrtest "github.com/go-logr/logr/testing"
- "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants"
- "github.com/hashicorp/consul-k8s/control-plane/helper/test"
- "github.com/hashicorp/consul-server-connection-manager/discovery"
- "github.com/hashicorp/consul/api"
- "github.com/hashicorp/consul/sdk/testutil"
- "github.com/stretchr/testify/require"
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-func TestIsConsulDataplaneSupported(t *testing.T) {
- versions := map[string]struct {
- expIsConsulDataplaneSupported bool
- }{
- "": {false},
- "v1.0.0": {true},
- "1.0.0": {true},
- "v0.49.0": {false},
- "0.49.0-beta2": {false},
- "0.49.2": {false},
- "v1.0.0-beta1": {true},
- "v1.0.0-beta3": {true},
- "v1.1.0-beta1": {true},
- "v1.0.0-dev": {true},
- "v1.0.0-dev (abcdef)": {true},
- "v1.0.0-dev+abcdef": {true},
- "invalid": {true},
- }
-
- for version, c := range versions {
- t.Run(version, func(t *testing.T) {
- pod := corev1.Pod{
- ObjectMeta: metav1.ObjectMeta{
- Name: "test-pod",
- Namespace: "default",
- Annotations: map[string]string{},
- },
- }
- if version != "" {
- pod.ObjectMeta.Annotations[constants.AnnotationConsulK8sVersion] = version
- }
-
- require.Equal(t, c.expIsConsulDataplaneSupported, isConsulDataplaneSupported(pod))
- })
- }
-}
-
-func TestConsulClientForNodeAgent(t *testing.T) {
- cases := map[string]struct {
- tls bool
- autoEncrypt bool
- enableNamespaces bool
- }{
- "no tls and auto-encrypt": {},
- "with tls but no auto-encrypt": {tls: true},
- "with tls and auto-encrypt": {tls: true, autoEncrypt: true},
- "with namespaces": {enableNamespaces: true},
- }
-
- for name, c := range cases {
- t.Run(name, func(t *testing.T) {
- // Create test Consul server.
- testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) {
- c.Connect["enabled"] = true
- })
- testClient.TestServer.WaitForActiveCARoot(t)
- if c.tls {
- testClient.Cfg.APIClientConfig.Scheme = "https"
- }
-
- ctrl := Controller{
- ConsulClientConfig: testClient.Cfg,
- EnableConsulNamespaces: c.enableNamespaces,
- // We are only testing with mirroring enabled because other cases are tested elsewhere. 
- EnableNSMirroring: true, - EnableAutoEncrypt: c.autoEncrypt, - } - - pod := corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pod", - Namespace: "test-ns", - }, - Status: corev1.PodStatus{ - HostIP: "1.2.3.4", - }, - } - - ccCfg, err := ctrl.consulClientCfgForNodeAgent(testClient.APIClient, pod, discovery.State{Token: "test-token"}) - require.NoError(t, err) - require.Equal(t, "test-token", ccCfg.Token) - if c.tls { - require.Equal(t, "https", ccCfg.Scheme) - require.Equal(t, "1.2.3.4:8501", ccCfg.Address) - require.Empty(t, ccCfg.TLSConfig.Address) - if c.autoEncrypt { - caRoots, _, err := testClient.APIClient.Agent().ConnectCARoots(nil) - require.NoError(t, err) - require.Equal(t, []byte(caRoots.Roots[0].RootCertPEM), ccCfg.TLSConfig.CAPem) - } - } else { - require.Equal(t, ccCfg.Address, "1.2.3.4:8500") - } - - if c.enableNamespaces { - require.Equal(t, "test-ns", ccCfg.Namespace) - } - }) - } -} - -func TestUpdateHealthCheckOnConsulClient(t *testing.T) { - cases := map[string]struct { - checks []*api.AgentServiceCheck - updateToStatus string - expError string - }{ - "service with one existing kubernetes health check becoming unhealthy": { - checks: []*api.AgentServiceCheck{ - { - CheckID: "default/test-pod-test-service/kubernetes-health-check", - Name: "Kubernetes Health Check", - TTL: "100000h", - Status: api.HealthPassing, - SuccessBeforePassing: 1, - FailuresBeforeCritical: 1, - }, - }, - updateToStatus: api.HealthCritical, - }, - "service with one existing kubernetes health check becoming healthy": { - checks: []*api.AgentServiceCheck{ - { - CheckID: "default/test-pod-test-service/kubernetes-health-check", - Name: "Kubernetes Health Check", - TTL: "100000h", - Status: api.HealthCritical, - SuccessBeforePassing: 1, - FailuresBeforeCritical: 1, - }, - }, - updateToStatus: api.HealthPassing, - }, - "service without health check is a no-op": { - checks: nil, - updateToStatus: api.HealthPassing, - }, - "service with more than one existing kubernetes health check becoming healthy": { - checks: []*api.AgentServiceCheck{ - { - CheckID: "default/test-pod-test-service/kubernetes-health-check", - Name: "Kubernetes Health Check", - TTL: "100000h", - Status: api.HealthCritical, - SuccessBeforePassing: 1, - FailuresBeforeCritical: 1, - }, - { - CheckID: "default/test-pod-test-service/kubernetes-health-check-2", - Name: "Kubernetes Health Check", - TTL: "100000h", - Status: api.HealthPassing, - SuccessBeforePassing: 1, - FailuresBeforeCritical: 1, - }, - }, - updateToStatus: api.HealthPassing, - expError: "more than one Kubernetes health check found", - }, - } - - for name, c := range cases { - t.Run(name, func(t *testing.T) { - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - consulClient := testClient.APIClient - - consulSvcs := []*api.AgentServiceRegistration{ - { - ID: "test-pod-test-service", - Name: "test-service", - Port: 80, - Address: "1.2.3.4", - Checks: c.checks, - }, - { - Kind: api.ServiceKindConnectProxy, - ID: "test-pod-test-service-sidecar-proxy", - Name: "test-service-sidecar-proxy", - Port: 20000, - Address: "1.2.3.4", - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "test-service", - DestinationServiceID: "test-pod-test-service", - }, - }, - } - - pod := corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pod", - Namespace: "default", - }, - Status: corev1.PodStatus{ - PodIP: "1.2.3.4", - }, - } - endpoints := corev1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-service", - Namespace: "default", - }, - Subsets: 
[]corev1.EndpointSubset{ - { - NotReadyAddresses: []corev1.EndpointAddress{ - { - IP: "1.2.3.4", - TargetRef: &corev1.ObjectReference{ - Kind: "Pod", - Name: "test-pod", - Namespace: "default", - }, - }, - }, - }, - }, - } - - for _, svc := range consulSvcs { - err := consulClient.Agent().ServiceRegister(svc) - require.NoError(t, err) - } - - ctrl := Controller{ - ConsulClientConfig: testClient.Cfg, - Log: logrtest.TestLogger{T: t}, - } - - err := ctrl.updateHealthCheckOnConsulClient(testClient.Cfg.APIClientConfig, pod, endpoints, c.updateToStatus) - if c.expError == "" { - require.NoError(t, err) - status, agentHealthInfo, err := consulClient.Agent().AgentHealthServiceByName("test-service") - require.NoError(t, err) - if c.checks != nil { - require.NotEmpty(t, agentHealthInfo) - require.Equal(t, c.updateToStatus, status) - } else { - require.Empty(t, agentHealthInfo[0].Checks) - } - } else { - require.EqualError(t, err, c.expError) - } - }) - } - -} diff --git a/control-plane/connect-inject/controllers/endpoints/endpoints_controller_ent_test.go b/control-plane/connect-inject/controllers/endpoints/endpoints_controller_ent_test.go deleted file mode 100644 index 4303bafbc8..0000000000 --- a/control-plane/connect-inject/controllers/endpoints/endpoints_controller_ent_test.go +++ /dev/null @@ -1,2176 +0,0 @@ -//go:build enterprise - -package endpoints - -import ( - "context" - "fmt" - "testing" - - mapset "github.com/deckarep/golang-set" - logrtest "github.com/go-logr/logr/testing" - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" - "github.com/hashicorp/consul-k8s/control-plane/helper/test" - "github.com/hashicorp/consul-k8s/control-plane/namespaces" - "github.com/hashicorp/consul/api" - "github.com/hashicorp/consul/sdk/testutil" - "github.com/stretchr/testify/require" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client/fake" -) - -// TestReconcileCreateEndpoint tests the logic to create service instances in Consul from the addresses in the Endpoints -// object. The cases test a basic endpoints object with two addresses. This test verifies that the services and their TTL -// health checks are created in the expected Consul namespace for various combinations of namespace flags. -// This test covers Controller.createServiceRegistrations. 
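The namespace combinations exercised by the test below reduce to a single mapping from the source Kubernetes namespace to the expected Consul namespace. A minimal sketch of that mapping, with illustrative names rather than the controller's actual helper:

```go
package main

import "fmt"

// consulNamespace sketches the mapping the table cases assert: with
// mirroring enabled the Consul namespace tracks the source Kubernetes
// namespace, with an optional prefix; otherwise every service lands in
// the single destination namespace. Hypothetical helper, not real code.
func consulNamespace(kubeNS, destNS, prefix string, mirror bool) string {
	if mirror {
		return prefix + kubeNS
	}
	return destNS
}

func main() {
	fmt.Println(consulNamespace("default", "", "", true))        // "default"
	fmt.Println(consulNamespace("default", "", "prefix-", true)) // "prefix-default"
	fmt.Println(consulNamespace("kube", "other", "", false))     // "other"
}
```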
-func TestReconcileCreateEndpointWithNamespaces(t *testing.T) { - t.Parallel() - cases := map[string]struct { - Mirror bool - MirrorPrefix string - SourceKubeNS string - DestConsulNS string - ExpConsulNS string - }{ - "SourceKubeNS=default, DestConsulNS=default": { - SourceKubeNS: "default", - DestConsulNS: "default", - ExpConsulNS: "default", - }, - "SourceKubeNS=kube, DestConsulNS=default": { - SourceKubeNS: "kube", - DestConsulNS: "default", - ExpConsulNS: "default", - }, - "SourceKubeNS=default, DestConsulNS=other": { - SourceKubeNS: "default", - DestConsulNS: "other", - ExpConsulNS: "other", - }, - "SourceKubeNS=kube, DestConsulNS=other": { - SourceKubeNS: "kube", - DestConsulNS: "other", - ExpConsulNS: "other", - }, - "SourceKubeNS=default, Mirror=true": { - SourceKubeNS: "default", - Mirror: true, - ExpConsulNS: "default", - }, - "SourceKubeNS=kube, Mirror=true": { - SourceKubeNS: "kube", - Mirror: true, - ExpConsulNS: "kube", - }, - "SourceKubeNS=default, Mirror=true, Prefix=prefix": { - SourceKubeNS: "default", - Mirror: true, - MirrorPrefix: "prefix-", - ExpConsulNS: "prefix-default", - }, - } - for name, testCase := range cases { - setup := struct { - consulSvcName string - k8sObjects func() []runtime.Object - expectedConsulSvcInstances []*api.CatalogService - expectedProxySvcInstances []*api.CatalogService - expectedHealthChecks []*api.HealthCheck - }{ - consulSvcName: "service-created", - k8sObjects: func() []runtime.Object { - pod1 := createPodWithNamespace("pod1", testCase.SourceKubeNS, "1.2.3.4", true, true) - pod2 := createPodWithNamespace("pod2", testCase.SourceKubeNS, "2.2.3.4", true, true) - endpoints := &corev1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Name: "service-created", - Namespace: testCase.SourceKubeNS, - }, - Subsets: []corev1.EndpointSubset{ - { - Addresses: []corev1.EndpointAddress{ - { - IP: "1.2.3.4", - TargetRef: &corev1.ObjectReference{ - Kind: "Pod", - Name: "pod1", - Namespace: testCase.SourceKubeNS, - }, - }, - { - IP: "2.2.3.4", - TargetRef: &corev1.ObjectReference{ - Kind: "Pod", - Name: "pod2", - Namespace: testCase.SourceKubeNS, - }, - }, - }, - }, - }, - } - return []runtime.Object{pod1, pod2, endpoints} - }, - expectedConsulSvcInstances: []*api.CatalogService{ - { - ServiceID: "pod1-service-created", - ServiceName: "service-created", - ServiceAddress: "1.2.3.4", - ServiceMeta: map[string]string{constants.MetaKeyPodName: "pod1", metaKeyKubeServiceName: "service-created", constants.MetaKeyKubeNS: testCase.SourceKubeNS, metaKeyManagedBy: constants.ManagedByValue, metaKeySyntheticNode: "true"}, - ServiceTags: []string{}, - Namespace: testCase.ExpConsulNS, - }, - { - ServiceID: "pod2-service-created", - ServiceName: "service-created", - ServiceAddress: "2.2.3.4", - ServiceMeta: map[string]string{constants.MetaKeyPodName: "pod2", metaKeyKubeServiceName: "service-created", constants.MetaKeyKubeNS: testCase.SourceKubeNS, metaKeyManagedBy: constants.ManagedByValue, metaKeySyntheticNode: "true"}, - ServiceTags: []string{}, - Namespace: testCase.ExpConsulNS, - }, - }, - expectedProxySvcInstances: []*api.CatalogService{ - { - ServiceID: "pod1-service-created-sidecar-proxy", - ServiceName: "service-created-sidecar-proxy", - ServiceAddress: "1.2.3.4", - ServicePort: 20000, - ServiceProxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "service-created", - DestinationServiceID: "pod1-service-created", - }, - ServiceMeta: map[string]string{constants.MetaKeyPodName: "pod1", metaKeyKubeServiceName: "service-created", constants.MetaKeyKubeNS: 
testCase.SourceKubeNS, metaKeyManagedBy: constants.ManagedByValue, metaKeySyntheticNode: "true"}, - ServiceTags: []string{}, - Namespace: testCase.ExpConsulNS, - }, - { - ServiceID: "pod2-service-created-sidecar-proxy", - ServiceName: "service-created-sidecar-proxy", - ServiceAddress: "2.2.3.4", - ServicePort: 20000, - ServiceProxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "service-created", - DestinationServiceID: "pod2-service-created", - }, - ServiceMeta: map[string]string{constants.MetaKeyPodName: "pod2", metaKeyKubeServiceName: "service-created", constants.MetaKeyKubeNS: testCase.SourceKubeNS, metaKeyManagedBy: constants.ManagedByValue, metaKeySyntheticNode: "true"}, - ServiceTags: []string{}, - Namespace: testCase.ExpConsulNS, - }, - }, - expectedHealthChecks: []*api.HealthCheck{ - { - CheckID: fmt.Sprintf("%s/pod1-service-created", testCase.SourceKubeNS), - ServiceName: "service-created", - ServiceID: "pod1-service-created", - Name: consulKubernetesCheckName, - Status: api.HealthPassing, - Output: kubernetesSuccessReasonMsg, - Type: consulKubernetesCheckType, - Namespace: testCase.ExpConsulNS, - }, - { - CheckID: fmt.Sprintf("%s/pod1-service-created-sidecar-proxy", testCase.SourceKubeNS), - ServiceName: "service-created-sidecar-proxy", - ServiceID: "pod1-service-created-sidecar-proxy", - Name: consulKubernetesCheckName, - Status: api.HealthPassing, - Output: kubernetesSuccessReasonMsg, - Type: consulKubernetesCheckType, - Namespace: testCase.ExpConsulNS, - }, - { - CheckID: fmt.Sprintf("%s/pod2-service-created", testCase.SourceKubeNS), - ServiceName: "service-created", - ServiceID: "pod2-service-created", - Name: consulKubernetesCheckName, - Status: api.HealthPassing, - Output: kubernetesSuccessReasonMsg, - Type: consulKubernetesCheckType, - Namespace: testCase.ExpConsulNS, - }, - { - CheckID: fmt.Sprintf("%s/pod2-service-created-sidecar-proxy", testCase.SourceKubeNS), - ServiceName: "service-created-sidecar-proxy", - ServiceID: "pod2-service-created-sidecar-proxy", - Name: consulKubernetesCheckName, - Status: api.HealthPassing, - Output: kubernetesSuccessReasonMsg, - Type: consulKubernetesCheckType, - Namespace: testCase.ExpConsulNS, - }, - }, - } - t.Run(name, func(t *testing.T) { - // Add the pods namespace. - ns := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testCase.SourceKubeNS}} - node := corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: nodeName}} - // Create fake k8s client. - k8sObjects := append(setup.k8sObjects(), &ns, &node) - fakeClient := fake.NewClientBuilder().WithRuntimeObjects(k8sObjects...).Build() - - // Create test consulServer server - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - - _, err := namespaces.EnsureExists(testClient.APIClient, testCase.ExpConsulNS, "") - require.NoError(t, err) - - // Create the endpoints controller. 
- ep := &Controller{
- Client: fakeClient,
- Log: logrtest.TestLogger{T: t},
- ConsulClientConfig: testClient.Cfg,
- ConsulServerConnMgr: testClient.Watcher,
- AllowK8sNamespacesSet: mapset.NewSetWith("*"),
- DenyK8sNamespacesSet: mapset.NewSetWith(),
- ReleaseName: "consul",
- ReleaseNamespace: "default",
- EnableConsulNamespaces: true,
- ConsulDestinationNamespace: testCase.DestConsulNS,
- EnableNSMirroring: testCase.Mirror,
- NSMirroringPrefix: testCase.MirrorPrefix,
- }
- namespacedName := types.NamespacedName{
- Namespace: testCase.SourceKubeNS,
- Name: "service-created",
- }
-
- resp, err := ep.Reconcile(context.Background(), ctrl.Request{
- NamespacedName: namespacedName,
- })
- require.NoError(t, err)
- require.False(t, resp.Requeue)
-
- consulConfig := testClient.Cfg
- consulConfig.APIClientConfig.Namespace = testCase.ExpConsulNS
- consulClient, err := api.NewClient(consulConfig.APIClientConfig)
- require.NoError(t, err)
-
- // After reconciliation, Consul should have the service with the correct number of instances.
- serviceInstances, _, err := consulClient.Catalog().Service(setup.consulSvcName, "", &api.QueryOptions{Namespace: testCase.ExpConsulNS})
- require.NoError(t, err)
- require.Len(t, serviceInstances, len(setup.expectedConsulSvcInstances))
- for i, instance := range serviceInstances {
- require.Equal(t, setup.expectedConsulSvcInstances[i].ServiceID, instance.ServiceID)
- require.Equal(t, setup.expectedConsulSvcInstances[i].ServiceName, instance.ServiceName)
- require.Equal(t, setup.expectedConsulSvcInstances[i].ServiceAddress, instance.ServiceAddress)
- require.Equal(t, setup.expectedConsulSvcInstances[i].ServicePort, instance.ServicePort)
- require.Equal(t, setup.expectedConsulSvcInstances[i].ServiceMeta, instance.ServiceMeta)
- require.Equal(t, setup.expectedConsulSvcInstances[i].ServiceTags, instance.ServiceTags)
- require.Equal(t, setup.expectedConsulSvcInstances[i].ServiceTaggedAddresses, instance.ServiceTaggedAddresses)
- }
- proxyServiceInstances, _, err := consulClient.Catalog().Service(fmt.Sprintf("%s-sidecar-proxy", setup.consulSvcName), "", &api.QueryOptions{
- Namespace: testCase.ExpConsulNS,
- })
- require.NoError(t, err)
- require.Len(t, proxyServiceInstances, len(setup.expectedProxySvcInstances))
- for i, instance := range proxyServiceInstances {
- require.Equal(t, setup.expectedProxySvcInstances[i].ServiceID, instance.ServiceID)
- require.Equal(t, setup.expectedProxySvcInstances[i].ServiceName, instance.ServiceName)
- require.Equal(t, setup.expectedProxySvcInstances[i].ServiceAddress, instance.ServiceAddress)
- require.Equal(t, setup.expectedProxySvcInstances[i].ServicePort, instance.ServicePort)
- require.Equal(t, setup.expectedProxySvcInstances[i].ServiceProxy, instance.ServiceProxy)
- require.Equal(t, setup.expectedProxySvcInstances[i].ServiceMeta, instance.ServiceMeta)
- require.Equal(t, setup.expectedProxySvcInstances[i].ServiceTags, instance.ServiceTags)
- }
-
- // Check that the Consul health checks were created for the k8s pod. 
- for _, expectedCheck := range setup.expectedHealthChecks { - var checks api.HealthChecks - filter := fmt.Sprintf("CheckID == `%s`", expectedCheck.CheckID) - checks, _, err := consulClient.Health().Checks(expectedCheck.ServiceName, &api.QueryOptions{Filter: filter}) - require.NoError(t, err) - require.Equal(t, len(checks), 1) - var ignoredFields = []string{"Node", "Definition", "Partition", "CreateIndex", "ModifyIndex", "ServiceTags"} - require.True(t, cmp.Equal(checks[0], expectedCheck, cmpopts.IgnoreFields(api.HealthCheck{}, ignoredFields...))) - } - }) - } -} - -// TestReconcileCreateGatewayWithNamespaces verifies that gateways created using -// the Endpoints Controller with Consul namespaces are correct. -func TestReconcileCreateGatewayWithNamespaces(t *testing.T) { - t.Parallel() - cases := map[string]struct { - ConsulNS string - }{ - "default Consul namespace": { - ConsulNS: "default", - }, - "other Consul namespace": { - ConsulNS: "other", - }, - } - for name, testCase := range cases { - setup := struct { - k8sObjects func() []runtime.Object - expectedConsulSvcInstances []*api.CatalogService - expectedProxySvcInstances []*api.CatalogService - expectedHealthChecks []*api.HealthCheck - }{ - k8sObjects: func() []runtime.Object { - meshGateway := createGatewayWithNamespace("mesh-gateway", "default", "3.3.3.3", map[string]string{ - constants.AnnotationGatewayWANSource: "Static", - constants.AnnotationGatewayWANAddress: "2.3.4.5", - constants.AnnotationGatewayWANPort: "443", - constants.AnnotationMeshGatewayContainerPort: "8443", - constants.AnnotationGatewayKind: meshGateway, - constants.AnnotationGatewayConsulServiceName: "mesh-gateway"}) - terminatingGateway := createGatewayWithNamespace("terminating-gateway", "default", "4.4.4.4", map[string]string{ - constants.AnnotationGatewayKind: terminatingGateway, - constants.AnnotationGatewayNamespace: testCase.ConsulNS, - constants.AnnotationGatewayConsulServiceName: "terminating-gateway"}) - ingressGateway := createGatewayWithNamespace("ingress-gateway", "default", "5.5.5.5", map[string]string{ - constants.AnnotationGatewayWANSource: "Service", - constants.AnnotationGatewayWANPort: "8443", - constants.AnnotationGatewayNamespace: testCase.ConsulNS, - constants.AnnotationGatewayKind: ingressGateway, - constants.AnnotationGatewayConsulServiceName: "ingress-gateway"}) - svc := &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Namespace: "default", - }, - Spec: corev1.ServiceSpec{ - Type: corev1.ServiceTypeLoadBalancer, - }, - Status: corev1.ServiceStatus{ - LoadBalancer: corev1.LoadBalancerStatus{ - Ingress: []corev1.LoadBalancerIngress{ - { - IP: "5.6.7.8", - }, - }, - }, - }, - } - endpoints := &corev1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Namespace: "default", - }, - Subsets: []corev1.EndpointSubset{ - { - Addresses: []corev1.EndpointAddress{ - { - IP: "3.3.3.3", - TargetRef: &corev1.ObjectReference{ - Kind: "Pod", - Name: "mesh-gateway", - Namespace: "default", - }, - }, - { - IP: "4.4.4.4", - TargetRef: &corev1.ObjectReference{ - Kind: "Pod", - Name: "terminating-gateway", - Namespace: "default", - }, - }, - { - IP: "5.5.5.5", - TargetRef: &corev1.ObjectReference{ - Kind: "Pod", - Name: "ingress-gateway", - Namespace: "default", - }, - }, - }, - }, - }, - } - return []runtime.Object{meshGateway, terminatingGateway, ingressGateway, svc, endpoints} - }, - expectedConsulSvcInstances: []*api.CatalogService{ - { - ServiceID: "mesh-gateway", - ServiceName: "mesh-gateway", - ServiceAddress: 
"3.3.3.3", - ServiceMeta: map[string]string{constants.MetaKeyPodName: "mesh-gateway", metaKeyKubeServiceName: "gateway", constants.MetaKeyKubeNS: "default", metaKeyManagedBy: constants.ManagedByValue, metaKeySyntheticNode: "true"}, - ServiceTags: []string{}, - ServicePort: 8443, - ServiceTaggedAddresses: map[string]api.ServiceAddress{ - "lan": { - Address: "3.3.3.3", - Port: 8443, - }, - "wan": { - Address: "2.3.4.5", - Port: 443, - }, - }, - Namespace: "default", - }, - { - ServiceID: "terminating-gateway", - ServiceName: "terminating-gateway", - ServiceAddress: "4.4.4.4", - ServiceMeta: map[string]string{constants.MetaKeyPodName: "terminating-gateway", metaKeyKubeServiceName: "gateway", constants.MetaKeyKubeNS: "default", metaKeyManagedBy: constants.ManagedByValue, metaKeySyntheticNode: "true"}, - ServiceTags: []string{}, - ServicePort: 8443, - Namespace: testCase.ConsulNS, - }, - { - ServiceID: "ingress-gateway", - ServiceName: "ingress-gateway", - ServiceAddress: "5.5.5.5", - ServiceMeta: map[string]string{constants.MetaKeyPodName: "ingress-gateway", metaKeyKubeServiceName: "gateway", constants.MetaKeyKubeNS: "default", metaKeyManagedBy: constants.ManagedByValue, metaKeySyntheticNode: "true"}, - ServiceTags: []string{}, - ServicePort: 21000, - ServiceTaggedAddresses: map[string]api.ServiceAddress{ - "lan": { - Address: "5.5.5.5", - Port: 21000, - }, - "wan": { - Address: "5.6.7.8", - Port: 8443, - }, - }, - Namespace: testCase.ConsulNS, - }, - }, - expectedHealthChecks: []*api.HealthCheck{ - { - CheckID: "default/mesh-gateway", - ServiceName: "mesh-gateway", - ServiceID: "mesh-gateway", - Name: consulKubernetesCheckName, - Status: api.HealthPassing, - Output: kubernetesSuccessReasonMsg, - Type: consulKubernetesCheckType, - Namespace: "default", - }, - { - CheckID: "default/terminating-gateway", - ServiceName: "terminating-gateway", - ServiceID: "terminating-gateway", - Name: consulKubernetesCheckName, - Status: api.HealthPassing, - Output: kubernetesSuccessReasonMsg, - Type: consulKubernetesCheckType, - Namespace: testCase.ConsulNS, - }, - { - CheckID: "default/ingress-gateway", - ServiceName: "ingress-gateway", - ServiceID: "ingress-gateway", - Name: consulKubernetesCheckName, - Status: api.HealthPassing, - Output: kubernetesSuccessReasonMsg, - Type: consulKubernetesCheckType, - Namespace: testCase.ConsulNS, - }, - }, - } - t.Run(name, func(t *testing.T) { - // Create fake k8s client. - node := corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: nodeName}} - fakeClient := fake.NewClientBuilder().WithRuntimeObjects(append(setup.k8sObjects(), &node)...).Build() - - // Create testCase Consul server. - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - consulClient := testClient.APIClient - _, err := namespaces.EnsureExists(consulClient, testCase.ConsulNS, "") - require.NoError(t, err) - - // Create the endpoints controller. 
- ep := &Controller{
- Client: fakeClient,
- Log: logrtest.TestLogger{T: t},
- ConsulClientConfig: testClient.Cfg,
- ConsulServerConnMgr: testClient.Watcher,
- AllowK8sNamespacesSet: mapset.NewSetWith("*"),
- DenyK8sNamespacesSet: mapset.NewSetWith(),
- ReleaseName: "consul",
- ReleaseNamespace: "default",
- EnableConsulNamespaces: true,
- }
- namespacedName := types.NamespacedName{
- Namespace: "default",
- Name: "gateway",
- }
-
- resp, err := ep.Reconcile(context.Background(), ctrl.Request{
- NamespacedName: namespacedName,
- })
- require.NoError(t, err)
- require.False(t, resp.Requeue)
-
- // After reconciliation, Consul should have the service with the correct number of instances.
- var serviceInstances []*api.CatalogService
- for _, expected := range setup.expectedConsulSvcInstances {
- serviceInstance, _, err := consulClient.Catalog().Service(expected.ServiceName, "", &api.QueryOptions{Namespace: expected.Namespace})
- require.NoError(t, err)
- serviceInstances = append(serviceInstances, serviceInstance...)
- }
-
- require.Len(t, serviceInstances, len(setup.expectedConsulSvcInstances))
- for i, instance := range serviceInstances {
- require.Equal(t, setup.expectedConsulSvcInstances[i].ServiceID, instance.ServiceID)
- require.Equal(t, setup.expectedConsulSvcInstances[i].ServiceName, instance.ServiceName)
- require.Equal(t, setup.expectedConsulSvcInstances[i].ServiceAddress, instance.ServiceAddress)
- require.Equal(t, setup.expectedConsulSvcInstances[i].ServicePort, instance.ServicePort)
- require.Equal(t, setup.expectedConsulSvcInstances[i].ServiceMeta, instance.ServiceMeta)
- require.Equal(t, setup.expectedConsulSvcInstances[i].ServiceTags, instance.ServiceTags)
- require.Equal(t, setup.expectedConsulSvcInstances[i].ServiceTaggedAddresses, instance.ServiceTaggedAddresses)
- }
-
- // Check that the Consul health checks were created for the k8s pod.
- for _, expectedCheck := range setup.expectedHealthChecks {
- var checks api.HealthChecks
- filter := fmt.Sprintf("CheckID == `%s`", expectedCheck.CheckID)
- checks, _, err := consulClient.Health().Checks(expectedCheck.ServiceName, &api.QueryOptions{Filter: filter, Namespace: expectedCheck.Namespace})
- require.NoError(t, err)
- require.Equal(t, len(checks), 1)
- var ignoredFields = []string{"Node", "Definition", "Partition", "CreateIndex", "ModifyIndex", "ServiceTags"}
- require.True(t, cmp.Equal(checks[0], expectedCheck, cmpopts.IgnoreFields(api.HealthCheck{}, ignoredFields...)))
- }
- })
- }
-}
-
-// Tests updating an Endpoints object when Consul namespaces are enabled.
-// - Tests updates via the register codepath:
-// - When an address in an Endpoint is updated, the corresponding service instance in Consul is updated in the correct Consul namespace.
-// - When an address is added to an Endpoint, an additional service instance in Consul is registered in the correct Consul namespace.
-// - Tests updates via the deregister codepath:
-// - When an address is removed from an Endpoint, the corresponding service instance in Consul is deregistered.
-// - When an address is removed from an Endpoint *and there are no addresses left in the Endpoint*, the
-// corresponding service instance in Consul is deregistered.
-//
-// For the register and deregister codepath, this also tests that they work when the Consul service name is different
-// from the K8s service name.
-// This test covers Controller.deregisterService when services should be selectively deregistered
-// since the map will not be nil. 
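The selective-deregistration rule the doc comment above describes can be summarized in a few lines. A sketch with illustrative names, not the controller's real types:

```go
package main

import "fmt"

// deregisterIDs sketches the two codepaths: when endpointsAddresses is nil
// every instance registered for the service is deregistered; when it is
// non-nil, only instances whose IDs are absent from the map are removed.
func deregisterIDs(registered []string, endpointsAddresses map[string]bool) []string {
	var toDeregister []string
	for _, id := range registered {
		if endpointsAddresses == nil || !endpointsAddresses[id] {
			toDeregister = append(toDeregister, id)
		}
	}
	return toDeregister
}

func main() {
	registered := []string{"pod1-service-updated", "pod2-service-updated"}

	// Update path: pod1 is still present in the Endpoints object, pod2 is not.
	fmt.Println(deregisterIDs(registered, map[string]bool{"pod1-service-updated": true}))
	// [pod2-service-updated]

	// Delete path: the Endpoints object has no addresses, so the map is nil.
	fmt.Println(deregisterIDs(registered, nil))
	// [pod1-service-updated pod2-service-updated]
}
```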
-func TestReconcileUpdateEndpointWithNamespaces(t *testing.T) { - t.Parallel() - nsCases := map[string]struct { - Mirror bool - MirrorPrefix string - SourceKubeNS string - DestConsulNS string - ExpConsulNS string - }{ - "SourceKubeNS=default, DestConsulNS=default": { - SourceKubeNS: "default", - DestConsulNS: "default", - ExpConsulNS: "default", - }, - "SourceKubeNS=kube, DestConsulNS=default": { - SourceKubeNS: "kube", - DestConsulNS: "default", - ExpConsulNS: "default", - }, - "SourceKubeNS=default, DestConsulNS=other": { - SourceKubeNS: "default", - DestConsulNS: "other", - ExpConsulNS: "other", - }, - "SourceKubeNS=kube, DestConsulNS=other": { - SourceKubeNS: "kube", - DestConsulNS: "other", - ExpConsulNS: "other", - }, - "SourceKubeNS=default, Mirror=true": { - SourceKubeNS: "default", - Mirror: true, - ExpConsulNS: "default", - }, - "SourceKubeNS=kube, Mirror=true": { - SourceKubeNS: "kube", - Mirror: true, - ExpConsulNS: "kube", - }, - "SourceKubeNS=default, Mirror=true, Prefix=prefix": { - SourceKubeNS: "default", - Mirror: true, - MirrorPrefix: "prefix-", - ExpConsulNS: "prefix-default", - }, - } - for name, ts := range nsCases { - cases := []struct { - name string - consulSvcName string - k8sObjects func() []runtime.Object - initialConsulSvcs []*api.CatalogRegistration - expectedConsulSvcInstances []*api.CatalogService - expectedProxySvcInstances []*api.CatalogService - enableACLs bool - }{ - { - name: "Endpoints has an updated address (pod IP change).", - consulSvcName: "service-updated", - k8sObjects: func() []runtime.Object { - pod1 := createPodWithNamespace("pod1", ts.SourceKubeNS, "4.4.4.4", true, true) - endpoint := &corev1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Name: "service-updated", - Namespace: ts.SourceKubeNS, - }, - Subsets: []corev1.EndpointSubset{ - { - Addresses: []corev1.EndpointAddress{ - { - IP: "4.4.4.4", - TargetRef: &corev1.ObjectReference{ - Kind: "Pod", - Name: "pod1", - Namespace: ts.SourceKubeNS, - }, - }, - }, - }, - }, - } - return []runtime.Object{pod1, endpoint} - }, - initialConsulSvcs: []*api.CatalogRegistration{ - { - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - ID: "pod1-service-updated", - Service: "service-updated", - Port: 80, - Address: "1.2.3.4", - Meta: map[string]string{metaKeyManagedBy: constants.ManagedByValue}, - Namespace: ts.ExpConsulNS, - }, - }, - { - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod1-service-updated-sidecar-proxy", - Service: "service-updated-sidecar-proxy", - Port: 20000, - Address: "1.2.3.4", - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "service-updated", - DestinationServiceID: "pod1-service-updated", - }, - Namespace: ts.ExpConsulNS, - }, - }, - }, - expectedConsulSvcInstances: []*api.CatalogService{ - { - ServiceID: "pod1-service-updated", - ServiceAddress: "4.4.4.4", - Namespace: ts.ExpConsulNS, - }, - }, - expectedProxySvcInstances: []*api.CatalogService{ - { - ServiceID: "pod1-service-updated-sidecar-proxy", - ServiceAddress: "4.4.4.4", - Namespace: ts.ExpConsulNS, - }, - }, - }, - { - name: "Different Consul service name: Endpoints has an updated address (pod IP change).", - consulSvcName: "different-consul-svc-name", - k8sObjects: func() []runtime.Object { - pod1 := createPodWithNamespace("pod1", ts.SourceKubeNS, "4.4.4.4", true, true) - pod1.Annotations[constants.AnnotationService] = "different-consul-svc-name" - endpoint := &corev1.Endpoints{ - 
ObjectMeta: metav1.ObjectMeta{ - Name: "service-updated", - Namespace: ts.SourceKubeNS, - }, - Subsets: []corev1.EndpointSubset{ - { - Addresses: []corev1.EndpointAddress{ - { - IP: "4.4.4.4", - TargetRef: &corev1.ObjectReference{ - Kind: "Pod", - Name: "pod1", - Namespace: ts.SourceKubeNS, - }, - }, - }, - }, - }, - } - return []runtime.Object{pod1, endpoint} - }, - initialConsulSvcs: []*api.CatalogRegistration{ - { - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - ID: "pod1-different-consul-svc-name", - Service: "different-consul-svc-name", - Port: 80, - Address: "1.2.3.4", - Meta: map[string]string{metaKeyManagedBy: constants.ManagedByValue}, - Namespace: ts.ExpConsulNS, - }, - }, - { - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod1-different-consul-svc-name-sidecar-proxy", - Service: "different-consul-svc-name-sidecar-proxy", - Port: 20000, - Address: "1.2.3.4", - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "different-consul-svc-name", - DestinationServiceID: "pod1-different-consul-svc-name", - }, - Namespace: ts.ExpConsulNS, - }, - }, - }, - expectedConsulSvcInstances: []*api.CatalogService{ - { - ServiceID: "pod1-different-consul-svc-name", - ServiceAddress: "4.4.4.4", - Namespace: ts.ExpConsulNS, - }, - }, - expectedProxySvcInstances: []*api.CatalogService{ - { - ServiceID: "pod1-different-consul-svc-name-sidecar-proxy", - ServiceAddress: "4.4.4.4", - Namespace: ts.ExpConsulNS, - }, - }, - }, - { - name: "Endpoints has additional address not in Consul.", - consulSvcName: "service-updated", - k8sObjects: func() []runtime.Object { - pod1 := createPodWithNamespace("pod1", ts.SourceKubeNS, "1.2.3.4", true, true) - pod2 := createPodWithNamespace("pod2", ts.SourceKubeNS, "2.2.3.4", true, true) - endpointWithTwoAddresses := &corev1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Name: "service-updated", - Namespace: ts.SourceKubeNS, - }, - Subsets: []corev1.EndpointSubset{ - { - Addresses: []corev1.EndpointAddress{ - { - IP: "1.2.3.4", - TargetRef: &corev1.ObjectReference{ - Kind: "Pod", - Name: "pod1", - Namespace: ts.SourceKubeNS, - }, - }, - { - IP: "2.2.3.4", - TargetRef: &corev1.ObjectReference{ - Kind: "Pod", - Name: "pod2", - Namespace: ts.SourceKubeNS, - }, - }, - }, - }, - }, - } - return []runtime.Object{pod1, pod2, endpointWithTwoAddresses} - }, - initialConsulSvcs: []*api.CatalogRegistration{ - { - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - ID: "pod1-service-updated", - Service: "service-updated", - Port: 80, - Address: "1.2.3.4", - Meta: map[string]string{metaKeyManagedBy: constants.ManagedByValue}, - Namespace: ts.ExpConsulNS, - }, - }, - { - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod1-service-updated-sidecar-proxy", - Service: "service-updated-sidecar-proxy", - Port: 20000, - Address: "1.2.3.4", - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "service-updated", - DestinationServiceID: "pod1-service-updated", - }, - Namespace: ts.ExpConsulNS, - }, - }, - }, - expectedConsulSvcInstances: []*api.CatalogService{ - { - ServiceID: "pod1-service-updated", - ServiceAddress: "1.2.3.4", - Namespace: ts.ExpConsulNS, - }, - { - ServiceID: "pod2-service-updated", - ServiceAddress: "2.2.3.4", - Namespace: ts.ExpConsulNS, - }, - }, - expectedProxySvcInstances: []*api.CatalogService{ - { - ServiceID: 
"pod1-service-updated-sidecar-proxy", - ServiceAddress: "1.2.3.4", - Namespace: ts.ExpConsulNS, - }, - { - ServiceID: "pod2-service-updated-sidecar-proxy", - ServiceAddress: "2.2.3.4", - Namespace: ts.ExpConsulNS, - }, - }, - }, - { - name: "Consul has instances that are not in the Endpoints addresses", - consulSvcName: "service-updated", - k8sObjects: func() []runtime.Object { - pod1 := createPodWithNamespace("pod1", ts.SourceKubeNS, "1.2.3.4", true, true) - endpoint := &corev1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Name: "service-updated", - Namespace: ts.SourceKubeNS, - }, - Subsets: []corev1.EndpointSubset{ - { - Addresses: []corev1.EndpointAddress{ - { - IP: "1.2.3.4", - TargetRef: &corev1.ObjectReference{ - Kind: "Pod", - Name: "pod1", - Namespace: ts.SourceKubeNS, - }, - }, - }, - }, - }, - } - return []runtime.Object{pod1, endpoint} - }, - initialConsulSvcs: []*api.CatalogRegistration{ - { - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - ID: "pod1-service-updated", - Service: "service-updated", - Port: 80, - Address: "1.2.3.4", - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, metaKeyManagedBy: constants.ManagedByValue}, - Namespace: ts.ExpConsulNS, - }, - }, - { - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod1-service-updated-sidecar-proxy", - Service: "service-updated-sidecar-proxy", - Port: 20000, - Address: "1.2.3.4", - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "service-updated", - DestinationServiceID: "pod1-service-updated", - }, - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, metaKeyManagedBy: constants.ManagedByValue}, - Namespace: ts.ExpConsulNS, - }, - }, - { - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - ID: "pod2-service-updated", - Service: "service-updated", - Port: 80, - Address: "2.2.3.4", - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, metaKeyManagedBy: constants.ManagedByValue}, - Namespace: ts.ExpConsulNS, - }, - }, - { - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod2-service-updated-sidecar-proxy", - Service: "service-updated-sidecar-proxy", - Port: 20000, - Address: "2.2.3.4", - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "service-updated", - DestinationServiceID: "pod2-service-updated", - }, - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, metaKeyManagedBy: constants.ManagedByValue}, - Namespace: ts.ExpConsulNS, - }, - }, - }, - expectedConsulSvcInstances: []*api.CatalogService{ - { - ServiceID: "pod1-service-updated", - ServiceAddress: "1.2.3.4", - Namespace: ts.ExpConsulNS, - }, - }, - expectedProxySvcInstances: []*api.CatalogService{ - { - ServiceID: "pod1-service-updated-sidecar-proxy", - ServiceAddress: "1.2.3.4", - Namespace: ts.ExpConsulNS, - }, - }, - }, - { - name: "Different Consul service name: Consul has instances that are not in the Endpoints addresses", - consulSvcName: "different-consul-svc-name", - k8sObjects: func() []runtime.Object { - pod1 := createPodWithNamespace("pod1", ts.SourceKubeNS, "1.2.3.4", true, true) - pod1.Annotations[constants.AnnotationService] = "different-consul-svc-name" - endpoint := &corev1.Endpoints{ - ObjectMeta: 
metav1.ObjectMeta{ - Name: "service-updated", - Namespace: ts.SourceKubeNS, - }, - Subsets: []corev1.EndpointSubset{ - { - Addresses: []corev1.EndpointAddress{ - { - IP: "1.2.3.4", - TargetRef: &corev1.ObjectReference{ - Kind: "Pod", - Name: "pod1", - Namespace: ts.SourceKubeNS, - }, - }, - }, - }, - }, - } - return []runtime.Object{pod1, endpoint} - }, - initialConsulSvcs: []*api.CatalogRegistration{ - { - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - ID: "pod1-different-consul-svc-name", - Service: "different-consul-svc-name", - Port: 80, - Address: "1.2.3.4", - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, metaKeyManagedBy: constants.ManagedByValue}, - Namespace: ts.ExpConsulNS, - }, - }, - { - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod1-different-consul-svc-name-sidecar-proxy", - Service: "different-consul-svc-name-sidecar-proxy", - Port: 20000, - Address: "1.2.3.4", - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "different-consul-svc-name", - DestinationServiceID: "pod1-different-consul-svc-name", - }, - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, metaKeyManagedBy: constants.ManagedByValue}, - Namespace: ts.ExpConsulNS, - }, - }, - { - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - ID: "pod2-different-consul-svc-name", - Service: "different-consul-svc-name", - Port: 80, - Address: "2.2.3.4", - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, metaKeyManagedBy: constants.ManagedByValue}, - Namespace: ts.ExpConsulNS, - }, - }, - { - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod2-different-consul-svc-name-sidecar-proxy", - Service: "different-consul-svc-name-sidecar-proxy", - Port: 20000, - Address: "2.2.3.4", - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "different-consul-svc-name", - DestinationServiceID: "pod2-different-consul-svc-name", - }, - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, metaKeyManagedBy: constants.ManagedByValue}, - Namespace: ts.ExpConsulNS, - }, - }, - }, - expectedConsulSvcInstances: []*api.CatalogService{ - { - ServiceID: "pod1-different-consul-svc-name", - ServiceAddress: "1.2.3.4", - Namespace: ts.ExpConsulNS, - }, - }, - expectedProxySvcInstances: []*api.CatalogService{ - { - ServiceID: "pod1-different-consul-svc-name-sidecar-proxy", - ServiceAddress: "1.2.3.4", - Namespace: ts.ExpConsulNS, - }, - }, - }, - { - // When a k8s deployment is deleted but it's k8s service continues to exist, the endpoints has no addresses - // and the instances should be deleted from Consul. 
- name: "Consul has instances that are not in the endpoints, and the endpoints has no addresses.",
- consulSvcName: "service-updated",
- k8sObjects: func() []runtime.Object {
- endpoint := &corev1.Endpoints{
- ObjectMeta: metav1.ObjectMeta{
- Name: "service-updated",
- Namespace: ts.SourceKubeNS,
- },
- }
- return []runtime.Object{endpoint}
- },
- initialConsulSvcs: []*api.CatalogRegistration{
- {
- Node: consulNodeName,
- Address: consulNodeAddress,
- Service: &api.AgentService{
- ID: "pod1-service-updated",
- Service: "service-updated",
- Port: 80,
- Address: "1.2.3.4",
- Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, metaKeyManagedBy: constants.ManagedByValue},
- Namespace: ts.ExpConsulNS,
- },
- },
- {
- Node: consulNodeName,
- Address: consulNodeAddress,
- Service: &api.AgentService{
- Kind: api.ServiceKindConnectProxy,
- ID: "pod1-service-updated-sidecar-proxy",
- Service: "service-updated-sidecar-proxy",
- Port: 20000,
- Address: "1.2.3.4",
- Proxy: &api.AgentServiceConnectProxyConfig{
- DestinationServiceName: "service-updated",
- DestinationServiceID: "pod1-service-updated",
- },
- Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, metaKeyManagedBy: constants.ManagedByValue},
- Namespace: ts.ExpConsulNS,
- },
- },
- {
- Node: consulNodeName,
- Address: consulNodeAddress,
- Service: &api.AgentService{
- ID: "pod2-service-updated",
- Service: "service-updated",
- Port: 80,
- Address: "2.2.3.4",
- Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, metaKeyManagedBy: constants.ManagedByValue},
- Namespace: ts.ExpConsulNS,
- },
- },
- {
- Node: consulNodeName,
- Address: consulNodeAddress,
- Service: &api.AgentService{
- Kind: api.ServiceKindConnectProxy,
- ID: "pod2-service-updated-sidecar-proxy",
- Service: "service-updated-sidecar-proxy",
- Port: 20000,
- Address: "2.2.3.4",
- Proxy: &api.AgentServiceConnectProxyConfig{
- DestinationServiceName: "service-updated",
- DestinationServiceID: "pod2-service-updated",
- },
- Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, metaKeyManagedBy: constants.ManagedByValue},
- Namespace: ts.ExpConsulNS,
- },
- },
- },
- expectedConsulSvcInstances: []*api.CatalogService{},
- expectedProxySvcInstances: []*api.CatalogService{},
- },
- {
- // With a different Consul service name, when a k8s deployment is deleted but its k8s service continues to
- // exist, the Endpoints object has no addresses and the instances should be deleted from Consul. 
- name: "Different Consul service name: Consul has instances that are not in the endpoints, and the endpoints has no addresses.", - consulSvcName: "different-consul-svc-name", - k8sObjects: func() []runtime.Object { - endpoint := &corev1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Name: "service-updated", - Namespace: ts.SourceKubeNS, - }, - } - return []runtime.Object{endpoint} - }, - initialConsulSvcs: []*api.CatalogRegistration{ - { - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - ID: "pod1-different-consul-svc-name", - Service: "different-consul-svc-name", - Port: 80, - Address: "1.2.3.4", - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, metaKeyManagedBy: constants.ManagedByValue}, - Namespace: ts.ExpConsulNS, - }, - }, - { - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod1-different-consul-svc-name-sidecar-proxy", - Service: "different-consul-svc-name-sidecar-proxy", - Port: 20000, - Address: "1.2.3.4", - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "different-consul-svc-name", - DestinationServiceID: "pod1-different-consul-svc-name", - }, - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, metaKeyManagedBy: constants.ManagedByValue}, - Namespace: ts.ExpConsulNS, - }, - }, - { - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - ID: "pod2-different-consul-svc-name", - Service: "different-consul-svc-name", - Port: 80, - Address: "2.2.3.4", - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, metaKeyManagedBy: constants.ManagedByValue}, - Namespace: ts.ExpConsulNS, - }, - }, - { - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod2-different-consul-svc-name-sidecar-proxy", - Service: "different-consul-svc-name-sidecar-proxy", - Port: 20000, - Address: "2.2.3.4", - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "different-consul-svc-name", - DestinationServiceID: "pod2-different-consul-svc-name", - }, - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, metaKeyManagedBy: constants.ManagedByValue}, - Namespace: ts.ExpConsulNS, - }, - }, - }, - expectedConsulSvcInstances: []*api.CatalogService{}, - expectedProxySvcInstances: []*api.CatalogService{}, - }, - { - name: "ACLs enabled: Endpoints has an updated address because the target pod changes", - consulSvcName: "service-updated", - k8sObjects: func() []runtime.Object { - pod2 := createPodWithNamespace("pod2", ts.SourceKubeNS, "4.4.4.4", true, true) - endpoint := &corev1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Name: "service-updated", - Namespace: ts.SourceKubeNS, - }, - Subsets: []corev1.EndpointSubset{ - { - Addresses: []corev1.EndpointAddress{ - { - IP: "4.4.4.4", - TargetRef: &corev1.ObjectReference{ - Kind: "Pod", - Name: "pod2", - Namespace: ts.SourceKubeNS, - }, - }, - }, - }, - }, - } - return []runtime.Object{pod2, endpoint} - }, - initialConsulSvcs: []*api.CatalogRegistration{ - { - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - ID: "pod1-service-updated", - Service: "service-updated", - Port: 80, - Address: "1.2.3.4", - Meta: map[string]string{ - metaKeyManagedBy: constants.ManagedByValue, - metaKeyKubeServiceName: 
"service-updated", - constants.MetaKeyPodName: "pod1", - constants.MetaKeyKubeNS: ts.SourceKubeNS, - metaKeySyntheticNode: "true", - }, - Namespace: ts.ExpConsulNS, - }, - }, - { - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod1-service-updated-sidecar-proxy", - Service: "service-updated-sidecar-proxy", - Port: 20000, - Address: "1.2.3.4", - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "service-updated", - DestinationServiceID: "pod1-service-updated", - }, - Meta: map[string]string{ - metaKeyManagedBy: constants.ManagedByValue, - metaKeyKubeServiceName: "service-updated", - constants.MetaKeyPodName: "pod1", - constants.MetaKeyKubeNS: ts.SourceKubeNS, - metaKeySyntheticNode: "true", - }, - Namespace: ts.ExpConsulNS, - }, - }, - }, - expectedConsulSvcInstances: []*api.CatalogService{ - { - ServiceID: "pod2-service-updated", - ServiceAddress: "4.4.4.4", - Namespace: ts.ExpConsulNS, - }, - }, - expectedProxySvcInstances: []*api.CatalogService{ - { - ServiceID: "pod2-service-updated-sidecar-proxy", - ServiceAddress: "4.4.4.4", - Namespace: ts.ExpConsulNS, - }, - }, - enableACLs: true, - }, - { - name: "ACLs enabled: Consul has instances that are not in the Endpoints addresses", - consulSvcName: "service-updated", - k8sObjects: func() []runtime.Object { - pod1 := createPodWithNamespace("pod1", ts.SourceKubeNS, "1.2.3.4", true, true) - endpoint := &corev1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Name: "service-updated", - Namespace: ts.SourceKubeNS, - }, - Subsets: []corev1.EndpointSubset{ - { - Addresses: []corev1.EndpointAddress{ - { - IP: "1.2.3.4", - TargetRef: &corev1.ObjectReference{ - Kind: "Pod", - Name: "pod1", - Namespace: ts.SourceKubeNS, - }, - }, - }, - }, - }, - } - return []runtime.Object{pod1, endpoint} - }, - initialConsulSvcs: []*api.CatalogRegistration{ - { - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - ID: "pod1-service-updated", - Service: "service-updated", - Port: 80, - Address: "1.2.3.4", - Meta: map[string]string{ - metaKeyKubeServiceName: "service-updated", - constants.MetaKeyKubeNS: ts.SourceKubeNS, - metaKeyManagedBy: constants.ManagedByValue, - constants.MetaKeyPodName: "pod1", - metaKeySyntheticNode: "true", - }, - Namespace: ts.ExpConsulNS, - }, - }, - { - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod1-service-updated-sidecar-proxy", - Service: "service-updated-sidecar-proxy", - Port: 20000, - Address: "1.2.3.4", - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "service-updated", - DestinationServiceID: "pod1-service-updated", - }, - Meta: map[string]string{ - metaKeyKubeServiceName: "service-updated", - constants.MetaKeyKubeNS: ts.SourceKubeNS, - metaKeyManagedBy: constants.ManagedByValue, - constants.MetaKeyPodName: "pod1", - metaKeySyntheticNode: "true", - }, - Namespace: ts.ExpConsulNS, - }, - }, - { - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - ID: "pod2-service-updated", - Service: "service-updated", - Port: 80, - Address: "2.2.3.4", - Meta: map[string]string{ - metaKeyKubeServiceName: "service-updated", - constants.MetaKeyKubeNS: ts.SourceKubeNS, - metaKeyManagedBy: constants.ManagedByValue, - constants.MetaKeyPodName: "pod2", - metaKeySyntheticNode: "true", - }, - Namespace: ts.ExpConsulNS, - }, - }, - { - Node: consulNodeName, - Address: consulNodeAddress, - 
Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod2-service-updated-sidecar-proxy", - Service: "service-updated-sidecar-proxy", - Port: 20000, - Address: "2.2.3.4", - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "service-updated", - DestinationServiceID: "pod2-service-updated", - }, - Meta: map[string]string{ - metaKeyKubeServiceName: "service-updated", - constants.MetaKeyKubeNS: ts.SourceKubeNS, - metaKeyManagedBy: constants.ManagedByValue, - constants.MetaKeyPodName: "pod2", - metaKeySyntheticNode: "true", - }, - Namespace: ts.ExpConsulNS, - }, - }, - }, - expectedConsulSvcInstances: []*api.CatalogService{ - { - ServiceID: "pod1-service-updated", - ServiceAddress: "1.2.3.4", - Namespace: ts.ExpConsulNS, - }, - }, - expectedProxySvcInstances: []*api.CatalogService{ - { - ServiceID: "pod1-service-updated-sidecar-proxy", - ServiceAddress: "1.2.3.4", - Namespace: ts.ExpConsulNS, - }, - }, - enableACLs: true, - }, - } - for _, tt := range cases { - t.Run(fmt.Sprintf("%s: %s", name, tt.name), func(t *testing.T) { - // Add the pods namespace. - ns := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ts.SourceKubeNS}} - node := corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: nodeName}} - // Create fake k8s client. - k8sObjects := append(tt.k8sObjects(), &ns, &node) - fakeClient := fake.NewClientBuilder().WithRuntimeObjects(k8sObjects...).Build() - - // Create test consulServer server - adminToken := "123e4567-e89b-12d3-a456-426614174000" - testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { - if tt.enableACLs { - c.ACL.Enabled = tt.enableACLs - c.ACL.Tokens.InitialManagement = adminToken - } - }) - - consulClient := testClient.APIClient - - _, err := namespaces.EnsureExists(consulClient, ts.ExpConsulNS, "") - require.NoError(t, err) - - // Holds token accessorID for each service ID. - tokensForServices := make(map[string]string) - - // Register service and proxy in Consul. - for _, svc := range tt.initialConsulSvcs { - _, err = consulClient.Catalog().Register(svc, nil) - require.NoError(t, err) - // Create a token for this service if ACLs are enabled. - if tt.enableACLs { - if svc.Service.Kind != api.ServiceKindConnectProxy { - var writeOpts api.WriteOptions - // When mirroring is enabled, the auth method will be created in the "default" Consul namespace. - if ts.Mirror { - writeOpts.Namespace = "default" - } else { - writeOpts.Namespace = ts.ExpConsulNS - } - test.SetupK8sAuthMethodWithNamespaces(t, consulClient, svc.Service.Service, svc.Service.Meta[constants.MetaKeyKubeNS], ts.ExpConsulNS, ts.Mirror, ts.MirrorPrefix) - token, _, err := consulClient.ACL().Login(&api.ACLLoginParams{ - AuthMethod: test.AuthMethod, - BearerToken: test.ServiceAccountJWTToken, - Meta: map[string]string{ - tokenMetaPodNameKey: fmt.Sprintf("%s/%s", svc.Service.Meta[constants.MetaKeyKubeNS], svc.Service.Meta[constants.MetaKeyPodName]), - }, - }, &writeOpts) - - require.NoError(t, err) - - tokensForServices[svc.ID] = token.AccessorID - - // Create another token for the same service but a pod that either no longer exists - // or the endpoints controller doesn't know about it yet. - // This is to test a scenario with either orphaned tokens - // or tokens for services that haven't yet been registered with Consul. - // In that case, we have a token for the pod but the service instance - // for that pod either no longer exists or is not yet registered in Consul. - // This token should not be deleted. 
- token, _, err = consulClient.ACL().Login(&api.ACLLoginParams{ - AuthMethod: test.AuthMethod, - BearerToken: test.ServiceAccountJWTToken, - Meta: map[string]string{ - tokenMetaPodNameKey: fmt.Sprintf("%s/%s", svc.Service.Meta[constants.MetaKeyKubeNS], "does-not-exist"), - }, - }, &writeOpts) - require.NoError(t, err) - tokensForServices["does-not-exist"+svc.Service.Service] = token.AccessorID - } - } - } - - // Create the endpoints controller. - ep := &Controller{ - Client: fakeClient, - Log: logrtest.TestLogger{T: t}, - ConsulClientConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, - AllowK8sNamespacesSet: mapset.NewSetWith("*"), - DenyK8sNamespacesSet: mapset.NewSetWith(), - ReleaseName: "consul", - ReleaseNamespace: "default", - EnableConsulNamespaces: true, - EnableNSMirroring: ts.Mirror, - NSMirroringPrefix: ts.MirrorPrefix, - ConsulDestinationNamespace: ts.DestConsulNS, - } - if tt.enableACLs { - ep.AuthMethod = test.AuthMethod - } - namespacedName := types.NamespacedName{ - Namespace: ts.SourceKubeNS, - Name: "service-updated", - } - - resp, err := ep.Reconcile(context.Background(), ctrl.Request{ - NamespacedName: namespacedName, - }) - require.NoError(t, err) - require.False(t, resp.Requeue) - - // Create new consul client with the expected consul ns so we can make calls for assertions. - consulConfig := testClient.Cfg - consulConfig.APIClientConfig.Namespace = ts.ExpConsulNS - consulClient, err = api.NewClient(consulConfig.APIClientConfig) - require.NoError(t, err) - - // After reconciliation, Consul should have service-updated with the correct number of instances. - serviceInstances, _, err := consulClient.Catalog().Service(tt.consulSvcName, "", &api.QueryOptions{Namespace: ts.ExpConsulNS}) - require.NoError(t, err) - require.Len(t, serviceInstances, len(tt.expectedProxySvcInstances)) - for i, instance := range serviceInstances { - require.Equal(t, tt.expectedConsulSvcInstances[i].ServiceID, instance.ServiceID) - require.Equal(t, tt.expectedConsulSvcInstances[i].ServiceAddress, instance.ServiceAddress) - } - proxyServiceInstances, _, err := consulClient.Catalog().Service(fmt.Sprintf("%s-sidecar-proxy", tt.consulSvcName), "", &api.QueryOptions{Namespace: ts.ExpConsulNS}) - require.NoError(t, err) - require.Len(t, proxyServiceInstances, len(tt.expectedProxySvcInstances)) - for i, instance := range proxyServiceInstances { - require.Equal(t, tt.expectedProxySvcInstances[i].ServiceID, instance.ServiceID) - require.Equal(t, tt.expectedProxySvcInstances[i].ServiceAddress, instance.ServiceAddress) - } - - if tt.enableACLs { - // Put expected services into a map to make it easier to find service IDs. - expectedServices := mapset.NewSet() - for _, svc := range tt.expectedConsulSvcInstances { - expectedServices.Add(svc.ServiceID) - } - - initialServices := mapset.NewSet() - for _, svc := range tt.initialConsulSvcs { - initialServices.Add(svc.ID) - } - - // We only care about a case when services are deregistered, where - // the set of initial services is bigger than the set of expected services. - deregisteredServices := initialServices.Difference(expectedServices) - - // Look through the tokens we've created and check that only - // tokens for the deregistered services have been deleted. - for serviceID, tokenID := range tokensForServices { - // Read the token from Consul. 
- token, _, err := consulClient.ACL().TokenRead(tokenID, nil) - if deregisteredServices.Contains(serviceID) { - require.EqualError(t, err, "Unexpected response code: 403 (ACL not found)") - } else { - require.NoError(t, err, "token should exist for service instance: "+serviceID) - require.NotNil(t, token) - } - } - } - }) - } - } -} - -// Tests deleting an Endpoints object, with and without matching Consul and K8s service names when Consul namespaces are enabled. -// This test covers Controller.deregisterService when the map is nil (not selectively deregistered). -func TestReconcileDeleteEndpointWithNamespaces(t *testing.T) { - t.Parallel() - cases := map[string]struct { - Mirror bool - MirrorPrefix string - SourceKubeNS string - DestConsulNS string - ExpConsulNS string - }{ - "SourceKubeNS=default, DestConsulNS=default": { - SourceKubeNS: "default", - DestConsulNS: "default", - ExpConsulNS: "default", - }, - "SourceKubeNS=kube, DestConsulNS=default": { - SourceKubeNS: "kube", - DestConsulNS: "default", - ExpConsulNS: "default", - }, - "SourceKubeNS=default, DestConsulNS=other": { - SourceKubeNS: "default", - DestConsulNS: "other", - ExpConsulNS: "other", - }, - "SourceKubeNS=kube, DestConsulNS=other": { - SourceKubeNS: "kube", - DestConsulNS: "other", - ExpConsulNS: "other", - }, - "SourceKubeNS=default, Mirror=true": { - SourceKubeNS: "default", - Mirror: true, - ExpConsulNS: "default", - }, - "SourceKubeNS=kube, Mirror=true": { - SourceKubeNS: "kube", - Mirror: true, - ExpConsulNS: "kube", - }, - "SourceKubeNS=default, Mirror=true, Prefix=prefix": { - SourceKubeNS: "default", - Mirror: true, - MirrorPrefix: "prefix-", - ExpConsulNS: "prefix-default", - }, - } - for name, ts := range cases { - cases := []struct { - name string - consulSvcName string - initialConsulSvcs []*api.AgentService - enableACLs bool - }{ - { - name: "Consul service name matches K8s service name", - consulSvcName: "service-deleted", - initialConsulSvcs: []*api.AgentService{ - { - ID: "pod1-service-deleted", - Service: "service-deleted", - Port: 80, - Address: "1.2.3.4", - Meta: map[string]string{"k8s-service-name": "service-deleted", "k8s-namespace": ts.SourceKubeNS, metaKeyManagedBy: constants.ManagedByValue}, - Namespace: ts.ExpConsulNS, - }, - { - Kind: api.ServiceKindConnectProxy, - ID: "pod1-service-deleted-sidecar-proxy", - Service: "service-deleted-sidecar-proxy", - Port: 20000, - Address: "1.2.3.4", - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "service-deleted", - DestinationServiceID: "pod1-service-deleted", - }, - Meta: map[string]string{"k8s-service-name": "service-deleted", "k8s-namespace": ts.SourceKubeNS, metaKeyManagedBy: constants.ManagedByValue}, - Namespace: ts.ExpConsulNS, - }, - }, - }, - { - name: "Consul service name does not match K8s service name", - consulSvcName: "different-consul-svc-name", - initialConsulSvcs: []*api.AgentService{ - { - ID: "pod1-different-consul-svc-name", - Service: "different-consul-svc-name", - Port: 80, - Address: "1.2.3.4", - Meta: map[string]string{"k8s-service-name": "service-deleted", "k8s-namespace": ts.SourceKubeNS, metaKeyManagedBy: constants.ManagedByValue}, - Namespace: ts.ExpConsulNS, - }, - { - Kind: api.ServiceKindConnectProxy, - ID: "pod1-different-consul-svc-name-sidecar-proxy", - Service: "different-consul-svc-name-sidecar-proxy", - Port: 20000, - Address: "1.2.3.4", - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "different-consul-svc-name", - DestinationServiceID: "pod1-different-consul-svc-name", - 
							TransparentProxy: &api.TransparentProxyConfig{},
-						},
-						Meta:      map[string]string{"k8s-service-name": "service-deleted", "k8s-namespace": ts.SourceKubeNS, metaKeyManagedBy: constants.ManagedByValue},
-						Namespace: ts.ExpConsulNS,
-					},
-				},
-			},
-			{
-				name:          "When ACLs are enabled, the ACL token should be deleted",
-				consulSvcName: "service-deleted",
-				initialConsulSvcs: []*api.AgentService{
-					{
-						ID:      "pod1-service-deleted",
-						Service: "service-deleted",
-						Port:    80,
-						Address: "1.2.3.4",
-						Meta: map[string]string{
-							metaKeyKubeServiceName:   "service-deleted",
-							constants.MetaKeyKubeNS:  ts.SourceKubeNS,
-							metaKeyManagedBy:         constants.ManagedByValue,
-							constants.MetaKeyPodName: "pod1",
-						},
-						Namespace: ts.ExpConsulNS,
-					},
-					{
-						Kind:    api.ServiceKindConnectProxy,
-						ID:      "pod1-service-deleted-sidecar-proxy",
-						Service: "service-deleted-sidecar-proxy",
-						Port:    20000,
-						Address: "1.2.3.4",
-						Proxy: &api.AgentServiceConnectProxyConfig{
-							DestinationServiceName: "service-deleted",
-							DestinationServiceID:   "pod1-service-deleted",
-						},
-						Meta: map[string]string{
-							metaKeyKubeServiceName:   "service-deleted",
-							constants.MetaKeyKubeNS:  ts.SourceKubeNS,
-							metaKeyManagedBy:         constants.ManagedByValue,
-							constants.MetaKeyPodName: "pod1",
-							metaKeySyntheticNode:     "true",
-						},
-						Namespace: ts.ExpConsulNS,
-					},
-				},
-				enableACLs: true,
-			},
-		}
-		for _, tt := range cases {
-			t.Run(fmt.Sprintf("%s:%s", name, tt.name), func(t *testing.T) {
-				node := corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: nodeName}}
-				// Create fake k8s client.
-				fakeClient := fake.NewClientBuilder().WithRuntimeObjects(&node).Build()
-
-				// Create a test Consul server.
-				adminToken := "123e4567-e89b-12d3-a456-426614174000"
-				testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) {
-					if tt.enableACLs {
-						c.ACL.Enabled = tt.enableACLs
-						c.ACL.Tokens.InitialManagement = adminToken
-					}
-				})
-				consulClient := testClient.APIClient
-
-				_, err := namespaces.EnsureExists(consulClient, ts.ExpConsulNS, "")
-				require.NoError(t, err)
-
-				// Register service and proxy in Consul.
-				var token *api.ACLToken
-				for _, svc := range tt.initialConsulSvcs {
-					serviceRegistration := &api.CatalogRegistration{
-						Node:    consulNodeName,
-						Address: consulNodeAddress,
-						Service: svc,
-					}
-					_, err = consulClient.Catalog().Register(serviceRegistration, nil)
-					require.NoError(t, err)
-					// Create a token for it if ACLs are enabled.
-					if tt.enableACLs {
-						var writeOpts api.WriteOptions
-						// When mirroring is enabled, the auth method will be created in the "default" Consul namespace.
-						if ts.Mirror {
-							writeOpts.Namespace = "default"
-						} else {
-							writeOpts.Namespace = ts.ExpConsulNS
-						}
-						test.SetupK8sAuthMethodWithNamespaces(t, consulClient, svc.Service, svc.Meta[constants.MetaKeyKubeNS], ts.ExpConsulNS, ts.Mirror, ts.MirrorPrefix)
-						token, _, err = consulClient.ACL().Login(&api.ACLLoginParams{
-							AuthMethod:  test.AuthMethod,
-							BearerToken: test.ServiceAccountJWTToken,
-							Meta: map[string]string{
-								tokenMetaPodNameKey: fmt.Sprintf("%s/%s", svc.Meta[constants.MetaKeyKubeNS], svc.Meta[constants.MetaKeyPodName]),
-							},
-						}, &writeOpts)
-
-						require.NoError(t, err)
-					}
-				}
-
-				// Create the endpoints controller.
-				ep := &Controller{
-					Client:                     fakeClient,
-					Log:                        logrtest.TestLogger{T: t},
-					ConsulClientConfig:         testClient.Cfg,
-					ConsulServerConnMgr:        testClient.Watcher,
-					AllowK8sNamespacesSet:      mapset.NewSetWith("*"),
-					DenyK8sNamespacesSet:       mapset.NewSetWith(),
-					ReleaseName:                "consul",
-					ReleaseNamespace:           "default",
-					EnableConsulNamespaces:     true,
-					EnableNSMirroring:          ts.Mirror,
-					NSMirroringPrefix:          ts.MirrorPrefix,
-					ConsulDestinationNamespace: ts.DestConsulNS,
-				}
-				if tt.enableACLs {
-					ep.AuthMethod = test.AuthMethod
-				}
-
-				// Set up the Endpoint that will be reconciled, and reconcile.
-				namespacedName := types.NamespacedName{
-					Namespace: ts.SourceKubeNS,
-					Name:      "service-deleted",
-				}
-				resp, err := ep.Reconcile(context.Background(), ctrl.Request{
-					NamespacedName: namespacedName,
-				})
-				require.NoError(t, err)
-				require.False(t, resp.Requeue)
-
-				consulConfig := testClient.Cfg
-				consulConfig.APIClientConfig.Namespace = ts.ExpConsulNS
-				consulClient, err = api.NewClient(consulConfig.APIClientConfig)
-				require.NoError(t, err)
-
-				// After reconciliation, Consul should not have any instances of service-deleted.
-				serviceInstances, _, err := consulClient.Catalog().Service(tt.consulSvcName, "", &api.QueryOptions{Namespace: ts.ExpConsulNS})
-				require.NoError(t, err)
-				require.Empty(t, serviceInstances)
-				proxyServiceInstances, _, err := consulClient.Catalog().Service(fmt.Sprintf("%s-sidecar-proxy", tt.consulSvcName), "", &api.QueryOptions{Namespace: ts.ExpConsulNS})
-				require.NoError(t, err)
-				require.Empty(t, proxyServiceInstances)
-
-				if tt.enableACLs {
-					_, _, err = consulClient.ACL().TokenRead(token.AccessorID, nil)
-					require.EqualError(t, err, "Unexpected response code: 403 (ACL not found)")
-				}
-			})
-		}
-	}
-}
-
-// Tests deleting an Endpoints object for mesh, terminating, and ingress gateways, with ACLs enabled and disabled,
-// when Consul namespaces are enabled. This test covers Controller.deregisterService when the map is nil (not selectively deregistered).
-func TestReconcileDeleteGatewayWithNamespaces(t *testing.T) { - t.Parallel() - - consulSvcName := "service-deleted" - cases := map[string]struct { - ConsulNS string - }{ - "default Consul namespace": { - ConsulNS: "default", - }, - "other Consul namespace": { - ConsulNS: "other", - }, - } - for name, ts := range cases { - cases := []struct { - name string - initialConsulSvcs []*api.AgentService - enableACLs bool - }{ - { - name: "mesh-gateway", - initialConsulSvcs: []*api.AgentService{ - { - ID: "mesh-gateway", - Kind: api.ServiceKindMeshGateway, - Service: "mesh-gateway", - Port: 80, - Address: "1.2.3.4", - Meta: map[string]string{ - metaKeyKubeServiceName: "service-deleted", - constants.MetaKeyKubeNS: "default", - metaKeyManagedBy: constants.ManagedByValue, - constants.MetaKeyPodName: "mesh-gateway", - metaKeySyntheticNode: "true", - }, - TaggedAddresses: map[string]api.ServiceAddress{ - "lan": { - Address: "1.2.3.4", - Port: 80, - }, - "wan": { - Address: "5.6.7.8", - Port: 8080, - }, - }, - Namespace: "default", - }, - }, - enableACLs: false, - }, - { - name: "mesh-gateway with ACLs enabled", - initialConsulSvcs: []*api.AgentService{ - { - ID: "mesh-gateway", - Kind: api.ServiceKindMeshGateway, - Service: "mesh-gateway", - Port: 80, - Address: "1.2.3.4", - Meta: map[string]string{ - metaKeyKubeServiceName: "service-deleted", - constants.MetaKeyKubeNS: "default", - metaKeyManagedBy: constants.ManagedByValue, - constants.MetaKeyPodName: "mesh-gateway", - metaKeySyntheticNode: "true", - }, - TaggedAddresses: map[string]api.ServiceAddress{ - "lan": { - Address: "1.2.3.4", - Port: 80, - }, - "wan": { - Address: "5.6.7.8", - Port: 8080, - }, - }, - Namespace: "default", - }, - }, - enableACLs: true, - }, - { - name: "terminating-gateway", - initialConsulSvcs: []*api.AgentService{ - { - ID: "terminating-gateway", - Kind: api.ServiceKindTerminatingGateway, - Service: "terminating-gateway", - Port: 8443, - Address: "1.2.3.4", - Meta: map[string]string{ - metaKeyKubeServiceName: "service-deleted", - constants.MetaKeyKubeNS: "default", - metaKeyManagedBy: constants.ManagedByValue, - constants.MetaKeyPodName: "terminating-gateway", - metaKeySyntheticNode: "true", - }, - Namespace: ts.ConsulNS, - }, - }, - enableACLs: false, - }, - { - name: "terminating-gateway with ACLs enabled", - initialConsulSvcs: []*api.AgentService{ - { - ID: "terminating-gateway", - Kind: api.ServiceKindTerminatingGateway, - Service: "terminating-gateway", - Port: 8443, - Address: "1.2.3.4", - Meta: map[string]string{ - metaKeyKubeServiceName: "service-deleted", - constants.MetaKeyKubeNS: "default", - metaKeyManagedBy: constants.ManagedByValue, - constants.MetaKeyPodName: "terminating-gateway", - metaKeySyntheticNode: "true", - }, - Namespace: ts.ConsulNS, - }, - }, - enableACLs: true, - }, - { - name: "ingress-gateway", - initialConsulSvcs: []*api.AgentService{ - { - ID: "ingress-gateway", - Kind: api.ServiceKindIngressGateway, - Service: "ingress-gateway", - Port: 80, - Address: "1.2.3.4", - Meta: map[string]string{ - metaKeyKubeServiceName: "gateway", - constants.MetaKeyKubeNS: "default", - metaKeyManagedBy: constants.ManagedByValue, - constants.MetaKeyPodName: "ingress-gateway", - metaKeySyntheticNode: "true", - }, - TaggedAddresses: map[string]api.ServiceAddress{ - "lan": { - Address: "1.2.3.4", - Port: 80, - }, - "wan": { - Address: "5.6.7.8", - Port: 8080, - }, - }, - Namespace: ts.ConsulNS, - }, - }, - enableACLs: false, - }, - { - name: "ingress-gateway with ACLs enabled", - initialConsulSvcs: []*api.AgentService{ 
- { - ID: "ingress-gateway", - Kind: api.ServiceKindIngressGateway, - Service: "ingress-gateway", - Port: 80, - Address: "1.2.3.4", - Meta: map[string]string{ - metaKeyKubeServiceName: "service-deleted", - constants.MetaKeyKubeNS: "default", - metaKeyManagedBy: constants.ManagedByValue, - constants.MetaKeyPodName: "ingress-gateway", - metaKeySyntheticNode: "true", - }, - TaggedAddresses: map[string]api.ServiceAddress{ - "lan": { - Address: "1.2.3.4", - Port: 80, - }, - "wan": { - Address: "5.6.7.8", - Port: 8080, - }, - }, - Namespace: ts.ConsulNS, - }, - }, - enableACLs: true, - }, - } - for _, tt := range cases { - t.Run(fmt.Sprintf("%s:%s", name, tt.name), func(t *testing.T) { - // Create fake k8s client. - node := corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: nodeName}} - fakeClient := fake.NewClientBuilder().WithRuntimeObjects(&node).Build() - - // Create test Consul server. - adminToken := "123e4567-e89b-12d3-a456-426614174000" - testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { - if tt.enableACLs { - c.ACL.Enabled = tt.enableACLs - c.ACL.Tokens.InitialManagement = adminToken - } - }) - consulClient := testClient.APIClient - - _, err := namespaces.EnsureExists(consulClient, ts.ConsulNS, "") - require.NoError(t, err) - - // Register service and proxy in consul. - var token *api.ACLToken - for _, svc := range tt.initialConsulSvcs { - serviceRegistration := &api.CatalogRegistration{ - Node: consulNodeName, - Address: consulNodeAddress, - Service: svc, - } - _, err = consulClient.Catalog().Register(serviceRegistration, nil) - require.NoError(t, err) - - // Create a token for it if ACLs are enabled. - if tt.enableACLs { - var writeOpts api.WriteOptions - if svc.Kind == api.ServiceKindMeshGateway { - writeOpts.Namespace = "default" // Mesh Gateways must always be registered in the "default" namespace. - } else { - writeOpts.Namespace = ts.ConsulNS - } - - test.SetupK8sAuthMethodWithNamespaces(t, consulClient, svc.Service, svc.Meta[constants.MetaKeyKubeNS], writeOpts.Namespace, false, "") - token, _, err = consulClient.ACL().Login(&api.ACLLoginParams{ - AuthMethod: test.AuthMethod, - BearerToken: test.ServiceAccountJWTToken, - Meta: map[string]string{ - tokenMetaPodNameKey: fmt.Sprintf("%s/%s", svc.Meta[constants.MetaKeyKubeNS], svc.Meta[constants.MetaKeyPodName]), - "component": svc.ID, - }, - }, &writeOpts) - - require.NoError(t, err) - } - } - - // Create the endpoints controller. - ep := &Controller{ - Client: fakeClient, - Log: logrtest.TestLogger{T: t}, - ConsulClientConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, - AllowK8sNamespacesSet: mapset.NewSetWith("*"), - DenyK8sNamespacesSet: mapset.NewSetWith(), - ReleaseName: "consul", - ReleaseNamespace: "default", - EnableConsulNamespaces: true, - } - if tt.enableACLs { - ep.AuthMethod = test.AuthMethod - } - - // Set up the Endpoint that will be reconciled, and reconcile. - namespacedName := types.NamespacedName{ - Namespace: "default", - Name: "service-deleted", - } - resp, err := ep.Reconcile(context.Background(), ctrl.Request{ - NamespacedName: namespacedName, - }) - require.NoError(t, err) - require.False(t, resp.Requeue) - - // After reconciliation, Consul should not have any instances of service-deleted. 
- defaultNS, _, err := consulClient.Catalog().Service(consulSvcName, "", &api.QueryOptions{Namespace: "default"}) - require.NoError(t, err) - testNS, _, err := consulClient.Catalog().Service(consulSvcName, "", &api.QueryOptions{Namespace: ts.ConsulNS}) - require.NoError(t, err) - require.Empty(t, append(defaultNS, testNS...)) - - if tt.enableACLs { - _, _, err = consulClient.ACL().TokenRead(token.AccessorID, nil) - require.EqualError(t, err, "Unexpected response code: 403 (ACL not found)") - } - }) - } - } -} - -func createPodWithNamespace(name, namespace, ip string, inject bool, managedByEndpointsController bool) *corev1.Pod { - pod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Labels: map[string]string{}, - Annotations: map[string]string{ - constants.AnnotationConsulK8sVersion: "1.0.0", - }, - }, - Status: corev1.PodStatus{ - PodIP: ip, - HostIP: consulNodeAddress, - Phase: corev1.PodRunning, - Conditions: []corev1.PodCondition{ - { - Type: corev1.PodReady, - Status: corev1.ConditionTrue, - }, - }, - }, - Spec: corev1.PodSpec{ - NodeName: nodeName, - }, - } - if inject { - pod.Labels[constants.KeyInjectStatus] = constants.Injected - pod.Annotations[constants.KeyInjectStatus] = constants.Injected - } - if managedByEndpointsController { - pod.Labels[constants.KeyManagedBy] = constants.ManagedByValue - } - return pod - -} - -func createGatewayWithNamespace(name, namespace, ip string, annotations map[string]string) *corev1.Pod { - pod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Labels: map[string]string{ - constants.KeyManagedBy: constants.ManagedByValue, - }, - Annotations: annotations, - }, - Status: corev1.PodStatus{ - PodIP: ip, - HostIP: consulNodeAddress, - Phase: corev1.PodRunning, - Conditions: []corev1.PodCondition{ - { - Type: corev1.PodReady, - Status: corev1.ConditionTrue, - }, - }, - }, - Spec: corev1.PodSpec{ - NodeName: nodeName, - }, - } - return pod -} diff --git a/control-plane/connect-inject/controllers/endpoints/endpoints_controller.go b/control-plane/connect-inject/endpoints_controller.go similarity index 52% rename from control-plane/connect-inject/controllers/endpoints/endpoints_controller.go rename to control-plane/connect-inject/endpoints_controller.go index 800df9cc24..f9759b4fe7 100644 --- a/control-plane/connect-inject/controllers/endpoints/endpoints_controller.go +++ b/control-plane/connect-inject/endpoints_controller.go @@ -1,4 +1,4 @@ -package endpoints +package connectinject import ( "context" @@ -8,12 +8,10 @@ import ( "regexp" "strconv" "strings" + "time" mapset "github.com/deckarep/golang-set" "github.com/go-logr/logr" - "github.com/hashicorp/consul-k8s/control-plane/connect-inject/common" - "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" - "github.com/hashicorp/consul-k8s/control-plane/connect-inject/metrics" "github.com/hashicorp/consul-k8s/control-plane/consul" "github.com/hashicorp/consul-k8s/control-plane/helper/parsetags" "github.com/hashicorp/consul-k8s/control-plane/namespaces" @@ -22,52 +20,63 @@ import ( corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/handler" + 
"sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" ) const ( - metaKeyKubeServiceName = "k8s-service-name" - - metaKeyManagedBy = "managed-by" - metaKeySyntheticNode = "synthetic-node" - metaKeyConsulWANFederation = "consul-wan-federation" - tokenMetaPodNameKey = "pod" - - // Gateway types for registration. - meshGateway = "mesh-gateway" - terminatingGateway = "terminating-gateway" - ingressGateway = "ingress-gateway" - + MetaKeyPodName = "pod-name" + MetaKeyKubeServiceName = "k8s-service-name" + MetaKeyKubeNS = "k8s-namespace" + MetaKeyManagedBy = "managed-by" + TokenMetaPodNameKey = "pod" kubernetesSuccessReasonMsg = "Kubernetes health checks passing" envoyPrometheusBindAddr = "envoy_prometheus_bind_addr" - defaultNS = "default" + envoySidecarContainer = "envoy-sidecar" // clusterIPTaggedAddressName is the key for the tagged address to store the service's cluster IP and service port // in Consul. Note: This value should not be changed without a corresponding change in Consul. clusterIPTaggedAddressName = "virtual" - // consulNodeAddress is the address of the consul node (defined by ConsulNodeName). - // This address does not need to be routable as this node is ephemeral, and we're only providing it because - // Consul's API currently requires node address to be provided when registering a node. - consulNodeAddress = "127.0.0.1" + // exposedPathsLivenessPortsRangeStart is the start of the port range that we will use as + // the ListenerPort for the Expose configuration of the proxy registration for a liveness probe. + exposedPathsLivenessPortsRangeStart = 20300 + + // exposedPathsReadinessPortsRangeStart is the start of the port range that we will use as + // the ListenerPort for the Expose configuration of the proxy registration for a readiness probe. + exposedPathsReadinessPortsRangeStart = 20400 - // consulKubernetesCheckType is the type of health check in Consul for Kubernetes readiness status. - consulKubernetesCheckType = "kubernetes-readiness" + // exposedPathsStartupPortsRangeStart is the start of the port range that we will use as + // the ListenerPort for the Expose configuration of the proxy registration for a startup probe. + exposedPathsStartupPortsRangeStart = 20500 - // consulKubernetesCheckName is the name of health check in Consul for Kubernetes readiness status. - consulKubernetesCheckName = "Kubernetes Readiness Check" + // proxyDefaultInboundPort is the default inbound port for the proxy. + proxyDefaultInboundPort = 20000 + + // proxyDefaultHealthPort is the default health check port for the proxy. + proxyDefaultHealthPort = 21000 ) -type Controller struct { +type EndpointsController struct { client.Client - // ConsulClientConfig is the config for the Consul API client. - ConsulClientConfig *consul.Config - // ConsulServerConnMgr is the watcher for the Consul server addresses. - ConsulServerConnMgr consul.ServerConnectionManager + // ConsulClient points at the agent local to the connect-inject deployment pod. + ConsulClient *api.Client + // ConsulClientCfg is the client config used by the ConsulClient when calling NewClient(). + ConsulClientCfg *api.Config + // ConsulScheme is the scheme to use when making API calls to Consul, + // i.e. "http" or "https". + ConsulScheme string + // ConsulPort is the port to make HTTP API calls to Consul agents on. + ConsulPort string // Only endpoints in the AllowK8sNamespacesSet are reconciled. 
AllowK8sNamespacesSet mapset.Set // Endpoints in the DenyK8sNamespacesSet are ignored. @@ -78,9 +87,6 @@ type Controller struct { // EnableConsulNamespaces indicates that a user is running Consul Enterprise // with version 1.7+ which supports namespaces. EnableConsulNamespaces bool - // EnableWANFederation indicates that a user is running Consul with - // WAN Federation enabled. - EnableWANFederation bool // ConsulDestinationNamespace is the name of the Consul namespace to create // all config entries in. If EnableNSMirroring is true this is ignored. ConsulDestinationNamespace string @@ -112,25 +118,20 @@ type Controller struct { // will delete any tokens associated with this auth method // whenever service instances are deregistered. AuthMethod string + // ConsulAPITimeout is the duration that the consul API client will + // wait for a response from the API before cancelling the request. + ConsulAPITimeout time.Duration - // EnableAutoEncrypt indicates whether we should use auto-encrypt when talking - // to Consul client agents. - EnableAutoEncrypt bool - - MetricsConfig metrics.Config + MetricsConfig MetricsConfig Log logr.Logger Scheme *runtime.Scheme context.Context - - // consulClientHttpPort is only used in tests. - consulClientHttpPort int - NodeMeta map[string]string } // Reconcile reads the state of an Endpoints object for a Kubernetes Service and reconciles Consul services which // correspond to the Kubernetes Service. These events are driven by changes to the Pods backing the Kube service. -func (r *Controller) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { +func (r *EndpointsController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { var errs error var serviceEndpoints corev1.Endpoints @@ -139,19 +140,7 @@ func (r *Controller) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu return ctrl.Result{}, nil } - // Create Consul client for this reconcile. - serverState, err := r.ConsulServerConnMgr.State() - if err != nil { - r.Log.Error(err, "failed to get Consul server state", "name", req.Name, "ns", req.Namespace) - return ctrl.Result{}, err - } - apiClient, err := consul.NewClientFromConnMgrState(r.ConsulClientConfig, serverState) - if err != nil { - r.Log.Error(err, "failed to create Consul API client", "name", req.Name, "ns", req.Namespace) - return ctrl.Result{}, err - } - - err = r.Client.Get(ctx, req.NamespacedName, &serviceEndpoints) + err := r.Client.Get(ctx, req.NamespacedName, &serviceEndpoints) // endpointPods holds a set of all pods this endpoints object is currently pointing to. // We use this later when we reconcile ACL tokens to decide whether an ACL token in Consul @@ -161,9 +150,9 @@ func (r *Controller) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu // If the endpoints object has been deleted (and we get an IsNotFound // error), we need to deregister all instances in Consul for that service. if k8serrors.IsNotFound(err) { - // Deregister all instances in Consul for this service. The function deregisterService handles + // Deregister all instances in Consul for this service. The function deregisterServiceOnAllAgents handles // the case where the Consul service name is different from the Kubernetes service name. 
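		// Illustrative sketch (hypothetical service "web" in namespace "default",
		// not part of the original change): the final map argument controls the
		// scope of deregistration. A nil map deregisters every instance for the
		// service; a populated map keeps the listed addresses:
		//
		//	// Remove everything registered for this Endpoints object.
		//	_ = r.deregisterService(apiClient, "web", "default", nil)
		//
		//	// Keep the instance at 10.0.0.5; remove the rest.
		//	_ = r.deregisterService(apiClient, "web", "default", map[string]bool{"10.0.0.5": true})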
- err = r.deregisterService(apiClient, req.Name, req.Namespace, nil) + err = r.deregisterServiceOnAllAgents(ctx, req.Name, req.Namespace, nil) return ctrl.Result{}, err } else if err != nil { r.Log.Error(err, "failed to get Endpoints", "name", req.Name, "ns", req.Namespace) @@ -177,7 +166,7 @@ func (r *Controller) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu if isLabeledIgnore(serviceEndpoints.Labels) { // We always deregister the service to handle the case where a user has registered the service, then added the label later. r.Log.Info("Ignoring endpoint labeled with `consul.hashicorp.com/service-ignore: \"true\"`", "name", req.Name, "namespace", req.Namespace) - err = r.deregisterService(apiClient, req.Name, req.Namespace, nil) + err = r.deregisterServiceOnAllAgents(ctx, req.Name, req.Namespace, nil) return ctrl.Result{}, err } @@ -191,14 +180,14 @@ func (r *Controller) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu if address.TargetRef != nil && address.TargetRef.Kind == "Pod" { var pod corev1.Pod objectKey := types.NamespacedName{Name: address.TargetRef.Name, Namespace: address.TargetRef.Namespace} - if err = r.Client.Get(ctx, objectKey, &pod); err != nil { + if err := r.Client.Get(ctx, objectKey, &pod); err != nil { r.Log.Error(err, "failed to get pod", "name", address.TargetRef.Name) errs = multierror.Append(errs, err) continue } - svcName, ok := pod.Annotations[constants.AnnotationKubernetesService] - if ok && serviceEndpoints.Name != svcName { + serviceName, ok := pod.Annotations[annotationKubernetesService] + if ok && serviceEndpoints.Name != serviceName { r.Log.Info("ignoring endpoint because it doesn't match explicit service annotation", "name", serviceEndpoints.Name, "ns", serviceEndpoints.Namespace) // deregistration for service instances that don't match the annotation happens // later because we don't add this pod to the endpointAddressMap. @@ -207,35 +196,8 @@ func (r *Controller) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu if hasBeenInjected(pod) { endpointPods.Add(address.TargetRef.Name) - if isConsulDataplaneSupported(pod) { - if err = r.registerServicesAndHealthCheck(apiClient, pod, serviceEndpoints, healthStatus, endpointAddressMap); err != nil { - r.Log.Error(err, "failed to register services or health check", "name", serviceEndpoints.Name, "ns", serviceEndpoints.Namespace) - errs = multierror.Append(errs, err) - } - } else { - r.Log.Info("detected an update to pre-consul-dataplane service", "name", serviceEndpoints.Name, "ns", serviceEndpoints.Namespace) - nodeAgentClientCfg, err := r.consulClientCfgForNodeAgent(apiClient, pod, serverState) - if err != nil { - r.Log.Error(err, "failed to create node-local Consul API client", "name", serviceEndpoints.Name, "ns", serviceEndpoints.Namespace) - errs = multierror.Append(errs, err) - continue - } - r.Log.Info("updating health check on the Consul client", "name", serviceEndpoints.Name, "ns", serviceEndpoints.Namespace) - if err = r.updateHealthCheckOnConsulClient(nodeAgentClientCfg, pod, serviceEndpoints, healthStatus); err != nil { - r.Log.Error(err, "failed to update health check on Consul client", "name", serviceEndpoints.Name, "ns", serviceEndpoints.Namespace, "consul-client-ip", pod.Status.HostIP) - errs = multierror.Append(errs, err) - } - // We want to skip the rest of the reconciliation because we only care about updating health checks for existing services - // in the case when Consul clients are running in the cluster. 
If endpoints are deleted, consul clients - // will detect that they are unhealthy, and we don't need to worry about keeping them up-to-date. - // This is so that health checks are still updated during an upgrade to consul-dataplane. - continue - } - } - if isGateway(pod) { - endpointPods.Add(address.TargetRef.Name) - if err = r.registerGateway(apiClient, pod, serviceEndpoints, healthStatus, endpointAddressMap); err != nil { - r.Log.Error(err, "failed to register gateway or health check", "name", serviceEndpoints.Name, "ns", serviceEndpoints.Namespace) + if err := r.registerServicesAndHealthCheck(pod, serviceEndpoints, healthStatus, endpointAddressMap); err != nil { + r.Log.Error(err, "failed to register services or health check", "name", serviceEndpoints.Name, "ns", serviceEndpoints.Namespace) errs = multierror.Append(errs, err) } } @@ -246,143 +208,219 @@ func (r *Controller) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu // Compare service instances in Consul with addresses in Endpoints. If an address is not in Endpoints, deregister // from Consul. This uses endpointAddressMap which is populated with the addresses in the Endpoints object during // the registration codepath. - if err = r.deregisterService(apiClient, serviceEndpoints.Name, serviceEndpoints.Namespace, endpointAddressMap); err != nil { - r.Log.Error(err, "failed to deregister endpoints", "name", serviceEndpoints.Name, "ns", serviceEndpoints.Namespace) + if err = r.deregisterServiceOnAllAgents(ctx, serviceEndpoints.Name, serviceEndpoints.Namespace, endpointAddressMap); err != nil { + r.Log.Error(err, "failed to deregister endpoints on all agents", "name", serviceEndpoints.Name, "ns", serviceEndpoints.Namespace) errs = multierror.Append(errs, err) } return ctrl.Result{}, errs } -func (r *Controller) Logger(name types.NamespacedName) logr.Logger { +func (r *EndpointsController) Logger(name types.NamespacedName) logr.Logger { return r.Log.WithValues("request", name) } -func (r *Controller) SetupWithManager(mgr ctrl.Manager) error { +func (r *EndpointsController) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&corev1.Endpoints{}). - Complete(r) + Watches( + &source.Kind{Type: &corev1.Pod{}}, + handler.EnqueueRequestsFromMapFunc(r.requestsForRunningAgentPods), + builder.WithPredicates(predicate.NewPredicateFuncs(r.filterAgentPods)), + ).Complete(r) } // registerServicesAndHealthCheck creates Consul registrations for the service and proxy and registers them with Consul. // It also upserts a Kubernetes health check for the service based on whether the endpoint address is ready. -func (r *Controller) registerServicesAndHealthCheck(apiClient *api.Client, pod corev1.Pod, serviceEndpoints corev1.Endpoints, healthStatus string, endpointAddressMap map[string]bool) error { - // Build the endpointAddressMap up for deregistering service instances later. - endpointAddressMap[pod.Status.PodIP] = true - - var managedByEndpointsController bool - if raw, ok := pod.Labels[constants.KeyManagedBy]; ok && raw == constants.ManagedByValue { - managedByEndpointsController = true - } - // For pods managed by this controller, create and register the service instance. - if managedByEndpointsController { - // Get information from the pod to create service instance registrations. 
- serviceRegistration, proxyServiceRegistration, err := r.createServiceRegistrations(pod, serviceEndpoints, healthStatus) +func (r *EndpointsController) registerServicesAndHealthCheck(pod corev1.Pod, serviceEndpoints corev1.Endpoints, healthStatus string, endpointAddressMap map[string]bool) error { + podHostIP := pod.Status.HostIP + + if hasBeenInjected(pod) { + // Build the endpointAddressMap up for deregistering service instances later. + endpointAddressMap[pod.Status.PodIP] = true + // Create client for Consul agent local to the pod. + client, err := r.remoteConsulClient(podHostIP, r.consulNamespace(pod.Namespace)) if err != nil { - r.Log.Error(err, "failed to create service registrations for endpoints", "name", serviceEndpoints.Name, "ns", serviceEndpoints.Namespace) + r.Log.Error(err, "failed to create a new Consul client", "address", podHostIP) return err } - // Register the service instance with Consul. - r.Log.Info("registering service with Consul", "name", serviceRegistration.Service.Service, - "id", serviceRegistration.ID) - _, err = apiClient.Catalog().Register(serviceRegistration, nil) - if err != nil { - r.Log.Error(err, "failed to register service", "name", serviceRegistration.Service.Service) - return err + var managedByEndpointsController bool + if raw, ok := pod.Labels[keyManagedBy]; ok && raw == managedByValue { + managedByEndpointsController = true + } + // For pods managed by this controller, create and register the service instance. + if managedByEndpointsController { + // Get information from the pod to create service instance registrations. + serviceRegistration, proxyServiceRegistration, err := r.createServiceRegistrations(pod, serviceEndpoints) + if err != nil { + r.Log.Error(err, "failed to create service registrations for endpoints", "name", serviceEndpoints.Name, "ns", serviceEndpoints.Namespace) + return err + } + + // Register the service instance with the local agent. + // Note: the order of how we register services is important, + // and the connect-proxy service should come after the "main" service + // because its alias health check depends on the main service existing. + r.Log.Info("registering service with Consul", "name", serviceRegistration.Name, + "id", serviceRegistration.ID, "agentIP", podHostIP) + err = client.Agent().ServiceRegister(serviceRegistration) + if err != nil { + r.Log.Error(err, "failed to register service", "name", serviceRegistration.Name) + return err + } + + // Register the proxy service instance with the local agent. + r.Log.Info("registering proxy service with Consul", "name", proxyServiceRegistration.Name) + err = client.Agent().ServiceRegister(proxyServiceRegistration) + if err != nil { + r.Log.Error(err, "failed to register proxy service", "name", proxyServiceRegistration.Name) + return err + } } - // Register the proxy service instance with Consul. - r.Log.Info("registering proxy service with Consul", "name", proxyServiceRegistration.Service.Service) - _, err = apiClient.Catalog().Register(proxyServiceRegistration, nil) + // Update the service TTL health check for both legacy services and services managed by endpoints + // controller. The proxy health checks are registered separately by endpoints controller and + // lifecycle sidecar for legacy services. Here, we always update the health check for legacy and + // newer services idempotently since the service health check is not added as part of the service + // registration. 
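	// Illustrative sketch (hypothetical check ID): the TTL update itself is a
	// single agent API call, e.g.
	//
	//	checkID := "default/pod1-web/kubernetes-health-check"
	//	_ = client.Agent().UpdateTTL(checkID, "Kubernetes health checks passing", api.HealthPassing)
	//
	// upsertHealthCheck below first registers the check when it does not exist yet.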
+ reason := getHealthCheckStatusReason(healthStatus, pod.Name, pod.Namespace) + serviceName := getServiceName(pod, serviceEndpoints) + r.Log.Info("updating health check status for service", "name", serviceName, "reason", reason, "status", healthStatus) + serviceID := getServiceID(pod, serviceEndpoints) + healthCheckID := getConsulHealthCheckID(pod, serviceID) + err = r.upsertHealthCheck(pod, client, serviceID, healthCheckID, healthStatus) if err != nil { - r.Log.Error(err, "failed to register proxy service", "name", proxyServiceRegistration.Service.Service) + r.Log.Error(err, "failed to update health check status for service", "name", serviceName) return err } } return nil } -// registerGateway creates Consul registrations for the Connect Gateways and registers them with Consul. -// It also upserts a Kubernetes health check for the service based on whether the endpoint address is ready. -func (r *Controller) registerGateway(apiClient *api.Client, pod corev1.Pod, serviceEndpoints corev1.Endpoints, healthStatus string, endpointAddressMap map[string]bool) error { - // Build the endpointAddressMap up for deregistering service instances later. - endpointAddressMap[pod.Status.PodIP] = true - - var managedByEndpointsController bool - if raw, ok := pod.Labels[constants.KeyManagedBy]; ok && raw == constants.ManagedByValue { - managedByEndpointsController = true - } - // For pods managed by this controller, create and register the service instance. - if managedByEndpointsController { - // Get information from the pod to create service instance registrations. - serviceRegistration, err := r.createGatewayRegistrations(pod, serviceEndpoints, healthStatus) +// getServiceCheck will return the health check for this pod and service if it exists. +func getServiceCheck(client *api.Client, healthCheckID string) (*api.AgentCheck, error) { + filter := fmt.Sprintf("CheckID == `%s`", healthCheckID) + checks, err := client.Agent().ChecksWithFilter(filter) + if err != nil { + return nil, err + } + // This will be nil (does not exist) or an actual check. + return checks[healthCheckID], nil +} + +// registerConsulHealthCheck registers a TTL health check for the service on this Agent local to the Pod. This will add +// the Pod's readiness status, which will mark the service instance healthy/unhealthy for Consul service mesh +// traffic. +func registerConsulHealthCheck(client *api.Client, consulHealthCheckID, serviceID, status string) error { + // Create a TTL health check in Consul associated with this service and pod. + // The TTL time is 100000h which should ensure that the check never fails due to timeout + // of the TTL check. + err := client.Agent().CheckRegister(&api.AgentCheckRegistration{ + ID: consulHealthCheckID, + Name: "Kubernetes Health Check", + ServiceID: serviceID, + AgentServiceCheck: api.AgentServiceCheck{ + TTL: "100000h", + Status: status, + SuccessBeforePassing: 1, + FailuresBeforeCritical: 1, + }, + }) + if err != nil { + // Full error looks like: + // Unexpected response code: 500 (ServiceID "consulnamespace/svc-id" does not exist) + if strings.Contains(err.Error(), fmt.Sprintf("%s\" does not exist", serviceID)) { + return fmt.Errorf("service %q not found in Consul: unable to register health check", serviceID) + } + return fmt.Errorf("registering health check for service %q: %w", serviceID, err) + } + + return nil +} + +// updateConsulHealthCheckStatus updates the consul health check status. 
+func (r *EndpointsController) updateConsulHealthCheckStatus(client *api.Client, consulHealthCheckID, status, reason string) error { + r.Log.Info("updating health check", "id", consulHealthCheckID) + err := client.Agent().UpdateTTL(consulHealthCheckID, reason, status) + if err != nil { + return fmt.Errorf("error updating health check: %w", err) + } + return nil +} + +// upsertHealthCheck checks if the healthcheck exists for the service, and creates it if it doesn't exist, or updates it +// if it does. +func (r *EndpointsController) upsertHealthCheck(pod corev1.Pod, client *api.Client, serviceID, healthCheckID, status string) error { + reason := getHealthCheckStatusReason(status, pod.Name, pod.Namespace) + // Retrieve the health check that would exist if the service had one registered for this pod. + serviceCheck, err := getServiceCheck(client, healthCheckID) + if err != nil { + return fmt.Errorf("unable to get agent health checks: serviceID=%s, checkID=%s, %s", serviceID, healthCheckID, err) + } + if serviceCheck == nil { + // Create a new health check. + err = registerConsulHealthCheck(client, healthCheckID, serviceID, status) if err != nil { - r.Log.Error(err, "failed to create service registrations for endpoints", "name", serviceEndpoints.Name, "ns", serviceEndpoints.Namespace) return err } - if r.EnableConsulNamespaces { - if _, err := namespaces.EnsureExists(apiClient, serviceRegistration.Service.Namespace, r.CrossNSACLPolicy); err != nil { - r.Log.Error(err, "failed to ensure Consul namespace exists", "name", serviceEndpoints.Name, "ns", serviceEndpoints.Namespace, "consul ns", serviceRegistration.Service.Namespace) - return err - } + // Also update it, the reason this is separate is there is no way to set the Output field of the health check + // at creation time, and this is what is displayed on the UI as opposed to the Notes field. + err = r.updateConsulHealthCheckStatus(client, healthCheckID, status, reason) + if err != nil { + return err } - - // Register the service instance with Consul. - r.Log.Info("registering gateway with Consul", "name", serviceRegistration.Service.Service, - "id", serviceRegistration.ID) - _, err = apiClient.Catalog().Register(serviceRegistration, nil) + } else if serviceCheck.Status != status { + err = r.updateConsulHealthCheckStatus(client, healthCheckID, status, reason) if err != nil { - r.Log.Error(err, "failed to register gateway", "name", serviceRegistration.Service.Service) return err } } - return nil } -// serviceName computes the service name to register with Consul from the pod and endpoints object. In a single port +// getServiceName computes the service name to register with Consul from the pod and endpoints object. In a single port // service, it defaults to the endpoints name, but can be overridden by a pod annotation. In a multi port service, the // endpoints name is always used since the pod annotation will have multiple service names listed (one per port). // Changing the Consul service name via annotations is not supported for multi port services. -func serviceName(pod corev1.Pod, serviceEndpoints corev1.Endpoints) string { - svcName := serviceEndpoints.Name +func getServiceName(pod corev1.Pod, serviceEndpoints corev1.Endpoints) string { + serviceName := serviceEndpoints.Name // If the annotation has a comma, it is a multi port Pod. In that case we always use the name of the endpoint. 
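	// For example (hypothetical annotation values):
	//
	//	"consul.hashicorp.com/connect-service": "web"            // single port: registers as "web"
	//	"consul.hashicorp.com/connect-service": "web,web-admin"  // multi port: annotation ignored, Endpoints name wins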
- if serviceNameFromAnnotation, ok := pod.Annotations[constants.AnnotationService]; ok && serviceNameFromAnnotation != "" && !strings.Contains(serviceNameFromAnnotation, ",") { - svcName = serviceNameFromAnnotation + if serviceNameFromAnnotation, ok := pod.Annotations[annotationService]; ok && serviceNameFromAnnotation != "" && !strings.Contains(serviceNameFromAnnotation, ",") { + serviceName = serviceNameFromAnnotation } - return svcName + return serviceName } -func serviceID(pod corev1.Pod, serviceEndpoints corev1.Endpoints) string { - return fmt.Sprintf("%s-%s", pod.Name, serviceName(pod, serviceEndpoints)) +func getServiceID(pod corev1.Pod, serviceEndpoints corev1.Endpoints) string { + return fmt.Sprintf("%s-%s", pod.Name, getServiceName(pod, serviceEndpoints)) } -func proxyServiceName(pod corev1.Pod, serviceEndpoints corev1.Endpoints) string { - svcName := serviceName(pod, serviceEndpoints) - return fmt.Sprintf("%s-sidecar-proxy", svcName) +func getProxyServiceName(pod corev1.Pod, serviceEndpoints corev1.Endpoints) string { + serviceName := getServiceName(pod, serviceEndpoints) + return fmt.Sprintf("%s-sidecar-proxy", serviceName) } -func proxyServiceID(pod corev1.Pod, serviceEndpoints corev1.Endpoints) string { - proxySvcName := proxyServiceName(pod, serviceEndpoints) - return fmt.Sprintf("%s-%s", pod.Name, proxySvcName) +func getProxyServiceID(pod corev1.Pod, serviceEndpoints corev1.Endpoints) string { + proxyServiceName := getProxyServiceName(pod, serviceEndpoints) + return fmt.Sprintf("%s-%s", pod.Name, proxyServiceName) } // createServiceRegistrations creates the service and proxy service instance registrations with the information from the // Pod. -func (r *Controller) createServiceRegistrations(pod corev1.Pod, serviceEndpoints corev1.Endpoints, healthStatus string) (*api.CatalogRegistration, *api.CatalogRegistration, error) { +func (r *EndpointsController) createServiceRegistrations(pod corev1.Pod, serviceEndpoints corev1.Endpoints) (*api.AgentServiceRegistration, *api.AgentServiceRegistration, error) { // If a port is specified, then we determine the value of that port // and register that port for the host service. // The meshWebhook will always set the port annotation if one is not provided on the pod. var consulServicePort int - if raw, ok := pod.Annotations[constants.AnnotationPort]; ok && raw != "" { + if raw, ok := pod.Annotations[annotationPort]; ok && raw != "" { if multiPort := strings.Split(raw, ","); len(multiPort) > 1 { // Figure out which index of the ports annotation to use by // finding the index of the service names annotation. raw = multiPort[getMultiPortIdx(pod, serviceEndpoints)] } - if port, err := common.PortValue(pod, raw); port > 0 { + if port, err := portValue(pod, raw); port > 0 { if err != nil { return nil, nil, err } @@ -394,77 +432,56 @@ func (r *Controller) createServiceRegistrations(pod corev1.Pod, serviceEndpoints // Otherwise, the Consul service name should equal the Kubernetes Service name. // The service name in Consul defaults to the Endpoints object name, and is overridden by the pod // annotation consul.hashicorp.com/connect-service.. 
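	// Worked example (hypothetical pod "pod1" backing an Endpoints object "web"):
	//
	//	serviceName(pod, eps)      // "web"
	//	serviceID(pod, eps)        // "pod1-web"
	//	proxyServiceName(pod, eps) // "web-sidecar-proxy"
	//	proxyServiceID(pod, eps)   // "pod1-web-sidecar-proxy"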
- svcName := serviceName(pod, serviceEndpoints) + serviceName := getServiceName(pod, serviceEndpoints) - svcID := serviceID(pod, serviceEndpoints) + serviceID := getServiceID(pod, serviceEndpoints) meta := map[string]string{ - constants.MetaKeyPodName: pod.Name, - metaKeyKubeServiceName: serviceEndpoints.Name, - constants.MetaKeyKubeNS: serviceEndpoints.Namespace, - metaKeyManagedBy: constants.ManagedByValue, - metaKeySyntheticNode: "true", + MetaKeyPodName: pod.Name, + MetaKeyKubeServiceName: serviceEndpoints.Name, + MetaKeyKubeNS: serviceEndpoints.Namespace, + MetaKeyManagedBy: managedByValue, } for k, v := range pod.Annotations { - if strings.HasPrefix(k, constants.AnnotationMeta) && strings.TrimPrefix(k, constants.AnnotationMeta) != "" { + if strings.HasPrefix(k, annotationMeta) && strings.TrimPrefix(k, annotationMeta) != "" { if v == "$POD_NAME" { - meta[strings.TrimPrefix(k, constants.AnnotationMeta)] = pod.Name + meta[strings.TrimPrefix(k, annotationMeta)] = pod.Name } else { - meta[strings.TrimPrefix(k, constants.AnnotationMeta)] = v + meta[strings.TrimPrefix(k, annotationMeta)] = v } } } tags := consulTags(pod) - consulNS := r.consulNamespace(pod.Namespace) - service := &api.AgentService{ - ID: svcID, - Service: svcName, + service := &api.AgentServiceRegistration{ + ID: serviceID, + Name: serviceName, Port: consulServicePort, Address: pod.Status.PodIP, Meta: meta, - Namespace: consulNS, + Namespace: r.consulNamespace(pod.Namespace), Tags: tags, } - serviceRegistration := &api.CatalogRegistration{ - Node: common.ConsulNodeNameFromK8sNode(pod.Spec.NodeName), - Address: pod.Status.HostIP, - NodeMeta: map[string]string{ - metaKeySyntheticNode: "true", - }, - Service: service, - Check: &api.AgentCheck{ - CheckID: consulHealthCheckID(pod.Namespace, svcID), - Name: consulKubernetesCheckName, - Type: consulKubernetesCheckType, - Status: healthStatus, - ServiceID: svcID, - Output: getHealthCheckStatusReason(healthStatus, pod.Name, pod.Namespace), - Namespace: consulNS, - }, - SkipNodeUpdate: true, - } - r.appendNodeMeta(serviceRegistration) - proxySvcName := proxyServiceName(pod, serviceEndpoints) - proxySvcID := proxyServiceID(pod, serviceEndpoints) + proxyServiceName := getProxyServiceName(pod, serviceEndpoints) + proxyServiceID := getProxyServiceID(pod, serviceEndpoints) proxyConfig := &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: svcName, - DestinationServiceID: svcID, + DestinationServiceName: serviceName, + DestinationServiceID: serviceID, Config: make(map[string]interface{}), } // If metrics are enabled, the proxyConfig should set envoy_prometheus_bind_addr to a listener on 0.0.0.0 on - // the PrometheusScrapePort that points to a metrics backend. The backend for this listener will be determined by + // the prometheusScrapePort that points to a metrics backend. The backend for this listener will be determined by // the envoy bootstrapping command (consul connect envoy) configuration in the init container. If there is a merged // metrics server, the backend would be that server. If we are not running the merged metrics server, the backend // should just be the Envoy metrics endpoint. 
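	// Sketch of the effect (assuming a Prometheus scrape port of 20200): with
	// metrics enabled, the code below ends up setting
	//
	//	proxyConfig.Config[envoyPrometheusBindAddr] = "0.0.0.0:20200"
	//
	// so Envoy serves Prometheus metrics on that port on all interfaces.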
- enableMetrics, err := r.MetricsConfig.EnableMetrics(pod) + enableMetrics, err := r.MetricsConfig.enableMetrics(pod) if err != nil { return nil, nil, err } if enableMetrics { - prometheusScrapePort, err := r.MetricsConfig.PrometheusScrapePort(pod) + prometheusScrapePort, err := r.MetricsConfig.prometheusScrapePort(pod) if err != nil { return nil, nil, err } @@ -483,20 +500,52 @@ func (r *Controller) createServiceRegistrations(pod corev1.Pod, serviceEndpoints } proxyConfig.Upstreams = upstreams - proxyPort := constants.ProxyDefaultInboundPort + proxyPort := proxyDefaultInboundPort if idx := getMultiPortIdx(pod, serviceEndpoints); idx >= 0 { proxyPort += idx } - proxyService := &api.AgentService{ + var publicListenerCheck api.AgentServiceCheck + if useProxyHealthCheck(pod) { + // When using the proxy's health check, create an HTTP check on the ready endpoint + // that will be configured on the proxy sidecar container. + healthCheckPort := proxyDefaultHealthPort + if idx := getMultiPortIdx(pod, serviceEndpoints); idx >= 0 { + healthCheckPort += idx + } + publicListenerCheck = api.AgentServiceCheck{ + Name: "Proxy Public Listener", + HTTP: fmt.Sprintf("http://%s:%d/ready", pod.Status.PodIP, healthCheckPort), + TLSSkipVerify: true, + Interval: "10s", + DeregisterCriticalServiceAfter: "10m", + } + } else { + // Configure the default application health check. + publicListenerCheck = api.AgentServiceCheck{ + Name: "Proxy Public Listener", + TCP: fmt.Sprintf("%s:%d", pod.Status.PodIP, proxyPort), + Interval: "10s", + DeregisterCriticalServiceAfter: "10m", + } + } + + proxyService := &api.AgentServiceRegistration{ Kind: api.ServiceKindConnectProxy, - ID: proxySvcID, - Service: proxySvcName, + ID: proxyServiceID, + Name: proxyServiceName, Port: proxyPort, Address: pod.Status.PodIP, Meta: meta, - Namespace: consulNS, + Namespace: r.consulNamespace(pod.Namespace), Proxy: proxyConfig, - Tags: tags, + Checks: api.AgentServiceChecks{ + &publicListenerCheck, + { + Name: "Destination Alias", + AliasService: serviceID, + }, + }, + Tags: tags, } // A user can enable/disable tproxy for an entire namespace. @@ -506,7 +555,7 @@ func (r *Controller) createServiceRegistrations(pod corev1.Pod, serviceEndpoints return nil, nil, err } - tproxyEnabled, err := common.TransparentProxyEnabled(ns, pod, r.EnableTransparentProxy) + tproxyEnabled, err := transparentProxyEnabled(ns, pod, r.EnableTransparentProxy) if err != nil { return nil, nil, err } @@ -514,7 +563,7 @@ func (r *Controller) createServiceRegistrations(pod corev1.Pod, serviceEndpoints if tproxyEnabled { var k8sService corev1.Service - err = r.Client.Get(r.Context, types.NamespacedName{Name: serviceEndpoints.Name, Namespace: serviceEndpoints.Namespace}, &k8sService) + err := r.Client.Get(r.Context, types.NamespacedName{Name: serviceEndpoints.Name, Namespace: serviceEndpoints.Namespace}, &k8sService) if err != nil { return nil, nil, err } @@ -562,13 +611,13 @@ func (r *Controller) createServiceRegistrations(pod corev1.Pod, serviceEndpoints } // Expose k8s probes as Envoy listeners if needed. 
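	// Illustrative sketch (hypothetical probe): an HTTP liveness probe on
	// container port 8080 would be re-exposed through Envoy roughly as
	//
	//	proxyConfig.Expose.Paths = append(proxyConfig.Expose.Paths, api.ExposePath{
	//		ListenerPort:  exposedPathsLivenessPortsRangeStart, // 20300
	//		Path:          "/healthz",
	//		LocalPathPort: 8080,
	//		Protocol:      "http",
	//	})
	//
	// so the kubelet can reach the probe without a mesh certificate.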
-	overwriteProbes, err := common.ShouldOverwriteProbes(pod, r.TProxyOverwriteProbes)
+	overwriteProbes, err := shouldOverwriteProbes(pod, r.TProxyOverwriteProbes)
 	if err != nil {
 		return nil, nil, err
 	}
 	if overwriteProbes {
 		var originalPod corev1.Pod
-		err = json.Unmarshal([]byte(pod.Annotations[constants.AnnotationOriginalPod]), &originalPod)
+		err := json.Unmarshal([]byte(pod.Annotations[annotationOriginalPod]), &originalPod)
 		if err != nil {
 			return nil, nil, err
 		}
@@ -614,218 +663,7 @@ func (r *Controller) createServiceRegistrations(pod corev1.Pod, serviceEndpoints
 			}
 		}
 	}
-
-	proxyServiceRegistration := &api.CatalogRegistration{
-		Node:    common.ConsulNodeNameFromK8sNode(pod.Spec.NodeName),
-		Address: pod.Status.HostIP,
-		NodeMeta: map[string]string{
-			metaKeySyntheticNode: "true",
-		},
-		Service: proxyService,
-		Check: &api.AgentCheck{
-			CheckID:   consulHealthCheckID(pod.Namespace, proxySvcID),
-			Name:      consulKubernetesCheckName,
-			Type:      consulKubernetesCheckType,
-			Status:    healthStatus,
-			ServiceID: proxySvcID,
-			Output:    getHealthCheckStatusReason(healthStatus, pod.Name, pod.Namespace),
-			Namespace: consulNS,
-		},
-		SkipNodeUpdate: true,
-	}
-	r.appendNodeMeta(proxyServiceRegistration)
-
-	return serviceRegistration, proxyServiceRegistration, nil
-}
-
-// createGatewayRegistrations creates the gateway service registrations with the information from the Pod.
-func (r *Controller) createGatewayRegistrations(pod corev1.Pod, serviceEndpoints corev1.Endpoints, healthStatus string) (*api.CatalogRegistration, error) {
-	meta := map[string]string{
-		constants.MetaKeyPodName: pod.Name,
-		metaKeyKubeServiceName:   serviceEndpoints.Name,
-		constants.MetaKeyKubeNS:  serviceEndpoints.Namespace,
-		metaKeyManagedBy:         constants.ManagedByValue,
-		metaKeySyntheticNode:     "true",
-	}
-
-	service := &api.AgentService{
-		ID:      pod.Name,
-		Address: pod.Status.PodIP,
-		Meta:    meta,
-	}
-
-	gatewayServiceName, ok := pod.Annotations[constants.AnnotationGatewayConsulServiceName]
-	if !ok {
-		return nil, fmt.Errorf("failed to read annotation %s from pod %s/%s", constants.AnnotationGatewayConsulServiceName, pod.Namespace, pod.Name)
-	}
-	service.Service = gatewayServiceName
-
-	var consulNS string
-
-	// Set the service values.
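	// Illustration (hypothetical pod): the switch below dispatches on the
	// gateway-kind pod annotation, e.g.
	//
	//	pod.Annotations[constants.AnnotationGatewayKind] = "mesh-gateway"
	//
	// where "mesh-gateway", "terminating-gateway", and "ingress-gateway" are the
	// only accepted values (see the default case).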
-	switch pod.Annotations[constants.AnnotationGatewayKind] {
-	case meshGateway:
-		service.Kind = api.ServiceKindMeshGateway
-		if r.EnableConsulNamespaces {
-			service.Namespace = defaultNS
-			consulNS = defaultNS
-		}
-
-		port, err := strconv.Atoi(pod.Annotations[constants.AnnotationMeshGatewayContainerPort])
-		if err != nil {
-			return nil, err
-		}
-		service.Port = port
-
-		if r.EnableWANFederation {
-			meta[metaKeyConsulWANFederation] = "1"
-		}
-
-		wanAddr, wanPort, err := r.getWanData(pod, serviceEndpoints)
-		if err != nil {
-			return nil, err
-		}
-		service.TaggedAddresses = map[string]api.ServiceAddress{
-			"lan": {
-				Address: pod.Status.PodIP,
-				Port:    port,
-			},
-			"wan": {
-				Address: wanAddr,
-				Port:    wanPort,
-			},
-		}
-	case terminatingGateway:
-		service.Kind = api.ServiceKindTerminatingGateway
-		service.Port = 8443
-		if ns, ok := pod.Annotations[constants.AnnotationGatewayNamespace]; ok && r.EnableConsulNamespaces {
-			service.Namespace = ns
-			consulNS = ns
-		}
-	case ingressGateway:
-		service.Kind = api.ServiceKindIngressGateway
-		if ns, ok := pod.Annotations[constants.AnnotationGatewayNamespace]; ok && r.EnableConsulNamespaces {
-			service.Namespace = ns
-			consulNS = ns
-		}
-
-		wanAddr, wanPort, err := r.getWanData(pod, serviceEndpoints)
-		if err != nil {
-			return nil, err
-		}
-		service.Port = 21000
-		service.TaggedAddresses = map[string]api.ServiceAddress{
-			"lan": {
-				Address: pod.Status.PodIP,
-				Port:    21000,
-			},
-			"wan": {
-				Address: wanAddr,
-				Port:    wanPort,
-			},
-		}
-		service.Proxy = &api.AgentServiceConnectProxyConfig{
-			Config: map[string]interface{}{
-				"envoy_gateway_no_default_bind": true,
-				"envoy_gateway_bind_addresses": map[string]interface{}{
-					"all-interfaces": map[string]interface{}{
-						"address": "0.0.0.0",
-					},
-				},
-			},
-		}
-
-	default:
-		return nil, fmt.Errorf("%s must be one of %s, %s, or %s", constants.AnnotationGatewayKind, meshGateway, terminatingGateway, ingressGateway)
-	}
-
-	if r.MetricsConfig.DefaultEnableMetrics && r.MetricsConfig.EnableGatewayMetrics {
-		if pod.Annotations[constants.AnnotationGatewayKind] == ingressGateway {
-			service.Proxy.Config["envoy_prometheus_bind_addr"] = fmt.Sprintf("%s:20200", pod.Status.PodIP)
-		} else {
-			service.Proxy = &api.AgentServiceConnectProxyConfig{
-				Config: map[string]interface{}{
-					"envoy_prometheus_bind_addr": fmt.Sprintf("%s:20200", pod.Status.PodIP),
-				},
-			}
-		}
-	}
-
-	serviceRegistration := &api.CatalogRegistration{
-		Node:    common.ConsulNodeNameFromK8sNode(pod.Spec.NodeName),
-		Address: pod.Status.HostIP,
-		NodeMeta: map[string]string{
-			metaKeySyntheticNode: "true",
-		},
-		Service: service,
-		Check: &api.AgentCheck{
-			CheckID:   consulHealthCheckID(pod.Namespace, pod.Name),
-			Name:      consulKubernetesCheckName,
-			Type:      consulKubernetesCheckType,
-			Status:    healthStatus,
-			ServiceID: pod.Name,
-			Namespace: consulNS,
-			Output:    getHealthCheckStatusReason(healthStatus, pod.Name, pod.Namespace),
-		},
-		SkipNodeUpdate: true,
-	}
-	r.appendNodeMeta(serviceRegistration)
-
-	return serviceRegistration, nil
-}
-
-func (r *Controller) getWanData(pod corev1.Pod, endpoints corev1.Endpoints) (string, int, error) {
-	var wanAddr string
-	source, ok := pod.Annotations[constants.AnnotationGatewayWANSource]
-	if !ok {
-		return "", 0, fmt.Errorf("failed to read annotation %s", constants.AnnotationGatewayWANSource)
-	}
-	switch source {
-	case "NodeName":
-		wanAddr = pod.Spec.NodeName
-	case "NodeIP":
-		wanAddr = pod.Status.HostIP
-	case "Static":
-		wanAddr = pod.Annotations[constants.AnnotationGatewayWANAddress]
-	case "Service":
-		svc, err := r.getService(endpoints)
-		if err != nil {
-			return "", 0, fmt.Errorf("failed to read service %s in namespace %s", endpoints.Name, endpoints.Namespace)
-		}
-		switch svc.Spec.Type {
-		case corev1.ServiceTypeNodePort:
-			wanAddr = pod.Status.HostIP
-		case corev1.ServiceTypeClusterIP:
-			wanAddr = svc.Spec.ClusterIP
-		case corev1.ServiceTypeLoadBalancer:
-			if len(svc.Status.LoadBalancer.Ingress) == 0 {
-				return "", 0, fmt.Errorf("failed to read ingress config for loadbalancer for service %s in namespace %s", endpoints.Name, endpoints.Namespace)
-			}
-			for _, ingr := range svc.Status.LoadBalancer.Ingress {
-				if ingr.IP != "" {
-					wanAddr = ingr.IP
-					break
-				} else if ingr.Hostname != "" {
-					wanAddr = ingr.Hostname
-					break
-				}
-			}
-		}
-	}
-
-	wanPort, err := strconv.Atoi(pod.Annotations[constants.AnnotationGatewayWANPort])
-	if err != nil {
-		return "", 0, fmt.Errorf("failed to parse WAN port from value %s", pod.Annotations[constants.AnnotationGatewayWANPort])
-	}
-	return wanAddr, wanPort, nil
-}
-
-func (r *Controller) getService(endpoints corev1.Endpoints) (*corev1.Service, error) {
-	var svc corev1.Service
-	if err := r.Client.Get(r.Context, types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}, &svc); err != nil {
-		return nil, err
-	}
-	return &svc, nil
+	return service, proxyService, nil
 }
 
 // portValueFromIntOrString returns the integer port value from the port that can be
@@ -837,16 +675,17 @@ func portValueFromIntOrString(pod corev1.Pod, port intstr.IntOrString) (int, err
 	}
 
 	// Otherwise, find named port or try to parse the string as an int.
-	portVal, err := common.PortValue(pod, port.StrVal)
+	portVal, err := portValue(pod, port.StrVal)
 	if err != nil {
 		return 0, err
 	}
 	return int(portVal), nil
 }
 
-// consulHealthCheckID deterministically generates a health check ID based on service ID and Kubernetes namespace.
-func consulHealthCheckID(k8sNS string, serviceID string) string {
-	return fmt.Sprintf("%s/%s", k8sNS, serviceID)
+// getConsulHealthCheckID deterministically generates a health check ID that will be unique to the Agent
+// where the health check is registered and deregistered.
+func getConsulHealthCheckID(pod corev1.Pod, serviceID string) string {
+	return fmt.Sprintf("%s/%s/kubernetes-health-check", pod.Namespace, serviceID)
 }
 
 // getHealthCheckStatusReason takes a Consul health check status (either passing or critical)
@@ -859,63 +698,88 @@ func getHealthCheckStatusReason(healthCheckStatus, podName, podNamespace string)
 	return fmt.Sprintf("Pod \"%s/%s\" is not ready", podNamespace, podName)
 }
 
-// deregisterService queries all services on the node for service instances that have the metadata
+// deregisterServiceOnAllAgents queries all agents for service instances that have the metadata
 // "k8s-service-name"=k8sSvcName and "k8s-namespace"=k8sSvcNamespace. The k8s service name may or may not match the
 // consul service name, but the k8s service name will always match the metadata on the Consul service
-// "k8s-service-name". So, we query Consul services by "k8s-service-name" metadata.
-// When querying by the k8s service name and namespace, the request will return service instances and
+// "k8s-service-name". So, we query Consul services by "k8s-service-name" metadata, which is only exposed on the agent
+// API. Therefore, we need to query all agents that have services matching that metadata, and deregister each service
+// instance. When querying by the k8s service name and namespace, the request will return service instances and
 // associated proxy service instances.
 // The argument endpointsAddressesMap decides whether to deregister *all* service instances or selectively deregister
 // them only if they are not in endpointsAddressesMap. If the map is nil, it will deregister all instances. If the map
 // has addresses, it will only deregister instances not in the map.
-func (r *Controller) deregisterService(apiClient *api.Client, k8sSvcName, k8sSvcNamespace string, endpointsAddressesMap map[string]bool) error {
-	// Get services matching metadata.
-	nodesWithSvcs, err := r.serviceInstancesForK8sNodes(apiClient, k8sSvcName, k8sSvcNamespace)
-	if err != nil {
-		r.Log.Error(err, "failed to get service instances", "name", k8sSvcName)
+func (r *EndpointsController) deregisterServiceOnAllAgents(ctx context.Context, k8sSvcName, k8sSvcNamespace string, endpointsAddressesMap map[string]bool) error {
+	// Get all agents by getting pods with label component=client, app=consul and release=
+	agents := corev1.PodList{}
+	listOptions := client.ListOptions{
+		Namespace: r.ReleaseNamespace,
+		LabelSelector: labels.SelectorFromSet(map[string]string{
+			"component": "client",
+			"app":       "consul",
+			"release":   r.ReleaseName,
+		}),
+	}
+	if err := r.Client.List(ctx, &agents, &listOptions); err != nil {
+		r.Log.Error(err, "failed to get Consul client agent pods")
 		return err
 	}
 
-	// Deregister each service instance that matches the metadata.
-	for _, nodeSvcs := range nodesWithSvcs {
-		for _, svc := range nodeSvcs.Services {
-			// We need to get services matching "k8s-service-name" and "k8s-namespace" metadata.
+	// On each agent, we need to get services matching "k8s-service-name" and "k8s-namespace" metadata.
+	for _, agent := range agents.Items {
+		ready := false
+		for _, status := range agent.Status.Conditions {
+			if status.Type == corev1.PodReady {
+				ready = status.Status == corev1.ConditionTrue
+			}
+		}
+		if !ready {
+			// We can ignore this client agent here because once it switches its status from not-ready to ready,
+			// we will reconcile all services as part of that event.
+			r.Log.Info("Consul client agent is not ready, skipping deregistration", "consul-agent", agent.Name, "svc", k8sSvcName)
+			continue
+		}
+		client, err := r.remoteConsulClient(agent.Status.PodIP, r.consulNamespace(k8sSvcNamespace))
+		if err != nil {
+			r.Log.Error(err, "failed to create a new Consul client", "address", agent.Status.PodIP)
+			return err
+		}
+
+		// Get services matching metadata.
+		svcs, err := serviceInstancesForK8SServiceNameAndNamespace(k8sSvcName, k8sSvcNamespace, client)
+		if err != nil {
+			r.Log.Error(err, "failed to get service instances", "name", k8sSvcName)
+			return err
+		}
+
+		// Deregister each service instance that matches the metadata.
+		for svcID, serviceRegistration := range svcs {
			// If we selectively deregister, only deregister if the address is not in the map. Otherwise, deregister
			// every service instance.
			var serviceDeregistered bool
			if endpointsAddressesMap != nil {
-				if _, ok := endpointsAddressesMap[svc.Address]; !ok {
+				if _, ok := endpointsAddressesMap[serviceRegistration.Address]; !ok {
					// If the service address is not in the Endpoints addresses, deregister it.
- r.Log.Info("deregistering service from consul", "svc", svc.ID) - _, err = apiClient.Catalog().Deregister(&api.CatalogDeregistration{ - Node: nodeSvcs.Node.Node, - ServiceID: svc.ID, - Namespace: svc.Namespace, - }, nil) - if err != nil { - r.Log.Error(err, "failed to deregister service instance", "id", svc.ID) + r.Log.Info("deregistering service from consul", "svc", svcID) + if err = client.Agent().ServiceDeregister(svcID); err != nil { + r.Log.Error(err, "failed to deregister service instance", "id", svcID) return err } serviceDeregistered = true } } else { - r.Log.Info("deregistering service from consul", "svc", svc.ID) - if _, err = apiClient.Catalog().Deregister(&api.CatalogDeregistration{ - Node: nodeSvcs.Node.Node, - ServiceID: svc.ID, - Namespace: svc.Namespace, - }, nil); err != nil { - r.Log.Error(err, "failed to deregister service instance", "id", svc.ID) + r.Log.Info("deregistering service from consul", "svc", svcID) + if err = client.Agent().ServiceDeregister(svcID); err != nil { + r.Log.Error(err, "failed to deregister service instance", "id", svcID) return err } serviceDeregistered = true } if r.AuthMethod != "" && serviceDeregistered { - r.Log.Info("reconciling ACL tokens for service", "svc", svc.Service) - err = r.deleteACLTokensForServiceInstance(apiClient, svc, k8sSvcNamespace, svc.Meta[constants.MetaKeyPodName]) + r.Log.Info("reconciling ACL tokens for service", "svc", serviceRegistration.Service) + err = r.deleteACLTokensForServiceInstance(client, serviceRegistration.Service, k8sSvcNamespace, serviceRegistration.Meta[MetaKeyPodName]) if err != nil { - r.Log.Error(err, "failed to reconcile ACL tokens for service", "svc", svc.Service) + r.Log.Error(err, "failed to reconcile ACL tokens for service", "svc", serviceRegistration.Service) return err } } @@ -928,15 +792,13 @@ func (r *Controller) deregisterService(apiClient *api.Client, k8sSvcName, k8sSvc // deleteACLTokensForServiceInstance finds the ACL tokens that belongs to the service instance and deletes it from Consul. // It will only check for ACL tokens that have been created with the auth method this controller // has been configured with and will only delete tokens for the provided podName. -func (r *Controller) deleteACLTokensForServiceInstance(apiClient *api.Client, svc *api.AgentService, k8sNS, podName string) error { +func (r *EndpointsController) deleteACLTokensForServiceInstance(client *api.Client, serviceName, k8sNS, podName string) error { // Skip if podName is empty. 
if podName == "" { return nil } - tokens, _, err := apiClient.ACL().TokenList(&api.QueryOptions{ - Namespace: svc.Namespace, - }) + tokens, _, err := client.ACL().TokenList(nil) if err != nil { return fmt.Errorf("failed to get a list of tokens from Consul: %s", err) } @@ -944,32 +806,36 @@ func (r *Controller) deleteACLTokensForServiceInstance(apiClient *api.Client, sv for _, token := range tokens { // Only delete tokens that: // * have been created with the auth method configured for this endpoints controller - // * have a single service identity whose service name is the same as 'svc.Service' + // * have a single service identity whose service name is the same as 'serviceName' if token.AuthMethod == r.AuthMethod && len(token.ServiceIdentities) == 1 && - token.ServiceIdentities[0].ServiceName == svc.Service { + token.ServiceIdentities[0].ServiceName == serviceName { tokenMeta, err := getTokenMetaFromDescription(token.Description) if err != nil { return fmt.Errorf("failed to parse token metadata: %s", err) } - tokenPodName := strings.TrimPrefix(tokenMeta[tokenMetaPodNameKey], k8sNS+"/") + tokenPodName := strings.TrimPrefix(tokenMeta[TokenMetaPodNameKey], k8sNS+"/") // If we can't find token's pod, delete it. if tokenPodName == podName { r.Log.Info("deleting ACL token for pod", "name", podName) - if _, err := apiClient.ACL().TokenDelete(token.AccessorID, &api.WriteOptions{Namespace: svc.Namespace}); err != nil { + _, err = client.ACL().TokenDelete(token.AccessorID, nil) + if err != nil { return fmt.Errorf("failed to delete token from Consul: %s", err) } + } else if err != nil { + return err } } } + return nil } // processUpstreams reads the list of upstreams from the Pod annotation and converts them into a list of api.Upstream // objects. -func (r *Controller) processUpstreams(pod corev1.Pod, endpoints corev1.Endpoints) ([]api.Upstream, error) { +func (r *EndpointsController) processUpstreams(pod corev1.Pod, endpoints corev1.Endpoints) ([]api.Upstream, error) { // In a multiport pod, only the first service's proxy should have upstreams configured. This skips configuring // upstreams on additional services on the pod. mpIdx := getMultiPortIdx(pod, endpoints) @@ -978,7 +844,7 @@ func (r *Controller) processUpstreams(pod corev1.Pod, endpoints corev1.Endpoints } var upstreams []api.Upstream - if raw, ok := pod.Annotations[constants.AnnotationUpstreams]; ok && raw != "" { + if raw, ok := pod.Annotations[annotationUpstreams]; ok && raw != "" { for _, raw := range strings.Split(raw, ",") { var upstream api.Upstream @@ -1042,38 +908,12 @@ func getTokenMetaFromDescription(description string) (map[string]string, error) return tokenMeta, nil } -func (r *Controller) serviceInstancesForK8sNodes(apiClient *api.Client, k8sServiceName, k8sServiceNamespace string) ([]*api.CatalogNodeServiceList, error) { - var serviceList []*api.CatalogNodeServiceList - // Get a list of k8s nodes. 
- var nodeList corev1.NodeList - err := r.Client.List(r.Context, &nodeList) - if err != nil { - return nil, err - } - for _, node := range nodeList.Items { - var nodeServices *api.CatalogNodeServiceList - nodeServices, err = r.serviceInstancesForK8SServiceNameAndNamespace(apiClient, k8sServiceName, k8sServiceNamespace, common.ConsulNodeNameFromK8sNode(node.Name)) - serviceList = append(serviceList, nodeServices) - } - - return serviceList, err -} - // serviceInstancesForK8SServiceNameAndNamespace calls Consul's ServicesWithFilter to get the list // of services instances that have the provided k8sServiceName and k8sServiceNamespace in their metadata. -func (r *Controller) serviceInstancesForK8SServiceNameAndNamespace(apiClient *api.Client, k8sServiceName, k8sServiceNamespace, nodeName string) (*api.CatalogNodeServiceList, error) { - var ( - serviceList *api.CatalogNodeServiceList - err error - ) - filter := fmt.Sprintf(`Meta[%q] == %q and Meta[%q] == %q and Meta[%q] == %q`, - metaKeyKubeServiceName, k8sServiceName, constants.MetaKeyKubeNS, k8sServiceNamespace, metaKeyManagedBy, constants.ManagedByValue) - if r.EnableConsulNamespaces { - serviceList, _, err = apiClient.Catalog().NodeServiceList(nodeName, &api.QueryOptions{Filter: filter, Namespace: namespaces.WildcardNamespace}) - } else { - serviceList, _, err = apiClient.Catalog().NodeServiceList(nodeName, &api.QueryOptions{Filter: filter}) - } - return serviceList, err +func serviceInstancesForK8SServiceNameAndNamespace(k8sServiceName, k8sServiceNamespace string, client *api.Client) (map[string]*api.AgentService, error) { + return client.Agent().ServicesWithFilter( + fmt.Sprintf(`Meta[%q] == %q and Meta[%q] == %q and Meta[%q] == %q`, + MetaKeyKubeServiceName, k8sServiceName, MetaKeyKubeNS, k8sServiceNamespace, MetaKeyManagedBy, managedByValue)) } // processPreparedQueryUpstream processes an upstream in the format: @@ -1083,7 +923,7 @@ func processPreparedQueryUpstream(pod corev1.Pod, rawUpstream string) api.Upstre var port int32 parts := strings.SplitN(rawUpstream, ":", 3) - port, _ = common.PortValue(pod, strings.TrimSpace(parts[2])) + port, _ = portValue(pod, strings.TrimSpace(parts[2])) preparedQuery = strings.TrimSpace(parts[1]) var upstream api.Upstream if port > 0 { @@ -1098,14 +938,14 @@ func processPreparedQueryUpstream(pod corev1.Pod, rawUpstream string) api.Upstre // processUnlabeledUpstream processes an upstream in the format: // [service-name].[service-namespace].[service-partition]:[port]:[optional datacenter]. -func (r *Controller) processUnlabeledUpstream(pod corev1.Pod, rawUpstream string) (api.Upstream, error) { - var datacenter, svcName, namespace, partition, peer string +func (r *EndpointsController) processUnlabeledUpstream(pod corev1.Pod, rawUpstream string) (api.Upstream, error) { + var datacenter, serviceName, namespace, partition, peer string var port int32 var upstream api.Upstream parts := strings.SplitN(rawUpstream, ":", 3) - port, _ = common.PortValue(pod, strings.TrimSpace(parts[1])) + port, _ = portValue(pod, strings.TrimSpace(parts[1])) // If Consul Namespaces or Admin Partitions are enabled, attempt to parse the // upstream for a namespace. 
@@ -1119,15 +959,33 @@ func (r *Controller) processUnlabeledUpstream(pod corev1.Pod, rawUpstream string
 			namespace = strings.TrimSpace(pieces[1])
 			fallthrough
 		default:
-			svcName = strings.TrimSpace(pieces[0])
+			serviceName = strings.TrimSpace(pieces[0])
 		}
 	} else {
-		svcName = strings.TrimSpace(parts[0])
+		serviceName = strings.TrimSpace(parts[0])
 	}
 
 	// parse the optional datacenter
 	if len(parts) > 2 {
 		datacenter = strings.TrimSpace(parts[2])
+
+		// Check if there's a proxy defaults config with mesh gateway
+		// mode set to local or remote. This keeps users from
+		// accidentally forgetting to set a mesh gateway mode
+		// and then being confused as to why their traffic isn't
+		// routing.
+		entry, _, err := r.ConsulClient.ConfigEntries().Get(api.ProxyDefaults, api.ProxyConfigGlobal, nil)
+		if err != nil && strings.Contains(err.Error(), "Unexpected response code: 404") {
+			return api.Upstream{}, fmt.Errorf("upstream %q is invalid: there is no ProxyDefaults config to set mesh gateway mode", rawUpstream)
+		} else if err == nil {
+			mode := entry.(*api.ProxyConfigEntry).MeshGateway.Mode
+			if mode != api.MeshGatewayModeLocal && mode != api.MeshGatewayModeRemote {
+				return api.Upstream{}, fmt.Errorf("upstream %q is invalid: ProxyDefaults mesh gateway mode is neither %q nor %q", rawUpstream, api.MeshGatewayModeLocal, api.MeshGatewayModeRemote)
+			}
+		}
+		// NOTE: If we can't reach Consul we don't error out because
+		// that would fail the pod scheduling and this is a nice-to-have
+		// check, not something that should block during a Consul hiccup.
 	}
 
 	if port > 0 {
 		upstream = api.Upstream{
@@ -1135,7 +993,7 @@ func (r *Controller) processUnlabeledUpstream(pod corev1.Pod, rawUpstream string
 			DestinationPartition: partition,
 			DestinationPeer:      peer,
 			DestinationNamespace: namespace,
-			DestinationName:      svcName,
+			DestinationName:      serviceName,
 			Datacenter:           datacenter,
 			LocalBindPort:        int(port),
 		}
@@ -1147,14 +1005,14 @@ func (r *Controller) processUnlabeledUpstream(pod corev1.Pod, rawUpstream string
 // [service-name].svc.[service-namespace].ns.[service-peer].peer:[port]
 // [service-name].svc.[service-namespace].ns.[service-partition].ap:[port]
 // [service-name].svc.[service-namespace].ns.[service-datacenter].dc:[port].
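 //
 // For instance (an illustrative value, not one added by this change), the
 // labeled upstream "frontend.svc.web.ns.dc2.dc:2345" binds local port 2345 to
 // the "frontend" service in the "web" namespace in datacenter "dc2", roughly:
 //
 //	api.Upstream{
 //		DestinationName:      "frontend",
 //		DestinationNamespace: "web",
 //		Datacenter:           "dc2",
 //		LocalBindPort:        2345,
 //	}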
-func (r *Controller) processLabeledUpstream(pod corev1.Pod, rawUpstream string) (api.Upstream, error) { - var datacenter, svcName, namespace, partition, peer string +func (r *EndpointsController) processLabeledUpstream(pod corev1.Pod, rawUpstream string) (api.Upstream, error) { + var datacenter, serviceName, namespace, partition, peer string var port int32 var upstream api.Upstream parts := strings.SplitN(rawUpstream, ":", 3) - port, _ = common.PortValue(pod, strings.TrimSpace(parts[1])) + port, _ = portValue(pod, strings.TrimSpace(parts[1])) service := parts[0] @@ -1184,7 +1042,7 @@ func (r *Controller) processLabeledUpstream(pod corev1.Pod, rawUpstream string) fallthrough case 2: if strings.TrimSpace(pieces[1]) == "svc" { - svcName = strings.TrimSpace(pieces[0]) + serviceName = strings.TrimSpace(pieces[0]) } default: return api.Upstream{}, fmt.Errorf("upstream structured incorrectly: %s", rawUpstream) @@ -1203,7 +1061,7 @@ func (r *Controller) processLabeledUpstream(pod corev1.Pod, rawUpstream string) } fallthrough case 2: - svcName = strings.TrimSpace(pieces[0]) + serviceName = strings.TrimSpace(pieces[0]) default: return api.Upstream{}, fmt.Errorf("upstream structured incorrectly: %s", rawUpstream) } @@ -1215,7 +1073,7 @@ func (r *Controller) processLabeledUpstream(pod corev1.Pod, rawUpstream string) DestinationPartition: partition, DestinationPeer: peer, DestinationNamespace: namespace, - DestinationName: svcName, + DestinationName: serviceName, Datacenter: datacenter, LocalBindPort: int(port), } @@ -1223,6 +1081,15 @@ func (r *Controller) processLabeledUpstream(pod corev1.Pod, rawUpstream string) return upstream, nil } +// remoteConsulClient returns an *api.Client that points at the consul agent local to the pod for a provided namespace. +func (r *EndpointsController) remoteConsulClient(ip string, namespace string) (*api.Client, error) { + newAddr := fmt.Sprintf("%s://%s:%s", r.ConsulScheme, ip, r.ConsulPort) + localConfig := r.ConsulClientCfg + localConfig.Address = newAddr + localConfig.Namespace = namespace + return consul.NewClient(localConfig, r.ConsulAPITimeout) +} + // shouldIgnore ignores namespaces where we don't connect-inject. func shouldIgnore(namespace string, denySet, allowSet mapset.Set) bool { // Ignores system namespaces. @@ -1243,32 +1110,106 @@ func shouldIgnore(namespace string, denySet, allowSet mapset.Set) bool { return false } -// consulNamespace returns the Consul destination namespace for a provided Kubernetes namespace -// depending on Consul Namespaces being enabled and the value of namespace mirroring. -func (r *Controller) consulNamespace(namespace string) string { - return namespaces.ConsulNamespace(namespace, r.EnableConsulNamespaces, r.ConsulDestinationNamespace, r.EnableNSMirroring, r.NSMirroringPrefix) +// filterAgentPods receives meta and object information for Kubernetes resources that are being watched, +// which in this case are Pods. It only returns true if the Pod is a Consul Client Agent Pod. It reads the labels +// from the meta of the resource and uses the values of the "app" and "component" label to validate that +// the Pod is a Consul Client Agent. 
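+//
+// As a sketch, a client agent pod from a Helm release named "consul" (the
+// release name varies per installation; "consul" is illustrative) carries the
+// label set this predicate matches:
+//
+//	labels := map[string]string{
+//		"app":       "consul",
+//		"component": "client",
+//		"release":   "consul",
+//	}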
+func (r *EndpointsController) filterAgentPods(object client.Object) bool {
+	podLabels := object.GetLabels()
+	app, ok := podLabels["app"]
+	if !ok {
+		return false
+	}
+	component, ok := podLabels["component"]
+	if !ok {
+		return false
+	}
+
+	release, ok := podLabels["release"]
+	if !ok {
+		return false
+	}
+
+	if app == "consul" && component == "client" && release == r.ReleaseName {
+		return true
+	}
+	return false
 }
 
-func (r *Controller) appendNodeMeta(registration *api.CatalogRegistration) {
-	for k, v := range r.NodeMeta {
-		registration.NodeMeta[k] = v
+// requestsForRunningAgentPods creates a slice of requests for the endpoints controller.
+// It enqueues a request for each endpoint that needs to be reconciled. It iterates through
+// the list of endpoints and creates a request for those endpoints that have addresses
+// on the same node as the new Consul Agent pod. It receives a Pod Object which is a
+// Consul Agent that has been filtered by filterAgentPods and only enqueues endpoints
+// for client agent pods where the Ready condition is true.
+func (r *EndpointsController) requestsForRunningAgentPods(object client.Object) []ctrl.Request {
+	var consulClientPod corev1.Pod
+	r.Log.Info("received update for Consul client pod", "name", object.GetName())
+	err := r.Client.Get(r.Context, types.NamespacedName{Name: object.GetName(), Namespace: object.GetNamespace()}, &consulClientPod)
+	if k8serrors.IsNotFound(err) {
+		// Ignore if consulClientPod is not found.
+		return []ctrl.Request{}
 	}
+	if err != nil {
+		r.Log.Error(err, "failed to get Consul client pod", "name", consulClientPod.Name)
+		return []ctrl.Request{}
+	}
+	// We can ignore the agent pod if it's not running, since
+	// we can't reconcile and register/deregister services against that agent.
+	if consulClientPod.Status.Phase != corev1.PodRunning {
+		r.Log.Info("ignoring Consul client pod because it's not running", "name", consulClientPod.Name)
+		return []ctrl.Request{}
+	}
+	// We can ignore the agent pod if it's not yet ready, since
+	// we can't reconcile and register/deregister services against that agent.
+	for _, cond := range consulClientPod.Status.Conditions {
+		if cond.Type == corev1.PodReady && cond.Status != corev1.ConditionTrue {
+			// Ignore if consulClientPod is not ready.
+			r.Log.Info("ignoring Consul client pod because it's not ready", "name", consulClientPod.Name)
+			return []ctrl.Request{}
+		}
+	}
+
+	// Get the list of all endpoints.
+	var endpointsList corev1.EndpointsList
+	err = r.Client.List(r.Context, &endpointsList)
+	if err != nil {
+		r.Log.Error(err, "failed to list endpoints")
+		return []ctrl.Request{}
+	}
+
+	// Enqueue requests for endpoints that are on the same node
+	// as the client agent.
+	var requests []reconcile.Request
+	for _, ep := range endpointsList.Items {
+		for _, subset := range ep.Subsets {
+			allAddresses := subset.Addresses
+			allAddresses = append(allAddresses, subset.NotReadyAddresses...)
+			for _, address := range allAddresses {
+				// Only add requests for the address that is on the same node as the consul client pod.
+				if address.NodeName != nil && *address.NodeName == consulClientPod.Spec.NodeName {
+					requests = append(requests, reconcile.Request{NamespacedName: types.NamespacedName{Name: ep.Name, Namespace: ep.Namespace}})
+				}
+			}
+		}
+	}
+	return requests
+}
+
+// consulNamespace returns the Consul destination namespace for a provided Kubernetes namespace
+// depending on Consul Namespaces being enabled and the value of namespace mirroring.
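+//
+// A sketch of the resulting mapping, based on the namespace cases exercised in
+// the tests below (namespace names are illustrative):
+//
+//	// mirroring disabled, destination namespace "other": any k8s namespace -> "other"
+//	// mirroring enabled, no prefix:                      "kube"            -> "kube"
+//	// mirroring enabled, prefix "prefix-":               "default"         -> "prefix-default"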
+func (r *EndpointsController) consulNamespace(namespace string) string { + return namespaces.ConsulNamespace(namespace, r.EnableConsulNamespaces, r.ConsulDestinationNamespace, r.EnableNSMirroring, r.NSMirroringPrefix) } // hasBeenInjected checks the value of the status annotation and returns true if the Pod has been injected. func hasBeenInjected(pod corev1.Pod) bool { - if anno, ok := pod.Annotations[constants.KeyInjectStatus]; ok && anno == constants.Injected { + if anno, ok := pod.Annotations[keyInjectStatus]; ok && anno == injected { return true } return false } -// isGateway checks the value of the gateway annotation and returns true if the Pod represents a Gateway. -func isGateway(pod corev1.Pod) bool { - anno, ok := pod.Annotations[constants.AnnotationGatewayKind] - return ok && anno != "" -} - // mapAddresses combines all addresses to a mapping of address to its health status. func mapAddresses(addresses corev1.EndpointSubset) map[corev1.EndpointAddress]string { m := make(map[corev1.EndpointAddress]string) @@ -1286,7 +1227,7 @@ func mapAddresses(addresses corev1.EndpointSubset) map[corev1.EndpointAddress]st // isLabeledIgnore checks the value of the label `consul.hashicorp.com/service-ignore` and returns true if the // label exists and is "truthy". Otherwise, it returns false. func isLabeledIgnore(labels map[string]string) bool { - value, labelExists := labels[constants.LabelServiceIgnore] + value, labelExists := labels[labelServiceIgnore] shouldIgnore, err := strconv.ParseBool(value) return shouldIgnore && labelExists && err == nil @@ -1295,7 +1236,11 @@ func isLabeledIgnore(labels map[string]string) bool { // consulTags returns tags that should be added to the Consul service and proxy registrations. func consulTags(pod corev1.Pod) []string { var tags []string - if raw, ok := pod.Annotations[constants.AnnotationTags]; ok && raw != "" { + if raw, ok := pod.Annotations[annotationTags]; ok && raw != "" { + tags = append(tags, parsetags.ParseTags(raw)...) + } + // Get the tags from the deprecated tags annotation and combine. + if raw, ok := pod.Annotations[annotationConnectTags]; ok && raw != "" { tags = append(tags, parsetags.ParseTags(raw)...) 
} @@ -1315,8 +1260,8 @@ func consulTags(pod corev1.Pod) []string { } func getMultiPortIdx(pod corev1.Pod, serviceEndpoints corev1.Endpoints) int { - for i, name := range strings.Split(pod.Annotations[constants.AnnotationService], ",") { - if name == serviceName(pod, serviceEndpoints) { + for i, name := range strings.Split(pod.Annotations[annotationService], ",") { + if name == getServiceName(pod, serviceEndpoints) { return i } } diff --git a/control-plane/connect-inject/endpoints_controller_ent_test.go b/control-plane/connect-inject/endpoints_controller_ent_test.go new file mode 100644 index 0000000000..5859bd9206 --- /dev/null +++ b/control-plane/connect-inject/endpoints_controller_ent_test.go @@ -0,0 +1,1645 @@ +//go:build enterprise + +package connectinject + +import ( + "context" + "fmt" + "strings" + "testing" + + mapset "github.com/deckarep/golang-set" + logrtest "github.com/go-logr/logr/testing" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/hashicorp/consul-k8s/control-plane/helper/test" + "github.com/hashicorp/consul-k8s/control-plane/namespaces" + "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/sdk/testutil" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +// TestReconcileCreateEndpoint tests the logic to create service instances in Consul from the addresses in the Endpoints +// object. The cases test a basic endpoints object with two addresses. This test verifies that the services and their TTL +// health checks are created in the expected Consul namespace for various combinations of namespace flags. +// This test covers EndpointsController.createServiceRegistrations. 
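+//
+// As a concrete reference for the CheckID assertions below, the health check ID
+// that getConsulHealthCheckID produces for pod "pod1" backing "service-created"
+// in the "default" source namespace would be:
+//
+//	"default/pod1-service-created/kubernetes-health-check"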
+func TestReconcileCreateEndpointWithNamespaces(t *testing.T) { + t.Parallel() + nodeName := "test-node" + cases := map[string]struct { + Mirror bool + MirrorPrefix string + SourceKubeNS string + DestConsulNS string + ExpConsulNS string + }{ + "SourceKubeNS=default, DestConsulNS=default": { + SourceKubeNS: "default", + DestConsulNS: "default", + ExpConsulNS: "default", + }, + "SourceKubeNS=kube, DestConsulNS=default": { + SourceKubeNS: "kube", + DestConsulNS: "default", + ExpConsulNS: "default", + }, + "SourceKubeNS=default, DestConsulNS=other": { + SourceKubeNS: "default", + DestConsulNS: "other", + ExpConsulNS: "other", + }, + "SourceKubeNS=kube, DestConsulNS=other": { + SourceKubeNS: "kube", + DestConsulNS: "other", + ExpConsulNS: "other", + }, + "SourceKubeNS=default, Mirror=true": { + SourceKubeNS: "default", + Mirror: true, + ExpConsulNS: "default", + }, + "SourceKubeNS=kube, Mirror=true": { + SourceKubeNS: "kube", + Mirror: true, + ExpConsulNS: "kube", + }, + "SourceKubeNS=default, Mirror=true, Prefix=prefix": { + SourceKubeNS: "default", + Mirror: true, + MirrorPrefix: "prefix-", + ExpConsulNS: "prefix-default", + }, + } + for name, test := range cases { + setup := struct { + consulSvcName string + k8sObjects func() []runtime.Object + initialConsulSvcs []*api.AgentServiceRegistration + expectedNumSvcInstances int + expectedConsulSvcInstances []*api.CatalogService + expectedProxySvcInstances []*api.CatalogService + expectedAgentHealthChecks []*api.AgentCheck + }{ + consulSvcName: "service-created", + k8sObjects: func() []runtime.Object { + pod1 := createPodWithNamespace("pod1", test.SourceKubeNS, "1.2.3.4", true, true) + pod2 := createPodWithNamespace("pod2", test.SourceKubeNS, "2.2.3.4", true, true) + endpointWithTwoAddresses := &corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "service-created", + Namespace: test.SourceKubeNS, + }, + Subsets: []corev1.EndpointSubset{ + { + Addresses: []corev1.EndpointAddress{ + { + IP: "1.2.3.4", + NodeName: &nodeName, + TargetRef: &corev1.ObjectReference{ + Kind: "Pod", + Name: "pod1", + Namespace: test.SourceKubeNS, + }, + }, + { + IP: "2.2.3.4", + NodeName: &nodeName, + TargetRef: &corev1.ObjectReference{ + Kind: "Pod", + Name: "pod2", + Namespace: test.SourceKubeNS, + }, + }, + }, + }, + }, + } + return []runtime.Object{pod1, pod2, endpointWithTwoAddresses} + }, + initialConsulSvcs: []*api.AgentServiceRegistration{}, + expectedNumSvcInstances: 2, + expectedConsulSvcInstances: []*api.CatalogService{ + { + ServiceID: "pod1-service-created", + ServiceName: "service-created", + ServiceAddress: "1.2.3.4", + ServiceMeta: map[string]string{MetaKeyPodName: "pod1", MetaKeyKubeServiceName: "service-created", MetaKeyKubeNS: test.SourceKubeNS, MetaKeyManagedBy: managedByValue}, + ServiceTags: []string{}, + Namespace: test.ExpConsulNS, + }, + { + ServiceID: "pod2-service-created", + ServiceName: "service-created", + ServiceAddress: "2.2.3.4", + ServiceMeta: map[string]string{MetaKeyPodName: "pod2", MetaKeyKubeServiceName: "service-created", MetaKeyKubeNS: test.SourceKubeNS, MetaKeyManagedBy: managedByValue}, + ServiceTags: []string{}, + Namespace: test.ExpConsulNS, + }, + }, + expectedProxySvcInstances: []*api.CatalogService{ + { + ServiceID: "pod1-service-created-sidecar-proxy", + ServiceName: "service-created-sidecar-proxy", + ServiceAddress: "1.2.3.4", + ServicePort: 20000, + ServiceProxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "service-created", + DestinationServiceID: "pod1-service-created", + }, + ServiceMeta: 
map[string]string{MetaKeyPodName: "pod1", MetaKeyKubeServiceName: "service-created", MetaKeyKubeNS: test.SourceKubeNS, MetaKeyManagedBy: managedByValue}, + ServiceTags: []string{}, + Namespace: test.ExpConsulNS, + }, + { + ServiceID: "pod2-service-created-sidecar-proxy", + ServiceName: "service-created-sidecar-proxy", + ServiceAddress: "2.2.3.4", + ServicePort: 20000, + ServiceProxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "service-created", + DestinationServiceID: "pod2-service-created", + }, + ServiceMeta: map[string]string{MetaKeyPodName: "pod2", MetaKeyKubeServiceName: "service-created", MetaKeyKubeNS: test.SourceKubeNS, MetaKeyManagedBy: managedByValue}, + ServiceTags: []string{}, + Namespace: test.ExpConsulNS, + }, + }, + expectedAgentHealthChecks: []*api.AgentCheck{ + { + CheckID: fmt.Sprintf("%s/pod1-service-created/kubernetes-health-check", test.SourceKubeNS), + ServiceName: "service-created", + ServiceID: "pod1-service-created", + Name: "Kubernetes Health Check", + Status: api.HealthPassing, + Output: kubernetesSuccessReasonMsg, + Type: ttl, + Namespace: test.ExpConsulNS, + }, + { + CheckID: fmt.Sprintf("%s/pod2-service-created/kubernetes-health-check", test.SourceKubeNS), + ServiceName: "service-created", + ServiceID: "pod2-service-created", + Name: "Kubernetes Health Check", + Status: api.HealthPassing, + Output: kubernetesSuccessReasonMsg, + Type: ttl, + Namespace: test.ExpConsulNS, + }, + }, + } + t.Run(name, func(t *testing.T) { + // The agent pod needs to have the address 127.0.0.1 so when the + // code gets the agent pods via the label component=client, and + // makes requests against the agent API, it will actually hit the + // test server we have on localhost. + fakeClientPod := createPod("fake-consul-client", "127.0.0.1", false, false) + fakeClientPod.Labels = map[string]string{"component": "client", "app": "consul", "release": "consul"} + + // Add the pods namespace. + ns := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: test.SourceKubeNS}} + // Create fake k8s client. + k8sObjects := append(setup.k8sObjects(), fakeClientPod, &ns) + fakeClient := fake.NewClientBuilder().WithRuntimeObjects(k8sObjects...).Build() + + // Create test Consul server. + consul, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { + c.NodeName = nodeName + }) + require.NoError(t, err) + defer consul.Stop() + consul.WaitForLeader(t) + + cfg := &api.Config{ + Address: consul.HTTPAddr, + Namespace: test.ExpConsulNS, + } + consulClient, err := api.NewClient(cfg) + require.NoError(t, err) + addr := strings.Split(consul.HTTPAddr, ":") + consulPort := addr[1] + + _, err = namespaces.EnsureExists(consulClient, test.ExpConsulNS, "") + require.NoError(t, err) + + // Register service and proxy in Consul. + for _, svc := range setup.initialConsulSvcs { + err = consulClient.Agent().ServiceRegister(svc) + require.NoError(t, err) + } + + // Create the endpoints controller. 
+ ep := &EndpointsController{ + Client: fakeClient, + Log: logrtest.TestLogger{T: t}, + ConsulClient: consulClient, + ConsulPort: consulPort, + ConsulScheme: "http", + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSetWith(), + ReleaseName: "consul", + ReleaseNamespace: "default", + ConsulClientCfg: cfg, + EnableConsulNamespaces: true, + ConsulDestinationNamespace: test.DestConsulNS, + EnableNSMirroring: test.Mirror, + NSMirroringPrefix: test.MirrorPrefix, + } + namespacedName := types.NamespacedName{ + Namespace: test.SourceKubeNS, + Name: "service-created", + } + + resp, err := ep.Reconcile(context.Background(), ctrl.Request{ + NamespacedName: namespacedName, + }) + require.NoError(t, err) + require.False(t, resp.Requeue) + + // After reconciliation, Consul should have the service with the correct number of instances. + serviceInstances, _, err := consulClient.Catalog().Service(setup.consulSvcName, "", &api.QueryOptions{Namespace: test.ExpConsulNS}) + require.NoError(t, err) + require.Len(t, serviceInstances, setup.expectedNumSvcInstances) + for i, instance := range serviceInstances { + require.Equal(t, setup.expectedConsulSvcInstances[i].ServiceID, instance.ServiceID) + require.Equal(t, setup.expectedConsulSvcInstances[i].ServiceName, instance.ServiceName) + require.Equal(t, setup.expectedConsulSvcInstances[i].ServiceAddress, instance.ServiceAddress) + require.Equal(t, setup.expectedConsulSvcInstances[i].ServicePort, instance.ServicePort) + require.Equal(t, setup.expectedConsulSvcInstances[i].ServiceMeta, instance.ServiceMeta) + require.Equal(t, setup.expectedConsulSvcInstances[i].ServiceTags, instance.ServiceTags) + } + proxyServiceInstances, _, err := consulClient.Catalog().Service(fmt.Sprintf("%s-sidecar-proxy", setup.consulSvcName), "", &api.QueryOptions{ + Namespace: test.ExpConsulNS, + }) + require.NoError(t, err) + require.Len(t, proxyServiceInstances, setup.expectedNumSvcInstances) + for i, instance := range proxyServiceInstances { + require.Equal(t, setup.expectedProxySvcInstances[i].ServiceID, instance.ServiceID) + require.Equal(t, setup.expectedProxySvcInstances[i].ServiceName, instance.ServiceName) + require.Equal(t, setup.expectedProxySvcInstances[i].ServiceAddress, instance.ServiceAddress) + require.Equal(t, setup.expectedProxySvcInstances[i].ServicePort, instance.ServicePort) + require.Equal(t, setup.expectedProxySvcInstances[i].ServiceProxy, instance.ServiceProxy) + require.Equal(t, setup.expectedProxySvcInstances[i].ServiceMeta, instance.ServiceMeta) + require.Equal(t, setup.expectedProxySvcInstances[i].ServiceTags, instance.ServiceTags) + } + + _, checkInfos, err := consulClient.Agent().AgentHealthServiceByName(fmt.Sprintf("%s-sidecar-proxy", setup.consulSvcName)) + expectedChecks := []string{"Proxy Public Listener", "Destination Alias"} + require.NoError(t, err) + require.Len(t, checkInfos, setup.expectedNumSvcInstances) + for _, checkInfo := range checkInfos { + checks := checkInfo.Checks + require.Contains(t, expectedChecks, checks[0].Name) + require.Contains(t, expectedChecks, checks[1].Name) + require.Equal(t, test.ExpConsulNS, checks[0].Namespace) + require.Equal(t, test.ExpConsulNS, checks[1].Namespace) + } + + // Check that the Consul health check was created for the k8s pod. 
+ if setup.expectedAgentHealthChecks != nil { + for i := range setup.expectedConsulSvcInstances { + filter := fmt.Sprintf("CheckID == `%s`", setup.expectedAgentHealthChecks[i].CheckID) + check, err := consulClient.Agent().ChecksWithFilter(filter) + require.NoError(t, err) + require.EqualValues(t, 1, len(check)) + // Ignoring Namespace because the response from ENT includes it and OSS does not. + var ignoredFields = []string{"Node", "Definition", "Namespace", "Partition"} + require.True(t, cmp.Equal(check[setup.expectedAgentHealthChecks[i].CheckID], setup.expectedAgentHealthChecks[i], cmpopts.IgnoreFields(api.AgentCheck{}, ignoredFields...))) + } + } + }) + } +} + +// Tests updating an Endpoints object when Consul namespaces are enabled. +// - Tests updates via the register codepath: +// - When an address in an Endpoint is updated, that the corresponding service instance in Consul is updated in the correct Consul namespace. +// - When an address is added to an Endpoint, an additional service instance in Consul is registered in the correct Consul namespace. +// - Tests updates via the deregister codepath: +// - When an address is removed from an Endpoint, the corresponding service instance in Consul is deregistered. +// - When an address is removed from an Endpoint *and there are no addresses left in the Endpoint*, the +// corresponding service instance in Consul is deregistered. +// For the register and deregister codepath, this also tests that they work when the Consul service name is different +// from the K8s service name. +// This test covers EndpointsController.deregisterServiceOnAllAgents when services should be selectively deregistered +// since the map will not be nil. +func TestReconcileUpdateEndpointWithNamespaces(t *testing.T) { + t.Parallel() + nodeName := "test-node" + cases := map[string]struct { + Mirror bool + MirrorPrefix string + SourceKubeNS string + DestConsulNS string + ExpConsulNS string + }{ + "SourceKubeNS=default, DestConsulNS=default": { + SourceKubeNS: "default", + DestConsulNS: "default", + ExpConsulNS: "default", + }, + "SourceKubeNS=kube, DestConsulNS=default": { + SourceKubeNS: "kube", + DestConsulNS: "default", + ExpConsulNS: "default", + }, + "SourceKubeNS=default, DestConsulNS=other": { + SourceKubeNS: "default", + DestConsulNS: "other", + ExpConsulNS: "other", + }, + "SourceKubeNS=kube, DestConsulNS=other": { + SourceKubeNS: "kube", + DestConsulNS: "other", + ExpConsulNS: "other", + }, + "SourceKubeNS=default, Mirror=true": { + SourceKubeNS: "default", + Mirror: true, + ExpConsulNS: "default", + }, + "SourceKubeNS=kube, Mirror=true": { + SourceKubeNS: "kube", + Mirror: true, + ExpConsulNS: "kube", + }, + "SourceKubeNS=default, Mirror=true, Prefix=prefix": { + SourceKubeNS: "default", + Mirror: true, + MirrorPrefix: "prefix-", + ExpConsulNS: "prefix-default", + }, + } + for name, ts := range cases { + cases := []struct { + name string + consulSvcName string + k8sObjects func() []runtime.Object + initialConsulSvcs []*api.AgentServiceRegistration + expectedConsulSvcInstances []*api.CatalogService + expectedProxySvcInstances []*api.CatalogService + expectedAgentHealthChecks []*api.AgentCheck + enableACLs bool + }{ + { + name: "Legacy service: Health check is added to the correct namespace", + consulSvcName: "service-updated", + k8sObjects: func() []runtime.Object { + pod1 := createPodWithNamespace("pod1", ts.SourceKubeNS, "1.2.3.4", true, false) + endpoint := &corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "service-updated", + Namespace: 
ts.SourceKubeNS, + }, + Subsets: []corev1.EndpointSubset{ + { + Addresses: []corev1.EndpointAddress{ + { + IP: "1.2.3.4", + NodeName: &nodeName, + TargetRef: &corev1.ObjectReference{ + Kind: "Pod", + Name: "pod1", + Namespace: ts.SourceKubeNS, + }, + }, + }, + }, + }, + } + return []runtime.Object{pod1, endpoint} + }, + initialConsulSvcs: []*api.AgentServiceRegistration{ + { + ID: "pod1-service-updated", + Name: "service-updated", + Port: 80, + Address: "1.2.3.4", + Namespace: ts.ExpConsulNS, + }, + { + Kind: api.ServiceKindConnectProxy, + ID: "pod1-service-updated-sidecar-proxy", + Name: "service-updated-sidecar-proxy", + Port: 20000, + Address: "1.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "service-updated", + DestinationServiceID: "pod1-service-updated", + }, + Namespace: ts.ExpConsulNS, + }, + }, + expectedConsulSvcInstances: []*api.CatalogService{ + { + ServiceID: "pod1-service-updated", + ServiceAddress: "1.2.3.4", + Namespace: ts.ExpConsulNS, + }, + }, + expectedProxySvcInstances: []*api.CatalogService{ + { + ServiceID: "pod1-service-updated-sidecar-proxy", + ServiceAddress: "1.2.3.4", + Namespace: ts.ExpConsulNS, + }, + }, + expectedAgentHealthChecks: []*api.AgentCheck{ + { + CheckID: fmt.Sprintf("%s/pod1-service-updated/kubernetes-health-check", ts.SourceKubeNS), + ServiceName: "service-updated", + ServiceID: "pod1-service-updated", + Name: "Kubernetes Health Check", + Status: api.HealthPassing, + Output: kubernetesSuccessReasonMsg, + Type: ttl, + Namespace: ts.ExpConsulNS, + }, + }, + }, + { + name: "Endpoints has an updated address (pod IP change).", + consulSvcName: "service-updated", + k8sObjects: func() []runtime.Object { + pod1 := createPodWithNamespace("pod1", ts.SourceKubeNS, "4.4.4.4", true, true) + endpoint := &corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "service-updated", + Namespace: ts.SourceKubeNS, + }, + Subsets: []corev1.EndpointSubset{ + { + Addresses: []corev1.EndpointAddress{ + { + IP: "4.4.4.4", + NodeName: &nodeName, + TargetRef: &corev1.ObjectReference{ + Kind: "Pod", + Name: "pod1", + Namespace: ts.SourceKubeNS, + }, + }, + }, + }, + }, + } + return []runtime.Object{pod1, endpoint} + }, + initialConsulSvcs: []*api.AgentServiceRegistration{ + { + ID: "pod1-service-updated", + Name: "service-updated", + Port: 80, + Address: "1.2.3.4", + Meta: map[string]string{MetaKeyManagedBy: managedByValue}, + Namespace: ts.ExpConsulNS, + }, + { + Kind: api.ServiceKindConnectProxy, + ID: "pod1-service-updated-sidecar-proxy", + Name: "service-updated-sidecar-proxy", + Port: 20000, + Address: "1.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "service-updated", + DestinationServiceID: "pod1-service-updated", + }, + Namespace: ts.ExpConsulNS, + }, + }, + expectedConsulSvcInstances: []*api.CatalogService{ + { + ServiceID: "pod1-service-updated", + ServiceAddress: "4.4.4.4", + Namespace: ts.ExpConsulNS, + }, + }, + expectedProxySvcInstances: []*api.CatalogService{ + { + ServiceID: "pod1-service-updated-sidecar-proxy", + ServiceAddress: "4.4.4.4", + Namespace: ts.ExpConsulNS, + }, + }, + }, + { + name: "Different Consul service name: Endpoints has an updated address (pod IP change).", + consulSvcName: "different-consul-svc-name", + k8sObjects: func() []runtime.Object { + pod1 := createPodWithNamespace("pod1", ts.SourceKubeNS, "4.4.4.4", true, true) + pod1.Annotations[annotationService] = "different-consul-svc-name" + endpoint := &corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: 
"service-updated", + Namespace: ts.SourceKubeNS, + }, + Subsets: []corev1.EndpointSubset{ + { + Addresses: []corev1.EndpointAddress{ + { + IP: "4.4.4.4", + NodeName: &nodeName, + TargetRef: &corev1.ObjectReference{ + Kind: "Pod", + Name: "pod1", + Namespace: ts.SourceKubeNS, + }, + }, + }, + }, + }, + } + return []runtime.Object{pod1, endpoint} + }, + initialConsulSvcs: []*api.AgentServiceRegistration{ + { + ID: "pod1-different-consul-svc-name", + Name: "different-consul-svc-name", + Port: 80, + Address: "1.2.3.4", + Meta: map[string]string{MetaKeyManagedBy: managedByValue}, + Namespace: ts.ExpConsulNS, + }, + { + Kind: api.ServiceKindConnectProxy, + ID: "pod1-different-consul-svc-name-sidecar-proxy", + Name: "different-consul-svc-name-sidecar-proxy", + Port: 20000, + Address: "1.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "different-consul-svc-name", + DestinationServiceID: "pod1-different-consul-svc-name", + }, + Namespace: ts.ExpConsulNS, + }, + }, + expectedConsulSvcInstances: []*api.CatalogService{ + { + ServiceID: "pod1-different-consul-svc-name", + ServiceAddress: "4.4.4.4", + Namespace: ts.ExpConsulNS, + }, + }, + expectedProxySvcInstances: []*api.CatalogService{ + { + ServiceID: "pod1-different-consul-svc-name-sidecar-proxy", + ServiceAddress: "4.4.4.4", + Namespace: ts.ExpConsulNS, + }, + }, + }, + { + name: "Endpoints has additional address not in Consul.", + consulSvcName: "service-updated", + k8sObjects: func() []runtime.Object { + pod1 := createPodWithNamespace("pod1", ts.SourceKubeNS, "1.2.3.4", true, true) + pod2 := createPodWithNamespace("pod2", ts.SourceKubeNS, "2.2.3.4", true, true) + endpointWithTwoAddresses := &corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "service-updated", + Namespace: ts.SourceKubeNS, + }, + Subsets: []corev1.EndpointSubset{ + { + Addresses: []corev1.EndpointAddress{ + { + IP: "1.2.3.4", + NodeName: &nodeName, + TargetRef: &corev1.ObjectReference{ + Kind: "Pod", + Name: "pod1", + Namespace: ts.SourceKubeNS, + }, + }, + { + IP: "2.2.3.4", + NodeName: &nodeName, + TargetRef: &corev1.ObjectReference{ + Kind: "Pod", + Name: "pod2", + Namespace: ts.SourceKubeNS, + }, + }, + }, + }, + }, + } + return []runtime.Object{pod1, pod2, endpointWithTwoAddresses} + }, + initialConsulSvcs: []*api.AgentServiceRegistration{ + { + ID: "pod1-service-updated", + Name: "service-updated", + Port: 80, + Address: "1.2.3.4", + Meta: map[string]string{MetaKeyManagedBy: managedByValue}, + Namespace: ts.ExpConsulNS, + }, + { + Kind: api.ServiceKindConnectProxy, + ID: "pod1-service-updated-sidecar-proxy", + Name: "service-updated-sidecar-proxy", + Port: 20000, + Address: "1.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "service-updated", + DestinationServiceID: "pod1-service-updated", + }, + Namespace: ts.ExpConsulNS, + }, + }, + expectedConsulSvcInstances: []*api.CatalogService{ + { + ServiceID: "pod1-service-updated", + ServiceAddress: "1.2.3.4", + Namespace: ts.ExpConsulNS, + }, + { + ServiceID: "pod2-service-updated", + ServiceAddress: "2.2.3.4", + Namespace: ts.ExpConsulNS, + }, + }, + expectedProxySvcInstances: []*api.CatalogService{ + { + ServiceID: "pod1-service-updated-sidecar-proxy", + ServiceAddress: "1.2.3.4", + Namespace: ts.ExpConsulNS, + }, + { + ServiceID: "pod2-service-updated-sidecar-proxy", + ServiceAddress: "2.2.3.4", + Namespace: ts.ExpConsulNS, + }, + }, + }, + { + name: "Consul has instances that are not in the Endpoints addresses", + consulSvcName: "service-updated", + 
k8sObjects: func() []runtime.Object { + pod1 := createPodWithNamespace("pod1", ts.SourceKubeNS, "1.2.3.4", true, true) + endpoint := &corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "service-updated", + Namespace: ts.SourceKubeNS, + }, + Subsets: []corev1.EndpointSubset{ + { + Addresses: []corev1.EndpointAddress{ + { + IP: "1.2.3.4", + NodeName: &nodeName, + TargetRef: &corev1.ObjectReference{ + Kind: "Pod", + Name: "pod1", + Namespace: ts.SourceKubeNS, + }, + }, + }, + }, + }, + } + return []runtime.Object{pod1, endpoint} + }, + initialConsulSvcs: []*api.AgentServiceRegistration{ + { + ID: "pod1-service-updated", + Name: "service-updated", + Port: 80, + Address: "1.2.3.4", + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, MetaKeyManagedBy: managedByValue}, + Namespace: ts.ExpConsulNS, + }, + { + Kind: api.ServiceKindConnectProxy, + ID: "pod1-service-updated-sidecar-proxy", + Name: "service-updated-sidecar-proxy", + Port: 20000, + Address: "1.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "service-updated", + DestinationServiceID: "pod1-service-updated", + }, + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, MetaKeyManagedBy: managedByValue}, + Namespace: ts.ExpConsulNS, + }, + { + ID: "pod2-service-updated", + Name: "service-updated", + Port: 80, + Address: "2.2.3.4", + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, MetaKeyManagedBy: managedByValue}, + Namespace: ts.ExpConsulNS, + }, + { + Kind: api.ServiceKindConnectProxy, + ID: "pod2-service-updated-sidecar-proxy", + Name: "service-updated-sidecar-proxy", + Port: 20000, + Address: "2.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "service-updated", + DestinationServiceID: "pod2-service-updated", + }, + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, MetaKeyManagedBy: managedByValue}, + Namespace: ts.ExpConsulNS, + }, + }, + expectedConsulSvcInstances: []*api.CatalogService{ + { + ServiceID: "pod1-service-updated", + ServiceAddress: "1.2.3.4", + Namespace: ts.ExpConsulNS, + }, + }, + expectedProxySvcInstances: []*api.CatalogService{ + { + ServiceID: "pod1-service-updated-sidecar-proxy", + ServiceAddress: "1.2.3.4", + Namespace: ts.ExpConsulNS, + }, + }, + }, + { + name: "Different Consul service name: Consul has instances that are not in the Endpoints addresses", + consulSvcName: "different-consul-svc-name", + k8sObjects: func() []runtime.Object { + pod1 := createPodWithNamespace("pod1", ts.SourceKubeNS, "1.2.3.4", true, true) + pod1.Annotations[annotationService] = "different-consul-svc-name" + endpoint := &corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "service-updated", + Namespace: ts.SourceKubeNS, + }, + Subsets: []corev1.EndpointSubset{ + { + Addresses: []corev1.EndpointAddress{ + { + IP: "1.2.3.4", + NodeName: &nodeName, + TargetRef: &corev1.ObjectReference{ + Kind: "Pod", + Name: "pod1", + Namespace: ts.SourceKubeNS, + }, + }, + }, + }, + }, + } + return []runtime.Object{pod1, endpoint} + }, + initialConsulSvcs: []*api.AgentServiceRegistration{ + { + ID: "pod1-different-consul-svc-name", + Name: "different-consul-svc-name", + Port: 80, + Address: "1.2.3.4", + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, MetaKeyManagedBy: managedByValue}, + Namespace: ts.ExpConsulNS, + }, + { + Kind: 
api.ServiceKindConnectProxy, + ID: "pod1-different-consul-svc-name-sidecar-proxy", + Name: "different-consul-svc-name-sidecar-proxy", + Port: 20000, + Address: "1.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "different-consul-svc-name", + DestinationServiceID: "pod1-different-consul-svc-name", + }, + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, MetaKeyManagedBy: managedByValue}, + Namespace: ts.ExpConsulNS, + }, + { + ID: "pod2-different-consul-svc-name", + Name: "different-consul-svc-name", + Port: 80, + Address: "2.2.3.4", + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, MetaKeyManagedBy: managedByValue}, + Namespace: ts.ExpConsulNS, + }, + { + Kind: api.ServiceKindConnectProxy, + ID: "pod2-different-consul-svc-name-sidecar-proxy", + Name: "different-consul-svc-name-sidecar-proxy", + Port: 20000, + Address: "2.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "different-consul-svc-name", + DestinationServiceID: "pod2-different-consul-svc-name", + }, + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, MetaKeyManagedBy: managedByValue}, + Namespace: ts.ExpConsulNS, + }, + }, + expectedConsulSvcInstances: []*api.CatalogService{ + { + ServiceID: "pod1-different-consul-svc-name", + ServiceAddress: "1.2.3.4", + Namespace: ts.ExpConsulNS, + }, + }, + expectedProxySvcInstances: []*api.CatalogService{ + { + ServiceID: "pod1-different-consul-svc-name-sidecar-proxy", + ServiceAddress: "1.2.3.4", + Namespace: ts.ExpConsulNS, + }, + }, + }, + { + // When a k8s deployment is deleted but it's k8s service continues to exist, the endpoints has no addresses + // and the instances should be deleted from Consul. 
+ name: "Consul has instances that are not in the endpoints, and the endpoints has no addresses.", + consulSvcName: "service-updated", + k8sObjects: func() []runtime.Object { + endpoint := &corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "service-updated", + Namespace: ts.SourceKubeNS, + }, + } + return []runtime.Object{endpoint} + }, + initialConsulSvcs: []*api.AgentServiceRegistration{ + { + ID: "pod1-service-updated", + Name: "service-updated", + Port: 80, + Address: "1.2.3.4", + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, MetaKeyManagedBy: managedByValue}, + Namespace: ts.ExpConsulNS, + }, + { + Kind: api.ServiceKindConnectProxy, + ID: "pod1-service-updated-sidecar-proxy", + Name: "service-updated-sidecar-proxy", + Port: 20000, + Address: "1.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "service-updated", + DestinationServiceID: "pod1-service-updated", + }, + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, MetaKeyManagedBy: managedByValue}, + Namespace: ts.ExpConsulNS, + }, + { + ID: "pod2-service-updated", + Name: "service-updated", + Port: 80, + Address: "2.2.3.4", + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, MetaKeyManagedBy: managedByValue}, + Namespace: ts.ExpConsulNS, + }, + { + Kind: api.ServiceKindConnectProxy, + ID: "pod2-service-updated-sidecar-proxy", + Name: "service-updated-sidecar-proxy", + Port: 20000, + Address: "2.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "service-updated", + DestinationServiceID: "pod2-service-updated", + }, + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, MetaKeyManagedBy: managedByValue}, + Namespace: ts.ExpConsulNS, + }, + }, + expectedConsulSvcInstances: []*api.CatalogService{}, + expectedProxySvcInstances: []*api.CatalogService{}, + }, + { + // With a different Consul service name, when a k8s deployment is deleted but it's k8s service continues to + // exist, the endpoints has no addresses and the instances should be deleted from Consul. 
+ name: "Different Consul service name: Consul has instances that are not in the endpoints, and the endpoints has no addresses.", + consulSvcName: "different-consul-svc-name", + k8sObjects: func() []runtime.Object { + endpoint := &corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "service-updated", + Namespace: ts.SourceKubeNS, + }, + } + return []runtime.Object{endpoint} + }, + initialConsulSvcs: []*api.AgentServiceRegistration{ + { + ID: "pod1-different-consul-svc-name", + Name: "different-consul-svc-name", + Port: 80, + Address: "1.2.3.4", + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, MetaKeyManagedBy: managedByValue}, + Namespace: ts.ExpConsulNS, + }, + { + Kind: api.ServiceKindConnectProxy, + ID: "pod1-different-consul-svc-name-sidecar-proxy", + Name: "different-consul-svc-name-sidecar-proxy", + Port: 20000, + Address: "1.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "different-consul-svc-name", + DestinationServiceID: "pod1-different-consul-svc-name", + }, + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, MetaKeyManagedBy: managedByValue}, + Namespace: ts.ExpConsulNS, + }, + { + ID: "pod2-different-consul-svc-name", + Name: "different-consul-svc-name", + Port: 80, + Address: "2.2.3.4", + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, MetaKeyManagedBy: managedByValue}, + Namespace: ts.ExpConsulNS, + }, + { + Kind: api.ServiceKindConnectProxy, + ID: "pod2-different-consul-svc-name-sidecar-proxy", + Name: "different-consul-svc-name-sidecar-proxy", + Port: 20000, + Address: "2.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "different-consul-svc-name", + DestinationServiceID: "pod2-different-consul-svc-name", + }, + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": ts.SourceKubeNS, MetaKeyManagedBy: managedByValue}, + Namespace: ts.ExpConsulNS, + }, + }, + expectedConsulSvcInstances: []*api.CatalogService{}, + expectedProxySvcInstances: []*api.CatalogService{}, + }, + { + name: "ACLs enabled: Endpoints has an updated address because the target pod changes", + consulSvcName: "service-updated", + k8sObjects: func() []runtime.Object { + pod2 := createPodWithNamespace("pod2", ts.SourceKubeNS, "4.4.4.4", true, true) + endpoint := &corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "service-updated", + Namespace: ts.SourceKubeNS, + }, + Subsets: []corev1.EndpointSubset{ + { + Addresses: []corev1.EndpointAddress{ + { + IP: "4.4.4.4", + NodeName: &nodeName, + TargetRef: &corev1.ObjectReference{ + Kind: "Pod", + Name: "pod2", + Namespace: ts.SourceKubeNS, + }, + }, + }, + }, + }, + } + return []runtime.Object{pod2, endpoint} + }, + initialConsulSvcs: []*api.AgentServiceRegistration{ + { + ID: "pod1-service-updated", + Name: "service-updated", + Port: 80, + Address: "1.2.3.4", + Meta: map[string]string{ + MetaKeyManagedBy: managedByValue, + MetaKeyKubeServiceName: "service-updated", + MetaKeyPodName: "pod1", + MetaKeyKubeNS: ts.SourceKubeNS, + }, + Namespace: ts.ExpConsulNS, + }, + { + Kind: api.ServiceKindConnectProxy, + ID: "pod1-service-updated-sidecar-proxy", + Name: "service-updated-sidecar-proxy", + Port: 20000, + Address: "1.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "service-updated", + DestinationServiceID: "pod1-service-updated", + }, + Meta: map[string]string{ + MetaKeyManagedBy: 
managedByValue, + MetaKeyKubeServiceName: "service-updated", + MetaKeyPodName: "pod1", + MetaKeyKubeNS: ts.SourceKubeNS, + }, + Namespace: ts.ExpConsulNS, + }, + }, + expectedConsulSvcInstances: []*api.CatalogService{ + { + ServiceID: "pod2-service-updated", + ServiceAddress: "4.4.4.4", + Namespace: ts.ExpConsulNS, + }, + }, + expectedProxySvcInstances: []*api.CatalogService{ + { + ServiceID: "pod2-service-updated-sidecar-proxy", + ServiceAddress: "4.4.4.4", + Namespace: ts.ExpConsulNS, + }, + }, + enableACLs: true, + }, + { + name: "ACLs enabled: Consul has instances that are not in the Endpoints addresses", + consulSvcName: "service-updated", + k8sObjects: func() []runtime.Object { + pod1 := createPodWithNamespace("pod1", ts.SourceKubeNS, "1.2.3.4", true, true) + endpoint := &corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "service-updated", + Namespace: ts.SourceKubeNS, + }, + Subsets: []corev1.EndpointSubset{ + { + Addresses: []corev1.EndpointAddress{ + { + IP: "1.2.3.4", + NodeName: &nodeName, + TargetRef: &corev1.ObjectReference{ + Kind: "Pod", + Name: "pod1", + Namespace: ts.SourceKubeNS, + }, + }, + }, + }, + }, + } + return []runtime.Object{pod1, endpoint} + }, + initialConsulSvcs: []*api.AgentServiceRegistration{ + { + ID: "pod1-service-updated", + Name: "service-updated", + Port: 80, + Address: "1.2.3.4", + Meta: map[string]string{ + MetaKeyKubeServiceName: "service-updated", + MetaKeyKubeNS: ts.SourceKubeNS, + MetaKeyManagedBy: managedByValue, + MetaKeyPodName: "pod1", + }, + Namespace: ts.ExpConsulNS, + }, + { + Kind: api.ServiceKindConnectProxy, + ID: "pod1-service-updated-sidecar-proxy", + Name: "service-updated-sidecar-proxy", + Port: 20000, + Address: "1.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "service-updated", + DestinationServiceID: "pod1-service-updated", + }, + Meta: map[string]string{ + MetaKeyKubeServiceName: "service-updated", + MetaKeyKubeNS: ts.SourceKubeNS, + MetaKeyManagedBy: managedByValue, + MetaKeyPodName: "pod1", + }, + Namespace: ts.ExpConsulNS, + }, + { + ID: "pod2-service-updated", + Name: "service-updated", + Port: 80, + Address: "2.2.3.4", + Meta: map[string]string{ + MetaKeyKubeServiceName: "service-updated", + MetaKeyKubeNS: ts.SourceKubeNS, + MetaKeyManagedBy: managedByValue, + MetaKeyPodName: "pod2", + }, + Namespace: ts.ExpConsulNS, + }, + { + Kind: api.ServiceKindConnectProxy, + ID: "pod2-service-updated-sidecar-proxy", + Name: "service-updated-sidecar-proxy", + Port: 20000, + Address: "2.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "service-updated", + DestinationServiceID: "pod2-service-updated", + }, + Meta: map[string]string{ + MetaKeyKubeServiceName: "service-updated", + MetaKeyKubeNS: ts.SourceKubeNS, + MetaKeyManagedBy: managedByValue, + MetaKeyPodName: "pod2", + }, + Namespace: ts.ExpConsulNS, + }, + }, + expectedConsulSvcInstances: []*api.CatalogService{ + { + ServiceID: "pod1-service-updated", + ServiceAddress: "1.2.3.4", + Namespace: ts.ExpConsulNS, + }, + }, + expectedProxySvcInstances: []*api.CatalogService{ + { + ServiceID: "pod1-service-updated-sidecar-proxy", + ServiceAddress: "1.2.3.4", + Namespace: ts.ExpConsulNS, + }, + }, + enableACLs: true, + }, + } + for _, tt := range cases { + t.Run(fmt.Sprintf("%s: %s", name, tt.name), func(t *testing.T) { + // The agent pod needs to have the address 127.0.0.1 so when the + // code gets the agent pods via the label component=client, and + // makes requests against the agent API, it will actually hit the 
+				// test server we have on localhost.
+				fakeClientPod := createPod("fake-consul-client", "127.0.0.1", false, true)
+				fakeClientPod.Labels = map[string]string{"component": "client", "app": "consul", "release": "consul"}
+
+				// Add the pod's namespace.
+				ns := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ts.SourceKubeNS}}
+				// Create fake k8s client.
+				k8sObjects := append(tt.k8sObjects(), fakeClientPod, &ns)
+				fakeClient := fake.NewClientBuilder().WithRuntimeObjects(k8sObjects...).Build()
+
+				adminToken := "123e4567-e89b-12d3-a456-426614174000"
+				consul, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) {
+					if tt.enableACLs {
+						c.ACL.Enabled = true
+						c.ACL.Tokens.InitialManagement = adminToken
+					}
+					c.NodeName = nodeName
+				})
+				require.NoError(t, err)
+				defer consul.Stop()
+				consul.WaitForSerfCheck(t)
+
+				cfg := &api.Config{
+					Scheme:    "http",
+					Address:   consul.HTTPAddr,
+					Namespace: ts.ExpConsulNS,
+				}
+				if tt.enableACLs {
+					cfg.Token = adminToken
+				}
+
+				consulClient, err := api.NewClient(cfg)
+				require.NoError(t, err)
+				addr := strings.Split(cfg.Address, ":")
+				consulPort := addr[1]
+
+				_, err = namespaces.EnsureExists(consulClient, ts.ExpConsulNS, "")
+				require.NoError(t, err)
+
+				// Holds the token accessorID for each service ID.
+				tokensForServices := make(map[string]string)
+
+				// Register service and proxy in Consul.
+				for _, svc := range tt.initialConsulSvcs {
+					err = consulClient.Agent().ServiceRegister(svc)
+					require.NoError(t, err)
+					// Create a token for this service if ACLs are enabled.
+					if tt.enableACLs {
+						if svc.Kind != api.ServiceKindConnectProxy {
+							var writeOpts api.WriteOptions
+							// When mirroring is enabled, the auth method will be created in the "default" Consul namespace.
+							if ts.Mirror {
+								writeOpts.Namespace = "default"
+							}
+							test.SetupK8sAuthMethodWithNamespaces(t, consulClient, svc.Name, svc.Meta[MetaKeyKubeNS], ts.ExpConsulNS, ts.Mirror, ts.MirrorPrefix)
+							token, _, err := consulClient.ACL().Login(&api.ACLLoginParams{
+								AuthMethod:  test.AuthMethod,
+								BearerToken: test.ServiceAccountJWTToken,
+								Meta: map[string]string{
+									TokenMetaPodNameKey: fmt.Sprintf("%s/%s", svc.Meta[MetaKeyKubeNS], svc.Meta[MetaKeyPodName]),
+								},
+							}, &writeOpts)
+
+							require.NoError(t, err)
+
+							tokensForServices[svc.ID] = token.AccessorID
+
+							// Create another token for the same service, but for a pod that either no longer
+							// exists or that the endpoints controller doesn't know about yet. This tests the
+							// scenario of orphaned tokens, or tokens for services that haven't yet been
+							// registered with Consul: we have a token for the pod, but the service instance
+							// for that pod either no longer exists or is not yet registered in Consul.
+							// This token should not be deleted.
+							token, _, err = consulClient.ACL().Login(&api.ACLLoginParams{
+								AuthMethod:  test.AuthMethod,
+								BearerToken: test.ServiceAccountJWTToken,
+								Meta: map[string]string{
+									TokenMetaPodNameKey: fmt.Sprintf("%s/%s", svc.Meta[MetaKeyKubeNS], "does-not-exist"),
+								},
+							}, &writeOpts)
+							require.NoError(t, err)
+							tokensForServices["does-not-exist"+svc.Name] = token.AccessorID
+						}
+					}
+				}
+
+				// Create the endpoints controller.
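+				// The controller is wired directly to the fake k8s client and the local test
+				// Consul server; the namespace fields below control whether services land in a
+				// fixed destination namespace or in one mirrored from the source k8s namespace.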
+ ep := &EndpointsController{ + Client: fakeClient, + Log: logrtest.TestLogger{T: t}, + ConsulClient: consulClient, + ConsulPort: consulPort, + ConsulScheme: cfg.Scheme, + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSetWith(), + ReleaseName: "consul", + ReleaseNamespace: "default", + ConsulClientCfg: cfg, + EnableConsulNamespaces: true, + EnableNSMirroring: ts.Mirror, + NSMirroringPrefix: ts.MirrorPrefix, + ConsulDestinationNamespace: ts.DestConsulNS, + } + if tt.enableACLs { + ep.AuthMethod = test.AuthMethod + } + namespacedName := types.NamespacedName{ + Namespace: ts.SourceKubeNS, + Name: "service-updated", + } + + resp, err := ep.Reconcile(context.Background(), ctrl.Request{ + NamespacedName: namespacedName, + }) + require.NoError(t, err) + require.False(t, resp.Requeue) + + // After reconciliation, Consul should have service-updated with the correct number of instances. + serviceInstances, _, err := consulClient.Catalog().Service(tt.consulSvcName, "", &api.QueryOptions{Namespace: ts.ExpConsulNS}) + require.NoError(t, err) + require.Len(t, serviceInstances, len(tt.expectedProxySvcInstances)) + for i, instance := range serviceInstances { + require.Equal(t, tt.expectedConsulSvcInstances[i].ServiceID, instance.ServiceID) + require.Equal(t, tt.expectedConsulSvcInstances[i].ServiceAddress, instance.ServiceAddress) + } + proxyServiceInstances, _, err := consulClient.Catalog().Service(fmt.Sprintf("%s-sidecar-proxy", tt.consulSvcName), "", &api.QueryOptions{Namespace: ts.ExpConsulNS}) + require.NoError(t, err) + require.Len(t, proxyServiceInstances, len(tt.expectedProxySvcInstances)) + for i, instance := range proxyServiceInstances { + require.Equal(t, tt.expectedProxySvcInstances[i].ServiceID, instance.ServiceID) + require.Equal(t, tt.expectedProxySvcInstances[i].ServiceAddress, instance.ServiceAddress) + } + + // Check that the Consul health check was created for the k8s pod. + if tt.expectedAgentHealthChecks != nil { + for i := range tt.expectedConsulSvcInstances { + filter := fmt.Sprintf("CheckID == `%s`", tt.expectedAgentHealthChecks[i].CheckID) + check, err := consulClient.Agent().ChecksWithFilter(filter) + require.NoError(t, err) + require.EqualValues(t, 1, len(check)) + // Ignoring Namespace because the response from ENT includes it and OSS does not. + var ignoredFields = []string{"Node", "Definition", "Namespace", "Partition"} + require.True(t, cmp.Equal(check[tt.expectedAgentHealthChecks[i].CheckID], tt.expectedAgentHealthChecks[i], cmpopts.IgnoreFields(api.AgentCheck{}, ignoredFields...))) + } + } + + if tt.enableACLs { + // Put expected services into a map to make it easier to find service IDs. + expectedServices := mapset.NewSet() + for _, svc := range tt.expectedConsulSvcInstances { + expectedServices.Add(svc.ServiceID) + } + + initialServices := mapset.NewSet() + for _, svc := range tt.initialConsulSvcs { + initialServices.Add(svc.ID) + } + + // We only care about a case when services are deregistered, where + // the set of initial services is bigger than the set of expected services. + deregisteredServices := initialServices.Difference(expectedServices) + + // Look through the tokens we've created and check that only + // tokens for the deregistered services have been deleted. + for serviceID, tokenID := range tokensForServices { + // Read the token from Consul. 
+ token, _, err := consulClient.ACL().TokenRead(tokenID, nil) + if deregisteredServices.Contains(serviceID) { + require.EqualError(t, err, "Unexpected response code: 403 (ACL not found)") + } else { + require.NoError(t, err, "token should exist for service instance: "+serviceID) + require.NotNil(t, token) + } + } + } + }) + } + } +} + +// Tests deleting an Endpoints object, with and without matching Consul and K8s service names when Consul namespaces are enabled. +// This test covers EndpointsController.deregisterServiceOnAllAgents when the map is nil (not selectively deregistered). +func TestReconcileDeleteEndpointWithNamespaces(t *testing.T) { + t.Parallel() + nodeName := "test-node" + cases := map[string]struct { + Mirror bool + MirrorPrefix string + SourceKubeNS string + DestConsulNS string + ExpConsulNS string + }{ + "SourceKubeNS=default, DestConsulNS=default": { + SourceKubeNS: "default", + DestConsulNS: "default", + ExpConsulNS: "default", + }, + "SourceKubeNS=kube, DestConsulNS=default": { + SourceKubeNS: "kube", + DestConsulNS: "default", + ExpConsulNS: "default", + }, + "SourceKubeNS=default, DestConsulNS=other": { + SourceKubeNS: "default", + DestConsulNS: "other", + ExpConsulNS: "other", + }, + "SourceKubeNS=kube, DestConsulNS=other": { + SourceKubeNS: "kube", + DestConsulNS: "other", + ExpConsulNS: "other", + }, + "SourceKubeNS=default, Mirror=true": { + SourceKubeNS: "default", + Mirror: true, + ExpConsulNS: "default", + }, + "SourceKubeNS=kube, Mirror=true": { + SourceKubeNS: "kube", + Mirror: true, + ExpConsulNS: "kube", + }, + "SourceKubeNS=default, Mirror=true, Prefix=prefix": { + SourceKubeNS: "default", + Mirror: true, + MirrorPrefix: "prefix-", + ExpConsulNS: "prefix-default", + }, + } + for name, ts := range cases { + cases := []struct { + name string + consulSvcName string + initialConsulSvcs []*api.AgentServiceRegistration + enableACLs bool + }{ + { + name: "Consul service name matches K8s service name", + consulSvcName: "service-deleted", + initialConsulSvcs: []*api.AgentServiceRegistration{ + { + ID: "pod1-service-deleted", + Name: "service-deleted", + Port: 80, + Address: "1.2.3.4", + Meta: map[string]string{"k8s-service-name": "service-deleted", "k8s-namespace": ts.SourceKubeNS, MetaKeyManagedBy: managedByValue}, + Namespace: ts.ExpConsulNS, + }, + { + Kind: api.ServiceKindConnectProxy, + ID: "pod1-service-deleted-sidecar-proxy", + Name: "service-deleted-sidecar-proxy", + Port: 20000, + Address: "1.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "service-deleted", + DestinationServiceID: "pod1-service-deleted", + }, + Meta: map[string]string{"k8s-service-name": "service-deleted", "k8s-namespace": ts.SourceKubeNS, MetaKeyManagedBy: managedByValue}, + Namespace: ts.ExpConsulNS, + }, + }, + }, + { + name: "Consul service name does not match K8s service name", + consulSvcName: "different-consul-svc-name", + initialConsulSvcs: []*api.AgentServiceRegistration{ + { + ID: "pod1-different-consul-svc-name", + Name: "different-consul-svc-name", + Port: 80, + Address: "1.2.3.4", + Meta: map[string]string{"k8s-service-name": "service-deleted", "k8s-namespace": ts.SourceKubeNS, MetaKeyManagedBy: managedByValue}, + Namespace: ts.ExpConsulNS, + }, + { + Kind: api.ServiceKindConnectProxy, + ID: "pod1-different-consul-svc-name-sidecar-proxy", + Name: "different-consul-svc-name-sidecar-proxy", + Port: 20000, + Address: "1.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "different-consul-svc-name", + 
DestinationServiceID: "pod1-different-consul-svc-name", + TransparentProxy: &api.TransparentProxyConfig{}, + }, + Meta: map[string]string{"k8s-service-name": "service-deleted", "k8s-namespace": ts.SourceKubeNS, MetaKeyManagedBy: managedByValue}, + Namespace: ts.ExpConsulNS, + }, + }, + }, + { + name: "When ACLs are enabled, the ACL token should be deleted", + consulSvcName: "service-deleted", + initialConsulSvcs: []*api.AgentServiceRegistration{ + { + ID: "pod1-service-deleted", + Name: "service-deleted", + Port: 80, + Address: "1.2.3.4", + Meta: map[string]string{ + MetaKeyKubeServiceName: "service-deleted", + MetaKeyKubeNS: ts.SourceKubeNS, + MetaKeyManagedBy: managedByValue, + MetaKeyPodName: "pod1", + }, + Namespace: ts.ExpConsulNS, + }, + { + Kind: api.ServiceKindConnectProxy, + ID: "pod1-service-deleted-sidecar-proxy", + Name: "service-deleted-sidecar-proxy", + Port: 20000, + Address: "1.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "service-deleted", + DestinationServiceID: "pod1-service-deleted", + }, + Meta: map[string]string{ + MetaKeyKubeServiceName: "service-deleted", + MetaKeyKubeNS: ts.SourceKubeNS, + MetaKeyManagedBy: managedByValue, + MetaKeyPodName: "pod1", + }, + Namespace: ts.ExpConsulNS, + }, + }, + enableACLs: true, + }, + } + for _, tt := range cases { + t.Run(fmt.Sprintf("%s:%s", name, tt.name), func(t *testing.T) { + // The agent pod needs to have the address 127.0.0.1 so when the + // code gets the agent pods via the label component=client, and + // makes requests against the agent API, it will actually hit the + // test server we have on localhost. + fakeClientPod := createPod("fake-consul-client", "127.0.0.1", false, true) + fakeClientPod.Labels = map[string]string{"component": "client", "app": "consul", "release": "consul"} + + // Create fake k8s client. + fakeClient := fake.NewClientBuilder().WithRuntimeObjects(fakeClientPod).Build() + + // Create test Consul server. + adminToken := "123e4567-e89b-12d3-a456-426614174000" + consul, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { + if tt.enableACLs { + c.ACL.Enabled = true + c.ACL.Tokens.InitialManagement = adminToken + } + c.NodeName = nodeName + }) + require.NoError(t, err) + defer consul.Stop() + + consul.WaitForLeader(t) + cfg := &api.Config{ + Address: consul.HTTPAddr, + Namespace: ts.ExpConsulNS, + } + if tt.enableACLs { + cfg.Token = adminToken + } + consulClient, err := api.NewClient(cfg) + require.NoError(t, err) + addr := strings.Split(consul.HTTPAddr, ":") + consulPort := addr[1] + + _, err = namespaces.EnsureExists(consulClient, ts.ExpConsulNS, "") + require.NoError(t, err) + + // Register service and proxy in consul. + var token *api.ACLToken + for _, svc := range tt.initialConsulSvcs { + err = consulClient.Agent().ServiceRegister(svc) + require.NoError(t, err) + // Create a token for it if ACLs are enabled. + if tt.enableACLs { + if svc.Kind != api.ServiceKindConnectProxy { + var writeOpts api.WriteOptions + // When mirroring is enabled, the auth method will be created in the "default" Consul namespace. 
+ if ts.Mirror { + writeOpts.Namespace = "default" + } + test.SetupK8sAuthMethodWithNamespaces(t, consulClient, svc.Name, svc.Meta[MetaKeyKubeNS], ts.ExpConsulNS, ts.Mirror, ts.MirrorPrefix) + token, _, err = consulClient.ACL().Login(&api.ACLLoginParams{ + AuthMethod: test.AuthMethod, + BearerToken: test.ServiceAccountJWTToken, + Meta: map[string]string{ + TokenMetaPodNameKey: fmt.Sprintf("%s/%s", svc.Meta[MetaKeyKubeNS], svc.Meta[MetaKeyPodName]), + }, + }, &writeOpts) + + require.NoError(t, err) + } + } + } + + // Create the endpoints controller. + ep := &EndpointsController{ + Client: fakeClient, + Log: logrtest.TestLogger{T: t}, + ConsulClient: consulClient, + ConsulPort: consulPort, + ConsulScheme: "http", + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSetWith(), + ReleaseName: "consul", + ReleaseNamespace: "default", + ConsulClientCfg: cfg, + EnableConsulNamespaces: true, + EnableNSMirroring: ts.Mirror, + NSMirroringPrefix: ts.MirrorPrefix, + ConsulDestinationNamespace: ts.DestConsulNS, + } + if tt.enableACLs { + ep.AuthMethod = test.AuthMethod + } + + // Set up the Endpoint that will be reconciled, and reconcile. + namespacedName := types.NamespacedName{ + Namespace: ts.SourceKubeNS, + Name: "service-deleted", + } + resp, err := ep.Reconcile(context.Background(), ctrl.Request{ + NamespacedName: namespacedName, + }) + require.NoError(t, err) + require.False(t, resp.Requeue) + + // After reconciliation, Consul should not have any instances of service-deleted. + serviceInstances, _, err := consulClient.Catalog().Service(tt.consulSvcName, "", &api.QueryOptions{Namespace: ts.ExpConsulNS}) + require.NoError(t, err) + require.Empty(t, serviceInstances) + proxyServiceInstances, _, err := consulClient.Catalog().Service(fmt.Sprintf("%s-sidecar-proxy", tt.consulSvcName), "", &api.QueryOptions{Namespace: ts.ExpConsulNS}) + require.NoError(t, err) + require.Empty(t, proxyServiceInstances) + + if tt.enableACLs { + _, _, err = consulClient.ACL().TokenRead(token.AccessorID, nil) + require.EqualError(t, err, "Unexpected response code: 403 (ACL not found)") + } + }) + } + } +} + +func createPodWithNamespace(name, namespace, ip string, inject bool, managedByEndpointsController bool) *corev1.Pod { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: map[string]string{}, + Annotations: map[string]string{}, + }, + Status: corev1.PodStatus{ + PodIP: ip, + HostIP: "127.0.0.1", + Phase: corev1.PodRunning, + Conditions: []corev1.PodCondition{ + { + Type: corev1.PodReady, + Status: corev1.ConditionTrue, + }, + }, + }, + } + if inject { + pod.Labels[keyInjectStatus] = injected + pod.Annotations[keyInjectStatus] = injected + } + if managedByEndpointsController { + pod.Labels[keyManagedBy] = managedByValue + } + return pod + +} diff --git a/control-plane/connect-inject/controllers/endpoints/endpoints_controller_test.go b/control-plane/connect-inject/endpoints_controller_test.go similarity index 53% rename from control-plane/connect-inject/controllers/endpoints/endpoints_controller_test.go rename to control-plane/connect-inject/endpoints_controller_test.go index 63cde6404b..ce76d1fa49 100644 --- a/control-plane/connect-inject/controllers/endpoints/endpoints_controller_test.go +++ b/control-plane/connect-inject/endpoints_controller_test.go @@ -1,4 +1,4 @@ -package endpoints +package connectinject import ( "context" @@ -10,8 +10,6 @@ import ( logrtest "github.com/go-logr/logr/testing" "github.com/google/go-cmp/cmp" 
"github.com/google/go-cmp/cmp/cmpopts" - "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" - "github.com/hashicorp/consul-k8s/control-plane/connect-inject/metrics" "github.com/hashicorp/consul-k8s/control-plane/helper/test" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/sdk/testutil" @@ -27,8 +25,7 @@ import ( ) const ( - nodeName = "test-node" - consulNodeName = "test-node-virtual" + ttl = "ttl" ) func TestShouldIgnore(t *testing.T) { @@ -94,7 +91,7 @@ func TestHasBeenInjected(t *testing.T) { { name: "Pod with injected annotation", pod: func() corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) + pod1 := createPod("pod1", "1.2.3.4", true, true) return *pod1 }, expected: true, @@ -102,7 +99,7 @@ func TestHasBeenInjected(t *testing.T) { { name: "Pod without injected annotation", pod: func() corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", false, true) + pod1 := createPod("pod1", "1.2.3.4", false, true) return *pod1 }, expected: false, @@ -117,8 +114,84 @@ func TestHasBeenInjected(t *testing.T) { } } +// TestProcessUpstreamsTLSandACLs enables TLS and ACLS and tests processUpstreams through +// the only path which sets up and uses a consul client: when proxy defaults need to be read. +// This test was plucked from the table test TestProcessUpstreams as the rest do not use the client. +func TestProcessUpstreamsTLSandACLs(t *testing.T) { + t.Parallel() + nodeName := "test-node" + + masterToken := "b78d37c7-0ca7-5f4d-99ee-6d9975ce4586" + caFile, certFile, keyFile := test.GenerateServerCerts(t) + // Create test consul server with ACLs and TLS. + consul, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { + c.ACL.Enabled = true + c.ACL.DefaultPolicy = "deny" + c.ACL.Tokens.InitialManagement = masterToken + c.CAFile = caFile + c.CertFile = certFile + c.KeyFile = keyFile + c.NodeName = nodeName + }) + require.NoError(t, err) + defer consul.Stop() + + consul.WaitForServiceIntentions(t) + cfg := &api.Config{ + Address: consul.HTTPSAddr, + Scheme: "https", + TLSConfig: api.TLSConfig{ + CAFile: caFile, + }, + Token: masterToken, + } + consulClient, err := api.NewClient(cfg) + require.NoError(t, err) + addr := strings.Split(consul.HTTPSAddr, ":") + consulPort := addr[1] + + ce, _ := api.MakeConfigEntry(api.ProxyDefaults, "global") + pd := ce.(*api.ProxyConfigEntry) + pd.MeshGateway.Mode = api.MeshGatewayModeRemote + _, _, err = consulClient.ConfigEntries().Set(pd, &api.WriteOptions{}) + require.NoError(t, err) + + ep := &EndpointsController{ + Log: logrtest.TestLogger{T: t}, + ConsulClient: consulClient, + ConsulPort: consulPort, + ConsulScheme: "https", + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSetWith(), + } + + pod := createPod("pod1", "1.2.3.4", true, true) + pod.Annotations[annotationUpstreams] = "upstream1:1234:dc1" + + upstreams, err := ep.processUpstreams(*pod, corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "svcname", + Namespace: "default", + Labels: map[string]string{}, + Annotations: map[string]string{}, + }, + }) + require.NoError(t, err) + + expected := []api.Upstream{ + { + DestinationType: api.UpstreamDestTypeService, + DestinationName: "upstream1", + Datacenter: "dc1", + LocalBindPort: 1234, + }, + } + require.Equal(t, expected, upstreams) +} + func TestProcessUpstreams(t *testing.T) { t.Parallel() + nodeName := "test-node" cases := []struct { name string pod func() *corev1.Pod @@ -132,8 +205,8 @@ func TestProcessUpstreams(t *testing.T) { { 
name: "annotated upstream with svc only", pod: func() *corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) - pod1.Annotations[constants.AnnotationUpstreams] = "upstream1.svc:1234" + pod1 := createPod("pod1", "1.2.3.4", true, true) + pod1.Annotations[annotationUpstreams] = "upstream1.svc:1234" return pod1 }, expected: []api.Upstream{ @@ -149,8 +222,8 @@ func TestProcessUpstreams(t *testing.T) { { name: "annotated upstream with svc and dc", pod: func() *corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) - pod1.Annotations[constants.AnnotationUpstreams] = "upstream1.svc.dc1.dc:1234" + pod1 := createPod("pod1", "1.2.3.4", true, true) + pod1.Annotations[annotationUpstreams] = "upstream1.svc.dc1.dc:1234" return pod1 }, expected: []api.Upstream{ @@ -167,8 +240,8 @@ func TestProcessUpstreams(t *testing.T) { { name: "annotated upstream with svc and peer", pod: func() *corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) - pod1.Annotations[constants.AnnotationUpstreams] = "upstream1.svc.peer1.peer:1234" + pod1 := createPod("pod1", "1.2.3.4", true, true) + pod1.Annotations[annotationUpstreams] = "upstream1.svc.peer1.peer:1234" return pod1 }, expected: []api.Upstream{ @@ -185,8 +258,8 @@ func TestProcessUpstreams(t *testing.T) { { name: "annotated upstream with svc and peer, needs ns before peer if namespaces enabled", pod: func() *corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) - pod1.Annotations[constants.AnnotationUpstreams] = "upstream1.svc.peer1.peer:1234" + pod1 := createPod("pod1", "1.2.3.4", true, true) + pod1.Annotations[annotationUpstreams] = "upstream1.svc.peer1.peer:1234" return pod1 }, expErr: "upstream structured incorrectly: upstream1.svc.peer1.peer:1234", @@ -196,8 +269,8 @@ func TestProcessUpstreams(t *testing.T) { { name: "annotated upstream with svc, ns, and peer", pod: func() *corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) - pod1.Annotations[constants.AnnotationUpstreams] = "upstream1.svc.ns1.ns.peer1.peer:1234" + pod1 := createPod("pod1", "1.2.3.4", true, true) + pod1.Annotations[annotationUpstreams] = "upstream1.svc.ns1.ns.peer1.peer:1234" return pod1 }, expected: []api.Upstream{ @@ -215,8 +288,8 @@ func TestProcessUpstreams(t *testing.T) { { name: "annotated upstream with svc, ns, and partition", pod: func() *corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) - pod1.Annotations[constants.AnnotationUpstreams] = "upstream1.svc.ns1.ns.part1.ap:1234" + pod1 := createPod("pod1", "1.2.3.4", true, true) + pod1.Annotations[annotationUpstreams] = "upstream1.svc.ns1.ns.part1.ap:1234" return pod1 }, expected: []api.Upstream{ @@ -234,8 +307,8 @@ func TestProcessUpstreams(t *testing.T) { { name: "annotated upstream with svc, ns, and dc", pod: func() *corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) - pod1.Annotations[constants.AnnotationUpstreams] = "upstream1.svc.ns1.ns.dc1.dc:1234" + pod1 := createPod("pod1", "1.2.3.4", true, true) + pod1.Annotations[annotationUpstreams] = "upstream1.svc.ns1.ns.dc1.dc:1234" return pod1 }, expected: []api.Upstream{ @@ -253,8 +326,8 @@ func TestProcessUpstreams(t *testing.T) { { name: "multiple annotated upstreams", pod: func() *corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) - pod1.Annotations[constants.AnnotationUpstreams] = "upstream1.svc.ns1.ns.dc1.dc:1234, upstream2.svc:2234, upstream3.svc.ns1.ns:3234, upstream4.svc.ns1.ns.peer1.peer:4234" + pod1 := createPod("pod1", "1.2.3.4", true, 
true) + pod1.Annotations[annotationUpstreams] = "upstream1.svc.ns1.ns.dc1.dc:1234, upstream2.svc:2234, upstream3.svc.ns1.ns:3234, upstream4.svc.ns1.ns.peer1.peer:4234" return pod1 }, expected: []api.Upstream{ @@ -290,8 +363,8 @@ func TestProcessUpstreams(t *testing.T) { { name: "annotated upstream error: invalid partition/dc/peer", pod: func() *corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) - pod1.Annotations[constants.AnnotationUpstreams] = "upstream1.svc.ns1.ns.part1.err:1234" + pod1 := createPod("pod1", "1.2.3.4", true, true) + pod1.Annotations[annotationUpstreams] = "upstream1.svc.ns1.ns.part1.err:1234" return pod1 }, expErr: "upstream structured incorrectly: upstream1.svc.ns1.ns.part1.err:1234", @@ -301,8 +374,8 @@ func TestProcessUpstreams(t *testing.T) { { name: "annotated upstream error: invalid namespace", pod: func() *corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) - pod1.Annotations[constants.AnnotationUpstreams] = "upstream1.svc.ns1.err:1234" + pod1 := createPod("pod1", "1.2.3.4", true, true) + pod1.Annotations[annotationUpstreams] = "upstream1.svc.ns1.err:1234" return pod1 }, expErr: "upstream structured incorrectly: upstream1.svc.ns1.err:1234", @@ -312,8 +385,8 @@ func TestProcessUpstreams(t *testing.T) { { name: "annotated upstream error: invalid number of pieces in the address", pod: func() *corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) - pod1.Annotations[constants.AnnotationUpstreams] = "upstream1.svc.err:1234" + pod1 := createPod("pod1", "1.2.3.4", true, true) + pod1.Annotations[annotationUpstreams] = "upstream1.svc.err:1234" return pod1 }, expErr: "upstream structured incorrectly: upstream1.svc.err:1234", @@ -323,8 +396,8 @@ func TestProcessUpstreams(t *testing.T) { { name: "annotated upstream error: invalid peer", pod: func() *corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) - pod1.Annotations[constants.AnnotationUpstreams] = "upstream1.svc.peer1.err:1234" + pod1 := createPod("pod1", "1.2.3.4", true, true) + pod1.Annotations[annotationUpstreams] = "upstream1.svc.peer1.err:1234" return pod1 }, expErr: "upstream structured incorrectly: upstream1.svc.peer1.err:1234", @@ -334,19 +407,47 @@ func TestProcessUpstreams(t *testing.T) { { name: "annotated upstream error: invalid number of pieces in the address without namespaces and partitions", pod: func() *corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) - pod1.Annotations[constants.AnnotationUpstreams] = "upstream1.svc.err:1234" + pod1 := createPod("pod1", "1.2.3.4", true, true) + pod1.Annotations[annotationUpstreams] = "upstream1.svc.err:1234" return pod1 }, expErr: "upstream structured incorrectly: upstream1.svc.err:1234", consulNamespacesEnabled: false, consulPartitionsEnabled: false, }, + { + name: "upstream with datacenter without ProxyDefaults", + pod: func() *corev1.Pod { + pod1 := createPod("pod1", "1.2.3.4", true, true) + pod1.Annotations[annotationUpstreams] = "upstream1:1234:dc1" + return pod1 + }, + expErr: "upstream \"upstream1:1234:dc1\" is invalid: there is no ProxyDefaults config to set mesh gateway mode", + consulNamespacesEnabled: false, + consulPartitionsEnabled: false, + }, + { + name: "upstream with datacenter with ProxyDefaults whose mesh gateway mode is not local or remote", + pod: func() *corev1.Pod { + pod1 := createPod("pod1", "1.2.3.4", true, true) + pod1.Annotations[annotationUpstreams] = "upstream1:1234:dc1" + return pod1 + }, + expErr: "upstream \"upstream1:1234:dc1\" is invalid: 
ProxyDefaults mesh gateway mode is neither \"local\" nor \"remote\"", + configEntry: func() api.ConfigEntry { + ce, _ := api.MakeConfigEntry(api.ProxyDefaults, "global") + pd := ce.(*api.ProxyConfigEntry) + pd.MeshGateway.Mode = "bad-mode" + return pd + }, + consulNamespacesEnabled: false, + consulPartitionsEnabled: false, + }, { name: "annotated upstream error: both peer and partition provided", pod: func() *corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) - pod1.Annotations[constants.AnnotationUpstreams] = "upstream1.svc.ns1.ns.part1.partition.peer1.peer:1234" + pod1 := createPod("pod1", "1.2.3.4", true, true) + pod1.Annotations[annotationUpstreams] = "upstream1.svc.ns1.ns.part1.partition.peer1.peer:1234" return pod1 }, expErr: "upstream structured incorrectly: upstream1.svc.ns1.ns.part1.partition.peer1.peer:1234", @@ -356,8 +457,8 @@ func TestProcessUpstreams(t *testing.T) { { name: "annotated upstream error: both peer and dc provided", pod: func() *corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) - pod1.Annotations[constants.AnnotationUpstreams] = "upstream1.svc.ns1.ns.peer1.peer.dc1.dc:1234" + pod1 := createPod("pod1", "1.2.3.4", true, true) + pod1.Annotations[annotationUpstreams] = "upstream1.svc.ns1.ns.peer1.peer.dc1.dc:1234" return pod1 }, expErr: "upstream structured incorrectly: upstream1.svc.ns1.ns.peer1.peer.dc1.dc:1234", @@ -367,19 +468,67 @@ func TestProcessUpstreams(t *testing.T) { { name: "annotated upstream error: both dc and partition provided", pod: func() *corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) - pod1.Annotations[constants.AnnotationUpstreams] = "upstream1.svc.ns1.ns.part1.partition.dc1.dc:1234" + pod1 := createPod("pod1", "1.2.3.4", true, true) + pod1.Annotations[annotationUpstreams] = "upstream1.svc.ns1.ns.part1.partition.dc1.dc:1234" return pod1 }, expErr: "upstream structured incorrectly: upstream1.svc.ns1.ns.part1.partition.dc1.dc:1234", consulNamespacesEnabled: true, consulPartitionsEnabled: true, }, + { + name: "upstream with datacenter with ProxyDefaults and mesh gateway is in local mode", + pod: func() *corev1.Pod { + pod1 := createPod("pod1", "1.2.3.4", true, true) + pod1.Annotations[annotationUpstreams] = "upstream1:1234:dc1" + return pod1 + }, + expected: []api.Upstream{ + { + DestinationType: api.UpstreamDestTypeService, + DestinationName: "upstream1", + Datacenter: "dc1", + LocalBindPort: 1234, + }, + }, + configEntry: func() api.ConfigEntry { + ce, _ := api.MakeConfigEntry(api.ProxyDefaults, "global") + pd := ce.(*api.ProxyConfigEntry) + pd.MeshGateway.Mode = api.MeshGatewayModeLocal + return pd + }, + consulNamespacesEnabled: false, + consulPartitionsEnabled: false, + }, + { + name: "upstream with datacenter with ProxyDefaults and mesh gateway in remote mode", + pod: func() *corev1.Pod { + pod1 := createPod("pod1", "1.2.3.4", true, true) + pod1.Annotations[annotationUpstreams] = "upstream1:1234:dc1" + return pod1 + }, + expected: []api.Upstream{ + { + DestinationType: api.UpstreamDestTypeService, + DestinationName: "upstream1", + Datacenter: "dc1", + LocalBindPort: 1234, + }, + }, + configEntry: func() api.ConfigEntry { + ce, _ := api.MakeConfigEntry(api.ProxyDefaults, "global") + pd := ce.(*api.ProxyConfigEntry) + pd.MeshGateway.Mode = api.MeshGatewayModeRemote + return pd + }, + consulNamespacesEnabled: false, + consulPartitionsEnabled: false, + }, { name: "when consul is unavailable, we don't return an error", pod: func() *corev1.Pod { - pod1 := createServicePod("pod1", 
"1.2.3.4", true, true) - pod1.Annotations[constants.AnnotationUpstreams] = "upstream1:1234:dc1" + pod1 := createPod("pod1", "1.2.3.4", true, true) + pod1.Annotations[annotationUpstreams] = "upstream1:1234:dc1" return pod1 }, expErr: "", @@ -404,8 +553,8 @@ func TestProcessUpstreams(t *testing.T) { { name: "single upstream", pod: func() *corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) - pod1.Annotations[constants.AnnotationUpstreams] = "upstream:1234" + pod1 := createPod("pod1", "1.2.3.4", true, true) + pod1.Annotations[annotationUpstreams] = "upstream:1234" return pod1 }, expected: []api.Upstream{ @@ -421,8 +570,8 @@ func TestProcessUpstreams(t *testing.T) { { name: "single upstream with namespace", pod: func() *corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) - pod1.Annotations[constants.AnnotationUpstreams] = "upstream.foo:1234" + pod1 := createPod("pod1", "1.2.3.4", true, true) + pod1.Annotations[annotationUpstreams] = "upstream.foo:1234" return pod1 }, expected: []api.Upstream{ @@ -439,8 +588,8 @@ func TestProcessUpstreams(t *testing.T) { { name: "single upstream with namespace and partition", pod: func() *corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) - pod1.Annotations[constants.AnnotationUpstreams] = "upstream.foo.bar:1234" + pod1 := createPod("pod1", "1.2.3.4", true, true) + pod1.Annotations[annotationUpstreams] = "upstream.foo.bar:1234" return pod1 }, expected: []api.Upstream{ @@ -458,8 +607,8 @@ func TestProcessUpstreams(t *testing.T) { { name: "multiple upstreams", pod: func() *corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) - pod1.Annotations[constants.AnnotationUpstreams] = "upstream1:1234, upstream2:2234" + pod1 := createPod("pod1", "1.2.3.4", true, true) + pod1.Annotations[annotationUpstreams] = "upstream1:1234, upstream2:2234" return pod1 }, expected: []api.Upstream{ @@ -480,8 +629,8 @@ func TestProcessUpstreams(t *testing.T) { { name: "multiple upstreams with consul namespaces, partitions and datacenters", pod: func() *corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) - pod1.Annotations[constants.AnnotationUpstreams] = "upstream1:1234, upstream2.bar:2234, upstream3.foo.baz:3234:dc2" + pod1 := createPod("pod1", "1.2.3.4", true, true) + pod1.Annotations[annotationUpstreams] = "upstream1:1234, upstream2.bar:2234, upstream3.foo.baz:3234:dc2" return pod1 }, configEntry: func() api.ConfigEntry { @@ -516,8 +665,8 @@ func TestProcessUpstreams(t *testing.T) { { name: "multiple upstreams with consul namespaces and datacenters", pod: func() *corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) - pod1.Annotations[constants.AnnotationUpstreams] = "upstream1:1234, upstream2.bar:2234, upstream3.foo:3234:dc2" + pod1 := createPod("pod1", "1.2.3.4", true, true) + pod1.Annotations[annotationUpstreams] = "upstream1:1234, upstream2.bar:2234, upstream3.foo:3234:dc2" return pod1 }, configEntry: func() api.ConfigEntry { @@ -550,8 +699,8 @@ func TestProcessUpstreams(t *testing.T) { { name: "prepared query upstream", pod: func() *corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) - pod1.Annotations[constants.AnnotationUpstreams] = "prepared_query:queryname:1234" + pod1 := createPod("pod1", "1.2.3.4", true, true) + pod1.Annotations[annotationUpstreams] = "prepared_query:queryname:1234" return pod1 }, expected: []api.Upstream{ @@ -567,8 +716,8 @@ func TestProcessUpstreams(t *testing.T) { { name: "prepared query and non-query upstreams and annotated 
non-query upstreams", pod: func() *corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) - pod1.Annotations[constants.AnnotationUpstreams] = "prepared_query:queryname:1234, upstream1:2234, prepared_query:6687bd19-5654-76be-d764:8202, upstream2.svc:3234" + pod1 := createPod("pod1", "1.2.3.4", true, true) + pod1.Annotations[annotationUpstreams] = "prepared_query:queryname:1234, upstream1:2234, prepared_query:6687bd19-5654-76be-d764:8202, upstream2.svc:3234" return pod1 }, expected: []api.Upstream{ @@ -599,8 +748,34 @@ func TestProcessUpstreams(t *testing.T) { } for _, tt := range cases { t.Run(tt.name, func(t *testing.T) { - ep := &Controller{ + // Create test consul server. + consul, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { + c.NodeName = nodeName + }) + require.NoError(t, err) + defer consul.Stop() + + consul.WaitForServiceIntentions(t) + httpAddr := consul.HTTPAddr + if tt.consulUnavailable { + httpAddr = "hostname.does.not.exist:8500" + } + consulClient, err := api.NewClient(&api.Config{ + Address: httpAddr, + }) + require.NoError(t, err) + addr := strings.Split(httpAddr, ":") + consulPort := addr[1] + + if tt.configEntry != nil { + consulClient.ConfigEntries().Set(tt.configEntry(), &api.WriteOptions{}) + } + + ep := &EndpointsController{ Log: logrtest.TestLogger{T: t}, + ConsulClient: consulClient, + ConsulPort: consulPort, + ConsulScheme: "http", AllowK8sNamespacesSet: mapset.NewSetWith("*"), DenyK8sNamespacesSet: mapset.NewSetWith(), EnableConsulNamespaces: tt.consulNamespacesEnabled, @@ -636,8 +811,8 @@ func TestGetServiceName(t *testing.T) { { name: "single port, with annotation", pod: func() *corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) - pod1.Annotations[constants.AnnotationService] = "web" + pod1 := createPod("pod1", "1.2.3.4", true, true) + pod1.Annotations[annotationService] = "web" return pod1 }, endpoint: &corev1.Endpoints{ @@ -651,7 +826,7 @@ func TestGetServiceName(t *testing.T) { { name: "single port, without annotation", pod: func() *corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) + pod1 := createPod("pod1", "1.2.3.4", true, true) return pod1 }, endpoint: &corev1.Endpoints{ @@ -665,8 +840,8 @@ func TestGetServiceName(t *testing.T) { { name: "multi port, with annotation", pod: func() *corev1.Pod { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) - pod1.Annotations[constants.AnnotationService] = "web,web-admin" + pod1 := createPod("pod1", "1.2.3.4", true, true) + pod1.Annotations[annotationService] = "web,web-admin" return pod1 }, endpoint: &corev1.Endpoints{ @@ -681,7 +856,7 @@ func TestGetServiceName(t *testing.T) { for _, tt := range cases { t.Run(tt.name, func(t *testing.T) { - svcName := serviceName(*tt.pod(), *tt.endpoint) + svcName := getServiceName(*tt.pod(), *tt.endpoint) require.Equal(t, tt.expSvcName, svcName) }) @@ -690,24 +865,25 @@ func TestGetServiceName(t *testing.T) { func TestReconcileCreateEndpoint_MultiportService(t *testing.T) { t.Parallel() + nodeName := "test-node" cases := []struct { - name string - consulSvcName string - k8sObjects func() []runtime.Object - initialConsulSvcs []*api.AgentService - expectedNumSvcInstances int - expectedConsulSvcInstances []*api.CatalogService - expectedProxySvcInstances []*api.CatalogService - expectedHealthChecks []*api.HealthCheck + name string + consulSvcName string + k8sObjects func() []runtime.Object + initialConsulSvcs []*api.AgentServiceRegistration + expectedNumSvcInstances int + 
expectedConsulSvcInstancesMap map[string][]*api.CatalogService + expectedProxySvcInstancesMap map[string][]*api.CatalogService + expectedAgentHealthChecks []*api.AgentCheck }{ { name: "Multiport service", consulSvcName: "web,web-admin", k8sObjects: func() []runtime.Object { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) - pod1.Annotations[constants.AnnotationPort] = "8080,9090" - pod1.Annotations[constants.AnnotationService] = "web,web-admin" - pod1.Annotations[constants.AnnotationUpstreams] = "upstream1:1234" + pod1 := createPod("pod1", "1.2.3.4", true, true) + pod1.Annotations[annotationPort] = "8080,9090" + pod1.Annotations[annotationService] = "web,web-admin" + pod1.Annotations[annotationUpstreams] = "upstream1:1234" endpoint1 := &corev1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ Name: "web", @@ -717,7 +893,8 @@ func TestReconcileCreateEndpoint_MultiportService(t *testing.T) { { Addresses: []corev1.EndpointAddress{ { - IP: "1.2.3.4", + IP: "1.2.3.4", + NodeName: &nodeName, TargetRef: &corev1.ObjectReference{ Kind: "Pod", Name: "pod1", @@ -737,7 +914,8 @@ func TestReconcileCreateEndpoint_MultiportService(t *testing.T) { { Addresses: []corev1.EndpointAddress{ { - IP: "1.2.3.4", + IP: "1.2.3.4", + NodeName: &nodeName, TargetRef: &corev1.ObjectReference{ Kind: "Pod", Name: "pod1", @@ -750,162 +928,163 @@ func TestReconcileCreateEndpoint_MultiportService(t *testing.T) { } return []runtime.Object{pod1, endpoint1, endpoint2} }, - initialConsulSvcs: nil, + initialConsulSvcs: []*api.AgentServiceRegistration{}, expectedNumSvcInstances: 1, - expectedConsulSvcInstances: []*api.CatalogService{ - { - ServiceID: "pod1-web", - ServiceName: "web", - ServiceAddress: "1.2.3.4", - ServicePort: 8080, - ServiceMeta: map[string]string{ - constants.MetaKeyPodName: "pod1", - metaKeyKubeServiceName: "web", - constants.MetaKeyKubeNS: "default", - metaKeyManagedBy: constants.ManagedByValue, - metaKeySyntheticNode: "true", + expectedConsulSvcInstancesMap: map[string][]*api.CatalogService{ + "web": { + { + ServiceID: "pod1-web", + ServiceName: "web", + ServiceAddress: "1.2.3.4", + ServicePort: 8080, + ServiceMeta: map[string]string{ + MetaKeyPodName: "pod1", + MetaKeyKubeServiceName: "web", + MetaKeyKubeNS: "default", + MetaKeyManagedBy: managedByValue, + }, + ServiceTags: []string{}, }, - ServiceTags: []string{}, }, - { - ServiceID: "pod1-web-admin", - ServiceName: "web-admin", - ServiceAddress: "1.2.3.4", - ServicePort: 9090, - ServiceMeta: map[string]string{ - constants.MetaKeyPodName: "pod1", - metaKeyKubeServiceName: "web-admin", - constants.MetaKeyKubeNS: "default", - metaKeyManagedBy: constants.ManagedByValue, - metaKeySyntheticNode: "true", + "web-admin": { + { + ServiceID: "pod1-web-admin", + ServiceName: "web-admin", + ServiceAddress: "1.2.3.4", + ServicePort: 9090, + ServiceMeta: map[string]string{ + MetaKeyPodName: "pod1", + MetaKeyKubeServiceName: "web-admin", + MetaKeyKubeNS: "default", + MetaKeyManagedBy: managedByValue, + }, + ServiceTags: []string{}, }, - ServiceTags: []string{}, }, }, - expectedProxySvcInstances: []*api.CatalogService{ - { - ServiceID: "pod1-web-sidecar-proxy", - ServiceName: "web-sidecar-proxy", - ServiceAddress: "1.2.3.4", - ServicePort: 20000, - ServiceProxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "web", - DestinationServiceID: "pod1-web", - LocalServiceAddress: "127.0.0.1", - LocalServicePort: 8080, - Upstreams: []api.Upstream{ - { - DestinationType: api.UpstreamDestTypeService, - DestinationName: "upstream1", - LocalBindPort: 1234, + 
expectedProxySvcInstancesMap: map[string][]*api.CatalogService{ + "web": { + { + ServiceID: "pod1-web-sidecar-proxy", + ServiceName: "web-sidecar-proxy", + ServiceAddress: "1.2.3.4", + ServicePort: 20000, + ServiceProxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "web", + DestinationServiceID: "pod1-web", + LocalServiceAddress: "127.0.0.1", + LocalServicePort: 8080, + Upstreams: []api.Upstream{ + { + DestinationType: api.UpstreamDestTypeService, + DestinationName: "upstream1", + LocalBindPort: 1234, + }, }, }, + ServiceMeta: map[string]string{ + MetaKeyPodName: "pod1", + MetaKeyKubeServiceName: "web", + MetaKeyKubeNS: "default", + MetaKeyManagedBy: managedByValue, + }, + ServiceTags: []string{}, }, - ServiceMeta: map[string]string{ - constants.MetaKeyPodName: "pod1", - metaKeyKubeServiceName: "web", - constants.MetaKeyKubeNS: "default", - metaKeyManagedBy: constants.ManagedByValue, - metaKeySyntheticNode: "true", - }, - ServiceTags: []string{}, }, - { - ServiceID: "pod1-web-admin-sidecar-proxy", - ServiceName: "web-admin-sidecar-proxy", - ServiceAddress: "1.2.3.4", - ServicePort: 20001, - ServiceProxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "web-admin", - DestinationServiceID: "pod1-web-admin", - LocalServiceAddress: "127.0.0.1", - LocalServicePort: 9090, - }, - ServiceMeta: map[string]string{ - constants.MetaKeyPodName: "pod1", - metaKeyKubeServiceName: "web-admin", - constants.MetaKeyKubeNS: "default", - metaKeyManagedBy: constants.ManagedByValue, - metaKeySyntheticNode: "true", + "web-admin": { + { + ServiceID: "pod1-web-admin-sidecar-proxy", + ServiceName: "web-admin-sidecar-proxy", + ServiceAddress: "1.2.3.4", + ServicePort: 20001, + ServiceProxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "web-admin", + DestinationServiceID: "pod1-web-admin", + LocalServiceAddress: "127.0.0.1", + LocalServicePort: 9090, + }, + ServiceMeta: map[string]string{ + MetaKeyPodName: "pod1", + MetaKeyKubeServiceName: "web-admin", + MetaKeyKubeNS: "default", + MetaKeyManagedBy: managedByValue, + }, + ServiceTags: []string{}, }, - ServiceTags: []string{}, }, }, - expectedHealthChecks: []*api.HealthCheck{ + expectedAgentHealthChecks: []*api.AgentCheck{ { - CheckID: "default/pod1-web", + CheckID: "default/pod1-web/kubernetes-health-check", ServiceName: "web", ServiceID: "pod1-web", - Name: consulKubernetesCheckName, - Status: api.HealthPassing, - Output: kubernetesSuccessReasonMsg, - Type: consulKubernetesCheckType, - }, - { - CheckID: "default/pod1-web-sidecar-proxy", - ServiceName: "web-sidecar-proxy", - ServiceID: "pod1-web-sidecar-proxy", - Name: consulKubernetesCheckName, + Name: "Kubernetes Health Check", Status: api.HealthPassing, Output: kubernetesSuccessReasonMsg, - Type: consulKubernetesCheckType, + Type: ttl, }, { - CheckID: "default/pod1-web-admin", + CheckID: "default/pod1-web-admin/kubernetes-health-check", ServiceName: "web-admin", ServiceID: "pod1-web-admin", - Name: consulKubernetesCheckName, - Status: api.HealthPassing, - Output: kubernetesSuccessReasonMsg, - Type: consulKubernetesCheckType, - }, - { - CheckID: "default/pod1-web-admin-sidecar-proxy", - ServiceName: "web-admin-sidecar-proxy", - ServiceID: "pod1-web-admin-sidecar-proxy", - Name: consulKubernetesCheckName, + Name: "Kubernetes Health Check", Status: api.HealthPassing, Output: kubernetesSuccessReasonMsg, - Type: consulKubernetesCheckType, + Type: ttl, }, }, }, } for _, tt := range cases { t.Run(tt.name, func(t *testing.T) { + // The agent pod needs to have the address 
127.0.0.1 so when the + // code gets the agent pods via the label component=client, and + // makes requests against the agent API, it will actually hit the + // test server we have on localhost. + fakeClientPod := createPod("fake-consul-client", "127.0.0.1", false, true) + fakeClientPod.Labels = map[string]string{"component": "client", "app": "consul", "release": "consul"} + // Add the default namespace. ns := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "default"}} - node := corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: nodeName}} // Create fake k8s client - k8sObjects := append(tt.k8sObjects(), &ns, &node) + k8sObjects := append(tt.k8sObjects(), fakeClientPod, &ns) fakeClient := fake.NewClientBuilder().WithRuntimeObjects(k8sObjects...).Build() // Create test consul server. - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - consulClient := testClient.APIClient + consul, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { + c.NodeName = nodeName + }) + require.NoError(t, err) + defer consul.Stop() + consul.WaitForServiceIntentions(t) + + cfg := &api.Config{ + Address: consul.HTTPAddr, + } + consulClient, err := api.NewClient(cfg) + require.NoError(t, err) + addr := strings.Split(consul.HTTPAddr, ":") + consulPort := addr[1] // Register service and proxy in consul. for _, svc := range tt.initialConsulSvcs { - catalogRegistration := &api.CatalogRegistration{ - Node: consulNodeName, - Address: consulNodeAddress, - Service: svc, - } - _, err := consulClient.Catalog().Register(catalogRegistration, nil) + err = consulClient.Agent().ServiceRegister(svc) require.NoError(t, err) } // Create the endpoints controller - ep := &Controller{ + ep := &EndpointsController{ Client: fakeClient, Log: logrtest.TestLogger{T: t}, - ConsulClientConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, + ConsulClient: consulClient, + ConsulPort: consulPort, + ConsulScheme: "http", AllowK8sNamespacesSet: mapset.NewSetWith("*"), DenyK8sNamespacesSet: mapset.NewSetWith(), ReleaseName: "consul", ReleaseNamespace: "default", + ConsulClientCfg: cfg, } namespacedName := types.NamespacedName{ Namespace: "default", @@ -929,28 +1108,28 @@ func TestReconcileCreateEndpoint_MultiportService(t *testing.T) { // After reconciliation, Consul should have the service with the correct number of instances svcs := strings.Split(tt.consulSvcName, ",") - for i, service := range svcs { + for _, service := range svcs { serviceInstances, _, err := consulClient.Catalog().Service(service, "", nil) require.NoError(t, err) require.Len(t, serviceInstances, tt.expectedNumSvcInstances) - for _, instance := range serviceInstances { - require.Equal(t, tt.expectedConsulSvcInstances[i].ServiceID, instance.ServiceID) - require.Equal(t, tt.expectedConsulSvcInstances[i].ServiceName, instance.ServiceName) - require.Equal(t, tt.expectedConsulSvcInstances[i].ServiceAddress, instance.ServiceAddress) - require.Equal(t, tt.expectedConsulSvcInstances[i].ServicePort, instance.ServicePort) - require.Equal(t, tt.expectedConsulSvcInstances[i].ServiceMeta, instance.ServiceMeta) - require.Equal(t, tt.expectedConsulSvcInstances[i].ServiceTags, instance.ServiceTags) + for i, instance := range serviceInstances { + require.Equal(t, tt.expectedConsulSvcInstancesMap[service][i].ServiceID, instance.ServiceID) + require.Equal(t, tt.expectedConsulSvcInstancesMap[service][i].ServiceName, instance.ServiceName) + require.Equal(t, tt.expectedConsulSvcInstancesMap[service][i].ServiceAddress, instance.ServiceAddress) + 
require.Equal(t, tt.expectedConsulSvcInstancesMap[service][i].ServicePort, instance.ServicePort) + require.Equal(t, tt.expectedConsulSvcInstancesMap[service][i].ServiceMeta, instance.ServiceMeta) + require.Equal(t, tt.expectedConsulSvcInstancesMap[service][i].ServiceTags, instance.ServiceTags) } proxyServiceInstances, _, err := consulClient.Catalog().Service(fmt.Sprintf("%s-sidecar-proxy", service), "", nil) require.NoError(t, err) require.Len(t, proxyServiceInstances, tt.expectedNumSvcInstances) - for _, instance := range proxyServiceInstances { - require.Equal(t, tt.expectedProxySvcInstances[i].ServiceID, instance.ServiceID) - require.Equal(t, tt.expectedProxySvcInstances[i].ServiceName, instance.ServiceName) - require.Equal(t, tt.expectedProxySvcInstances[i].ServiceAddress, instance.ServiceAddress) - require.Equal(t, tt.expectedProxySvcInstances[i].ServicePort, instance.ServicePort) - require.Equal(t, tt.expectedProxySvcInstances[i].ServiceMeta, instance.ServiceMeta) - require.Equal(t, tt.expectedProxySvcInstances[i].ServiceTags, instance.ServiceTags) + for i, instance := range proxyServiceInstances { + require.Equal(t, tt.expectedProxySvcInstancesMap[service][i].ServiceID, instance.ServiceID) + require.Equal(t, tt.expectedProxySvcInstancesMap[service][i].ServiceName, instance.ServiceName) + require.Equal(t, tt.expectedProxySvcInstancesMap[service][i].ServiceAddress, instance.ServiceAddress) + require.Equal(t, tt.expectedProxySvcInstancesMap[service][i].ServicePort, instance.ServicePort) + require.Equal(t, tt.expectedProxySvcInstancesMap[service][i].ServiceMeta, instance.ServiceMeta) + require.Equal(t, tt.expectedProxySvcInstancesMap[service][i].ServiceTags, instance.ServiceTags) // When comparing the ServiceProxy field we ignore the DestinationNamespace // field within that struct because on Consul OSS it's set to "" but on Consul Enterprise @@ -960,45 +1139,58 @@ func TestReconcileCreateEndpoint_MultiportService(t *testing.T) { // To do the comparison that ignores that field we use go-cmp instead // of the regular require.Equal call since it supports ignoring certain // fields. - diff := cmp.Diff(tt.expectedProxySvcInstances[i].ServiceProxy, instance.ServiceProxy, + diff := cmp.Diff(tt.expectedProxySvcInstancesMap[service][i].ServiceProxy, instance.ServiceProxy, cmpopts.IgnoreFields(api.Upstream{}, "DestinationNamespace", "DestinationPartition")) require.Empty(t, diff, "expected objects to be equal") } + _, checkInfos, err := consulClient.Agent().AgentHealthServiceByName(fmt.Sprintf("%s-sidecar-proxy", service)) + expectedChecks := []string{"Proxy Public Listener", "Destination Alias"} + require.NoError(t, err) + require.Len(t, checkInfos, tt.expectedNumSvcInstances) + for _, checkInfo := range checkInfos { + checks := checkInfo.Checks + require.Contains(t, expectedChecks, checks[0].Name) + require.Contains(t, expectedChecks, checks[1].Name) + } } // Check that the Consul health check was created for the k8s pod. - for _, expectedCheck := range tt.expectedHealthChecks { - checks, _, err := consulClient.Health().Checks(expectedCheck.ServiceName, nil) - require.NoError(t, err) - require.Equal(t, len(checks), 1) - // Ignoring Namespace because the response from ENT includes it and OSS does not. 
- var ignoredFields = []string{"Node", "Definition", "Namespace", "Partition", "CreateIndex", "ModifyIndex", "ServiceTags"} - require.True(t, cmp.Equal(checks[0], expectedCheck, cmpopts.IgnoreFields(api.HealthCheck{}, ignoredFields...))) + if tt.expectedAgentHealthChecks != nil { + for i := range tt.expectedAgentHealthChecks { + filter := fmt.Sprintf("CheckID == `%s`", tt.expectedAgentHealthChecks[i].CheckID) + check, err := consulClient.Agent().ChecksWithFilter(filter) + require.NoError(t, err) + require.EqualValues(t, len(check), 1) + // Ignoring Namespace because the response from ENT includes it and OSS does not. + var ignoredFields = []string{"Node", "Definition", "Namespace", "Partition"} + require.True(t, cmp.Equal(check[tt.expectedAgentHealthChecks[i].CheckID], tt.expectedAgentHealthChecks[i], cmpopts.IgnoreFields(api.AgentCheck{}, ignoredFields...))) + } } }) } } // TestReconcileCreateEndpoint tests the logic to create service instances in Consul from the addresses in the Endpoints -// object. This test covers Controller.createServiceRegistrations and Controller.createGatewayRegistrations. -// This test depends on a Consul binary being present on the host machine. +// object. The cases test an empty endpoints object, a basic endpoints object with one address, a basic endpoints object +// with two addresses, and an endpoints object with every possible customization. +// This test covers EndpointsController.createServiceRegistrations. func TestReconcileCreateEndpoint(t *testing.T) { t.Parallel() + nodeName := "test-node" cases := []struct { name string - svcName string consulSvcName string k8sObjects func() []runtime.Object + initialConsulSvcs []*api.AgentServiceRegistration + expectedNumSvcInstances int expectedConsulSvcInstances []*api.CatalogService expectedProxySvcInstances []*api.CatalogService - expectedHealthChecks []*api.HealthCheck - metricsEnabled bool - nodeMeta map[string]string + expectedAgentHealthChecks []*api.AgentCheck expErr string + useProxyHealthChecks bool }{ { name: "Empty endpoints", - svcName: "service-created", consulSvcName: "service-created", k8sObjects: func() []runtime.Object { endpoint := &corev1.Endpoints{ @@ -1014,19 +1206,17 @@ func TestReconcileCreateEndpoint(t *testing.T) { } return []runtime.Object{endpoint} }, - expectedConsulSvcInstances: nil, - expectedProxySvcInstances: nil, - expectedHealthChecks: nil, + initialConsulSvcs: []*api.AgentServiceRegistration{}, + expectedNumSvcInstances: 0, + expectedConsulSvcInstances: []*api.CatalogService{}, + expectedProxySvcInstances: []*api.CatalogService{}, + expectedAgentHealthChecks: nil, }, { name: "Basic endpoints", - svcName: "service-created", consulSvcName: "service-created", - nodeMeta: map[string]string{ - "test-node": "true", - }, k8sObjects: func() []runtime.Object { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) + pod1 := createPod("pod1", "1.2.3.4", true, true) endpoint := &corev1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ Name: "service-created", @@ -1036,7 +1226,8 @@ func TestReconcileCreateEndpoint(t *testing.T) { { Addresses: []corev1.EndpointAddress{ { - IP: "1.2.3.4", + IP: "1.2.3.4", + NodeName: &nodeName, TargetRef: &corev1.ObjectReference{ Kind: "Pod", Name: "pod1", @@ -1049,19 +1240,16 @@ func TestReconcileCreateEndpoint(t *testing.T) { } return []runtime.Object{pod1, endpoint} }, + initialConsulSvcs: []*api.AgentServiceRegistration{}, + expectedNumSvcInstances: 1, expectedConsulSvcInstances: []*api.CatalogService{ { ServiceID: "pod1-service-created", ServiceName: 
"service-created", ServiceAddress: "1.2.3.4", ServicePort: 0, - ServiceMeta: map[string]string{constants.MetaKeyPodName: "pod1", metaKeyKubeServiceName: "service-created", constants.MetaKeyKubeNS: "default", metaKeyManagedBy: constants.ManagedByValue, metaKeySyntheticNode: "true"}, + ServiceMeta: map[string]string{MetaKeyPodName: "pod1", MetaKeyKubeServiceName: "service-created", MetaKeyKubeNS: "default", MetaKeyManagedBy: managedByValue}, ServiceTags: []string{}, - ServiceProxy: &api.AgentServiceConnectProxyConfig{}, - NodeMeta: map[string]string{ - "synthetic-node": "true", - "test-node": "true", - }, }, }, expectedProxySvcInstances: []*api.CatalogService{ @@ -1076,63 +1264,43 @@ func TestReconcileCreateEndpoint(t *testing.T) { LocalServiceAddress: "", LocalServicePort: 0, }, - ServiceMeta: map[string]string{constants.MetaKeyPodName: "pod1", metaKeyKubeServiceName: "service-created", constants.MetaKeyKubeNS: "default", metaKeyManagedBy: constants.ManagedByValue, metaKeySyntheticNode: "true"}, + ServiceMeta: map[string]string{MetaKeyPodName: "pod1", MetaKeyKubeServiceName: "service-created", MetaKeyKubeNS: "default", MetaKeyManagedBy: managedByValue}, ServiceTags: []string{}, - NodeMeta: map[string]string{ - "synthetic-node": "true", - "test-node": "true", - }, }, }, - expectedHealthChecks: []*api.HealthCheck{ + expectedAgentHealthChecks: []*api.AgentCheck{ { - CheckID: "default/pod1-service-created", + CheckID: "default/pod1-service-created/kubernetes-health-check", ServiceName: "service-created", ServiceID: "pod1-service-created", - Name: consulKubernetesCheckName, - Status: api.HealthPassing, - Output: kubernetesSuccessReasonMsg, - Type: consulKubernetesCheckType, - }, - { - CheckID: "default/pod1-service-created-sidecar-proxy", - ServiceName: "service-created-sidecar-proxy", - ServiceID: "pod1-service-created-sidecar-proxy", - Name: consulKubernetesCheckName, + Name: "Kubernetes Health Check", Status: api.HealthPassing, Output: kubernetesSuccessReasonMsg, - Type: consulKubernetesCheckType, + Type: ttl, }, }, }, { - name: "Mesh Gateway", - svcName: "mesh-gateway", - consulSvcName: "mesh-gateway", - nodeMeta: map[string]string{ - "test-node": "true", - }, + name: "Basic endpoints with proxy healthchecks", + useProxyHealthChecks: true, + consulSvcName: "service-created", k8sObjects: func() []runtime.Object { - gateway := createGatewayPod("mesh-gateway", "1.2.3.4", map[string]string{ - constants.AnnotationGatewayConsulServiceName: "mesh-gateway", - constants.AnnotationGatewayWANSource: "Static", - constants.AnnotationGatewayWANAddress: "2.3.4.5", - constants.AnnotationGatewayWANPort: "443", - constants.AnnotationMeshGatewayContainerPort: "8443", - constants.AnnotationGatewayKind: meshGateway}) + pod1 := createPod("pod1", "1.2.3.4", true, true) + pod1.Annotations[annotationUseProxyHealthCheck] = "true" endpoint := &corev1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ - Name: "mesh-gateway", + Name: "service-created", Namespace: "default", }, Subsets: []corev1.EndpointSubset{ { Addresses: []corev1.EndpointAddress{ { - IP: "1.2.3.4", + IP: "1.2.3.4", + NodeName: &nodeName, TargetRef: &corev1.ObjectReference{ Kind: "Pod", - Name: "mesh-gateway", + Name: "pod1", Namespace: "default", }, }, @@ -1140,139 +1308,77 @@ func TestReconcileCreateEndpoint(t *testing.T) { }, }, } - return []runtime.Object{gateway, endpoint} + return []runtime.Object{pod1, endpoint} }, + initialConsulSvcs: []*api.AgentServiceRegistration{}, + expectedNumSvcInstances: 1, expectedConsulSvcInstances: []*api.CatalogService{ { - 
ServiceID: "mesh-gateway", - ServiceName: "mesh-gateway", + ServiceID: "pod1-service-created", + ServiceName: "service-created", ServiceAddress: "1.2.3.4", - ServicePort: 8443, - ServiceMeta: map[string]string{constants.MetaKeyPodName: "mesh-gateway", metaKeyKubeServiceName: "mesh-gateway", constants.MetaKeyKubeNS: "default", metaKeyManagedBy: constants.ManagedByValue, metaKeySyntheticNode: "true"}, + ServicePort: 0, + ServiceMeta: map[string]string{MetaKeyPodName: "pod1", MetaKeyKubeServiceName: "service-created", MetaKeyKubeNS: "default", MetaKeyManagedBy: managedByValue}, ServiceTags: []string{}, - ServiceTaggedAddresses: map[string]api.ServiceAddress{ - "lan": { - Address: "1.2.3.4", - Port: 8443, - }, - "wan": { - Address: "2.3.4.5", - Port: 443, - }, - }, - ServiceProxy: &api.AgentServiceConnectProxyConfig{}, - NodeMeta: map[string]string{ - "synthetic-node": "true", - "test-node": "true", + }, + }, + expectedProxySvcInstances: []*api.CatalogService{ + { + ServiceID: "pod1-service-created-sidecar-proxy", + ServiceName: "service-created-sidecar-proxy", + ServiceAddress: "1.2.3.4", + ServicePort: 20000, + ServiceProxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "service-created", + DestinationServiceID: "pod1-service-created", + LocalServiceAddress: "", + LocalServicePort: 0, }, + ServiceMeta: map[string]string{MetaKeyPodName: "pod1", MetaKeyKubeServiceName: "service-created", MetaKeyKubeNS: "default", MetaKeyManagedBy: managedByValue}, + ServiceTags: []string{}, }, }, - expectedHealthChecks: []*api.HealthCheck{ + expectedAgentHealthChecks: []*api.AgentCheck{ { - CheckID: "default/mesh-gateway", - ServiceName: "mesh-gateway", - ServiceID: "mesh-gateway", - Name: consulKubernetesCheckName, + CheckID: "default/pod1-service-created/kubernetes-health-check", + ServiceName: "service-created", + ServiceID: "pod1-service-created", + Name: "Kubernetes Health Check", Status: api.HealthPassing, Output: kubernetesSuccessReasonMsg, - Type: consulKubernetesCheckType, + Type: ttl, }, }, }, { - name: "Mesh Gateway with Metrics enabled", - svcName: "mesh-gateway", - consulSvcName: "mesh-gateway", + name: "Endpoints with multiple addresses", + consulSvcName: "service-created", k8sObjects: func() []runtime.Object { - gateway := createGatewayPod("mesh-gateway", "1.2.3.4", map[string]string{ - constants.AnnotationGatewayConsulServiceName: "mesh-gateway", - constants.AnnotationGatewayWANSource: "Static", - constants.AnnotationGatewayWANAddress: "2.3.4.5", - constants.AnnotationGatewayWANPort: "443", - constants.AnnotationMeshGatewayContainerPort: "8443", - constants.AnnotationGatewayKind: meshGateway}) - endpoint := &corev1.Endpoints{ + pod1 := createPod("pod1", "1.2.3.4", true, true) + pod2 := createPod("pod2", "2.2.3.4", true, true) + endpointWithTwoAddresses := &corev1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ - Name: "mesh-gateway", + Name: "service-created", Namespace: "default", }, Subsets: []corev1.EndpointSubset{ { Addresses: []corev1.EndpointAddress{ { - IP: "1.2.3.4", + IP: "1.2.3.4", + NodeName: &nodeName, TargetRef: &corev1.ObjectReference{ Kind: "Pod", - Name: "mesh-gateway", + Name: "pod1", Namespace: "default", }, }, - }, - }, - }, - } - return []runtime.Object{gateway, endpoint} - }, - expectedConsulSvcInstances: []*api.CatalogService{ - { - ServiceID: "mesh-gateway", - ServiceName: "mesh-gateway", - ServiceAddress: "1.2.3.4", - ServicePort: 8443, - ServiceMeta: map[string]string{constants.MetaKeyPodName: "mesh-gateway", metaKeyKubeServiceName: "mesh-gateway", 
constants.MetaKeyKubeNS: "default", metaKeyManagedBy: constants.ManagedByValue, metaKeySyntheticNode: "true"}, - ServiceTags: []string{}, - ServiceTaggedAddresses: map[string]api.ServiceAddress{ - "lan": { - Address: "1.2.3.4", - Port: 8443, - }, - "wan": { - Address: "2.3.4.5", - Port: 443, - }, - }, - ServiceProxy: &api.AgentServiceConnectProxyConfig{ - Config: map[string]interface{}{ - "envoy_prometheus_bind_addr": "1.2.3.4:20200", - }, - }, - }, - }, - expectedHealthChecks: []*api.HealthCheck{ - { - CheckID: "default/mesh-gateway", - ServiceName: "mesh-gateway", - ServiceID: "mesh-gateway", - Name: consulKubernetesCheckName, - Status: api.HealthPassing, - Output: kubernetesSuccessReasonMsg, - Type: consulKubernetesCheckType, - }, - }, - metricsEnabled: true, - }, - { - name: "Terminating Gateway", - svcName: "terminating-gateway", - consulSvcName: "terminating-gateway", - k8sObjects: func() []runtime.Object { - gateway := createGatewayPod("terminating-gateway", "1.2.3.4", map[string]string{ - constants.AnnotationGatewayKind: terminatingGateway, - constants.AnnotationGatewayConsulServiceName: "terminating-gateway", - }) - endpoint := &corev1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Name: "terminating-gateway", - Namespace: "default", - }, - Subsets: []corev1.EndpointSubset{ - { - Addresses: []corev1.EndpointAddress{ { - IP: "1.2.3.4", + IP: "2.2.3.4", + NodeName: &nodeName, TargetRef: &corev1.ObjectReference{ Kind: "Pod", - Name: "terminating-gateway", + Name: "pod2", Namespace: "default", }, }, @@ -1280,357 +1386,26 @@ func TestReconcileCreateEndpoint(t *testing.T) { }, }, } - return []runtime.Object{gateway, endpoint} + return []runtime.Object{pod1, pod2, endpointWithTwoAddresses} }, + initialConsulSvcs: []*api.AgentServiceRegistration{}, + expectedNumSvcInstances: 2, expectedConsulSvcInstances: []*api.CatalogService{ { - ServiceID: "terminating-gateway", - ServiceName: "terminating-gateway", - ServiceAddress: "1.2.3.4", - ServicePort: 8443, - ServiceMeta: map[string]string{ - constants.MetaKeyPodName: "terminating-gateway", - metaKeyKubeServiceName: "terminating-gateway", - constants.MetaKeyKubeNS: "default", - metaKeyManagedBy: constants.ManagedByValue, - metaKeySyntheticNode: "true", - }, - ServiceTags: []string{}, - ServiceProxy: &api.AgentServiceConnectProxyConfig{}, - }, - }, - expectedHealthChecks: []*api.HealthCheck{ - { - CheckID: "default/terminating-gateway", - ServiceName: "terminating-gateway", - ServiceID: "terminating-gateway", - Name: consulKubernetesCheckName, - Status: api.HealthPassing, - Output: kubernetesSuccessReasonMsg, - Type: consulKubernetesCheckType, - }, - }, - }, - { - name: "Terminating Gateway with Metrics enabled", - metricsEnabled: true, - svcName: "terminating-gateway", - consulSvcName: "terminating-gateway", - k8sObjects: func() []runtime.Object { - gateway := createGatewayPod("terminating-gateway", "1.2.3.4", map[string]string{ - constants.AnnotationGatewayKind: terminatingGateway, - constants.AnnotationGatewayConsulServiceName: "terminating-gateway", - }) - endpoint := &corev1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Name: "terminating-gateway", - Namespace: "default", - }, - Subsets: []corev1.EndpointSubset{ - { - Addresses: []corev1.EndpointAddress{ - { - IP: "1.2.3.4", - TargetRef: &corev1.ObjectReference{ - Kind: "Pod", - Name: "terminating-gateway", - Namespace: "default", - }, - }, - }, - }, - }, - } - return []runtime.Object{gateway, endpoint} - }, - expectedConsulSvcInstances: []*api.CatalogService{ - { - ServiceID: 
"terminating-gateway", - ServiceName: "terminating-gateway", - ServiceAddress: "1.2.3.4", - ServicePort: 8443, - ServiceMeta: map[string]string{ - constants.MetaKeyPodName: "terminating-gateway", - metaKeyKubeServiceName: "terminating-gateway", - constants.MetaKeyKubeNS: "default", - metaKeyManagedBy: constants.ManagedByValue, - metaKeySyntheticNode: "true", - }, - ServiceTags: []string{}, - ServiceProxy: &api.AgentServiceConnectProxyConfig{ - Config: map[string]interface{}{ - "envoy_prometheus_bind_addr": "1.2.3.4:20200", - }, - }, - }, - }, - expectedHealthChecks: []*api.HealthCheck{ - { - CheckID: "default/terminating-gateway", - ServiceName: "terminating-gateway", - ServiceID: "terminating-gateway", - Name: consulKubernetesCheckName, - Status: api.HealthPassing, - Output: kubernetesSuccessReasonMsg, - Type: consulKubernetesCheckType, - }, - }, - }, - { - name: "Ingress Gateway", - svcName: "ingress-gateway", - consulSvcName: "ingress-gateway", - k8sObjects: func() []runtime.Object { - gateway := createGatewayPod("ingress-gateway", "1.2.3.4", map[string]string{ - constants.AnnotationGatewayConsulServiceName: "ingress-gateway", - constants.AnnotationGatewayKind: ingressGateway, - constants.AnnotationGatewayWANSource: "Service", - constants.AnnotationGatewayWANPort: "8443", - }) - endpoint := &corev1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Name: "ingress-gateway", - Namespace: "default", - }, - Subsets: []corev1.EndpointSubset{ - { - Addresses: []corev1.EndpointAddress{ - { - IP: "1.2.3.4", - TargetRef: &corev1.ObjectReference{ - Kind: "Pod", - Name: "ingress-gateway", - Namespace: "default", - }, - }, - }, - }, - }, - } - svc := &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "ingress-gateway", - Namespace: "default", - }, - Spec: corev1.ServiceSpec{ - Type: corev1.ServiceTypeLoadBalancer, - }, - Status: corev1.ServiceStatus{ - LoadBalancer: corev1.LoadBalancerStatus{ - Ingress: []corev1.LoadBalancerIngress{ - { - IP: "5.6.7.8", - }, - }, - }, - }, - } - return []runtime.Object{gateway, endpoint, svc} - }, - expectedConsulSvcInstances: []*api.CatalogService{ - { - ServiceID: "ingress-gateway", - ServiceName: "ingress-gateway", - ServiceAddress: "1.2.3.4", - ServicePort: 21000, - ServiceMeta: map[string]string{ - constants.MetaKeyPodName: "ingress-gateway", - metaKeyKubeServiceName: "ingress-gateway", - constants.MetaKeyKubeNS: "default", - metaKeyManagedBy: constants.ManagedByValue, - metaKeySyntheticNode: "true", - }, - ServiceTags: []string{}, - ServiceTaggedAddresses: map[string]api.ServiceAddress{ - "lan": { - Address: "1.2.3.4", - Port: 21000, - }, - "wan": { - Address: "5.6.7.8", - Port: 8443, - }, - }, - ServiceProxy: &api.AgentServiceConnectProxyConfig{ - Config: map[string]interface{}{ - "envoy_gateway_no_default_bind": true, - "envoy_gateway_bind_addresses": map[string]interface{}{ - "all-interfaces": map[string]interface{}{ - "address": "0.0.0.0", - }, - }, - }, - }, - }, - }, - expectedHealthChecks: []*api.HealthCheck{ - { - CheckID: "default/ingress-gateway", - ServiceName: "ingress-gateway", - ServiceID: "ingress-gateway", - Name: consulKubernetesCheckName, - Status: api.HealthPassing, - Output: kubernetesSuccessReasonMsg, - Type: consulKubernetesCheckType, - }, - }, - }, - { - name: "Ingress Gateway with Metrics enabled", - metricsEnabled: true, - svcName: "ingress-gateway", - consulSvcName: "ingress-gateway", - k8sObjects: func() []runtime.Object { - gateway := createGatewayPod("ingress-gateway", "1.2.3.4", map[string]string{ - 
constants.AnnotationGatewayConsulServiceName: "ingress-gateway", - constants.AnnotationGatewayKind: ingressGateway, - constants.AnnotationGatewayWANSource: "Service", - constants.AnnotationGatewayWANPort: "8443", - }) - endpoint := &corev1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Name: "ingress-gateway", - Namespace: "default", - }, - Subsets: []corev1.EndpointSubset{ - { - Addresses: []corev1.EndpointAddress{ - { - IP: "1.2.3.4", - TargetRef: &corev1.ObjectReference{ - Kind: "Pod", - Name: "ingress-gateway", - Namespace: "default", - }, - }, - }, - }, - }, - } - svc := &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "ingress-gateway", - Namespace: "default", - }, - Spec: corev1.ServiceSpec{ - Type: corev1.ServiceTypeLoadBalancer, - }, - Status: corev1.ServiceStatus{ - LoadBalancer: corev1.LoadBalancerStatus{ - Ingress: []corev1.LoadBalancerIngress{ - { - IP: "5.6.7.8", - }, - }, - }, - }, - } - return []runtime.Object{gateway, endpoint, svc} - }, - expectedConsulSvcInstances: []*api.CatalogService{ - { - ServiceID: "ingress-gateway", - ServiceName: "ingress-gateway", - ServiceAddress: "1.2.3.4", - ServicePort: 21000, - ServiceMeta: map[string]string{ - constants.MetaKeyPodName: "ingress-gateway", - metaKeyKubeServiceName: "ingress-gateway", - constants.MetaKeyKubeNS: "default", - metaKeyManagedBy: constants.ManagedByValue, - metaKeySyntheticNode: "true", - }, - ServiceTags: []string{}, - ServiceTaggedAddresses: map[string]api.ServiceAddress{ - "lan": { - Address: "1.2.3.4", - Port: 21000, - }, - "wan": { - Address: "5.6.7.8", - Port: 8443, - }, - }, - ServiceProxy: &api.AgentServiceConnectProxyConfig{ - Config: map[string]interface{}{ - "envoy_gateway_no_default_bind": true, - "envoy_gateway_bind_addresses": map[string]interface{}{ - "all-interfaces": map[string]interface{}{ - "address": "0.0.0.0", - }, - }, - "envoy_prometheus_bind_addr": "1.2.3.4:20200", - }, - }, - }, - }, - expectedHealthChecks: []*api.HealthCheck{ - { - CheckID: "default/ingress-gateway", - ServiceName: "ingress-gateway", - ServiceID: "ingress-gateway", - Name: consulKubernetesCheckName, - Status: api.HealthPassing, - Output: kubernetesSuccessReasonMsg, - Type: consulKubernetesCheckType, - }, - }, - }, - { - name: "Endpoints with multiple addresses", - svcName: "service-created", - consulSvcName: "service-created", - k8sObjects: func() []runtime.Object { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) - pod2 := createServicePod("pod2", "2.2.3.4", true, true) - endpointWithTwoAddresses := &corev1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Name: "service-created", - Namespace: "default", - }, - Subsets: []corev1.EndpointSubset{ - { - Addresses: []corev1.EndpointAddress{ - { - IP: "1.2.3.4", - TargetRef: &corev1.ObjectReference{ - Kind: "Pod", - Name: "pod1", - Namespace: "default", - }, - }, - { - IP: "2.2.3.4", - TargetRef: &corev1.ObjectReference{ - Kind: "Pod", - Name: "pod2", - Namespace: "default", - }, - }, - }, - }, - }, - } - return []runtime.Object{pod1, pod2, endpointWithTwoAddresses} - }, - expectedConsulSvcInstances: []*api.CatalogService{ - { - ServiceID: "pod1-service-created", - ServiceName: "service-created", + ServiceID: "pod1-service-created", + ServiceName: "service-created", ServiceAddress: "1.2.3.4", ServicePort: 0, - ServiceMeta: map[string]string{constants.MetaKeyPodName: "pod1", metaKeyKubeServiceName: "service-created", constants.MetaKeyKubeNS: "default", metaKeyManagedBy: constants.ManagedByValue, metaKeySyntheticNode: "true"}, + ServiceMeta: 
map[string]string{MetaKeyPodName: "pod1", MetaKeyKubeServiceName: "service-created", MetaKeyKubeNS: "default", MetaKeyManagedBy: managedByValue}, ServiceTags: []string{}, - ServiceProxy: &api.AgentServiceConnectProxyConfig{}, }, { ServiceID: "pod2-service-created", ServiceName: "service-created", ServiceAddress: "2.2.3.4", ServicePort: 0, - ServiceMeta: map[string]string{constants.MetaKeyPodName: "pod2", metaKeyKubeServiceName: "service-created", constants.MetaKeyKubeNS: "default", metaKeyManagedBy: constants.ManagedByValue, metaKeySyntheticNode: "true"}, + ServiceMeta: map[string]string{MetaKeyPodName: "pod2", MetaKeyKubeServiceName: "service-created", MetaKeyKubeNS: "default", MetaKeyManagedBy: managedByValue}, ServiceTags: []string{}, - ServiceProxy: &api.AgentServiceConnectProxyConfig{}, }, }, expectedProxySvcInstances: []*api.CatalogService{ @@ -1645,7 +1420,7 @@ func TestReconcileCreateEndpoint(t *testing.T) { LocalServiceAddress: "", LocalServicePort: 0, }, - ServiceMeta: map[string]string{constants.MetaKeyPodName: "pod1", metaKeyKubeServiceName: "service-created", constants.MetaKeyKubeNS: "default", metaKeyManagedBy: constants.ManagedByValue, metaKeySyntheticNode: "true"}, + ServiceMeta: map[string]string{MetaKeyPodName: "pod1", MetaKeyKubeServiceName: "service-created", MetaKeyKubeNS: "default", MetaKeyManagedBy: managedByValue}, ServiceTags: []string{}, }, { @@ -1659,46 +1434,28 @@ func TestReconcileCreateEndpoint(t *testing.T) { LocalServiceAddress: "", LocalServicePort: 0, }, - ServiceMeta: map[string]string{constants.MetaKeyPodName: "pod2", metaKeyKubeServiceName: "service-created", constants.MetaKeyKubeNS: "default", metaKeyManagedBy: constants.ManagedByValue, metaKeySyntheticNode: "true"}, + ServiceMeta: map[string]string{MetaKeyPodName: "pod2", MetaKeyKubeServiceName: "service-created", MetaKeyKubeNS: "default", MetaKeyManagedBy: managedByValue}, ServiceTags: []string{}, }, }, - expectedHealthChecks: []*api.HealthCheck{ + expectedAgentHealthChecks: []*api.AgentCheck{ { - CheckID: "default/pod1-service-created", + CheckID: "default/pod1-service-created/kubernetes-health-check", ServiceName: "service-created", ServiceID: "pod1-service-created", - Name: consulKubernetesCheckName, - Status: api.HealthPassing, - Output: kubernetesSuccessReasonMsg, - Type: consulKubernetesCheckType, - }, - { - CheckID: "default/pod1-service-created-sidecar-proxy", - ServiceName: "service-created-sidecar-proxy", - ServiceID: "pod1-service-created-sidecar-proxy", - Name: consulKubernetesCheckName, + Name: "Kubernetes Health Check", Status: api.HealthPassing, Output: kubernetesSuccessReasonMsg, - Type: consulKubernetesCheckType, + Type: ttl, }, { - CheckID: "default/pod2-service-created", + CheckID: "default/pod2-service-created/kubernetes-health-check", ServiceName: "service-created", ServiceID: "pod2-service-created", - Name: consulKubernetesCheckName, - Status: api.HealthPassing, - Output: kubernetesSuccessReasonMsg, - Type: consulKubernetesCheckType, - }, - { - CheckID: "default/pod2-service-created-sidecar-proxy", - ServiceName: "service-created-sidecar-proxy", - ServiceID: "pod2-service-created-sidecar-proxy", - Name: consulKubernetesCheckName, + Name: "Kubernetes Health Check", Status: api.HealthPassing, Output: kubernetesSuccessReasonMsg, - Type: consulKubernetesCheckType, + Type: ttl, }, }, }, @@ -1707,11 +1464,10 @@ func TestReconcileCreateEndpoint(t *testing.T) { // on the invalid address but continue and process the other addresses. 
We check for the error specific to
// pod3 being non-existent at the end, and validate that the other 2 addresses have service instances.
name: "Endpoints with multiple addresses but one is invalid",
- svcName: "service-created",
consulSvcName: "service-created",
k8sObjects: func() []runtime.Object {
- pod1 := createServicePod("pod1", "1.2.3.4", true, true)
- pod2 := createServicePod("pod2", "2.2.3.4", true, true)
+ pod1 := createPod("pod1", "1.2.3.4", true, true)
+ pod2 := createPod("pod2", "2.2.3.4", true, true)
endpointWithTwoAddresses := &corev1.Endpoints{
ObjectMeta: metav1.ObjectMeta{
Name: "service-created",
@@ -1722,7 +1478,8 @@ func TestReconcileCreateEndpoint(t *testing.T) {
Addresses: []corev1.EndpointAddress{
// This is an invalid address because pod3 will not exist in k8s.
{
- IP: "9.9.9.9",
+ IP: "9.9.9.9",
+ NodeName: &nodeName,
TargetRef: &corev1.ObjectReference{
Kind: "Pod",
Name: "pod3",
@@ -1731,7 +1488,8 @@
},
// The next two are valid addresses.
{
- IP: "1.2.3.4",
+ IP: "1.2.3.4",
+ NodeName: &nodeName,
TargetRef: &corev1.ObjectReference{
Kind: "Pod",
Name: "pod1",
@@ -1739,7 +1497,8 @@
},
},
{
- IP: "2.2.3.4",
+ IP: "2.2.3.4",
+ NodeName: &nodeName,
TargetRef: &corev1.ObjectReference{
Kind: "Pod",
Name: "pod2",
@@ -1752,24 +1511,24 @@
},
}
return []runtime.Object{pod1, pod2, endpointWithTwoAddresses}
},
+ initialConsulSvcs: []*api.AgentServiceRegistration{},
+ expectedNumSvcInstances: 2,
expectedConsulSvcInstances: []*api.CatalogService{
{
ServiceID: "pod1-service-created",
ServiceName: "service-created",
ServiceAddress: "1.2.3.4",
ServicePort: 0,
- ServiceMeta: map[string]string{constants.MetaKeyPodName: "pod1", metaKeyKubeServiceName: "service-created", constants.MetaKeyKubeNS: "default", metaKeyManagedBy: constants.ManagedByValue, metaKeySyntheticNode: "true"},
+ ServiceMeta: map[string]string{MetaKeyPodName: "pod1", MetaKeyKubeServiceName: "service-created", MetaKeyKubeNS: "default", MetaKeyManagedBy: managedByValue},
ServiceTags: []string{},
- ServiceProxy: &api.AgentServiceConnectProxyConfig{},
},
{
ServiceID: "pod2-service-created",
ServiceName: "service-created",
ServiceAddress: "2.2.3.4",
ServicePort: 0,
- ServiceMeta: map[string]string{constants.MetaKeyPodName: "pod2", metaKeyKubeServiceName: "service-created", constants.MetaKeyKubeNS: "default", metaKeyManagedBy: constants.ManagedByValue, metaKeySyntheticNode: "true"},
+ ServiceMeta: map[string]string{MetaKeyPodName: "pod2", MetaKeyKubeServiceName: "service-created", MetaKeyKubeNS: "default", MetaKeyManagedBy: managedByValue},
ServiceTags: []string{},
- ServiceProxy: &api.AgentServiceConnectProxyConfig{},
},
},
expectedProxySvcInstances: []*api.CatalogService{
@@ -1784,7 +1543,7 @@ func TestReconcileCreateEndpoint(t *testing.T) {
LocalServiceAddress: "",
LocalServicePort: 0,
},
- ServiceMeta: map[string]string{constants.MetaKeyPodName: "pod1", metaKeyKubeServiceName: "service-created", constants.MetaKeyKubeNS: "default", metaKeyManagedBy: constants.ManagedByValue, metaKeySyntheticNode: "true"},
+ ServiceMeta: map[string]string{MetaKeyPodName: "pod1", MetaKeyKubeServiceName: "service-created", MetaKeyKubeNS: "default", MetaKeyManagedBy: managedByValue},
ServiceTags: []string{},
},
{
@@ -1798,56 +1557,47 @@ func TestReconcileCreateEndpoint(t *testing.T) {
LocalServiceAddress: "",
LocalServicePort: 0,
},
- ServiceMeta: map[string]string{constants.MetaKeyPodName: "pod2", 
metaKeyKubeServiceName: "service-created", constants.MetaKeyKubeNS: "default", metaKeyManagedBy: constants.ManagedByValue, metaKeySyntheticNode: "true"}, + ServiceMeta: map[string]string{MetaKeyPodName: "pod2", MetaKeyKubeServiceName: "service-created", MetaKeyKubeNS: "default", MetaKeyManagedBy: managedByValue}, ServiceTags: []string{}, }, }, - expectedHealthChecks: []*api.HealthCheck{ + expectedAgentHealthChecks: []*api.AgentCheck{ { - CheckID: "default/pod1-service-created", + CheckID: "default/pod1-service-created/kubernetes-health-check", ServiceName: "service-created", ServiceID: "pod1-service-created", - Name: consulKubernetesCheckName, - Status: api.HealthPassing, - Output: kubernetesSuccessReasonMsg, - Type: consulKubernetesCheckType, - }, - { - CheckID: "default/pod1-service-created-sidecar-proxy", - ServiceName: "service-created-sidecar-proxy", - ServiceID: "pod1-service-created-sidecar-proxy", - Name: consulKubernetesCheckName, + Name: "Kubernetes Health Check", Status: api.HealthPassing, Output: kubernetesSuccessReasonMsg, - Type: consulKubernetesCheckType, + Type: ttl, }, { - CheckID: "default/pod2-service-created-sidecar-proxy", - ServiceName: "service-created-sidecar-proxy", - ServiceID: "pod2-service-created-sidecar-proxy", - Name: consulKubernetesCheckName, + CheckID: "default/pod2-service-created/kubernetes-health-check", + ServiceName: "service-created", + ServiceID: "pod2-service-created", + Name: "Kubernetes Health Check", Status: api.HealthPassing, Output: kubernetesSuccessReasonMsg, - Type: consulKubernetesCheckType, + Type: ttl, }, }, expErr: "1 error occurred:\n\t* pods \"pod3\" not found\n\n", }, { name: "Every configurable field set: port, different Consul service name, meta, tags, upstreams, metrics", - svcName: "service-created", consulSvcName: "different-consul-svc-name", k8sObjects: func() []runtime.Object { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) - pod1.Annotations[constants.AnnotationPort] = "1234" - pod1.Annotations[constants.AnnotationService] = "different-consul-svc-name" - pod1.Annotations[fmt.Sprintf("%sname", constants.AnnotationMeta)] = "abc" - pod1.Annotations[fmt.Sprintf("%sversion", constants.AnnotationMeta)] = "2" - pod1.Annotations[fmt.Sprintf("%spod_name", constants.AnnotationMeta)] = "$POD_NAME" - pod1.Annotations[constants.AnnotationTags] = "abc\\,123,$POD_NAME" - pod1.Annotations[constants.AnnotationUpstreams] = "upstream1:1234" - pod1.Annotations[constants.AnnotationEnableMetrics] = "true" - pod1.Annotations[constants.AnnotationPrometheusScrapePort] = "12345" + pod1 := createPod("pod1", "1.2.3.4", true, true) + pod1.Annotations[annotationPort] = "1234" + pod1.Annotations[annotationService] = "different-consul-svc-name" + pod1.Annotations[fmt.Sprintf("%sname", annotationMeta)] = "abc" + pod1.Annotations[fmt.Sprintf("%sversion", annotationMeta)] = "2" + pod1.Annotations[fmt.Sprintf("%spod_name", annotationMeta)] = "$POD_NAME" + pod1.Annotations[annotationTags] = "abc\\,123,$POD_NAME" + pod1.Annotations[annotationConnectTags] = "def\\,456,$POD_NAME" + pod1.Annotations[annotationUpstreams] = "upstream1:1234" + pod1.Annotations[annotationEnableMetrics] = "true" + pod1.Annotations[annotationPrometheusScrapePort] = "12345" endpoint := &corev1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ Name: "service-created", @@ -1857,7 +1607,8 @@ func TestReconcileCreateEndpoint(t *testing.T) { { Addresses: []corev1.EndpointAddress{ { - IP: "1.2.3.4", + IP: "1.2.3.4", + NodeName: &nodeName, TargetRef: &corev1.ObjectReference{ Kind: "Pod", Name: 
"pod1", @@ -1870,6 +1621,8 @@ func TestReconcileCreateEndpoint(t *testing.T) { } return []runtime.Object{pod1, endpoint} }, + initialConsulSvcs: []*api.AgentServiceRegistration{}, + expectedNumSvcInstances: 1, expectedConsulSvcInstances: []*api.CatalogService{ { ServiceID: "pod1-different-consul-svc-name", @@ -1877,17 +1630,15 @@ func TestReconcileCreateEndpoint(t *testing.T) { ServiceAddress: "1.2.3.4", ServicePort: 1234, ServiceMeta: map[string]string{ - "name": "abc", - "version": "2", - "pod_name": "pod1", - constants.MetaKeyPodName: "pod1", - metaKeyKubeServiceName: "service-created", - constants.MetaKeyKubeNS: "default", - metaKeyManagedBy: constants.ManagedByValue, - metaKeySyntheticNode: "true", + "name": "abc", + "version": "2", + "pod_name": "pod1", + MetaKeyPodName: "pod1", + MetaKeyKubeServiceName: "service-created", + MetaKeyKubeNS: "default", + MetaKeyManagedBy: managedByValue, }, - ServiceTags: []string{"abc,123", "pod1"}, - ServiceProxy: &api.AgentServiceConnectProxyConfig{}, + ServiceTags: []string{"abc,123", "pod1", "def,456", "pod1"}, }, }, expectedProxySvcInstances: []*api.CatalogService{ @@ -1913,36 +1664,26 @@ func TestReconcileCreateEndpoint(t *testing.T) { }, }, ServiceMeta: map[string]string{ - "name": "abc", - "version": "2", - "pod_name": "pod1", - constants.MetaKeyPodName: "pod1", - metaKeyKubeServiceName: "service-created", - constants.MetaKeyKubeNS: "default", - metaKeyManagedBy: constants.ManagedByValue, - metaKeySyntheticNode: "true", + "name": "abc", + "version": "2", + "pod_name": "pod1", + MetaKeyPodName: "pod1", + MetaKeyKubeServiceName: "service-created", + MetaKeyKubeNS: "default", + MetaKeyManagedBy: managedByValue, }, - ServiceTags: []string{"abc,123", "pod1"}, + ServiceTags: []string{"abc,123", "pod1", "def,456", "pod1"}, }, }, - expectedHealthChecks: []*api.HealthCheck{ + expectedAgentHealthChecks: []*api.AgentCheck{ { - CheckID: "default/pod1-different-consul-svc-name", + CheckID: "default/pod1-different-consul-svc-name/kubernetes-health-check", ServiceName: "different-consul-svc-name", ServiceID: "pod1-different-consul-svc-name", - Name: consulKubernetesCheckName, - Status: api.HealthPassing, - Output: kubernetesSuccessReasonMsg, - Type: consulKubernetesCheckType, - }, - { - CheckID: "default/pod1-different-consul-svc-name-sidecar-proxy", - ServiceName: "different-consul-svc-name-sidecar-proxy", - ServiceID: "pod1-different-consul-svc-name-sidecar-proxy", - Name: consulKubernetesCheckName, + Name: "Kubernetes Health Check", Status: api.HealthPassing, Output: kubernetesSuccessReasonMsg, - Type: consulKubernetesCheckType, + Type: ttl, }, }, }, @@ -1950,11 +1691,10 @@ func TestReconcileCreateEndpoint(t *testing.T) { // register the mesh pods. { name: "Some endpoints injected, some not.", - svcName: "service-created", consulSvcName: "service-created", k8sObjects: func() []runtime.Object { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) - pod2 := createServicePod("pod2", "2.3.4.5", false, false) + pod1 := createPod("pod1", "1.2.3.4", true, true) + pod2 := createPod("pod2", "2.3.4.5", false, false) // NOTE: the order of the addresses is important. The non-mesh pod must be first to correctly // reproduce the bug where we were exiting the loop early if any pod was non-mesh. 
@@ -1967,7 +1707,8 @@ func TestReconcileCreateEndpoint(t *testing.T) { { Addresses: []corev1.EndpointAddress{ { - IP: "2.3.4.5", + IP: "2.3.4.5", + NodeName: &nodeName, TargetRef: &corev1.ObjectReference{ Kind: "Pod", Name: "pod2", @@ -1975,7 +1716,8 @@ func TestReconcileCreateEndpoint(t *testing.T) { }, }, { - IP: "1.2.3.4", + IP: "1.2.3.4", + NodeName: &nodeName, TargetRef: &corev1.ObjectReference{ Kind: "Pod", Name: "pod1", @@ -1988,15 +1730,16 @@ func TestReconcileCreateEndpoint(t *testing.T) { } return []runtime.Object{pod1, pod2, endpointWithTwoAddresses} }, + initialConsulSvcs: []*api.AgentServiceRegistration{}, + expectedNumSvcInstances: 1, expectedConsulSvcInstances: []*api.CatalogService{ { ServiceID: "pod1-service-created", ServiceName: "service-created", ServiceAddress: "1.2.3.4", ServicePort: 0, - ServiceMeta: map[string]string{constants.MetaKeyPodName: "pod1", metaKeyKubeServiceName: "service-created", constants.MetaKeyKubeNS: "default", metaKeyManagedBy: constants.ManagedByValue, metaKeySyntheticNode: "true"}, + ServiceMeta: map[string]string{MetaKeyPodName: "pod1", MetaKeyKubeServiceName: "service-created", MetaKeyKubeNS: "default", MetaKeyManagedBy: managedByValue}, ServiceTags: []string{}, - ServiceProxy: &api.AgentServiceConnectProxyConfig{}, }, }, expectedProxySvcInstances: []*api.CatalogService{ @@ -2011,67 +1754,77 @@ func TestReconcileCreateEndpoint(t *testing.T) { LocalServiceAddress: "", LocalServicePort: 0, }, - ServiceMeta: map[string]string{constants.MetaKeyPodName: "pod1", metaKeyKubeServiceName: "service-created", constants.MetaKeyKubeNS: "default", metaKeyManagedBy: constants.ManagedByValue, metaKeySyntheticNode: "true"}, + ServiceMeta: map[string]string{MetaKeyPodName: "pod1", MetaKeyKubeServiceName: "service-created", MetaKeyKubeNS: "default", MetaKeyManagedBy: managedByValue}, ServiceTags: []string{}, }, }, - expectedHealthChecks: []*api.HealthCheck{ + expectedAgentHealthChecks: []*api.AgentCheck{ { - CheckID: "default/pod1-service-created", + CheckID: "default/pod1-service-created/kubernetes-health-check", ServiceName: "service-created", ServiceID: "pod1-service-created", - Name: consulKubernetesCheckName, - Status: api.HealthPassing, - Output: kubernetesSuccessReasonMsg, - Type: consulKubernetesCheckType, - }, - { - CheckID: "default/pod1-service-created-sidecar-proxy", - ServiceName: "service-created-sidecar-proxy", - ServiceID: "pod1-service-created-sidecar-proxy", - Name: consulKubernetesCheckName, + Name: "Kubernetes Health Check", Status: api.HealthPassing, Output: kubernetesSuccessReasonMsg, - Type: consulKubernetesCheckType, + Type: ttl, }, }, }, } for _, tt := range cases { t.Run(tt.name, func(t *testing.T) { + // The agent pod needs to have the address 127.0.0.1 so when the + // code gets the agent pods via the label component=client, and + // makes requests against the agent API, it will actually hit the + // test server we have on localhost. + fakeClientPod := createPod("fake-consul-client", "127.0.0.1", false, true) + fakeClientPod.Labels = map[string]string{"component": "client", "app": "consul", "release": "consul"} + // Add the default namespace. ns := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "default"}} - node := corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: nodeName}} // Create fake k8s client - k8sObjects := append(tt.k8sObjects(), &ns, &node) + k8sObjects := append(tt.k8sObjects(), fakeClientPod, &ns) fakeClient := fake.NewClientBuilder().WithRuntimeObjects(k8sObjects...).Build() - // Create test consulServer server. 
- testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - consulClient := testClient.APIClient + // Create test consul server. + consul, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { + c.NodeName = nodeName + }) + require.NoError(t, err) + defer consul.Stop() + consul.WaitForServiceIntentions(t) - // Create the endpoints controller. - ep := &Controller{ + cfg := &api.Config{ + Address: consul.HTTPAddr, + } + consulClient, err := api.NewClient(cfg) + require.NoError(t, err) + addr := strings.Split(consul.HTTPAddr, ":") + consulPort := addr[1] + + // Register service and proxy in consul. + for _, svc := range tt.initialConsulSvcs { + err = consulClient.Agent().ServiceRegister(svc) + require.NoError(t, err) + } + + // Create the endpoints controller + ep := &EndpointsController{ Client: fakeClient, Log: logrtest.TestLogger{T: t}, - ConsulClientConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, + ConsulClient: consulClient, + ConsulPort: consulPort, + ConsulScheme: "http", AllowK8sNamespacesSet: mapset.NewSetWith("*"), DenyK8sNamespacesSet: mapset.NewSetWith(), - ReleaseName: "consulServer", + ReleaseName: "consul", ReleaseNamespace: "default", - NodeMeta: tt.nodeMeta, - } - if tt.metricsEnabled { - ep.MetricsConfig = metrics.Config{ - DefaultEnableMetrics: true, - EnableGatewayMetrics: true, - } + ConsulClientCfg: cfg, } namespacedName := types.NamespacedName{ Namespace: "default", - Name: tt.svcName, + Name: "service-created", } resp, err := ep.Reconcile(context.Background(), ctrl.Request{ @@ -2087,7 +1840,7 @@ func TestReconcileCreateEndpoint(t *testing.T) { // After reconciliation, Consul should have the service with the correct number of instances serviceInstances, _, err := consulClient.Catalog().Service(tt.consulSvcName, "", nil) require.NoError(t, err) - require.Len(t, serviceInstances, len(tt.expectedConsulSvcInstances)) + require.Len(t, serviceInstances, tt.expectedNumSvcInstances) for i, instance := range serviceInstances { require.Equal(t, tt.expectedConsulSvcInstances[i].ServiceID, instance.ServiceID) require.Equal(t, tt.expectedConsulSvcInstances[i].ServiceName, instance.ServiceName) @@ -2095,15 +1848,10 @@ func TestReconcileCreateEndpoint(t *testing.T) { require.Equal(t, tt.expectedConsulSvcInstances[i].ServicePort, instance.ServicePort) require.Equal(t, tt.expectedConsulSvcInstances[i].ServiceMeta, instance.ServiceMeta) require.Equal(t, tt.expectedConsulSvcInstances[i].ServiceTags, instance.ServiceTags) - require.Equal(t, tt.expectedConsulSvcInstances[i].ServiceTaggedAddresses, instance.ServiceTaggedAddresses) - require.Equal(t, tt.expectedConsulSvcInstances[i].ServiceProxy, instance.ServiceProxy) - if tt.nodeMeta != nil { - require.Equal(t, tt.expectedConsulSvcInstances[i].NodeMeta, instance.NodeMeta) - } } proxyServiceInstances, _, err := consulClient.Catalog().Service(fmt.Sprintf("%s-sidecar-proxy", tt.consulSvcName), "", nil) require.NoError(t, err) - require.Len(t, proxyServiceInstances, len(tt.expectedProxySvcInstances)) + require.Len(t, proxyServiceInstances, tt.expectedNumSvcInstances) for i, instance := range proxyServiceInstances { require.Equal(t, tt.expectedProxySvcInstances[i].ServiceID, instance.ServiceID) require.Equal(t, tt.expectedProxySvcInstances[i].ServiceName, instance.ServiceName) @@ -2111,9 +1859,7 @@ func TestReconcileCreateEndpoint(t *testing.T) { require.Equal(t, tt.expectedProxySvcInstances[i].ServicePort, instance.ServicePort) require.Equal(t, tt.expectedProxySvcInstances[i].ServiceMeta, 
instance.ServiceMeta) require.Equal(t, tt.expectedProxySvcInstances[i].ServiceTags, instance.ServiceTags) - if tt.nodeMeta != nil { - require.Equal(t, tt.expectedProxySvcInstances[i].NodeMeta, instance.NodeMeta) - } + // When comparing the ServiceProxy field we ignore the DestinationNamespace // field within that struct because on Consul OSS it's set to "" but on Consul Enterprise // it's set to "default" and we want to re-use this test for both OSS and Ent. @@ -2127,20 +1873,43 @@ func TestReconcileCreateEndpoint(t *testing.T) { require.Empty(t, diff, "expected objects to be equal") } - // Check that the Consul health expectedCheck was created for the k8s pod. - for _, expectedCheck := range tt.expectedHealthChecks { - filter := fmt.Sprintf("ServiceID == %q", expectedCheck.ServiceID) - checks, _, err := consulClient.Health().Checks(expectedCheck.ServiceName, &api.QueryOptions{Filter: filter}) - require.NoError(t, err) - require.Equal(t, len(checks), 1) - // Ignoring Namespace because the response from ENT includes it and OSS does not. - var ignoredFields = []string{"Node", "Definition", "Namespace", "Partition", "CreateIndex", "ModifyIndex", "ServiceTags"} - require.True(t, cmp.Equal(checks[0], expectedCheck, cmpopts.IgnoreFields(api.HealthCheck{}, ignoredFields...))) + _, checkInfos, err := consulClient.Agent().AgentHealthServiceByName(fmt.Sprintf("%s-sidecar-proxy", tt.consulSvcName)) + expectedChecks := []string{"Proxy Public Listener", "Destination Alias"} + require.NoError(t, err) + require.Len(t, checkInfos, tt.expectedNumSvcInstances) + for _, checkInfo := range checkInfos { + checks := checkInfo.Checks + require.Contains(t, expectedChecks, checks[0].Name) + require.Contains(t, expectedChecks, checks[1].Name) } - }) - } -} - + agentChecks, err := consulClient.Agent().Checks() + require.NoError(t, err) + for _, check := range agentChecks { + if check.Name == "Proxy Public Listener" { + if tt.useProxyHealthChecks { + require.Equal(t, "http", check.Type) + } else { + require.Equal(t, "tcp", check.Type) + } + } + } + + // Check that the Consul health check was created for the k8s pod. + if tt.expectedAgentHealthChecks != nil { + for i := range tt.expectedConsulSvcInstances { + filter := fmt.Sprintf("CheckID == `%s`", tt.expectedAgentHealthChecks[i].CheckID) + check, err := consulClient.Agent().ChecksWithFilter(filter) + require.NoError(t, err) + require.EqualValues(t, len(check), 1) + // Ignoring Namespace because the response from ENT includes it and OSS does not. + var ignoredFields = []string{"Node", "Definition", "Namespace", "Partition"} + require.True(t, cmp.Equal(check[tt.expectedAgentHealthChecks[i].CheckID], tt.expectedAgentHealthChecks[i], cmpopts.IgnoreFields(api.AgentCheck{}, ignoredFields...))) + } + } + }) + } +} + // Tests updating an Endpoints object. // - Tests updates via the register codepath: // - When an address in an Endpoint is updated, that the corresponding service instance in Consul is updated. @@ -2153,25 +1922,28 @@ func TestReconcileCreateEndpoint(t *testing.T) { // // For the register and deregister codepath, this also tests that they work when the Consul service name is different // from the K8s service name. -// This test covers Controller.deregisterService when services should be selectively deregistered +// This test covers EndpointsController.deregisterServiceOnAllAgents when services should be selectively deregistered // since the map will not be nil. 
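// For orientation, each update case below follows the same rough shape: seed
// Consul with the case's initial services through the agent API, reconcile the
// Endpoints object, then assert on the resulting catalog and check state. A
// hedged sketch using the names the test body defines (the literal values are
// illustrative only, not additional assertions):
//
//	svc := &api.AgentServiceRegistration{
//		ID:      "pod1-service-updated",
//		Name:    "service-updated",
//		Port:    80,
//		Address: "1.2.3.4",
//	}
//	err := consulClient.Agent().ServiceRegister(svc) // seed the initial state
//	require.NoError(t, err)
//	resp, err := ep.Reconcile(context.Background(), ctrl.Request{NamespacedName: namespacedName})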
func TestReconcileUpdateEndpoint(t *testing.T) { t.Parallel() + nodeName := "test-node" cases := []struct { name string consulSvcName string k8sObjects func() []runtime.Object - initialConsulSvcs []*api.CatalogRegistration + initialConsulSvcs []*api.AgentServiceRegistration expectedConsulSvcInstances []*api.CatalogService expectedProxySvcInstances []*api.CatalogService - expectedHealthChecks []*api.HealthCheck + expectedAgentHealthChecks []*api.AgentCheck enableACLs bool }{ + // Legacy services are not managed by endpoints controller, but endpoints controller + // will still add/update the legacy service's health checks. { - name: "Endpoints has an updated address because health check changes from unhealthy to healthy", + name: "Legacy service: Health check is added when the pod is healthy", consulSvcName: "service-updated", k8sObjects: func() []runtime.Object { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) + pod1 := createPod("pod1", "1.2.3.4", true, false) endpoint := &corev1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ Name: "service-updated", @@ -2181,7 +1953,8 @@ func TestReconcileUpdateEndpoint(t *testing.T) { { Addresses: []corev1.EndpointAddress{ { - IP: "1.2.3.4", + IP: "1.2.3.4", + NodeName: &nodeName, TargetRef: &corev1.ObjectReference{ Kind: "Pod", Name: "pod1", @@ -2194,48 +1967,22 @@ func TestReconcileUpdateEndpoint(t *testing.T) { } return []runtime.Object{pod1, endpoint} }, - initialConsulSvcs: []*api.CatalogRegistration{ + initialConsulSvcs: []*api.AgentServiceRegistration{ { - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - ID: "pod1-service-updated", - Service: "service-updated", - Port: 80, - Address: "1.2.3.4", - Meta: map[string]string{constants.MetaKeyKubeNS: "default"}, - }, - Check: &api.AgentCheck{ - CheckID: "default/pod1-service-updated", - Name: consulKubernetesCheckName, - Type: consulKubernetesCheckType, - Status: api.HealthCritical, - ServiceID: "pod1-service-updated", - ServiceName: "service-updated", - }, + ID: "pod1-service-updated", + Name: "service-updated", + Port: 80, + Address: "1.2.3.4", }, { - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod1-service-updated-sidecar-proxy", - Service: "service-updated-sidecar-proxy", - Port: 20000, - Address: "1.2.3.4", - Meta: map[string]string{constants.MetaKeyKubeNS: "default"}, - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "service-updated", - DestinationServiceID: "pod1-service-updated", - }, - }, - Check: &api.AgentCheck{ - CheckID: "default/pod1-service-updated-sidecar-proxy", - Name: consulKubernetesCheckName, - Type: consulKubernetesCheckType, - Status: api.HealthCritical, - ServiceID: "pod1-service-updated-sidecar-proxy", - ServiceName: "service-updated-sidecar-proxy", + Kind: api.ServiceKindConnectProxy, + ID: "pod1-service-updated-sidecar-proxy", + Name: "service-updated-sidecar-proxy", + Port: 20000, + Address: "1.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "service-updated", + DestinationServiceID: "pod1-service-updated", }, }, }, @@ -2251,32 +1998,23 @@ func TestReconcileUpdateEndpoint(t *testing.T) { ServiceAddress: "1.2.3.4", }, }, - expectedHealthChecks: []*api.HealthCheck{ + expectedAgentHealthChecks: []*api.AgentCheck{ { - CheckID: "default/pod1-service-updated", + CheckID: "default/pod1-service-updated/kubernetes-health-check", ServiceName: "service-updated", ServiceID: "pod1-service-updated", - Name: 
consulKubernetesCheckName, - Status: api.HealthPassing, - Output: kubernetesSuccessReasonMsg, - Type: consulKubernetesCheckType, - }, - { - CheckID: "default/pod1-service-updated-sidecar-proxy", - ServiceName: "service-updated-sidecar-proxy", - ServiceID: "pod1-service-updated-sidecar-proxy", - Name: consulKubernetesCheckName, + Name: "Kubernetes Health Check", Status: api.HealthPassing, Output: kubernetesSuccessReasonMsg, - Type: consulKubernetesCheckType, + Type: ttl, }, }, }, { - name: "Endpoints has an updated address because health check changes from healthy to unhealthy", + name: "Legacy service: Health check is added when the pod is unhealthy", consulSvcName: "service-updated", k8sObjects: func() []runtime.Object { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) + pod1 := createPod("pod1", "1.2.3.4", true, false) endpoint := &corev1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ Name: "service-updated", @@ -2286,7 +2024,8 @@ func TestReconcileUpdateEndpoint(t *testing.T) { { NotReadyAddresses: []corev1.EndpointAddress{ { - IP: "1.2.3.4", + IP: "1.2.3.4", + NodeName: &nodeName, TargetRef: &corev1.ObjectReference{ Kind: "Pod", Name: "pod1", @@ -2299,48 +2038,22 @@ func TestReconcileUpdateEndpoint(t *testing.T) { } return []runtime.Object{pod1, endpoint} }, - initialConsulSvcs: []*api.CatalogRegistration{ + initialConsulSvcs: []*api.AgentServiceRegistration{ { - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - ID: "pod1-service-updated", - Service: "service-updated", - Port: 80, - Address: "1.2.3.4", - Meta: map[string]string{constants.MetaKeyKubeNS: "default"}, - }, - Check: &api.AgentCheck{ - CheckID: "default/pod1-service-updated", - Name: consulKubernetesCheckName, - Type: consulKubernetesCheckType, - Status: api.HealthPassing, - ServiceName: "service-updated", - ServiceID: "pod1-service-updated", - }, + ID: "pod1-service-updated", + Name: "service-updated", + Port: 80, + Address: "1.2.3.4", }, { - Node: consulNodeName, - Address: "127.0.0.1", - Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod1-service-updated-sidecar-proxy", - Service: "service-updated-sidecar-proxy", - Port: 20000, - Address: "1.2.3.4", - Meta: map[string]string{constants.MetaKeyKubeNS: "default"}, - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "service-updated", - DestinationServiceID: "pod1-service-updated", - }, - }, - Check: &api.AgentCheck{ - CheckID: "default/pod1-service-updated-sidecar-proxy", - Name: consulKubernetesCheckName, - Type: consulKubernetesCheckType, - Status: api.HealthPassing, - ServiceName: "service-updated-sidecar-proxy", - ServiceID: "pod1-service-updated-sidecar-proxy", + Kind: api.ServiceKindConnectProxy, + ID: "pod1-service-updated-sidecar-proxy", + Name: "service-updated-sidecar-proxy", + Port: 20000, + Address: "1.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "service-updated", + DestinationServiceID: "pod1-service-updated", }, }, }, @@ -2356,32 +2069,23 @@ func TestReconcileUpdateEndpoint(t *testing.T) { ServiceAddress: "1.2.3.4", }, }, - expectedHealthChecks: []*api.HealthCheck{ + expectedAgentHealthChecks: []*api.AgentCheck{ { - CheckID: "default/pod1-service-updated", + CheckID: "default/pod1-service-updated/kubernetes-health-check", ServiceName: "service-updated", ServiceID: "pod1-service-updated", - Name: consulKubernetesCheckName, - Status: api.HealthCritical, - Output: "Pod \"default/pod1\" is not ready", - Type: consulKubernetesCheckType, - }, - { - 
CheckID: "default/pod1-service-updated-sidecar-proxy", - ServiceName: "service-updated-sidecar-proxy", - ServiceID: "pod1-service-updated-sidecar-proxy", - Name: consulKubernetesCheckName, + Name: "Kubernetes Health Check", Status: api.HealthCritical, Output: "Pod \"default/pod1\" is not ready", - Type: consulKubernetesCheckType, + Type: ttl, }, }, }, { - name: "Endpoints has an updated address (pod IP change).", + name: "Legacy service: Service health check is updated when the pod goes from healthy --> unhealthy", consulSvcName: "service-updated", k8sObjects: func() []runtime.Object { - pod1 := createServicePod("pod1", "4.4.4.4", true, true) + pod1 := createPod("pod1", "1.2.3.4", true, false) endpoint := &corev1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ Name: "service-updated", @@ -2389,9 +2093,10 @@ func TestReconcileUpdateEndpoint(t *testing.T) { }, Subsets: []corev1.EndpointSubset{ { - Addresses: []corev1.EndpointAddress{ + NotReadyAddresses: []corev1.EndpointAddress{ { - IP: "4.4.4.4", + IP: "1.2.3.4", + NodeName: &nodeName, TargetRef: &corev1.ObjectReference{ Kind: "Pod", Name: "pod1", @@ -2404,66 +2109,62 @@ func TestReconcileUpdateEndpoint(t *testing.T) { } return []runtime.Object{pod1, endpoint} }, - initialConsulSvcs: []*api.CatalogRegistration{ + initialConsulSvcs: []*api.AgentServiceRegistration{ { - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - ID: "pod1-service-updated", - Service: "service-updated", - Port: 80, - Address: "1.2.3.4", - Meta: map[string]string{ - constants.MetaKeyKubeNS: "default", - constants.MetaKeyPodName: "pod1", - metaKeyKubeServiceName: "service-updated", - metaKeyManagedBy: constants.ManagedByValue, - metaKeySyntheticNode: "true", - }, + ID: "pod1-service-updated", + Name: "service-updated", + Port: 80, + Address: "1.2.3.4", + Check: &api.AgentServiceCheck{ + CheckID: "default/pod1-service-updated/kubernetes-health-check", + Name: "Kubernetes Health Check", + TTL: "100000h", + Status: api.HealthPassing, + SuccessBeforePassing: 1, + FailuresBeforeCritical: 1, }, }, { - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod1-service-updated-sidecar-proxy", - Service: "service-updated-sidecar-proxy", - Port: 20000, - Address: "1.2.3.4", - Meta: map[string]string{ - constants.MetaKeyKubeNS: "default", - constants.MetaKeyPodName: "pod1", - metaKeyKubeServiceName: "service-updated", - metaKeyManagedBy: constants.ManagedByValue, - metaKeySyntheticNode: "true", - }, - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "service-updated", - DestinationServiceID: "pod1-service-updated", - }, + Kind: api.ServiceKindConnectProxy, + ID: "pod1-service-updated-sidecar-proxy", + Name: "service-updated-sidecar-proxy", + Port: 20000, + Address: "1.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "service-updated", + DestinationServiceID: "pod1-service-updated", }, }, }, expectedConsulSvcInstances: []*api.CatalogService{ { ServiceID: "pod1-service-updated", - ServiceAddress: "4.4.4.4", + ServiceAddress: "1.2.3.4", }, }, expectedProxySvcInstances: []*api.CatalogService{ { ServiceID: "pod1-service-updated-sidecar-proxy", - ServiceAddress: "4.4.4.4", + ServiceAddress: "1.2.3.4", + }, + }, + expectedAgentHealthChecks: []*api.AgentCheck{ + { + CheckID: "default/pod1-service-updated/kubernetes-health-check", + ServiceName: "service-updated", + ServiceID: "pod1-service-updated", + Name: "Kubernetes Health Check", + 
Status: api.HealthCritical, + Output: "Pod \"default/pod1\" is not ready", + Type: ttl, }, }, }, { - name: "Different Consul service name: Endpoints has an updated address (pod IP change).", - consulSvcName: "different-consul-svc-name", + name: "Legacy service: Service health check is updated when the pod goes from unhealthy --> healthy", + consulSvcName: "service-updated", k8sObjects: func() []runtime.Object { - pod1 := createServicePod("pod1", "4.4.4.4", true, true) - pod1.Annotations[constants.AnnotationService] = "different-consul-svc-name" + pod1 := createPod("pod1", "1.2.3.4", true, false) endpoint := &corev1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ Name: "service-updated", @@ -2473,7 +2174,8 @@ func TestReconcileUpdateEndpoint(t *testing.T) { { Addresses: []corev1.EndpointAddress{ { - IP: "4.4.4.4", + IP: "1.2.3.4", + NodeName: &nodeName, TargetRef: &corev1.ObjectReference{ Kind: "Pod", Name: "pod1", @@ -2486,67 +2188,63 @@ func TestReconcileUpdateEndpoint(t *testing.T) { } return []runtime.Object{pod1, endpoint} }, - initialConsulSvcs: []*api.CatalogRegistration{ + initialConsulSvcs: []*api.AgentServiceRegistration{ { - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - ID: "pod1-different-consul-svc-name", - Service: "different-consul-svc-name", - Port: 80, - Address: "1.2.3.4", - Meta: map[string]string{ - metaKeyManagedBy: constants.ManagedByValue, - metaKeySyntheticNode: "true", - constants.MetaKeyKubeNS: "default", - constants.MetaKeyPodName: "pod1", - metaKeyKubeServiceName: "service-updated", - }, + ID: "pod1-service-updated", + Name: "service-updated", + Port: 80, + Address: "1.2.3.4", + Check: &api.AgentServiceCheck{ + CheckID: "default/pod1-service-updated/kubernetes-health-check", + Name: "Kubernetes Health Check", + TTL: "100000h", + Status: api.HealthCritical, + SuccessBeforePassing: 1, + FailuresBeforeCritical: 1, }, }, { - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod1-different-consul-svc-name-sidecar-proxy", - Service: "different-consul-svc-name-sidecar-proxy", - Port: 20000, - Address: "1.2.3.4", - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "different-consul-svc-name", - DestinationServiceID: "pod1-different-consul-svc-name", - }, - Meta: map[string]string{ - metaKeyManagedBy: constants.ManagedByValue, - metaKeySyntheticNode: "true", - constants.MetaKeyKubeNS: "default", - constants.MetaKeyPodName: "pod1", - metaKeyKubeServiceName: "service-updated", - }, + Kind: api.ServiceKindConnectProxy, + ID: "pod1-service-updated-sidecar-proxy", + Name: "service-updated-sidecar-proxy", + Port: 20000, + Address: "1.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "service-updated", + DestinationServiceID: "pod1-service-updated", }, }, }, expectedConsulSvcInstances: []*api.CatalogService{ { - ServiceID: "pod1-different-consul-svc-name", - ServiceAddress: "4.4.4.4", + ServiceID: "pod1-service-updated", + ServiceAddress: "1.2.3.4", }, }, expectedProxySvcInstances: []*api.CatalogService{ { - ServiceID: "pod1-different-consul-svc-name-sidecar-proxy", - ServiceAddress: "4.4.4.4", + ServiceID: "pod1-service-updated-sidecar-proxy", + ServiceAddress: "1.2.3.4", + }, + }, + expectedAgentHealthChecks: []*api.AgentCheck{ + { + CheckID: "default/pod1-service-updated/kubernetes-health-check", + ServiceName: "service-updated", + ServiceID: "pod1-service-updated", + Name: "Kubernetes Health Check", + Status: 
api.HealthPassing, + Output: kubernetesSuccessReasonMsg, + Type: ttl, }, }, }, { - name: "Endpoints has additional address not in Consul", + name: "Endpoints has an updated address because health check changes from unhealthy to healthy", consulSvcName: "service-updated", k8sObjects: func() []runtime.Object { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) - pod2 := createServicePod("pod2", "2.2.3.4", true, true) - endpointWithTwoAddresses := &corev1.Endpoints{ + pod1 := createPod("pod1", "1.2.3.4", true, true) + endpoint := &corev1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ Name: "service-updated", Namespace: "default", @@ -2555,52 +2253,46 @@ func TestReconcileUpdateEndpoint(t *testing.T) { { Addresses: []corev1.EndpointAddress{ { - IP: "1.2.3.4", + IP: "1.2.3.4", + NodeName: &nodeName, TargetRef: &corev1.ObjectReference{ Kind: "Pod", Name: "pod1", Namespace: "default", }, }, - { - IP: "2.2.3.4", - TargetRef: &corev1.ObjectReference{ - Kind: "Pod", - Name: "pod2", - Namespace: "default", - }, - }, }, }, }, } - return []runtime.Object{pod1, pod2, endpointWithTwoAddresses} + return []runtime.Object{pod1, endpoint} }, - initialConsulSvcs: []*api.CatalogRegistration{ + initialConsulSvcs: []*api.AgentServiceRegistration{ { - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - ID: "pod1-service-updated", - Service: "service-updated", - Port: 80, - Address: "1.2.3.4", - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", metaKeyManagedBy: constants.ManagedByValue}, + ID: "pod1-service-updated", + Name: "service-updated", + Port: 80, + Address: "1.2.3.4", + Meta: map[string]string{MetaKeyKubeNS: "default"}, + Check: &api.AgentServiceCheck{ + CheckID: "default/pod1-service-updated/kubernetes-health-check", + Name: "Kubernetes Health Check", + TTL: "100000h", + Status: api.HealthCritical, + SuccessBeforePassing: 1, + FailuresBeforeCritical: 1, }, }, { - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod1-service-updated-sidecar-proxy", - Service: "service-updated-sidecar-proxy", - Port: 20000, - Address: "1.2.3.4", - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "service-updated", - DestinationServiceID: "pod1-service-updated", - }, + Kind: api.ServiceKindConnectProxy, + ID: "pod1-service-updated-sidecar-proxy", + Name: "service-updated-sidecar-proxy", + Port: 20000, + Address: "1.2.3.4", + Meta: map[string]string{MetaKeyKubeNS: "default"}, + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "service-updated", + DestinationServiceID: "pod1-service-updated", }, }, }, @@ -2609,65 +2301,30 @@ func TestReconcileUpdateEndpoint(t *testing.T) { ServiceID: "pod1-service-updated", ServiceAddress: "1.2.3.4", }, - { - ServiceID: "pod2-service-updated", - ServiceAddress: "2.2.3.4", - }, }, expectedProxySvcInstances: []*api.CatalogService{ { ServiceID: "pod1-service-updated-sidecar-proxy", ServiceAddress: "1.2.3.4", }, - { - ServiceID: "pod2-service-updated-sidecar-proxy", - ServiceAddress: "2.2.3.4", - }, }, - expectedHealthChecks: []*api.HealthCheck{ + expectedAgentHealthChecks: []*api.AgentCheck{ { - CheckID: "default/pod1-service-updated", + CheckID: "default/pod1-service-updated/kubernetes-health-check", ServiceName: "service-updated", ServiceID: "pod1-service-updated", - Name: consulKubernetesCheckName, - Status: api.HealthPassing, - Output: kubernetesSuccessReasonMsg, - Type: consulKubernetesCheckType, - 
}, - { - CheckID: "default/pod1-service-updated-sidecar-proxy", - ServiceName: "service-updated-sidecar-proxy", - ServiceID: "pod1-service-updated-sidecar-proxy", - Name: consulKubernetesCheckName, - Status: api.HealthPassing, - Output: kubernetesSuccessReasonMsg, - Type: consulKubernetesCheckType, - }, - { - CheckID: "default/pod2-service-updated", - ServiceName: "service-updated", - ServiceID: "pod2-service-updated", - Name: consulKubernetesCheckName, - Status: api.HealthPassing, - Output: kubernetesSuccessReasonMsg, - Type: consulKubernetesCheckType, - }, - { - CheckID: "default/pod2-service-updated-sidecar-proxy", - ServiceName: "service-updated-sidecar-proxy", - ServiceID: "pod2-service-updated-sidecar-proxy", - Name: consulKubernetesCheckName, + Name: "Kubernetes Health Check", Status: api.HealthPassing, Output: kubernetesSuccessReasonMsg, - Type: consulKubernetesCheckType, + Type: ttl, }, }, }, { - name: "Consul has instances that are not in the Endpoints addresses", + name: "Endpoints has an updated address because health check changes from healthy to unhealthy", consulSvcName: "service-updated", k8sObjects: func() []runtime.Object { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) + pod1 := createPod("pod1", "1.2.3.4", true, true) endpoint := &corev1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ Name: "service-updated", @@ -2675,9 +2332,10 @@ func TestReconcileUpdateEndpoint(t *testing.T) { }, Subsets: []corev1.EndpointSubset{ { - Addresses: []corev1.EndpointAddress{ + NotReadyAddresses: []corev1.EndpointAddress{ { - IP: "1.2.3.4", + IP: "1.2.3.4", + NodeName: &nodeName, TargetRef: &corev1.ObjectReference{ Kind: "Pod", Name: "pod1", @@ -2690,59 +2348,32 @@ func TestReconcileUpdateEndpoint(t *testing.T) { } return []runtime.Object{pod1, endpoint} }, - initialConsulSvcs: []*api.CatalogRegistration{ - { - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - ID: "pod1-service-updated", - Service: "service-updated", - Port: 80, - Address: "1.2.3.4", - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", metaKeyManagedBy: constants.ManagedByValue}, - }, - }, - { - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod1-service-updated-sidecar-proxy", - Service: "service-updated-sidecar-proxy", - Port: 20000, - Address: "1.2.3.4", - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "service-updated", - DestinationServiceID: "pod1-service-updated", - }, - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", metaKeyManagedBy: constants.ManagedByValue}, - }, - }, + initialConsulSvcs: []*api.AgentServiceRegistration{ { - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - ID: "pod2-service-updated", - Service: "service-updated", - Port: 80, - Address: "2.2.3.4", - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", metaKeyManagedBy: constants.ManagedByValue}, + ID: "pod1-service-updated", + Name: "service-updated", + Port: 80, + Address: "1.2.3.4", + Meta: map[string]string{MetaKeyKubeNS: "default"}, + Check: &api.AgentServiceCheck{ + CheckID: "default/pod1-service-updated/kubernetes-health-check", + Name: "Kubernetes Health Check", + TTL: "100000h", + Status: api.HealthPassing, + SuccessBeforePassing: 1, + FailuresBeforeCritical: 1, }, }, { - Node: consulNodeName, - Address: consulNodeAddress, - Service: 
&api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod2-service-updated-sidecar-proxy", - Service: "service-updated-sidecar-proxy", - Port: 20000, - Address: "2.2.3.4", - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "service-updated", - DestinationServiceID: "pod2-service-updated", - }, - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", metaKeyManagedBy: constants.ManagedByValue}, + Kind: api.ServiceKindConnectProxy, + ID: "pod1-service-updated-sidecar-proxy", + Name: "service-updated-sidecar-proxy", + Port: 20000, + Address: "1.2.3.4", + Meta: map[string]string{MetaKeyKubeNS: "default"}, + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "service-updated", + DestinationServiceID: "pod1-service-updated", }, }, }, @@ -2758,13 +2389,23 @@ func TestReconcileUpdateEndpoint(t *testing.T) { ServiceAddress: "1.2.3.4", }, }, + expectedAgentHealthChecks: []*api.AgentCheck{ + { + CheckID: "default/pod1-service-updated/kubernetes-health-check", + ServiceName: "service-updated", + ServiceID: "pod1-service-updated", + Name: "Kubernetes Health Check", + Status: api.HealthCritical, + Output: "Pod \"default/pod1\" is not ready", + Type: ttl, + }, + }, }, { - name: "Different Consul service name: Consul has instances that are not in the Endpoints addresses", - consulSvcName: "different-consul-svc-name", + name: "Endpoints has an updated address (pod IP change).", + consulSvcName: "service-updated", k8sObjects: func() []runtime.Object { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) - pod1.Annotations[constants.AnnotationService] = "different-consul-svc-name" + pod1 := createPod("pod1", "4.4.4.4", true, true) endpoint := &corev1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ Name: "service-updated", @@ -2774,7 +2415,8 @@ func TestReconcileUpdateEndpoint(t *testing.T) { { Addresses: []corev1.EndpointAddress{ { - IP: "1.2.3.4", + IP: "4.4.4.4", + NodeName: &nodeName, TargetRef: &corev1.ObjectReference{ Kind: "Pod", Name: "pod1", @@ -2787,227 +2429,130 @@ func TestReconcileUpdateEndpoint(t *testing.T) { } return []runtime.Object{pod1, endpoint} }, - initialConsulSvcs: []*api.CatalogRegistration{ - { - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - ID: "pod1-different-consul-svc-name", - Service: "different-consul-svc-name", - Port: 80, - Address: "1.2.3.4", - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", metaKeyManagedBy: constants.ManagedByValue}, - }, - }, + initialConsulSvcs: []*api.AgentServiceRegistration{ { - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod1-different-consul-svc-name-sidecar-proxy", - Service: "different-consul-svc-name-sidecar-proxy", - Port: 20000, - Address: "1.2.3.4", - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "different-consul-svc-name", - DestinationServiceID: "pod1-different-consul-svc-name", - }, - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", metaKeyManagedBy: constants.ManagedByValue}, + ID: "pod1-service-updated", + Name: "service-updated", + Port: 80, + Address: "1.2.3.4", + Meta: map[string]string{ + MetaKeyKubeNS: "default", + MetaKeyPodName: "pod1", + MetaKeyKubeServiceName: "service-updated", + MetaKeyManagedBy: managedByValue, }, }, { - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - ID: 
"pod2-different-consul-svc-name", - Service: "different-consul-svc-name", - Port: 80, - Address: "2.2.3.4", - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", metaKeyManagedBy: constants.ManagedByValue}, + Kind: api.ServiceKindConnectProxy, + ID: "pod1-service-updated-sidecar-proxy", + Name: "service-updated-sidecar-proxy", + Port: 20000, + Address: "1.2.3.4", + Meta: map[string]string{ + MetaKeyKubeNS: "default", + MetaKeyPodName: "pod1", + MetaKeyKubeServiceName: "service-updated", + MetaKeyManagedBy: managedByValue, }, - }, - { - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod2-different-consul-svc-name-sidecar-proxy", - Service: "different-consul-svc-name-sidecar-proxy", - Port: 20000, - Address: "2.2.3.4", - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "different-consul-svc-name", - DestinationServiceID: "pod2-different-consul-svc-name", - }, - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", metaKeyManagedBy: constants.ManagedByValue}, + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "service-updated", + DestinationServiceID: "pod1-service-updated", }, }, }, expectedConsulSvcInstances: []*api.CatalogService{ { - ServiceID: "pod1-different-consul-svc-name", - ServiceAddress: "1.2.3.4", + ServiceID: "pod1-service-updated", + ServiceAddress: "4.4.4.4", }, }, expectedProxySvcInstances: []*api.CatalogService{ { - ServiceID: "pod1-different-consul-svc-name-sidecar-proxy", - ServiceAddress: "1.2.3.4", + ServiceID: "pod1-service-updated-sidecar-proxy", + ServiceAddress: "4.4.4.4", }, }, }, { - // When a k8s deployment is deleted but it's k8s service continues to exist, the endpoints has no addresses - // and the instances should be deleted from Consul. 
- name: "Consul has instances that are not in the endpoints, and the endpoints has no addresses.", - consulSvcName: "service-updated", + name: "Different Consul service name: Endpoints has an updated address (pod IP change).", + consulSvcName: "different-consul-svc-name", k8sObjects: func() []runtime.Object { + pod1 := createPod("pod1", "4.4.4.4", true, true) + pod1.Annotations[annotationService] = "different-consul-svc-name" endpoint := &corev1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ Name: "service-updated", Namespace: "default", }, - } - return []runtime.Object{endpoint} - }, - initialConsulSvcs: []*api.CatalogRegistration{ - { - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - ID: "pod1-service-updated", - Service: "service-updated", - Port: 80, - Address: "1.2.3.4", - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", metaKeyManagedBy: constants.ManagedByValue}, - }, - }, - { - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod1-service-updated-sidecar-proxy", - Service: "service-updated-sidecar-proxy", - Port: 20000, - Address: "1.2.3.4", - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "service-updated", - DestinationServiceID: "pod1-service-updated", - }, - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", metaKeyManagedBy: constants.ManagedByValue}, - }, - }, - { - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - ID: "pod2-service-updated", - Service: "service-updated", - Port: 80, - Address: "2.2.3.4", - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", metaKeyManagedBy: constants.ManagedByValue}, - }, - }, - { - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod2-service-updated-sidecar-proxy", - Service: "service-updated-sidecar-proxy", - Port: 20000, - Address: "2.2.3.4", - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "service-updated", - DestinationServiceID: "pod2-service-updated", + Subsets: []corev1.EndpointSubset{ + { + Addresses: []corev1.EndpointAddress{ + { + IP: "4.4.4.4", + NodeName: &nodeName, + TargetRef: &corev1.ObjectReference{ + Kind: "Pod", + Name: "pod1", + Namespace: "default", + }, + }, + }, }, - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", metaKeyManagedBy: constants.ManagedByValue}, - }, - }, - }, - expectedConsulSvcInstances: []*api.CatalogService{}, - expectedProxySvcInstances: []*api.CatalogService{}, - }, - { - // With a different Consul service name, when a k8s deployment is deleted but it's k8s service continues to - // exist, the endpoints has no addresses and the instances should be deleted from Consul. 
- name: "Different Consul service name: Consul has instances that are not in the endpoints, and the endpoints has no addresses.", - consulSvcName: "different-consul-svc-name", - k8sObjects: func() []runtime.Object { - endpoint := &corev1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Name: "service-updated", - Namespace: "default", }, } - return []runtime.Object{endpoint} + return []runtime.Object{pod1, endpoint} }, - initialConsulSvcs: []*api.CatalogRegistration{ + initialConsulSvcs: []*api.AgentServiceRegistration{ { - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - ID: "pod1-different-consul-svc-name", - Service: "different-consul-svc-name", - Port: 80, - Address: "1.2.3.4", - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", metaKeyManagedBy: constants.ManagedByValue}, + ID: "pod1-different-consul-svc-name", + Name: "different-consul-svc-name", + Port: 80, + Address: "1.2.3.4", + Meta: map[string]string{ + MetaKeyManagedBy: managedByValue, + MetaKeyKubeNS: "default", + MetaKeyPodName: "pod1", + MetaKeyKubeServiceName: "service-updated", }, }, { - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod1-different-consul-svc-name-sidecar-proxy", - Service: "different-consul-svc-name-sidecar-proxy", - Port: 20000, - Address: "1.2.3.4", - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "different-consul-svc-name", - DestinationServiceID: "pod1-different-consul-svc-name", - }, - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", metaKeyManagedBy: constants.ManagedByValue}, + Kind: api.ServiceKindConnectProxy, + ID: "pod1-different-consul-svc-name-sidecar-proxy", + Name: "different-consul-svc-name-sidecar-proxy", + Port: 20000, + Address: "1.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "different-consul-svc-name", + DestinationServiceID: "pod1-different-consul-svc-name", + }, + Meta: map[string]string{ + MetaKeyManagedBy: managedByValue, + MetaKeyKubeNS: "default", + MetaKeyPodName: "pod1", + MetaKeyKubeServiceName: "service-updated", }, }, + }, + expectedConsulSvcInstances: []*api.CatalogService{ { - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - ID: "pod2-different-consul-svc-name", - Service: "different-consul-svc-name", - Port: 80, - Address: "2.2.3.4", - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", metaKeyManagedBy: constants.ManagedByValue}, - }, + ServiceID: "pod1-different-consul-svc-name", + ServiceAddress: "4.4.4.4", }, + }, + expectedProxySvcInstances: []*api.CatalogService{ { - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod2-different-consul-svc-name-sidecar-proxy", - Service: "different-consul-svc-name-sidecar-proxy", - Port: 20000, - Address: "2.2.3.4", - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "different-consul-svc-name", - DestinationServiceID: "pod2-different-consul-svc-name", - }, - Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", metaKeyManagedBy: constants.ManagedByValue}, - }, + ServiceID: "pod1-different-consul-svc-name-sidecar-proxy", + ServiceAddress: "4.4.4.4", }, }, - expectedConsulSvcInstances: []*api.CatalogService{}, - expectedProxySvcInstances: []*api.CatalogService{}, }, { - name: 
"ACLs enabled: Endpoints has an updated address because the target pod changes", + name: "Endpoints has additional address not in Consul", consulSvcName: "service-updated", k8sObjects: func() []runtime.Object { - pod2 := createServicePod("pod2", "4.4.4.4", true, true) - endpoint := &corev1.Endpoints{ + pod1 := createPod("pod1", "1.2.3.4", true, true) + pod2 := createPod("pod2", "2.2.3.4", true, true) + endpointWithTwoAddresses := &corev1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ Name: "service-updated", Namespace: "default", @@ -3016,7 +2561,17 @@ func TestReconcileUpdateEndpoint(t *testing.T) { { Addresses: []corev1.EndpointAddress{ { - IP: "4.4.4.4", + IP: "1.2.3.4", + NodeName: &nodeName, + TargetRef: &corev1.ObjectReference{ + Kind: "Pod", + Name: "pod1", + Namespace: "default", + }, + }, + { + IP: "2.2.3.4", + NodeName: &nodeName, TargetRef: &corev1.ObjectReference{ Kind: "Pod", Name: "pod2", @@ -3027,82 +2582,74 @@ func TestReconcileUpdateEndpoint(t *testing.T) { }, }, } - return []runtime.Object{pod2, endpoint} + return []runtime.Object{pod1, pod2, endpointWithTwoAddresses} }, - initialConsulSvcs: []*api.CatalogRegistration{ + initialConsulSvcs: []*api.AgentServiceRegistration{ { - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - ID: "pod1-service-updated", - Service: "service-updated", - Port: 80, - Address: "1.2.3.4", - Meta: map[string]string{ - constants.MetaKeyKubeNS: "default", - constants.MetaKeyPodName: "pod1", - metaKeyKubeServiceName: "service-updated", - metaKeyManagedBy: constants.ManagedByValue, - metaKeySyntheticNode: "true", - }, - }, + ID: "pod1-service-updated", + Name: "service-updated", + Port: 80, + Address: "1.2.3.4", + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, }, { - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod1-service-updated-sidecar-proxy", - Service: "service-updated-sidecar-proxy", - Port: 20000, - Address: "1.2.3.4", - Meta: map[string]string{ - constants.MetaKeyKubeNS: "default", - constants.MetaKeyPodName: "pod1", - metaKeyKubeServiceName: "service-updated", - metaKeyManagedBy: constants.ManagedByValue, - metaKeySyntheticNode: "true", - }, - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "service-updated", - DestinationServiceID: "pod1-service-updated", - }, + Kind: api.ServiceKindConnectProxy, + ID: "pod1-service-updated-sidecar-proxy", + Name: "service-updated-sidecar-proxy", + Port: 20000, + Address: "1.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "service-updated", + DestinationServiceID: "pod1-service-updated", }, }, }, expectedConsulSvcInstances: []*api.CatalogService{ + { + ServiceID: "pod1-service-updated", + ServiceAddress: "1.2.3.4", + }, { ServiceID: "pod2-service-updated", - ServiceAddress: "4.4.4.4", - ServiceMeta: map[string]string{ - metaKeyKubeServiceName: "service-updated", - constants.MetaKeyKubeNS: "default", - metaKeyManagedBy: constants.ManagedByValue, - metaKeySyntheticNode: "true", - constants.MetaKeyPodName: "pod2", - }, + ServiceAddress: "2.2.3.4", }, }, expectedProxySvcInstances: []*api.CatalogService{ + { + ServiceID: "pod1-service-updated-sidecar-proxy", + ServiceAddress: "1.2.3.4", + }, { ServiceID: "pod2-service-updated-sidecar-proxy", - ServiceAddress: "4.4.4.4", - ServiceMeta: map[string]string{ - metaKeyKubeServiceName: "service-updated", - 
constants.MetaKeyKubeNS: "default", - metaKeyManagedBy: constants.ManagedByValue, - metaKeySyntheticNode: "true", - constants.MetaKeyPodName: "pod2", - }, + ServiceAddress: "2.2.3.4", + }, + }, + expectedAgentHealthChecks: []*api.AgentCheck{ + { + CheckID: "default/pod1-service-updated/kubernetes-health-check", + ServiceName: "service-updated", + ServiceID: "pod1-service-updated", + Name: "Kubernetes Health Check", + Status: api.HealthPassing, + Output: kubernetesSuccessReasonMsg, + Type: ttl, + }, + { + CheckID: "default/pod2-service-updated/kubernetes-health-check", + ServiceName: "service-updated", + ServiceID: "pod2-service-updated", + Name: "Kubernetes Health Check", + Status: api.HealthPassing, + Output: kubernetesSuccessReasonMsg, + Type: ttl, }, }, - enableACLs: true, }, { - name: "ACLs enabled: Consul has instances that are not in the Endpoints addresses", + name: "Consul has instances that are not in the Endpoints addresses", consulSvcName: "service-updated", k8sObjects: func() []runtime.Object { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) + pod1 := createPod("pod1", "1.2.3.4", true, true) endpoint := &corev1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ Name: "service-updated", @@ -3112,7 +2659,8 @@ func TestReconcileUpdateEndpoint(t *testing.T) { { Addresses: []corev1.EndpointAddress{ { - IP: "1.2.3.4", + IP: "1.2.3.4", + NodeName: &nodeName, TargetRef: &corev1.ObjectReference{ Kind: "Pod", Name: "pod1", @@ -3125,123 +2673,65 @@ func TestReconcileUpdateEndpoint(t *testing.T) { } return []runtime.Object{pod1, endpoint} }, - initialConsulSvcs: []*api.CatalogRegistration{ + initialConsulSvcs: []*api.AgentServiceRegistration{ { - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - ID: "pod1-service-updated", - Service: "service-updated", - Port: 80, - Address: "1.2.3.4", - Meta: map[string]string{ - metaKeyKubeServiceName: "service-updated", - constants.MetaKeyKubeNS: "default", - metaKeyManagedBy: constants.ManagedByValue, - metaKeySyntheticNode: "true", - constants.MetaKeyPodName: "pod1", - }, - }, + ID: "pod1-service-updated", + Name: "service-updated", + Port: 80, + Address: "1.2.3.4", + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, }, { - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod1-service-updated-sidecar-proxy", - Service: "service-updated-sidecar-proxy", - Port: 20000, - Address: "1.2.3.4", - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "service-updated", - DestinationServiceID: "pod1-service-updated", - }, - Meta: map[string]string{ - metaKeyKubeServiceName: "service-updated", - constants.MetaKeyKubeNS: "default", - metaKeyManagedBy: constants.ManagedByValue, - metaKeySyntheticNode: "true", - constants.MetaKeyPodName: "pod1", - }, + Kind: api.ServiceKindConnectProxy, + ID: "pod1-service-updated-sidecar-proxy", + Name: "service-updated-sidecar-proxy", + Port: 20000, + Address: "1.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "service-updated", + DestinationServiceID: "pod1-service-updated", }, + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, }, { - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - ID: "pod2-service-updated", - Service: "service-updated", - Port: 80, - Address: "2.2.3.4", - Meta: 
map[string]string{ - metaKeyKubeServiceName: "service-updated", - constants.MetaKeyKubeNS: "default", - metaKeyManagedBy: constants.ManagedByValue, - metaKeySyntheticNode: "true", - constants.MetaKeyPodName: "pod2", - }, - }, + ID: "pod2-service-updated", + Name: "service-updated", + Port: 80, + Address: "2.2.3.4", + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, }, { - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod2-service-updated-sidecar-proxy", - Service: "service-updated-sidecar-proxy", - Port: 20000, - Address: "2.2.3.4", - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "service-updated", - DestinationServiceID: "pod2-service-updated", - }, - Meta: map[string]string{ - metaKeyKubeServiceName: "service-updated", - constants.MetaKeyKubeNS: "default", - metaKeyManagedBy: constants.ManagedByValue, - metaKeySyntheticNode: "true", - constants.MetaKeyPodName: "pod2", - }, + Kind: api.ServiceKindConnectProxy, + ID: "pod2-service-updated-sidecar-proxy", + Name: "service-updated-sidecar-proxy", + Port: 20000, + Address: "2.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "service-updated", + DestinationServiceID: "pod2-service-updated", }, + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, }, }, expectedConsulSvcInstances: []*api.CatalogService{ { ServiceID: "pod1-service-updated", - ServiceName: "service-updated", ServiceAddress: "1.2.3.4", - ServiceMeta: map[string]string{ - metaKeyKubeServiceName: "service-updated", - constants.MetaKeyKubeNS: "default", - metaKeyManagedBy: constants.ManagedByValue, - metaKeySyntheticNode: "true", - constants.MetaKeyPodName: "pod1", - }, }, }, expectedProxySvcInstances: []*api.CatalogService{ { ServiceID: "pod1-service-updated-sidecar-proxy", - ServiceName: "service-updated-sidecar-proxy", ServiceAddress: "1.2.3.4", - ServiceMeta: map[string]string{ - metaKeyKubeServiceName: "service-updated", - constants.MetaKeyKubeNS: "default", - metaKeyManagedBy: constants.ManagedByValue, - metaKeySyntheticNode: "true", - constants.MetaKeyPodName: "pod1", - }, }, }, - enableACLs: true, }, - // When a Deployment has the mesh annotation removed, Kube will delete the old pods. When it deletes the last Pod, - // the endpoints object will contain only non-mesh pods, but you'll still have one consul service instance to clean up. 
{ - name: "When a Deployment moves from mesh to non mesh its service instances should be deleted", - consulSvcName: "service-updated", + name: "Different Consul service name: Consul has instances that are not in the Endpoints addresses", + consulSvcName: "different-consul-svc-name", k8sObjects: func() []runtime.Object { - pod2 := createServicePod("pod2", "2.3.4.5", false, false) + pod1 := createPod("pod1", "1.2.3.4", true, true) + pod1.Annotations[annotationService] = "different-consul-svc-name" endpoint := &corev1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ Name: "service-updated", @@ -3251,10 +2741,11 @@ func TestReconcileUpdateEndpoint(t *testing.T) { { Addresses: []corev1.EndpointAddress{ { - IP: "2.3.4.5", + IP: "1.2.3.4", + NodeName: &nodeName, TargetRef: &corev1.ObjectReference{ Kind: "Pod", - Name: "pod2", + Name: "pod1", Namespace: "default", }, }, @@ -3262,209 +2753,180 @@ func TestReconcileUpdateEndpoint(t *testing.T) { }, }, } - return []runtime.Object{pod2, endpoint} + return []runtime.Object{pod1, endpoint} }, - initialConsulSvcs: []*api.CatalogRegistration{ + initialConsulSvcs: []*api.AgentServiceRegistration{ { - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - ID: "pod1-service-updated", - Service: "service-updated", - Port: 80, - Address: "1.2.3.4", - Meta: map[string]string{ - metaKeyKubeServiceName: "service-updated", - constants.MetaKeyKubeNS: "default", - metaKeyManagedBy: constants.ManagedByValue, - metaKeySyntheticNode: "true", - constants.MetaKeyPodName: "pod1", - }, + ID: "pod1-different-consul-svc-name", + Name: "different-consul-svc-name", + Port: 80, + Address: "1.2.3.4", + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, + }, + { + Kind: api.ServiceKindConnectProxy, + ID: "pod1-different-consul-svc-name-sidecar-proxy", + Name: "different-consul-svc-name-sidecar-proxy", + Port: 20000, + Address: "1.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "different-consul-svc-name", + DestinationServiceID: "pod1-different-consul-svc-name", }, + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, }, { - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - Kind: api.ServiceKindConnectProxy, - ID: "pod1-service-updated-sidecar-proxy", - Service: "service-updated-sidecar-proxy", - Port: 20000, - Address: "1.2.3.4", - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "service-updated", - DestinationServiceID: "pod1-service-updated", - }, - Meta: map[string]string{ - metaKeyKubeServiceName: "service-updated", - constants.MetaKeyKubeNS: "default", - metaKeyManagedBy: constants.ManagedByValue, - metaKeySyntheticNode: "true", - constants.MetaKeyPodName: "pod1", - }, + ID: "pod2-different-consul-svc-name", + Name: "different-consul-svc-name", + Port: 80, + Address: "2.2.3.4", + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, + }, + { + Kind: api.ServiceKindConnectProxy, + ID: "pod2-different-consul-svc-name-sidecar-proxy", + Name: "different-consul-svc-name-sidecar-proxy", + Port: 20000, + Address: "2.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "different-consul-svc-name", + DestinationServiceID: "pod2-different-consul-svc-name", }, + Meta: map[string]string{"k8s-service-name": "service-updated", 
"k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, + }, + }, + expectedConsulSvcInstances: []*api.CatalogService{ + { + ServiceID: "pod1-different-consul-svc-name", + ServiceAddress: "1.2.3.4", + }, + }, + expectedProxySvcInstances: []*api.CatalogService{ + { + ServiceID: "pod1-different-consul-svc-name-sidecar-proxy", + ServiceAddress: "1.2.3.4", }, }, - expectedConsulSvcInstances: nil, - expectedProxySvcInstances: nil, }, - } - for _, tt := range cases { - t.Run(tt.name, func(t *testing.T) { - // Add the default namespace. - ns := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "default"}} - node := corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: nodeName}} - // Create fake k8s client. - k8sObjects := append(tt.k8sObjects(), &ns, &node) - fakeClient := fake.NewClientBuilder().WithRuntimeObjects(k8sObjects...).Build() - - // Create test consulServer server - adminToken := "123e4567-e89b-12d3-a456-426614174000" - testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { - if tt.enableACLs { - c.ACL.Enabled = tt.enableACLs - c.ACL.Tokens.InitialManagement = adminToken + { + // When a k8s deployment is deleted but it's k8s service continues to exist, the endpoints has no addresses + // and the instances should be deleted from Consul. + name: "Consul has instances that are not in the endpoints, and the endpoints has no addresses.", + consulSvcName: "service-updated", + k8sObjects: func() []runtime.Object { + endpoint := &corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "service-updated", + Namespace: "default", + }, } - }) - consulClient := testClient.APIClient - - // Holds token accessorID for each service ID. - tokensForServices := make(map[string]string) - - // Register service and proxy in consul. - for _, svc := range tt.initialConsulSvcs { - _, err := consulClient.Catalog().Register(svc, nil) - require.NoError(t, err) - - // Create a token for this service if ACLs are enabled. - if tt.enableACLs { - if svc.Service.Kind != api.ServiceKindConnectProxy { - test.SetupK8sAuthMethod(t, consulClient, svc.Service.Service, svc.Service.Meta[constants.MetaKeyKubeNS]) - token, _, err := consulClient.ACL().Login(&api.ACLLoginParams{ - AuthMethod: test.AuthMethod, - BearerToken: test.ServiceAccountJWTToken, - Meta: map[string]string{ - tokenMetaPodNameKey: fmt.Sprintf("%s/%s", svc.Service.Meta[constants.MetaKeyKubeNS], svc.Service.Meta[constants.MetaKeyPodName]), - }, - }, nil) - // Record each token we create. - require.NoError(t, err) - tokensForServices[svc.ID] = token.AccessorID - - // Create another token for the same service but a pod that either no longer exists - // or the endpoints controller doesn't know about it yet. - // This is to test a scenario with either orphaned tokens - // or tokens for services that haven't yet been registered with Consul. - // In that case, we have a token for the pod but the service instance - // for that pod either no longer exists or is not yet registered in Consul. - // This token should not be deleted. - token, _, err = consulClient.ACL().Login(&api.ACLLoginParams{ - AuthMethod: test.AuthMethod, - BearerToken: test.ServiceAccountJWTToken, - Meta: map[string]string{ - tokenMetaPodNameKey: fmt.Sprintf("%s/%s", svc.Service.Meta[constants.MetaKeyKubeNS], "does-not-exist"), - }, - }, nil) - require.NoError(t, err) - tokensForServices["does-not-exist"+svc.Service.Service] = token.AccessorID - } - } - } - - // Create the endpoints controller. 
- ep := &Controller{ - Client: fakeClient, - Log: logrtest.TestLogger{T: t}, - ConsulClientConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, - AllowK8sNamespacesSet: mapset.NewSetWith("*"), - DenyK8sNamespacesSet: mapset.NewSetWith(), - ReleaseName: "consul", - ReleaseNamespace: "default", - } - if tt.enableACLs { - ep.AuthMethod = test.AuthMethod - } - namespacedName := types.NamespacedName{Namespace: "default", Name: "service-updated"} - - resp, err := ep.Reconcile(context.Background(), ctrl.Request{NamespacedName: namespacedName}) - require.NoError(t, err) - require.False(t, resp.Requeue) - - // After reconciliation, Consul should have service-updated with the correct number of instances. - serviceInstances, _, err := consulClient.Catalog().Service(tt.consulSvcName, "", nil) - require.NoError(t, err) - require.Len(t, serviceInstances, len(tt.expectedConsulSvcInstances)) - for i, instance := range serviceInstances { - require.Equal(t, tt.expectedConsulSvcInstances[i].ServiceID, instance.ServiceID) - require.Equal(t, tt.expectedConsulSvcInstances[i].ServiceAddress, instance.ServiceAddress) - } - proxyServiceInstances, _, err := consulClient.Catalog().Service(fmt.Sprintf("%s-sidecar-proxy", tt.consulSvcName), "", nil) - require.NoError(t, err) - require.Len(t, proxyServiceInstances, len(tt.expectedProxySvcInstances)) - for i, instance := range proxyServiceInstances { - require.Equal(t, tt.expectedProxySvcInstances[i].ServiceID, instance.ServiceID) - require.Equal(t, tt.expectedProxySvcInstances[i].ServiceAddress, instance.ServiceAddress) - } - // Check that the Consul health check was created for the k8s pod. - for _, expectedCheck := range tt.expectedHealthChecks { - filter := fmt.Sprintf("ServiceID == %q", expectedCheck.ServiceID) - checks, _, err := consulClient.Health().Checks(expectedCheck.ServiceName, &api.QueryOptions{Filter: filter}) - require.NoError(t, err) - require.Equal(t, 1, len(checks)) - // Ignoring Namespace because the response from ENT includes it and OSS does not. - var ignoredFields = []string{"Node", "Definition", "Namespace", "Partition", "CreateIndex", "ModifyIndex", "ServiceTags"} - require.True(t, cmp.Equal(checks[0], expectedCheck, cmpopts.IgnoreFields(api.HealthCheck{}, ignoredFields...))) - } - - if tt.enableACLs { - // Put expected services into a map to make it easier to find service IDs. - expectedServices := mapset.NewSet() - for _, svc := range tt.expectedConsulSvcInstances { - expectedServices.Add(svc.ServiceID) - } - - initialServices := mapset.NewSet() - for _, svc := range tt.initialConsulSvcs { - initialServices.Add(svc.ID) - } - - // We only care about a case when services are deregistered, where - // the set of initial services is bigger than the set of expected services. - deregisteredServices := initialServices.Difference(expectedServices) - - // Look through the tokens we've created and check that only - // tokens for the deregistered services have been deleted. - for sID, tokenID := range tokensForServices { - // Read the token from Consul. 
- token, _, err := consulClient.ACL().TokenRead(tokenID, nil) - if deregisteredServices.Contains(sID) { - require.EqualError(t, err, "Unexpected response code: 403 (ACL not found)") - } else { - require.NoError(t, err, "token should exist for service instance: "+sID) - require.NotNil(t, token) - } + return []runtime.Object{endpoint} + }, + initialConsulSvcs: []*api.AgentServiceRegistration{ + { + ID: "pod1-service-updated", + Name: "service-updated", + Port: 80, + Address: "1.2.3.4", + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, + }, + { + Kind: api.ServiceKindConnectProxy, + ID: "pod1-service-updated-sidecar-proxy", + Name: "service-updated-sidecar-proxy", + Port: 20000, + Address: "1.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "service-updated", + DestinationServiceID: "pod1-service-updated", + }, + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, + }, + { + ID: "pod2-service-updated", + Name: "service-updated", + Port: 80, + Address: "2.2.3.4", + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, + }, + { + Kind: api.ServiceKindConnectProxy, + ID: "pod2-service-updated-sidecar-proxy", + Name: "service-updated-sidecar-proxy", + Port: 20000, + Address: "2.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "service-updated", + DestinationServiceID: "pod2-service-updated", + }, + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, + }, + }, + expectedConsulSvcInstances: []*api.CatalogService{}, + expectedProxySvcInstances: []*api.CatalogService{}, + }, + { + // With a different Consul service name, when a k8s deployment is deleted but it's k8s service continues to + // exist, the endpoints has no addresses and the instances should be deleted from Consul. + name: "Different Consul service name: Consul has instances that are not in the endpoints, and the endpoints has no addresses.", + consulSvcName: "different-consul-svc-name", + k8sObjects: func() []runtime.Object { + endpoint := &corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "service-updated", + Namespace: "default", + }, } - } - }) - } -} - -// TestReconcileUpdateEndpoint_LegacyService tests that we can update health checks on a consul client. 
-func TestReconcileUpdateEndpoint_LegacyService(t *testing.T) { - t.Parallel() - cases := []struct { - name string - k8sObjects func() []runtime.Object - initialConsulSvcs []*api.AgentServiceRegistration - expectedHealthChecks []*api.AgentCheck - }{ + return []runtime.Object{endpoint} + }, + initialConsulSvcs: []*api.AgentServiceRegistration{ + { + ID: "pod1-different-consul-svc-name", + Name: "different-consul-svc-name", + Port: 80, + Address: "1.2.3.4", + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, + }, + { + Kind: api.ServiceKindConnectProxy, + ID: "pod1-different-consul-svc-name-sidecar-proxy", + Name: "different-consul-svc-name-sidecar-proxy", + Port: 20000, + Address: "1.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "different-consul-svc-name", + DestinationServiceID: "pod1-different-consul-svc-name", + }, + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, + }, + { + ID: "pod2-different-consul-svc-name", + Name: "different-consul-svc-name", + Port: 80, + Address: "2.2.3.4", + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, + }, + { + Kind: api.ServiceKindConnectProxy, + ID: "pod2-different-consul-svc-name-sidecar-proxy", + Name: "different-consul-svc-name-sidecar-proxy", + Port: 20000, + Address: "2.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "different-consul-svc-name", + DestinationServiceID: "pod2-different-consul-svc-name", + }, + Meta: map[string]string{"k8s-service-name": "service-updated", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, + }, + }, + expectedConsulSvcInstances: []*api.CatalogService{}, + expectedProxySvcInstances: []*api.CatalogService{}, + }, { - name: "Health check changes from unhealthy to healthy", + name: "ACLs enabled: Endpoints has an updated address because the target pod changes", + consulSvcName: "service-updated", k8sObjects: func() []runtime.Object { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) - pod1.Status.HostIP = "127.0.0.1" - pod1.Annotations[constants.AnnotationConsulK8sVersion] = "0.99.0" // We want a version less than 1.0.0. 
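+					// In this case pod2 replaces pod1 behind the same Kubernetes service:
+					// the reconcile is expected to register pod2's instances at the new
+					// address and deregister pod1's, deleting pod1's ACL token as well.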
+ pod2 := createPod("pod2", "4.4.4.4", true, true) endpoint := &corev1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ Name: "service-updated", @@ -3474,10 +2936,11 @@ func TestReconcileUpdateEndpoint_LegacyService(t *testing.T) { { Addresses: []corev1.EndpointAddress{ { - IP: "1.2.3.4", + IP: "4.4.4.4", + NodeName: &nodeName, TargetRef: &corev1.ObjectReference{ Kind: "Pod", - Name: "pod1", + Name: "pod2", Namespace: "default", }, }, @@ -3485,7 +2948,7 @@ func TestReconcileUpdateEndpoint_LegacyService(t *testing.T) { }, }, } - return []runtime.Object{pod1, endpoint} + return []runtime.Object{pod2, endpoint} }, initialConsulSvcs: []*api.AgentServiceRegistration{ { @@ -3493,11 +2956,11 @@ func TestReconcileUpdateEndpoint_LegacyService(t *testing.T) { Name: "service-updated", Port: 80, Address: "1.2.3.4", - Check: &api.AgentServiceCheck{ - CheckID: "default/pod1-service-updated/kubernetes-health-check", - TTL: "100000h", - Name: "Kubernetes Health Check", - Status: api.HealthCritical, + Meta: map[string]string{ + MetaKeyKubeNS: "default", + MetaKeyPodName: "pod1", + MetaKeyKubeServiceName: "service-updated", + MetaKeyManagedBy: managedByValue, }, }, { @@ -3506,30 +2969,49 @@ func TestReconcileUpdateEndpoint_LegacyService(t *testing.T) { Name: "service-updated-sidecar-proxy", Port: 20000, Address: "1.2.3.4", + Meta: map[string]string{ + MetaKeyKubeNS: "default", + MetaKeyPodName: "pod1", + MetaKeyKubeServiceName: "service-updated", + MetaKeyManagedBy: managedByValue, + }, Proxy: &api.AgentServiceConnectProxyConfig{ DestinationServiceName: "service-updated", DestinationServiceID: "pod1-service-updated", }, }, }, - expectedHealthChecks: []*api.AgentCheck{ + expectedConsulSvcInstances: []*api.CatalogService{ { - CheckID: "default/pod1-service-updated/kubernetes-health-check", - ServiceName: "service-updated", - ServiceID: "pod1-service-updated", - Name: "Kubernetes Health Check", - Status: api.HealthPassing, - Output: "Kubernetes health checks passing", - Type: "ttl", + ServiceID: "pod2-service-updated", + ServiceAddress: "4.4.4.4", + ServiceMeta: map[string]string{ + MetaKeyKubeServiceName: "service-updated", + MetaKeyKubeNS: "default", + MetaKeyManagedBy: managedByValue, + MetaKeyPodName: "pod2", + }, + }, + }, + expectedProxySvcInstances: []*api.CatalogService{ + { + ServiceID: "pod2-service-updated-sidecar-proxy", + ServiceAddress: "4.4.4.4", + ServiceMeta: map[string]string{ + MetaKeyKubeServiceName: "service-updated", + MetaKeyKubeNS: "default", + MetaKeyManagedBy: managedByValue, + MetaKeyPodName: "pod2", + }, }, }, + enableACLs: true, }, { - name: "Health check changes from healthy to unhealthy", + name: "ACLs enabled: Consul has instances that are not in the Endpoints addresses", + consulSvcName: "service-updated", k8sObjects: func() []runtime.Object { - pod1 := createServicePod("pod1", "1.2.3.4", true, true) - pod1.Status.HostIP = "127.0.0.1" - pod1.Annotations[constants.AnnotationConsulK8sVersion] = "0.99.0" // We want a version less than 1.0.0. 
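+					// Here the Endpoints object only lists pod1, while Consul also holds
+					// instances for pod2 (registered below); the test expects pod2's
+					// instances to be deregistered and its ACL token deleted, leaving
+					// pod1's registration and token intact.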
+ pod1 := createPod("pod1", "1.2.3.4", true, true) endpoint := &corev1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ Name: "service-updated", @@ -3537,9 +3019,10 @@ func TestReconcileUpdateEndpoint_LegacyService(t *testing.T) { }, Subsets: []corev1.EndpointSubset{ { - NotReadyAddresses: []corev1.EndpointAddress{ + Addresses: []corev1.EndpointAddress{ { - IP: "1.2.3.4", + IP: "1.2.3.4", + NodeName: &nodeName, TargetRef: &corev1.ObjectReference{ Kind: "Pod", Name: "pod1", @@ -3558,11 +3041,11 @@ func TestReconcileUpdateEndpoint_LegacyService(t *testing.T) { Name: "service-updated", Port: 80, Address: "1.2.3.4", - Check: &api.AgentServiceCheck{ - CheckID: "default/pod1-service-updated/kubernetes-health-check", - TTL: "100000h", - Name: "Kubernetes Health Check", - Status: api.HealthPassing, + Meta: map[string]string{ + MetaKeyKubeServiceName: "service-updated", + MetaKeyKubeNS: "default", + MetaKeyManagedBy: managedByValue, + MetaKeyPodName: "pod1", }, }, { @@ -3575,63 +3058,231 @@ func TestReconcileUpdateEndpoint_LegacyService(t *testing.T) { DestinationServiceName: "service-updated", DestinationServiceID: "pod1-service-updated", }, + Meta: map[string]string{ + MetaKeyKubeServiceName: "service-updated", + MetaKeyKubeNS: "default", + MetaKeyManagedBy: managedByValue, + MetaKeyPodName: "pod1", + }, + }, + { + ID: "pod2-service-updated", + Name: "service-updated", + Port: 80, + Address: "2.2.3.4", + Meta: map[string]string{ + MetaKeyKubeServiceName: "service-updated", + MetaKeyKubeNS: "default", + MetaKeyManagedBy: managedByValue, + MetaKeyPodName: "pod2", + }, + }, + { + Kind: api.ServiceKindConnectProxy, + ID: "pod2-service-updated-sidecar-proxy", + Name: "service-updated-sidecar-proxy", + Port: 20000, + Address: "2.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "service-updated", + DestinationServiceID: "pod2-service-updated", + }, + Meta: map[string]string{ + MetaKeyKubeServiceName: "service-updated", + MetaKeyKubeNS: "default", + MetaKeyManagedBy: managedByValue, + MetaKeyPodName: "pod2", + }, }, }, - expectedHealthChecks: []*api.AgentCheck{ + expectedConsulSvcInstances: []*api.CatalogService{ { - CheckID: "default/pod1-service-updated/kubernetes-health-check", - ServiceName: "service-updated", - ServiceID: "pod1-service-updated", - Name: "Kubernetes Health Check", - Status: api.HealthCritical, - Output: "Pod \"default/pod1\" is not ready", - Type: "ttl", + ServiceID: "pod1-service-updated", + ServiceName: "service-updated", + ServiceAddress: "1.2.3.4", + ServiceMeta: map[string]string{ + MetaKeyKubeServiceName: "service-updated", + MetaKeyKubeNS: "default", + MetaKeyManagedBy: managedByValue, + MetaKeyPodName: "pod1", + }, + }, + }, + expectedProxySvcInstances: []*api.CatalogService{ + { + ServiceID: "pod1-service-updated-sidecar-proxy", + ServiceName: "service-updated-sidecar-proxy", + ServiceAddress: "1.2.3.4", + ServiceMeta: map[string]string{ + MetaKeyKubeServiceName: "service-updated", + MetaKeyKubeNS: "default", + MetaKeyManagedBy: managedByValue, + MetaKeyPodName: "pod1", + }, }, }, + enableACLs: true, }, - } - for _, tt := range cases { - t.Run(tt.name, func(t *testing.T) { - // Add the default namespace. - ns := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "default"}} - // Create fake k8s client. - k8sObjects := append(tt.k8sObjects(), &ns) - fakeClient := fake.NewClientBuilder().WithRuntimeObjects(k8sObjects...).Build() + // When a Deployment has the mesh annotation removed, Kube will delete the old pods. 
When it deletes the last Pod, + // the endpoints object will contain only non-mesh pods, but you'll still have one consul service instance to clean up. + { + name: "When a Deployment moves from mesh to non mesh its service instances should be deleted", + consulSvcName: "service-updated", + k8sObjects: func() []runtime.Object { + pod2 := createPod("pod2", "2.3.4.5", false, false) + endpoint := &corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "service-updated", + Namespace: "default", + }, + Subsets: []corev1.EndpointSubset{ + { + Addresses: []corev1.EndpointAddress{ + { + IP: "2.3.4.5", + NodeName: &nodeName, + TargetRef: &corev1.ObjectReference{ + Kind: "Pod", + Name: "pod2", + Namespace: "default", + }, + }, + }, + }, + }, + } + return []runtime.Object{pod2, endpoint} + }, + initialConsulSvcs: []*api.AgentServiceRegistration{ + { + ID: "pod1-service-updated", + Name: "service-updated", + Port: 80, + Address: "1.2.3.4", + Meta: map[string]string{ + MetaKeyKubeServiceName: "service-updated", + MetaKeyKubeNS: "default", + MetaKeyManagedBy: managedByValue, + MetaKeyPodName: "pod1", + }, + }, + { + Kind: api.ServiceKindConnectProxy, + ID: "pod1-service-updated-sidecar-proxy", + Name: "service-updated-sidecar-proxy", + Port: 20000, + Address: "1.2.3.4", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "service-updated", + DestinationServiceID: "pod1-service-updated", + }, + Meta: map[string]string{ + MetaKeyKubeServiceName: "service-updated", + MetaKeyKubeNS: "default", + MetaKeyManagedBy: managedByValue, + MetaKeyPodName: "pod1", + }, + }, + }, + expectedConsulSvcInstances: nil, + expectedProxySvcInstances: nil, + }, + } + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { + // The agent pod needs to have the address 127.0.0.1 so when the + // code gets the agent pods via the label component=client, and + // makes requests against the agent API, it will actually hit the + // test server we have on localhost. + fakeClientPod := createPod("fake-consul-client", "127.0.0.1", false, true) + fakeClientPod.Labels = map[string]string{"component": "client", "app": "consul", "release": "consul"} - // Create test consulServer server - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) + // Add the default namespace. + ns := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "default"}} + // Create fake k8s client. + k8sObjects := append(tt.k8sObjects(), fakeClientPod, &ns) + fakeClient := fake.NewClientBuilder().WithRuntimeObjects(k8sObjects...).Build() - // Create a consul client joined with this server. - var consulClientHttpPort int - consulClientAgent, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { - c.Server = false - c.Bootstrap = false - consulClientHttpPort = c.Ports.HTTP + // Create test consul server. 
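+			// The fixed UUID below is the ACL bootstrap (initial management) token:
+			// when tt.enableACLs is set it is wired into the server config and reused
+			// as the API client's token, so the test can register services and read
+			// back ACL tokens.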
+ adminToken := "123e4567-e89b-12d3-a456-426614174000" + consul, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { + if tt.enableACLs { + c.ACL.Enabled = tt.enableACLs + c.ACL.Tokens.InitialManagement = adminToken + } + c.NodeName = nodeName }) require.NoError(t, err) - consulClientAgent.JoinLAN(t, testClient.TestServer.LANAddr) - consulClientAgent.WaitForSerfCheck(t) + defer consul.Stop() + consul.WaitForServiceIntentions(t) + addr := strings.Split(consul.HTTPAddr, ":") + consulPort := addr[1] - consulClient, err := api.NewClient(&api.Config{Address: consulClientAgent.HTTPAddr}) + cfg := &api.Config{Scheme: "http", Address: consul.HTTPAddr} + if tt.enableACLs { + cfg.Token = adminToken + } + consulClient, err := api.NewClient(cfg) require.NoError(t, err) + // Holds token accessorID for each service ID. + tokensForServices := make(map[string]string) + // Register service and proxy in consul. for _, svc := range tt.initialConsulSvcs { - err := consulClient.Agent().ServiceRegister(svc) + err = consulClient.Agent().ServiceRegister(svc) require.NoError(t, err) + + // Create a token for this service if ACLs are enabled. + if tt.enableACLs { + if svc.Kind != api.ServiceKindConnectProxy { + test.SetupK8sAuthMethod(t, consulClient, svc.Name, svc.Meta[MetaKeyKubeNS]) + token, _, err := consulClient.ACL().Login(&api.ACLLoginParams{ + AuthMethod: test.AuthMethod, + BearerToken: test.ServiceAccountJWTToken, + Meta: map[string]string{ + TokenMetaPodNameKey: fmt.Sprintf("%s/%s", svc.Meta[MetaKeyKubeNS], svc.Meta[MetaKeyPodName]), + }, + }, nil) + // Record each token we create. + require.NoError(t, err) + tokensForServices[svc.ID] = token.AccessorID + + // Create another token for the same service but a pod that either no longer exists + // or the endpoints controller doesn't know about it yet. + // This is to test a scenario with either orphaned tokens + // or tokens for services that haven't yet been registered with Consul. + // In that case, we have a token for the pod but the service instance + // for that pod either no longer exists or is not yet registered in Consul. + // This token should not be deleted. + token, _, err = consulClient.ACL().Login(&api.ACLLoginParams{ + AuthMethod: test.AuthMethod, + BearerToken: test.ServiceAccountJWTToken, + Meta: map[string]string{ + TokenMetaPodNameKey: fmt.Sprintf("%s/%s", svc.Meta[MetaKeyKubeNS], "does-not-exist"), + }, + }, nil) + require.NoError(t, err) + tokensForServices["does-not-exist"+svc.Name] = token.AccessorID + } + } } // Create the endpoints controller. - ep := &Controller{ + ep := &EndpointsController{ Client: fakeClient, Log: logrtest.TestLogger{T: t}, - ConsulClientConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, + ConsulClient: consulClient, + ConsulPort: consulPort, + ConsulScheme: cfg.Scheme, AllowK8sNamespacesSet: mapset.NewSetWith("*"), DenyK8sNamespacesSet: mapset.NewSetWith(), ReleaseName: "consul", ReleaseNamespace: "default", - consulClientHttpPort: consulClientHttpPort, + ConsulClientCfg: cfg, + } + if tt.enableACLs { + ep.AuthMethod = test.AuthMethod } namespacedName := types.NamespacedName{Namespace: "default", Name: "service-updated"} @@ -3639,39 +3290,88 @@ func TestReconcileUpdateEndpoint_LegacyService(t *testing.T) { require.NoError(t, err) require.False(t, resp.Requeue) - // After reconciliation, Consul should have service-updated with the correct health check status. 
- for _, expectedCheck := range tt.expectedHealthChecks { - filter := fmt.Sprintf("ServiceID == %q", expectedCheck.ServiceID) - checks, err := consulClient.Agent().ChecksWithFilter(filter) - require.NoError(t, err) - require.Equal(t, 1, len(checks)) - // Ignoring Namespace because the response from ENT includes it and OSS does not. - var ignoredFields = []string{"Node", "Definition", "Namespace", "Partition"} - require.True(t, cmp.Equal(checks[expectedCheck.CheckID], expectedCheck, cmpopts.IgnoreFields(api.AgentCheck{}, ignoredFields...))) + // After reconciliation, Consul should have service-updated with the correct number of instances. + serviceInstances, _, err := consulClient.Catalog().Service(tt.consulSvcName, "", nil) + require.NoError(t, err) + require.Len(t, serviceInstances, len(tt.expectedConsulSvcInstances)) + for i, instance := range serviceInstances { + require.Equal(t, tt.expectedConsulSvcInstances[i].ServiceID, instance.ServiceID) + require.Equal(t, tt.expectedConsulSvcInstances[i].ServiceAddress, instance.ServiceAddress) + } + proxyServiceInstances, _, err := consulClient.Catalog().Service(fmt.Sprintf("%s-sidecar-proxy", tt.consulSvcName), "", nil) + require.NoError(t, err) + require.Len(t, proxyServiceInstances, len(tt.expectedProxySvcInstances)) + for i, instance := range proxyServiceInstances { + require.Equal(t, tt.expectedProxySvcInstances[i].ServiceID, instance.ServiceID) + require.Equal(t, tt.expectedProxySvcInstances[i].ServiceAddress, instance.ServiceAddress) + } + // Check that the Consul health check was created for the k8s pod. + if tt.expectedAgentHealthChecks != nil { + for i := range tt.expectedConsulSvcInstances { + filter := fmt.Sprintf("CheckID == `%s`", tt.expectedAgentHealthChecks[i].CheckID) + check, err := consulClient.Agent().ChecksWithFilter(filter) + require.NoError(t, err) + require.EqualValues(t, len(check), 1) + // Ignoring Namespace because the response from ENT includes it and OSS does not. + var ignoredFields = []string{"Node", "Definition", "Namespace", "Partition"} + require.True(t, cmp.Equal(check[tt.expectedAgentHealthChecks[i].CheckID], tt.expectedAgentHealthChecks[i], cmpopts.IgnoreFields(api.AgentCheck{}, ignoredFields...))) + } + } + + if tt.enableACLs { + // Put expected services into a map to make it easier to find service IDs. + expectedServices := mapset.NewSet() + for _, svc := range tt.expectedConsulSvcInstances { + expectedServices.Add(svc.ServiceID) + } + + initialServices := mapset.NewSet() + for _, svc := range tt.initialConsulSvcs { + initialServices.Add(svc.ID) + } + + // We only care about a case when services are deregistered, where + // the set of initial services is bigger than the set of expected services. + deregisteredServices := initialServices.Difference(expectedServices) + + // Look through the tokens we've created and check that only + // tokens for the deregistered services have been deleted. + for serviceID, tokenID := range tokensForServices { + // Read the token from Consul. + token, _, err := consulClient.ACL().TokenRead(tokenID, nil) + if deregisteredServices.Contains(serviceID) { + require.EqualError(t, err, "Unexpected response code: 403 (ACL not found)") + } else { + require.NoError(t, err, "token should exist for service instance: "+serviceID) + require.NotNil(t, token) + } + } } }) } } // Tests deleting an Endpoints object, with and without matching Consul and K8s service names. -// This test covers Controller.deregisterService when the map is nil (not selectively deregistered). 
+// This test covers EndpointsController.deregisterServiceOnAllAgents when the map is nil (not selectively deregistered). func TestReconcileDeleteEndpoint(t *testing.T) { t.Parallel() + nodeName := "test-node" cases := []struct { name string consulSvcName string expectServicesToBeDeleted bool - initialConsulSvcs []*api.AgentService + initialConsulSvcs []*api.AgentServiceRegistration enableACLs bool + consulClientReady bool }{ { name: "Legacy service: does not delete", consulSvcName: "service-deleted", expectServicesToBeDeleted: false, - initialConsulSvcs: []*api.AgentService{ + initialConsulSvcs: []*api.AgentServiceRegistration{ { ID: "pod1-service-deleted", - Service: "service-deleted", + Name: "service-deleted", Port: 80, Address: "1.2.3.4", Meta: map[string]string{"k8s-service-name": "service-deleted", "k8s-namespace": "default"}, @@ -3679,7 +3379,7 @@ func TestReconcileDeleteEndpoint(t *testing.T) { { Kind: api.ServiceKindConnectProxy, ID: "pod1-service-deleted-sidecar-proxy", - Service: "service-deleted-sidecar-proxy", + Name: "service-deleted-sidecar-proxy", Port: 20000, Address: "1.2.3.4", Proxy: &api.AgentServiceConnectProxyConfig{ @@ -3689,81 +3389,83 @@ func TestReconcileDeleteEndpoint(t *testing.T) { Meta: map[string]string{"k8s-service-name": "service-deleted", "k8s-namespace": "default"}, }, }, + consulClientReady: true, }, { name: "Consul service name matches K8s service name", consulSvcName: "service-deleted", expectServicesToBeDeleted: true, - initialConsulSvcs: []*api.AgentService{ + initialConsulSvcs: []*api.AgentServiceRegistration{ { ID: "pod1-service-deleted", - Service: "service-deleted", + Name: "service-deleted", Port: 80, Address: "1.2.3.4", - Meta: map[string]string{"k8s-service-name": "service-deleted", "k8s-namespace": "default", metaKeyManagedBy: constants.ManagedByValue}, + Meta: map[string]string{"k8s-service-name": "service-deleted", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, }, { Kind: api.ServiceKindConnectProxy, ID: "pod1-service-deleted-sidecar-proxy", - Service: "service-deleted-sidecar-proxy", + Name: "service-deleted-sidecar-proxy", Port: 20000, Address: "1.2.3.4", Proxy: &api.AgentServiceConnectProxyConfig{ DestinationServiceName: "service-deleted", DestinationServiceID: "pod1-service-deleted", }, - Meta: map[string]string{"k8s-service-name": "service-deleted", "k8s-namespace": "default", metaKeyManagedBy: constants.ManagedByValue}, + Meta: map[string]string{"k8s-service-name": "service-deleted", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, }, }, + consulClientReady: true, }, { name: "Consul service name does not match K8s service name", consulSvcName: "different-consul-svc-name", expectServicesToBeDeleted: true, - initialConsulSvcs: []*api.AgentService{ + initialConsulSvcs: []*api.AgentServiceRegistration{ { ID: "pod1-different-consul-svc-name", - Service: "different-consul-svc-name", + Name: "different-consul-svc-name", Port: 80, Address: "1.2.3.4", - Meta: map[string]string{"k8s-service-name": "service-deleted", "k8s-namespace": "default", metaKeyManagedBy: constants.ManagedByValue}, + Meta: map[string]string{"k8s-service-name": "service-deleted", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, }, { Kind: api.ServiceKindConnectProxy, ID: "pod1-different-consul-svc-name-sidecar-proxy", - Service: "different-consul-svc-name-sidecar-proxy", + Name: "different-consul-svc-name-sidecar-proxy", Port: 20000, Address: "1.2.3.4", Proxy: &api.AgentServiceConnectProxyConfig{ DestinationServiceName: 
"different-consul-svc-name", DestinationServiceID: "pod1-different-consul-svc-name", }, - Meta: map[string]string{"k8s-service-name": "service-deleted", "k8s-namespace": "default", metaKeyManagedBy: constants.ManagedByValue}, + Meta: map[string]string{"k8s-service-name": "service-deleted", "k8s-namespace": "default", MetaKeyManagedBy: managedByValue}, }, }, + consulClientReady: true, }, { name: "When ACLs are enabled, the token should be deleted", consulSvcName: "service-deleted", expectServicesToBeDeleted: true, - initialConsulSvcs: []*api.AgentService{ + initialConsulSvcs: []*api.AgentServiceRegistration{ { ID: "pod1-service-deleted", - Service: "service-deleted", + Name: "service-deleted", Port: 80, Address: "1.2.3.4", Meta: map[string]string{ - metaKeyKubeServiceName: "service-deleted", - constants.MetaKeyKubeNS: "default", - metaKeyManagedBy: constants.ManagedByValue, - metaKeySyntheticNode: "true", - constants.MetaKeyPodName: "pod1", + MetaKeyKubeServiceName: "service-deleted", + MetaKeyKubeNS: "default", + MetaKeyManagedBy: managedByValue, + MetaKeyPodName: "pod1", }, }, { Kind: api.ServiceKindConnectProxy, ID: "pod1-service-deleted-sidecar-proxy", - Service: "service-deleted-sidecar-proxy", + Name: "service-deleted-sidecar-proxy", Port: 20000, Address: "1.2.3.4", Proxy: &api.AgentServiceConnectProxyConfig{ @@ -3771,240 +3473,128 @@ func TestReconcileDeleteEndpoint(t *testing.T) { DestinationServiceID: "pod1-service-deleted", }, Meta: map[string]string{ - metaKeyKubeServiceName: "service-deleted", - constants.MetaKeyKubeNS: "default", - metaKeyManagedBy: constants.ManagedByValue, - metaKeySyntheticNode: "true", - constants.MetaKeyPodName: "pod1", - }, - }, - }, - enableACLs: true, - }, - { - name: "Mesh Gateway", - consulSvcName: "service-deleted", - expectServicesToBeDeleted: true, - initialConsulSvcs: []*api.AgentService{ - { - ID: "mesh-gateway", - Kind: api.ServiceKindMeshGateway, - Service: "mesh-gateway", - Port: 80, - Address: "1.2.3.4", - Meta: map[string]string{ - metaKeyKubeServiceName: "service-deleted", - constants.MetaKeyKubeNS: "default", - metaKeyManagedBy: constants.ManagedByValue, - metaKeySyntheticNode: "true", - constants.MetaKeyPodName: "mesh-gateway", - }, - TaggedAddresses: map[string]api.ServiceAddress{ - "lan": { - Address: "1.2.3.4", - Port: 80, - }, - "wan": { - Address: "5.6.7.8", - Port: 8080, - }, + MetaKeyKubeServiceName: "service-deleted", + MetaKeyKubeNS: "default", + MetaKeyManagedBy: managedByValue, + MetaKeyPodName: "pod1", }, }, }, + enableACLs: true, + consulClientReady: true, }, { - name: "When ACLs are enabled, the mesh-gateway token should be deleted", + name: "When Consul client pod is not ready, services are not deleted", consulSvcName: "service-deleted", - expectServicesToBeDeleted: true, - initialConsulSvcs: []*api.AgentService{ + expectServicesToBeDeleted: false, + initialConsulSvcs: []*api.AgentServiceRegistration{ { - ID: "mesh-gateway", - Kind: api.ServiceKindMeshGateway, - Service: "mesh-gateway", + ID: "pod1-service-deleted", + Name: "service-deleted", Port: 80, Address: "1.2.3.4", Meta: map[string]string{ - metaKeyKubeServiceName: "service-deleted", - constants.MetaKeyKubeNS: "default", - metaKeyManagedBy: constants.ManagedByValue, - metaKeySyntheticNode: "true", - constants.MetaKeyPodName: "mesh-gateway", - }, - TaggedAddresses: map[string]api.ServiceAddress{ - "lan": { - Address: "1.2.3.4", - Port: 80, - }, - "wan": { - Address: "5.6.7.8", - Port: 8080, - }, - }, - }, - }, - enableACLs: true, - }, - { - name: "Ingress Gateway", - 
consulSvcName: "service-deleted", - expectServicesToBeDeleted: true, - initialConsulSvcs: []*api.AgentService{ - { - ID: "ingress-gateway", - Kind: api.ServiceKindIngressGateway, - Service: "ingress-gateway", - Port: 21000, - Address: "1.2.3.4", - Meta: map[string]string{ - metaKeyKubeServiceName: "service-deleted", - constants.MetaKeyKubeNS: "default", - metaKeyManagedBy: constants.ManagedByValue, - metaKeySyntheticNode: "true", - constants.MetaKeyPodName: "ingress-gateway", - }, - TaggedAddresses: map[string]api.ServiceAddress{ - "lan": { - Address: "1.2.3.4", - Port: 21000, - }, - "wan": { - Address: "5.6.7.8", - Port: 8080, - }, - }, - }, - }, - }, - { - name: "When ACLs are enabled, the ingress-gateway token should be deleted", - consulSvcName: "service-deleted", - expectServicesToBeDeleted: true, - initialConsulSvcs: []*api.AgentService{ - { - ID: "ingress-gateway", - Kind: api.ServiceKindIngressGateway, - Service: "ingress-gateway", - Port: 21000, - Address: "1.2.3.4", - Meta: map[string]string{ - metaKeyKubeServiceName: "service-deleted", - constants.MetaKeyKubeNS: "default", - metaKeyManagedBy: constants.ManagedByValue, - metaKeySyntheticNode: "true", - constants.MetaKeyPodName: "ingress-gateway", - }, - TaggedAddresses: map[string]api.ServiceAddress{ - "lan": { - Address: "1.2.3.4", - Port: 21000, - }, - "wan": { - Address: "5.6.7.8", - Port: 8080, - }, + MetaKeyKubeServiceName: "service-deleted", + MetaKeyKubeNS: "default", + MetaKeyManagedBy: managedByValue, + MetaKeyPodName: "pod1", }, }, - }, - enableACLs: true, - }, - { - name: "Terminating Gateway", - consulSvcName: "service-deleted", - expectServicesToBeDeleted: true, - initialConsulSvcs: []*api.AgentService{ { - ID: "terminating-gateway", - Kind: api.ServiceKindTerminatingGateway, - Service: "terminating-gateway", - Port: 8443, + Kind: api.ServiceKindConnectProxy, + ID: "pod1-service-deleted-sidecar-proxy", + Name: "service-deleted-sidecar-proxy", + Port: 20000, Address: "1.2.3.4", - Meta: map[string]string{ - metaKeyKubeServiceName: "service-deleted", - constants.MetaKeyKubeNS: "default", - metaKeyManagedBy: constants.ManagedByValue, - metaKeySyntheticNode: "true", - constants.MetaKeyPodName: "terminating-gateway", + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "service-deleted", + DestinationServiceID: "pod1-service-deleted", }, - }, - }, - }, - { - name: "When ACLs are enabled, the terminating-gateway token should be deleted", - consulSvcName: "service-deleted", - expectServicesToBeDeleted: true, - initialConsulSvcs: []*api.AgentService{ - { - ID: "terminating-gateway", - Kind: api.ServiceKindTerminatingGateway, - Service: "terminating-gateway", - Port: 8443, - Address: "1.2.3.4", Meta: map[string]string{ - metaKeyKubeServiceName: "service-deleted", - constants.MetaKeyKubeNS: "default", - metaKeyManagedBy: constants.ManagedByValue, - metaKeySyntheticNode: "true", - constants.MetaKeyPodName: "terminating-gateway", + MetaKeyKubeServiceName: "service-deleted", + MetaKeyKubeNS: "default", + MetaKeyManagedBy: managedByValue, + MetaKeyPodName: "pod1", }, }, }, - enableACLs: true, + consulClientReady: false, }, } for _, tt := range cases { t.Run(tt.name, func(t *testing.T) { + // The agent pod needs to have the address 127.0.0.1 so when the + // code gets the agent pods via the label component=client, and + // makes requests against the agent API, it will actually hit the + // test server we have on localhost. 
+ fakeClientPod := createPod("fake-consul-client", "127.0.0.1", false, true) + fakeClientPod.Labels = map[string]string{"component": "client", "app": "consul", "release": "consul"} + if !tt.consulClientReady { + fakeClientPod.Status.Conditions = []corev1.PodCondition{{Type: corev1.PodReady, Status: corev1.ConditionFalse}} + } + // Add the default namespace. ns := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "default"}} - node := corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: nodeName}} // Create fake k8s client. - fakeClient := fake.NewClientBuilder().WithRuntimeObjects(&ns, &node).Build() + fakeClient := fake.NewClientBuilder().WithRuntimeObjects(fakeClientPod, &ns).Build() - // Create test consulServer server + // Create test consul server. adminToken := "123e4567-e89b-12d3-a456-426614174000" - testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { + consul, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { if tt.enableACLs { - c.ACL.Enabled = tt.enableACLs + c.ACL.Enabled = true c.ACL.Tokens.InitialManagement = adminToken } + c.NodeName = nodeName }) - consulClient := testClient.APIClient + require.NoError(t, err) + defer consul.Stop() + + consul.WaitForServiceIntentions(t) + cfg := &api.Config{Address: consul.HTTPAddr} + if tt.enableACLs { + cfg.Token = adminToken + } + consulClient, err := api.NewClient(cfg) + require.NoError(t, err) + addr := strings.Split(consul.HTTPAddr, ":") + consulPort := addr[1] // Register service and proxy in consul var token *api.ACLToken for _, svc := range tt.initialConsulSvcs { - serviceRegistration := &api.CatalogRegistration{ - Node: consulNodeName, - Address: consulNodeAddress, - Service: svc, - } - _, err := consulClient.Catalog().Register(serviceRegistration, nil) + err = consulClient.Agent().ServiceRegister(svc) require.NoError(t, err) // Create a token for it if ACLs are enabled. if tt.enableACLs { - test.SetupK8sAuthMethod(t, consulClient, svc.Service, "default") - token, _, err = consulClient.ACL().Login(&api.ACLLoginParams{ - AuthMethod: test.AuthMethod, - BearerToken: test.ServiceAccountJWTToken, - Meta: map[string]string{ - "pod": fmt.Sprintf("%s/%s", svc.Meta[constants.MetaKeyKubeNS], svc.Meta[constants.MetaKeyPodName]), - "component": tt.consulSvcName, - }, - }, nil) - require.NoError(t, err) + test.SetupK8sAuthMethod(t, consulClient, svc.Name, "default") + if svc.Kind != api.ServiceKindConnectProxy { + token, _, err = consulClient.ACL().Login(&api.ACLLoginParams{ + AuthMethod: test.AuthMethod, + BearerToken: test.ServiceAccountJWTToken, + Meta: map[string]string{ + "pod": fmt.Sprintf("%s/%s", svc.Meta[MetaKeyKubeNS], svc.Meta[MetaKeyPodName]), + }, + }, nil) + + require.NoError(t, err) + } } } // Create the endpoints controller - ep := &Controller{ + ep := &EndpointsController{ Client: fakeClient, Log: logrtest.TestLogger{T: t}, - ConsulClientConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, + ConsulClient: consulClient, + ConsulPort: consulPort, + ConsulScheme: "http", AllowK8sNamespacesSet: mapset.NewSetWith("*"), DenyK8sNamespacesSet: mapset.NewSetWith(), ReleaseName: "consul", ReleaseNamespace: "default", + ConsulClientCfg: cfg, } if tt.enableACLs { ep.AuthMethod = test.AuthMethod @@ -4048,7 +3638,8 @@ func TestReconcileDeleteEndpoint(t *testing.T) { // label is added. 
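+// Both directions are covered: an already-registered service is deregistered,
+// and an unregistered service is never registered in the first place.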
func TestReconcileIgnoresServiceIgnoreLabel(t *testing.T) { t.Parallel() - svcName := "service-ignored" + nodeName := "test-node" + serviceName := "service-ignored" namespace := "default" cases := map[string]struct { @@ -4059,14 +3650,14 @@ func TestReconcileIgnoresServiceIgnoreLabel(t *testing.T) { "Registered endpoint with label is deregistered.": { svcInitiallyRegistered: true, serviceLabels: map[string]string{ - constants.LabelServiceIgnore: "true", + labelServiceIgnore: "true", }, expectedNumSvcInstances: 0, }, "Not registered endpoint with label is never registered": { svcInitiallyRegistered: false, serviceLabels: map[string]string{ - constants.LabelServiceIgnore: "true", + labelServiceIgnore: "true", }, expectedNumSvcInstances: 0, }, @@ -4087,7 +3678,7 @@ func TestReconcileIgnoresServiceIgnoreLabel(t *testing.T) { // Set up the fake Kubernetes client with an endpoint, pod, consul client, and the default namespace. endpoint := &corev1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ - Name: svcName, + Name: serviceName, Namespace: namespace, Labels: tt.serviceLabels, }, @@ -4095,7 +3686,8 @@ func TestReconcileIgnoresServiceIgnoreLabel(t *testing.T) { { Addresses: []corev1.EndpointAddress{ { - IP: "1.2.3.4", + IP: "1.2.3.4", + NodeName: &nodeName, TargetRef: &corev1.ObjectReference{ Kind: "Pod", Name: "pod1", @@ -4106,63 +3698,78 @@ func TestReconcileIgnoresServiceIgnoreLabel(t *testing.T) { }, }, } - pod1 := createServicePod("pod1", "1.2.3.4", true, true) + pod1 := createPod("pod1", "1.2.3.4", true, true) + fakeClientPod := createPod("fake-consul-client", "127.0.0.1", false, true) + fakeClientPod.Labels = map[string]string{"component": "client", "app": "consul", "release": "consul"} ns := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} - node := corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: nodeName}} - k8sObjects := []runtime.Object{endpoint, pod1, &ns, &node} + k8sObjects := []runtime.Object{endpoint, pod1, fakeClientPod, &ns} fakeClient := fake.NewClientBuilder().WithRuntimeObjects(k8sObjects...).Build() - // Create test consulServer server - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - consulClient := testClient.APIClient + // Create test Consul server. + consul, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { c.NodeName = nodeName }) + require.NoError(t, err) + defer consul.Stop() + consul.WaitForServiceIntentions(t) + cfg := &api.Config{Address: consul.HTTPAddr} + consulClient, err := api.NewClient(cfg) + require.NoError(t, err) + addr := strings.Split(consul.HTTPAddr, ":") + consulPort := addr[1] // Set up the initial Consul services. 
if tt.svcInitiallyRegistered { - serviceRegistration := &api.CatalogRegistration{ - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - ID: "pod1-" + svcName, - Service: svcName, - Port: 0, - Address: "1.2.3.4", - Meta: map[string]string{ - constants.MetaKeyKubeNS: namespace, - metaKeyKubeServiceName: svcName, - metaKeyManagedBy: constants.ManagedByValue, - metaKeySyntheticNode: "true", - constants.MetaKeyPodName: "pod1", - }, + err = consulClient.Agent().ServiceRegister(&api.AgentServiceRegistration{ + ID: "pod1-" + serviceName, + Name: serviceName, + Port: 0, + Address: "1.2.3.4", + Meta: map[string]string{ + "k8s-namespace": namespace, + "k8s-service-name": serviceName, + "managed-by": "consul-k8s-endpoints-controller", + "pod-name": "pod1", }, - } - _, err := consulClient.Catalog().Register(serviceRegistration, nil) + }) require.NoError(t, err) + err = consulClient.Agent().ServiceRegister(&api.AgentServiceRegistration{ + ID: "pod1-sidecar-proxy-" + serviceName, + Name: serviceName + "-sidecar-proxy", + Port: 0, + Meta: map[string]string{ + "k8s-namespace": namespace, + "k8s-service-name": serviceName, + "managed-by": "consul-k8s-endpoints-controller", + "pod-name": "pod1", + }, + }) require.NoError(t, err) } // Create the endpoints controller. - ep := &Controller{ + ep := &EndpointsController{ Client: fakeClient, Log: logrtest.TestLogger{T: t}, - ConsulClientConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, + ConsulClient: consulClient, + ConsulPort: consulPort, + ConsulScheme: "http", AllowK8sNamespacesSet: mapset.NewSetWith("*"), DenyK8sNamespacesSet: mapset.NewSetWith(), ReleaseName: "consul", ReleaseNamespace: namespace, + ConsulClientCfg: cfg, } // Run the reconcile process to deregister the service if it was registered before. - namespacedName := types.NamespacedName{Namespace: namespace, Name: svcName} + namespacedName := types.NamespacedName{Namespace: namespace, Name: serviceName} resp, err := ep.Reconcile(context.Background(), ctrl.Request{NamespacedName: namespacedName}) require.NoError(t, err) require.False(t, resp.Requeue) // Check that the correct number of services are registered with Consul. - serviceInstances, _, err := consulClient.Catalog().Service(svcName, "", nil) + serviceInstances, _, err := consulClient.Catalog().Service(serviceName, "", nil) require.NoError(t, err) require.Len(t, serviceInstances, tt.expectedNumSvcInstances) - proxyServiceInstances, _, err := consulClient.Catalog().Service(svcName+"-sidecar-proxy", "", nil) + proxyServiceInstances, _, err := consulClient.Catalog().Service(serviceName+"-sidecar-proxy", "", nil) require.NoError(t, err) require.Len(t, proxyServiceInstances, tt.expectedNumSvcInstances) }) @@ -4172,6 +3779,7 @@ func TestReconcileIgnoresServiceIgnoreLabel(t *testing.T) { // Test that when an endpoints pod specifies the name for the Kubernetes service it wants to use // for registration, all other endpoints for that pod are skipped. func TestReconcile_podSpecifiesExplicitService(t *testing.T) { + nodeName := "test-node" namespace := "default" // Set up the fake Kubernetes client with a few endpoints, pod, consul client, and the default namespace. 
@@ -4184,7 +3792,8 @@ func TestReconcile_podSpecifiesExplicitService(t *testing.T) { { Addresses: []corev1.EndpointAddress{ { - IP: "1.2.3.4", + IP: "1.2.3.4", + NodeName: &nodeName, TargetRef: &corev1.ObjectReference{ Kind: "Pod", Name: "pod1", @@ -4204,7 +3813,8 @@ func TestReconcile_podSpecifiesExplicitService(t *testing.T) { { Addresses: []corev1.EndpointAddress{ { - IP: "1.2.3.4", + IP: "1.2.3.4", + NodeName: &nodeName, TargetRef: &corev1.ObjectReference{ Kind: "Pod", Name: "pod1", @@ -4215,598 +3825,821 @@ func TestReconcile_podSpecifiesExplicitService(t *testing.T) { }, }, } - pod1 := createServicePod("pod1", "1.2.3.4", true, true) - pod1.Annotations[constants.AnnotationKubernetesService] = endpoint.Name + pod1 := createPod("pod1", "1.2.3.4", true, true) + pod1.Annotations[annotationKubernetesService] = endpoint.Name + fakeClientPod := createPod("fake-consul-client", "127.0.0.1", false, true) + fakeClientPod.Labels = map[string]string{"component": "client", "app": "consul", "release": "consul"} ns := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} - node := corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: nodeName}} - k8sObjects := []runtime.Object{badEndpoint, endpoint, pod1, &ns, &node} + k8sObjects := []runtime.Object{badEndpoint, endpoint, pod1, fakeClientPod, &ns} fakeClient := fake.NewClientBuilder().WithRuntimeObjects(k8sObjects...).Build() - // Create test consulServer server - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - consulClient := testClient.APIClient + // Create test Consul server. + consul, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { c.NodeName = nodeName }) + require.NoError(t, err) + defer consul.Stop() + consul.WaitForServiceIntentions(t) + cfg := &api.Config{Address: consul.HTTPAddr} + consulClient, err := api.NewClient(cfg) + require.NoError(t, err) + addr := strings.Split(consul.HTTPAddr, ":") + consulPort := addr[1] // Create the endpoints controller. 
- ep := &Controller{ + ep := &EndpointsController{ Client: fakeClient, Log: logrtest.TestLogger{T: t}, - ConsulClientConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, + ConsulClient: consulClient, + ConsulPort: consulPort, + ConsulScheme: "http", AllowK8sNamespacesSet: mapset.NewSetWith("*"), DenyK8sNamespacesSet: mapset.NewSetWith(), ReleaseName: "consul", ReleaseNamespace: namespace, + ConsulClientCfg: cfg, } - svcName := badEndpoint.Name + serviceName := badEndpoint.Name // Initially register the pod with the bad endpoint - _, err := consulClient.Catalog().Register(&api.CatalogRegistration{ - Node: consulNodeName, - Address: consulNodeAddress, - Service: &api.AgentService{ - ID: "pod1-" + svcName, - Service: svcName, - Port: 0, - Address: "1.2.3.4", - Meta: map[string]string{ - "k8s-namespace": namespace, - "k8s-service-name": svcName, - "managed-by": "consul-k8s-endpoints-controller", - "pod-name": "pod1", - }, - }, - }, nil) + err = consulClient.Agent().ServiceRegister(&api.AgentServiceRegistration{ + ID: "pod1-" + serviceName, + Name: serviceName, + Port: 0, + Address: "1.2.3.4", + Meta: map[string]string{ + "k8s-namespace": namespace, + "k8s-service-name": serviceName, + "managed-by": "consul-k8s-endpoints-controller", + "pod-name": "pod1", + }, + }) require.NoError(t, err) - serviceInstances, _, err := consulClient.Catalog().Service(svcName, "", nil) + serviceInstances, _, err := consulClient.Catalog().Service(serviceName, "", nil) require.NoError(t, err) require.Len(t, serviceInstances, 1) // Run the reconcile process to check service deregistration. - namespacedName := types.NamespacedName{Namespace: badEndpoint.Namespace, Name: svcName} + namespacedName := types.NamespacedName{Namespace: badEndpoint.Namespace, Name: serviceName} resp, err := ep.Reconcile(context.Background(), ctrl.Request{NamespacedName: namespacedName}) require.NoError(t, err) require.False(t, resp.Requeue) // Check that the service has been deregistered with Consul. - serviceInstances, _, err = consulClient.Catalog().Service(svcName, "", nil) + serviceInstances, _, err = consulClient.Catalog().Service(serviceName, "", nil) require.NoError(t, err) require.Len(t, serviceInstances, 0) - proxyServiceInstances, _, err := consulClient.Catalog().Service(svcName+"-sidecar-proxy", "", nil) + proxyServiceInstances, _, err := consulClient.Catalog().Service(serviceName+"-sidecar-proxy", "", nil) require.NoError(t, err) require.Len(t, proxyServiceInstances, 0) // Run the reconcile again with the service we want to register. - svcName = endpoint.Name - namespacedName = types.NamespacedName{Namespace: endpoint.Namespace, Name: svcName} + serviceName = endpoint.Name + namespacedName = types.NamespacedName{Namespace: endpoint.Namespace, Name: serviceName} resp, err = ep.Reconcile(context.Background(), ctrl.Request{NamespacedName: namespacedName}) require.NoError(t, err) require.False(t, resp.Requeue) // Check that the correct services are registered with Consul. 
- serviceInstances, _, err = consulClient.Catalog().Service(svcName, "", nil) + serviceInstances, _, err = consulClient.Catalog().Service(serviceName, "", nil) require.NoError(t, err) require.Len(t, serviceInstances, 1) - proxyServiceInstances, _, err = consulClient.Catalog().Service(svcName+"-sidecar-proxy", "", nil) + proxyServiceInstances, _, err = consulClient.Catalog().Service(serviceName+"-sidecar-proxy", "", nil) require.NoError(t, err) require.Len(t, proxyServiceInstances, 1) } -func TestServiceInstancesForK8SServiceNameAndNamespace(t *testing.T) { +func TestFilterAgentPods(t *testing.T) { t.Parallel() - - const ( - k8sSvc = "k8s-svc" - k8sNS = "k8s-ns" - ) - cases := []struct { - name string - k8sServiceNameMeta string - k8sNamespaceMeta string - expected []*api.AgentService + cases := map[string]struct { + object client.Object + expected bool }{ - { - "no k8s service name or namespace meta", - "", - "", - nil, + "label[app]=consul label[component]=client label[release] consul": { + object: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": "consul", + "component": "client", + "release": "consul", + }, + }, + }, + expected: true, }, - { - "k8s service name set, but no namespace meta", - k8sSvc, - "", - nil, + "no labels": { + object: &corev1.Pod{}, + expected: false, }, - { - "k8s namespace set, but no k8s service name meta", - "", - k8sNS, - nil, + "label[app] empty": { + object: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "component": "client", + "release": "consul", + }, + }, + }, + expected: false, }, - { - "both k8s service name and namespace set", - k8sSvc, - k8sNS, - []*api.AgentService{ - { - ID: "foo1", - Service: "foo", - Meta: map[string]string{"k8s-service-name": k8sSvc, "k8s-namespace": k8sNS}, + "label[component] empty": { + object: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": "consul", + "release": "consul", + }, }, - { - Kind: api.ServiceKindConnectProxy, - ID: "foo1-proxy", - Service: "foo-sidecar-proxy", - Port: 20000, - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "foo", - DestinationServiceID: "foo1", + }, + expected: false, + }, + "label[release] empty": { + object: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": "consul", + "component": "client", }, - Meta: map[string]string{"k8s-service-name": k8sSvc, "k8s-namespace": k8sNS}, }, }, + expected: false, }, - } - - for _, c := range cases { - t.Run(c.name, func(t *testing.T) { - servicesInConsul := []*api.AgentService{ - { - ID: "foo1", - Service: "foo", - Tags: []string{}, - Meta: map[string]string{"k8s-service-name": c.k8sServiceNameMeta, "k8s-namespace": c.k8sNamespaceMeta}, + "label[app]!=consul label[component]=client label[release]=consul": { + object: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": "not-consul", + "component": "client", + "release": "consul", + }, }, - { - Kind: api.ServiceKindConnectProxy, - ID: "foo1-proxy", - Service: "foo-sidecar-proxy", - Port: 20000, - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "foo", - DestinationServiceID: "foo1", + }, + expected: false, + }, + "label[component]!=client label[app]=consul label[release]=consul": { + object: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": "consul", + "component": "not-client", + "release": "consul", }, - Meta: map[string]string{"k8s-service-name": c.k8sServiceNameMeta, 
"k8s-namespace": c.k8sNamespaceMeta}, }, - { - ID: "k8s-service-different-ns-id", - Service: "k8s-service-different-ns", - Meta: map[string]string{"k8s-service-name": c.k8sServiceNameMeta, "k8s-namespace": "different-ns"}, + }, + expected: false, + }, + "label[release]!=consul label[app]=consul label[component]=client": { + object: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": "consul", + "component": "client", + "release": "not-consul", + }, }, - { - Kind: api.ServiceKindConnectProxy, - ID: "k8s-service-different-ns-proxy", - Service: "k8s-service-different-ns-proxy", - Port: 20000, - Tags: []string{}, - Proxy: &api.AgentServiceConnectProxyConfig{ - DestinationServiceName: "k8s-service-different-ns", - DestinationServiceID: "k8s-service-different-ns-id", + }, + expected: false, + }, + "label[app]!=consul label[component]!=client label[release]!=consul": { + object: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": "not-consul", + "component": "not-client", + "release": "not-consul", }, - Meta: map[string]string{"k8s-service-name": c.k8sServiceNameMeta, "k8s-namespace": "different-ns"}, }, - } - - consul, err := testutil.NewTestServerConfigT(t, nil) - require.NoError(t, err) - defer consul.Stop() - - consul.WaitForServiceIntentions(t) - consulClient, err := api.NewClient(&api.Config{ - Address: consul.HTTPAddr, - }) - require.NoError(t, err) + }, + expected: false, + }, + } - for _, svc := range servicesInConsul { - catalogRegistration := &api.CatalogRegistration{ - Node: consulNodeName, - Address: "127.0.0.1", - Service: svc, - } - _, err = consulClient.Catalog().Register(catalogRegistration, nil) - require.NoError(t, err) + for name, test := range cases { + t.Run(name, func(t *testing.T) { + controller := EndpointsController{ + ReleaseName: "consul", } - ep := Controller{} - svcs, err := ep.serviceInstancesForK8SServiceNameAndNamespace(consulClient, k8sSvc, k8sNS, consulNodeName) - require.NoError(t, err) - if len(svcs.Services) > 0 { - require.Len(t, svcs, 2) - require.NotNil(t, c.expected[0], svcs.Services[0]) - require.Equal(t, c.expected[0].Service, svcs.Services[0].Service) - require.NotNil(t, c.expected[1], svcs.Services[1]) - require.Equal(t, c.expected[1].Service, svcs.Services[1].Service) - } + result := controller.filterAgentPods(test.object) + require.Equal(t, test.expected, result) }) } } -func TestCreateServiceRegistrations_withTransparentProxy(t *testing.T) { +func TestRequestsForRunningAgentPods(t *testing.T) { t.Parallel() - - const serviceName = "test-service" - cases := map[string]struct { - tproxyGlobalEnabled bool - overwriteProbes bool - podContainers []corev1.Container - podAnnotations map[string]string - namespaceLabels map[string]string - service *corev1.Service - expTaggedAddresses map[string]api.ServiceAddress - expProxyMode api.ProxyMode - expExposePaths []api.ExposePath - expErr string + agentPod *corev1.Pod + existingEndpoints []*corev1.Endpoints + expectedRequests []ctrl.Request }{ - "tproxy enabled globally, annotation not provided": { - tproxyGlobalEnabled: true, - podContainers: []corev1.Container{ - { - Name: "test", - Ports: []corev1.ContainerPort{ - { - Name: "tcp", - ContainerPort: 8081, - }, + "pod=running, all endpoints need to be reconciled": { + agentPod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "consul-agent", + }, + Spec: corev1.PodSpec{ + NodeName: "node-foo", + }, + Status: corev1.PodStatus{ + Conditions: []corev1.PodCondition{ { - Name: "http", - 
ContainerPort: 8080, + Type: corev1.PodReady, + Status: corev1.ConditionTrue, }, }, + Phase: corev1.PodRunning, }, }, - service: &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: serviceName, - Namespace: "default", - }, - Spec: corev1.ServiceSpec{ - ClusterIP: "10.0.0.1", - Ports: []corev1.ServicePort{ + existingEndpoints: []*corev1.Endpoints{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "endpoint-1", + }, + Subsets: []corev1.EndpointSubset{ { - Port: 8081, + Addresses: []corev1.EndpointAddress{ + { + NodeName: toStringPtr("node-foo"), + }, + }, + NotReadyAddresses: []corev1.EndpointAddress{ + { + NodeName: toStringPtr("node-bar"), + }, + }, }, }, }, }, - expProxyMode: api.ProxyModeTransparent, - expTaggedAddresses: map[string]api.ServiceAddress{ - "virtual": { - Address: "10.0.0.1", - Port: 8081, - }, - }, - expErr: "", - }, - "tproxy enabled globally, annotation is false": { - tproxyGlobalEnabled: true, - podAnnotations: map[string]string{constants.KeyTransparentProxy: "false"}, - podContainers: []corev1.Container{ + expectedRequests: []ctrl.Request{ { - Name: "test", - Ports: []corev1.ContainerPort{ - { - Name: "tcp", - ContainerPort: 8081, - }, - { - Name: "http", - ContainerPort: 8080, - }, + NamespacedName: types.NamespacedName{ + Name: "endpoint-1", }, }, }, - service: &corev1.Service{ + }, + "pod=running, endpoints with ready address need to be reconciled": { + agentPod: &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ - Name: serviceName, - Namespace: "default", + Name: "consul-agent", }, - Spec: corev1.ServiceSpec{ - ClusterIP: "10.0.0.1", - Ports: []corev1.ServicePort{ + Spec: corev1.PodSpec{ + NodeName: "node-foo", + }, + Status: corev1.PodStatus{ + Conditions: []corev1.PodCondition{ { - Port: 80, + Type: corev1.PodReady, + Status: corev1.ConditionTrue, }, }, + Phase: corev1.PodRunning, }, }, - expProxyMode: api.ProxyModeDefault, - expTaggedAddresses: nil, - expErr: "", - }, - "tproxy enabled globally, annotation is true": { - tproxyGlobalEnabled: true, - podAnnotations: map[string]string{constants.KeyTransparentProxy: "true"}, - podContainers: []corev1.Container{ + existingEndpoints: []*corev1.Endpoints{ { - Name: "test", - Ports: []corev1.ContainerPort{ - { - Name: "tcp", - ContainerPort: 8081, - }, + ObjectMeta: metav1.ObjectMeta{ + Name: "endpoint-1", + }, + Subsets: []corev1.EndpointSubset{ { - Name: "http", - ContainerPort: 8080, + Addresses: []corev1.EndpointAddress{ + { + NodeName: toStringPtr("node-foo"), + }, + }, }, }, }, }, - service: &corev1.Service{ + expectedRequests: []ctrl.Request{ + { + NamespacedName: types.NamespacedName{ + Name: "endpoint-1", + }, + }, + }, + }, + "pod=running, endpoints with not-ready address need to be reconciled": { + agentPod: &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ - Name: serviceName, - Namespace: "default", + Name: "consul-agent", }, - Spec: corev1.ServiceSpec{ - ClusterIP: "10.0.0.1", - Ports: []corev1.ServicePort{ + Spec: corev1.PodSpec{ + NodeName: "node-foo", + }, + Status: corev1.PodStatus{ + Conditions: []corev1.PodCondition{ { - Port: 8081, + Type: corev1.PodReady, + Status: corev1.ConditionTrue, }, }, + Phase: corev1.PodRunning, }, }, - expProxyMode: api.ProxyModeTransparent, - expTaggedAddresses: map[string]api.ServiceAddress{ - "virtual": { - Address: "10.0.0.1", - Port: 8081, + existingEndpoints: []*corev1.Endpoints{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "endpoint-1", + }, + Subsets: []corev1.EndpointSubset{ + { + NotReadyAddresses: []corev1.EndpointAddress{ + { + NodeName: toStringPtr("node-foo"), + }, + }, 
+ }, + }, + }, + }, + expectedRequests: []ctrl.Request{ + { + NamespacedName: types.NamespacedName{ + Name: "endpoint-1", + }, }, }, - expErr: "", }, - "tproxy disabled globally, annotation not provided": { - tproxyGlobalEnabled: false, - podAnnotations: nil, - service: &corev1.Service{ + "pod=running, some endpoints need to be reconciled": { + agentPod: &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ - Name: serviceName, - Namespace: "default", + Name: "consul-agent", }, - Spec: corev1.ServiceSpec{ - ClusterIP: "10.0.0.1", - Ports: []corev1.ServicePort{ + Spec: corev1.PodSpec{ + NodeName: "node-foo", + }, + Status: corev1.PodStatus{ + Conditions: []corev1.PodCondition{ { - Port: 80, + Type: corev1.PodReady, + Status: corev1.ConditionTrue, }, }, + Phase: corev1.PodRunning, }, }, - expProxyMode: api.ProxyModeDefault, - expTaggedAddresses: nil, - expErr: "", - }, - "tproxy disabled globally, annotation is false": { - tproxyGlobalEnabled: false, - podAnnotations: map[string]string{constants.KeyTransparentProxy: "false"}, - podContainers: []corev1.Container{ + existingEndpoints: []*corev1.Endpoints{ { - Name: "test", - Ports: []corev1.ContainerPort{ + ObjectMeta: metav1.ObjectMeta{ + Name: "endpoint-1", + }, + Subsets: []corev1.EndpointSubset{ { - Name: "tcp", - ContainerPort: 8081, + Addresses: []corev1.EndpointAddress{ + { + NodeName: toStringPtr("node-foo"), + }, + }, + NotReadyAddresses: []corev1.EndpointAddress{ + { + NodeName: toStringPtr("node-bar"), + }, + }, }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "endpoint-2", + }, + Subsets: []corev1.EndpointSubset{ { - Name: "http", - ContainerPort: 8080, + Addresses: []corev1.EndpointAddress{ + { + NodeName: toStringPtr("node-other"), + }, + }, + NotReadyAddresses: []corev1.EndpointAddress{ + { + NodeName: toStringPtr("node-baz"), + }, + }, }, }, }, - }, - service: &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: serviceName, - Namespace: "default", - }, - Spec: corev1.ServiceSpec{ - ClusterIP: "10.0.0.1", - Ports: []corev1.ServicePort{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "endpoint-3", + }, + Subsets: []corev1.EndpointSubset{ { - Port: 80, + Addresses: []corev1.EndpointAddress{ + { + NodeName: toStringPtr("node-foo"), + }, + }, + NotReadyAddresses: []corev1.EndpointAddress{ + { + NodeName: toStringPtr("node-baz"), + }, + }, }, }, }, }, - expProxyMode: api.ProxyModeDefault, - expTaggedAddresses: nil, - expErr: "", - }, - "tproxy disabled globally, annotation is true": { - tproxyGlobalEnabled: false, - podContainers: []corev1.Container{ + expectedRequests: []ctrl.Request{ { - Name: "test", - Ports: []corev1.ContainerPort{ - { - Name: "tcp", - ContainerPort: 8081, - }, - { - Name: "http", - ContainerPort: 8080, - }, + NamespacedName: types.NamespacedName{ + Name: "endpoint-1", + }, + }, + { + NamespacedName: types.NamespacedName{ + Name: "endpoint-3", }, }, }, - podAnnotations: map[string]string{constants.KeyTransparentProxy: "true"}, - service: &corev1.Service{ + }, + "pod=running, no endpoints need to be reconciled": { + agentPod: &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ - Name: serviceName, - Namespace: "default", + Name: "consul-agent", }, - Spec: corev1.ServiceSpec{ - ClusterIP: "10.0.0.1", - Ports: []corev1.ServicePort{ + Spec: corev1.PodSpec{ + NodeName: "node-foo", + }, + Status: corev1.PodStatus{ + Conditions: []corev1.PodCondition{ { - Port: 8081, + Type: corev1.PodReady, + Status: corev1.ConditionTrue, }, }, + Phase: corev1.PodRunning, }, }, - expProxyMode: api.ProxyModeTransparent, - expTaggedAddresses: 
map[string]api.ServiceAddress{ - "virtual": { - Address: "10.0.0.1", - Port: 8081, - }, - }, - expErr: "", - }, - "tproxy disabled globally, namespace enabled, no annotation": { - tproxyGlobalEnabled: false, - podContainers: []corev1.Container{ + existingEndpoints: []*corev1.Endpoints{ { - Name: "test", - Ports: []corev1.ContainerPort{ + ObjectMeta: metav1.ObjectMeta{ + Name: "endpoint-1", + }, + Subsets: []corev1.EndpointSubset{ { - Name: "tcp", - ContainerPort: 8081, + Addresses: []corev1.EndpointAddress{ + { + NodeName: toStringPtr("node-baz"), + }, + }, + NotReadyAddresses: []corev1.EndpointAddress{ + { + NodeName: toStringPtr("node-bar"), + }, + }, }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "endpoint-2", + }, + Subsets: []corev1.EndpointSubset{ { - Name: "http", - ContainerPort: 8080, + Addresses: []corev1.EndpointAddress{ + { + NodeName: toStringPtr("node-bar"), + }, + }, + NotReadyAddresses: []corev1.EndpointAddress{ + { + NodeName: toStringPtr("node-baz"), + }, + }, }, }, }, - }, - service: &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: serviceName, - Namespace: "default", - }, - Spec: corev1.ServiceSpec{ - ClusterIP: "10.0.0.1", - Ports: []corev1.ServicePort{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "endpoint-3", + }, + Subsets: []corev1.EndpointSubset{ { - Port: 8081, + Addresses: []corev1.EndpointAddress{ + { + NodeName: toStringPtr("node-bar"), + }, + }, + NotReadyAddresses: []corev1.EndpointAddress{ + { + NodeName: toStringPtr("node-baz"), + }, + }, }, }, }, }, - expProxyMode: api.ProxyModeTransparent, - expTaggedAddresses: map[string]api.ServiceAddress{ - "virtual": { - Address: "10.0.0.1", - Port: 8081, - }, - }, - namespaceLabels: map[string]string{constants.KeyTransparentProxy: "true"}, - expErr: "", + expectedRequests: []ctrl.Request{}, }, - "tproxy enabled globally, namespace disabled, no annotation": { - tproxyGlobalEnabled: true, - service: &corev1.Service{ + "pod not ready, no endpoints need to be reconciled": { + agentPod: &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ - Name: serviceName, - Namespace: "default", + Name: "consul-agent", }, - Spec: corev1.ServiceSpec{ - ClusterIP: "10.0.0.1", - Ports: []corev1.ServicePort{ + Spec: corev1.PodSpec{ + NodeName: "node-foo", + }, + Status: corev1.PodStatus{ + Conditions: []corev1.PodCondition{ { - Port: 80, + Type: corev1.PodReady, + Status: corev1.ConditionFalse, }, }, + Phase: corev1.PodRunning, }, }, - expProxyMode: api.ProxyModeDefault, - expTaggedAddresses: nil, - namespaceLabels: map[string]string{constants.KeyTransparentProxy: "false"}, - expErr: "", - }, - // This case is impossible since we're always passing an endpoints object to this function, - // and Kubernetes will ensure that there is only an endpoints object if there is a service object. - // However, we're testing this case to check that we return an error in case we cannot get the service from k8s. 
- "no service": { - tproxyGlobalEnabled: true, - service: nil, - expTaggedAddresses: nil, - expProxyMode: api.ProxyModeDefault, - expErr: "services \"test-service\" not found", - }, - "service with a single port without a target port": { - tproxyGlobalEnabled: true, - podContainers: []corev1.Container{ + existingEndpoints: []*corev1.Endpoints{ { - Name: "test", - Ports: []corev1.ContainerPort{ + ObjectMeta: metav1.ObjectMeta{ + Name: "endpoint-1", + }, + Subsets: []corev1.EndpointSubset{ { - Name: "tcp", - ContainerPort: 8081, + Addresses: []corev1.EndpointAddress{ + { + NodeName: toStringPtr("node-foo"), + }, + }, }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "endpoint-3", + }, + Subsets: []corev1.EndpointSubset{ { - Name: "http", - ContainerPort: 8080, + Addresses: []corev1.EndpointAddress{ + { + NodeName: toStringPtr("node-foo"), + }, + }, }, }, }, }, - service: &corev1.Service{ + expectedRequests: []ctrl.Request{}, + }, + "pod not running, no endpoints need to be reconciled": { + agentPod: &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ - Name: serviceName, - Namespace: "default", + Name: "consul-agent", }, - Spec: corev1.ServiceSpec{ - ClusterIP: "10.0.0.1", - Ports: []corev1.ServicePort{ + Spec: corev1.PodSpec{ + NodeName: "node-foo", + }, + Status: corev1.PodStatus{ + Conditions: []corev1.PodCondition{ { - Port: 8081, + Type: corev1.PodReady, + Status: corev1.ConditionTrue, }, }, + Phase: corev1.PodUnknown, }, }, - expProxyMode: api.ProxyModeTransparent, - expTaggedAddresses: map[string]api.ServiceAddress{ - "virtual": { - Address: "10.0.0.1", - Port: 8081, - }, - }, - expErr: "", - }, - "service with a single port and a target port that is a port name": { - tproxyGlobalEnabled: true, - podContainers: []corev1.Container{ + existingEndpoints: []*corev1.Endpoints{ { - Name: "test", - Ports: []corev1.ContainerPort{ + ObjectMeta: metav1.ObjectMeta{ + Name: "endpoint-1", + }, + Subsets: []corev1.EndpointSubset{ { - Name: "tcp", - ContainerPort: 8081, + Addresses: []corev1.EndpointAddress{ + { + NodeName: toStringPtr("node-foo"), + }, + }, }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "endpoint-3", + }, + Subsets: []corev1.EndpointSubset{ { - Name: "http", - ContainerPort: 8080, + Addresses: []corev1.EndpointAddress{ + { + NodeName: toStringPtr("node-foo"), + }, + }, }, }, }, }, - service: &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: serviceName, - Namespace: "default", + expectedRequests: []ctrl.Request{}, + }, + "pod is deleted, no endpoints need to be reconciled": { + agentPod: nil, + existingEndpoints: []*corev1.Endpoints{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "endpoint-1", + }, + Subsets: []corev1.EndpointSubset{ + { + Addresses: []corev1.EndpointAddress{ + { + NodeName: toStringPtr("node-foo"), + }, + }, + }, + }, }, - Spec: corev1.ServiceSpec{ - ClusterIP: "10.0.0.1", - Ports: []corev1.ServicePort{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "endpoint-3", + }, + Subsets: []corev1.EndpointSubset{ { - Port: 80, - TargetPort: intstr.Parse("tcp"), + Addresses: []corev1.EndpointAddress{ + { + NodeName: toStringPtr("node-foo"), + }, + }, }, }, }, }, - expProxyMode: api.ProxyModeTransparent, - expTaggedAddresses: map[string]api.ServiceAddress{ - "virtual": { - Address: "10.0.0.1", - Port: 80, + expectedRequests: []ctrl.Request{}, + }, + } + + for name, test := range cases { + t.Run(name, func(t *testing.T) { + logger := logrtest.TestLogger{T: t} + s := runtime.NewScheme() + s.AddKnownTypes(corev1.SchemeGroupVersion, &corev1.Pod{}, 
&corev1.Endpoints{}, &corev1.EndpointsList{}) + var objects []runtime.Object + if test.agentPod != nil { + objects = append(objects, test.agentPod) + } + for _, endpoint := range test.existingEndpoints { + objects = append(objects, endpoint) + } + + fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(objects...).Build() + + controller := &EndpointsController{ + Client: fakeClient, + Scheme: s, + Log: logger, + } + var requests []ctrl.Request + if test.agentPod != nil { + requests = controller.requestsForRunningAgentPods(test.agentPod) + } else { + requests = controller.requestsForRunningAgentPods(minimal()) + } + require.ElementsMatch(t, requests, test.expectedRequests) + }) + } +} + +func TestServiceInstancesForK8SServiceNameAndNamespace(t *testing.T) { + t.Parallel() + + const ( + k8sSvc = "k8s-svc" + k8sNS = "k8s-ns" + ) + cases := []struct { + name string + k8sServiceNameMeta string + k8sNamespaceMeta string + expected map[string]*api.AgentService + }{ + { + "no k8s service name or namespace meta", + "", + "", + map[string]*api.AgentService{}, + }, + { + "k8s service name set, but no namespace meta", + k8sSvc, + "", + map[string]*api.AgentService{}, + }, + { + "k8s namespace set, but no k8s service name meta", + "", + k8sNS, + map[string]*api.AgentService{}, + }, + { + "both k8s service name and namespace set", + k8sSvc, + k8sNS, + map[string]*api.AgentService{ + "foo1": { + ID: "foo1", + Service: "foo", + Meta: map[string]string{"k8s-service-name": k8sSvc, "k8s-namespace": k8sNS}, + }, + "foo1-proxy": { + Kind: api.ServiceKindConnectProxy, + ID: "foo1-proxy", + Service: "foo-sidecar-proxy", + Port: 20000, + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "foo", + DestinationServiceID: "foo1", + }, + Meta: map[string]string{"k8s-service-name": k8sSvc, "k8s-namespace": k8sNS}, }, }, - expErr: "", }, - "service with a single port and a target port that is an int": { + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + servicesInConsul := []*api.AgentServiceRegistration{ + { + ID: "foo1", + Name: "foo", + Tags: []string{}, + Meta: map[string]string{"k8s-service-name": c.k8sServiceNameMeta, "k8s-namespace": c.k8sNamespaceMeta}, + }, + { + Kind: api.ServiceKindConnectProxy, + ID: "foo1-proxy", + Name: "foo-sidecar-proxy", + Port: 20000, + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "foo", + DestinationServiceID: "foo1", + }, + Meta: map[string]string{"k8s-service-name": c.k8sServiceNameMeta, "k8s-namespace": c.k8sNamespaceMeta}, + }, + { + ID: "k8s-service-different-ns-id", + Name: "k8s-service-different-ns", + Meta: map[string]string{"k8s-service-name": c.k8sServiceNameMeta, "k8s-namespace": "different-ns"}, + }, + { + Kind: api.ServiceKindConnectProxy, + ID: "k8s-service-different-ns-proxy", + Name: "k8s-service-different-ns-proxy", + Port: 20000, + Tags: []string{}, + Proxy: &api.AgentServiceConnectProxyConfig{ + DestinationServiceName: "k8s-service-different-ns", + DestinationServiceID: "k8s-service-different-ns-id", + }, + Meta: map[string]string{"k8s-service-name": c.k8sServiceNameMeta, "k8s-namespace": "different-ns"}, + }, + } + + consul, err := testutil.NewTestServerConfigT(t, nil) + require.NoError(t, err) + defer consul.Stop() + + consul.WaitForServiceIntentions(t) + consulClient, err := api.NewClient(&api.Config{ + Address: consul.HTTPAddr, + }) + require.NoError(t, err) + + for _, svc := range servicesInConsul { + err := consulClient.Agent().ServiceRegister(svc) + require.NoError(t, err) + } 
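+			// Four instances are now registered: two whose meta matches the name and
+			// namespace under test, and two in a different namespace that must be
+			// filtered out by the lookup below.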
+ + svcs, err := serviceInstancesForK8SServiceNameAndNamespace(k8sSvc, k8sNS, consulClient) + require.NoError(t, err) + if len(svcs) > 0 { + require.Len(t, svcs, 2) + require.NotNil(t, c.expected["foo1"], svcs["foo1"]) + require.Equal(t, c.expected["foo1"].Service, svcs["foo1"].Service) + require.NotNil(t, c.expected["foo1-proxy"], svcs["foo1-proxy"]) + require.Equal(t, c.expected["foo1-proxy"].Service, svcs["foo1-proxy"].Service) + } + }) + } +} + +func TestCreateServiceRegistrations_withTransparentProxy(t *testing.T) { + t.Parallel() + + const serviceName = "test-service" + + cases := map[string]struct { + tproxyGlobalEnabled bool + overwriteProbes bool + podContainers []corev1.Container + podAnnotations map[string]string + namespaceLabels map[string]string + service *corev1.Service + expTaggedAddresses map[string]api.ServiceAddress + expProxyMode api.ProxyMode + expExposePaths []api.ExposePath + expErr string + }{ + "tproxy enabled globally, annotation not provided": { tproxyGlobalEnabled: true, podContainers: []corev1.Container{ { @@ -4832,8 +4665,7 @@ func TestCreateServiceRegistrations_withTransparentProxy(t *testing.T) { ClusterIP: "10.0.0.1", Ports: []corev1.ServicePort{ { - Port: 80, - TargetPort: intstr.FromInt(8081), + Port: 8081, }, }, }, @@ -4842,13 +4674,14 @@ func TestCreateServiceRegistrations_withTransparentProxy(t *testing.T) { expTaggedAddresses: map[string]api.ServiceAddress{ "virtual": { Address: "10.0.0.1", - Port: 80, + Port: 8081, }, }, expErr: "", }, - "service with a multiple ports": { + "tproxy enabled globally, annotation is false": { tproxyGlobalEnabled: true, + podAnnotations: map[string]string{keyTransparentProxy: "false"}, podContainers: []corev1.Container{ { Name: "test", @@ -4873,32 +4706,18 @@ func TestCreateServiceRegistrations_withTransparentProxy(t *testing.T) { ClusterIP: "10.0.0.1", Ports: []corev1.ServicePort{ { - Name: "tcp", - Port: 80, - TargetPort: intstr.FromString("tcp"), - }, - { - Name: "http", - Port: 81, - TargetPort: intstr.FromString("http"), + Port: 80, }, }, }, }, - expProxyMode: api.ProxyModeTransparent, - expTaggedAddresses: map[string]api.ServiceAddress{ - "virtual": { - Address: "10.0.0.1", - Port: 80, - }, - }, - expErr: "", + expProxyMode: api.ProxyModeDefault, + expTaggedAddresses: nil, + expErr: "", }, - // When target port is not equal to the port we're registering with Consul, - // then we want to register the zero-value for the port. This could happen - // for client services that don't have a container port that they're listening on. 
- "target port is not found": { + "tproxy enabled globally, annotation is true": { tproxyGlobalEnabled: true, + podAnnotations: map[string]string{keyTransparentProxy: "true"}, podContainers: []corev1.Container{ { Name: "test", @@ -4923,8 +4742,7 @@ func TestCreateServiceRegistrations_withTransparentProxy(t *testing.T) { ClusterIP: "10.0.0.1", Ports: []corev1.ServicePort{ { - Port: 80, - TargetPort: intstr.Parse("http"), + Port: 8081, }, }, }, @@ -4933,20 +4751,21 @@ func TestCreateServiceRegistrations_withTransparentProxy(t *testing.T) { expTaggedAddresses: map[string]api.ServiceAddress{ "virtual": { Address: "10.0.0.1", - Port: 0, + Port: 8081, }, }, expErr: "", }, - "service with clusterIP=None (headless service)": { - tproxyGlobalEnabled: true, + "tproxy disabled globally, annotation not provided": { + tproxyGlobalEnabled: false, + podAnnotations: nil, service: &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: serviceName, Namespace: "default", }, Spec: corev1.ServiceSpec{ - ClusterIP: corev1.ClusterIPNone, + ClusterIP: "10.0.0.1", Ports: []corev1.ServicePort{ { Port: 80, @@ -4958,35 +4777,31 @@ func TestCreateServiceRegistrations_withTransparentProxy(t *testing.T) { expTaggedAddresses: nil, expErr: "", }, - "service with an empty clusterIP": { - tproxyGlobalEnabled: true, - service: &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: serviceName, - Namespace: "default", - }, - Spec: corev1.ServiceSpec{ - ClusterIP: "", - Ports: []corev1.ServicePort{ - { - Port: 80, - }, - }, + "tproxy disabled globally, annotation is false": { + tproxyGlobalEnabled: false, + podAnnotations: map[string]string{keyTransparentProxy: "false"}, + podContainers: []corev1.Container{ + { + Name: "test", + Ports: []corev1.ContainerPort{ + { + Name: "tcp", + ContainerPort: 8081, + }, + { + Name: "http", + ContainerPort: 8080, + }, + }, }, }, - expProxyMode: api.ProxyModeDefault, - expTaggedAddresses: nil, - expErr: "", - }, - "service with an invalid clusterIP": { - tproxyGlobalEnabled: true, service: &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: serviceName, Namespace: "default", }, Spec: corev1.ServiceSpec{ - ClusterIP: "invalid", + ClusterIP: "10.0.0.1", Ports: []corev1.ServicePort{ { Port: 80, @@ -4994,12 +4809,12 @@ func TestCreateServiceRegistrations_withTransparentProxy(t *testing.T) { }, }, }, - expTaggedAddresses: nil, expProxyMode: api.ProxyModeDefault, + expTaggedAddresses: nil, expErr: "", }, - "service with an IPv6 clusterIP": { - tproxyGlobalEnabled: true, + "tproxy disabled globally, annotation is true": { + tproxyGlobalEnabled: false, podContainers: []corev1.Container{ { Name: "test", @@ -5015,13 +4830,14 @@ func TestCreateServiceRegistrations_withTransparentProxy(t *testing.T) { }, }, }, + podAnnotations: map[string]string{keyTransparentProxy: "true"}, service: &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: serviceName, Namespace: "default", }, Spec: corev1.ServiceSpec{ - ClusterIP: "2001:db8::68", + ClusterIP: "10.0.0.1", Ports: []corev1.ServicePort{ { Port: 8081, @@ -5032,18 +4848,14 @@ func TestCreateServiceRegistrations_withTransparentProxy(t *testing.T) { expProxyMode: api.ProxyModeTransparent, expTaggedAddresses: map[string]api.ServiceAddress{ "virtual": { - Address: "2001:db8::68", + Address: "10.0.0.1", Port: 8081, }, }, expErr: "", }, - "overwrite probes enabled globally": { - tproxyGlobalEnabled: true, - overwriteProbes: true, - podAnnotations: map[string]string{ - constants.AnnotationOriginalPod: 
"{\"metadata\":{\"name\":\"test-pod-1\",\"namespace\":\"default\",\"creationTimestamp\":null,\"labels\":{\"consul.hashicorp.com/connect-inject-managed-by\":\"consul-k8s-endpoints-controller\",\"consul.hashicorp.com/connect-inject-status\":\"injected\"},\"annotations\":{\"consul.hashicorp.com/connect-inject-status\":\"injected\"}},\"spec\":{\"containers\":[{\"name\":\"test\",\"ports\":[{\"name\":\"tcp\",\"containerPort\":8081},{\"name\":\"http\",\"containerPort\":8080}],\"resources\":{},\"livenessProbe\":{\"httpGet\":{\"port\":8080}}}]},\"status\":{\"hostIP\":\"127.0.0.1\",\"podIP\":\"1.2.3.4\"}}\n", - }, + "tproxy disabled globally, namespace enabled, no annotation": { + tproxyGlobalEnabled: false, podContainers: []corev1.Container{ { Name: "test", @@ -5057,13 +4869,6 @@ func TestCreateServiceRegistrations_withTransparentProxy(t *testing.T) { ContainerPort: 8080, }, }, - LivenessProbe: &corev1.Probe{ - Handler: corev1.Handler{ - HTTPGet: &corev1.HTTPGetAction{ - Port: intstr.FromInt(20300), - }, - }, - }, }, }, service: &corev1.Service{ @@ -5087,21 +4892,42 @@ func TestCreateServiceRegistrations_withTransparentProxy(t *testing.T) { Port: 8081, }, }, - expExposePaths: []api.ExposePath{ - { - ListenerPort: 20300, - LocalPathPort: 8080, + namespaceLabels: map[string]string{keyTransparentProxy: "true"}, + expErr: "", + }, + "tproxy enabled globally, namespace disabled, no annotation": { + tproxyGlobalEnabled: true, + service: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: serviceName, + Namespace: "default", + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "10.0.0.1", + Ports: []corev1.ServicePort{ + { + Port: 80, + }, + }, }, }, - expErr: "", + expProxyMode: api.ProxyModeDefault, + expTaggedAddresses: nil, + namespaceLabels: map[string]string{keyTransparentProxy: "false"}, + expErr: "", }, - "overwrite probes disabled globally, enabled via annotation": { + // This case is impossible since we're always passing an endpoints object to this function, + // and Kubernetes will ensure that there is only an endpoints object if there is a service object. + // However, we're testing this case to check that we return an error in case we cannot get the service from k8s. 
+ "no service": { + tproxyGlobalEnabled: true, + service: nil, + expTaggedAddresses: nil, + expProxyMode: api.ProxyModeDefault, + expErr: "services \"test-service\" not found", + }, + "service with a single port without a target port": { tproxyGlobalEnabled: true, - overwriteProbes: false, - podAnnotations: map[string]string{ - constants.AnnotationTransparentProxyOverwriteProbes: "true", - constants.AnnotationOriginalPod: "{\"metadata\":{\"name\":\"test-pod-1\",\"namespace\":\"default\",\"creationTimestamp\":null,\"labels\":{\"consul.hashicorp.com/connect-inject-managed-by\":\"consul-k8s-endpoints-controller\",\"consul.hashicorp.com/connect-inject-status\":\"injected\"},\"annotations\":{\"consul.hashicorp.com/transparent-proxy-overwrite-probes\":\"true\"}},\"spec\":{\"containers\":[{\"name\":\"test\",\"ports\":[{\"name\":\"tcp\",\"containerPort\":8081},{\"name\":\"http\",\"containerPort\":8080}],\"resources\":{},\"livenessProbe\":{\"httpGet\":{\"port\":8080}}}]},\"status\":{\"hostIP\":\"127.0.0.1\",\"podIP\":\"1.2.3.4\"}}\n", - }, podContainers: []corev1.Container{ { Name: "test", @@ -5115,13 +4941,6 @@ func TestCreateServiceRegistrations_withTransparentProxy(t *testing.T) { ContainerPort: 8080, }, }, - LivenessProbe: &corev1.Probe{ - Handler: corev1.Handler{ - HTTPGet: &corev1.HTTPGetAction{ - Port: intstr.FromInt(20300), - }, - }, - }, }, }, service: &corev1.Service{ @@ -5145,20 +4964,10 @@ func TestCreateServiceRegistrations_withTransparentProxy(t *testing.T) { Port: 8081, }, }, - expExposePaths: []api.ExposePath{ - { - ListenerPort: 20300, - LocalPathPort: 8080, - }, - }, expErr: "", }, - "overwrite probes enabled globally, tproxy disabled": { - tproxyGlobalEnabled: false, - overwriteProbes: true, - podAnnotations: map[string]string{ - constants.AnnotationOriginalPod: "{\"metadata\":{\"name\":\"test-pod-1\",\"namespace\":\"default\",\"creationTimestamp\":null,\"labels\":{\"consul.hashicorp.com/connect-inject-managed-by\":\"consul-k8s-endpoints-controller\",\"consul.hashicorp.com/connect-inject-status\":\"injected\"},\"annotations\":{\"consul.hashicorp.com/connect-inject-status\":\"injected\"}},\"spec\":{\"containers\":[{\"name\":\"test\",\"ports\":[{\"name\":\"tcp\",\"containerPort\":8081},{\"name\":\"http\",\"containerPort\":8080}],\"resources\":{},\"livenessProbe\":{\"httpGet\":{\"port\":8080}}}]},\"status\":{\"hostIP\":\"127.0.0.1\",\"podIP\":\"1.2.3.4\"}}\n", - }, + "service with a single port and a target port that is a port name": { + tproxyGlobalEnabled: true, podContainers: []corev1.Container{ { Name: "test", @@ -5172,13 +4981,6 @@ func TestCreateServiceRegistrations_withTransparentProxy(t *testing.T) { ContainerPort: 8080, }, }, - LivenessProbe: &corev1.Probe{ - Handler: corev1.Handler{ - HTTPGet: &corev1.HTTPGetAction{ - Port: intstr.FromInt(20300), - }, - }, - }, }, }, service: &corev1.Service{ @@ -5190,21 +4992,23 @@ func TestCreateServiceRegistrations_withTransparentProxy(t *testing.T) { ClusterIP: "10.0.0.1", Ports: []corev1.ServicePort{ { - Port: 8081, + Port: 80, + TargetPort: intstr.Parse("tcp"), }, }, }, }, - expTaggedAddresses: nil, - expExposePaths: nil, - expErr: "", + expProxyMode: api.ProxyModeTransparent, + expTaggedAddresses: map[string]api.ServiceAddress{ + "virtual": { + Address: "10.0.0.1", + Port: 80, + }, + }, + expErr: "", }, - "readiness only probe provided": { + "service with a single port and a target port that is an int": { tproxyGlobalEnabled: true, - overwriteProbes: true, - podAnnotations: map[string]string{ - constants.AnnotationOriginalPod: 
"{\"metadata\":{\"name\":\"test-pod-1\",\"namespace\":\"default\",\"creationTimestamp\":null,\"labels\":{\"consul.hashicorp.com/connect-inject-managed-by\":\"consul-k8s-endpoints-controller\",\"consul.hashicorp.com/connect-inject-status\":\"injected\"}},\"spec\":{\"containers\":[{\"name\":\"test\",\"ports\":[{\"name\":\"tcp\",\"containerPort\":8081},{\"name\":\"http\",\"containerPort\":8080}],\"resources\":{},\"readinessProbe\":{\"httpGet\":{\"port\":8080}}}]},\"status\":{\"hostIP\":\"127.0.0.1\",\"podIP\":\"1.2.3.4\"}}\n", - }, podContainers: []corev1.Container{ { Name: "test", @@ -5218,13 +5022,6 @@ func TestCreateServiceRegistrations_withTransparentProxy(t *testing.T) { ContainerPort: 8080, }, }, - ReadinessProbe: &corev1.Probe{ - Handler: corev1.Handler{ - HTTPGet: &corev1.HTTPGetAction{ - Port: intstr.FromInt(20400), - }, - }, - }, }, }, service: &corev1.Service{ @@ -5236,7 +5033,8 @@ func TestCreateServiceRegistrations_withTransparentProxy(t *testing.T) { ClusterIP: "10.0.0.1", Ports: []corev1.ServicePort{ { - Port: 8081, + Port: 80, + TargetPort: intstr.FromInt(8081), }, }, }, @@ -5245,23 +5043,13 @@ func TestCreateServiceRegistrations_withTransparentProxy(t *testing.T) { expTaggedAddresses: map[string]api.ServiceAddress{ "virtual": { Address: "10.0.0.1", - Port: 8081, - }, - }, - expExposePaths: []api.ExposePath{ - { - ListenerPort: 20400, - LocalPathPort: 8080, + Port: 80, }, }, expErr: "", }, - "startup only probe provided": { + "service with a multiple ports": { tproxyGlobalEnabled: true, - overwriteProbes: true, - podAnnotations: map[string]string{ - constants.AnnotationOriginalPod: "{\"metadata\":{\"name\":\"test-pod-1\",\"namespace\":\"default\",\"creationTimestamp\":null,\"labels\":{\"consul.hashicorp.com/connect-inject-managed-by\":\"consul-k8s-endpoints-controller\",\"consul.hashicorp.com/connect-inject-status\":\"injected\"}},\"spec\":{\"containers\":[{\"name\":\"test\",\"ports\":[{\"name\":\"tcp\",\"containerPort\":8081},{\"name\":\"http\",\"containerPort\":8080}],\"resources\":{},\"startupProbe\":{\"httpGet\":{\"port\":8080}}}]},\"status\":{\"hostIP\":\"127.0.0.1\",\"podIP\":\"1.2.3.4\"}}\n", - }, podContainers: []corev1.Container{ { Name: "test", @@ -5275,13 +5063,6 @@ func TestCreateServiceRegistrations_withTransparentProxy(t *testing.T) { ContainerPort: 8080, }, }, - StartupProbe: &corev1.Probe{ - Handler: corev1.Handler{ - HTTPGet: &corev1.HTTPGetAction{ - Port: intstr.FromInt(20500), - }, - }, - }, }, }, service: &corev1.Service{ @@ -5293,7 +5074,14 @@ func TestCreateServiceRegistrations_withTransparentProxy(t *testing.T) { ClusterIP: "10.0.0.1", Ports: []corev1.ServicePort{ { - Port: 8081, + Name: "tcp", + Port: 80, + TargetPort: intstr.FromString("tcp"), + }, + { + Name: "http", + Port: 81, + TargetPort: intstr.FromString("http"), }, }, }, @@ -5302,23 +5090,16 @@ func TestCreateServiceRegistrations_withTransparentProxy(t *testing.T) { expTaggedAddresses: map[string]api.ServiceAddress{ "virtual": { Address: "10.0.0.1", - Port: 8081, - }, - }, - expExposePaths: []api.ExposePath{ - { - ListenerPort: 20500, - LocalPathPort: 8080, + Port: 80, }, }, expErr: "", }, - "all probes provided": { + // When target port is not equal to the port we're registering with Consul, + // then we want to register the zero-value for the port. This could happen + // for client services that don't have a container port that they're listening on. 
+ "target port is not found": { tproxyGlobalEnabled: true, - overwriteProbes: true, - podAnnotations: map[string]string{ - constants.AnnotationOriginalPod: "{\"metadata\":{\"name\":\"test-pod-1\",\"namespace\":\"default\",\"creationTimestamp\":null,\"labels\":{\"consul.hashicorp.com/connect-inject-managed-by\":\"consul-k8s-endpoints-controller\",\"consul.hashicorp.com/connect-inject-status\":\"injected\"}},\"spec\":{\"containers\":[{\"name\":\"test\",\"ports\":[{\"name\":\"tcp\",\"containerPort\":8081},{\"name\":\"http\",\"containerPort\":8080}],\"resources\":{},\"livenessProbe\":{\"httpGet\":{\"port\":8080}},\"readinessProbe\":{\"httpGet\":{\"port\":8081}},\"startupProbe\":{\"httpGet\":{\"port\":8081}}}]},\"status\":{\"hostIP\":\"127.0.0.1\",\"podIP\":\"1.2.3.4\"}}\n", - }, podContainers: []corev1.Container{ { Name: "test", @@ -5332,27 +5113,6 @@ func TestCreateServiceRegistrations_withTransparentProxy(t *testing.T) { ContainerPort: 8080, }, }, - LivenessProbe: &corev1.Probe{ - Handler: corev1.Handler{ - HTTPGet: &corev1.HTTPGetAction{ - Port: intstr.FromInt(20300), - }, - }, - }, - ReadinessProbe: &corev1.Probe{ - Handler: corev1.Handler{ - HTTPGet: &corev1.HTTPGetAction{ - Port: intstr.FromInt(20400), - }, - }, - }, - StartupProbe: &corev1.Probe{ - Handler: corev1.Handler{ - HTTPGet: &corev1.HTTPGetAction{ - Port: intstr.FromInt(20500), - }, - }, - }, }, }, service: &corev1.Service{ @@ -5364,7 +5124,8 @@ func TestCreateServiceRegistrations_withTransparentProxy(t *testing.T) { ClusterIP: "10.0.0.1", Ports: []corev1.ServicePort{ { - Port: 8081, + Port: 80, + TargetPort: intstr.Parse("http"), }, }, }, @@ -5373,106 +5134,193 @@ func TestCreateServiceRegistrations_withTransparentProxy(t *testing.T) { expTaggedAddresses: map[string]api.ServiceAddress{ "virtual": { Address: "10.0.0.1", - Port: 8081, - }, - }, - expExposePaths: []api.ExposePath{ - { - ListenerPort: 20300, - LocalPathPort: 8080, - }, - { - ListenerPort: 20400, - LocalPathPort: 8081, - }, - { - ListenerPort: 20500, - LocalPathPort: 8081, + Port: 0, }, }, expErr: "", }, - "multiple containers with all probes provided": { + "service with clusterIP=None (headless service)": { tproxyGlobalEnabled: true, - overwriteProbes: true, - podAnnotations: map[string]string{ - constants.AnnotationOriginalPod: "{\"metadata\":{\"name\":\"test-pod-1\",\"namespace\":\"default\",\"creationTimestamp\":null,\"labels\":{\"consul.hashicorp.com/connect-inject-managed-by\":\"consul-k8s-endpoints-controller\",\"consul.hashicorp.com/connect-inject-status\":\"injected\"}},\"spec\":{\"containers\":[{\"name\":\"test\",\"ports\":[{\"name\":\"tcp\",\"containerPort\":8081},{\"name\":\"http\",\"containerPort\":8080}],\"resources\":{},\"livenessProbe\":{\"httpGet\":{\"port\":8080}},\"readinessProbe\":{\"httpGet\":{\"port\":8081}},\"startupProbe\":{\"httpGet\":{\"port\":8081}}},{\"name\":\"test-2\",\"ports\":[{\"name\":\"tcp\",\"containerPort\":8083},{\"name\":\"http\",\"containerPort\":8082}],\"resources\":{},\"livenessProbe\":{\"httpGet\":{\"port\":8082}},\"readinessProbe\":{\"httpGet\":{\"port\":8083}},\"startupProbe\":{\"httpGet\":{\"port\":8083}}},{\"name\":\"envoy-sidecar\",\"ports\":[{\"name\":\"http\",\"containerPort\":20000}],\"resources\":{}}]},\"status\":{\"hostIP\":\"127.0.0.1\",\"podIP\":\"1.2.3.4\"}}\n", - }, - podContainers: []corev1.Container{ - { - Name: "test", - Ports: []corev1.ContainerPort{ + service: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: serviceName, + Namespace: "default", + }, + Spec: corev1.ServiceSpec{ + ClusterIP: 
corev1.ClusterIPNone, + Ports: []corev1.ServicePort{ { - Name: "tcp", - ContainerPort: 8081, + Port: 80, }, + }, + }, + }, + expProxyMode: api.ProxyModeDefault, + expTaggedAddresses: nil, + expErr: "", + }, + "service with an empty clusterIP": { + tproxyGlobalEnabled: true, + service: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: serviceName, + Namespace: "default", + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "", + Ports: []corev1.ServicePort{ { - Name: "http", - ContainerPort: 8080, + Port: 80, }, }, - LivenessProbe: &corev1.Probe{ - Handler: corev1.Handler{ - HTTPGet: &corev1.HTTPGetAction{ - Port: intstr.FromInt(20300), - }, + }, + }, + expProxyMode: api.ProxyModeDefault, + expTaggedAddresses: nil, + expErr: "", + }, + "service with an invalid clusterIP": { + tproxyGlobalEnabled: true, + service: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: serviceName, + Namespace: "default", + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "invalid", + Ports: []corev1.ServicePort{ + { + Port: 80, }, }, - ReadinessProbe: &corev1.Probe{ - Handler: corev1.Handler{ - HTTPGet: &corev1.HTTPGetAction{ - Port: intstr.FromInt(20400), - }, + }, + }, + expTaggedAddresses: nil, + expProxyMode: api.ProxyModeDefault, + expErr: "", + }, + "service with an IPv6 clusterIP": { + tproxyGlobalEnabled: true, + podContainers: []corev1.Container{ + { + Name: "test", + Ports: []corev1.ContainerPort{ + { + Name: "tcp", + ContainerPort: 8081, + }, + { + Name: "http", + ContainerPort: 8080, }, }, - StartupProbe: &corev1.Probe{ - Handler: corev1.Handler{ - HTTPGet: &corev1.HTTPGetAction{ - Port: intstr.FromInt(20500), - }, + }, + }, + service: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: serviceName, + Namespace: "default", + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "2001:db8::68", + Ports: []corev1.ServicePort{ + { + Port: 8081, }, }, }, + }, + expProxyMode: api.ProxyModeTransparent, + expTaggedAddresses: map[string]api.ServiceAddress{ + "virtual": { + Address: "2001:db8::68", + Port: 8081, + }, + }, + expErr: "", + }, + "overwrite probes enabled globally": { + tproxyGlobalEnabled: true, + overwriteProbes: true, + podAnnotations: map[string]string{ + annotationOriginalPod: "{\"metadata\":{\"name\":\"test-pod-1\",\"namespace\":\"default\",\"creationTimestamp\":null,\"labels\":{\"consul.hashicorp.com/connect-inject-managed-by\":\"consul-k8s-endpoints-controller\",\"consul.hashicorp.com/connect-inject-status\":\"injected\"},\"annotations\":{\"consul.hashicorp.com/connect-inject-status\":\"injected\"}},\"spec\":{\"containers\":[{\"name\":\"test\",\"ports\":[{\"name\":\"tcp\",\"containerPort\":8081},{\"name\":\"http\",\"containerPort\":8080}],\"resources\":{},\"livenessProbe\":{\"httpGet\":{\"port\":8080}}}]},\"status\":{\"hostIP\":\"127.0.0.1\",\"podIP\":\"1.2.3.4\"}}\n", + }, + podContainers: []corev1.Container{ { - Name: "test-2", + Name: "test", Ports: []corev1.ContainerPort{ { Name: "tcp", - ContainerPort: 8083, + ContainerPort: 8081, }, { Name: "http", - ContainerPort: 8082, + ContainerPort: 8080, }, }, LivenessProbe: &corev1.Probe{ Handler: corev1.Handler{ HTTPGet: &corev1.HTTPGetAction{ - Port: intstr.FromInt(20300 + 1), - }, - }, - }, - ReadinessProbe: &corev1.Probe{ - Handler: corev1.Handler{ - HTTPGet: &corev1.HTTPGetAction{ - Port: intstr.FromInt(20400 + 1), + Port: intstr.FromInt(exposedPathsLivenessPortsRangeStart), }, }, }, - StartupProbe: &corev1.Probe{ - Handler: corev1.Handler{ - HTTPGet: &corev1.HTTPGetAction{ - Port: intstr.FromInt(20500 + 1), - }, + }, + }, + service: 
&corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: serviceName, + Namespace: "default", + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "10.0.0.1", + Ports: []corev1.ServicePort{ + { + Port: 8081, }, }, }, + }, + expProxyMode: api.ProxyModeTransparent, + expTaggedAddresses: map[string]api.ServiceAddress{ + "virtual": { + Address: "10.0.0.1", + Port: 8081, + }, + }, + expExposePaths: []api.ExposePath{ + { + ListenerPort: exposedPathsLivenessPortsRangeStart, + LocalPathPort: 8080, + }, + }, + expErr: "", + }, + "overwrite probes disabled globally, enabled via annotation": { + tproxyGlobalEnabled: true, + overwriteProbes: false, + podAnnotations: map[string]string{ + annotationTransparentProxyOverwriteProbes: "true", + annotationOriginalPod: "{\"metadata\":{\"name\":\"test-pod-1\",\"namespace\":\"default\",\"creationTimestamp\":null,\"labels\":{\"consul.hashicorp.com/connect-inject-managed-by\":\"consul-k8s-endpoints-controller\",\"consul.hashicorp.com/connect-inject-status\":\"injected\"},\"annotations\":{\"consul.hashicorp.com/transparent-proxy-overwrite-probes\":\"true\"}},\"spec\":{\"containers\":[{\"name\":\"test\",\"ports\":[{\"name\":\"tcp\",\"containerPort\":8081},{\"name\":\"http\",\"containerPort\":8080}],\"resources\":{},\"livenessProbe\":{\"httpGet\":{\"port\":8080}}}]},\"status\":{\"hostIP\":\"127.0.0.1\",\"podIP\":\"1.2.3.4\"}}\n", + }, + podContainers: []corev1.Container{ { - Name: "sidecar-proxy", // This name doesn't matter. + Name: "test", Ports: []corev1.ContainerPort{ + { + Name: "tcp", + ContainerPort: 8081, + }, { Name: "http", - ContainerPort: 20000, + ContainerPort: 8080, + }, + }, + LivenessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(exposedPathsLivenessPortsRangeStart), + }, }, }, }, @@ -5500,37 +5348,120 @@ func TestCreateServiceRegistrations_withTransparentProxy(t *testing.T) { }, expExposePaths: []api.ExposePath{ { - ListenerPort: 20300, + ListenerPort: exposedPathsLivenessPortsRangeStart, LocalPathPort: 8080, }, + }, + expErr: "", + }, + "overwrite probes enabled globally, tproxy disabled": { + tproxyGlobalEnabled: false, + overwriteProbes: true, + podAnnotations: map[string]string{ + annotationOriginalPod: "{\"metadata\":{\"name\":\"test-pod-1\",\"namespace\":\"default\",\"creationTimestamp\":null,\"labels\":{\"consul.hashicorp.com/connect-inject-managed-by\":\"consul-k8s-endpoints-controller\",\"consul.hashicorp.com/connect-inject-status\":\"injected\"},\"annotations\":{\"consul.hashicorp.com/connect-inject-status\":\"injected\"}},\"spec\":{\"containers\":[{\"name\":\"test\",\"ports\":[{\"name\":\"tcp\",\"containerPort\":8081},{\"name\":\"http\",\"containerPort\":8080}],\"resources\":{},\"livenessProbe\":{\"httpGet\":{\"port\":8080}}}]},\"status\":{\"hostIP\":\"127.0.0.1\",\"podIP\":\"1.2.3.4\"}}\n", + }, + podContainers: []corev1.Container{ { - ListenerPort: 20400, - LocalPathPort: 8081, + Name: "test", + Ports: []corev1.ContainerPort{ + { + Name: "tcp", + ContainerPort: 8081, + }, + { + Name: "http", + ContainerPort: 8080, + }, + }, + LivenessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(exposedPathsLivenessPortsRangeStart), + }, + }, + }, }, - { - ListenerPort: 20500, - LocalPathPort: 8081, + }, + service: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: serviceName, + Namespace: "default", }, - { - ListenerPort: 20300 + 1, - LocalPathPort: 8082, + Spec: corev1.ServiceSpec{ + ClusterIP: "10.0.0.1", + Ports: 
[]corev1.ServicePort{ + { + Port: 8081, + }, + }, }, + }, + expTaggedAddresses: nil, + expExposePaths: nil, + expErr: "", + }, + "readiness only probe provided": { + tproxyGlobalEnabled: true, + overwriteProbes: true, + podAnnotations: map[string]string{ + annotationOriginalPod: "{\"metadata\":{\"name\":\"test-pod-1\",\"namespace\":\"default\",\"creationTimestamp\":null,\"labels\":{\"consul.hashicorp.com/connect-inject-managed-by\":\"consul-k8s-endpoints-controller\",\"consul.hashicorp.com/connect-inject-status\":\"injected\"}},\"spec\":{\"containers\":[{\"name\":\"test\",\"ports\":[{\"name\":\"tcp\",\"containerPort\":8081},{\"name\":\"http\",\"containerPort\":8080}],\"resources\":{},\"readinessProbe\":{\"httpGet\":{\"port\":8080}}}]},\"status\":{\"hostIP\":\"127.0.0.1\",\"podIP\":\"1.2.3.4\"}}\n", + }, + podContainers: []corev1.Container{ { - ListenerPort: 20400 + 1, - LocalPathPort: 8083, + Name: "test", + Ports: []corev1.ContainerPort{ + { + Name: "tcp", + ContainerPort: 8081, + }, + { + Name: "http", + ContainerPort: 8080, + }, + }, + ReadinessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(exposedPathsReadinessPortsRangeStart), + }, + }, + }, + }, + }, + service: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: serviceName, + Namespace: "default", + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "10.0.0.1", + Ports: []corev1.ServicePort{ + { + Port: 8081, + }, + }, + }, + }, + expProxyMode: api.ProxyModeTransparent, + expTaggedAddresses: map[string]api.ServiceAddress{ + "virtual": { + Address: "10.0.0.1", + Port: 8081, }, + }, + expExposePaths: []api.ExposePath{ { - ListenerPort: 20500 + 1, - LocalPathPort: 8083, + ListenerPort: exposedPathsReadinessPortsRangeStart, + LocalPathPort: 8080, }, }, expErr: "", }, - "non-http probe": { + "startup only probe provided": { tproxyGlobalEnabled: true, overwriteProbes: true, podAnnotations: map[string]string{ - constants.AnnotationOriginalPod: "{\"metadata\":{\"name\":\"test-pod-1\",\"namespace\":\"default\",\"creationTimestamp\":null,\"labels\":{\"consul.hashicorp.com/connect-inject-managed-by\":\"consul-k8s-endpoints-controller\",\"consul.hashicorp.com/connect-inject-status\":\"injected\"}},\"spec\":{\"containers\":[{\"name\":\"test\",\"ports\":[{\"name\":\"tcp\",\"containerPort\":8081},{\"name\":\"http\",\"containerPort\":8080}],\"resources\":{},\"livenessProbe\":{\"tcpSocket\":{\"port\":8080}}}]},\"status\":{\"hostIP\":\"127.0.0.1\",\"podIP\":\"1.2.3.4\"}}\n", + annotationOriginalPod: "{\"metadata\":{\"name\":\"test-pod-1\",\"namespace\":\"default\",\"creationTimestamp\":null,\"labels\":{\"consul.hashicorp.com/connect-inject-managed-by\":\"consul-k8s-endpoints-controller\",\"consul.hashicorp.com/connect-inject-status\":\"injected\"}},\"spec\":{\"containers\":[{\"name\":\"test\",\"ports\":[{\"name\":\"tcp\",\"containerPort\":8081},{\"name\":\"http\",\"containerPort\":8080}],\"resources\":{},\"startupProbe\":{\"httpGet\":{\"port\":8080}}}]},\"status\":{\"hostIP\":\"127.0.0.1\",\"podIP\":\"1.2.3.4\"}}\n", }, podContainers: []corev1.Container{ { @@ -5545,10 +5476,10 @@ func TestCreateServiceRegistrations_withTransparentProxy(t *testing.T) { ContainerPort: 8080, }, }, - LivenessProbe: &corev1.Probe{ + StartupProbe: &corev1.Probe{ Handler: corev1.Handler{ - TCPSocket: &corev1.TCPSocketAction{ - Port: intstr.FromInt(8080), + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(exposedPathsStartupPortsRangeStart), }, }, }, @@ -5575,14 +5506,19 @@ func 
TestCreateServiceRegistrations_withTransparentProxy(t *testing.T) { Port: 8081, }, }, - expExposePaths: nil, - expErr: "", + expExposePaths: []api.ExposePath{ + { + ListenerPort: exposedPathsStartupPortsRangeStart, + LocalPathPort: 8080, + }, + }, + expErr: "", }, - "probes with port names": { + "all probes provided": { tproxyGlobalEnabled: true, overwriteProbes: true, podAnnotations: map[string]string{ - constants.AnnotationOriginalPod: "{\"metadata\":{\"name\":\"test-pod-1\",\"namespace\":\"default\",\"creationTimestamp\":null,\"labels\":{\"consul.hashicorp.com/connect-inject-managed-by\":\"consul-k8s-endpoints-controller\",\"consul.hashicorp.com/connect-inject-status\":\"injected\"}},\"spec\":{\"containers\":[{\"name\":\"test\",\"ports\":[{\"name\":\"tcp\",\"containerPort\":8081},{\"name\":\"http\",\"containerPort\":8080}],\"resources\":{},\"livenessProbe\":{\"httpGet\":{\"port\":\"tcp\"}},\"readinessProbe\":{\"httpGet\":{\"port\":\"http\"}},\"startupProbe\":{\"httpGet\":{\"port\":\"http\"}}}]},\"status\":{\"hostIP\":\"127.0.0.1\",\"podIP\":\"1.2.3.4\"}}\n", + annotationOriginalPod: "{\"metadata\":{\"name\":\"test-pod-1\",\"namespace\":\"default\",\"creationTimestamp\":null,\"labels\":{\"consul.hashicorp.com/connect-inject-managed-by\":\"consul-k8s-endpoints-controller\",\"consul.hashicorp.com/connect-inject-status\":\"injected\"}},\"spec\":{\"containers\":[{\"name\":\"test\",\"ports\":[{\"name\":\"tcp\",\"containerPort\":8081},{\"name\":\"http\",\"containerPort\":8080}],\"resources\":{},\"livenessProbe\":{\"httpGet\":{\"port\":8080}},\"readinessProbe\":{\"httpGet\":{\"port\":8081}},\"startupProbe\":{\"httpGet\":{\"port\":8081}}}]},\"status\":{\"hostIP\":\"127.0.0.1\",\"podIP\":\"1.2.3.4\"}}\n", }, podContainers: []corev1.Container{ { @@ -5600,21 +5536,21 @@ func TestCreateServiceRegistrations_withTransparentProxy(t *testing.T) { LivenessProbe: &corev1.Probe{ Handler: corev1.Handler{ HTTPGet: &corev1.HTTPGetAction{ - Port: intstr.FromInt(20300), + Port: intstr.FromInt(exposedPathsLivenessPortsRangeStart), }, }, }, ReadinessProbe: &corev1.Probe{ Handler: corev1.Handler{ HTTPGet: &corev1.HTTPGetAction{ - Port: intstr.FromInt(20400), + Port: intstr.FromInt(exposedPathsReadinessPortsRangeStart), }, }, }, StartupProbe: &corev1.Probe{ Handler: corev1.Handler{ HTTPGet: &corev1.HTTPGetAction{ - Port: intstr.FromInt(20500), + Port: intstr.FromInt(exposedPathsStartupPortsRangeStart), }, }, }, @@ -5643,730 +5579,458 @@ func TestCreateServiceRegistrations_withTransparentProxy(t *testing.T) { }, expExposePaths: []api.ExposePath{ { - ListenerPort: 20300, - LocalPathPort: 8081, + ListenerPort: exposedPathsLivenessPortsRangeStart, + LocalPathPort: 8080, }, { - ListenerPort: 20400, - LocalPathPort: 8080, + ListenerPort: exposedPathsReadinessPortsRangeStart, + LocalPathPort: 8081, }, { - ListenerPort: 20500, - LocalPathPort: 8080, + ListenerPort: exposedPathsStartupPortsRangeStart, + LocalPathPort: 8081, }, }, expErr: "", }, - } - - for name, c := range cases { - t.Run(name, func(t *testing.T) { - pod := createServicePod("test-pod-1", "1.2.3.4", true, true) - if c.podAnnotations != nil { - pod.Annotations = c.podAnnotations - } - if c.podContainers != nil { - pod.Spec.Containers = c.podContainers - } - - // We set these annotations explicitly as these are set by the meshWebhook and we - // need these values to determine which port to use for the service registration. 
- pod.Annotations[constants.AnnotationPort] = "tcp" - - endpoints := &corev1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Name: serviceName, - Namespace: "default", - }, - Subsets: []corev1.EndpointSubset{ - { - Addresses: []corev1.EndpointAddress{ - { - IP: "1.2.3.4", - TargetRef: &corev1.ObjectReference{ - Kind: "Pod", - Name: pod.Name, - Namespace: pod.Namespace, - }, - }, - }, - }, - }, - } - // Add the pod's namespace. - ns := corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{Name: pod.Namespace, Labels: c.namespaceLabels}, - } - var fakeClient client.Client - if c.service != nil { - fakeClient = fake.NewClientBuilder().WithRuntimeObjects(pod, endpoints, c.service, &ns).Build() - } else { - fakeClient = fake.NewClientBuilder().WithRuntimeObjects(pod, endpoints, &ns).Build() - } - - epCtrl := Controller{ - Client: fakeClient, - EnableTransparentProxy: c.tproxyGlobalEnabled, - TProxyOverwriteProbes: c.overwriteProbes, - Log: logrtest.TestLogger{T: t}, - } - - serviceRegistration, proxyServiceRegistration, err := epCtrl.createServiceRegistrations(*pod, *endpoints, api.HealthPassing) - if c.expErr != "" { - require.EqualError(t, err, c.expErr) - } else { - require.NoError(t, err) - - require.Equal(t, c.expProxyMode, proxyServiceRegistration.Service.Proxy.Mode) - require.Equal(t, c.expTaggedAddresses, serviceRegistration.Service.TaggedAddresses) - require.Equal(t, c.expTaggedAddresses, proxyServiceRegistration.Service.TaggedAddresses) - require.Equal(t, c.expExposePaths, proxyServiceRegistration.Service.Proxy.Expose.Paths) - } - }) - } -} - -func TestGetTokenMetaFromDescription(t *testing.T) { - t.Parallel() - cases := map[string]struct { - description string - expectedTokenMeta map[string]string - }{ - "no description prefix": { - description: `{"pod":"default/pod"}`, - expectedTokenMeta: map[string]string{"pod": "default/pod"}, - }, - "consul's default description prefix": { - description: `token created via login: {"pod":"default/pod"}`, - expectedTokenMeta: map[string]string{"pod": "default/pod"}, - }, - } - - for name, c := range cases { - t.Run(name, func(t *testing.T) { - tokenMeta, err := getTokenMetaFromDescription(c.description) - require.NoError(t, err) - require.Equal(t, c.expectedTokenMeta, tokenMeta) - }) - } -} - -func TestMapAddresses(t *testing.T) { - t.Parallel() - cases := map[string]struct { - addresses corev1.EndpointSubset - expected map[corev1.EndpointAddress]string - }{ - "ready and not ready addresses": { - addresses: corev1.EndpointSubset{ - Addresses: []corev1.EndpointAddress{ - {Hostname: "host1"}, - {Hostname: "host2"}, - }, - NotReadyAddresses: []corev1.EndpointAddress{ - {Hostname: "host3"}, - {Hostname: "host4"}, - }, - }, - expected: map[corev1.EndpointAddress]string{ - {Hostname: "host1"}: api.HealthPassing, - {Hostname: "host2"}: api.HealthPassing, - {Hostname: "host3"}: api.HealthCritical, - {Hostname: "host4"}: api.HealthCritical, - }, - }, - "ready addresses only": { - addresses: corev1.EndpointSubset{ - Addresses: []corev1.EndpointAddress{ - {Hostname: "host1"}, - {Hostname: "host2"}, - {Hostname: "host3"}, - {Hostname: "host4"}, - }, - NotReadyAddresses: []corev1.EndpointAddress{}, - }, - expected: map[corev1.EndpointAddress]string{ - {Hostname: "host1"}: api.HealthPassing, - {Hostname: "host2"}: api.HealthPassing, - {Hostname: "host3"}: api.HealthPassing, - {Hostname: "host4"}: api.HealthPassing, - }, - }, - "not ready addresses only": { - addresses: corev1.EndpointSubset{ - Addresses: []corev1.EndpointAddress{}, - NotReadyAddresses: 
[]corev1.EndpointAddress{ - {Hostname: "host1"}, - {Hostname: "host2"}, - {Hostname: "host3"}, - {Hostname: "host4"}, - }, - }, - expected: map[corev1.EndpointAddress]string{ - {Hostname: "host1"}: api.HealthCritical, - {Hostname: "host2"}: api.HealthCritical, - {Hostname: "host3"}: api.HealthCritical, - {Hostname: "host4"}: api.HealthCritical, - }, - }, - } - - for name, c := range cases { - t.Run(name, func(t *testing.T) { - actual := mapAddresses(c.addresses) - require.Equal(t, c.expected, actual) - }) - } -} - -func Test_GetWANData(t *testing.T) { - cases := map[string]struct { - gatewayPod corev1.Pod - gatewayEndpoint corev1.Endpoints - k8sObjects func() []runtime.Object - wanAddr string - wanPort int - expErr string - }{ - "source=NodeName": { - gatewayPod: corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Annotations: map[string]string{ - constants.AnnotationGatewayWANSource: "NodeName", - constants.AnnotationGatewayWANAddress: "test-wan-address", - constants.AnnotationGatewayWANPort: "1234", - }, - }, - Spec: corev1.PodSpec{ - NodeName: "test-nodename", - }, - Status: corev1.PodStatus{ - HostIP: "test-host-ip", - }, - }, - gatewayEndpoint: corev1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Namespace: "default", - }, + "multiple containers with all probes provided": { + tproxyGlobalEnabled: true, + overwriteProbes: true, + podAnnotations: map[string]string{ + annotationOriginalPod: "{\"metadata\":{\"name\":\"test-pod-1\",\"namespace\":\"default\",\"creationTimestamp\":null,\"labels\":{\"consul.hashicorp.com/connect-inject-managed-by\":\"consul-k8s-endpoints-controller\",\"consul.hashicorp.com/connect-inject-status\":\"injected\"}},\"spec\":{\"containers\":[{\"name\":\"test\",\"ports\":[{\"name\":\"tcp\",\"containerPort\":8081},{\"name\":\"http\",\"containerPort\":8080}],\"resources\":{},\"livenessProbe\":{\"httpGet\":{\"port\":8080}},\"readinessProbe\":{\"httpGet\":{\"port\":8081}},\"startupProbe\":{\"httpGet\":{\"port\":8081}}},{\"name\":\"test-2\",\"ports\":[{\"name\":\"tcp\",\"containerPort\":8083},{\"name\":\"http\",\"containerPort\":8082}],\"resources\":{},\"livenessProbe\":{\"httpGet\":{\"port\":8082}},\"readinessProbe\":{\"httpGet\":{\"port\":8083}},\"startupProbe\":{\"httpGet\":{\"port\":8083}}},{\"name\":\"envoy-sidecar\",\"ports\":[{\"name\":\"http\",\"containerPort\":20000}],\"resources\":{}}]},\"status\":{\"hostIP\":\"127.0.0.1\",\"podIP\":\"1.2.3.4\"}}\n", }, - k8sObjects: func() []runtime.Object { - service := &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Namespace: "default", + podContainers: []corev1.Container{ + { + Name: "test", + Ports: []corev1.ContainerPort{ + { + Name: "tcp", + ContainerPort: 8081, + }, + { + Name: "http", + ContainerPort: 8080, + }, }, - Spec: corev1.ServiceSpec{ - Type: corev1.ServiceTypeLoadBalancer, - ClusterIP: "test-cluster-ip", + LivenessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(exposedPathsLivenessPortsRangeStart), + }, + }, }, - Status: corev1.ServiceStatus{ - LoadBalancer: corev1.LoadBalancerStatus{ - Ingress: []corev1.LoadBalancerIngress{ - { - IP: "1.2.3.4", - }, + ReadinessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(exposedPathsReadinessPortsRangeStart), }, }, }, - } - return []runtime.Object{service} - }, - wanAddr: "test-nodename", - wanPort: 1234, - }, - "source=HostIP": { - gatewayPod: corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: 
"gateway", - Annotations: map[string]string{ - constants.AnnotationGatewayWANSource: "NodeIP", - constants.AnnotationGatewayWANAddress: "test-wan-address", - constants.AnnotationGatewayWANPort: "1234", + StartupProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(exposedPathsStartupPortsRangeStart), + }, + }, }, }, - Spec: corev1.PodSpec{ - NodeName: "test-nodename", - }, - Status: corev1.PodStatus{ - HostIP: "test-host-ip", - }, - }, - gatewayEndpoint: corev1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Namespace: "default", - }, - }, - k8sObjects: func() []runtime.Object { - service := &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Namespace: "default", + { + Name: "test-2", + Ports: []corev1.ContainerPort{ + { + Name: "tcp", + ContainerPort: 8083, + }, + { + Name: "http", + ContainerPort: 8082, + }, }, - Spec: corev1.ServiceSpec{ - Type: corev1.ServiceTypeLoadBalancer, - ClusterIP: "test-cluster-ip", + LivenessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(exposedPathsLivenessPortsRangeStart + 1), + }, + }, }, - Status: corev1.ServiceStatus{ - LoadBalancer: corev1.LoadBalancerStatus{ - Ingress: []corev1.LoadBalancerIngress{ - { - IP: "1.2.3.4", - }, + ReadinessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(exposedPathsReadinessPortsRangeStart + 1), }, }, }, - } - return []runtime.Object{service} - }, - wanAddr: "test-host-ip", - wanPort: 1234, - }, - "source=Static": { - gatewayPod: corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Annotations: map[string]string{ - constants.AnnotationGatewayWANSource: "Static", - constants.AnnotationGatewayWANAddress: "test-wan-address", - constants.AnnotationGatewayWANPort: "1234", + StartupProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(exposedPathsStartupPortsRangeStart + 1), + }, + }, }, }, - Spec: corev1.PodSpec{ - NodeName: "test-nodename", - }, - Status: corev1.PodStatus{ - HostIP: "test-host-ip", + { + Name: envoySidecarContainer, + Ports: []corev1.ContainerPort{ + { + Name: "http", + ContainerPort: 20000, + }, + }, }, }, - gatewayEndpoint: corev1.Endpoints{ + service: &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", + Name: serviceName, Namespace: "default", }, - }, - k8sObjects: func() []runtime.Object { - service := &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Namespace: "default", - }, - Spec: corev1.ServiceSpec{ - Type: corev1.ServiceTypeLoadBalancer, - ClusterIP: "test-cluster-ip", - }, - Status: corev1.ServiceStatus{ - LoadBalancer: corev1.LoadBalancerStatus{ - Ingress: []corev1.LoadBalancerIngress{ - { - IP: "1.2.3.4", - }, - }, + Spec: corev1.ServiceSpec{ + ClusterIP: "10.0.0.1", + Ports: []corev1.ServicePort{ + { + Port: 8081, }, }, - } - return []runtime.Object{service} + }, }, - wanAddr: "test-wan-address", - wanPort: 1234, - }, - "source=Service, serviceType=NodePort": { - gatewayPod: corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Annotations: map[string]string{ - constants.AnnotationGatewayWANSource: "Service", - constants.AnnotationGatewayWANAddress: "test-wan-address", - constants.AnnotationGatewayWANPort: "1234", - }, + expProxyMode: api.ProxyModeTransparent, + expTaggedAddresses: map[string]api.ServiceAddress{ + "virtual": { + Address: "10.0.0.1", + Port: 8081, }, - Spec: 
corev1.PodSpec{ - NodeName: "test-nodename", + }, + expExposePaths: []api.ExposePath{ + { + ListenerPort: exposedPathsLivenessPortsRangeStart, + LocalPathPort: 8080, }, - Status: corev1.PodStatus{ - HostIP: "test-host-ip", + { + ListenerPort: exposedPathsReadinessPortsRangeStart, + LocalPathPort: 8081, }, - }, - gatewayEndpoint: corev1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Namespace: "default", + { + ListenerPort: exposedPathsStartupPortsRangeStart, + LocalPathPort: 8081, }, - }, - k8sObjects: func() []runtime.Object { - service := &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Namespace: "default", - }, - Spec: corev1.ServiceSpec{ - Type: corev1.ServiceTypeNodePort, - ClusterIP: "test-cluster-ip", - }, - Status: corev1.ServiceStatus{ - LoadBalancer: corev1.LoadBalancerStatus{ - Ingress: []corev1.LoadBalancerIngress{ - { - IP: "1.2.3.4", - }, - }, - }, - }, - } - return []runtime.Object{service} - }, - wanAddr: "test-host-ip", - wanPort: 1234, - }, - "source=Service, serviceType=ClusterIP": { - gatewayPod: corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Annotations: map[string]string{ - constants.AnnotationGatewayWANSource: "Service", - constants.AnnotationGatewayWANAddress: "test-wan-address", - constants.AnnotationGatewayWANPort: "1234", - }, + { + ListenerPort: exposedPathsLivenessPortsRangeStart + 1, + LocalPathPort: 8082, }, - Spec: corev1.PodSpec{ - NodeName: "test-nodename", + { + ListenerPort: exposedPathsReadinessPortsRangeStart + 1, + LocalPathPort: 8083, }, - Status: corev1.PodStatus{ - HostIP: "test-host-ip", + { + ListenerPort: exposedPathsStartupPortsRangeStart + 1, + LocalPathPort: 8083, }, }, - gatewayEndpoint: corev1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Namespace: "default", - }, + expErr: "", + }, + "non-http probe": { + tproxyGlobalEnabled: true, + overwriteProbes: true, + podAnnotations: map[string]string{ + annotationOriginalPod: "{\"metadata\":{\"name\":\"test-pod-1\",\"namespace\":\"default\",\"creationTimestamp\":null,\"labels\":{\"consul.hashicorp.com/connect-inject-managed-by\":\"consul-k8s-endpoints-controller\",\"consul.hashicorp.com/connect-inject-status\":\"injected\"}},\"spec\":{\"containers\":[{\"name\":\"test\",\"ports\":[{\"name\":\"tcp\",\"containerPort\":8081},{\"name\":\"http\",\"containerPort\":8080}],\"resources\":{},\"livenessProbe\":{\"tcpSocket\":{\"port\":8080}}}]},\"status\":{\"hostIP\":\"127.0.0.1\",\"podIP\":\"1.2.3.4\"}}\n", }, - k8sObjects: func() []runtime.Object { - service := &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Namespace: "default", - }, - Spec: corev1.ServiceSpec{ - Type: corev1.ServiceTypeClusterIP, - ClusterIP: "test-cluster-ip", + podContainers: []corev1.Container{ + { + Name: "test", + Ports: []corev1.ContainerPort{ + { + Name: "tcp", + ContainerPort: 8081, + }, + { + Name: "http", + ContainerPort: 8080, + }, }, - Status: corev1.ServiceStatus{ - LoadBalancer: corev1.LoadBalancerStatus{ - Ingress: []corev1.LoadBalancerIngress{ - { - IP: "1.2.3.4", - }, + LivenessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + TCPSocket: &corev1.TCPSocketAction{ + Port: intstr.FromInt(8080), }, }, }, - } - return []runtime.Object{service} - }, - wanAddr: "test-cluster-ip", - wanPort: 1234, - }, - "source=Service, serviceType=LoadBalancer,IP": { - gatewayPod: corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Annotations: map[string]string{ - constants.AnnotationGatewayWANSource: "Service", - 
constants.AnnotationGatewayWANAddress: "test-wan-address", - constants.AnnotationGatewayWANPort: "1234", - }, - }, - Spec: corev1.PodSpec{ - NodeName: "test-nodename", - }, - Status: corev1.PodStatus{ - HostIP: "test-host-ip", }, }, - gatewayEndpoint: corev1.Endpoints{ + service: &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", + Name: serviceName, Namespace: "default", }, - }, - k8sObjects: func() []runtime.Object { - service := &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Namespace: "default", - }, - Spec: corev1.ServiceSpec{ - Type: corev1.ServiceTypeLoadBalancer, - ClusterIP: "test-cluster-ip", - }, - Status: corev1.ServiceStatus{ - LoadBalancer: corev1.LoadBalancerStatus{ - Ingress: []corev1.LoadBalancerIngress{ - { - IP: "test-loadbalancer-ip", - }, - }, + Spec: corev1.ServiceSpec{ + ClusterIP: "10.0.0.1", + Ports: []corev1.ServicePort{ + { + Port: 8081, }, }, - } - return []runtime.Object{service} - }, - wanAddr: "test-loadbalancer-ip", - wanPort: 1234, - }, - "source=Service, serviceType=LoadBalancer,Hostname": { - gatewayPod: corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Annotations: map[string]string{ - constants.AnnotationGatewayWANSource: "Service", - constants.AnnotationGatewayWANAddress: "test-wan-address", - constants.AnnotationGatewayWANPort: "1234", - }, - }, - Spec: corev1.PodSpec{ - NodeName: "test-nodename", - }, - Status: corev1.PodStatus{ - HostIP: "test-host-ip", }, }, - gatewayEndpoint: corev1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Namespace: "default", + expProxyMode: api.ProxyModeTransparent, + expTaggedAddresses: map[string]api.ServiceAddress{ + "virtual": { + Address: "10.0.0.1", + Port: 8081, }, }, - k8sObjects: func() []runtime.Object { - service := &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Namespace: "default", + expExposePaths: nil, + expErr: "", + }, + "probes with port names": { + tproxyGlobalEnabled: true, + overwriteProbes: true, + podAnnotations: map[string]string{ + annotationOriginalPod: "{\"metadata\":{\"name\":\"test-pod-1\",\"namespace\":\"default\",\"creationTimestamp\":null,\"labels\":{\"consul.hashicorp.com/connect-inject-managed-by\":\"consul-k8s-endpoints-controller\",\"consul.hashicorp.com/connect-inject-status\":\"injected\"}},\"spec\":{\"containers\":[{\"name\":\"test\",\"ports\":[{\"name\":\"tcp\",\"containerPort\":8081},{\"name\":\"http\",\"containerPort\":8080}],\"resources\":{},\"livenessProbe\":{\"httpGet\":{\"port\":\"tcp\"}},\"readinessProbe\":{\"httpGet\":{\"port\":\"http\"}},\"startupProbe\":{\"httpGet\":{\"port\":\"http\"}}}]},\"status\":{\"hostIP\":\"127.0.0.1\",\"podIP\":\"1.2.3.4\"}}\n", + }, + podContainers: []corev1.Container{ + { + Name: "test", + Ports: []corev1.ContainerPort{ + { + Name: "tcp", + ContainerPort: 8081, + }, + { + Name: "http", + ContainerPort: 8080, + }, }, - Spec: corev1.ServiceSpec{ - Type: corev1.ServiceTypeLoadBalancer, - ClusterIP: "test-cluster-ip", + LivenessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(exposedPathsLivenessPortsRangeStart), + }, + }, }, - Status: corev1.ServiceStatus{ - LoadBalancer: corev1.LoadBalancerStatus{ - Ingress: []corev1.LoadBalancerIngress{ - { - Hostname: "test-loadbalancer-hostname", - }, + ReadinessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(exposedPathsReadinessPortsRangeStart), }, }, }, - } - return []runtime.Object{service} - }, - 
wanAddr: "test-loadbalancer-hostname", - wanPort: 1234, - }, - "no Source annotation": { - gatewayPod: corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Annotations: map[string]string{ - constants.AnnotationGatewayWANAddress: "test-wan-address", - constants.AnnotationGatewayWANPort: "1234", + StartupProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(exposedPathsStartupPortsRangeStart), + }, + }, }, }, - Spec: corev1.PodSpec{ - NodeName: "test-nodename", - }, - Status: corev1.PodStatus{ - HostIP: "test-host-ip", - }, }, - gatewayEndpoint: corev1.Endpoints{ + service: &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", + Name: serviceName, Namespace: "default", }, - }, - k8sObjects: func() []runtime.Object { - service := &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Namespace: "default", - }, - Spec: corev1.ServiceSpec{ - Type: corev1.ServiceTypeLoadBalancer, - ClusterIP: "test-cluster-ip", - }, - Status: corev1.ServiceStatus{ - LoadBalancer: corev1.LoadBalancerStatus{ - Ingress: []corev1.LoadBalancerIngress{ - { - Hostname: "test-loadbalancer-hostname", - }, - }, + Spec: corev1.ServiceSpec{ + ClusterIP: "10.0.0.1", + Ports: []corev1.ServicePort{ + { + Port: 8081, }, }, - } - return []runtime.Object{service} - }, - wanAddr: "test-loadbalancer-hostname", - wanPort: 1234, - expErr: "failed to read annotation consul.hashicorp.com/gateway-wan-address-source", - }, - "no Service with Source=Service": { - gatewayPod: corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Annotations: map[string]string{ - constants.AnnotationGatewayWANSource: "Service", - constants.AnnotationGatewayWANAddress: "test-wan-address", - constants.AnnotationGatewayWANPort: "1234", - }, - }, - Spec: corev1.PodSpec{ - NodeName: "test-nodename", - }, - Status: corev1.PodStatus{ - HostIP: "test-host-ip", }, }, - gatewayEndpoint: corev1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Namespace: "default", + expProxyMode: api.ProxyModeTransparent, + expTaggedAddresses: map[string]api.ServiceAddress{ + "virtual": { + Address: "10.0.0.1", + Port: 8081, }, }, - k8sObjects: func() []runtime.Object { return nil }, - wanAddr: "test-loadbalancer-hostname", - wanPort: 1234, - expErr: "failed to read service gateway in namespace default", - }, - "WAN Port annotation misconfigured": { - gatewayPod: corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Annotations: map[string]string{ - constants.AnnotationGatewayWANSource: "Service", - constants.AnnotationGatewayWANAddress: "test-wan-address", - constants.AnnotationGatewayWANPort: "not-a-valid-port", - }, + expExposePaths: []api.ExposePath{ + { + ListenerPort: exposedPathsLivenessPortsRangeStart, + LocalPathPort: 8081, }, - Spec: corev1.PodSpec{ - NodeName: "test-nodename", + { + ListenerPort: exposedPathsReadinessPortsRangeStart, + LocalPathPort: 8080, }, - Status: corev1.PodStatus{ - HostIP: "test-host-ip", + { + ListenerPort: exposedPathsStartupPortsRangeStart, + LocalPathPort: 8080, }, }, - gatewayEndpoint: corev1.Endpoints{ + expErr: "", + }, + } + + for name, c := range cases { + t.Run(name, func(t *testing.T) { + pod := createPod("test-pod-1", "1.2.3.4", true, true) + if c.podAnnotations != nil { + pod.Annotations = c.podAnnotations + } + if c.podContainers != nil { + pod.Spec.Containers = c.podContainers + } + + // We set these annotations explicitly as these are set by the meshWebhook and we + // need these values to determine 
which port to use for the service registration. + pod.Annotations[annotationPort] = "tcp" + + endpoints := &corev1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", + Name: serviceName, Namespace: "default", }, - }, - k8sObjects: func() []runtime.Object { - service := &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Namespace: "default", - }, - Spec: corev1.ServiceSpec{ - Type: corev1.ServiceTypeLoadBalancer, - ClusterIP: "test-cluster-ip", - }, - Status: corev1.ServiceStatus{ - LoadBalancer: corev1.LoadBalancerStatus{ - Ingress: []corev1.LoadBalancerIngress{ - { - Hostname: "test-loadbalancer-hostname", + Subsets: []corev1.EndpointSubset{ + { + Addresses: []corev1.EndpointAddress{ + { + IP: "1.2.3.4", + TargetRef: &corev1.ObjectReference{ + Kind: "Pod", + Name: pod.Name, + Namespace: pod.Namespace, }, }, }, }, - } - return []runtime.Object{service} - }, - wanAddr: "test-loadbalancer-hostname", - wanPort: 1234, - expErr: "failed to parse WAN port from value not-a-valid-port", + }, + } + // Add the pod's namespace. + ns := corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: pod.Namespace, Labels: c.namespaceLabels}, + } + var fakeClient client.Client + if c.service != nil { + fakeClient = fake.NewClientBuilder().WithRuntimeObjects(pod, endpoints, c.service, &ns).Build() + } else { + fakeClient = fake.NewClientBuilder().WithRuntimeObjects(pod, endpoints, &ns).Build() + } + + epCtrl := EndpointsController{ + Client: fakeClient, + EnableTransparentProxy: c.tproxyGlobalEnabled, + TProxyOverwriteProbes: c.overwriteProbes, + Log: logrtest.TestLogger{T: t}, + } + + serviceRegistration, proxyServiceRegistration, err := epCtrl.createServiceRegistrations(*pod, *endpoints) + if c.expErr != "" { + require.EqualError(t, err, c.expErr) + } else { + require.NoError(t, err) + + require.Equal(t, c.expProxyMode, proxyServiceRegistration.Proxy.Mode) + require.Equal(t, c.expTaggedAddresses, serviceRegistration.TaggedAddresses) + require.Equal(t, c.expTaggedAddresses, proxyServiceRegistration.TaggedAddresses) + require.Equal(t, c.expExposePaths, proxyServiceRegistration.Proxy.Expose.Paths) + } + }) + } +} + +func TestGetTokenMetaFromDescription(t *testing.T) { + t.Parallel() + cases := map[string]struct { + description string + expectedTokenMeta map[string]string + }{ + "no description prefix": { + description: `{"pod":"default/pod"}`, + expectedTokenMeta: map[string]string{"pod": "default/pod"}, }, - "source=Service, serviceType=LoadBalancer no Ingress configured": { - gatewayPod: corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Annotations: map[string]string{ - constants.AnnotationGatewayWANSource: "Service", - constants.AnnotationGatewayWANAddress: "test-wan-address", - constants.AnnotationGatewayWANPort: "1234", - }, + "consul's default description prefix": { + description: `token created via login: {"pod":"default/pod"}`, + expectedTokenMeta: map[string]string{"pod": "default/pod"}, + }, + } + + for name, c := range cases { + t.Run(name, func(t *testing.T) { + tokenMeta, err := getTokenMetaFromDescription(c.description) + require.NoError(t, err) + require.Equal(t, c.expectedTokenMeta, tokenMeta) + }) + } +} + +func TestMapAddresses(t *testing.T) { + t.Parallel() + cases := map[string]struct { + addresses corev1.EndpointSubset + expected map[corev1.EndpointAddress]string + }{ + "ready and not ready addresses": { + addresses: corev1.EndpointSubset{ + Addresses: []corev1.EndpointAddress{ + {Hostname: "host1"}, + {Hostname: "host2"}, }, - Spec: 
corev1.PodSpec{ - NodeName: "test-nodename", + NotReadyAddresses: []corev1.EndpointAddress{ + {Hostname: "host3"}, + {Hostname: "host4"}, }, - Status: corev1.PodStatus{ - HostIP: "test-host-ip", + }, + expected: map[corev1.EndpointAddress]string{ + {Hostname: "host1"}: api.HealthPassing, + {Hostname: "host2"}: api.HealthPassing, + {Hostname: "host3"}: api.HealthCritical, + {Hostname: "host4"}: api.HealthCritical, + }, + }, + "ready addresses only": { + addresses: corev1.EndpointSubset{ + Addresses: []corev1.EndpointAddress{ + {Hostname: "host1"}, + {Hostname: "host2"}, + {Hostname: "host3"}, + {Hostname: "host4"}, }, + NotReadyAddresses: []corev1.EndpointAddress{}, }, - gatewayEndpoint: corev1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Namespace: "default", + expected: map[corev1.EndpointAddress]string{ + {Hostname: "host1"}: api.HealthPassing, + {Hostname: "host2"}: api.HealthPassing, + {Hostname: "host3"}: api.HealthPassing, + {Hostname: "host4"}: api.HealthPassing, + }, + }, + "not ready addresses only": { + addresses: corev1.EndpointSubset{ + Addresses: []corev1.EndpointAddress{}, + NotReadyAddresses: []corev1.EndpointAddress{ + {Hostname: "host1"}, + {Hostname: "host2"}, + {Hostname: "host3"}, + {Hostname: "host4"}, }, }, - k8sObjects: func() []runtime.Object { - service := &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "gateway", - Namespace: "default", - }, - Spec: corev1.ServiceSpec{ - Type: corev1.ServiceTypeLoadBalancer, - ClusterIP: "test-cluster-ip", - }, - Status: corev1.ServiceStatus{ - LoadBalancer: corev1.LoadBalancerStatus{ - Ingress: []corev1.LoadBalancerIngress{}, - }, - }, - } - return []runtime.Object{service} + expected: map[corev1.EndpointAddress]string{ + {Hostname: "host1"}: api.HealthCritical, + {Hostname: "host2"}: api.HealthCritical, + {Hostname: "host3"}: api.HealthCritical, + {Hostname: "host4"}: api.HealthCritical, }, - wanAddr: "test-loadbalancer-hostname", - wanPort: 1234, - expErr: "failed to read ingress config for loadbalancer for service gateway in namespace default", }, } for name, c := range cases { t.Run(name, func(t *testing.T) { - fakeClient := fake.NewClientBuilder().WithRuntimeObjects(c.k8sObjects()...).Build() - epCtrl := Controller{ - Client: fakeClient, - } - addr, port, err := epCtrl.getWanData(c.gatewayPod, c.gatewayEndpoint) - if c.expErr == "" { - require.NoError(t, err) - require.Equal(t, c.wanAddr, addr) - require.Equal(t, c.wanPort, port) - } else { - require.EqualError(t, err, c.expErr) - } + actual := mapAddresses(c.addresses) + require.Equal(t, c.expected, actual) }) } } -func createServicePod(name, ip string, inject bool, managedByEndpointsController bool) *corev1.Pod { +func createPod(name, ip string, inject bool, managedByEndpointsController bool) *corev1.Pod { pod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: "default", - Labels: map[string]string{}, - Annotations: map[string]string{ - constants.AnnotationConsulK8sVersion: "1.0.0", - }, + Name: name, + Namespace: "default", + Labels: map[string]string{}, + Annotations: map[string]string{}, }, Status: corev1.PodStatus{ PodIP: ip, - HostIP: consulNodeAddress, + HostIP: "127.0.0.1", Conditions: []corev1.PodCondition{ { Type: corev1.PodReady, @@ -6374,40 +6038,17 @@ func createServicePod(name, ip string, inject bool, managedByEndpointsController }, }, }, - Spec: corev1.PodSpec{ - NodeName: nodeName, - }, } if inject { - pod.Labels[constants.KeyInjectStatus] = constants.Injected - pod.Annotations[constants.KeyInjectStatus] = 
constants.Injected + pod.Labels[keyInjectStatus] = injected + pod.Annotations[keyInjectStatus] = injected } if managedByEndpointsController { - pod.Labels[constants.KeyManagedBy] = constants.ManagedByValue + pod.Labels[keyManagedBy] = managedByValue } return pod } -func createGatewayPod(name, ip string, annotations map[string]string) *corev1.Pod { - pod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: "default", - Labels: map[string]string{constants.KeyManagedBy: constants.ManagedByValue}, - Annotations: annotations, - }, - Status: corev1.PodStatus{ - PodIP: ip, - Conditions: []corev1.PodCondition{ - { - Type: corev1.PodReady, - Status: corev1.ConditionTrue, - }, - }, - }, - Spec: corev1.PodSpec{ - NodeName: nodeName, - }, - } - return pod +func toStringPtr(input string) *string { + return &input } diff --git a/control-plane/connect-inject/envoy_sidecar.go b/control-plane/connect-inject/envoy_sidecar.go new file mode 100644 index 0000000000..53b3fcac55 --- /dev/null +++ b/control-plane/connect-inject/envoy_sidecar.go @@ -0,0 +1,238 @@ +package connectinject + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" + + "github.com/google/shlex" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/utils/pointer" +) + +func (w *MeshWebhook) envoySidecar(namespace corev1.Namespace, pod corev1.Pod, mpi multiPortInfo) (corev1.Container, error) { + resources, err := w.envoySidecarResources(pod) + if err != nil { + return corev1.Container{}, err + } + + multiPort := mpi.serviceName != "" + cmd, err := w.getContainerSidecarCommand(pod, mpi.serviceName, mpi.serviceIndex) + if err != nil { + return corev1.Container{}, err + } + + containerName := envoySidecarContainer + if multiPort { + containerName = fmt.Sprintf("%s-%s", envoySidecarContainer, mpi.serviceName) + } + + container := corev1.Container{ + Name: containerName, + Image: w.ImageEnvoy, + Env: []corev1.EnvVar{ + { + Name: "HOST_IP", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{FieldPath: "status.hostIP"}, + }, + }, + }, + Resources: resources, + VolumeMounts: []corev1.VolumeMount{ + { + Name: volumeName, + MountPath: "/consul/connect-inject", + }, + }, + Command: cmd, + } + + if useProxyHealthCheck(pod) { + // Add a port on the sidecar where the sidecar proxy will be queried for its health. + container.Ports = append(container.Ports, corev1.ContainerPort{ + Name: fmt.Sprintf("%s-%d", "proxy-health", mpi.serviceIndex), + ContainerPort: int32(proxyDefaultHealthPort + mpi.serviceIndex), + }) + } + + // Add any extra Envoy VolumeMounts. + if _, ok := pod.Annotations[annotationConsulSidecarUserVolumeMount]; ok { + var volumeMount []corev1.VolumeMount + err := json.Unmarshal([]byte(pod.Annotations[annotationConsulSidecarUserVolumeMount]), &volumeMount) + if err != nil { + return corev1.Container{}, err + } + container.VolumeMounts = append(container.VolumeMounts, volumeMount...) + } + + tproxyEnabled, err := transparentProxyEnabled(namespace, pod, w.EnableTransparentProxy) + if err != nil { + return corev1.Container{}, err + } + + // If not running in transparent proxy mode and in an OpenShift environment, + // skip setting the security context and let OpenShift set it for us. + // When transparent proxy is enabled, then Envoy needs to run as our specific user + // so that traffic redirection will work. + if tproxyEnabled || !w.EnableOpenShift { + if pod.Spec.SecurityContext != nil { + // User container and Envoy container cannot have the same UID. 
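+ // This first check covers the pod-level security context; per-container UIDs are validated in the loop below.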
+ if pod.Spec.SecurityContext.RunAsUser != nil && *pod.Spec.SecurityContext.RunAsUser == envoyUserAndGroupID {
+ return corev1.Container{}, fmt.Errorf("pod security context cannot have the same uid as envoy: %v", envoyUserAndGroupID)
+ }
+ }
+ // Ensure that none of the user's containers have the same UID as Envoy. At this point in injection the meshWebhook
+ // has only injected init containers so all containers defined in pod.Spec.Containers are from the user.
+ for _, c := range pod.Spec.Containers {
+ // User container and Envoy container cannot have the same UID.
+ if c.SecurityContext != nil && c.SecurityContext.RunAsUser != nil && *c.SecurityContext.RunAsUser == envoyUserAndGroupID && c.Image != w.ImageEnvoy {
+ return corev1.Container{}, fmt.Errorf("container %q has runAsUser set to the same uid %q as envoy which is not allowed", c.Name, envoyUserAndGroupID)
+ }
+ }
+ container.SecurityContext = &corev1.SecurityContext{
+ RunAsUser: pointer.Int64(envoyUserAndGroupID),
+ RunAsGroup: pointer.Int64(envoyUserAndGroupID),
+ RunAsNonRoot: pointer.Bool(true),
+ ReadOnlyRootFilesystem: pointer.Bool(true),
+ }
+ }
+
+ return container, nil
+}
+func (w *MeshWebhook) getContainerSidecarCommand(pod corev1.Pod, multiPortSvcName string, multiPortSvcIdx int) ([]string, error) {
+ bootstrapFile := "/consul/connect-inject/envoy-bootstrap.yaml"
+ if multiPortSvcName != "" {
+ bootstrapFile = fmt.Sprintf("/consul/connect-inject/envoy-bootstrap-%s.yaml", multiPortSvcName)
+ }
+ cmd := []string{
+ "envoy",
+ "--config-path", bootstrapFile,
+ }
+ if multiPortSvcName != "" {
+ // --base-id is needed so multiple Envoy proxies can run on the same host.
+ cmd = append(cmd, "--base-id", strconv.Itoa(multiPortSvcIdx))
+ }
+
+ // Check to see if the user has overridden concurrency via an annotation.
+ if pod.Annotations[annotationEnvoyProxyConcurrency] != "" {
+ val, err := strconv.ParseInt(pod.Annotations[annotationEnvoyProxyConcurrency], 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("unable to parse annotation: %s", annotationEnvoyProxyConcurrency)
+ }
+ if val < 0 {
+ return nil, fmt.Errorf("invalid envoy concurrency, must be >= 0: %s", pod.Annotations[annotationEnvoyProxyConcurrency])
+ } else {
+ cmd = append(cmd, "--concurrency", pod.Annotations[annotationEnvoyProxyConcurrency])
+ }
+ } else {
+ // Use the default concurrency.
+ cmd = append(cmd, "--concurrency", fmt.Sprintf("%d", w.DefaultEnvoyProxyConcurrency))
+ }
+
+ extraArgs, annotationSet := pod.Annotations[annotationEnvoyExtraArgs]
+
+ if annotationSet || w.EnvoyExtraArgs != "" {
+ extraArgsToUse := w.EnvoyExtraArgs
+
+ // Prefer args set by pod annotation over the flag to the consul-k8s binary (w.EnvoyExtraArgs).
+ if annotationSet {
+ extraArgsToUse = extraArgs
+ }
+
+ // Split string into tokens.
+ // e.g. "--foo bar --boo baz" --> ["--foo", "bar", "--boo", "baz"]
+ tokens, err := shlex.Split(extraArgsToUse)
+ if err != nil {
+ return []string{}, err
+ }
+ for _, t := range tokens {
+ if strings.Contains(t, " ") {
+ t = strconv.Quote(t)
+ }
+ cmd = append(cmd, t)
+ }
+ }
+ return cmd, nil
+}
+
+func (w *MeshWebhook) envoySidecarResources(pod corev1.Pod) (corev1.ResourceRequirements, error) {
+ resources := corev1.ResourceRequirements{
+ Limits: corev1.ResourceList{},
+ Requests: corev1.ResourceList{},
+ }
+ // zeroQuantity is used for comparison to see if a quantity was explicitly
+ // set.
+ var zeroQuantity resource.Quantity
+
+ // NOTE: We only want to set the limit/request if the default or annotation
+ // was explicitly set.
If it's not explicitly set, it will be the zero value
+ // which would show up in the pod spec as being explicitly set to zero if we
+ // set that key, e.g. "cpu" to zero.
+ // We want it to not show up in the pod spec at all if it's not explicitly
+ // set so that users aren't wondering why it's set to 0 when they didn't specify
+ // a request/limit. If they have explicitly set it to 0 then it will be set
+ // to 0 in the pod spec because we're doing a comparison to the zero-valued
+ // struct.
+
+ // CPU Limit.
+ if anno, ok := pod.Annotations[annotationSidecarProxyCPULimit]; ok {
+ cpuLimit, err := resource.ParseQuantity(anno)
+ if err != nil {
+ return corev1.ResourceRequirements{}, fmt.Errorf("parsing annotation %s:%q: %s", annotationSidecarProxyCPULimit, anno, err)
+ }
+ resources.Limits[corev1.ResourceCPU] = cpuLimit
+ } else if w.DefaultProxyCPULimit != zeroQuantity {
+ resources.Limits[corev1.ResourceCPU] = w.DefaultProxyCPULimit
+ }
+
+ // CPU Request.
+ if anno, ok := pod.Annotations[annotationSidecarProxyCPURequest]; ok {
+ cpuRequest, err := resource.ParseQuantity(anno)
+ if err != nil {
+ return corev1.ResourceRequirements{}, fmt.Errorf("parsing annotation %s:%q: %s", annotationSidecarProxyCPURequest, anno, err)
+ }
+ resources.Requests[corev1.ResourceCPU] = cpuRequest
+ } else if w.DefaultProxyCPURequest != zeroQuantity {
+ resources.Requests[corev1.ResourceCPU] = w.DefaultProxyCPURequest
+ }
+
+ // Memory Limit.
+ if anno, ok := pod.Annotations[annotationSidecarProxyMemoryLimit]; ok {
+ memoryLimit, err := resource.ParseQuantity(anno)
+ if err != nil {
+ return corev1.ResourceRequirements{}, fmt.Errorf("parsing annotation %s:%q: %s", annotationSidecarProxyMemoryLimit, anno, err)
+ }
+ resources.Limits[corev1.ResourceMemory] = memoryLimit
+ } else if w.DefaultProxyMemoryLimit != zeroQuantity {
+ resources.Limits[corev1.ResourceMemory] = w.DefaultProxyMemoryLimit
+ }
+
+ // Memory Request.
+ if anno, ok := pod.Annotations[annotationSidecarProxyMemoryRequest]; ok {
+ memoryRequest, err := resource.ParseQuantity(anno)
+ if err != nil {
+ return corev1.ResourceRequirements{}, fmt.Errorf("parsing annotation %s:%q: %s", annotationSidecarProxyMemoryRequest, anno, err)
+ }
+ resources.Requests[corev1.ResourceMemory] = memoryRequest
+ } else if w.DefaultProxyMemoryRequest != zeroQuantity {
+ resources.Requests[corev1.ResourceMemory] = w.DefaultProxyMemoryRequest
+ }
+
+ return resources, nil
+}
+
+// useProxyHealthCheck returns true if the pod has the annotation 'consul.hashicorp.com/use-proxy-health-check'
+// set to truthy values.
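+// Values that strconv.ParseBool cannot parse are treated as false rather than returned as an error.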
+func useProxyHealthCheck(pod corev1.Pod) bool { + if v, ok := pod.Annotations[annotationUseProxyHealthCheck]; ok { + useProxyHealthCheck, err := strconv.ParseBool(v) + if err != nil { + return false + } + return useProxyHealthCheck + } + return false +} diff --git a/control-plane/connect-inject/envoy_sidecar_test.go b/control-plane/connect-inject/envoy_sidecar_test.go new file mode 100644 index 0000000000..43c464ed7f --- /dev/null +++ b/control-plane/connect-inject/envoy_sidecar_test.go @@ -0,0 +1,668 @@ +package connectinject + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/pointer" +) + +func TestHandlerEnvoySidecar(t *testing.T) { + cases := map[string]struct { + annotations map[string]string + expCommand []string + expPort *corev1.ContainerPort + expErr string + }{ + "default settings, no annotations": { + annotations: map[string]string{ + annotationService: "foo", + }, + expCommand: []string{ + "envoy", + "--config-path", "/consul/connect-inject/envoy-bootstrap.yaml", + "--concurrency", "0", + }, + }, + "default settings, annotation override": { + annotations: map[string]string{ + annotationService: "foo", + annotationEnvoyProxyConcurrency: "42", + }, + expCommand: []string{ + "envoy", + "--config-path", "/consul/connect-inject/envoy-bootstrap.yaml", + "--concurrency", "42", + }, + }, + "default settings, proxy health check annotations": { + annotations: map[string]string{ + annotationService: "foo", + annotationUseProxyHealthCheck: "true", + }, + expCommand: []string{ + "envoy", + "--config-path", "/consul/connect-inject/envoy-bootstrap.yaml", + "--concurrency", "0", + }, + expPort: &corev1.ContainerPort{ + Name: "proxy-health-0", + ContainerPort: int32(proxyDefaultHealthPort), + }, + }, + "default settings, invalid concurrency annotation negative number": { + annotations: map[string]string{ + annotationService: "foo", + annotationEnvoyProxyConcurrency: "-42", + }, + expErr: "invalid envoy concurrency, must be >= 0: -42", + }, + } + for name, c := range cases { + t.Run(name, func(t *testing.T) { + h := MeshWebhook{} + pod := corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: c.annotations, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + }, + }, + }, + } + container, err := h.envoySidecar(testNS, pod, multiPortInfo{}) + if c.expErr != "" { + require.Contains(t, err.Error(), c.expErr) + } else { + require.NoError(t, err) + require.Equal(t, c.expCommand, container.Command) + require.Equal(t, container.VolumeMounts, []corev1.VolumeMount{ + { + Name: volumeName, + MountPath: "/consul/connect-inject", + }, + }) + if c.expPort != nil { + require.Contains(t, container.Ports, *c.expPort) + } + } + }) + } +} + +func TestHandlerEnvoySidecar_Multiport(t *testing.T) { + w := MeshWebhook{} + pod := corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + annotationService: "web,web-admin", + annotationUseProxyHealthCheck: "true", + }, + }, + + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + }, + { + Name: "web-admin", + }, + }, + }, + } + multiPortInfos := []multiPortInfo{ + { + serviceIndex: 0, + serviceName: "web", + }, + { + serviceIndex: 1, + serviceName: "web-admin", + }, + } + expCommand := map[int][]string{ + 0: {"envoy", "--config-path", "/consul/connect-inject/envoy-bootstrap-web.yaml", "--base-id", "0", "--concurrency", "0"}, + 1: 
{"envoy", "--config-path", "/consul/connect-inject/envoy-bootstrap-web-admin.yaml", "--base-id", "1", "--concurrency", "0"}, + } + expPorts := map[int]corev1.ContainerPort{ + 0: { + Name: "proxy-health-0", + ContainerPort: int32(proxyDefaultHealthPort), + }, + 1: { + Name: "proxy-health-1", + ContainerPort: int32(proxyDefaultHealthPort + 1), + }, + } + for i := 0; i < 2; i++ { + container, err := w.envoySidecar(testNS, pod, multiPortInfos[i]) + require.NoError(t, err) + require.Equal(t, expCommand[i], container.Command) + + require.Equal(t, container.VolumeMounts, []corev1.VolumeMount{ + { + Name: volumeName, + MountPath: "/consul/connect-inject", + }, + }) + + require.Contains(t, container.Ports, expPorts[i]) + } +} + +func TestHandlerEnvoySidecar_withSecurityContext(t *testing.T) { + cases := map[string]struct { + tproxyEnabled bool + openShiftEnabled bool + expSecurityContext *corev1.SecurityContext + }{ + "tproxy disabled; openshift disabled": { + tproxyEnabled: false, + openShiftEnabled: false, + expSecurityContext: &corev1.SecurityContext{ + RunAsUser: pointer.Int64(envoyUserAndGroupID), + RunAsGroup: pointer.Int64(envoyUserAndGroupID), + RunAsNonRoot: pointer.Bool(true), + ReadOnlyRootFilesystem: pointer.Bool(true), + }, + }, + "tproxy enabled; openshift disabled": { + tproxyEnabled: true, + openShiftEnabled: false, + expSecurityContext: &corev1.SecurityContext{ + RunAsUser: pointer.Int64(envoyUserAndGroupID), + RunAsGroup: pointer.Int64(envoyUserAndGroupID), + RunAsNonRoot: pointer.Bool(true), + ReadOnlyRootFilesystem: pointer.Bool(true), + }, + }, + "tproxy disabled; openshift enabled": { + tproxyEnabled: false, + openShiftEnabled: true, + expSecurityContext: nil, + }, + "tproxy enabled; openshift enabled": { + tproxyEnabled: true, + openShiftEnabled: true, + expSecurityContext: &corev1.SecurityContext{ + RunAsUser: pointer.Int64(envoyUserAndGroupID), + RunAsGroup: pointer.Int64(envoyUserAndGroupID), + RunAsNonRoot: pointer.Bool(true), + ReadOnlyRootFilesystem: pointer.Bool(true), + }, + }, + } + for name, c := range cases { + t.Run(name, func(t *testing.T) { + w := MeshWebhook{ + EnableTransparentProxy: c.tproxyEnabled, + EnableOpenShift: c.openShiftEnabled, + } + pod := corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + annotationService: "foo", + }, + }, + + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + }, + }, + }, + } + ec, err := w.envoySidecar(testNS, pod, multiPortInfo{}) + require.NoError(t, err) + require.Equal(t, c.expSecurityContext, ec.SecurityContext) + }) + } +} + +// Test that if the user specifies a pod security context with the same uid as `envoyUserAndGroupID` that we return +// an error to the meshWebhook. +func TestHandlerEnvoySidecar_FailsWithDuplicatePodSecurityContextUID(t *testing.T) { + require := require.New(t) + w := MeshWebhook{} + pod := corev1.Pod{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + }, + }, + SecurityContext: &corev1.PodSecurityContext{ + RunAsUser: pointer.Int64(envoyUserAndGroupID), + }, + }, + } + _, err := w.envoySidecar(testNS, pod, multiPortInfo{}) + require.Error(err, fmt.Sprintf("pod security context cannot have the same uid as envoy: %v", envoyUserAndGroupID)) +} + +// Test that if the user specifies a container with security context with the same uid as `envoyUserAndGroupID` that we +// return an error to the meshWebhook. 
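+// (The uid in question is envoyUserAndGroupID, i.e. 5995, which is used for
+// both the Envoy user and the Envoy group.)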
+// If a container using the envoy image has the same uid, we don't return an error
+// because in a multiport pod there can be multiple envoy sidecars.
+func TestHandlerEnvoySidecar_FailsWithDuplicateContainerSecurityContextUID(t *testing.T) {
+	cases := []struct {
+		name          string
+		pod           corev1.Pod
+		webhook       MeshWebhook
+		expErr        bool
+		expErrMessage error
+	}{
+		{
+			name: "fails with non envoy image",
+			pod: corev1.Pod{
+				Spec: corev1.PodSpec{
+					Containers: []corev1.Container{
+						{
+							Name: "web",
+							// Setting RunAsUser: 1 should succeed.
+							SecurityContext: &corev1.SecurityContext{
+								RunAsUser: pointer.Int64(1),
+							},
+						},
+						{
+							Name: "app",
+							// Setting RunAsUser: 5995 should fail.
+							SecurityContext: &corev1.SecurityContext{
+								RunAsUser: pointer.Int64(envoyUserAndGroupID),
+							},
+							Image: "not-envoy",
+						},
+					},
+				},
+			},
+			webhook:       MeshWebhook{},
+			expErr:        true,
+			expErrMessage: fmt.Errorf("container app has runAsUser set to the same uid %q as envoy which is not allowed", envoyUserAndGroupID),
+		},
+		{
+			name: "doesn't fail with envoy image",
+			pod: corev1.Pod{
+				Spec: corev1.PodSpec{
+					Containers: []corev1.Container{
+						{
+							Name: "web",
+							// Setting RunAsUser: 1 should succeed.
+							SecurityContext: &corev1.SecurityContext{
+								RunAsUser: pointer.Int64(1),
+							},
+						},
+						{
+							Name: "sidecar",
+							// Setting RunAsUser: 5995 should succeed if the image matches w.ImageEnvoy.
+							SecurityContext: &corev1.SecurityContext{
+								RunAsUser: pointer.Int64(envoyUserAndGroupID),
+							},
+							Image: "envoy",
+						},
+					},
+				},
+			},
+			webhook: MeshWebhook{
+				ImageEnvoy: "envoy",
+			},
+			expErr: false,
+		},
+	}
+
+	for _, tc := range cases {
+		t.Run(tc.name, func(t *testing.T) {
+			_, err := tc.webhook.envoySidecar(testNS, tc.pod, multiPortInfo{})
+			if tc.expErr {
+				require.Error(t, err, tc.expErrMessage)
+			} else {
+				require.NoError(t, err)
+			}
+		})
+	}
+}
+
+// Test that we can pass extra args to envoy via the extraEnvoyArgs flag
+// or via pod annotations. When arguments are passed in both ways, the
+// arguments set via pod annotations are used.
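+// For example, annotating a pod as below (assuming the usual value of the
+// annotationEnvoyExtraArgs constant) appends "--log-level" and "debug" to
+// the generated command after the --config-path and --concurrency flags:
+//
+//	consul.hashicorp.com/envoy-extra-args: --log-level debug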
+func TestHandlerEnvoySidecar_EnvoyExtraArgs(t *testing.T) { + cases := []struct { + name string + envoyExtraArgs string + pod *corev1.Pod + expectedContainerCommand []string + }{ + { + name: "no extra options provided", + envoyExtraArgs: "", + pod: &corev1.Pod{}, + expectedContainerCommand: []string{ + "envoy", + "--config-path", "/consul/connect-inject/envoy-bootstrap.yaml", + "--concurrency", "0", + }, + }, + { + name: "via flag: extra log-level option", + envoyExtraArgs: "--log-level debug", + pod: &corev1.Pod{}, + expectedContainerCommand: []string{ + "envoy", + "--config-path", "/consul/connect-inject/envoy-bootstrap.yaml", + "--concurrency", "0", + "--log-level", "debug", + }, + }, + { + name: "via flag: multiple arguments with quotes", + envoyExtraArgs: "--log-level debug --admin-address-path \"/tmp/consul/foo bar\"", + pod: &corev1.Pod{}, + expectedContainerCommand: []string{ + "envoy", + "--config-path", "/consul/connect-inject/envoy-bootstrap.yaml", + "--concurrency", "0", + "--log-level", "debug", + "--admin-address-path", "\"/tmp/consul/foo bar\"", + }, + }, + { + name: "via annotation: multiple arguments with quotes", + envoyExtraArgs: "", + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + annotationEnvoyExtraArgs: "--log-level debug --admin-address-path \"/tmp/consul/foo bar\"", + }, + }, + }, + expectedContainerCommand: []string{ + "envoy", + "--config-path", "/consul/connect-inject/envoy-bootstrap.yaml", + "--concurrency", "0", + "--log-level", "debug", + "--admin-address-path", "\"/tmp/consul/foo bar\"", + }, + }, + { + name: "via flag and annotation: should prefer setting via the annotation", + envoyExtraArgs: "this should be overwritten", + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + annotationEnvoyExtraArgs: "--log-level debug --admin-address-path \"/tmp/consul/foo bar\"", + }, + }, + }, + expectedContainerCommand: []string{ + "envoy", + "--config-path", "/consul/connect-inject/envoy-bootstrap.yaml", + "--concurrency", "0", + "--log-level", "debug", + "--admin-address-path", "\"/tmp/consul/foo bar\"", + }, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + h := MeshWebhook{ + ImageConsul: "hashicorp/consul:latest", + ImageEnvoy: "hashicorp/consul-k8s:latest", + EnvoyExtraArgs: tc.envoyExtraArgs, + } + + c, err := h.envoySidecar(testNS, *tc.pod, multiPortInfo{}) + require.NoError(t, err) + require.Equal(t, tc.expectedContainerCommand, c.Command) + }) + } +} + +func TestHandlerEnvoySidecar_UserVolumeMounts(t *testing.T) { + cases := []struct { + name string + pod corev1.Pod + expectedContainerVolumeMounts []corev1.VolumeMount + expErr string + }{ + { + name: "able to set a sidecar container volume mount via annotation", + pod: corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + annotationEnvoyExtraArgs: "--log-level debug --admin-address-path \"/tmp/consul/foo bar\"", + annotationConsulSidecarUserVolumeMount: "[{\"name\": \"tls-cert\", \"mountPath\": \"/custom/path\"}, {\"name\": \"tls-ca\", \"mountPath\": \"/custom/path2\"}]", + }, + }, + }, + expectedContainerVolumeMounts: []corev1.VolumeMount{ + { + Name: "consul-connect-inject-data", + MountPath: "/consul/connect-inject", + }, + { + Name: "tls-cert", + MountPath: "/custom/path", + }, + { + Name: "tls-ca", + MountPath: "/custom/path2", + }, + }, + }, + { + name: "invalid annotation results in error", + pod: corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: 
map[string]string{ + annotationEnvoyExtraArgs: "--log-level debug --admin-address-path \"/tmp/consul/foo bar\"", + annotationConsulSidecarUserVolumeMount: "[abcdefg]", + }, + }, + }, + expErr: "invalid character 'a' looking ", + }, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + h := MeshWebhook{ + ImageConsul: "hashicorp/consul:latest", + ImageEnvoy: "hashicorp/consul-k8s:latest", + } + c, err := h.envoySidecar(testNS, tc.pod, multiPortInfo{}) + if tc.expErr == "" { + require.NoError(t, err) + require.Equal(t, tc.expectedContainerVolumeMounts, c.VolumeMounts) + } else { + require.Error(t, err) + require.Contains(t, err.Error(), tc.expErr) + } + }) + } +} + +func TestHandlerEnvoySidecar_Resources(t *testing.T) { + mem1 := resource.MustParse("100Mi") + mem2 := resource.MustParse("200Mi") + cpu1 := resource.MustParse("100m") + cpu2 := resource.MustParse("200m") + zero := resource.MustParse("0") + + cases := map[string]struct { + webhook MeshWebhook + annotations map[string]string + expResources corev1.ResourceRequirements + expErr string + }{ + "no defaults, no annotations": { + webhook: MeshWebhook{}, + annotations: nil, + expResources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{}, + Requests: corev1.ResourceList{}, + }, + }, + "all defaults, no annotations": { + webhook: MeshWebhook{ + DefaultProxyCPURequest: cpu1, + DefaultProxyCPULimit: cpu2, + DefaultProxyMemoryRequest: mem1, + DefaultProxyMemoryLimit: mem2, + }, + annotations: nil, + expResources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: cpu2, + corev1.ResourceMemory: mem2, + }, + Requests: corev1.ResourceList{ + corev1.ResourceCPU: cpu1, + corev1.ResourceMemory: mem1, + }, + }, + }, + "no defaults, all annotations": { + webhook: MeshWebhook{}, + annotations: map[string]string{ + annotationSidecarProxyCPURequest: "100m", + annotationSidecarProxyMemoryRequest: "100Mi", + annotationSidecarProxyCPULimit: "200m", + annotationSidecarProxyMemoryLimit: "200Mi", + }, + expResources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: cpu2, + corev1.ResourceMemory: mem2, + }, + Requests: corev1.ResourceList{ + corev1.ResourceCPU: cpu1, + corev1.ResourceMemory: mem1, + }, + }, + }, + "annotations override defaults": { + webhook: MeshWebhook{ + DefaultProxyCPURequest: zero, + DefaultProxyCPULimit: zero, + DefaultProxyMemoryRequest: zero, + DefaultProxyMemoryLimit: zero, + }, + annotations: map[string]string{ + annotationSidecarProxyCPURequest: "100m", + annotationSidecarProxyMemoryRequest: "100Mi", + annotationSidecarProxyCPULimit: "200m", + annotationSidecarProxyMemoryLimit: "200Mi", + }, + expResources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: cpu2, + corev1.ResourceMemory: mem2, + }, + Requests: corev1.ResourceList{ + corev1.ResourceCPU: cpu1, + corev1.ResourceMemory: mem1, + }, + }, + }, + "defaults set to zero, no annotations": { + webhook: MeshWebhook{ + DefaultProxyCPURequest: zero, + DefaultProxyCPULimit: zero, + DefaultProxyMemoryRequest: zero, + DefaultProxyMemoryLimit: zero, + }, + annotations: nil, + expResources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: zero, + corev1.ResourceMemory: zero, + }, + Requests: corev1.ResourceList{ + corev1.ResourceCPU: zero, + corev1.ResourceMemory: zero, + }, + }, + }, + "annotations set to 0": { + webhook: MeshWebhook{}, + annotations: map[string]string{ + annotationSidecarProxyCPURequest: "0", + 
annotationSidecarProxyMemoryRequest: "0", + annotationSidecarProxyCPULimit: "0", + annotationSidecarProxyMemoryLimit: "0", + }, + expResources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: zero, + corev1.ResourceMemory: zero, + }, + Requests: corev1.ResourceList{ + corev1.ResourceCPU: zero, + corev1.ResourceMemory: zero, + }, + }, + }, + "invalid cpu request": { + webhook: MeshWebhook{}, + annotations: map[string]string{ + annotationSidecarProxyCPURequest: "invalid", + }, + expErr: "parsing annotation consul.hashicorp.com/sidecar-proxy-cpu-request:\"invalid\": quantities must match the regular expression", + }, + "invalid cpu limit": { + webhook: MeshWebhook{}, + annotations: map[string]string{ + annotationSidecarProxyCPULimit: "invalid", + }, + expErr: "parsing annotation consul.hashicorp.com/sidecar-proxy-cpu-limit:\"invalid\": quantities must match the regular expression", + }, + "invalid memory request": { + webhook: MeshWebhook{}, + annotations: map[string]string{ + annotationSidecarProxyMemoryRequest: "invalid", + }, + expErr: "parsing annotation consul.hashicorp.com/sidecar-proxy-memory-request:\"invalid\": quantities must match the regular expression", + }, + "invalid memory limit": { + webhook: MeshWebhook{}, + annotations: map[string]string{ + annotationSidecarProxyMemoryLimit: "invalid", + }, + expErr: "parsing annotation consul.hashicorp.com/sidecar-proxy-memory-limit:\"invalid\": quantities must match the regular expression", + }, + } + + for name, c := range cases { + t.Run(name, func(tt *testing.T) { + require := require.New(tt) + pod := corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: c.annotations, + }, + + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + }, + }, + }, + } + container, err := c.webhook.envoySidecar(testNS, pod, multiPortInfo{}) + if c.expErr != "" { + require.NotNil(err) + require.Contains(err.Error(), c.expErr) + } else { + require.NoError(err) + require.Equal(c.expResources, container.Resources) + } + }) + } +} diff --git a/control-plane/connect-inject/webhook/health_checks_test.go b/control-plane/connect-inject/health_checks_test.go similarity index 98% rename from control-plane/connect-inject/webhook/health_checks_test.go rename to control-plane/connect-inject/health_checks_test.go index 9279d8f140..8816e6e5bb 100644 --- a/control-plane/connect-inject/webhook/health_checks_test.go +++ b/control-plane/connect-inject/health_checks_test.go @@ -1,4 +1,4 @@ -package webhook +package connectinject import ( "os" diff --git a/control-plane/connect-inject/webhook/heath_checks.go b/control-plane/connect-inject/heath_checks.go similarity index 95% rename from control-plane/connect-inject/webhook/heath_checks.go rename to control-plane/connect-inject/heath_checks.go index 42d6da08e1..9e423e5ab2 100644 --- a/control-plane/connect-inject/webhook/heath_checks.go +++ b/control-plane/connect-inject/heath_checks.go @@ -1,4 +1,4 @@ -package webhook +package connectinject import ( "errors" diff --git a/control-plane/connect-inject/webhook/mesh_webhook.go b/control-plane/connect-inject/mesh_webhook.go similarity index 78% rename from control-plane/connect-inject/webhook/mesh_webhook.go rename to control-plane/connect-inject/mesh_webhook.go index 503d3182b4..54c458af7f 100644 --- a/control-plane/connect-inject/webhook/mesh_webhook.go +++ b/control-plane/connect-inject/mesh_webhook.go @@ -1,4 +1,4 @@ -package webhook +package connectinject import ( "context" @@ -9,15 +9,12 @@ import ( "path/filepath" 
"strconv" "strings" + "time" mapset "github.com/deckarep/golang-set" "github.com/go-logr/logr" - "github.com/hashicorp/consul-k8s/control-plane/connect-inject/common" - "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" - "github.com/hashicorp/consul-k8s/control-plane/connect-inject/metrics" - "github.com/hashicorp/consul-k8s/control-plane/consul" "github.com/hashicorp/consul-k8s/control-plane/namespaces" - "github.com/hashicorp/consul-k8s/control-plane/version" + "github.com/hashicorp/consul/api" "gomodules.xyz/jsonpatch/v2" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -28,42 +25,21 @@ import ( "sigs.k8s.io/controller-runtime/pkg/webhook/admission" ) -const ( - sidecarContainer = "consul-dataplane" - - // exposedPathsLivenessPortsRangeStart is the start of the port range that we will use as - // the ListenerPort for the Expose configuration of the proxy registration for a liveness probe. - exposedPathsLivenessPortsRangeStart = 20300 - - // exposedPathsReadinessPortsRangeStart is the start of the port range that we will use as - // the ListenerPort for the Expose configuration of the proxy registration for a readiness probe. - exposedPathsReadinessPortsRangeStart = 20400 - - // exposedPathsStartupPortsRangeStart is the start of the port range that we will use as - // the ListenerPort for the Expose configuration of the proxy registration for a startup probe. - exposedPathsStartupPortsRangeStart = 20500 -) - // kubeSystemNamespaces is a set of namespaces that are considered // "system" level namespaces and are always skipped (never injected). var kubeSystemNamespaces = mapset.NewSetWith(metav1.NamespaceSystem, metav1.NamespacePublic) -// MeshWebhook is the HTTP meshWebhook for admission webhooks. +// Webhook is the HTTP meshWebhook for admission webhooks. type MeshWebhook struct { - Clientset kubernetes.Interface - - // ConsulClientConfig is the config to create a Consul API client. - ConsulConfig *consul.Config - - // ConsulServerConnMgr is the watcher for the Consul server addresses. - ConsulServerConnMgr consul.ServerConnectionManager + ConsulClient *api.Client + Clientset kubernetes.Interface // ImageConsul is the container image for Consul to use. - // ImageConsulDataplane is the container image for Envoy to use. + // ImageEnvoy is the container image for Envoy to use. // // Both of these MUST be set. - ImageConsul string - ImageConsulDataplane string + ImageConsul string + ImageEnvoy string // ImageConsulK8S is the container image for consul-k8s to use. // This image is used for the consul-sidecar container. @@ -86,17 +62,6 @@ type MeshWebhook struct { // If not set, will use HTTP. ConsulCACert string - // TLSEnabled indicates whether we should use TLS for communicating to Consul. - TLSEnabled bool - - // ConsulAddress is the address of the Consul server. This should be only the - // host (i.e. not including port or protocol). - ConsulAddress string - - // ConsulTLSServerName is the SNI header to use to connect to the Consul servers - // over TLS. - ConsulTLSServerName string - // ConsulPartition is the name of the Admin Partition that the controller // is deployed in. It is an enterprise feature requiring Consul Enterprise 1.11+. // Its value is an empty string if partitions aren't enabled. @@ -154,7 +119,7 @@ type MeshWebhook struct { // MetricsConfig contains metrics configuration from the inject-connect command and has methods to determine whether // configuration should come from the default flags or annotations. 
The meshWebhook uses this to configure prometheus // annotations and the merged metrics server. - MetricsConfig metrics.Config + MetricsConfig MetricsConfig // Resource settings for init container. All of these fields // will be populated by the defaults provided in the initial flags. @@ -182,27 +147,26 @@ type MeshWebhook struct { // from mesh services. EnableConsulDNS bool + // ResourcePrefix is the prefix used for the installation which is used to determine the Service + // name of the Consul DNS service. + ResourcePrefix string + // EnableOpenShift indicates that when tproxy is enabled, the security context for the Envoy and init // containers should not be added because OpenShift sets a random user for those and will not allow // those containers to be created otherwise. EnableOpenShift bool - // SkipServerWatch prevents consul-dataplane from consuming the server update stream. This is useful - // for situations where Consul servers are behind a load balancer. - SkipServerWatch bool - - // ReleaseNamespace is the Kubernetes namespace where this webhook is running. - ReleaseNamespace string + // ConsulAPITimeout is the duration that the consul API client will + // wait for a response from the API before cancelling the request. + ConsulAPITimeout time.Duration // Log Log logr.Logger - // Log settings for consul-dataplane and connect-init containers. + // Log settings for consul-sidecar LogLevel string LogJSON bool decoder *admission.Decoder - // etcResolvFile is only used in tests to stub out /etc/resolv.conf file. - etcResolvFile string } type multiPortInfo struct { serviceIndex int @@ -228,6 +192,11 @@ func (w *MeshWebhook) Handle(ctx context.Context, req admission.Request) admissi return admission.Errored(http.StatusBadRequest, err) } + if err := w.validatePod(pod); err != nil { + w.Log.Error(err, "error validating pod", "request name", req.Name) + return admission.Errored(http.StatusBadRequest, err) + } + // Setup the default annotation values that are used for the container. // This MUST be done before shouldInject is called since that function // uses these annotations. @@ -255,9 +224,9 @@ func (w *MeshWebhook) Handle(ctx context.Context, req admission.Request) admissi w.injectVolumeMount(pod) // Optionally add any volumes that are to be used by the envoy sidecar. - if _, ok := pod.Annotations[constants.AnnotationConsulSidecarUserVolume]; ok { + if _, ok := pod.Annotations[annotationConsulSidecarUserVolume]; ok { var userVolumes []corev1.Volume - err := json.Unmarshal([]byte(pod.Annotations[constants.AnnotationConsulSidecarUserVolume]), &userVolumes) + err := json.Unmarshal([]byte(pod.Annotations[annotationConsulSidecarUserVolume]), &userVolumes) if err != nil { return admission.Errored(http.StatusInternalServerError, fmt.Errorf("error unmarshalling sidecar user volumes: %s", err)) } @@ -275,6 +244,10 @@ func (w *MeshWebhook) Handle(ctx context.Context, req admission.Request) admissi pod.Spec.Containers[i].Env = append(pod.Spec.Containers[i].Env, containerEnvVars...) } + // Add the init container which copies the Consul binary to /consul/connect-inject/. + initCopyContainer := w.initCopyContainer() + pod.Spec.InitContainers = append(pod.Spec.InitContainers, initCopyContainer) + // A user can enable/disable tproxy for an entire namespace via a label. 
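+	// (Hypothetical illustration -- the label key here is an assumption about
+	// the usual consul-k8s constant: labelling the namespace with
+	// consul.hashicorp.com/transparent-proxy: "false" would disable tproxy
+	// for every pod in that namespace.)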
 	ns, err := w.Clientset.CoreV1().Namespaces().Get(ctx, req.Namespace, metav1.GetOptions{})
 	if err != nil {
@@ -298,7 +271,7 @@ func (w *MeshWebhook) Handle(ctx context.Context, req admission.Request) admissi
 		pod.Spec.InitContainers = append(pod.Spec.InitContainers, initContainer)

 		// Add the Envoy sidecar.
-		envoySidecar, err := w.consulDataplaneSidecar(*ns, pod, multiPortInfo{})
+		envoySidecar, err := w.envoySidecar(*ns, pod, multiPortInfo{})
 		if err != nil {
 			w.Log.Error(err, "error configuring injection sidecar container", "request name", req.Name)
 			return admission.Errored(http.StatusInternalServerError, fmt.Errorf("error configuring injection sidecar container: %s", err))
@@ -368,7 +341,7 @@ func (w *MeshWebhook) Handle(ctx context.Context, req admission.Request) admissi
 			pod.Spec.InitContainers = append(pod.Spec.InitContainers, initContainer)

 			// Add the Envoy sidecar.
-			envoySidecar, err := w.consulDataplaneSidecar(*ns, pod, mpi)
+			envoySidecar, err := w.envoySidecar(*ns, pod, mpi)
 			if err != nil {
 				w.Log.Error(err, "error configuring injection sidecar container", "request name", req.Name)
 				return admission.Errored(http.StatusInternalServerError, fmt.Errorf("error configuring injection sidecar container: %s", err))
@@ -377,29 +350,40 @@ func (w *MeshWebhook) Handle(ctx context.Context, req admission.Request) admissi
 		}
 	}

+	// Now that the consul-sidecar no longer needs to re-register services periodically
+	// (that functionality lives in the endpoints-controller),
+	// we only need the consul sidecar to run the metrics merging server.
+	// First, determine if we need to run the metrics merging server.
+	shouldRunMetricsMerging, err := w.MetricsConfig.shouldRunMergedMetricsServer(pod)
+	if err != nil {
+		w.Log.Error(err, "error determining if metrics merging server should be run", "request name", req.Name)
+		return admission.Errored(http.StatusInternalServerError, fmt.Errorf("error determining if metrics merging server should be run: %s", err))
+	}
+
+	// Add the consul-sidecar only if we need to run the metrics merging server.
+	if shouldRunMetricsMerging {
+		consulSidecar, err := w.consulSidecar(pod)
+		if err != nil {
+			w.Log.Error(err, "error configuring consul sidecar container", "request name", req.Name)
+			return admission.Errored(http.StatusInternalServerError, fmt.Errorf("error configuring consul sidecar container: %s", err))
+		}
+		pod.Spec.Containers = append(pod.Spec.Containers, consulSidecar)
+	}
+
 	// pod.Annotations has already been initialized by w.defaultAnnotations()
 	// and does not need to be checked for being a nil value.
-	pod.Annotations[constants.KeyInjectStatus] = constants.Injected
+	pod.Annotations[keyInjectStatus] = injected

-	tproxyEnabled, err := common.TransparentProxyEnabled(*ns, pod, w.EnableTransparentProxy)
+	tproxyEnabled, err := transparentProxyEnabled(*ns, pod, w.EnableTransparentProxy)
 	if err != nil {
 		w.Log.Error(err, "error determining if transparent proxy is enabled", "request name", req.Name)
 		return admission.Errored(http.StatusInternalServerError, fmt.Errorf("error determining if transparent proxy is enabled: %s", err))
 	}

 	// Add an annotation to the pod that sets transparent-proxy-status to enabled or disabled. Used by the CNI plugin
-	// to determine if it should traffic redirect or not.
+	// to determine if it should redirect traffic or not.
 	if tproxyEnabled {
-		pod.Annotations[constants.KeyTransparentProxyStatus] = constants.Enabled
-	}
-
-	// If tproxy with DNS redirection is enabled, we want to configure dns on the pod.
- if tproxyEnabled && w.EnableConsulDNS { - if err = w.configureDNS(&pod, req.Namespace); err != nil { - w.Log.Error(err, "error configuring DNS on the pod", "request name", req.Name) - return admission.Errored(http.StatusInternalServerError, fmt.Errorf("error configuring DNS on the pod: %s", err)) - } - + pod.Annotations[keyTransparentProxyStatus] = enabled } // Add annotations for metrics. @@ -411,15 +395,15 @@ func (w *MeshWebhook) Handle(ctx context.Context, req admission.Request) admissi if pod.Labels == nil { pod.Labels = make(map[string]string) } - pod.Labels[constants.KeyInjectStatus] = constants.Injected + pod.Labels[keyInjectStatus] = injected // Add the managed-by label since services are now managed by endpoints controller. This is to support upgrading // from consul-k8s without Endpoints controller to consul-k8s with Endpoints controller. - pod.Labels[constants.KeyManagedBy] = constants.ManagedByValue + pod.Labels[keyManagedBy] = managedByValue // Consul-ENT only: Add the Consul destination namespace as an annotation to the pod. if w.EnableNamespaces { - pod.Annotations[constants.AnnotationConsulNamespace] = w.consulNamespace(req.Namespace) + pod.Annotations[annotationConsulNamespace] = w.consulNamespace(req.Namespace) } // Overwrite readiness/liveness probes if needed. @@ -432,7 +416,8 @@ func (w *MeshWebhook) Handle(ctx context.Context, req admission.Request) admissi // When CNI and tproxy are enabled, we add an annotation to the pod that contains the iptables config so that the CNI // plugin can apply redirect traffic rules on the pod. if w.EnableCNI && tproxyEnabled { - if err = w.addRedirectTrafficConfigAnnotation(&pod, *ns); err != nil { + if err := w.addRedirectTrafficConfigAnnotation(&pod, *ns); err != nil { + // todo: update this error message w.Log.Error(err, "error configuring annotation for CNI traffic redirection", "request name", req.Name) return admission.Errored(http.StatusInternalServerError, fmt.Errorf("error configuring annotation for CNI traffic redirection: %s", err)) } @@ -456,19 +441,7 @@ func (w *MeshWebhook) Handle(ctx context.Context, req admission.Request) admissi // all patches are created to guarantee no errors were encountered in // that process before modifying the Consul cluster. 
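 	// (Summary, not new behavior: namespaces.EnsureExists is expected to be
 	// idempotent -- it creates the destination Consul namespace only if it is
 	// missing and attaches the cross-namespace ACL policy when one is set.)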
if w.EnableNamespaces { - serverState, err := w.ConsulServerConnMgr.State() - if err != nil { - w.Log.Error(err, "error checking or creating namespace", - "ns", w.consulNamespace(req.Namespace), "request name", req.Name) - return admission.Errored(http.StatusInternalServerError, fmt.Errorf("error checking or creating namespace: %s", err)) - } - apiClient, err := consul.NewClientFromConnMgrState(w.ConsulConfig, serverState) - if err != nil { - w.Log.Error(err, "error checking or creating namespace", - "ns", w.consulNamespace(req.Namespace), "request name", req.Name) - return admission.Errored(http.StatusInternalServerError, fmt.Errorf("error checking or creating namespace: %s", err)) - } - if _, err := namespaces.EnsureExists(apiClient, w.consulNamespace(req.Namespace), w.CrossNamespaceACLPolicy); err != nil { + if _, err := namespaces.EnsureExists(w.ConsulClient, w.consulNamespace(req.Namespace), w.CrossNamespaceACLPolicy); err != nil { w.Log.Error(err, "error checking or creating namespace", "ns", w.consulNamespace(req.Namespace), "request name", req.Name) return admission.Errored(http.StatusInternalServerError, fmt.Errorf("error checking or creating namespace: %s", err)) @@ -480,15 +453,25 @@ func (w *MeshWebhook) Handle(ctx context.Context, req admission.Request) admissi return admission.Patched(fmt.Sprintf("valid %s request", pod.Kind), patches...) } +// shouldOverwriteProbes returns true if we need to overwrite readiness/liveness probes for this pod. +// It returns an error when the annotation value cannot be parsed by strconv.ParseBool. +func shouldOverwriteProbes(pod corev1.Pod, globalOverwrite bool) (bool, error) { + if raw, ok := pod.Annotations[annotationTransparentProxyOverwriteProbes]; ok { + return strconv.ParseBool(raw) + } + + return globalOverwrite, nil +} + // overwriteProbes overwrites readiness/liveness probes of this pod when // both transparent proxy is enabled and overwrite probes is true for the pod. 
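 // (A sketch of the effect, assuming the exposedPaths* port conventions used
 // by this package:
 //
 //	container.LivenessProbe.HTTPGet.Port = intstr.FromInt(exposedPathsLivenessPortsRangeStart + i)
 //
 // i.e. each HTTP probe is re-pointed at a listener port that Envoy exposes,
 // such as 20300+i for the i-th container's liveness probe, so kubelet probes
 // keep working once all pod traffic is redirected through the proxy.)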
 func (w *MeshWebhook) overwriteProbes(ns corev1.Namespace, pod *corev1.Pod) error {
-	tproxyEnabled, err := common.TransparentProxyEnabled(ns, *pod, w.EnableTransparentProxy)
+	tproxyEnabled, err := transparentProxyEnabled(ns, *pod, w.EnableTransparentProxy)
 	if err != nil {
 		return err
 	}
-	overwriteProbes, err := common.ShouldOverwriteProbes(*pod, w.TProxyOverwriteProbes)
+	overwriteProbes, err := shouldOverwriteProbes(*pod, w.TProxyOverwriteProbes)
 	if err != nil {
 		return err
 	}
@@ -496,7 +479,7 @@ func (w *MeshWebhook) overwriteProbes(ns corev1.Namespace, pod *corev1.Pod) erro
 	if tproxyEnabled && overwriteProbes {
 		for i, container := range pod.Spec.Containers {
 			// skip the "envoy-sidecar" container from having its probes overridden
-			if container.Name == sidecarContainer {
+			if container.Name == envoySidecarContainer {
 				continue
 			}
 			if container.LivenessProbe != nil && container.LivenessProbe.HTTPGet != nil {
@@ -514,7 +497,7 @@
 }

 func (w *MeshWebhook) injectVolumeMount(pod corev1.Pod) {
-	containersToInject := splitCommaSeparatedItemsFromAnnotation(constants.AnnotationInjectMountVolumes, pod)
+	containersToInject := splitCommaSeparatedItemsFromAnnotation(annotationInjectMountVolumes, pod)

 	for index, container := range pod.Spec.Containers {
 		if sliceContains(containersToInject, container.Name) {
@@ -544,14 +527,14 @@ func (w *MeshWebhook) shouldInject(pod corev1.Pod, namespace string) (bool, erro
 	}

 	// If we already injected then don't inject again
-	if pod.Annotations[constants.KeyInjectStatus] != "" {
+	if pod.Annotations[keyInjectStatus] != "" {
 		return false, nil
 	}

 	// If the explicit true/false is on, then take that value. Note that
 	// this has to be the last check since it sets a default value after
 	// all other checks.
-	if raw, ok := pod.Annotations[constants.AnnotationInject]; ok {
+	if raw, ok := pod.Annotations[annotationInject]; ok {
 		return strconv.ParseBool(raw)
 	}

@@ -564,19 +547,18 @@ func (w *MeshWebhook) defaultAnnotations(pod *corev1.Pod, podJson string) error
 	}

 	// Default service port is the first port exported in the container
-	if _, ok := pod.ObjectMeta.Annotations[constants.AnnotationPort]; !ok {
+	if _, ok := pod.ObjectMeta.Annotations[annotationPort]; !ok {
 		if cs := pod.Spec.Containers; len(cs) > 0 {
 			if ps := cs[0].Ports; len(ps) > 0 {
 				if ps[0].Name != "" {
-					pod.Annotations[constants.AnnotationPort] = ps[0].Name
+					pod.Annotations[annotationPort] = ps[0].Name
 				} else {
-					pod.Annotations[constants.AnnotationPort] = strconv.Itoa(int(ps[0].ContainerPort))
+					pod.Annotations[annotationPort] = strconv.Itoa(int(ps[0].ContainerPort))
 				}
 			}
 		}
 	}
-	pod.Annotations[constants.AnnotationOriginalPod] = podJson
-	pod.Annotations[constants.AnnotationConsulK8sVersion] = version.GetHumanVersion()
+	pod.Annotations[annotationOriginalPod] = podJson

 	return nil
 }

@@ -584,20 +566,20 @@ func (w *MeshWebhook) defaultAnnotations(pod *corev1.Pod, podJson string) error
 // prometheusAnnotations sets the Prometheus scraping configuration
 // annotations on the Pod.
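 // (Illustration, not part of this change: with metrics enabled the pod ends
 // up with scrape annotations roughly like
 //
 //	prometheus.io/scrape: "true"
 //	prometheus.io/port:   "20200"
 //	prometheus.io/path:   "/metrics"
 //
 // -- the exact keys and defaults come from this package's annotation
 // constants and MetricsConfig, so treat these values as assumptions.)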
 func (w *MeshWebhook) prometheusAnnotations(pod *corev1.Pod) error {
-	enableMetrics, err := w.MetricsConfig.EnableMetrics(*pod)
+	enableMetrics, err := w.MetricsConfig.enableMetrics(*pod)
 	if err != nil {
 		return err
 	}
-	prometheusScrapePort, err := w.MetricsConfig.PrometheusScrapePort(*pod)
+	prometheusScrapePort, err := w.MetricsConfig.prometheusScrapePort(*pod)
 	if err != nil {
 		return err
 	}
-	prometheusScrapePath := w.MetricsConfig.PrometheusScrapePath(*pod)
+	prometheusScrapePath := w.MetricsConfig.prometheusScrapePath(*pod)

 	if enableMetrics {
-		pod.Annotations[constants.AnnotationPrometheusScrape] = "true"
-		pod.Annotations[constants.AnnotationPrometheusPort] = prometheusScrapePort
-		pod.Annotations[constants.AnnotationPrometheusPath] = prometheusScrapePath
+		pod.Annotations[annotationPrometheusScrape] = "true"
+		pod.Annotations[annotationPrometheusPort] = prometheusScrapePort
+		pod.Annotations[annotationPrometheusPath] = prometheusScrapePath
 	}
 	return nil
 }
@@ -609,11 +591,39 @@ func (w *MeshWebhook) consulNamespace(ns string) string {
 	return namespaces.ConsulNamespace(ns, w.EnableNamespaces, w.ConsulDestinationNamespace, w.EnableK8SNSMirroring, w.K8SNSMirroringPrefix)
 }

-func findServiceAccountVolumeMount(pod corev1.Pod, multiPortSvcName string) (corev1.VolumeMount, string, error) {
+func (w *MeshWebhook) validatePod(pod corev1.Pod) error {
+	if _, ok := pod.Annotations[annotationProtocol]; ok {
+		return fmt.Errorf("the %q annotation is no longer supported. Instead, create a ServiceDefaults resource (see www.consul.io/docs/k8s/crds/upgrade-to-crds)",
+			annotationProtocol)
+	}
+
+	if _, ok := pod.Annotations[annotationSyncPeriod]; ok {
+		return fmt.Errorf("the %q annotation is no longer supported because consul-sidecar is no longer injected to periodically register services", annotationSyncPeriod)
+	}
+	return nil
+}
+
+func portValue(pod corev1.Pod, value string) (int32, error) {
+	value = strings.Split(value, ",")[0]
+	// First search for the named port.
+	for _, c := range pod.Spec.Containers {
+		for _, p := range c.Ports {
+			if p.Name == value {
+				return p.ContainerPort, nil
+			}
+		}
+	}
+
+	// Named port not found, return the parsed value.
+	raw, err := strconv.ParseInt(value, 0, 32)
+	return int32(raw), err
+}
+
+func findServiceAccountVolumeMount(pod corev1.Pod, multiPort bool, multiPortSvcName string) (corev1.VolumeMount, string, error) {
 	// In the case of a multiPort pod, there may be another service account
 	// token mounted as a different volume. Its name must be <svc-name>-service-account.
 	// If not, we'll fall back to the service account for the pod.
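 	// (Example, derived from the Sprintf below: for a multi-port service
 	// named "web-admin" the expected volume is "web-admin-service-account",
 	// mounted at "/consul/serviceaccount-web-admin".)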
- if multiPortSvcName != "" { + if multiPort { for _, v := range pod.Spec.Volumes { if v.Name == fmt.Sprintf("%s-service-account", multiPortSvcName) { mountPath := fmt.Sprintf("/consul/serviceaccount-%s", multiPortSvcName) @@ -648,22 +658,22 @@ func findServiceAccountVolumeMount(pod corev1.Pod, multiPortSvcName string) (cor func (w *MeshWebhook) annotatedServiceNames(pod corev1.Pod) []string { var annotatedSvcNames []string - if anno, ok := pod.Annotations[constants.AnnotationService]; ok { + if anno, ok := pod.Annotations[annotationService]; ok { annotatedSvcNames = strings.Split(anno, ",") } return annotatedSvcNames } func (w *MeshWebhook) checkUnsupportedMultiPortCases(ns corev1.Namespace, pod corev1.Pod) error { - tproxyEnabled, err := common.TransparentProxyEnabled(ns, pod, w.EnableTransparentProxy) + tproxyEnabled, err := transparentProxyEnabled(ns, pod, w.EnableTransparentProxy) if err != nil { return fmt.Errorf("couldn't check if tproxy is enabled: %s", err) } - metricsEnabled, err := w.MetricsConfig.EnableMetrics(pod) + metricsEnabled, err := w.MetricsConfig.enableMetrics(pod) if err != nil { return fmt.Errorf("couldn't check if metrics is enabled: %s", err) } - metricsMergingEnabled, err := w.MetricsConfig.EnableMetricsMerging(pod) + metricsMergingEnabled, err := w.MetricsConfig.enableMetricsMerging(pod) if err != nil { return fmt.Errorf("couldn't check if metrics merging is enabled: %s", err) } diff --git a/control-plane/connect-inject/webhook/mesh_webhook_ent_test.go b/control-plane/connect-inject/mesh_webhook_ent_test.go similarity index 88% rename from control-plane/connect-inject/webhook/mesh_webhook_ent_test.go rename to control-plane/connect-inject/mesh_webhook_ent_test.go index 34071a686c..7a34ee3d73 100644 --- a/control-plane/connect-inject/webhook/mesh_webhook_ent_test.go +++ b/control-plane/connect-inject/mesh_webhook_ent_test.go @@ -1,17 +1,17 @@ //go:build enterprise -package webhook +package connectinject import ( "context" "testing" + "time" "github.com/deckarep/golang-set" logrtest "github.com/go-logr/logr/testing" - "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" - "github.com/hashicorp/consul-k8s/control-plane/helper/test" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/sdk/testutil" + "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/stretchr/testify/require" admissionv1 "k8s.io/api/admission/v1" corev1 "k8s.io/api/core/v1" @@ -231,37 +231,47 @@ func TestHandler_MutateWithNamespaces(t *testing.T) { for _, tt := range cases { t.Run(tt.Name, func(t *testing.T) { - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - client := testClient.APIClient + require := require.New(t) - // Add the client config and watcher to the test's meshWebhook - tt.Webhook.ConsulConfig = testClient.Cfg - tt.Webhook.ConsulServerConnMgr = testClient.Watcher + // Set up consul server + a, err := testutil.NewTestServerConfigT(t, nil) + require.NoError(err) + a.WaitForSerfCheck(t) + defer a.Stop() + + // Set up consul client + client, err := api.NewClient(&api.Config{ + Address: a.HTTPAddr, + }) + require.NoError(err) + + // Add the client to the test's meshWebhook + tt.Webhook.ConsulClient = client // Mutate! 
resp := tt.Webhook.Handle(context.Background(), tt.Req) - require.Equal(t, resp.Allowed, true) + require.Equal(resp.Allowed, true) // Check all the namespace things // Check that we have the right number of namespaces namespaces, _, err := client.Namespaces().List(&api.QueryOptions{}) - require.NoError(t, err) - require.Len(t, namespaces, len(tt.ExpectedNamespaces)) + require.NoError(err) + require.Len(namespaces, len(tt.ExpectedNamespaces)) // Check the namespace details for _, ns := range tt.ExpectedNamespaces { actNamespace, _, err := client.Namespaces().Read(ns, &api.QueryOptions{}) - require.NoErrorf(t, err, "error getting namespace %s", ns) - require.NotNilf(t, actNamespace, "namespace %s was nil", ns) - require.Equalf(t, ns, actNamespace.Name, "namespace %s was improperly named", ns) + require.NoErrorf(err, "error getting namespace %s", ns) + require.NotNilf(actNamespace, "namespace %s was nil", ns) + require.Equalf(ns, actNamespace.Name, "namespace %s was improperly named", ns) // Check created namespace properties if ns != "default" { - require.Equalf(t, "Auto-generated by consul-k8s", actNamespace.Description, + require.Equalf("Auto-generated by consul-k8s", actNamespace.Description, "wrong namespace description for namespace %s", ns) - require.Containsf(t, actNamespace.Meta, "external-source", + require.Containsf(actNamespace.Meta, "external-source", "namespace %s does not contain external-source metadata key", ns) - require.Equalf(t, "kubernetes", actNamespace.Meta["external-source"], + require.Equalf("kubernetes", actNamespace.Meta["external-source"], "namespace %s has wrong value for external-source metadata key", ns) } @@ -486,16 +496,37 @@ func TestHandler_MutateWithNamespaces_ACLs(t *testing.T) { for _, tt := range cases { t.Run(tt.Name, func(t *testing.T) { // Set up consul server - adminToken := "123e4567-e89b-12d3-a456-426614174000" - testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { + a, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { c.ACL.Enabled = true - c.ACL.Tokens.InitialManagement = adminToken }) - client := testClient.APIClient + a.WaitForSerfCheck(t) + defer a.Stop() + + // Set up a client for bootstrapping + bootClient, err := api.NewClient(&api.Config{ + Address: a.HTTPAddr, + }) + require.NoError(t, err) + + // Bootstrap the server and get the bootstrap token + var bootstrapResp *api.ACLToken + timer := &retry.Timer{Timeout: 10 * time.Second, Wait: 500 * time.Millisecond} + retry.RunWith(timer, t, func(r *retry.R) { + bootstrapResp, _, err = bootClient.ACL().Bootstrap() + require.NoError(r, err) + }) + bootstrapToken := bootstrapResp.SecretID + require.NotEmpty(t, bootstrapToken) + + // Set up consul client + client, err := api.NewClient(&api.Config{ + Address: a.HTTPAddr, + Token: bootstrapToken, + }) + require.NoError(t, err) - // Add the client config and watcher to the test's meshWebhook - tt.Webhook.ConsulConfig = testClient.Cfg - tt.Webhook.ConsulServerConnMgr = testClient.Watcher + // Add the client to the test's meshWebhook + tt.Webhook.ConsulClient = client // Create cross namespace policy // This would have been created by the acl bootstrapper in the @@ -590,14 +621,24 @@ func TestHandler_MutateWithNamespaces_Annotation(t *testing.T) { for name, c := range cases { t.Run(name, func(t *testing.T) { - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) + require := require.New(t) + + // Set up consul server + a, err := testutil.NewTestServerConfigT(t, nil) + 
require.NoError(err) + a.WaitForSerfCheck(t) + defer a.Stop() s := runtime.NewScheme() s.AddKnownTypes(schema.GroupVersion{Group: "", Version: "v1"}, &corev1.Pod{}) decoder, err := admission.NewDecoder(s) - require.NoError(t, err) + require.NoError(err) - require.NoError(t, err) + // Set up consul client + client, err := api.NewClient(&api.Config{ + Address: a.HTTPAddr, + }) + require.NoError(err) webhook := MeshWebhook{ Log: logrtest.TestLogger{T: t}, @@ -607,8 +648,7 @@ func TestHandler_MutateWithNamespaces_Annotation(t *testing.T) { ConsulDestinationNamespace: c.ConsulDestinationNamespace, EnableK8SNSMirroring: c.Mirroring, K8SNSMirroringPrefix: c.MirroringPrefix, - ConsulConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, + ConsulClient: client, decoder: decoder, Clientset: clientWithNamespace(sourceKubeNS), } @@ -632,21 +672,21 @@ func TestHandler_MutateWithNamespaces_Annotation(t *testing.T) { }, } resp := webhook.Handle(context.Background(), request) - require.Equal(t, resp.Allowed, true) + require.Equal(resp.Allowed, true) // Check that the annotation was added as a patch. var consulNamespaceAnnotationValue string for _, patch := range resp.Patches { if patch.Path == "/metadata/annotations" { for annotationName, annotationValue := range patch.Value.(map[string]interface{}) { - if annotationName == constants.AnnotationConsulNamespace { + if annotationName == annotationConsulNamespace { consulNamespaceAnnotationValue = annotationValue.(string) } } } } - require.NotEmpty(t, consulNamespaceAnnotationValue, "no namespace annotation set") - require.Equal(t, c.ExpNamespaceAnnotation, consulNamespaceAnnotationValue) + require.NotEmpty(consulNamespaceAnnotationValue, "no namespace annotation set") + require.Equal(c.ExpNamespaceAnnotation, consulNamespaceAnnotationValue) }) } } diff --git a/control-plane/connect-inject/webhook/mesh_webhook_test.go b/control-plane/connect-inject/mesh_webhook_test.go similarity index 80% rename from control-plane/connect-inject/webhook/mesh_webhook_test.go rename to control-plane/connect-inject/mesh_webhook_test.go index 602ba63239..cc7a9011c6 100644 --- a/control-plane/connect-inject/webhook/mesh_webhook_test.go +++ b/control-plane/connect-inject/mesh_webhook_test.go @@ -1,4 +1,4 @@ -package webhook +package connectinject import ( "context" @@ -8,11 +8,7 @@ import ( mapset "github.com/deckarep/golang-set" logrtest "github.com/go-logr/logr/testing" - "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" - "github.com/hashicorp/consul-k8s/control-plane/connect-inject/metrics" - "github.com/hashicorp/consul-k8s/control-plane/consul" "github.com/hashicorp/consul-k8s/control-plane/namespaces" - "github.com/hashicorp/consul-k8s/control-plane/version" "github.com/stretchr/testify/require" "gomodules.xyz/jsonpatch/v2" admissionv1 "k8s.io/api/admission/v1" @@ -83,7 +79,7 @@ func TestHandlerHandle(t *testing.T) { Object: encodeRaw(t, &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - constants.KeyInjectStatus: constants.Injected, + keyInjectStatus: injected, }, }, Spec: basicSpec, @@ -151,7 +147,7 @@ func TestHandlerHandle(t *testing.T) { Object: encodeRaw(t, &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - constants.AnnotationUpstreams: "echo:1234,db:1234", + annotationUpstreams: "echo:1234,db:1234", }, }, Spec: basicSpec, @@ -166,15 +162,11 @@ func TestHandlerHandle(t *testing.T) { }, { Operation: "add", - Path: "/metadata/annotations/" + 
escapeJSONPointer(constants.KeyInjectStatus), + Path: "/metadata/annotations/" + escapeJSONPointer(keyInjectStatus), }, { Operation: "add", - Path: "/metadata/annotations/" + escapeJSONPointer(constants.AnnotationOriginalPod), - }, - { - Operation: "add", - Path: "/metadata/annotations/" + escapeJSONPointer(constants.AnnotationConsulK8sVersion), + Path: "/metadata/annotations/" + escapeJSONPointer(annotationOriginalPod), }, { Operation: "add", @@ -210,7 +202,7 @@ func TestHandlerHandle(t *testing.T) { Object: encodeRaw(t, &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - constants.AnnotationInject: "false", + annotationInject: "false", }, }, Spec: basicSpec, @@ -236,7 +228,7 @@ func TestHandlerHandle(t *testing.T) { Object: encodeRaw(t, &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - constants.AnnotationInject: "t", + annotationInject: "t", }, }, Spec: basicSpec, @@ -259,15 +251,11 @@ func TestHandlerHandle(t *testing.T) { }, { Operation: "add", - Path: "/metadata/annotations/" + escapeJSONPointer(constants.KeyInjectStatus), + Path: "/metadata/annotations/" + escapeJSONPointer(keyInjectStatus), }, { Operation: "add", - Path: "/metadata/annotations/" + escapeJSONPointer(constants.AnnotationOriginalPod), - }, - { - Operation: "add", - Path: "/metadata/annotations/" + escapeJSONPointer(constants.AnnotationConsulK8sVersion), + Path: "/metadata/annotations/" + escapeJSONPointer(annotationOriginalPod), }, { Operation: "add", @@ -291,7 +279,7 @@ func TestHandlerHandle(t *testing.T) { Object: encodeRaw(t, &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - constants.AnnotationInjectMountVolumes: "", + annotationInjectMountVolumes: "", }, }, Spec: basicSpec, @@ -314,15 +302,11 @@ func TestHandlerHandle(t *testing.T) { }, { Operation: "add", - Path: "/metadata/annotations/" + escapeJSONPointer(constants.KeyInjectStatus), - }, - { - Operation: "add", - Path: "/metadata/annotations/" + escapeJSONPointer(constants.AnnotationOriginalPod), + Path: "/metadata/annotations/" + escapeJSONPointer(keyInjectStatus), }, { Operation: "add", - Path: "/metadata/annotations/" + escapeJSONPointer(constants.AnnotationConsulK8sVersion), + Path: "/metadata/annotations/" + escapeJSONPointer(annotationOriginalPod), }, { Operation: "add", @@ -345,7 +329,7 @@ func TestHandlerHandle(t *testing.T) { Object: encodeRaw(t, &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - constants.AnnotationInjectMountVolumes: "web,unknown,web_three_point_oh", + annotationInjectMountVolumes: "web,unknown,web_three_point_oh", }, }, Spec: corev1.PodSpec{ @@ -388,15 +372,11 @@ func TestHandlerHandle(t *testing.T) { }, { Operation: "add", - Path: "/metadata/annotations/" + escapeJSONPointer(constants.KeyInjectStatus), + Path: "/metadata/annotations/" + escapeJSONPointer(keyInjectStatus), }, { Operation: "add", - Path: "/metadata/annotations/" + escapeJSONPointer(constants.AnnotationOriginalPod), - }, - { - Operation: "add", - Path: "/metadata/annotations/" + escapeJSONPointer(constants.AnnotationConsulK8sVersion), + Path: "/metadata/annotations/" + escapeJSONPointer(annotationOriginalPod), }, { Operation: "add", @@ -419,7 +399,7 @@ func TestHandlerHandle(t *testing.T) { Object: encodeRaw(t, &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - constants.AnnotationConsulSidecarUserVolume: "[{\"name\":\"bbb\",\"csi\":{\"driver\":\"bob\"}}]", + annotationConsulSidecarUserVolume: 
"[{\"name\":\"bbb\",\"csi\":{\"driver\":\"bob\"}}]", }, }, Spec: corev1.PodSpec{ @@ -448,15 +428,11 @@ func TestHandlerHandle(t *testing.T) { }, { Operation: "add", - Path: "/metadata/annotations/" + escapeJSONPointer(constants.KeyInjectStatus), + Path: "/metadata/annotations/" + escapeJSONPointer(keyInjectStatus), }, { Operation: "add", - Path: "/metadata/annotations/" + escapeJSONPointer(constants.AnnotationOriginalPod), - }, - { - Operation: "add", - Path: "/metadata/annotations/" + escapeJSONPointer(constants.AnnotationConsulK8sVersion), + Path: "/metadata/annotations/" + escapeJSONPointer(annotationOriginalPod), }, { Operation: "add", @@ -479,7 +455,7 @@ func TestHandlerHandle(t *testing.T) { Object: encodeRaw(t, &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - constants.AnnotationConsulSidecarUserVolume: "[a]", + annotationConsulSidecarUserVolume: "[a]", }, }, Spec: corev1.PodSpec{ @@ -511,7 +487,7 @@ func TestHandlerHandle(t *testing.T) { Spec: basicSpec, ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - constants.AnnotationService: "foo", + annotationService: "foo", }, }, }), @@ -533,15 +509,11 @@ func TestHandlerHandle(t *testing.T) { }, { Operation: "add", - Path: "/metadata/annotations/" + escapeJSONPointer(constants.KeyInjectStatus), - }, - { - Operation: "add", - Path: "/metadata/annotations/" + escapeJSONPointer(constants.AnnotationOriginalPod), + Path: "/metadata/annotations/" + escapeJSONPointer(keyInjectStatus), }, { Operation: "add", - Path: "/metadata/annotations/" + escapeJSONPointer(constants.AnnotationConsulK8sVersion), + Path: "/metadata/annotations/" + escapeJSONPointer(annotationOriginalPod), }, { Operation: "add", @@ -592,14 +564,93 @@ func TestHandlerHandle(t *testing.T) { }, { Operation: "add", - Path: "/metadata/labels/" + escapeJSONPointer(constants.KeyInjectStatus), + Path: "/metadata/labels/" + escapeJSONPointer(keyInjectStatus), + }, + { + Operation: "add", + Path: "/metadata/labels/" + escapeJSONPointer(keyManagedBy), + }, + }, + }, + + { + "when metrics merging is enabled, we should inject the consul-sidecar and add prometheus annotations", + MeshWebhook{ + Log: logrtest.TestLogger{T: t}, + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSet(), + MetricsConfig: MetricsConfig{ + DefaultEnableMetrics: true, + DefaultEnableMetricsMerging: true, + }, + decoder: decoder, + Clientset: defaultTestClientWithNamespace(), + }, + admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Namespace: namespaces.DefaultNamespace, + Object: encodeRaw(t, &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "testLabel": "123", + }, + Annotations: map[string]string{ + annotationServiceMetricsPort: "1234", + }, + }, + Spec: basicSpec, + }), + }, + }, + "", + []jsonpatch.Operation{ + { + Operation: "add", + Path: "/spec/volumes", + }, + { + Operation: "add", + Path: "/spec/initContainers", + }, + { + Operation: "add", + Path: "/spec/containers/1", + }, + { + Operation: "add", + Path: "/spec/containers/2", + }, + { + Operation: "add", + Path: "/metadata/annotations/" + escapeJSONPointer(keyInjectStatus), + }, + { + Operation: "add", + Path: "/metadata/annotations/" + escapeJSONPointer(annotationOriginalPod), + }, + { + Operation: "add", + Path: "/metadata/annotations/" + escapeJSONPointer(annotationPrometheusScrape), + }, + { + Operation: "add", + Path: "/metadata/annotations/" + escapeJSONPointer(annotationPrometheusPath), + }, + { + Operation: "add", + Path: 
"/metadata/annotations/" + escapeJSONPointer(annotationPrometheusPort), + }, + { + Operation: "add", + Path: "/metadata/labels/" + escapeJSONPointer(keyInjectStatus), }, { Operation: "add", - Path: "/metadata/labels/" + escapeJSONPointer(constants.KeyManagedBy), + Path: "/metadata/labels/" + escapeJSONPointer(keyManagedBy), }, }, }, + { "tproxy with overwriteProbes is enabled", MeshWebhook{ @@ -665,20 +716,16 @@ func TestHandlerHandle(t *testing.T) { }, { Operation: "add", - Path: "/metadata/annotations/" + escapeJSONPointer(constants.KeyInjectStatus), + Path: "/metadata/annotations/" + escapeJSONPointer(keyInjectStatus), }, { Operation: "add", - Path: "/metadata/annotations/" + escapeJSONPointer(constants.KeyTransparentProxyStatus), + Path: "/metadata/annotations/" + escapeJSONPointer(keyTransparentProxyStatus), }, { Operation: "add", - Path: "/metadata/annotations/" + escapeJSONPointer(constants.AnnotationOriginalPod), - }, - { - Operation: "add", - Path: "/metadata/annotations/" + escapeJSONPointer(constants.AnnotationConsulK8sVersion), + Path: "/metadata/annotations/" + escapeJSONPointer(annotationOriginalPod), }, { Operation: "replace", @@ -707,7 +754,7 @@ func TestHandlerHandle(t *testing.T) { Spec: basicSpec, ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - constants.AnnotationService: "web,web-admin", + annotationService: "web,web-admin", }, }, }), @@ -733,15 +780,11 @@ func TestHandlerHandle(t *testing.T) { }, { Operation: "add", - Path: "/metadata/annotations/" + escapeJSONPointer(constants.KeyInjectStatus), - }, - { - Operation: "add", - Path: "/metadata/annotations/" + escapeJSONPointer(constants.AnnotationOriginalPod), + Path: "/metadata/annotations/" + escapeJSONPointer(keyInjectStatus), }, { Operation: "add", - Path: "/metadata/annotations/" + escapeJSONPointer(constants.AnnotationConsulK8sVersion), + Path: "/metadata/annotations/" + escapeJSONPointer(annotationOriginalPod), }, { Operation: "add", @@ -766,7 +809,7 @@ func TestHandlerHandle(t *testing.T) { Spec: basicSpec, ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - constants.AnnotationService: "web,web-admin", + annotationService: "web,web-admin", }, }, }), @@ -792,15 +835,11 @@ func TestHandlerHandle(t *testing.T) { }, { Operation: "add", - Path: "/metadata/annotations/" + escapeJSONPointer(constants.KeyInjectStatus), + Path: "/metadata/annotations/" + escapeJSONPointer(keyInjectStatus), }, { Operation: "add", - Path: "/metadata/annotations/" + escapeJSONPointer(constants.AnnotationOriginalPod), - }, - { - Operation: "add", - Path: "/metadata/annotations/" + escapeJSONPointer(constants.AnnotationConsulK8sVersion), + Path: "/metadata/annotations/" + escapeJSONPointer(annotationOriginalPod), }, { Operation: "add", @@ -812,14 +851,14 @@ func TestHandlerHandle(t *testing.T) { for _, tt := range cases { t.Run(tt.Name, func(t *testing.T) { - tt.Webhook.ConsulConfig = &consul.Config{HTTPPort: 8500} + require := require.New(t) ctx := context.Background() resp := tt.Webhook.Handle(ctx, tt.Req) if (tt.Err == "") != resp.Allowed { t.Fatalf("allowed: %v, expected err: %v", resp.Allowed, tt.Err) } if tt.Err != "" { - require.Contains(t, resp.Result.Message, tt.Err) + require.Contains(resp.Result.Message, tt.Err) return } @@ -829,7 +868,73 @@ func TestHandlerHandle(t *testing.T) { actual[i].Value = nil } } - require.ElementsMatch(t, tt.Patches, actual) + require.ElementsMatch(tt.Patches, actual) + }) + } +} + +// Test that we error out when deprecated annotations are set. 
+func TestHandler_ErrorsOnDeprecatedAnnotations(t *testing.T) { + cases := []struct { + name string + annotations map[string]string + expErr string + }{ + { + "default protocol annotation", + map[string]string{ + annotationProtocol: "http", + }, + "the \"consul.hashicorp.com/connect-service-protocol\" annotation is no longer supported. Instead, create a ServiceDefaults resource (see www.consul.io/docs/k8s/crds/upgrade-to-crds)", + }, + { + "sync period annotation", + map[string]string{ + annotationSyncPeriod: "30s", + }, + "the \"consul.hashicorp.com/connect-sync-period\" annotation is no longer supported because consul-sidecar is no longer injected to periodically register services", + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + require := require.New(t) + s := runtime.NewScheme() + s.AddKnownTypes(schema.GroupVersion{ + Group: "", + Version: "v1", + }, &corev1.Pod{}) + decoder, err := admission.NewDecoder(s) + require.NoError(err) + + webhook := MeshWebhook{ + Log: logrtest.TestLogger{T: t}, + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSet(), + decoder: decoder, + } + + request := admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Namespace: "default", + Object: encodeRaw(t, &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: c.annotations, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + }, + }, + }, + }), + }, + } + + response := webhook.Handle(context.Background(), request) + require.False(response.Allowed) + require.Equal(c.expErr, response.Result.Message) }) } } @@ -845,8 +950,7 @@ func TestHandlerDefaultAnnotations(t *testing.T) { "empty", &corev1.Pod{}, map[string]string{ - constants.AnnotationOriginalPod: "{\"metadata\":{\"creationTimestamp\":null},\"spec\":{\"containers\":null},\"status\":{}}", - constants.AnnotationConsulK8sVersion: version.GetHumanVersion(), + annotationOriginalPod: "{\"metadata\":{\"creationTimestamp\":null},\"spec\":{\"containers\":null},\"status\":{}}", }, "", }, @@ -866,8 +970,7 @@ func TestHandlerDefaultAnnotations(t *testing.T) { }, }, map[string]string{ - constants.AnnotationOriginalPod: "{\"metadata\":{\"creationTimestamp\":null},\"spec\":{\"containers\":[{\"name\":\"web\",\"resources\":{}},{\"name\":\"web-side\",\"resources\":{}}]},\"status\":{}}", - constants.AnnotationConsulK8sVersion: version.GetHumanVersion(), + annotationOriginalPod: "{\"metadata\":{\"creationTimestamp\":null},\"spec\":{\"containers\":[{\"name\":\"web\",\"resources\":{}},{\"name\":\"web-side\",\"resources\":{}}]},\"status\":{}}", }, "", }, @@ -877,7 +980,7 @@ func TestHandlerDefaultAnnotations(t *testing.T) { &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - constants.AnnotationService: "foo", + annotationService: "foo", }, }, @@ -894,8 +997,7 @@ func TestHandlerDefaultAnnotations(t *testing.T) { }, map[string]string{ "consul.hashicorp.com/connect-service": "foo", - constants.AnnotationOriginalPod: "{\"metadata\":{\"creationTimestamp\":null,\"annotations\":{\"consul.hashicorp.com/connect-service\":\"foo\"}},\"spec\":{\"containers\":[{\"name\":\"web\",\"resources\":{}},{\"name\":\"web-side\",\"resources\":{}}]},\"status\":{}}", - constants.AnnotationConsulK8sVersion: version.GetHumanVersion(), + annotationOriginalPod: 
"{\"metadata\":{\"creationTimestamp\":null,\"annotations\":{\"consul.hashicorp.com/connect-service\":\"foo\"}},\"spec\":{\"containers\":[{\"name\":\"web\",\"resources\":{}},{\"name\":\"web-side\",\"resources\":{}}]},\"status\":{}}", }, "", @@ -922,9 +1024,8 @@ func TestHandlerDefaultAnnotations(t *testing.T) { }, }, map[string]string{ - constants.AnnotationPort: "http", - constants.AnnotationOriginalPod: "{\"metadata\":{\"creationTimestamp\":null},\"spec\":{\"containers\":[{\"name\":\"web\",\"ports\":[{\"name\":\"http\",\"containerPort\":8080}],\"resources\":{}},{\"name\":\"web-side\",\"resources\":{}}]},\"status\":{}}", - constants.AnnotationConsulK8sVersion: version.GetHumanVersion(), + annotationPort: "http", + annotationOriginalPod: "{\"metadata\":{\"creationTimestamp\":null},\"spec\":{\"containers\":[{\"name\":\"web\",\"ports\":[{\"name\":\"http\",\"containerPort\":8080}],\"resources\":{}},{\"name\":\"web-side\",\"resources\":{}}]},\"status\":{}}", }, "", }, @@ -949,9 +1050,8 @@ func TestHandlerDefaultAnnotations(t *testing.T) { }, }, map[string]string{ - constants.AnnotationPort: "8080", - constants.AnnotationOriginalPod: "{\"metadata\":{\"creationTimestamp\":null},\"spec\":{\"containers\":[{\"name\":\"web\",\"ports\":[{\"containerPort\":8080}],\"resources\":{}},{\"name\":\"web-side\",\"resources\":{}}]},\"status\":{}}", - constants.AnnotationConsulK8sVersion: version.GetHumanVersion(), + annotationPort: "8080", + annotationOriginalPod: "{\"metadata\":{\"creationTimestamp\":null},\"spec\":{\"containers\":[{\"name\":\"web\",\"ports\":[{\"containerPort\":8080}],\"resources\":{}},{\"name\":\"web-side\",\"resources\":{}}]},\"status\":{}}", }, "", }, @@ -959,8 +1059,10 @@ func TestHandlerDefaultAnnotations(t *testing.T) { for _, tt := range cases { t.Run(tt.Name, func(t *testing.T) { + require := require.New(t) + podJson, err := json.Marshal(tt.Pod) - require.NoError(t, err) + require.NoError(err) var w MeshWebhook err = w.defaultAnnotations(tt.Pod, string(podJson)) @@ -968,7 +1070,7 @@ func TestHandlerDefaultAnnotations(t *testing.T) { t.Fatalf("actual: %v, expected err: %v", err, tt.Err) } if tt.Err != "" { - require.Contains(t, err.Error(), tt.Err) + require.Contains(err.Error(), tt.Err) return } @@ -976,7 +1078,7 @@ func TestHandlerDefaultAnnotations(t *testing.T) { if len(actual) == 0 { actual = nil } - require.Equal(t, tt.Expected, actual) + require.Equal(tt.Expected, actual) }) } } @@ -990,22 +1092,22 @@ func TestHandlerPrometheusAnnotations(t *testing.T) { { Name: "Sets the correct prometheus annotations on the pod if metrics are enabled", Webhook: MeshWebhook{ - MetricsConfig: metrics.Config{ + MetricsConfig: MetricsConfig{ DefaultEnableMetrics: true, DefaultPrometheusScrapePort: "20200", DefaultPrometheusScrapePath: "/metrics", }, }, Expected: map[string]string{ - constants.AnnotationPrometheusScrape: "true", - constants.AnnotationPrometheusPort: "20200", - constants.AnnotationPrometheusPath: "/metrics", + annotationPrometheusScrape: "true", + annotationPrometheusPort: "20200", + annotationPrometheusPath: "/metrics", }, }, { Name: "Does not set annotations if metrics are not enabled", Webhook: MeshWebhook{ - MetricsConfig: metrics.Config{ + MetricsConfig: MetricsConfig{ DefaultEnableMetrics: false, DefaultPrometheusScrapePort: "20200", DefaultPrometheusScrapePath: "/metrics", @@ -1029,6 +1131,93 @@ func TestHandlerPrometheusAnnotations(t *testing.T) { } } +// Test portValue function. 
+func TestHandlerPortValue(t *testing.T) { + cases := []struct { + Name string + Pod *corev1.Pod + Value string + Expected int32 + Err string + }{ + { + "empty", + &corev1.Pod{}, + "", + 0, + "strconv.ParseInt: parsing \"\": invalid syntax", + }, + + { + "basic pod, with ports", + &corev1.Pod{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + Ports: []corev1.ContainerPort{ + { + Name: "http", + ContainerPort: 8080, + }, + }, + }, + + { + Name: "web-side", + }, + }, + }, + }, + "http", + int32(8080), + "", + }, + + { + "basic pod, with unnamed ports", + &corev1.Pod{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "web", + Ports: []corev1.ContainerPort{ + { + ContainerPort: 8080, + }, + }, + }, + + { + Name: "web-side", + }, + }, + }, + }, + "8080", + int32(8080), + "", + }, + } + + for _, tt := range cases { + t.Run(tt.Name, func(t *testing.T) { + require := require.New(t) + + port, err := portValue(*tt.Pod, tt.Value) + if (tt.Err != "") != (err != nil) { + t.Fatalf("actual: %v, expected err: %v", err, tt.Err) + } + if tt.Err != "" { + require.Contains(err.Error(), tt.Err) + return + } + + require.Equal(tt.Expected, port) + }) + } +} + // Test consulNamespace function. func TestConsulNamespace(t *testing.T) { cases := []struct { @@ -1146,7 +1335,7 @@ func TestShouldInject(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ // Service annotation is required for injection - constants.AnnotationService: "testing", + annotationService: "testing", }, }, }, @@ -1161,7 +1350,7 @@ func TestShouldInject(t *testing.T) { &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - constants.AnnotationService: "testing", + annotationService: "testing", }, }, }, @@ -1176,7 +1365,7 @@ func TestShouldInject(t *testing.T) { &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - constants.AnnotationService: "testing", + annotationService: "testing", }, }, }, @@ -1191,7 +1380,7 @@ func TestShouldInject(t *testing.T) { &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - constants.AnnotationService: "testing", + annotationService: "testing", }, }, }, @@ -1206,7 +1395,7 @@ func TestShouldInject(t *testing.T) { &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - constants.AnnotationService: "testing", + annotationService: "testing", }, }, }, @@ -1221,7 +1410,7 @@ func TestShouldInject(t *testing.T) { &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - constants.AnnotationService: "testing", + annotationService: "testing", }, }, }, @@ -1236,7 +1425,7 @@ func TestShouldInject(t *testing.T) { &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - constants.AnnotationService: "testing", + annotationService: "testing", }, }, }, @@ -1251,7 +1440,7 @@ func TestShouldInject(t *testing.T) { &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - constants.AnnotationService: "testing", + annotationService: "testing", }, }, }, @@ -1266,7 +1455,7 @@ func TestShouldInject(t *testing.T) { &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - constants.AnnotationService: "testing", + annotationService: "testing", }, }, }, @@ -1281,7 +1470,7 @@ func TestShouldInject(t *testing.T) { &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - constants.AnnotationService: "testing", + annotationService: "testing", }, }, }, @@ -1296,7 +1485,7 @@ func 
TestShouldInject(t *testing.T) { &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - constants.AnnotationService: "testing", + annotationService: "testing", }, }, }, @@ -1311,7 +1500,7 @@ func TestShouldInject(t *testing.T) { &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - constants.AnnotationService: "testing", + annotationService: "testing", }, }, }, @@ -1326,7 +1515,7 @@ func TestShouldInject(t *testing.T) { &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - constants.AnnotationService: "testing", + annotationService: "testing", }, }, }, @@ -1341,7 +1530,7 @@ func TestShouldInject(t *testing.T) { &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - constants.AnnotationService: "testing", + annotationService: "testing", }, }, }, @@ -1356,7 +1545,7 @@ func TestShouldInject(t *testing.T) { &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - constants.AnnotationService: "testing", + annotationService: "testing", }, }, }, @@ -1371,7 +1560,7 @@ func TestShouldInject(t *testing.T) { &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - constants.AnnotationService: "testing", + annotationService: "testing", }, }, }, @@ -1386,7 +1575,7 @@ func TestShouldInject(t *testing.T) { &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - constants.AnnotationService: "testing", + annotationService: "testing", }, }, }, @@ -1401,7 +1590,7 @@ func TestShouldInject(t *testing.T) { &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - constants.AnnotationService: "testing", + annotationService: "testing", }, }, }, @@ -1582,7 +1771,7 @@ func TestOverwriteProbes(t *testing.T) { overwriteProbes: true, podContainers: []corev1.Container{ { - Name: sidecarContainer, + Name: envoySidecarContainer, }, }, }, @@ -1752,17 +1941,17 @@ func TestHandler_checkUnsupportedMultiPortCases(t *testing.T) { }{ { name: "tproxy", - annotations: map[string]string{constants.KeyTransparentProxy: "true"}, + annotations: map[string]string{keyTransparentProxy: "true"}, expErr: "multi port services are not compatible with transparent proxy", }, { name: "metrics", - annotations: map[string]string{constants.AnnotationEnableMetrics: "true"}, + annotations: map[string]string{annotationEnableMetrics: "true"}, expErr: "multi port services are not compatible with metrics", }, { name: "metrics merging", - annotations: map[string]string{constants.AnnotationEnableMetricsMerging: "true"}, + annotations: map[string]string{annotationEnableMetricsMerging: "true"}, expErr: "multi port services are not compatible with metrics merging", }, } diff --git a/control-plane/connect-inject/metrics/metrics_configuration.go b/control-plane/connect-inject/metrics_configuration.go similarity index 61% rename from control-plane/connect-inject/metrics/metrics_configuration.go rename to control-plane/connect-inject/metrics_configuration.go index 651fb87184..fc8c5d574a 100644 --- a/control-plane/connect-inject/metrics/metrics_configuration.go +++ b/control-plane/connect-inject/metrics_configuration.go @@ -1,19 +1,16 @@ -package metrics +package connectinject import ( "errors" "fmt" "strconv" - "github.com/hashicorp/consul-k8s/control-plane/connect-inject/common" - "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" corev1 "k8s.io/api/core/v1" ) -// Config represents configuration common to connect-inject components related to metrics. 
-type Config struct { +// MetricsConfig represents configuration common to connect-inject components related to metrics. +type MetricsConfig struct { DefaultEnableMetrics bool - EnableGatewayMetrics bool DefaultEnableMetricsMerging bool DefaultMergedMetricsPort string DefaultPrometheusScrapePort string @@ -30,10 +27,10 @@ const ( defaultServiceMetricsPath = "/metrics" ) -// MergedMetricsServerConfiguration is called when running a merged metrics server and used to return ports necessary to +// mergedMetricsServerConfiguration is called when running a merged metrics server and used to return ports necessary to // configure the merged metrics server. -func (mc Config) MergedMetricsServerConfiguration(pod corev1.Pod) (metricsPorts, error) { - run, err := mc.ShouldRunMergedMetricsServer(pod) +func (mc MetricsConfig) mergedMetricsServerConfiguration(pod corev1.Pod) (metricsPorts, error) { + run, err := mc.shouldRunMergedMetricsServer(pod) if err != nil { return metricsPorts{}, err } @@ -45,16 +42,16 @@ func (mc Config) MergedMetricsServerConfiguration(pod corev1.Pod) (metricsPorts, } // Configure consul sidecar with the appropriate metrics flags. - mergedMetricsPort, err := mc.MergedMetricsPort(pod) + mergedMetricsPort, err := mc.mergedMetricsPort(pod) if err != nil { return metricsPorts{}, err } // Don't need to check the error since it's checked in the call to - // mc.ShouldRunMergedMetricsServer() above. - serviceMetricsPort, _ := mc.ServiceMetricsPort(pod) + // mc.shouldRunMergedMetricsServer() above. + serviceMetricsPort, _ := mc.serviceMetricsPort(pod) - serviceMetricsPath := mc.ServiceMetricsPath(pod) + serviceMetricsPath := mc.serviceMetricsPath(pod) metricsPorts := metricsPorts{ mergedPort: mergedMetricsPort, @@ -64,112 +61,108 @@ func (mc Config) MergedMetricsServerConfiguration(pod corev1.Pod) (metricsPorts, return metricsPorts, nil } -// EnableMetrics returns whether metrics are enabled either via the default value in the meshWebhook, or if it's been +// enableMetrics returns whether metrics are enabled either via the default value in the meshWebhook, or if it's been // overridden via the annotation. -func (mc Config) EnableMetrics(pod corev1.Pod) (bool, error) { +func (mc MetricsConfig) enableMetrics(pod corev1.Pod) (bool, error) { enabled := mc.DefaultEnableMetrics - if raw, ok := pod.Annotations[constants.AnnotationEnableMetrics]; ok && raw != "" { + if raw, ok := pod.Annotations[annotationEnableMetrics]; ok && raw != "" { enableMetrics, err := strconv.ParseBool(raw) if err != nil { - return false, fmt.Errorf("%s annotation value of %s was invalid: %s", constants.AnnotationEnableMetrics, raw, err) + return false, fmt.Errorf("%s annotation value of %s was invalid: %s", annotationEnableMetrics, raw, err) } enabled = enableMetrics } return enabled, nil } -// EnableMetricsMerging returns whether metrics merging functionality is enabled either via the default value in the +// enableMetricsMerging returns whether metrics merging functionality is enabled either via the default value in the // meshWebhook, or if it's been overridden via the annotation. 
-func (mc Config) EnableMetricsMerging(pod corev1.Pod) (bool, error) { +func (mc MetricsConfig) enableMetricsMerging(pod corev1.Pod) (bool, error) { enabled := mc.DefaultEnableMetricsMerging - if raw, ok := pod.Annotations[constants.AnnotationEnableMetricsMerging]; ok && raw != "" { + if raw, ok := pod.Annotations[annotationEnableMetricsMerging]; ok && raw != "" { enableMetricsMerging, err := strconv.ParseBool(raw) if err != nil { - return false, fmt.Errorf("%s annotation value of %s was invalid: %s", constants.AnnotationEnableMetricsMerging, raw, err) + return false, fmt.Errorf("%s annotation value of %s was invalid: %s", annotationEnableMetricsMerging, raw, err) } enabled = enableMetricsMerging } return enabled, nil } -// MergedMetricsPort returns the port to run the merged metrics server on, either via the default value in the meshWebhook, +// mergedMetricsPort returns the port to run the merged metrics server on, either via the default value in the meshWebhook, // or if it's been overridden via the annotation. It also validates the port is in the unprivileged port range. -func (mc Config) MergedMetricsPort(pod corev1.Pod) (string, error) { - return determineAndValidatePort(pod, constants.AnnotationMergedMetricsPort, mc.DefaultMergedMetricsPort, false) +func (mc MetricsConfig) mergedMetricsPort(pod corev1.Pod) (string, error) { + return determineAndValidatePort(pod, annotationMergedMetricsPort, mc.DefaultMergedMetricsPort, false) } -// PrometheusScrapePort returns the port for Prometheus to scrape from, either via the default value in the meshWebhook, or +// prometheusScrapePort returns the port for Prometheus to scrape from, either via the default value in the meshWebhook, or // if it's been overridden via the annotation. It also validates the port is in the unprivileged port range. -func (mc Config) PrometheusScrapePort(pod corev1.Pod) (string, error) { - return determineAndValidatePort(pod, constants.AnnotationPrometheusScrapePort, mc.DefaultPrometheusScrapePort, false) +func (mc MetricsConfig) prometheusScrapePort(pod corev1.Pod) (string, error) { + return determineAndValidatePort(pod, annotationPrometheusScrapePort, mc.DefaultPrometheusScrapePort, false) } -// PrometheusScrapePath returns the path for Prometheus to scrape from, either via the default value in the meshWebhook, or +// prometheusScrapePath returns the path for Prometheus to scrape from, either via the default value in the meshWebhook, or // if it's been overridden via the annotation. -func (mc Config) PrometheusScrapePath(pod corev1.Pod) string { - if raw, ok := pod.Annotations[constants.AnnotationPrometheusScrapePath]; ok && raw != "" { +func (mc MetricsConfig) prometheusScrapePath(pod corev1.Pod) string { + if raw, ok := pod.Annotations[annotationPrometheusScrapePath]; ok && raw != "" { return raw } - if mc.DefaultPrometheusScrapePath == "" { - return defaultServiceMetricsPath - } - return mc.DefaultPrometheusScrapePath } -// ServiceMetricsPort returns the port the service exposes metrics on. This will +// serviceMetricsPort returns the port the service exposes metrics on. This will // default to the port used to register the service with Consul, and can be // overridden with the annotation if provided. -func (mc Config) ServiceMetricsPort(pod corev1.Pod) (string, error) { +func (mc MetricsConfig) serviceMetricsPort(pod corev1.Pod) (string, error) { // The annotationPort is the port used to register the service with Consul. 
 	// If that has been set, it'll be used as the port for getting service
 	// metrics as well, unless overridden by the service-metrics-port annotation.
-	if raw, ok := pod.Annotations[constants.AnnotationPort]; ok && raw != "" {
+	if raw, ok := pod.Annotations[annotationPort]; ok && raw != "" {
 		// The service metrics port can be privileged if the service author has
 		// written their service in such a way that it expects to be able to use
 		// privileged ports. So, the port the service exposes metrics on can
 		// be privileged.
-		return determineAndValidatePort(pod, constants.AnnotationServiceMetricsPort, raw, true)
+		return determineAndValidatePort(pod, annotationServiceMetricsPort, raw, true)
 	}

 	// If the annotationPort is not set, the serviceMetrics port will be 0
 	// unless overridden by the service-metrics-port annotation. If the service
 	// metrics port is 0, the consul sidecar will not run a merged metrics
 	// server.
-	return determineAndValidatePort(pod, constants.AnnotationServiceMetricsPort, "0", true)
+	return determineAndValidatePort(pod, annotationServiceMetricsPort, "0", true)
 }

-// ServiceMetricsPath returns a default of /metrics, or overrides
+// serviceMetricsPath returns a default of /metrics, or overrides
 // that with the annotation if provided.
-func (mc Config) ServiceMetricsPath(pod corev1.Pod) string {
-	if raw, ok := pod.Annotations[constants.AnnotationServiceMetricsPath]; ok && raw != "" {
+func (mc MetricsConfig) serviceMetricsPath(pod corev1.Pod) string {
+	if raw, ok := pod.Annotations[annotationServiceMetricsPath]; ok && raw != "" {
 		return raw
 	}
 	return defaultServiceMetricsPath
 }

-// ShouldRunMergedMetricsServer returns whether we need to run a merged metrics
+// shouldRunMergedMetricsServer returns whether we need to run a merged metrics
 // server. This is used to configure the consul sidecar command, and the init
 // container, so it can pass appropriate arguments to the consul connect envoy
 // command.
-func (mc Config) ShouldRunMergedMetricsServer(pod corev1.Pod) (bool, error) {
-	enableMetrics, err := mc.EnableMetrics(pod)
+func (mc MetricsConfig) shouldRunMergedMetricsServer(pod corev1.Pod) (bool, error) {
+	enableMetrics, err := mc.enableMetrics(pod)
 	if err != nil {
 		return false, err
 	}
-	enableMetricsMerging, err := mc.EnableMetricsMerging(pod)
+	enableMetricsMerging, err := mc.enableMetricsMerging(pod)
 	if err != nil {
 		return false, err
 	}
-	serviceMetricsPort, err := mc.ServiceMetricsPort(pod)
+	serviceMetricsPort, err := mc.serviceMetricsPort(pod)
 	if err != nil {
 		return false, err
 	}
-	// Don't need to check error here since ServiceMetricsPort has been
-	// validated by calling mc.ServiceMetricsPort above.
+	// Don't need to check error here since serviceMetricsPort has been
+	// validated by calling mc.serviceMetricsPort above.
 	smp, _ := strconv.Atoi(serviceMetricsPort)

 	if enableMetrics && enableMetricsMerging && smp > 0 {
@@ -186,7 +179,7 @@ func (mc Config) ShouldRunMergedMetricsServer(pod corev1.Pod) (bool, error) {
 // unprivileged range of 1024-65535.
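// [Editorial aside, not part of this diff] A hypothetical call illustrating the
// contract of determineAndValidatePort below; the default value "20100" is an
// example only. Ports are passed and returned as strings.
mergedPort, err := determineAndValidatePort(pod, annotationMergedMetricsPort, "20100", false)
// With privileged == false, a value that resolves outside the unprivileged range
// of 1024-65535 is rejected with an error; privileged callers (like the service
// metrics port) are allowed to use lower ports.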
func determineAndValidatePort(pod corev1.Pod, annotation string, defaultPort string, privileged bool) (string, error) { if raw, ok := pod.Annotations[annotation]; ok && raw != "" { - port, err := common.PortValue(pod, raw) + port, err := portValue(pod, raw) if err != nil { return "", fmt.Errorf("%s annotation value of %s is not a valid integer", annotation, raw) } @@ -203,7 +196,7 @@ func determineAndValidatePort(pod corev1.Pod, annotation string, defaultPort str // If the annotation does not exist, return the default. if defaultPort != "" { - port, err := common.PortValue(pod, defaultPort) + port, err := portValue(pod, defaultPort) if err != nil { return "", fmt.Errorf("%s is not a valid port on the pod %s", defaultPort, pod.Name) } diff --git a/control-plane/connect-inject/metrics/metrics_configuration_test.go b/control-plane/connect-inject/metrics_configuration_test.go similarity index 85% rename from control-plane/connect-inject/metrics/metrics_configuration_test.go rename to control-plane/connect-inject/metrics_configuration_test.go index 2f41b05744..9564b2190c 100644 --- a/control-plane/connect-inject/metrics/metrics_configuration_test.go +++ b/control-plane/connect-inject/metrics_configuration_test.go @@ -1,9 +1,8 @@ -package metrics +package connectinject import ( "testing" - "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" "github.com/hashicorp/consul-k8s/control-plane/namespaces" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" @@ -14,7 +13,7 @@ func TestMetricsConfigEnableMetrics(t *testing.T) { cases := []struct { Name string Pod func(*corev1.Pod) *corev1.Pod - MetricsConfig Config + MetricsConfig MetricsConfig Expected bool Err string }{ @@ -23,7 +22,7 @@ func TestMetricsConfigEnableMetrics(t *testing.T) { Pod: func(pod *corev1.Pod) *corev1.Pod { return pod }, - MetricsConfig: Config{ + MetricsConfig: MetricsConfig{ DefaultEnableMetrics: true, }, Expected: true, @@ -32,10 +31,10 @@ func TestMetricsConfigEnableMetrics(t *testing.T) { { Name: "Metrics enabled via annotation", Pod: func(pod *corev1.Pod) *corev1.Pod { - pod.Annotations[constants.AnnotationEnableMetrics] = "true" + pod.Annotations[annotationEnableMetrics] = "true" return pod }, - MetricsConfig: Config{ + MetricsConfig: MetricsConfig{ DefaultEnableMetrics: false, }, Expected: true, @@ -44,10 +43,10 @@ func TestMetricsConfigEnableMetrics(t *testing.T) { { Name: "Metrics configured via invalid annotation", Pod: func(pod *corev1.Pod) *corev1.Pod { - pod.Annotations[constants.AnnotationEnableMetrics] = "not-a-bool" + pod.Annotations[annotationEnableMetrics] = "not-a-bool" return pod }, - MetricsConfig: Config{ + MetricsConfig: MetricsConfig{ DefaultEnableMetrics: false, }, Expected: false, @@ -60,7 +59,7 @@ func TestMetricsConfigEnableMetrics(t *testing.T) { require := require.New(t) mc := tt.MetricsConfig - actual, err := mc.EnableMetrics(*tt.Pod(minimal())) + actual, err := mc.enableMetrics(*tt.Pod(minimal())) if tt.Err == "" { require.Equal(tt.Expected, actual) @@ -76,7 +75,7 @@ func TestMetricsConfigEnableMetricsMerging(t *testing.T) { cases := []struct { Name string Pod func(*corev1.Pod) *corev1.Pod - MetricsConfig Config + MetricsConfig MetricsConfig Expected bool Err string }{ @@ -85,7 +84,7 @@ func TestMetricsConfigEnableMetricsMerging(t *testing.T) { Pod: func(pod *corev1.Pod) *corev1.Pod { return pod }, - MetricsConfig: Config{ + MetricsConfig: MetricsConfig{ DefaultEnableMetricsMerging: true, }, Expected: true, @@ -94,10 +93,10 @@ func 
TestMetricsConfigEnableMetricsMerging(t *testing.T) {
 		{
 			Name: "Metrics merging enabled via annotation",
 			Pod: func(pod *corev1.Pod) *corev1.Pod {
-				pod.Annotations[constants.AnnotationEnableMetricsMerging] = "true"
+				pod.Annotations[annotationEnableMetricsMerging] = "true"
 				return pod
 			},
-			MetricsConfig: Config{
+			MetricsConfig: MetricsConfig{
 				DefaultEnableMetricsMerging: false,
 			},
 			Expected: true,
@@ -106,10 +105,10 @@ func TestMetricsConfigEnableMetricsMerging(t *testing.T) {
 		{
 			Name: "Metrics merging configured via invalid annotation",
 			Pod: func(pod *corev1.Pod) *corev1.Pod {
-				pod.Annotations[constants.AnnotationEnableMetricsMerging] = "not-a-bool"
+				pod.Annotations[annotationEnableMetricsMerging] = "not-a-bool"
 				return pod
 			},
-			MetricsConfig: Config{
+			MetricsConfig: MetricsConfig{
 				DefaultEnableMetricsMerging: false,
 			},
 			Expected: false,
@@ -122,7 +121,7 @@ func TestMetricsConfigEnableMetricsMerging(t *testing.T) {
 			require := require.New(t)
 			mc := tt.MetricsConfig

-			actual, err := mc.EnableMetricsMerging(*tt.Pod(minimal()))
+			actual, err := mc.enableMetricsMerging(*tt.Pod(minimal()))

 			if tt.Err == "" {
 				require.Equal(tt.Expected, actual)
@@ -143,8 +142,8 @@ func TestMetricsConfigServiceMetricsPort(t *testing.T) {
 		{
 			Name: "Prefers annotationServiceMetricsPort",
 			Pod: func(pod *corev1.Pod) *corev1.Pod {
-				pod.Annotations[constants.AnnotationPort] = "1234"
-				pod.Annotations[constants.AnnotationServiceMetricsPort] = "9000"
+				pod.Annotations[annotationPort] = "1234"
+				pod.Annotations[annotationServiceMetricsPort] = "9000"
 				return pod
 			},
 			Expected: "9000",
@@ -152,7 +151,7 @@ func TestMetricsConfigServiceMetricsPort(t *testing.T) {
 		{
 			Name: "Uses annotationPort if annotationServiceMetricsPort is not set",
 			Pod: func(pod *corev1.Pod) *corev1.Pod {
-				pod.Annotations[constants.AnnotationPort] = "1234"
+				pod.Annotations[annotationPort] = "1234"
 				return pod
 			},
 			Expected: "1234",
@@ -169,9 +168,9 @@ func TestMetricsConfigServiceMetricsPort(t *testing.T) {
 	for _, tt := range cases {
 		t.Run(tt.Name, func(t *testing.T) {
 			require := require.New(t)
-			mc := Config{}
+			mc := MetricsConfig{}

-			actual, err := mc.ServiceMetricsPort(*tt.Pod(minimal()))
+			actual, err := mc.serviceMetricsPort(*tt.Pod(minimal()))

 			require.Equal(tt.Expected, actual)
 			require.NoError(err)
@@ -195,7 +194,7 @@ func TestMetricsConfigServiceMetricsPath(t *testing.T) {
 		{
 			Name: "Uses annotationServiceMetricsPath when set",
 			Pod: func(pod *corev1.Pod) *corev1.Pod {
-				pod.Annotations[constants.AnnotationServiceMetricsPath] = "/custom-metrics-path"
+				pod.Annotations[annotationServiceMetricsPath] = "/custom-metrics-path"
 				return pod
 			},
 			Expected: "/custom-metrics-path",
@@ -205,9 +204,9 @@ func TestMetricsConfigServiceMetricsPath(t *testing.T) {
 	for _, tt := range cases {
 		t.Run(tt.Name, func(t *testing.T) {
 			require := require.New(t)
-			mc := Config{}
+			mc := MetricsConfig{}

-			actual := mc.ServiceMetricsPath(*tt.Pod(minimal()))
+			actual := mc.serviceMetricsPath(*tt.Pod(minimal()))

 			require.Equal(tt.Expected, actual)
 		})
@@ -218,7 +217,7 @@ func TestMetricsConfigPrometheusScrapePath(t *testing.T) {
 	cases := []struct {
 		Name          string
 		Pod           func(*corev1.Pod) *corev1.Pod
-		MetricsConfig Config
+		MetricsConfig MetricsConfig
 		Expected      string
 	}{
 		{
@@ -226,7 +225,7 @@ func TestMetricsConfigPrometheusScrapePath(t *testing.T) {
 			Pod: func(pod *corev1.Pod) *corev1.Pod {
 				return pod
 			},
-			MetricsConfig: Config{
+			MetricsConfig: MetricsConfig{
 				DefaultPrometheusScrapePath: "/default-prometheus-scrape-path",
 			},
 			Expected: "/default-prometheus-scrape-path",
@@ -234,10 +233,10 @@ func 
TestMetricsConfigPrometheusScrapePath(t *testing.T) { { Name: "Uses annotationPrometheusScrapePath when set", Pod: func(pod *corev1.Pod) *corev1.Pod { - pod.Annotations[constants.AnnotationPrometheusScrapePath] = "/custom-scrape-path" + pod.Annotations[annotationPrometheusScrapePath] = "/custom-scrape-path" return pod }, - MetricsConfig: Config{ + MetricsConfig: MetricsConfig{ DefaultPrometheusScrapePath: "/default-prometheus-scrape-path", }, Expected: "/custom-scrape-path", @@ -249,7 +248,7 @@ func TestMetricsConfigPrometheusScrapePath(t *testing.T) { require := require.New(t) mc := tt.MetricsConfig - actual := mc.PrometheusScrapePath(*tt.Pod(minimal())) + actual := mc.prometheusScrapePath(*tt.Pod(minimal())) require.Equal(tt.Expected, actual) }) @@ -257,21 +256,21 @@ func TestMetricsConfigPrometheusScrapePath(t *testing.T) { } // This test only needs unique cases not already handled in tests for -// h.EnableMetrics, h.EnableMetricsMerging, and h.ServiceMetricsPort. +// h.enableMetrics, h.enableMetricsMerging, and h.serviceMetricsPort. func TestMetricsConfigShouldRunMergedMetricsServer(t *testing.T) { cases := []struct { Name string Pod func(*corev1.Pod) *corev1.Pod - MetricsConfig Config + MetricsConfig MetricsConfig Expected bool }{ { Name: "Returns true when metrics and metrics merging are enabled, and the service metrics port is greater than 0", Pod: func(pod *corev1.Pod) *corev1.Pod { - pod.Annotations[constants.AnnotationPort] = "1234" + pod.Annotations[annotationPort] = "1234" return pod }, - MetricsConfig: Config{ + MetricsConfig: MetricsConfig{ DefaultEnableMetrics: true, DefaultEnableMetricsMerging: true, }, @@ -280,10 +279,10 @@ func TestMetricsConfigShouldRunMergedMetricsServer(t *testing.T) { { Name: "Returns false when service metrics port is 0", Pod: func(pod *corev1.Pod) *corev1.Pod { - pod.Annotations[constants.AnnotationPort] = "0" + pod.Annotations[annotationPort] = "0" return pod }, - MetricsConfig: Config{ + MetricsConfig: MetricsConfig{ DefaultEnableMetrics: true, DefaultEnableMetricsMerging: true, }, @@ -296,7 +295,7 @@ func TestMetricsConfigShouldRunMergedMetricsServer(t *testing.T) { require := require.New(t) mc := tt.MetricsConfig - actual, err := mc.ShouldRunMergedMetricsServer(*tt.Pod(minimal())) + actual, err := mc.shouldRunMergedMetricsServer(*tt.Pod(minimal())) require.Equal(tt.Expected, actual) require.NoError(err) @@ -305,7 +304,7 @@ func TestMetricsConfigShouldRunMergedMetricsServer(t *testing.T) { } // Tests determineAndValidatePort, which in turn tests the -// PrometheusScrapePort() and MergedMetricsPort() functions because their logic +// prometheusScrapePort() and mergedMetricsPort() functions because their logic // is just to call out to determineAndValidatePort(). func TestMetricsConfigDetermineAndValidatePort(t *testing.T) { cases := []struct { @@ -447,12 +446,12 @@ func TestMetricsConfigDetermineAndValidatePort(t *testing.T) { } } -// Tests MergedMetricsServerConfiguration happy path and error case not covered by other Config tests. +// Tests mergedMetricsServerConfiguration happy path and error case not covered by other MetricsConfig tests. 
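// [Editorial aside, not part of this diff] Rough shape of the happy path the test
// below asserts, assuming the metricsPorts struct shown earlier in this diff and a
// pod whose connect-service-port annotation is "1234":
mc := MetricsConfig{
	DefaultEnableMetrics:        true,
	DefaultEnableMetricsMerging: true,
	DefaultMergedMetricsPort:    "12345",
}
ports, err := mc.mergedMetricsServerConfiguration(pod)
// ports comes back roughly as metricsPorts{mergedPort: "12345", servicePort: "1234",
// servicePath: "/metrics"}; err is non-nil when the merged server shouldn't run.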
func TestMetricsConfigMergedMetricsServerConfiguration(t *testing.T) { cases := []struct { Name string Pod func(*corev1.Pod) *corev1.Pod - MetricsConfig Config + MetricsConfig MetricsConfig ExpectedMergedMetricsPort string ExpectedServiceMetricsPort string ExpectedServiceMetricsPath string @@ -461,10 +460,10 @@ func TestMetricsConfigMergedMetricsServerConfiguration(t *testing.T) { { Name: "Returns merged metrics server configuration correctly", Pod: func(pod *corev1.Pod) *corev1.Pod { - pod.Annotations[constants.AnnotationPort] = "1234" + pod.Annotations[annotationPort] = "1234" return pod }, - MetricsConfig: Config{ + MetricsConfig: MetricsConfig{ DefaultEnableMetrics: true, DefaultEnableMetricsMerging: true, DefaultMergedMetricsPort: "12345", @@ -476,10 +475,10 @@ func TestMetricsConfigMergedMetricsServerConfiguration(t *testing.T) { { Name: "Returns an error when merged metrics server shouldn't run", Pod: func(pod *corev1.Pod) *corev1.Pod { - pod.Annotations[constants.AnnotationPort] = "0" + pod.Annotations[annotationPort] = "0" return pod }, - MetricsConfig: Config{ + MetricsConfig: MetricsConfig{ DefaultEnableMetrics: true, DefaultEnableMetricsMerging: false, }, @@ -492,7 +491,7 @@ func TestMetricsConfigMergedMetricsServerConfiguration(t *testing.T) { require := require.New(t) mc := tt.MetricsConfig - metricsPorts, err := mc.MergedMetricsServerConfiguration(*tt.Pod(minimal())) + metricsPorts, err := mc.mergedMetricsServerConfiguration(*tt.Pod(minimal())) if tt.ExpErr != "" { require.Equal(tt.ExpErr, err.Error()) @@ -512,7 +511,7 @@ func minimal() *corev1.Pod { Namespace: namespaces.DefaultNamespace, Name: "minimal", Annotations: map[string]string{ - constants.AnnotationService: "foo", + annotationService: "foo", }, }, diff --git a/control-plane/connect-inject/controllers/peering/peering_acceptor_controller.go b/control-plane/connect-inject/peering_acceptor_controller.go similarity index 69% rename from control-plane/connect-inject/controllers/peering/peering_acceptor_controller.go rename to control-plane/connect-inject/peering_acceptor_controller.go index 044de55998..1125c96b51 100644 --- a/control-plane/connect-inject/controllers/peering/peering_acceptor_controller.go +++ b/control-plane/connect-inject/peering_acceptor_controller.go @@ -1,4 +1,4 @@ -package peering +package connectinject import ( "context" @@ -9,8 +9,6 @@ import ( "github.com/go-logr/logr" consulv1alpha1 "github.com/hashicorp/consul-k8s/control-plane/api/v1alpha1" - "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" - "github.com/hashicorp/consul-k8s/control-plane/consul" "github.com/hashicorp/consul/api" corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" @@ -28,29 +26,25 @@ import ( "sigs.k8s.io/controller-runtime/pkg/source" ) -// AcceptorController reconciles a PeeringAcceptor object. -type AcceptorController struct { +// PeeringAcceptorController reconciles a PeeringAcceptor object. +type PeeringAcceptorController struct { client.Client - // ConsulClientConfig is the config to create a Consul API client. - ConsulClientConfig *consul.Config - // ConsulServerConnMgr is the watcher for the Consul server addresses. - ConsulServerConnMgr consul.ServerConnectionManager - // ExposeServersServiceName is the Kubernetes service name that the Consul servers are using. - ExposeServersServiceName string - // ReleaseNamespace is the namespace where this controller is deployed. 
-	ReleaseNamespace string
-	// Log is the logger for this controller
-	Log logr.Logger
-	// Scheme is the API scheme that this controller should have.
-	Scheme *runtime.Scheme
+	// ConsulClient points at the agent local to the connect-inject deployment pod.
+	ConsulClient              *api.Client
+	ExposeServersServiceName  string
+	ReadServerExternalService bool
+	TokenServerAddresses      []string
+	ReleaseNamespace          string
+	Log                       logr.Logger
+	Scheme                    *runtime.Scheme
 	context.Context
 }

 const (
-	finalizerName    = "finalizers.consul.hashicorp.com"
-	consulAgentError = "consulAgentError"
-	internalError    = "internalError"
-	kubernetesError  = "kubernetesError"
+	FinalizerName    = "finalizers.consul.hashicorp.com"
+	ConsulAgentError = "ConsulAgentError"
+	InternalError    = "InternalError"
+	KubernetesError  = "KubernetesError"
 )

 //+kubebuilder:rbac:groups=consul.hashicorp.com,resources=peeringacceptors,verbs=get;list;watch;create;update;patch;delete
@@ -72,7 +66,7 @@ const (
 // two different resource kinds. As a result, we need to make sure that the code in this method
 // is thread-safe. For example, we may need to fetch the resource again before writing because another
 // call to Reconcile could have modified it, and so we need to make sure that we're updating the latest version.
-func (r *AcceptorController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+func (r *PeeringAcceptorController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
 	r.Log.Info("received request for PeeringAcceptor", "name", req.Name, "ns", req.Namespace)

 	// Get the PeeringAcceptor resource.
@@ -89,54 +83,55 @@ func (r *AcceptorController) Reconcile(ctx context.Context, req ctrl.Request) (c
 		return ctrl.Result{}, err
 	}

-	// Create Consul client for this reconcile.
-	serverState, err := r.ConsulServerConnMgr.State()
-	if err != nil {
-		r.Log.Error(err, "failed to get Consul server state", "name", req.Name, "ns", req.Namespace)
-		return ctrl.Result{}, err
-	}
-	apiClient, err := consul.NewClientFromConnMgrState(r.ConsulClientConfig, serverState)
-	if err != nil {
-		r.Log.Error(err, "failed to create Consul API client", "name", req.Name, "ns", req.Namespace)
-		return ctrl.Result{}, err
-	}
-
 	// The DeletionTimestamp is zero when the object has not been marked for deletion. The finalizer is added
 	// to all resources if it does not already exist. If the DeletionTimestamp is non-zero, the object has been
 	// marked for deletion and goes into the deletion workflow.
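// [Editorial aside, not part of this diff] The block below is the standard
// controller-runtime finalizer pattern; in sketch form:
//
//	if obj.GetDeletionTimestamp().IsZero() {
//		// ensure FinalizerName is present so deletion is gated on cleanup
//	} else {
//		// clean up external state (the Consul peering and its token secret),
//		// then remove FinalizerName so Kubernetes can garbage-collect the object
//	}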
if acceptor.GetDeletionTimestamp().IsZero() { - if !controllerutil.ContainsFinalizer(acceptor, finalizerName) { - controllerutil.AddFinalizer(acceptor, finalizerName) + if !controllerutil.ContainsFinalizer(acceptor, FinalizerName) { + controllerutil.AddFinalizer(acceptor, FinalizerName) if err := r.Update(ctx, acceptor); err != nil { return ctrl.Result{}, err } } } else { - if containsString(acceptor.Finalizers, finalizerName) { + if containsString(acceptor.Finalizers, FinalizerName) { r.Log.Info("PeeringAcceptor was deleted, deleting from Consul", "name", req.Name, "ns", req.Namespace) - err := r.deletePeering(ctx, apiClient, req.Name) + err := r.deletePeering(ctx, req.Name) if acceptor.Secret().Backend == "kubernetes" { err = r.deleteK8sSecret(ctx, acceptor.Secret().Name, acceptor.Namespace) } if err != nil { return ctrl.Result{}, err } - controllerutil.RemoveFinalizer(acceptor, finalizerName) + controllerutil.RemoveFinalizer(acceptor, FinalizerName) err = r.Update(ctx, acceptor) return ctrl.Result{}, err } } + // Scrape the address of the server service + var serverExternalAddresses []string + if r.ReadServerExternalService { + addrs, err := r.getExposeServersServiceAddresses() + if err != nil { + r.updateStatusError(ctx, acceptor, KubernetesError, err) + return ctrl.Result{}, err + } + serverExternalAddresses = addrs + } else if len(r.TokenServerAddresses) > 0 { + serverExternalAddresses = r.TokenServerAddresses + } + // existingSecret will be nil if it doesn't exist, and have the contents of the secret if it does exist. existingSecret, err := r.getExistingSecret(ctx, acceptor.Secret().Name, acceptor.Namespace) if err != nil { r.Log.Error(err, "error retrieving existing secret", "name", acceptor.Secret().Name) - r.updateStatusError(ctx, acceptor, kubernetesError, err) + r.updateStatusError(ctx, acceptor, KubernetesError, err) return ctrl.Result{}, err } // Read the peering from Consul. - peering, _, err := apiClient.Peerings().Read(ctx, acceptor.Name, nil) + peering, _, err := r.ConsulClient.Peerings().Read(ctx, acceptor.Name, nil) if err != nil { r.Log.Error(err, "failed to get Peering from Consul", "name", req.Name) return ctrl.Result{}, err @@ -150,19 +145,19 @@ func (r *AcceptorController) Reconcile(ctx context.Context, req ctrl.Request) (c if acceptor.SecretRef() != nil { r.Log.Info("stale secret in status; deleting stale secret", "name", acceptor.Name, "secret-name", acceptor.SecretRef().Name) if err := r.deleteK8sSecret(ctx, acceptor.SecretRef().Name, acceptor.Namespace); err != nil { - r.updateStatusError(ctx, acceptor, kubernetesError, err) + r.updateStatusError(ctx, acceptor, KubernetesError, err) return ctrl.Result{}, err } } // Generate and store the peering token. var resp *api.PeeringGenerateTokenResponse - if resp, err = r.generateToken(ctx, apiClient, acceptor.Name); err != nil { - r.updateStatusError(ctx, acceptor, consulAgentError, err) + if resp, err = r.generateToken(ctx, acceptor.Name, serverExternalAddresses); err != nil { + r.updateStatusError(ctx, acceptor, ConsulAgentError, err) return ctrl.Result{}, err } if acceptor.Secret().Backend == "kubernetes" { if err := r.createOrUpdateK8sSecret(ctx, acceptor, resp); err != nil { - r.updateStatusError(ctx, acceptor, kubernetesError, err) + r.updateStatusError(ctx, acceptor, KubernetesError, err) return ctrl.Result{}, err } } @@ -178,7 +173,7 @@ func (r *AcceptorController) Reconcile(ctx context.Context, req ctrl.Request) (c // If the peering does exist in Consul, figure out whether to generate and store a new token. 
shouldGenerate, nameChanged, err := shouldGenerateToken(acceptor, existingSecret) if err != nil { - r.updateStatusError(ctx, acceptor, internalError, err) + r.updateStatusError(ctx, acceptor, InternalError, err) return ctrl.Result{}, err } @@ -186,7 +181,7 @@ func (r *AcceptorController) Reconcile(ctx context.Context, req ctrl.Request) (c // Generate and store the peering token. var resp *api.PeeringGenerateTokenResponse r.Log.Info("generating new token for an existing peering") - if resp, err = r.generateToken(ctx, apiClient, acceptor.Name); err != nil { + if resp, err = r.generateToken(ctx, acceptor.Name, serverExternalAddresses); err != nil { return ctrl.Result{}, err } if acceptor.Secret().Backend == "kubernetes" { @@ -198,7 +193,7 @@ func (r *AcceptorController) Reconcile(ctx context.Context, req ctrl.Request) (c if nameChanged && acceptor.SecretRef() != nil { r.Log.Info("stale secret in status; deleting stale secret", "name", acceptor.Name, "secret-name", acceptor.SecretRef().Name) if err = r.deleteK8sSecret(ctx, acceptor.SecretRef().Name, acceptor.Namespace); err != nil { - r.updateStatusError(ctx, acceptor, kubernetesError, err) + r.updateStatusError(ctx, acceptor, KubernetesError, err) return ctrl.Result{}, err } } @@ -226,7 +221,7 @@ func shouldGenerateToken(acceptor *consulv1alpha1.PeeringAcceptor, existingSecre if acceptor.SecretRef().Backend != acceptor.Secret().Backend { return false, false, errors.New("PeeringAcceptor backend cannot be changed") } - if peeringVersionString, ok := acceptor.Annotations[constants.AnnotationPeeringVersion]; ok { + if peeringVersionString, ok := acceptor.Annotations[annotationPeeringVersion]; ok { peeringVersion, err := strconv.ParseUint(peeringVersionString, 10, 64) if err != nil { return false, false, err @@ -245,7 +240,7 @@ func shouldGenerateToken(acceptor *consulv1alpha1.PeeringAcceptor, existingSecre } // updateStatus updates the peeringAcceptor's secret in the status. -func (r *AcceptorController) updateStatus(ctx context.Context, acceptorObjKey types.NamespacedName) error { +func (r *PeeringAcceptorController) updateStatus(ctx context.Context, acceptorObjKey types.NamespacedName) error { // Get the latest resource before we update it. acceptor := &consulv1alpha1.PeeringAcceptor{} if err := r.Client.Get(ctx, acceptorObjKey, acceptor); err != nil { @@ -256,7 +251,7 @@ func (r *AcceptorController) updateStatus(ctx context.Context, acceptorObjKey ty } acceptor.Status.LastSyncedTime = &metav1.Time{Time: time.Now()} acceptor.SetSyncedCondition(corev1.ConditionTrue, "", "") - if peeringVersionString, ok := acceptor.Annotations[constants.AnnotationPeeringVersion]; ok { + if peeringVersionString, ok := acceptor.Annotations[annotationPeeringVersion]; ok { peeringVersion, err := strconv.ParseUint(peeringVersionString, 10, 64) if err != nil { r.Log.Error(err, "failed to update PeeringAcceptor status", "name", acceptor.Name, "namespace", acceptor.Namespace) @@ -274,7 +269,7 @@ func (r *AcceptorController) updateStatus(ctx context.Context, acceptorObjKey ty } // updateStatusError updates the peeringAcceptor's ReconcileError in the status. 
-func (r *AcceptorController) updateStatusError(ctx context.Context, acceptor *consulv1alpha1.PeeringAcceptor, reason string, reconcileErr error) { +func (r *PeeringAcceptorController) updateStatusError(ctx context.Context, acceptor *consulv1alpha1.PeeringAcceptor, reason string, reconcileErr error) { acceptor.SetSyncedCondition(corev1.ConditionFalse, reason, reconcileErr.Error()) err := r.Status().Update(ctx, acceptor) if err != nil { @@ -283,7 +278,7 @@ func (r *AcceptorController) updateStatusError(ctx context.Context, acceptor *co } // getExistingSecret gets the K8s secret specified, and either returns the existing secret or nil if it doesn't exist. -func (r *AcceptorController) getExistingSecret(ctx context.Context, name string, namespace string) (*corev1.Secret, error) { +func (r *PeeringAcceptorController) getExistingSecret(ctx context.Context, name string, namespace string) (*corev1.Secret, error) { existingSecret := &corev1.Secret{} namespacedName := types.NamespacedName{Name: name, Namespace: namespace} err := r.Client.Get(ctx, namespacedName, existingSecret) @@ -299,7 +294,7 @@ func (r *AcceptorController) getExistingSecret(ctx context.Context, name string, // createOrUpdateK8sSecret creates a secret and uses the controller's K8s client to apply the secret. It checks if // there's an existing secret with the same name and makes sure to update the existing secret if so. -func (r *AcceptorController) createOrUpdateK8sSecret(ctx context.Context, acceptor *consulv1alpha1.PeeringAcceptor, resp *api.PeeringGenerateTokenResponse) error { +func (r *PeeringAcceptorController) createOrUpdateK8sSecret(ctx context.Context, acceptor *consulv1alpha1.PeeringAcceptor, resp *api.PeeringGenerateTokenResponse) error { secretName := acceptor.Secret().Name secretNamespace := acceptor.Namespace secret := createSecret(secretName, secretNamespace, acceptor.Secret().Key, resp.PeeringToken) @@ -320,7 +315,7 @@ func (r *AcceptorController) createOrUpdateK8sSecret(ctx context.Context, accept return nil } -func (r *AcceptorController) deleteK8sSecret(ctx context.Context, name, namespace string) error { +func (r *PeeringAcceptorController) deleteK8sSecret(ctx context.Context, name, namespace string) error { existingSecret, err := r.getExistingSecret(ctx, name, namespace) if err != nil { return err @@ -334,7 +329,7 @@ func (r *AcceptorController) deleteK8sSecret(ctx context.Context, name, namespac } // SetupWithManager sets up the controller with the Manager. -func (r *AcceptorController) SetupWithManager(mgr ctrl.Manager) error { +func (r *PeeringAcceptorController) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&consulv1alpha1.PeeringAcceptor{}). Watches( @@ -345,11 +340,14 @@ func (r *AcceptorController) SetupWithManager(mgr ctrl.Manager) error { } // generateToken is a helper function that calls the Consul api to generate a token for the peer. 
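// [Editorial aside, not part of this diff] Shape of the request the rewritten
// generateToken below sends once external server addresses are threaded in; the
// values are examples taken from the tests later in this diff:
req := api.PeeringGenerateTokenRequest{
	PeerName:                "acceptor-created",
	ServerExternalAddresses: []string{"1.1.1.1:8503", "2.2.2.2:8503"},
}
// ServerExternalAddresses is only populated when addresses were scraped from the
// expose-servers service or supplied via TokenServerAddresses.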
-func (r *AcceptorController) generateToken(ctx context.Context, apiClient *api.Client, peerName string) (*api.PeeringGenerateTokenResponse, error) {
+func (r *PeeringAcceptorController) generateToken(ctx context.Context, peerName string, serverExternalAddresses []string) (*api.PeeringGenerateTokenResponse, error) {
 	req := api.PeeringGenerateTokenRequest{
 		PeerName: peerName,
 	}
-	resp, _, err := apiClient.Peerings().GenerateToken(ctx, req, nil)
+	if len(serverExternalAddresses) > 0 {
+		req.ServerExternalAddresses = serverExternalAddresses
+	}
+	resp, _, err := r.ConsulClient.Peerings().GenerateToken(ctx, req, nil)
 	if err != nil {
 		r.Log.Error(err, "failed to generate token", "err", err)
 		return nil, err
@@ -358,8 +356,8 @@
 }

 // deletePeering is a helper function that calls the Consul api to delete a peering.
-func (r *AcceptorController) deletePeering(ctx context.Context, apiClient *api.Client, peerName string) error {
-	_, err := apiClient.Peerings().Delete(ctx, peerName, nil)
+func (r *PeeringAcceptorController) deletePeering(ctx context.Context, peerName string) error {
+	_, err := r.ConsulClient.Peerings().Delete(ctx, peerName, nil)
 	if err != nil {
 		r.Log.Error(err, "failed to delete Peering from Consul", "name", peerName)
 		return err
@@ -372,7 +370,7 @@ func (r *AcceptorController) deletePeering(ctx context.Context, apiClient *api.C
 // the list of acceptors and creates a request for the acceptor that has the same secret as its
 // secretRef and that of the updated secret that is being watched.
 // We compare it to the secret in the status as the resource has created the secret.
-func (r *AcceptorController) requestsForPeeringTokens(object client.Object) []reconcile.Request {
+func (r *PeeringAcceptorController) requestsForPeeringTokens(object client.Object) []reconcile.Request {
 	r.Log.Info("received update for Peering Token Secret", "name", object.GetName(), "namespace", object.GetNamespace())

 	// Get the list of all acceptors.
@@ -391,13 +389,80 @@ func (r *AcceptorController) requestsForPeeringTokens(object client.Object) []re return []ctrl.Request{} } +func (r *PeeringAcceptorController) getExposeServersServiceAddresses() ([]string, error) { + r.Log.Info("getting external address from expose-servers service", "name", r.ExposeServersServiceName) + var serverExternalAddresses []string + + serverService := &corev1.Service{} + key := types.NamespacedName{ + Name: r.ExposeServersServiceName, + Namespace: r.ReleaseNamespace, + } + err := r.Client.Get(r.Context, key, serverService) + if err != nil { + return nil, err + } + switch serverService.Spec.Type { + case corev1.ServiceTypeNodePort: + nodes := corev1.NodeList{} + err := r.Client.List(r.Context, &nodes) + if err != nil { + return nil, err + } + if len(nodes.Items) == 0 { + return nil, fmt.Errorf("no nodes were found for scraping server addresses from expose-servers service") + } + var grpcNodePort int32 + for _, port := range serverService.Spec.Ports { + if port.Name == "grpc" { + grpcNodePort = port.NodePort + } + } + if grpcNodePort == 0 { + return nil, fmt.Errorf("no grpc port was found for expose-servers service") + } + for _, node := range nodes.Items { + addrs := node.Status.Addresses + for _, addr := range addrs { + if addr.Type == corev1.NodeInternalIP { + serverExternalAddresses = append(serverExternalAddresses, fmt.Sprintf("%s:%d", addr.Address, grpcNodePort)) + } + } + } + if len(serverExternalAddresses) == 0 { + return nil, fmt.Errorf("no server addresses were scraped from expose-servers service") + } + return serverExternalAddresses, nil + case corev1.ServiceTypeLoadBalancer: + lbAddrs := serverService.Status.LoadBalancer.Ingress + if len(lbAddrs) < 1 { + return nil, fmt.Errorf("unable to find load balancer address for %s service, retrying", r.ExposeServersServiceName) + } + for _, lbAddr := range lbAddrs { + // When the service is of type load balancer, the grpc port is hardcoded to 8502. + if lbAddr.IP != "" { + serverExternalAddresses = append(serverExternalAddresses, fmt.Sprintf("%s:%s", lbAddr.IP, "8502")) + } + if lbAddr.Hostname != "" { + serverExternalAddresses = append(serverExternalAddresses, fmt.Sprintf("%s:%s", lbAddr.Hostname, "8502")) + } + } + if len(serverExternalAddresses) == 0 { + return nil, fmt.Errorf("unable to find load balancer address for %s service, retrying", r.ExposeServersServiceName) + } + default: + return nil, fmt.Errorf("only NodePort and LoadBalancer service types are supported") + } + return serverExternalAddresses, nil +} + // filterPeeringAcceptors receives meta and object information for Kubernetes resources that are being watched, // which in this case are Secrets. It only returns true if the Secret is a Peering Token Secret. It reads the labels // from the meta of the resource and uses the values of the "consul.hashicorp.com/peering-token" label to validate that // the Secret is a Peering Token Secret. 
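// [Editorial aside, not part of this diff] A secret that the filter below is meant
// to accept carries the peering-token label, matching what createSecret further
// down sets:
secret := &corev1.Secret{
	ObjectMeta: metav1.ObjectMeta{
		Name:      "acceptor-created-secret",
		Namespace: "default",
		Labels:    map[string]string{labelPeeringToken: "true"},
	},
}
// filterPeeringAcceptors(secret) returns true; secrets without the label (or,
// presumably, with a value that doesn't parse as true) are ignored by the watch.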
-func (r *AcceptorController) filterPeeringAcceptors(object client.Object) bool { +func (r *PeeringAcceptorController) filterPeeringAcceptors(object client.Object) bool { secretLabels := object.GetLabels() - isPeeringToken, ok := secretLabels[constants.LabelPeeringToken] + isPeeringToken, ok := secretLabels[labelPeeringToken] if !ok { return false } @@ -411,7 +476,7 @@ func createSecret(name, namespace, key, value string) *corev1.Secret { Name: name, Namespace: namespace, Labels: map[string]string{ - constants.LabelPeeringToken: "true", + labelPeeringToken: "true", }, }, Data: map[string][]byte{ diff --git a/control-plane/connect-inject/controllers/peering/peering_acceptor_controller_test.go b/control-plane/connect-inject/peering_acceptor_controller_test.go similarity index 73% rename from control-plane/connect-inject/controllers/peering/peering_acceptor_controller_test.go rename to control-plane/connect-inject/peering_acceptor_controller_test.go index 15a3740816..7e649c2394 100644 --- a/control-plane/connect-inject/controllers/peering/peering_acceptor_controller_test.go +++ b/control-plane/connect-inject/peering_acceptor_controller_test.go @@ -1,4 +1,4 @@ -package peering +package connectinject import ( "context" @@ -9,9 +9,8 @@ import ( logrtest "github.com/go-logr/logr/testing" "github.com/hashicorp/consul-k8s/control-plane/api/v1alpha1" - "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" - "github.com/hashicorp/consul-k8s/control-plane/helper/test" "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" @@ -29,15 +28,19 @@ import ( // TestReconcile_CreateUpdatePeeringAcceptor creates a peering acceptor. 
func TestReconcile_CreateUpdatePeeringAcceptor(t *testing.T) { t.Parallel() + nodeName := "test-node" cases := []struct { - name string - k8sObjects func() []runtime.Object - expectedConsulPeerings []*api.Peering - expectedK8sSecrets func() []*corev1.Secret - expErr string - expectedStatus *v1alpha1.PeeringAcceptorStatus - expectDeletedK8sSecret *types.NamespacedName - initialConsulPeerName string + name string + k8sObjects func() []runtime.Object + expectedConsulPeerings []*api.Peering + expectedK8sSecrets func() []*corev1.Secret + expErr string + expectedStatus *v1alpha1.PeeringAcceptorStatus + expectDeletedK8sSecret *types.NamespacedName + initialConsulPeerName string + externalAddresses []string + readServerExposeService bool + expectedTokenAddresses []string }{ { name: "New PeeringAcceptor creates a peering in Consul and generates a token", @@ -87,7 +90,9 @@ func TestReconcile_CreateUpdatePeeringAcceptor(t *testing.T) { }, }, { - name: "PeeringAcceptor generates a token with expose server addresses", + name: "PeeringAcceptor generates a token with expose server addresses", + readServerExposeService: true, + expectedTokenAddresses: []string{"1.1.1.1:8503"}, k8sObjects: func() []runtime.Object { service := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ @@ -151,6 +156,55 @@ func TestReconcile_CreateUpdatePeeringAcceptor(t *testing.T) { return []*corev1.Secret{secret} }, }, + { + name: "PeeringAcceptor generates a token with external addresses specified", + externalAddresses: []string{"1.1.1.1:8503", "2.2.2.2:8503"}, + expectedTokenAddresses: []string{"1.1.1.1:8503", "2.2.2.2:8503"}, + k8sObjects: func() []runtime.Object { + acceptor := &v1alpha1.PeeringAcceptor{ + ObjectMeta: metav1.ObjectMeta{ + Name: "acceptor-created", + Namespace: "default", + }, + Spec: v1alpha1.PeeringAcceptorSpec{ + Peer: &v1alpha1.Peer{ + Secret: &v1alpha1.Secret{ + Name: "acceptor-created-secret", + Key: "data", + Backend: "kubernetes", + }, + }, + }, + } + return []runtime.Object{acceptor} + }, + expectedStatus: &v1alpha1.PeeringAcceptorStatus{ + SecretRef: &v1alpha1.SecretRefStatus{ + Secret: v1alpha1.Secret{ + Name: "acceptor-created-secret", + Key: "data", + Backend: "kubernetes", + }, + }, + }, + expectedConsulPeerings: []*api.Peering{ + { + Name: "acceptor-created", + }, + }, + expectedK8sSecrets: func() []*corev1.Secret { + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "acceptor-created-secret", + Namespace: "default", + }, + StringData: map[string]string{ + "data": "tokenstub", + }, + } + return []*corev1.Secret{secret} + }, + }, { name: "When the secret already exists (not created by controller), it is updated with the contents of the new peering token and an owner reference is added", k8sObjects: func() []runtime.Object { @@ -207,7 +261,7 @@ func TestReconcile_CreateUpdatePeeringAcceptor(t *testing.T) { Name: "acceptor-created", Namespace: "default", Annotations: map[string]string{ - constants.AnnotationPeeringVersion: "2", + annotationPeeringVersion: "2", }, }, Spec: v1alpha1.PeeringAcceptorSpec{ @@ -503,24 +557,35 @@ func TestReconcile_CreateUpdatePeeringAcceptor(t *testing.T) { fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(k8sObjects...).Build() // Create test consul server. 
- testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - consulClient := testClient.APIClient + consul, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { + c.NodeName = nodeName + }) + require.NoError(t, err) + defer consul.Stop() + consul.WaitForServiceIntentions(t) + + cfg := &api.Config{ + Address: consul.HTTPAddr, + } + consulClient, err := api.NewClient(cfg) + require.NoError(t, err) if tt.initialConsulPeerName != "" { // Add the initial peerings into Consul by calling the Generate token endpoint. - _, _, err := consulClient.Peerings().GenerateToken(context.Background(), api.PeeringGenerateTokenRequest{PeerName: tt.initialConsulPeerName}, nil) + _, _, err = consulClient.Peerings().GenerateToken(context.Background(), api.PeeringGenerateTokenRequest{PeerName: tt.initialConsulPeerName}, nil) require.NoError(t, err) } // Create the peering acceptor controller - controller := &AcceptorController{ - Client: fakeClient, - ExposeServersServiceName: "test-expose-servers", - ReleaseNamespace: "default", - Log: logrtest.TestLogger{T: t}, - ConsulClientConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, - Scheme: s, + controller := &PeeringAcceptorController{ + Client: fakeClient, + TokenServerAddresses: tt.externalAddresses, + ReadServerExternalService: tt.readServerExposeService, + ExposeServersServiceName: "test-expose-servers", + ReleaseNamespace: "default", + Log: logrtest.TestLogger{T: t}, + ConsulClient: consulClient, + Scheme: s, } namespacedName := types.NamespacedName{ Name: "acceptor-created", @@ -553,8 +618,8 @@ func TestReconcile_CreateUpdatePeeringAcceptor(t *testing.T) { require.NoError(t, err) expSecrets := tt.expectedK8sSecrets() require.Equal(t, expSecrets[0].Name, createdSecret.Name) - require.Contains(t, createdSecret.Labels, constants.LabelPeeringToken) - require.Equal(t, createdSecret.Labels[constants.LabelPeeringToken], "true") + require.Contains(t, createdSecret.Labels, labelPeeringToken) + require.Equal(t, createdSecret.Labels[labelPeeringToken], "true") // This assertion needs to be on StringData rather than Data because in the fake K8s client the contents are // stored in StringData if that's how the secret was initialized in the fake client. In a real cluster, this // StringData is an input only field, and shouldn't be read from. 
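The assertions in the next hunk base64-decode the stored token and check for JSON fields such as CA, ServerAddresses, and ServerName. A minimal sketch of that decoding step, with a made-up token payload and an assumed stub struct (the real token type lives in Consul and may carry more fields):

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"log"
)

// peeringTokenStub mirrors only the fields the tests assert on; the field
// types here are assumptions for illustration.
type peeringTokenStub struct {
	CA              []string `json:"CA"`
	ServerAddresses []string `json:"ServerAddresses"`
	ServerName      string   `json:"ServerName"`
}

func main() {
	// The secret's "data" value is base64-encoded JSON; this literal is made up.
	encoded := base64.StdEncoding.EncodeToString([]byte(
		`{"CA":null,"ServerAddresses":["1.1.1.1:8503"],"ServerName":"server.dc1.consul"}`))

	decoded, err := base64.StdEncoding.DecodeString(encoded)
	if err != nil {
		log.Fatal(err)
	}
	var tok peeringTokenStub
	if err := json.Unmarshal(decoded, &tok); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("addresses=%v serverName=%s\n", tok.ServerAddresses, tok.ServerName)
}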
@@ -565,15 +630,20 @@ func TestReconcile_CreateUpdatePeeringAcceptor(t *testing.T) { decodedTokenData, err := base64.StdEncoding.DecodeString(string(createdSecret.Data["data"])) require.NoError(t, err) - require.Contains(t, string(decodedTokenData), "\"CA\":") + require.Contains(t, string(decodedTokenData), "\"CA\":null") require.Contains(t, string(decodedTokenData), "\"ServerAddresses\"") - require.Contains(t, string(decodedTokenData), "\"ServerName\":\"server.dc1.peering.11111111-2222-3333-4444-555555555555.consul\"") + require.Contains(t, string(decodedTokenData), "\"ServerName\":\"server.dc1.consul\"") + if len(tt.expectedTokenAddresses) > 0 { + for _, addr := range tt.externalAddresses { + require.Contains(t, string(decodedTokenData), addr) + } + } // Get the reconciled PeeringAcceptor and make assertions on the status acceptor := &v1alpha1.PeeringAcceptor{} err = fakeClient.Get(context.Background(), namespacedName, acceptor) require.NoError(t, err) - require.Contains(t, acceptor.Finalizers, finalizerName) + require.Contains(t, acceptor.Finalizers, FinalizerName) if tt.expectedStatus != nil { require.Equal(t, tt.expectedStatus.SecretRef.Name, acceptor.SecretRef().Name) require.Equal(t, tt.expectedStatus.SecretRef.Key, acceptor.SecretRef().Key) @@ -604,7 +674,7 @@ func TestReconcile_DeletePeeringAcceptor(t *testing.T) { Name: "acceptor-deleted", Namespace: "default", DeletionTimestamp: &metav1.Time{Time: time.Now()}, - Finalizers: []string{finalizerName}, + Finalizers: []string{FinalizerName}, }, Spec: v1alpha1.PeeringAcceptorSpec{ Peer: &v1alpha1.Peer{ @@ -623,22 +693,30 @@ func TestReconcile_DeletePeeringAcceptor(t *testing.T) { s.AddKnownTypes(v1alpha1.GroupVersion, &v1alpha1.PeeringAcceptor{}, &v1alpha1.PeeringAcceptorList{}) fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(k8sObjects...).Build() - // Create test consulServer server // Create test consul server. - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - consulClient := testClient.APIClient + consul, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { + c.NodeName = "test-node" + }) + require.NoError(t, err) + defer consul.Stop() + consul.WaitForServiceIntentions(t) + + cfg := &api.Config{ + Address: consul.HTTPAddr, + } + consulClient, err := api.NewClient(cfg) + require.NoError(t, err) // Add the initial peerings into Consul by calling the Generate token endpoint. - _, _, err := consulClient.Peerings().GenerateToken(context.Background(), api.PeeringGenerateTokenRequest{PeerName: "acceptor-deleted"}, nil) + _, _, err = consulClient.Peerings().GenerateToken(context.Background(), api.PeeringGenerateTokenRequest{PeerName: "acceptor-deleted"}, nil) require.NoError(t, err) // Create the peering acceptor controller. - controller := &AcceptorController{ - Client: fakeClient, - Log: logrtest.TestLogger{T: t}, - ConsulClientConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, - Scheme: s, + controller := &PeeringAcceptorController{ + Client: fakeClient, + Log: logrtest.TestLogger{T: t}, + ConsulClient: consulClient, + Scheme: s, } namespacedName := types.NamespacedName{ Name: "acceptor-deleted", @@ -672,6 +750,7 @@ func TestReconcile_DeletePeeringAcceptor(t *testing.T) { // scenarios involving the user setting the version annotation. 
func TestReconcile_VersionAnnotation(t *testing.T) { t.Parallel() + nodeName := "test-node" cases := map[string]struct { annotations map[string]string expErr string @@ -679,13 +758,13 @@ func TestReconcile_VersionAnnotation(t *testing.T) { }{ "fails if annotation is not a number": { annotations: map[string]string{ - constants.AnnotationPeeringVersion: "foo", + annotationPeeringVersion: "foo", }, expErr: `strconv.ParseUint: parsing "foo": invalid syntax`, }, "is no/op if annotation value is less than value in status": { annotations: map[string]string{ - constants.AnnotationPeeringVersion: "2", + annotationPeeringVersion: "2", }, expectedStatus: &v1alpha1.PeeringAcceptorStatus{ SecretRef: &v1alpha1.SecretRefStatus{ @@ -701,7 +780,7 @@ func TestReconcile_VersionAnnotation(t *testing.T) { }, "is no/op if annotation value is equal to value in status": { annotations: map[string]string{ - constants.AnnotationPeeringVersion: "3", + annotationPeeringVersion: "3", }, expectedStatus: &v1alpha1.PeeringAcceptorStatus{ SecretRef: &v1alpha1.SecretRefStatus{ @@ -717,7 +796,7 @@ func TestReconcile_VersionAnnotation(t *testing.T) { }, "updates if annotation value is greater than value in status": { annotations: map[string]string{ - constants.AnnotationPeeringVersion: "4", + annotationPeeringVersion: "4", }, expectedStatus: &v1alpha1.PeeringAcceptorStatus{ SecretRef: &v1alpha1.SecretRefStatus{ @@ -770,19 +849,28 @@ func TestReconcile_VersionAnnotation(t *testing.T) { fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(k8sObjects...).Build() // Create test consul server. - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - consulClient := testClient.APIClient + consul, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { + c.NodeName = nodeName + }) + require.NoError(t, err) + defer consul.Stop() + consul.WaitForServiceIntentions(t) + + cfg := &api.Config{ + Address: consul.HTTPAddr, + } + consulClient, err := api.NewClient(cfg) + require.NoError(t, err) - _, _, err := consulClient.Peerings().GenerateToken(context.Background(), api.PeeringGenerateTokenRequest{PeerName: "acceptor-created"}, nil) + _, _, err = consulClient.Peerings().GenerateToken(context.Background(), api.PeeringGenerateTokenRequest{PeerName: "acceptor-created"}, nil) require.NoError(t, err) // Create the peering acceptor controller - controller := &AcceptorController{ - Client: fakeClient, - Log: logrtest.TestLogger{T: t}, - ConsulClientConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, - Scheme: s, + controller := &PeeringAcceptorController{ + Client: fakeClient, + Log: logrtest.TestLogger{T: t}, + ConsulClient: consulClient, + Scheme: s, } namespacedName := types.NamespacedName{ Name: "acceptor-created", @@ -803,7 +891,7 @@ func TestReconcile_VersionAnnotation(t *testing.T) { acceptor = &v1alpha1.PeeringAcceptor{} err = fakeClient.Get(context.Background(), namespacedName, acceptor) require.NoError(t, err) - require.Contains(t, acceptor.Finalizers, finalizerName) + require.Contains(t, acceptor.Finalizers, FinalizerName) if tt.expectedStatus != nil { require.Equal(t, tt.expectedStatus.SecretRef.Name, acceptor.SecretRef().Name) require.Equal(t, tt.expectedStatus.SecretRef.Key, acceptor.SecretRef().Key) @@ -1017,7 +1105,7 @@ func TestAcceptorUpdateStatus(t *testing.T) { }, }, Conditions: v1alpha1.Conditions{ - v1alpha1.Condition{ + { Type: v1alpha1.ConditionSynced, Status: corev1.ConditionTrue, }, @@ -1060,7 +1148,7 @@ func TestAcceptorUpdateStatus(t *testing.T) { }, }, 
Conditions: v1alpha1.Conditions{ - v1alpha1.Condition{ + { Type: v1alpha1.ConditionSynced, Status: corev1.ConditionTrue, }, @@ -1081,7 +1169,7 @@ func TestAcceptorUpdateStatus(t *testing.T) { s.AddKnownTypes(v1alpha1.GroupVersion, &v1alpha1.PeeringAcceptor{}, &v1alpha1.PeeringAcceptorList{}) fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(k8sObjects...).Build() // Create the peering acceptor controller. - pac := &AcceptorController{ + pac := &PeeringAcceptorController{ Client: fakeClient, Log: logrtest.TestLogger{T: t}, Scheme: s, @@ -1102,6 +1190,7 @@ func TestAcceptorUpdateStatus(t *testing.T) { require.Equal(t, tt.expStatus.SecretRef.Backend, acceptor.SecretRef().Backend) require.Equal(t, tt.expStatus.SecretRef.ResourceVersion, acceptor.SecretRef().ResourceVersion) require.Equal(t, tt.expStatus.Conditions[0].Message, acceptor.Status.Conditions[0].Message) + }) } } @@ -1133,10 +1222,10 @@ func TestAcceptorUpdateStatusError(t *testing.T) { reconcileErr: errors.New("this is an error"), expStatus: v1alpha1.PeeringAcceptorStatus{ Conditions: v1alpha1.Conditions{ - v1alpha1.Condition{ + { Type: v1alpha1.ConditionSynced, Status: corev1.ConditionFalse, - Reason: internalError, + Reason: InternalError, Message: "this is an error", }, }, @@ -1173,7 +1262,7 @@ func TestAcceptorUpdateStatusError(t *testing.T) { { Type: v1alpha1.ConditionSynced, Status: corev1.ConditionFalse, - Reason: internalError, + Reason: InternalError, Message: "this is an error", }, }, @@ -1193,13 +1282,13 @@ func TestAcceptorUpdateStatusError(t *testing.T) { s.AddKnownTypes(v1alpha1.GroupVersion, &v1alpha1.PeeringAcceptor{}, &v1alpha1.PeeringAcceptorList{}) fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(k8sObjects...).Build() // Create the peering acceptor controller. 
- controller := &AcceptorController{ + controller := &PeeringAcceptorController{ Client: fakeClient, Log: logrtest.TestLogger{T: t}, Scheme: s, } - controller.updateStatusError(context.Background(), tt.acceptor, internalError, tt.reconcileErr) + controller.updateStatusError(context.Background(), tt.acceptor, InternalError, tt.reconcileErr) acceptor := &v1alpha1.PeeringAcceptor{} acceptorName := types.NamespacedName{ @@ -1226,7 +1315,7 @@ func TestAcceptor_FilterPeeringAcceptor(t *testing.T) { Name: "test", Namespace: "test", Labels: map[string]string{ - constants.LabelPeeringToken: "true", + labelPeeringToken: "true", }, }, }, @@ -1238,7 +1327,7 @@ func TestAcceptor_FilterPeeringAcceptor(t *testing.T) { Name: "test", Namespace: "test", Labels: map[string]string{ - constants.LabelPeeringToken: "false", + labelPeeringToken: "false", }, }, }, @@ -1250,7 +1339,7 @@ func TestAcceptor_FilterPeeringAcceptor(t *testing.T) { Name: "test", Namespace: "test", Labels: map[string]string{ - constants.LabelPeeringToken: "foo", + labelPeeringToken: "foo", }, }, }, @@ -1269,7 +1358,7 @@ func TestAcceptor_FilterPeeringAcceptor(t *testing.T) { for name, tt := range cases { t.Run(name, func(t *testing.T) { - controller := AcceptorController{} + controller := PeeringAcceptorController{} result := controller.filterPeeringAcceptors(tt.secret) require.Equal(t, tt.result, result) }) @@ -1476,7 +1565,7 @@ func TestAcceptor_RequestsForPeeringTokens(t *testing.T) { s := scheme.Scheme s.AddKnownTypes(v1alpha1.GroupVersion, &v1alpha1.PeeringAcceptor{}, &v1alpha1.PeeringAcceptorList{}) fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(tt.secret, &tt.acceptors).Build() - controller := AcceptorController{ + controller := PeeringAcceptorController{ Client: fakeClient, Log: logrtest.TestLogger{T: t}, } @@ -1486,3 +1575,301 @@ func TestAcceptor_RequestsForPeeringTokens(t *testing.T) { }) } } + +func TestGetExposeServersServiceAddress(t *testing.T) { + t.Parallel() + cases := []struct { + name string + k8sObjects func() []runtime.Object + releaseNamespace string + expAddresses []string + expErr string + }{ + { + name: "Valid LoadBalancer service", + releaseNamespace: "test", + k8sObjects: func() []runtime.Object { + exposeServersService := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-expose-servers", + Namespace: "test", + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeLoadBalancer, + }, + Status: corev1.ServiceStatus{ + LoadBalancer: corev1.LoadBalancerStatus{ + Ingress: []corev1.LoadBalancerIngress{ + { + IP: "1.2.3.4", + }, + }, + }, + }, + } + return []runtime.Object{exposeServersService} + }, + expAddresses: []string{"1.2.3.4:8502"}, + }, + { + name: "Valid LoadBalancer service with Hostname", + releaseNamespace: "test", + k8sObjects: func() []runtime.Object { + exposeServersService := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-expose-servers", + Namespace: "test", + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeLoadBalancer, + }, + Status: corev1.ServiceStatus{ + LoadBalancer: corev1.LoadBalancerStatus{ + Ingress: []corev1.LoadBalancerIngress{ + { + Hostname: "foo.bar.baz", + }, + }, + }, + }, + } + return []runtime.Object{exposeServersService} + }, + expAddresses: []string{"foo.bar.baz:8502"}, + }, + { + name: "LoadBalancer has no addresses", + releaseNamespace: "test", + k8sObjects: func() []runtime.Object { + exposeServersService := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-expose-servers", + Namespace: "test", + 
}, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeLoadBalancer, + }, + Status: corev1.ServiceStatus{ + LoadBalancer: corev1.LoadBalancerStatus{ + Ingress: []corev1.LoadBalancerIngress{}, + }, + }, + } + return []runtime.Object{exposeServersService} + }, + expErr: "unable to find load balancer address for test-expose-servers service, retrying", + }, + { + name: "LoadBalancer has empty IP", + releaseNamespace: "test", + k8sObjects: func() []runtime.Object { + exposeServersService := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-expose-servers", + Namespace: "test", + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeLoadBalancer, + }, + Status: corev1.ServiceStatus{ + LoadBalancer: corev1.LoadBalancerStatus{ + Ingress: []corev1.LoadBalancerIngress{ + { + IP: "", + }, + }, + }, + }, + } + return []runtime.Object{exposeServersService} + }, + expErr: "unable to find load balancer address for test-expose-servers service, retrying", + }, + { + name: "Valid NodePort service", + releaseNamespace: "test", + k8sObjects: func() []runtime.Object { + exposeServersService := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-expose-servers", + Namespace: "test", + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeNodePort, + Ports: []corev1.ServicePort{ + { + Name: "grpc", + NodePort: 30100, + }, + }, + }, + Status: corev1.ServiceStatus{}, + } + node1 := createNode("fake-gke-node1", "", "10.1.1.1") + node2 := createNode("fake-gke-node2", "", "10.2.2.2") + node3 := createNode("fake-gke-node3", "", "10.3.3.3") + return []runtime.Object{exposeServersService, node1, node2, node3} + }, + expAddresses: []string{"10.1.1.1:30100", "10.2.2.2:30100", "10.3.3.3:30100"}, + }, + { + name: "Valid NodePort service ignores node external IPs", + releaseNamespace: "test", + k8sObjects: func() []runtime.Object { + exposeServersService := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-expose-servers", + Namespace: "test", + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeNodePort, + Ports: []corev1.ServicePort{ + { + Name: "grpc", + NodePort: 30100, + }, + }, + }, + Status: corev1.ServiceStatus{}, + } + node1 := createNode("fake-gke-node1", "30.1.1.1", "10.1.1.1") + node2 := createNode("fake-gke-node2", "30.2.2.2", "10.2.2.2") + node3 := createNode("fake-gke-node3", "30.3.3.3", "10.3.3.3") + return []runtime.Object{exposeServersService, node1, node2, node3} + }, + expAddresses: []string{"10.1.1.1:30100", "10.2.2.2:30100", "10.3.3.3:30100"}, + }, + { + name: "Invalid NodePort service with only external IPs", + releaseNamespace: "test", + k8sObjects: func() []runtime.Object { + exposeServersService := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-expose-servers", + Namespace: "test", + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeNodePort, + Ports: []corev1.ServicePort{ + { + Name: "grpc", + NodePort: 30100, + }, + }, + }, + Status: corev1.ServiceStatus{}, + } + node1 := createNode("fake-gke-node1", "30.1.1.1", "") + node2 := createNode("fake-gke-node2", "30.2.2.2", "") + node3 := createNode("fake-gke-node3", "30.3.3.3", "") + return []runtime.Object{exposeServersService, node1, node2, node3} + }, + expErr: "no server addresses were scraped from expose-servers service", + }, + { + name: "Invalid NodePort service because no nodes exist to scrape addresses from", + releaseNamespace: "test", + k8sObjects: func() []runtime.Object { + exposeServersService := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: 
"test-expose-servers", + Namespace: "test", + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeNodePort, + Ports: []corev1.ServicePort{ + { + Name: "grpc", + NodePort: 30100, + }, + }, + }, + Status: corev1.ServiceStatus{}, + } + return []runtime.Object{exposeServersService} + }, + expErr: "no nodes were found for scraping server addresses from expose-servers service", + }, + { + name: "Invalid NodePort service because no grpc port exists", + releaseNamespace: "test", + k8sObjects: func() []runtime.Object { + exposeServersService := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-expose-servers", + Namespace: "test", + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeNodePort, + Ports: []corev1.ServicePort{ + { + Name: "not-grpc", + NodePort: 30100, + }, + }, + }, + Status: corev1.ServiceStatus{}, + } + node1 := createNode("fake-gke-node1", "30.1.1.1", "10.1.1.1") + node2 := createNode("fake-gke-node2", "30.2.2.2", "10.2.2.2") + node3 := createNode("fake-gke-node3", "30.3.3.3", "10.3.3.3") + return []runtime.Object{exposeServersService, node1, node2, node3} + }, + expErr: "no grpc port was found for expose-servers service", + }, + } + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { + // Add the default namespace. + ns := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "default"}} + nsTest := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test"}} + // Create fake k8s client + k8sObjects := append(tt.k8sObjects(), &ns, &nsTest) + + s := scheme.Scheme + //s.AddKnownTypes(v1alpha1.GroupVersion, &v1alpha1.PeeringAcceptor{}, &v1alpha1.PeeringAcceptorList{}) + fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(k8sObjects...).Build() + + // Create the peering acceptor controller + controller := &PeeringAcceptorController{ + Client: fakeClient, + Log: logrtest.TestLogger{T: t}, + Scheme: s, + ReleaseNamespace: tt.releaseNamespace, + ExposeServersServiceName: "test-expose-servers", + } + + // Get addresses from expose-servers service. + addrs, err := controller.getExposeServersServiceAddresses() + if tt.expErr != "" { + require.EqualError(t, err, tt.expErr) + } else { + require.NoError(t, err) + } + + // Assert all the expected addresses are there. + for _, expAddr := range tt.expAddresses { + require.Contains(t, addrs, expAddr) + } + }) + } +} + +// createNode is a test helper to create Kubernetes nodes. 
+func createNode(name, externalIP, internalIP string) *corev1.Node { + node := &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Status: corev1.NodeStatus{ + Addresses: []corev1.NodeAddress{}, + }, + } + if externalIP != "" { + node.Status.Addresses = append(node.Status.Addresses, corev1.NodeAddress{Type: corev1.NodeExternalIP, Address: externalIP}) + } + if internalIP != "" { + node.Status.Addresses = append(node.Status.Addresses, corev1.NodeAddress{Type: corev1.NodeInternalIP, Address: internalIP}) + } + return node +} diff --git a/control-plane/connect-inject/controllers/peering/peering_dialer_controller.go b/control-plane/connect-inject/peering_dialer_controller.go similarity index 81% rename from control-plane/connect-inject/controllers/peering/peering_dialer_controller.go rename to control-plane/connect-inject/peering_dialer_controller.go index 98646b1654..aa1fb4e0db 100644 --- a/control-plane/connect-inject/controllers/peering/peering_dialer_controller.go +++ b/control-plane/connect-inject/peering_dialer_controller.go @@ -1,4 +1,4 @@ -package peering +package connectinject import ( "context" @@ -9,8 +9,6 @@ import ( "github.com/go-logr/logr" consulv1alpha1 "github.com/hashicorp/consul-k8s/control-plane/api/v1alpha1" - "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" - "github.com/hashicorp/consul-k8s/control-plane/consul" "github.com/hashicorp/consul/api" corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" @@ -31,14 +29,10 @@ import ( // PeeringDialerController reconciles a PeeringDialer object. type PeeringDialerController struct { client.Client - // ConsulClientConfig is the config to create a Consul API client. - ConsulClientConfig *consul.Config - // ConsulServerConnMgr is the watcher for the Consul server addresses. - ConsulServerConnMgr consul.ServerConnectionManager - // Log is the logger for this controller. - Log logr.Logger - // Scheme is the API scheme that this controller should have. - Scheme *runtime.Scheme + // ConsulClient points at the agent local to the connect-inject deployment pod. + ConsulClient *api.Client + Log logr.Logger + Scheme *runtime.Scheme context.Context } @@ -64,35 +58,23 @@ func (r *PeeringDialerController) Reconcile(ctx context.Context, req ctrl.Reques return ctrl.Result{}, err } - // Create Consul client for this reconcile. - serverState, err := r.ConsulServerConnMgr.State() - if err != nil { - r.Log.Error(err, "failed to get Consul server state", "name", req.Name, "ns", req.Namespace) - return ctrl.Result{}, err - } - apiClient, err := consul.NewClientFromConnMgrState(r.ConsulClientConfig, serverState) - if err != nil { - r.Log.Error(err, "failed to create Consul API client", "name", req.Name, "ns", req.Namespace) - return ctrl.Result{}, err - } - // The DeletionTimestamp is zero when the object has not been marked for deletion. The finalizer is added // in case it does not exist to all resources. If the DeletionTimestamp is non-zero, the object has been // marked for deletion and goes into the deletion workflow. 
if dialer.GetDeletionTimestamp().IsZero() { - if !controllerutil.ContainsFinalizer(dialer, finalizerName) { - controllerutil.AddFinalizer(dialer, finalizerName) + if !controllerutil.ContainsFinalizer(dialer, FinalizerName) { + controllerutil.AddFinalizer(dialer, FinalizerName) if err := r.Update(ctx, dialer); err != nil { return ctrl.Result{}, err } } } else { - if containsString(dialer.Finalizers, finalizerName) { + if containsString(dialer.Finalizers, FinalizerName) { r.Log.Info("PeeringDialer was deleted, deleting from Consul", "name", req.Name, "ns", req.Namespace) - if err := r.deletePeering(ctx, apiClient, req.Name); err != nil { + if err := r.deletePeering(ctx, req.Name); err != nil { return ctrl.Result{}, err } - controllerutil.RemoveFinalizer(dialer, finalizerName) + controllerutil.RemoveFinalizer(dialer, FinalizerName) err := r.Update(ctx, dialer) return ctrl.Result{}, err } @@ -102,14 +84,14 @@ func (r *PeeringDialerController) Reconcile(ctx context.Context, req ctrl.Reques var specSecret *corev1.Secret specSecret, err = r.getSecret(ctx, dialer.Secret().Name, dialer.Namespace) if err != nil { - r.updateStatusError(ctx, dialer, kubernetesError, err) + r.updateStatusError(ctx, dialer, KubernetesError, err) return ctrl.Result{}, err } // If specSecret doesn't exist, error because we can only initiate peering if we have a token to initiate with. if specSecret == nil { err = errors.New("PeeringDialer spec.peer.secret does not exist") - r.updateStatusError(ctx, dialer, internalError, err) + r.updateStatusError(ctx, dialer, InternalError, err) return ctrl.Result{}, err } @@ -124,7 +106,7 @@ func (r *PeeringDialerController) Reconcile(ctx context.Context, req ctrl.Reques if secretRefSet { statusSecret, err = r.getSecret(ctx, dialer.SecretRef().Name, dialer.Namespace) if err != nil { - r.updateStatusError(ctx, dialer, kubernetesError, err) + r.updateStatusError(ctx, dialer, KubernetesError, err) return ctrl.Result{}, err } } @@ -136,8 +118,8 @@ func (r *PeeringDialerController) Reconcile(ctx context.Context, req ctrl.Reques // correct secret specified in the spec. r.Log.Info("the secret in status.secretRef doesn't exist or wasn't set, establishing peering with the existing spec.peer.secret", "secret-name", dialer.Secret().Name, "secret-namespace", dialer.Namespace) peeringToken := specSecret.Data[dialer.Secret().Key] - if err := r.establishPeering(ctx, apiClient, dialer.Name, string(peeringToken)); err != nil { - r.updateStatusError(ctx, dialer, consulAgentError, err) + if err := r.establishPeering(ctx, dialer.Name, string(peeringToken)); err != nil { + r.updateStatusError(ctx, dialer, ConsulAgentError, err) return ctrl.Result{}, err } else { err := r.updateStatus(ctx, req.NamespacedName, specSecret.ResourceVersion) @@ -149,7 +131,7 @@ func (r *PeeringDialerController) Reconcile(ctx context.Context, req ctrl.Reques // Read the peering from Consul. 
r.Log.Info("reading peering from Consul", "name", dialer.Name) - peering, _, err := apiClient.Peerings().Read(ctx, dialer.Name, nil) + peering, _, err := r.ConsulClient.Peerings().Read(ctx, dialer.Name, nil) if err != nil { r.Log.Error(err, "failed to get Peering from Consul", "name", req.Name) return ctrl.Result{}, err @@ -159,8 +141,8 @@ func (r *PeeringDialerController) Reconcile(ctx context.Context, req ctrl.Reques if peering == nil { r.Log.Info("status.secret exists, but the peering doesn't exist in Consul; establishing peering with the existing spec.peer.secret", "secret-name", dialer.Secret().Name, "secret-namespace", dialer.Namespace) peeringToken := specSecret.Data[dialer.Secret().Key] - if err := r.establishPeering(ctx, apiClient, dialer.Name, string(peeringToken)); err != nil { - r.updateStatusError(ctx, dialer, consulAgentError, err) + if err := r.establishPeering(ctx, dialer.Name, string(peeringToken)); err != nil { + r.updateStatusError(ctx, dialer, ConsulAgentError, err) return ctrl.Result{}, err } else { err := r.updateStatus(ctx, req.NamespacedName, specSecret.ResourceVersion) @@ -173,8 +155,8 @@ func (r *PeeringDialerController) Reconcile(ctx context.Context, req ctrl.Reques if r.specStatusSecretsDifferent(dialer, specSecret) { r.Log.Info("the spec.peer.secret is different from the status secret, re-establishing peering", "secret-name", dialer.Secret().Name, "secret-namespace", dialer.Namespace) peeringToken := specSecret.Data[dialer.Secret().Key] - if err := r.establishPeering(ctx, apiClient, dialer.Name, string(peeringToken)); err != nil { - r.updateStatusError(ctx, dialer, consulAgentError, err) + if err := r.establishPeering(ctx, dialer.Name, string(peeringToken)); err != nil { + r.updateStatusError(ctx, dialer, ConsulAgentError, err) return ctrl.Result{}, err } else { err := r.updateStatus(ctx, req.NamespacedName, specSecret.ResourceVersion) @@ -185,15 +167,15 @@ func (r *PeeringDialerController) Reconcile(ctx context.Context, req ctrl.Reques if updated, err := r.versionAnnotationUpdated(dialer); err == nil && updated { r.Log.Info("the version annotation was incremented; re-establishing peering with spec.peer.secret", "secret-name", dialer.Secret().Name, "secret-namespace", dialer.Namespace) peeringToken := specSecret.Data[dialer.Secret().Key] - if err := r.establishPeering(ctx, apiClient, dialer.Name, string(peeringToken)); err != nil { - r.updateStatusError(ctx, dialer, consulAgentError, err) + if err := r.establishPeering(ctx, dialer.Name, string(peeringToken)); err != nil { + r.updateStatusError(ctx, dialer, ConsulAgentError, err) return ctrl.Result{}, err } else { err := r.updateStatus(ctx, req.NamespacedName, specSecret.ResourceVersion) return ctrl.Result{}, err } } else if err != nil { - r.updateStatusError(ctx, dialer, internalError, err) + r.updateStatusError(ctx, dialer, InternalError, err) return ctrl.Result{}, err } } @@ -225,7 +207,7 @@ func (r *PeeringDialerController) updateStatus(ctx context.Context, dialerObjKey } dialer.Status.LastSyncedTime = &metav1.Time{Time: time.Now()} dialer.SetSyncedCondition(corev1.ConditionTrue, "", "") - if peeringVersionString, ok := dialer.Annotations[constants.AnnotationPeeringVersion]; ok { + if peeringVersionString, ok := dialer.Annotations[annotationPeeringVersion]; ok { peeringVersion, err := strconv.ParseUint(peeringVersionString, 10, 64) if err != nil { r.Log.Error(err, "failed to update PeeringDialer status", "name", dialer.Name, "namespace", dialer.Namespace) @@ -276,12 +258,12 @@ func (r 
*PeeringDialerController) SetupWithManager(mgr ctrl.Manager) error { } // establishPeering is a helper function that calls the Consul api to generate a token for the peer. -func (r *PeeringDialerController) establishPeering(ctx context.Context, apiClient *api.Client, peerName string, peeringToken string) error { +func (r *PeeringDialerController) establishPeering(ctx context.Context, peerName string, peeringToken string) error { req := api.PeeringEstablishRequest{ PeerName: peerName, PeeringToken: peeringToken, } - _, _, err := apiClient.Peerings().Establish(ctx, req, nil) + _, _, err := r.ConsulClient.Peerings().Establish(ctx, req, nil) if err != nil { r.Log.Error(err, "failed to initiate peering", "err", err) return err @@ -290,8 +272,8 @@ func (r *PeeringDialerController) establishPeering(ctx context.Context, apiClien } // deletePeering is a helper function that calls the Consul api to delete a peering. -func (r *PeeringDialerController) deletePeering(ctx context.Context, apiClient *api.Client, peerName string) error { - _, err := apiClient.Peerings().Delete(ctx, peerName, nil) +func (r *PeeringDialerController) deletePeering(ctx context.Context, peerName string) error { + _, err := r.ConsulClient.Peerings().Delete(ctx, peerName, nil) if err != nil { r.Log.Error(err, "failed to delete Peering from Consul", "name", peerName) return err @@ -300,7 +282,7 @@ func (r *PeeringDialerController) deletePeering(ctx context.Context, apiClient * } func (r *PeeringDialerController) versionAnnotationUpdated(dialer *consulv1alpha1.PeeringDialer) (bool, error) { - if peeringVersionString, ok := dialer.Annotations[constants.AnnotationPeeringVersion]; ok { + if peeringVersionString, ok := dialer.Annotations[annotationPeeringVersion]; ok { peeringVersion, err := strconv.ParseUint(peeringVersionString, 10, 64) if err != nil { return false, err @@ -342,7 +324,7 @@ func (r *PeeringDialerController) requestsForPeeringTokens(object client.Object) // the Secret is a Peering Token Secret. 
func (r *PeeringDialerController) filterPeeringDialers(object client.Object) bool { secretLabels := object.GetLabels() - isPeeringToken, ok := secretLabels[constants.LabelPeeringToken] + isPeeringToken, ok := secretLabels[labelPeeringToken] if !ok { return false } diff --git a/control-plane/connect-inject/controllers/peering/peering_dialer_controller_test.go b/control-plane/connect-inject/peering_dialer_controller_test.go similarity index 92% rename from control-plane/connect-inject/controllers/peering/peering_dialer_controller_test.go rename to control-plane/connect-inject/peering_dialer_controller_test.go index ba33afd765..62cb618d90 100644 --- a/control-plane/connect-inject/controllers/peering/peering_dialer_controller_test.go +++ b/control-plane/connect-inject/peering_dialer_controller_test.go @@ -1,4 +1,4 @@ -package peering +package connectinject import ( "context" @@ -8,14 +8,9 @@ import ( logrtest "github.com/go-logr/logr/testing" "github.com/hashicorp/consul-k8s/control-plane/api/v1alpha1" - "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" - "github.com/hashicorp/consul-k8s/control-plane/consul" - "github.com/hashicorp/consul-k8s/control-plane/helper/test" - "github.com/hashicorp/consul-server-connection-manager/discovery" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/sdk/testutil/retry" - "github.com/hashicorp/go-hclog" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -31,6 +26,8 @@ import ( // TestReconcile_CreateUpdatePeeringDialer creates a peering dialer. func TestReconcile_CreateUpdatePeeringDialer(t *testing.T) { t.Parallel() + nodeName := "test-node" + node2Name := "test-node2" cases := map[string]struct { peeringName string k8sObjects func() []runtime.Object @@ -203,7 +200,7 @@ func TestReconcile_CreateUpdatePeeringDialer(t *testing.T) { Name: "peering", Namespace: "default", Annotations: map[string]string{ - constants.AnnotationPeeringVersion: "2", + annotationPeeringVersion: "2", }, }, Spec: v1alpha1.PeeringDialerSpec{ @@ -253,10 +250,12 @@ func TestReconcile_CreateUpdatePeeringDialer(t *testing.T) { // Create test consul server. acceptorPeerServer, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { + c.NodeName = nodeName // We set the datacenter because the server name, typically formatted as "server.." // must be unique on the acceptor and dialer peers. Otherwise the following consul error will be thrown: // https://github.com/hashicorp/consul/blob/74b87d49d33069a048aead7a86d85d4b4b6461b5/agent/rpc/peering/service.go#L491. c.Datacenter = "acceptor-dc" + c.Ports.HTTPS = 0 }) require.NoError(t, err) defer acceptorPeerServer.Stop() @@ -292,16 +291,23 @@ func TestReconcile_CreateUpdatePeeringDialer(t *testing.T) { } // Create test consul server. - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - dialerClient := testClient.APIClient + dialerPeerServer, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { + c.NodeName = node2Name + }) + require.NoError(t, err) + defer dialerPeerServer.Stop() + dialerPeerServer.WaitForServiceIntentions(t) + + cfg = &api.Config{ + Address: dialerPeerServer.HTTPAddr, + } + dialerClient, err := api.NewClient(cfg) + require.NoError(t, err) // If the peering is supposed to already exist in Consul, then establish a peering with the existing token, so the peering will exist on the dialing side. 
if tt.peeringExists { - retry.Run(t, func(r *retry.R) { - _, _, err = dialerClient.Peerings().Establish(context.Background(), api.PeeringEstablishRequest{PeerName: tt.peeringName, PeeringToken: encodedPeeringToken}, nil) - require.NoError(r, err) - }) - + _, _, err := dialerClient.Peerings().Establish(context.Background(), api.PeeringEstablishRequest{PeerName: tt.peeringName, PeeringToken: encodedPeeringToken}, nil) + require.NoError(t, err) k8sObjects = append(k8sObjects, createSecret("dialer-token-old", "default", "token", "old-token")) // Create a new token to be used by Reconcile(). The original token has already been // used once to simulate establishing an existing peering. @@ -317,11 +323,10 @@ func TestReconcile_CreateUpdatePeeringDialer(t *testing.T) { // Create the peering dialer controller controller := &PeeringDialerController{ - Client: fakeClient, - Log: logrtest.TestLogger{T: t}, - ConsulClientConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, - Scheme: s, + Client: fakeClient, + Log: logrtest.TestLogger{T: t}, + ConsulClient: dialerClient, + Scheme: s, } namespacedName := types.NamespacedName{ Name: "peering", @@ -355,7 +360,7 @@ func TestReconcile_CreateUpdatePeeringDialer(t *testing.T) { require.Equal(t, tt.expectedStatus.SecretRef.Backend, dialer.SecretRef().Backend) require.Equal(t, "latest-version", dialer.SecretRef().ResourceVersion) require.Equal(t, tt.expectedStatus.LatestPeeringVersion, dialer.Status.LatestPeeringVersion) - require.Contains(t, dialer.Finalizers, finalizerName) + require.Contains(t, dialer.Finalizers, FinalizerName) require.NotEmpty(t, dialer.SecretRef().ResourceVersion) require.NotEqual(t, "test-version", dialer.SecretRef().ResourceVersion) } @@ -366,6 +371,8 @@ func TestReconcile_CreateUpdatePeeringDialer(t *testing.T) { func TestReconcile_VersionAnnotationPeeringDialer(t *testing.T) { t.Parallel() + nodeName := "test-node" + node2Name := "test-node2" cases := map[string]struct { annotations map[string]string expErr string @@ -373,13 +380,13 @@ func TestReconcile_VersionAnnotationPeeringDialer(t *testing.T) { }{ "fails if annotation is not a number": { annotations: map[string]string{ - constants.AnnotationPeeringVersion: "foo", + annotationPeeringVersion: "foo", }, expErr: `strconv.ParseUint: parsing "foo": invalid syntax`, }, "is no/op if annotation value is less than value in status": { annotations: map[string]string{ - constants.AnnotationPeeringVersion: "2", + annotationPeeringVersion: "2", }, expectedStatus: &v1alpha1.PeeringDialerStatus{ SecretRef: &v1alpha1.SecretRefStatus{ @@ -394,7 +401,7 @@ func TestReconcile_VersionAnnotationPeeringDialer(t *testing.T) { }, "is no/op if annotation value is equal to value in status": { annotations: map[string]string{ - constants.AnnotationPeeringVersion: "3", + annotationPeeringVersion: "3", }, expectedStatus: &v1alpha1.PeeringDialerStatus{ SecretRef: &v1alpha1.SecretRefStatus{ @@ -409,7 +416,7 @@ func TestReconcile_VersionAnnotationPeeringDialer(t *testing.T) { }, "updates if annotation value is greater than value in status": { annotations: map[string]string{ - constants.AnnotationPeeringVersion: "4", + annotationPeeringVersion: "4", }, expectedStatus: &v1alpha1.PeeringDialerStatus{ SecretRef: &v1alpha1.SecretRefStatus{ @@ -428,12 +435,11 @@ func TestReconcile_VersionAnnotationPeeringDialer(t *testing.T) { // Create test consul server. 
acceptorPeerServer, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { - // We set different cluster id for the connect CA because the server name, - // typically formatted as server.dc1.peering..consul + c.NodeName = nodeName + // We set the datacenter because the server name, typically formatted as "server.." // must be unique on the acceptor and dialer peers. - c.Connect["ca_config"] = map[string]interface{}{ - "cluster_id": "00000000-2222-3333-4444-555555555555", - } + c.Datacenter = "acceptor-dc" + c.Ports.HTTPS = 0 }) require.NoError(t, err) defer acceptorPeerServer.Stop() @@ -483,34 +489,22 @@ func TestReconcile_VersionAnnotationPeeringDialer(t *testing.T) { require.NoError(t, err) // Create test consul server. - var testServerCfg *testutil.TestServerConfig dialerPeerServer, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { - testServerCfg = c + c.NodeName = node2Name }) require.NoError(t, err) defer dialerPeerServer.Stop() dialerPeerServer.WaitForServiceIntentions(t) - consulConfig := &consul.Config{ - APIClientConfig: &api.Config{Address: dialerPeerServer.HTTPAddr}, - HTTPPort: testServerCfg.Ports.HTTP, + cfg = &api.Config{ + Address: dialerPeerServer.HTTPAddr, } - dialerClient, err := api.NewClient(consulConfig.APIClientConfig) - require.NoError(t, err) - - ctx, cancelFunc := context.WithCancel(context.Background()) - t.Cleanup(cancelFunc) - watcher, err := discovery.NewWatcher(ctx, discovery.Config{Addresses: "127.0.0.1", GRPCPort: testServerCfg.Ports.GRPC}, hclog.NewNullLogger()) + dialerClient, err := api.NewClient(cfg) require.NoError(t, err) - t.Cleanup(watcher.Stop) - go watcher.Run() // Establish a peering with the generated token. - retry.Run(t, func(r *retry.R) { - _, _, err = dialerClient.Peerings().Establish(context.Background(), api.PeeringEstablishRequest{PeerName: "peering", PeeringToken: generatedToken.PeeringToken}, nil) - require.NoError(r, err) - }) - + _, _, err = dialerClient.Peerings().Establish(context.Background(), api.PeeringEstablishRequest{PeerName: "peering", PeeringToken: generatedToken.PeeringToken}, nil) + require.NoError(t, err) k8sObjects = append(k8sObjects, createSecret("dialer-token-old", "default", "token", "old-token")) // Create a new token to be potentially used by Reconcile(). The original token has already been @@ -527,11 +521,10 @@ func TestReconcile_VersionAnnotationPeeringDialer(t *testing.T) { // Create the peering dialer controller controller := &PeeringDialerController{ - Client: fakeClient, - Log: logrtest.TestLogger{T: t}, - ConsulClientConfig: consulConfig, - ConsulServerConnMgr: watcher, - Scheme: s, + Client: fakeClient, + Log: logrtest.TestLogger{T: t}, + ConsulClient: dialerClient, + Scheme: s, } namespacedName := types.NamespacedName{ Name: "peering", @@ -724,7 +717,7 @@ func TestReconcileDeletePeeringDialer(t *testing.T) { Name: "dialer-deleted", Namespace: "default", DeletionTimestamp: &metav1.Time{Time: time.Now()}, - Finalizers: []string{finalizerName}, + Finalizers: []string{FinalizerName}, }, Spec: v1alpha1.PeeringDialerSpec{ Peer: &v1alpha1.Peer{ @@ -742,20 +735,29 @@ func TestReconcileDeletePeeringDialer(t *testing.T) { fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(k8sObjects...).Build() // Create test consul server. 
- testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - consulClient := testClient.APIClient + consul, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { + c.NodeName = "test-node" + }) + require.NoError(t, err) + defer consul.Stop() + consul.WaitForServiceIntentions(t) + + cfg := &api.Config{ + Address: consul.HTTPAddr, + } + consulClient, err := api.NewClient(cfg) + require.NoError(t, err) // Add the initial peerings into Consul by calling the Generate token endpoint. - _, _, err := consulClient.Peerings().GenerateToken(context.Background(), api.PeeringGenerateTokenRequest{PeerName: "dialer-deleted"}, nil) + _, _, err = consulClient.Peerings().GenerateToken(context.Background(), api.PeeringGenerateTokenRequest{PeerName: "dialer-deleted"}, nil) require.NoError(t, err) // Create the peering dialer controller. pdc := &PeeringDialerController{ - Client: fakeClient, - Log: logrtest.TestLogger{T: t}, - ConsulClientConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, - Scheme: s, + Client: fakeClient, + Log: logrtest.TestLogger{T: t}, + ConsulClient: consulClient, + Scheme: s, } namespacedName := types.NamespacedName{ Name: "dialer-deleted", @@ -937,7 +939,7 @@ func TestDialerUpdateStatusError(t *testing.T) { { Type: v1alpha1.ConditionSynced, Status: corev1.ConditionFalse, - Reason: internalError, + Reason: InternalError, Message: "this is an error", }, }, @@ -974,7 +976,7 @@ func TestDialerUpdateStatusError(t *testing.T) { { Type: v1alpha1.ConditionSynced, Status: corev1.ConditionFalse, - Reason: internalError, + Reason: InternalError, Message: "this is an error", }, }, @@ -1000,7 +1002,7 @@ func TestDialerUpdateStatusError(t *testing.T) { Scheme: s, } - controller.updateStatusError(context.Background(), tt.dialer, internalError, tt.reconcileErr) + controller.updateStatusError(context.Background(), tt.dialer, InternalError, tt.reconcileErr) dialer := &v1alpha1.PeeringDialer{} dialerName := types.NamespacedName{ @@ -1027,7 +1029,7 @@ func TestDialer_FilterPeeringDialers(t *testing.T) { Name: "test", Namespace: "test", Labels: map[string]string{ - constants.LabelPeeringToken: "true", + labelPeeringToken: "true", }, }, }, @@ -1039,7 +1041,7 @@ func TestDialer_FilterPeeringDialers(t *testing.T) { Name: "test", Namespace: "test", Labels: map[string]string{ - constants.LabelPeeringToken: "false", + labelPeeringToken: "false", }, }, }, @@ -1051,7 +1053,7 @@ func TestDialer_FilterPeeringDialers(t *testing.T) { Name: "test", Namespace: "test", Labels: map[string]string{ - constants.LabelPeeringToken: "foo", + labelPeeringToken: "foo", }, }, }, diff --git a/control-plane/connect-inject/webhook/redirect_traffic.go b/control-plane/connect-inject/redirect_traffic.go similarity index 56% rename from control-plane/connect-inject/webhook/redirect_traffic.go rename to control-plane/connect-inject/redirect_traffic.go index eab23a2b91..71aebf54e7 100644 --- a/control-plane/connect-inject/webhook/redirect_traffic.go +++ b/control-plane/connect-inject/redirect_traffic.go @@ -1,67 +1,65 @@ -package webhook +package connectinject import ( "encoding/json" "fmt" + "os" "strconv" - "github.com/hashicorp/consul-k8s/control-plane/connect-inject/common" - "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" "github.com/hashicorp/consul/sdk/iptables" corev1 "k8s.io/api/core/v1" ) -// addRedirectTrafficConfigAnnotation creates an iptables.Config in JSON format based on proxy configuration. 
+// addRedirectTrafficConfigAnnotation creates an iptables.Config based on proxy configuration. // iptables.Config: -// -// ConsulDNSIP: an environment variable named RESOURCE_PREFIX_DNS_SERVICE_HOST where RESOURCE_PREFIX is the consul.fullname in helm. -// ProxyUserID: a constant set in Annotations -// ProxyInboundPort: the service port or bind port -// ProxyOutboundPort: default transparent proxy outbound port or transparent proxy outbound listener port -// ExcludeInboundPorts: prometheus, envoy stats, expose paths, checks and excluded pod annotations -// ExcludeOutboundPorts: pod annotations -// ExcludeOutboundCIDRs: pod annotations -// ExcludeUIDs: pod annotations -func (w *MeshWebhook) iptablesConfigJSON(pod corev1.Pod, ns corev1.Namespace) (string, error) { +// ConsulDNSIP: an environment variable named RESOURCE_PREFIX_DNS_SERVICE_HOST where RESOURCE_PREFIX is the consul.fullname in helm. +// ProxyUserID: a constant set in Annotations +// ProxyInboundPort: the service port or bind port +// ProxyOutboundPort: default transparent proxy outbound port or transparent proxy outbound listener port +// ExcludeInboundPorts: prometheus, envoy stats, expose paths, checks and excluded pod annotations +// ExcludeOutboundPorts: pod annotations +// ExcludeOutboundCIDRs: pod annotations +// ExcludeUIDs: pod annotations +func (w *MeshWebhook) addRedirectTrafficConfigAnnotation(pod *corev1.Pod, ns corev1.Namespace) error { cfg := iptables.Config{ - ProxyUserID: strconv.Itoa(sidecarUserAndGroupID), + ProxyUserID: strconv.Itoa(envoyUserAndGroupID), } // Set the proxy's inbound port. - cfg.ProxyInboundPort = constants.ProxyDefaultInboundPort + cfg.ProxyInboundPort = proxyDefaultInboundPort // Set the proxy's outbound port. cfg.ProxyOutboundPort = iptables.DefaultTProxyOutboundPort // If metrics are enabled, get the prometheusScrapePort and exclude it from the inbound ports - enableMetrics, err := w.MetricsConfig.EnableMetrics(pod) + enableMetrics, err := w.MetricsConfig.enableMetrics(*pod) if err != nil { - return "", err + return err } if enableMetrics { - prometheusScrapePort, err := w.MetricsConfig.PrometheusScrapePort(pod) + prometheusScrapePort, err := w.MetricsConfig.prometheusScrapePort(*pod) if err != nil { - return "", err + return err } cfg.ExcludeInboundPorts = append(cfg.ExcludeInboundPorts, prometheusScrapePort) } - // Exclude any overwritten liveness/readiness/startup ports from redirection. - overwriteProbes, err := common.ShouldOverwriteProbes(pod, w.TProxyOverwriteProbes) - if err != nil { - return "", err - } - // Exclude the port on which the proxy health check port will be configured if // using the proxy health check for a service. - if useProxyHealthCheck(pod) { - cfg.ExcludeInboundPorts = append(cfg.ExcludeInboundPorts, strconv.Itoa(constants.ProxyDefaultHealthPort)) + if useProxyHealthCheck(*pod) { + cfg.ExcludeInboundPorts = append(cfg.ExcludeInboundPorts, strconv.Itoa(proxyDefaultHealthPort)) + } + + // Exclude any overwritten liveness/readiness/startup ports from redirection. 
+ overwriteProbes, err := shouldOverwriteProbes(*pod, w.TProxyOverwriteProbes) + if err != nil { + return err } if overwriteProbes { for i, container := range pod.Spec.Containers { // skip the "envoy-sidecar" container from having its probes overridden - if container.Name == sidecarContainer { + if container.Name == envoySidecarContainer { continue } if container.LivenessProbe != nil && container.LivenessProbe.HTTPGet != nil { @@ -77,53 +75,47 @@ func (w *MeshWebhook) iptablesConfigJSON(pod corev1.Pod, ns corev1.Namespace) (s } // Inbound ports - excludeInboundPorts := splitCommaSeparatedItemsFromAnnotation(constants.AnnotationTProxyExcludeInboundPorts, pod) + excludeInboundPorts := splitCommaSeparatedItemsFromAnnotation(annotationTProxyExcludeInboundPorts, *pod) cfg.ExcludeInboundPorts = append(cfg.ExcludeInboundPorts, excludeInboundPorts...) // Outbound ports - excludeOutboundPorts := splitCommaSeparatedItemsFromAnnotation(constants.AnnotationTProxyExcludeOutboundPorts, pod) + excludeOutboundPorts := splitCommaSeparatedItemsFromAnnotation(annotationTProxyExcludeOutboundPorts, *pod) cfg.ExcludeOutboundPorts = append(cfg.ExcludeOutboundPorts, excludeOutboundPorts...) // Outbound CIDRs - excludeOutboundCIDRs := splitCommaSeparatedItemsFromAnnotation(constants.AnnotationTProxyExcludeOutboundCIDRs, pod) + excludeOutboundCIDRs := splitCommaSeparatedItemsFromAnnotation(annotationTProxyExcludeOutboundCIDRs, *pod) cfg.ExcludeOutboundCIDRs = append(cfg.ExcludeOutboundCIDRs, excludeOutboundCIDRs...) // UIDs - excludeUIDs := splitCommaSeparatedItemsFromAnnotation(constants.AnnotationTProxyExcludeUIDs, pod) + excludeUIDs := splitCommaSeparatedItemsFromAnnotation(annotationTProxyExcludeUIDs, *pod) cfg.ExcludeUIDs = append(cfg.ExcludeUIDs, excludeUIDs...) // Add init container user ID to exclude from traffic redirection. cfg.ExcludeUIDs = append(cfg.ExcludeUIDs, strconv.Itoa(initContainersUserAndGroupID)) - dnsEnabled, err := consulDNSEnabled(ns, pod, w.EnableConsulDNS) + dnsEnabled, err := consulDNSEnabled(ns, *pod, w.EnableConsulDNS) if err != nil { - return "", err + return err } + var consulDNSClusterIP string if dnsEnabled { // If Consul DNS is enabled, we find the environment variable that has the value // of the ClusterIP of the Consul DNS Service. constructDNSServiceHostName returns // the name of the env variable whose value is the ClusterIP of the Consul DNS Service. - cfg.ConsulDNSIP = consulDataplaneDNSBindHost - cfg.ConsulDNSPort = consulDataplaneDNSBindPort + consulDNSClusterIP = os.Getenv(w.constructDNSServiceHostName()) + if consulDNSClusterIP == "" { + return fmt.Errorf("environment variable %s not found", w.constructDNSServiceHostName()) + } + cfg.ConsulDNSIP = consulDNSClusterIP } iptablesConfigJson, err := json.Marshal(&cfg) if err != nil { - return "", fmt.Errorf("could not marshal iptables config: %w", err) - } - - return string(iptablesConfigJson), nil -} - -// addRedirectTrafficConfigAnnotation add the created iptables JSON config as an annotation on the provided pod. 
-func (w *MeshWebhook) addRedirectTrafficConfigAnnotation(pod *corev1.Pod, ns corev1.Namespace) error { - iptablesConfig, err := w.iptablesConfigJSON(*pod, ns) - if err != nil { - return err + return fmt.Errorf("could not marshal iptables config: %w", err) } - pod.Annotations[constants.AnnotationRedirectTraffic] = iptablesConfig + pod.Annotations[annotationRedirectTraffic] = string(iptablesConfigJson) return nil } diff --git a/control-plane/connect-inject/webhook/redirect_traffic_test.go b/control-plane/connect-inject/redirect_traffic_test.go similarity index 65% rename from control-plane/connect-inject/webhook/redirect_traffic_test.go rename to control-plane/connect-inject/redirect_traffic_test.go index 2ad9940fbe..1478786be5 100644 --- a/control-plane/connect-inject/webhook/redirect_traffic_test.go +++ b/control-plane/connect-inject/redirect_traffic_test.go @@ -1,15 +1,14 @@ -package webhook +package connectinject import ( "encoding/json" "fmt" + "os" "strconv" "testing" mapset "github.com/deckarep/golang-set" logrtest "github.com/go-logr/logr/testing" - "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" - "github.com/hashicorp/consul-k8s/control-plane/consul" "github.com/hashicorp/consul/sdk/iptables" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" @@ -23,6 +22,9 @@ import ( const ( defaultPodName = "fakePod" defaultNamespace = "default" + resourcePrefix = "CONSUL" + dnsEnvVariable = "CONSUL_DNS_SERVICE_HOST" + dnsIP = "127.0.0.1" ) func TestAddRedirectTrafficConfig(t *testing.T) { @@ -66,8 +68,8 @@ func TestAddRedirectTrafficConfig(t *testing.T) { }, expCfg: iptables.Config{ ConsulDNSIP: "", - ProxyUserID: strconv.Itoa(sidecarUserAndGroupID), - ProxyInboundPort: constants.ProxyDefaultInboundPort, + ProxyUserID: strconv.Itoa(envoyUserAndGroupID), + ProxyInboundPort: proxyDefaultInboundPort, ProxyOutboundPort: iptables.DefaultTProxyOutboundPort, ExcludeUIDs: []string{"5996"}, }, @@ -85,7 +87,7 @@ func TestAddRedirectTrafficConfig(t *testing.T) { Namespace: defaultNamespace, Name: defaultPodName, Annotations: map[string]string{ - constants.AnnotationUseProxyHealthCheck: "true", + annotationUseProxyHealthCheck: "true", }, }, Spec: corev1.PodSpec{ @@ -98,8 +100,8 @@ func TestAddRedirectTrafficConfig(t *testing.T) { }, expCfg: iptables.Config{ ConsulDNSIP: "", - ProxyUserID: strconv.Itoa(sidecarUserAndGroupID), - ProxyInboundPort: constants.ProxyDefaultInboundPort, + ProxyUserID: strconv.Itoa(envoyUserAndGroupID), + ProxyInboundPort: proxyDefaultInboundPort, ProxyOutboundPort: iptables.DefaultTProxyOutboundPort, ExcludeUIDs: []string{"5996"}, ExcludeInboundPorts: []string{"21000"}, @@ -118,8 +120,8 @@ func TestAddRedirectTrafficConfig(t *testing.T) { Namespace: defaultNamespace, Name: defaultPodName, Annotations: map[string]string{ - constants.AnnotationEnableMetrics: "true", - constants.AnnotationPrometheusScrapePort: "13373", + annotationEnableMetrics: "true", + annotationPrometheusScrapePort: "13373", }, }, Spec: corev1.PodSpec{ @@ -132,8 +134,8 @@ func TestAddRedirectTrafficConfig(t *testing.T) { }, expCfg: iptables.Config{ ConsulDNSIP: "", - ProxyUserID: strconv.Itoa(sidecarUserAndGroupID), - ProxyInboundPort: constants.ProxyDefaultInboundPort, + ProxyUserID: strconv.Itoa(envoyUserAndGroupID), + ProxyInboundPort: proxyDefaultInboundPort, ProxyOutboundPort: iptables.DefaultTProxyOutboundPort, ExcludeUIDs: []string{"5996"}, ExcludeInboundPorts: []string{"13373"}, @@ -152,8 +154,8 @@ func TestAddRedirectTrafficConfig(t *testing.T) { Namespace: 
defaultNamespace, Name: defaultPodName, Annotations: map[string]string{ - constants.AnnotationEnableMetrics: "invalid", - constants.AnnotationPrometheusScrapePort: "13373", + annotationEnableMetrics: "invalid", + annotationPrometheusScrapePort: "13373", }, }, Spec: corev1.PodSpec{ @@ -166,13 +168,13 @@ func TestAddRedirectTrafficConfig(t *testing.T) { }, expCfg: iptables.Config{ ConsulDNSIP: "", - ProxyUserID: strconv.Itoa(sidecarUserAndGroupID), - ProxyInboundPort: constants.ProxyDefaultInboundPort, + ProxyUserID: strconv.Itoa(envoyUserAndGroupID), + ProxyInboundPort: proxyDefaultInboundPort, ProxyOutboundPort: iptables.DefaultTProxyOutboundPort, ExcludeUIDs: []string{"5996"}, ExcludeInboundPorts: []string{"13373"}, }, - expErr: fmt.Errorf("%s annotation value of %s was invalid: %s", constants.AnnotationEnableMetrics, "invalid", "strconv.ParseBool: parsing \"invalid\": invalid syntax"), + expErr: fmt.Errorf("%s annotation value of %s was invalid: %s", annotationEnableMetrics, "invalid", "strconv.ParseBool: parsing \"invalid\": invalid syntax"), }, { name: "overwrite probes, transparent proxy annotation set", @@ -187,8 +189,8 @@ func TestAddRedirectTrafficConfig(t *testing.T) { Namespace: defaultNamespace, Name: defaultPodName, Annotations: map[string]string{ - constants.AnnotationTransparentProxyOverwriteProbes: "true", - constants.KeyTransparentProxy: "true", + annotationTransparentProxyOverwriteProbes: "true", + keyTransparentProxy: "true", }, }, Spec: corev1.PodSpec{ @@ -208,8 +210,8 @@ func TestAddRedirectTrafficConfig(t *testing.T) { }, expCfg: iptables.Config{ ConsulDNSIP: "", - ProxyUserID: strconv.Itoa(sidecarUserAndGroupID), - ProxyInboundPort: constants.ProxyDefaultInboundPort, + ProxyUserID: strconv.Itoa(envoyUserAndGroupID), + ProxyInboundPort: proxyDefaultInboundPort, ProxyOutboundPort: iptables.DefaultTProxyOutboundPort, ExcludeUIDs: []string{"5996"}, ExcludeInboundPorts: []string{strconv.Itoa(exposedPathsLivenessPortsRangeStart)}, @@ -228,7 +230,7 @@ func TestAddRedirectTrafficConfig(t *testing.T) { Namespace: defaultNamespace, Name: defaultPodName, Annotations: map[string]string{ - constants.AnnotationTProxyExcludeInboundPorts: "1111,11111", + annotationTProxyExcludeInboundPorts: "1111,11111", }, }, Spec: corev1.PodSpec{ @@ -241,8 +243,8 @@ func TestAddRedirectTrafficConfig(t *testing.T) { }, expCfg: iptables.Config{ ConsulDNSIP: "", - ProxyUserID: strconv.Itoa(sidecarUserAndGroupID), - ProxyInboundPort: constants.ProxyDefaultInboundPort, + ProxyUserID: strconv.Itoa(envoyUserAndGroupID), + ProxyInboundPort: proxyDefaultInboundPort, ProxyOutboundPort: iptables.DefaultTProxyOutboundPort, ExcludeUIDs: []string{"5996"}, ExcludeInboundPorts: []string{"1111", "11111"}, @@ -261,7 +263,7 @@ func TestAddRedirectTrafficConfig(t *testing.T) { Namespace: defaultNamespace, Name: defaultPodName, Annotations: map[string]string{ - constants.AnnotationTProxyExcludeOutboundPorts: "2222,22222", + annotationTProxyExcludeOutboundPorts: "2222,22222", }, }, Spec: corev1.PodSpec{ @@ -274,8 +276,8 @@ func TestAddRedirectTrafficConfig(t *testing.T) { }, expCfg: iptables.Config{ ConsulDNSIP: "", - ProxyUserID: strconv.Itoa(sidecarUserAndGroupID), - ProxyInboundPort: constants.ProxyDefaultInboundPort, + ProxyUserID: strconv.Itoa(envoyUserAndGroupID), + ProxyInboundPort: proxyDefaultInboundPort, ProxyOutboundPort: iptables.DefaultTProxyOutboundPort, ExcludeUIDs: []string{"5996"}, ExcludeOutboundPorts: []string{"2222", "22222"}, @@ -294,7 +296,7 @@ func TestAddRedirectTrafficConfig(t *testing.T) { 
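The exclude-list cases in this test all funnel through splitCommaSeparatedItemsFromAnnotation. A hypothetical equivalent of that helper, showing the expected shape of the parsed values; the exact trimming behavior is an assumption.

package main

import (
	"fmt"
	"strings"
)

// splitCommaSeparated approximates the helper the webhook uses: it trims
// whitespace around each item and drops empty entries so "1111, 11111" and
// "1111,11111" parse the same way.
func splitCommaSeparated(raw string) []string {
	var items []string
	for _, item := range strings.Split(raw, ",") {
		if v := strings.TrimSpace(item); v != "" {
			items = append(items, v)
		}
	}
	return items
}

func main() {
	fmt.Println(splitCommaSeparated("1111,11111"))          // [1111 11111]
	fmt.Println(splitCommaSeparated("3.3.3.3, 3.3.3.3/24")) // [3.3.3.3 3.3.3.3/24]
}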
Namespace: defaultNamespace, Name: defaultPodName, Annotations: map[string]string{ - constants.AnnotationTProxyExcludeOutboundCIDRs: "3.3.3.3,3.3.3.3/24", + annotationTProxyExcludeOutboundCIDRs: "3.3.3.3,3.3.3.3/24", }, }, Spec: corev1.PodSpec{ @@ -307,8 +309,8 @@ func TestAddRedirectTrafficConfig(t *testing.T) { }, expCfg: iptables.Config{ ConsulDNSIP: "", - ProxyUserID: strconv.Itoa(sidecarUserAndGroupID), - ProxyInboundPort: constants.ProxyDefaultInboundPort, + ProxyUserID: strconv.Itoa(envoyUserAndGroupID), + ProxyInboundPort: proxyDefaultInboundPort, ProxyOutboundPort: iptables.DefaultTProxyOutboundPort, ExcludeUIDs: []string{strconv.Itoa(initContainersUserAndGroupID)}, ExcludeOutboundCIDRs: []string{"3.3.3.3", "3.3.3.3/24"}, @@ -327,7 +329,7 @@ func TestAddRedirectTrafficConfig(t *testing.T) { Namespace: defaultNamespace, Name: defaultPodName, Annotations: map[string]string{ - constants.AnnotationTProxyExcludeUIDs: "4444,44444", + annotationTProxyExcludeUIDs: "4444,44444", }, }, Spec: corev1.PodSpec{ @@ -340,8 +342,8 @@ func TestAddRedirectTrafficConfig(t *testing.T) { }, expCfg: iptables.Config{ ConsulDNSIP: "", - ProxyUserID: strconv.Itoa(sidecarUserAndGroupID), - ProxyInboundPort: constants.ProxyDefaultInboundPort, + ProxyUserID: strconv.Itoa(envoyUserAndGroupID), + ProxyInboundPort: proxyDefaultInboundPort, ProxyOutboundPort: iptables.DefaultTProxyOutboundPort, ExcludeUIDs: []string{"4444", "44444", strconv.Itoa(initContainersUserAndGroupID)}, }, @@ -359,10 +361,10 @@ func TestAddRedirectTrafficConfig(t *testing.T) { Namespace: defaultNamespace, Name: defaultPodName, Annotations: map[string]string{ - constants.AnnotationTProxyExcludeInboundPorts: "1111,11111", - constants.AnnotationTProxyExcludeOutboundPorts: "2222,22222", - constants.AnnotationTProxyExcludeOutboundCIDRs: "3.3.3.3,3.3.3.3/24", - constants.AnnotationTProxyExcludeUIDs: "4444,44444", + annotationTProxyExcludeInboundPorts: "1111,11111", + annotationTProxyExcludeOutboundPorts: "2222,22222", + annotationTProxyExcludeOutboundCIDRs: "3.3.3.3,3.3.3.3/24", + annotationTProxyExcludeUIDs: "4444,44444", }, }, Spec: corev1.PodSpec{ @@ -374,8 +376,9 @@ func TestAddRedirectTrafficConfig(t *testing.T) { }, }, expCfg: iptables.Config{ - ProxyUserID: strconv.Itoa(sidecarUserAndGroupID), - ProxyInboundPort: constants.ProxyDefaultInboundPort, + ConsulDNSIP: "", + ProxyUserID: strconv.Itoa(envoyUserAndGroupID), + ProxyInboundPort: proxyDefaultInboundPort, ProxyOutboundPort: iptables.DefaultTProxyOutboundPort, ExcludeInboundPorts: []string{"1111", "11111"}, ExcludeOutboundPorts: []string{"2222", "22222"}, @@ -383,94 +386,94 @@ func TestAddRedirectTrafficConfig(t *testing.T) { ExcludeUIDs: []string{"4444", "44444", strconv.Itoa(initContainersUserAndGroupID)}, }, }, + { + name: "dns enabled", + dnsEnabled: true, + webhook: MeshWebhook{ + Log: logrtest.TestLogger{T: t}, + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSet(), + decoder: decoder, + ResourcePrefix: resourcePrefix, + }, + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: defaultNamespace, + Name: defaultPodName, + Annotations: map[string]string{ + keyConsulDNS: "true", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test", + }, + }, + }, + }, + expCfg: iptables.Config{ + ConsulDNSIP: dnsIP, + ProxyUserID: strconv.Itoa(envoyUserAndGroupID), + ProxyInboundPort: proxyDefaultInboundPort, + ProxyOutboundPort: iptables.DefaultTProxyOutboundPort, + ExcludeUIDs: 
[]string{strconv.Itoa(initContainersUserAndGroupID)}, + }, + }, + { + name: "dns annotation set but environment variable missing", + dnsEnabled: false, + webhook: MeshWebhook{ + Log: logrtest.TestLogger{T: t}, + AllowK8sNamespacesSet: mapset.NewSetWith("*"), + DenyK8sNamespacesSet: mapset.NewSet(), + decoder: decoder, + ResourcePrefix: resourcePrefix, + }, + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: defaultNamespace, + Name: defaultPodName, + Annotations: map[string]string{ + keyConsulDNS: "true", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test", + }, + }, + }, + }, + expCfg: iptables.Config{ + ConsulDNSIP: dnsIP, + ProxyUserID: strconv.Itoa(envoyUserAndGroupID), + ProxyInboundPort: proxyDefaultInboundPort, + ProxyOutboundPort: iptables.DefaultTProxyOutboundPort, + ExcludeUIDs: []string{strconv.Itoa(initContainersUserAndGroupID)}, + }, + expErr: fmt.Errorf("environment variable %s not found", dnsEnvVariable), + }, } for _, c := range cases { t.Run(c.name, func(t *testing.T) { - err = c.webhook.addRedirectTrafficConfigAnnotation(c.pod, c.namespace) + if c.dnsEnabled { + os.Setenv(dnsEnvVariable, dnsIP) + } else { + os.Setenv(dnsEnvVariable, "") + } + err := c.webhook.addRedirectTrafficConfigAnnotation(c.pod, c.namespace) + require.Equal(t, c.expErr, err) // Only compare annotation and iptables config on successful runs if c.expErr == nil { - require.NoError(t, err) - anno, ok := c.pod.Annotations[constants.AnnotationRedirectTraffic] + anno, ok := c.pod.Annotations[annotationRedirectTraffic] require.Equal(t, ok, true) actualConfig := iptables.Config{} - err = json.Unmarshal([]byte(anno), &actualConfig) - require.NoError(t, err) + json.Unmarshal([]byte(anno), &actualConfig) require.Equal(t, c.expCfg, actualConfig) - } else { - require.EqualError(t, err, c.expErr.Error()) - } - }) - } -} - -func TestRedirectTraffic_consulDNS(t *testing.T) { - cases := map[string]struct { - globalEnabled bool - annotations map[string]string - namespaceLabel map[string]string - expectConsulDNSConfig bool - }{ - "enabled globally, ns not set, annotation not provided": { - globalEnabled: true, - expectConsulDNSConfig: true, - }, - "enabled globally, ns not set, annotation is false": { - globalEnabled: true, - annotations: map[string]string{constants.KeyConsulDNS: "false"}, - expectConsulDNSConfig: false, - }, - "enabled globally, ns not set, annotation is true": { - globalEnabled: true, - annotations: map[string]string{constants.KeyConsulDNS: "true"}, - expectConsulDNSConfig: true, - }, - "disabled globally, ns not set, annotation not provided": { - expectConsulDNSConfig: false, - }, - "disabled globally, ns not set, annotation is false": { - annotations: map[string]string{constants.KeyConsulDNS: "false"}, - expectConsulDNSConfig: false, - }, - "disabled globally, ns not set, annotation is true": { - annotations: map[string]string{constants.KeyConsulDNS: "true"}, - expectConsulDNSConfig: true, - }, - "disabled globally, ns enabled, annotation not set": { - namespaceLabel: map[string]string{constants.KeyConsulDNS: "true"}, - expectConsulDNSConfig: true, - }, - "enabled globally, ns disabled, annotation not set": { - globalEnabled: true, - namespaceLabel: map[string]string{constants.KeyConsulDNS: "false"}, - expectConsulDNSConfig: false, - }, - } - for name, c := range cases { - t.Run(name, func(t *testing.T) { - w := MeshWebhook{ - EnableConsulDNS: c.globalEnabled, - EnableTransparentProxy: true, - ConsulConfig: &consul.Config{HTTPPort: 8500}, - } - - pod 
:= minimal() - pod.Annotations = c.annotations - - ns := testNS - ns.Labels = c.namespaceLabel - iptablesConfig, err := w.iptablesConfigJSON(*pod, ns) - require.NoError(t, err) - - actualConfig := iptables.Config{} - err = json.Unmarshal([]byte(iptablesConfig), &actualConfig) - require.NoError(t, err) - if c.expectConsulDNSConfig { - require.Equal(t, "127.0.0.1", actualConfig.ConsulDNSIP) - require.Equal(t, 8600, actualConfig.ConsulDNSPort) - } else { - require.Empty(t, actualConfig.ConsulDNSIP) } }) } diff --git a/control-plane/connect-inject/webhook/consul_dataplane_sidecar.go b/control-plane/connect-inject/webhook/consul_dataplane_sidecar.go deleted file mode 100644 index ad3333ba1b..0000000000 --- a/control-plane/connect-inject/webhook/consul_dataplane_sidecar.go +++ /dev/null @@ -1,432 +0,0 @@ -package webhook - -import ( - "encoding/json" - "fmt" - "strconv" - "strings" - - "github.com/google/shlex" - "github.com/hashicorp/consul-k8s/control-plane/connect-inject/common" - "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/utils/pointer" -) - -const ( - consulDataplaneDNSBindHost = "127.0.0.1" - consulDataplaneDNSBindPort = 8600 -) - -func (w *MeshWebhook) consulDataplaneSidecar(namespace corev1.Namespace, pod corev1.Pod, mpi multiPortInfo) (corev1.Container, error) { - resources, err := w.sidecarResources(pod) - if err != nil { - return corev1.Container{}, err - } - - // Extract the service account token's volume mount. - var bearerTokenFile string - var saTokenVolumeMount corev1.VolumeMount - if w.AuthMethod != "" { - saTokenVolumeMount, bearerTokenFile, err = findServiceAccountVolumeMount(pod, mpi.serviceName) - if err != nil { - return corev1.Container{}, err - } - } - - multiPort := mpi.serviceName != "" - args, err := w.getContainerSidecarArgs(namespace, mpi, bearerTokenFile, pod) - if err != nil { - return corev1.Container{}, err - } - - containerName := sidecarContainer - if multiPort { - containerName = fmt.Sprintf("%s-%s", sidecarContainer, mpi.serviceName) - } - - var probe *corev1.Probe - if useProxyHealthCheck(pod) { - // If using the proxy health check for a service, configure an HTTP handler - // that queries the '/ready' endpoint of the proxy. - probe = &corev1.Probe{ - Handler: corev1.Handler{ - HTTPGet: &corev1.HTTPGetAction{ - Port: intstr.FromInt(constants.ProxyDefaultHealthPort + mpi.serviceIndex), - Path: "/ready", - }, - }, - InitialDelaySeconds: 1, - } - } else { - probe = &corev1.Probe{ - Handler: corev1.Handler{ - TCPSocket: &corev1.TCPSocketAction{ - Port: intstr.FromInt(constants.ProxyDefaultInboundPort + mpi.serviceIndex), - }, - }, - InitialDelaySeconds: 1, - } - } - - container := corev1.Container{ - Name: containerName, - Image: w.ImageConsulDataplane, - Resources: resources, - // We need to set tmp dir to an ephemeral volume that we're mounting so that - // consul-dataplane can write files to it. Otherwise, it wouldn't be able to - // because we set file system to be read-only. 
- Env: []corev1.EnvVar{ - { - Name: "TMPDIR", - Value: "/consul/connect-inject", - }, - { - Name: "NODE_NAME", - ValueFrom: &corev1.EnvVarSource{ - FieldRef: &corev1.ObjectFieldSelector{ - FieldPath: "spec.nodeName", - }, - }, - }, - { - Name: "DP_SERVICE_NODE_NAME", - Value: "$(NODE_NAME)-virtual", - }, - }, - VolumeMounts: []corev1.VolumeMount{ - { - Name: volumeName, - MountPath: "/consul/connect-inject", - }, - }, - Args: args, - ReadinessProbe: probe, - } - - if w.AuthMethod != "" { - container.VolumeMounts = append(container.VolumeMounts, saTokenVolumeMount) - } - - if useProxyHealthCheck(pod) { - // Configure the Readiness Address for the proxy's health check to be the Pod IP. - container.Env = append(container.Env, corev1.EnvVar{ - Name: "DP_ENVOY_READY_BIND_ADDRESS", - ValueFrom: &corev1.EnvVarSource{ - FieldRef: &corev1.ObjectFieldSelector{FieldPath: "status.podIP"}, - }, - }) - // Configure the port on which the readiness probe will query the proxy for its health. - container.Ports = append(container.Ports, corev1.ContainerPort{ - Name: fmt.Sprintf("%s-%d", "proxy-health", mpi.serviceIndex), - ContainerPort: int32(constants.ProxyDefaultHealthPort + mpi.serviceIndex), - }) - } - - // Add any extra VolumeMounts. - if userVolMount, ok := pod.Annotations[constants.AnnotationConsulSidecarUserVolumeMount]; ok { - var volumeMounts []corev1.VolumeMount - err := json.Unmarshal([]byte(userVolMount), &volumeMounts) - if err != nil { - return corev1.Container{}, err - } - container.VolumeMounts = append(container.VolumeMounts, volumeMounts...) - } - - tproxyEnabled, err := common.TransparentProxyEnabled(namespace, pod, w.EnableTransparentProxy) - if err != nil { - return corev1.Container{}, err - } - - // If not running in transparent proxy mode and in an OpenShift environment, - // skip setting the security context and let OpenShift set it for us. - // When transparent proxy is enabled, then consul-dataplane needs to run as our specific user - // so that traffic redirection will work. - if tproxyEnabled || !w.EnableOpenShift { - if pod.Spec.SecurityContext != nil { - // User container and consul-dataplane container cannot have the same UID. - if pod.Spec.SecurityContext.RunAsUser != nil && *pod.Spec.SecurityContext.RunAsUser == sidecarUserAndGroupID { - return corev1.Container{}, fmt.Errorf("pod's security context cannot have the same UID as consul-dataplane: %v", sidecarUserAndGroupID) - } - } - // Ensure that none of the user's containers have the same UID as consul-dataplane. At this point in injection the meshWebhook - // has only injected init containers so all containers defined in pod.Spec.Containers are from the user. - for _, c := range pod.Spec.Containers { - // User container and consul-dataplane container cannot have the same UID. 
- if c.SecurityContext != nil && c.SecurityContext.RunAsUser != nil && *c.SecurityContext.RunAsUser == sidecarUserAndGroupID && c.Image != w.ImageConsulDataplane { - return corev1.Container{}, fmt.Errorf("container %q has runAsUser set to the same UID \"%d\" as consul-dataplane which is not allowed", c.Name, sidecarUserAndGroupID) - } - } - container.SecurityContext = &corev1.SecurityContext{ - RunAsUser: pointer.Int64(sidecarUserAndGroupID), - RunAsGroup: pointer.Int64(sidecarUserAndGroupID), - RunAsNonRoot: pointer.Bool(true), - ReadOnlyRootFilesystem: pointer.Bool(true), - } - } - - return container, nil -} - -func (w *MeshWebhook) getContainerSidecarArgs(namespace corev1.Namespace, mpi multiPortInfo, bearerTokenFile string, pod corev1.Pod) ([]string, error) { - proxyIDFileName := "/consul/connect-inject/proxyid" - if mpi.serviceName != "" { - proxyIDFileName = fmt.Sprintf("/consul/connect-inject/proxyid-%s", mpi.serviceName) - } - - envoyConcurrency := w.DefaultEnvoyProxyConcurrency - - // Check to see if the user has overriden concurrency via an annotation. - if envoyConcurrencyAnnotation, ok := pod.Annotations[constants.AnnotationEnvoyProxyConcurrency]; ok { - val, err := strconv.ParseUint(envoyConcurrencyAnnotation, 10, 64) - if err != nil { - return nil, fmt.Errorf("unable to parse annotation %q: %w", constants.AnnotationEnvoyProxyConcurrency, err) - } - envoyConcurrency = int(val) - } - - args := []string{ - "-addresses", w.ConsulAddress, - "-grpc-port=" + strconv.Itoa(w.ConsulConfig.GRPCPort), - "-proxy-service-id-path=" + proxyIDFileName, - "-log-level=" + w.LogLevel, - "-log-json=" + strconv.FormatBool(w.LogJSON), - "-envoy-concurrency=" + strconv.Itoa(envoyConcurrency), - } - - if w.SkipServerWatch { - args = append(args, "-server-watch-disabled=true") - } - - if w.AuthMethod != "" { - args = append(args, - "-credential-type=login", - "-login-auth-method="+w.AuthMethod, - "-login-bearer-token-path="+bearerTokenFile, - "-login-meta="+fmt.Sprintf("pod=%s/%s", namespace.Name, pod.Name), - ) - if w.EnableNamespaces { - if w.EnableK8SNSMirroring { - args = append(args, "-login-namespace=default") - } else { - args = append(args, "-login-namespace="+w.consulNamespace(namespace.Name)) - } - } - if w.ConsulPartition != "" { - args = append(args, "-login-partition="+w.ConsulPartition) - } - } - if w.EnableNamespaces { - args = append(args, "-service-namespace="+w.consulNamespace(namespace.Name)) - } - if w.ConsulPartition != "" { - args = append(args, "-service-partition="+w.ConsulPartition) - } - if w.TLSEnabled { - if w.ConsulTLSServerName != "" { - args = append(args, "-tls-server-name="+w.ConsulTLSServerName) - } - if w.ConsulCACert != "" { - args = append(args, "-ca-certs="+constants.ConsulCAFile) - } - } else { - args = append(args, "-tls-disabled") - } - - // Configure the readiness port on the dataplane sidecar if proxy health checks are enabled. - if useProxyHealthCheck(pod) { - args = append(args, fmt.Sprintf("%s=%d", "-envoy-ready-bind-port", constants.ProxyDefaultHealthPort+mpi.serviceIndex)) - } - - if mpi.serviceName != "" { - args = append(args, fmt.Sprintf("-envoy-admin-bind-port=%d", 19000+mpi.serviceIndex)) - } - - // Set a default scrape path that can be overwritten by the annotation. 
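getContainerSidecarArgs builds the dataplane command line incrementally, so each feature only contributes flags when it is actually configured. A compressed sketch of that pattern using a handful of the real flags; the sidecarConfig struct is an illustrative stand-in for the MeshWebhook fields, not a type from this codebase.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

type sidecarConfig struct {
	ConsulAddress string
	GRPCPort      int
	AuthMethod    string
	TLSEnabled    bool
}

func buildArgs(c sidecarConfig) []string {
	// Mandatory flags come first; optional features append conditionally.
	args := []string{
		"-addresses", c.ConsulAddress,
		"-grpc-port=" + strconv.Itoa(c.GRPCPort),
	}
	if c.AuthMethod != "" {
		args = append(args, "-credential-type=login", "-login-auth-method="+c.AuthMethod)
	}
	if !c.TLSEnabled {
		args = append(args, "-tls-disabled")
	}
	return args
}

func main() {
	args := buildArgs(sidecarConfig{ConsulAddress: "1.1.1.1", GRPCPort: 8502})
	fmt.Println(strings.Join(args, " "))
	// Output: -addresses 1.1.1.1 -grpc-port=8502 -tls-disabled
}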
- prometheusScrapePath := w.MetricsConfig.PrometheusScrapePath(pod) - args = append(args, "-telemetry-prom-scrape-path="+prometheusScrapePath) - - metricsServer, err := w.MetricsConfig.ShouldRunMergedMetricsServer(pod) - if err != nil { - return nil, fmt.Errorf("unable to determine if merged metrics is enabled: %w", err) - } - if metricsServer { - mergedMetricsPort, err := w.MetricsConfig.MergedMetricsPort(pod) - if err != nil { - return nil, fmt.Errorf("unable to determine if merged metrics port: %w", err) - } - args = append(args, "-telemetry-prom-merge-port="+mergedMetricsPort) - - serviceMetricsPath := w.MetricsConfig.ServiceMetricsPath(pod) - serviceMetricsPort, err := w.MetricsConfig.ServiceMetricsPort(pod) - if err != nil { - return nil, fmt.Errorf("unable to determine if service metrics port: %w", err) - } - - if serviceMetricsPath != "" && serviceMetricsPort != "" { - args = append(args, "-telemetry-prom-service-metrics-url="+fmt.Sprintf("http://127.0.0.1:%s%s", serviceMetricsPort, serviceMetricsPath)) - } - - // Pull the TLS config from the relevant annotations. - var prometheusCAFile string - if raw, ok := pod.Annotations[constants.AnnotationPrometheusCAFile]; ok && raw != "" { - prometheusCAFile = raw - } - - var prometheusCAPath string - if raw, ok := pod.Annotations[constants.AnnotationPrometheusCAPath]; ok && raw != "" { - prometheusCAPath = raw - } - - var prometheusCertFile string - if raw, ok := pod.Annotations[constants.AnnotationPrometheusCertFile]; ok && raw != "" { - prometheusCertFile = raw - } - - var prometheusKeyFile string - if raw, ok := pod.Annotations[constants.AnnotationPrometheusKeyFile]; ok && raw != "" { - prometheusKeyFile = raw - } - - // Validate required Prometheus TLS config is present if set. - if prometheusCAFile != "" || prometheusCAPath != "" || prometheusCertFile != "" || prometheusKeyFile != "" { - if prometheusCAFile == "" && prometheusCAPath == "" { - return nil, fmt.Errorf("must set one of %q or %q when providing prometheus TLS config", constants.AnnotationPrometheusCAFile, constants.AnnotationPrometheusCAPath) - } - if prometheusCertFile == "" { - return nil, fmt.Errorf("must set %q when providing prometheus TLS config", constants.AnnotationPrometheusCertFile) - } - if prometheusKeyFile == "" { - return nil, fmt.Errorf("must set %q when providing prometheus TLS config", constants.AnnotationPrometheusKeyFile) - } - // TLS config has been validated, add them to the consul-dataplane cmd args - args = append(args, "-telemetry-prom-ca-certs-file="+prometheusCAFile, - "-telemetry-prom-ca-certs-path="+prometheusCAPath, - "-telemetry-prom-cert-file="+prometheusCertFile, - "-telemetry-prom-key-file="+prometheusKeyFile) - } - } - - // If Consul DNS is enabled, we want to configure consul-dataplane to be the DNS proxy - // for Consul DNS in the pod. - if w.EnableConsulDNS { - args = append(args, "-consul-dns-bind-port="+strconv.Itoa(consulDataplaneDNSBindPort)) - } - - var envoyExtraArgs []string - extraArgs, annotationSet := pod.Annotations[constants.AnnotationEnvoyExtraArgs] - // --base-id is an envoy arg rather than consul-dataplane, and so we need to make sure we're passing it - // last separated by the --. - if mpi.serviceName != "" { - // --base-id is needed so multiple Envoy proxies can run on the same host. 
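The Prometheus TLS annotations above are validated as an all-or-nothing group: once any of them is set, a CA source (file or path), a cert, and a key must all be present. A standalone sketch of that check; the function and parameter names are illustrative, and the error strings are paraphrased.

package main

import (
	"errors"
	"fmt"
)

// validatePromTLS mirrors the all-or-nothing validation in the removed code.
func validatePromTLS(caFile, caPath, certFile, keyFile string) error {
	if caFile == "" && caPath == "" && certFile == "" && keyFile == "" {
		return nil // no TLS config requested at all
	}
	if caFile == "" && caPath == "" {
		return errors.New("must set a CA file or CA path when providing prometheus TLS config")
	}
	if certFile == "" {
		return errors.New("must set a cert file when providing prometheus TLS config")
	}
	if keyFile == "" {
		return errors.New("must set a key file when providing prometheus TLS config")
	}
	return nil
}

func main() {
	fmt.Println(validatePromTLS("/certs/ca.crt", "", "/certs/server.crt", "/certs/key.pem")) // <nil>
	fmt.Println(validatePromTLS("/certs/ca.crt", "", "", ""))                                // cert missing
}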
- envoyExtraArgs = append(envoyExtraArgs, "--base-id", fmt.Sprintf("%d", mpi.serviceIndex)) - } - - if annotationSet || w.EnvoyExtraArgs != "" { - extraArgsToUse := w.EnvoyExtraArgs - - // Prefer args set by pod annotation over the flag to the consul-k8s binary (h.EnvoyExtraArgs). - if annotationSet { - extraArgsToUse = extraArgs - } - - // Split string into tokens. - // e.g. "--foo bar --boo baz" --> ["--foo", "bar", "--boo", "baz"] - tokens, err := shlex.Split(extraArgsToUse) - if err != nil { - return []string{}, err - } - for _, t := range tokens { - if strings.Contains(t, " ") { - t = strconv.Quote(t) - } - envoyExtraArgs = append(envoyExtraArgs, t) - } - } - if envoyExtraArgs != nil { - args = append(args, "--") - args = append(args, envoyExtraArgs...) - } - return args, nil -} - -func (w *MeshWebhook) sidecarResources(pod corev1.Pod) (corev1.ResourceRequirements, error) { - resources := corev1.ResourceRequirements{ - Limits: corev1.ResourceList{}, - Requests: corev1.ResourceList{}, - } - // zeroQuantity is used for comparison to see if a quantity was explicitly - // set. - var zeroQuantity resource.Quantity - - // NOTE: We only want to set the limit/request if the default or annotation - // was explicitly set. If it's not explicitly set, it will be the zero value - // which would show up in the pod spec as being explicitly set to zero if we - // set that key, e.g. "cpu" to zero. - // We want it to not show up in the pod spec at all if it's not explicitly - // set so that users aren't wondering why it's set to 0 when they didn't specify - // a request/limit. If they have explicitly set it to 0 then it will be set - // to 0 in the pod spec because we're doing a comparison to the zero-valued - // struct. - - // CPU Limit. - if anno, ok := pod.Annotations[constants.AnnotationSidecarProxyCPULimit]; ok { - cpuLimit, err := resource.ParseQuantity(anno) - if err != nil { - return corev1.ResourceRequirements{}, fmt.Errorf("parsing annotation %s:%q: %s", constants.AnnotationSidecarProxyCPULimit, anno, err) - } - resources.Limits[corev1.ResourceCPU] = cpuLimit - } else if w.DefaultProxyCPULimit != zeroQuantity { - resources.Limits[corev1.ResourceCPU] = w.DefaultProxyCPULimit - } - - // CPU Request. - if anno, ok := pod.Annotations[constants.AnnotationSidecarProxyCPURequest]; ok { - cpuRequest, err := resource.ParseQuantity(anno) - if err != nil { - return corev1.ResourceRequirements{}, fmt.Errorf("parsing annotation %s:%q: %s", constants.AnnotationSidecarProxyCPURequest, anno, err) - } - resources.Requests[corev1.ResourceCPU] = cpuRequest - } else if w.DefaultProxyCPURequest != zeroQuantity { - resources.Requests[corev1.ResourceCPU] = w.DefaultProxyCPURequest - } - - // Memory Limit. - if anno, ok := pod.Annotations[constants.AnnotationSidecarProxyMemoryLimit]; ok { - memoryLimit, err := resource.ParseQuantity(anno) - if err != nil { - return corev1.ResourceRequirements{}, fmt.Errorf("parsing annotation %s:%q: %s", constants.AnnotationSidecarProxyMemoryLimit, anno, err) - } - resources.Limits[corev1.ResourceMemory] = memoryLimit - } else if w.DefaultProxyMemoryLimit != zeroQuantity { - resources.Limits[corev1.ResourceMemory] = w.DefaultProxyMemoryLimit - } - - // Memory Request. 
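The long NOTE above hinges on resource.Quantity's zero value being distinguishable from an explicitly parsed "0": a parsed quantity records the string it came from, so the zero-value comparison tells "default unset" apart from "default set to 0". A small sketch of that distinction, with illustrative values.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	var zero resource.Quantity                // never parsed: the unset default
	explicitZero := resource.MustParse("0")   // parsed: user really asked for 0
	cpu := resource.MustParse("100m")

	limits := corev1.ResourceList{}
	if cpu != zero {
		limits[corev1.ResourceCPU] = cpu
	}
	if explicitZero != zero { // true, so "0" shows up in the pod spec
		limits[corev1.ResourceMemory] = explicitZero
	}
	for name, q := range limits {
		fmt.Printf("%s=%s\n", name, q.String()) // cpu=100m, memory=0
	}
}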
- if anno, ok := pod.Annotations[constants.AnnotationSidecarProxyMemoryRequest]; ok { - memoryRequest, err := resource.ParseQuantity(anno) - if err != nil { - return corev1.ResourceRequirements{}, fmt.Errorf("parsing annotation %s:%q: %s", constants.AnnotationSidecarProxyMemoryRequest, anno, err) - } - resources.Requests[corev1.ResourceMemory] = memoryRequest - } else if w.DefaultProxyMemoryRequest != zeroQuantity { - resources.Requests[corev1.ResourceMemory] = w.DefaultProxyMemoryRequest - } - - return resources, nil -} - -// useProxyHealthCheck returns true if the pod has the annotation 'consul.hashicorp.com/use-proxy-health-check' -// set to truthy values. -func useProxyHealthCheck(pod corev1.Pod) bool { - if v, ok := pod.Annotations[constants.AnnotationUseProxyHealthCheck]; ok { - useProxyHealthCheck, err := strconv.ParseBool(v) - if err != nil { - return false - } - return useProxyHealthCheck - } - return false -} diff --git a/control-plane/connect-inject/webhook/consul_dataplane_sidecar_test.go b/control-plane/connect-inject/webhook/consul_dataplane_sidecar_test.go deleted file mode 100644 index 37aa1619bf..0000000000 --- a/control-plane/connect-inject/webhook/consul_dataplane_sidecar_test.go +++ /dev/null @@ -1,1201 +0,0 @@ -package webhook - -import ( - "fmt" - "strconv" - "strings" - "testing" - - "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" - "github.com/hashicorp/consul-k8s/control-plane/consul" - "github.com/stretchr/testify/require" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/utils/pointer" -) - -const nodeName = "test-node" - -func TestHandlerConsulDataplaneSidecar(t *testing.T) { - cases := map[string]struct { - webhookSetupFunc func(w *MeshWebhook) - additionalExpCmdArgs string - }{ - "default": { - webhookSetupFunc: nil, - additionalExpCmdArgs: " -tls-disabled -telemetry-prom-scrape-path=/metrics", - }, - "with custom gRPC port": { - webhookSetupFunc: func(w *MeshWebhook) { - w.ConsulConfig.GRPCPort = 8602 - }, - additionalExpCmdArgs: " -tls-disabled -telemetry-prom-scrape-path=/metrics", - }, - "with ACLs": { - webhookSetupFunc: func(w *MeshWebhook) { - w.AuthMethod = "test-auth-method" - }, - additionalExpCmdArgs: " -credential-type=login -login-auth-method=test-auth-method -login-bearer-token-path=/var/run/secrets/kubernetes.io/serviceaccount/token " + - "-login-meta=pod=k8snamespace/test-pod -tls-disabled -telemetry-prom-scrape-path=/metrics", - }, - "with ACLs and namespace mirroring": { - webhookSetupFunc: func(w *MeshWebhook) { - w.AuthMethod = "test-auth-method" - w.EnableNamespaces = true - w.EnableK8SNSMirroring = true - }, - additionalExpCmdArgs: " -credential-type=login -login-auth-method=test-auth-method -login-bearer-token-path=/var/run/secrets/kubernetes.io/serviceaccount/token " + - "-login-meta=pod=k8snamespace/test-pod -login-namespace=default -service-namespace=k8snamespace -tls-disabled -telemetry-prom-scrape-path=/metrics", - }, - "with ACLs and single destination namespace": { - webhookSetupFunc: func(w *MeshWebhook) { - w.AuthMethod = "test-auth-method" - w.EnableNamespaces = true - w.ConsulDestinationNamespace = "test-ns" - }, - additionalExpCmdArgs: " -credential-type=login -login-auth-method=test-auth-method -login-bearer-token-path=/var/run/secrets/kubernetes.io/serviceaccount/token " + - "-login-meta=pod=k8snamespace/test-pod -login-namespace=test-ns -service-namespace=test-ns -tls-disabled 
-telemetry-prom-scrape-path=/metrics", - }, - "with ACLs and partitions": { - webhookSetupFunc: func(w *MeshWebhook) { - w.AuthMethod = "test-auth-method" - w.ConsulPartition = "test-part" - }, - additionalExpCmdArgs: " -credential-type=login -login-auth-method=test-auth-method -login-bearer-token-path=/var/run/secrets/kubernetes.io/serviceaccount/token " + - "-login-meta=pod=k8snamespace/test-pod -login-partition=test-part -service-partition=test-part -tls-disabled -telemetry-prom-scrape-path=/metrics", - }, - "with TLS and CA cert provided": { - webhookSetupFunc: func(w *MeshWebhook) { - w.TLSEnabled = true - w.ConsulTLSServerName = "server.dc1.consul" - w.ConsulCACert = "consul-ca-cert" - }, - additionalExpCmdArgs: " -tls-server-name=server.dc1.consul -ca-certs=/consul/connect-inject/consul-ca.pem -telemetry-prom-scrape-path=/metrics", - }, - "with TLS and no CA cert provided": { - webhookSetupFunc: func(w *MeshWebhook) { - w.TLSEnabled = true - w.ConsulTLSServerName = "server.dc1.consul" - }, - additionalExpCmdArgs: " -tls-server-name=server.dc1.consul -telemetry-prom-scrape-path=/metrics", - }, - "with single destination namespace": { - webhookSetupFunc: func(w *MeshWebhook) { - w.EnableNamespaces = true - w.ConsulDestinationNamespace = "consul-namespace" - }, - additionalExpCmdArgs: " -service-namespace=consul-namespace -tls-disabled -telemetry-prom-scrape-path=/metrics", - }, - "with namespace mirroring": { - webhookSetupFunc: func(w *MeshWebhook) { - w.EnableNamespaces = true - w.EnableK8SNSMirroring = true - }, - additionalExpCmdArgs: " -service-namespace=k8snamespace -tls-disabled -telemetry-prom-scrape-path=/metrics", - }, - "with namespace mirroring prefix": { - webhookSetupFunc: func(w *MeshWebhook) { - w.EnableNamespaces = true - w.EnableK8SNSMirroring = true - w.K8SNSMirroringPrefix = "foo-" - }, - additionalExpCmdArgs: " -service-namespace=foo-k8snamespace -tls-disabled -telemetry-prom-scrape-path=/metrics", - }, - "with partitions": { - webhookSetupFunc: func(w *MeshWebhook) { - w.ConsulPartition = "partition-1" - }, - additionalExpCmdArgs: " -service-partition=partition-1 -tls-disabled -telemetry-prom-scrape-path=/metrics", - }, - "with different log level": { - webhookSetupFunc: func(w *MeshWebhook) { - w.LogLevel = "debug" - }, - additionalExpCmdArgs: " -tls-disabled -telemetry-prom-scrape-path=/metrics", - }, - "with different log level and log json": { - webhookSetupFunc: func(w *MeshWebhook) { - w.LogLevel = "debug" - w.LogJSON = true - }, - additionalExpCmdArgs: " -tls-disabled -telemetry-prom-scrape-path=/metrics", - }, - "skip server watch enabled": { - webhookSetupFunc: func(w *MeshWebhook) { - w.SkipServerWatch = true - }, - additionalExpCmdArgs: " -server-watch-disabled=true -tls-disabled -telemetry-prom-scrape-path=/metrics", - }, - "custom prometheus scrape path": { - webhookSetupFunc: func(w *MeshWebhook) { - w.MetricsConfig.DefaultPrometheusScrapePath = "/scrape-path" // Simulate what would be passed as a flag - }, - additionalExpCmdArgs: " -tls-disabled -telemetry-prom-scrape-path=/scrape-path", - }, - } - - for name, c := range cases { - t.Run(name, func(t *testing.T) { - w := &MeshWebhook{ - ConsulAddress: "1.1.1.1", - ConsulConfig: &consul.Config{GRPCPort: 8502}, - LogLevel: "info", - LogJSON: false, - } - if c.webhookSetupFunc != nil { - c.webhookSetupFunc(w) - } - pod := corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pod", - Annotations: map[string]string{ - constants.AnnotationService: "foo", - }, - }, - - Spec: corev1.PodSpec{ - 
Containers: []corev1.Container{ - { - Name: "web", - }, - { - Name: "web-side", - }, - { - Name: "auth-method-secret", - VolumeMounts: []corev1.VolumeMount{ - { - Name: "service-account-secret", - MountPath: "/var/run/secrets/kubernetes.io/serviceaccount", - }, - }, - }, - }, - ServiceAccountName: "web", - NodeName: nodeName, - }, - } - - container, err := w.consulDataplaneSidecar(testNS, pod, multiPortInfo{}) - require.NoError(t, err) - expCmd := "-addresses 1.1.1.1 -grpc-port=" + strconv.Itoa(w.ConsulConfig.GRPCPort) + - " -proxy-service-id-path=/consul/connect-inject/proxyid " + - "-log-level=" + w.LogLevel + " -log-json=" + strconv.FormatBool(w.LogJSON) + " -envoy-concurrency=0" + c.additionalExpCmdArgs - require.Equal(t, expCmd, strings.Join(container.Args, " ")) - - if w.AuthMethod != "" { - require.Equal(t, container.VolumeMounts, []corev1.VolumeMount{ - { - Name: volumeName, - MountPath: "/consul/connect-inject", - }, - { - Name: "service-account-secret", - MountPath: "/var/run/secrets/kubernetes.io/serviceaccount", - }, - }) - } else { - require.Equal(t, container.VolumeMounts, []corev1.VolumeMount{ - { - Name: volumeName, - MountPath: "/consul/connect-inject", - }, - }) - } - - expectedProbe := &corev1.Probe{ - Handler: corev1.Handler{ - TCPSocket: &corev1.TCPSocketAction{ - Port: intstr.FromInt(constants.ProxyDefaultInboundPort), - }, - }, - InitialDelaySeconds: 1, - } - require.Equal(t, expectedProbe, container.ReadinessProbe) - require.Nil(t, container.StartupProbe) - require.Len(t, container.Env, 3) - require.Equal(t, container.Env[0].Name, "TMPDIR") - require.Equal(t, container.Env[0].Value, "/consul/connect-inject") - require.Equal(t, container.Env[2].Name, "DP_SERVICE_NODE_NAME") - require.Equal(t, container.Env[2].Value, "$(NODE_NAME)-virtual") - }) - } -} - -func TestHandlerConsulDataplaneSidecar_Concurrency(t *testing.T) { - cases := map[string]struct { - annotations map[string]string - expFlags string - expErr string - }{ - "default settings, no annotations": { - annotations: map[string]string{ - constants.AnnotationService: "foo", - }, - expFlags: "-envoy-concurrency=0", - }, - "default settings, annotation override": { - annotations: map[string]string{ - constants.AnnotationService: "foo", - constants.AnnotationEnvoyProxyConcurrency: "42", - }, - expFlags: "-envoy-concurrency=42", - }, - "default settings, invalid concurrency annotation negative number": { - annotations: map[string]string{ - constants.AnnotationService: "foo", - constants.AnnotationEnvoyProxyConcurrency: "-42", - }, - expErr: "unable to parse annotation \"consul.hashicorp.com/consul-envoy-proxy-concurrency\": strconv.ParseUint: parsing \"-42\": invalid syntax", - }, - "default settings, not-parseable concurrency annotation": { - annotations: map[string]string{ - constants.AnnotationService: "foo", - constants.AnnotationEnvoyProxyConcurrency: "not-int", - }, - expErr: "unable to parse annotation \"consul.hashicorp.com/consul-envoy-proxy-concurrency\": strconv.ParseUint: parsing \"not-int\": invalid syntax", - }, - } - - for name, c := range cases { - t.Run(name, func(t *testing.T) { - h := MeshWebhook{ - ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 8502}, - } - pod := corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: c.annotations, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "web", - }, - }, - }, - } - container, err := h.consulDataplaneSidecar(testNS, pod, multiPortInfo{}) - if c.expErr != "" { - require.EqualError(t, err, c.expErr) - } else { - 
require.NoError(t, err) - require.Contains(t, strings.Join(container.Args, " "), c.expFlags) - } - }) - } -} - -func TestHandlerConsulDataplaneSidecar_DNSProxy(t *testing.T) { - h := MeshWebhook{ - ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 8502}, - EnableConsulDNS: true, - } - pod := corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{}, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "web", - }, - }, - }, - } - container, err := h.consulDataplaneSidecar(testNS, pod, multiPortInfo{}) - require.NoError(t, err) - require.Contains(t, container.Args, "-consul-dns-bind-port=8600") -} - -func TestHandlerConsulDataplaneSidecar_ProxyHealthCheck(t *testing.T) { - h := MeshWebhook{ - ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 8502}, - ConsulAddress: "1.1.1.1", - LogLevel: "info", - } - pod := corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - constants.AnnotationUseProxyHealthCheck: "true", - }, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "web", - }, - }, - }, - } - container, err := h.consulDataplaneSidecar(testNS, pod, multiPortInfo{}) - expectedProbe := &corev1.Probe{ - Handler: corev1.Handler{ - HTTPGet: &corev1.HTTPGetAction{ - Port: intstr.FromInt(21000), - Path: "/ready", - }, - }, - InitialDelaySeconds: 1, - } - require.NoError(t, err) - require.Contains(t, container.Args, "-envoy-ready-bind-port=21000") - require.Equal(t, expectedProbe, container.ReadinessProbe) - require.Contains(t, container.Env, corev1.EnvVar{ - Name: "DP_ENVOY_READY_BIND_ADDRESS", - ValueFrom: &corev1.EnvVarSource{ - FieldRef: &corev1.ObjectFieldSelector{FieldPath: "status.podIP"}, - }, - }) - require.Contains(t, container.Ports, corev1.ContainerPort{ - Name: "proxy-health-0", - ContainerPort: 21000, - }) -} - -func TestHandlerConsulDataplaneSidecar_ProxyHealthCheck_Multiport(t *testing.T) { - h := MeshWebhook{ - ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 8502}, - ConsulAddress: "1.1.1.1", - LogLevel: "info", - } - pod := corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pod", - Annotations: map[string]string{ - constants.AnnotationService: "web,web-admin", - constants.AnnotationUseProxyHealthCheck: "true", - }, - }, - - Spec: corev1.PodSpec{ - Volumes: []corev1.Volume{ - { - Name: "web-admin-service-account", - }, - }, - Containers: []corev1.Container{ - { - Name: "web", - }, - { - Name: "web-side", - }, - { - Name: "web-admin", - }, - { - Name: "web-admin-side", - }, - { - Name: "auth-method-secret", - VolumeMounts: []corev1.VolumeMount{ - { - Name: "service-account-secret", - MountPath: "/var/run/secrets/kubernetes.io/serviceaccount", - }, - }, - }, - }, - ServiceAccountName: "web", - }, - } - multiPortInfos := []multiPortInfo{ - { - serviceIndex: 0, - serviceName: "web", - }, - { - serviceIndex: 1, - serviceName: "web-admin", - }, - } - expectedArgs := []string{ - "-envoy-ready-bind-port=21000", - "-envoy-ready-bind-port=21001", - } - expectedProbe := []*corev1.Probe{ - { - Handler: corev1.Handler{ - HTTPGet: &corev1.HTTPGetAction{ - Port: intstr.FromInt(21000), - Path: "/ready", - }, - }, - InitialDelaySeconds: 1, - }, - { - Handler: corev1.Handler{ - HTTPGet: &corev1.HTTPGetAction{ - Port: intstr.FromInt(21001), - Path: "/ready", - }, - }, - InitialDelaySeconds: 1, - }, - } - expectedPort := []corev1.ContainerPort{ - { - Name: "proxy-health-0", - ContainerPort: 21000, - }, - { - Name: "proxy-health-1", - ContainerPort: 21001, - }, - } - expectedEnvVar := corev1.EnvVar{ - Name: 
"DP_ENVOY_READY_BIND_ADDRESS", - ValueFrom: &corev1.EnvVarSource{ - FieldRef: &corev1.ObjectFieldSelector{FieldPath: "status.podIP"}, - }, - } - for i, info := range multiPortInfos { - container, err := h.consulDataplaneSidecar(testNS, pod, info) - require.NoError(t, err) - require.Contains(t, container.Args, expectedArgs[i]) - require.Equal(t, expectedProbe[i], container.ReadinessProbe) - require.Contains(t, container.Ports, expectedPort[i]) - require.Contains(t, container.Env, expectedEnvVar) - } -} - -func TestHandlerConsulDataplaneSidecar_Multiport(t *testing.T) { - for _, aclsEnabled := range []bool{false, true} { - name := fmt.Sprintf("acls enabled: %t", aclsEnabled) - t.Run(name, func(t *testing.T) { - w := MeshWebhook{ - ConsulAddress: "1.1.1.1", - ConsulConfig: &consul.Config{GRPCPort: 8502}, - LogLevel: "info", - } - if aclsEnabled { - w.AuthMethod = "test-auth-method" - } - pod := corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pod", - Annotations: map[string]string{ - constants.AnnotationService: "web,web-admin", - }, - }, - - Spec: corev1.PodSpec{ - Volumes: []corev1.Volume{ - { - Name: "web-admin-service-account", - }, - }, - Containers: []corev1.Container{ - { - Name: "web", - }, - { - Name: "web-side", - }, - { - Name: "web-admin", - }, - { - Name: "web-admin-side", - }, - { - Name: "auth-method-secret", - VolumeMounts: []corev1.VolumeMount{ - { - Name: "service-account-secret", - MountPath: "/var/run/secrets/kubernetes.io/serviceaccount", - }, - }, - }, - }, - ServiceAccountName: "web", - }, - } - multiPortInfos := []multiPortInfo{ - { - serviceIndex: 0, - serviceName: "web", - }, - { - serviceIndex: 1, - serviceName: "web-admin", - }, - } - expArgs := []string{ - "-addresses 1.1.1.1 -grpc-port=8502 -proxy-service-id-path=/consul/connect-inject/proxyid-web " + - "-log-level=info -log-json=false -envoy-concurrency=0 -tls-disabled -envoy-admin-bind-port=19000 -telemetry-prom-scrape-path=/metrics -- --base-id 0", - "-addresses 1.1.1.1 -grpc-port=8502 -proxy-service-id-path=/consul/connect-inject/proxyid-web-admin " + - "-log-level=info -log-json=false -envoy-concurrency=0 -tls-disabled -envoy-admin-bind-port=19001 -telemetry-prom-scrape-path=/metrics -- --base-id 1", - } - if aclsEnabled { - expArgs = []string{ - "-addresses 1.1.1.1 -grpc-port=8502 -proxy-service-id-path=/consul/connect-inject/proxyid-web " + - "-log-level=info -log-json=false -envoy-concurrency=0 -credential-type=login -login-auth-method=test-auth-method " + - "-login-bearer-token-path=/var/run/secrets/kubernetes.io/serviceaccount/token -login-meta=pod=k8snamespace/test-pod -tls-disabled -envoy-admin-bind-port=19000 -telemetry-prom-scrape-path=/metrics -- --base-id 0", - "-addresses 1.1.1.1 -grpc-port=8502 -proxy-service-id-path=/consul/connect-inject/proxyid-web-admin " + - "-log-level=info -log-json=false -envoy-concurrency=0 -credential-type=login -login-auth-method=test-auth-method " + - "-login-bearer-token-path=/consul/serviceaccount-web-admin/token -login-meta=pod=k8snamespace/test-pod -tls-disabled -envoy-admin-bind-port=19001 -telemetry-prom-scrape-path=/metrics -- --base-id 1", - } - } - expSAVolumeMounts := []corev1.VolumeMount{ - { - Name: "service-account-secret", - MountPath: "/var/run/secrets/kubernetes.io/serviceaccount", - }, - { - Name: "web-admin-service-account", - MountPath: "/consul/serviceaccount-web-admin", - ReadOnly: true, - }, - } - - for i, expCmd := range expArgs { - container, err := w.consulDataplaneSidecar(testNS, pod, multiPortInfos[i]) - require.NoError(t, err) - 
require.Equal(t, expCmd, strings.Join(container.Args, " ")) - - if w.AuthMethod != "" { - require.Equal(t, container.VolumeMounts, []corev1.VolumeMount{ - { - Name: volumeName, - MountPath: "/consul/connect-inject", - }, - expSAVolumeMounts[i], - }) - } else { - require.Equal(t, container.VolumeMounts, []corev1.VolumeMount{ - { - Name: volumeName, - MountPath: "/consul/connect-inject", - }, - }) - } - - port := constants.ProxyDefaultInboundPort + i - expectedProbe := &corev1.Probe{ - Handler: corev1.Handler{ - TCPSocket: &corev1.TCPSocketAction{ - Port: intstr.FromInt(port), - }, - }, - InitialDelaySeconds: 1, - } - require.Equal(t, expectedProbe, container.ReadinessProbe) - require.Nil(t, container.StartupProbe) - } - }) - } -} - -func TestHandlerConsulDataplaneSidecar_withSecurityContext(t *testing.T) { - cases := map[string]struct { - tproxyEnabled bool - openShiftEnabled bool - expSecurityContext *corev1.SecurityContext - }{ - "tproxy disabled; openshift disabled": { - tproxyEnabled: false, - openShiftEnabled: false, - expSecurityContext: &corev1.SecurityContext{ - RunAsUser: pointer.Int64(sidecarUserAndGroupID), - RunAsGroup: pointer.Int64(sidecarUserAndGroupID), - RunAsNonRoot: pointer.Bool(true), - ReadOnlyRootFilesystem: pointer.Bool(true), - }, - }, - "tproxy enabled; openshift disabled": { - tproxyEnabled: true, - openShiftEnabled: false, - expSecurityContext: &corev1.SecurityContext{ - RunAsUser: pointer.Int64(sidecarUserAndGroupID), - RunAsGroup: pointer.Int64(sidecarUserAndGroupID), - RunAsNonRoot: pointer.Bool(true), - ReadOnlyRootFilesystem: pointer.Bool(true), - }, - }, - "tproxy disabled; openshift enabled": { - tproxyEnabled: false, - openShiftEnabled: true, - expSecurityContext: nil, - }, - "tproxy enabled; openshift enabled": { - tproxyEnabled: true, - openShiftEnabled: true, - expSecurityContext: &corev1.SecurityContext{ - RunAsUser: pointer.Int64(sidecarUserAndGroupID), - RunAsGroup: pointer.Int64(sidecarUserAndGroupID), - RunAsNonRoot: pointer.Bool(true), - ReadOnlyRootFilesystem: pointer.Bool(true), - }, - }, - } - for name, c := range cases { - t.Run(name, func(t *testing.T) { - w := MeshWebhook{ - EnableTransparentProxy: c.tproxyEnabled, - EnableOpenShift: c.openShiftEnabled, - ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 8502}, - } - pod := corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - constants.AnnotationService: "foo", - }, - }, - - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "web", - }, - }, - }, - } - ec, err := w.consulDataplaneSidecar(testNS, pod, multiPortInfo{}) - require.NoError(t, err) - require.Equal(t, c.expSecurityContext, ec.SecurityContext) - }) - } -} - -// Test that if the user specifies a pod security context with the same uid as `sidecarUserAndGroupID` that we return -// an error to the meshWebhook. 
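For reference, the security context asserted throughout these cases, plus the UID-collision check the next two tests exercise, reduce to roughly the following; 5995 matches the sidecarUserAndGroupID used in the tests, and the rest is a sketch rather than the webhook's exact code path.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/utils/pointer"
)

const sidecarUserAndGroupID = 5995

func main() {
	// The locked-down context attached to consul-dataplane whenever
	// transparent proxy is on or OpenShift support is off.
	sc := &corev1.SecurityContext{
		RunAsUser:              pointer.Int64(sidecarUserAndGroupID),
		RunAsGroup:             pointer.Int64(sidecarUserAndGroupID),
		RunAsNonRoot:           pointer.Bool(true),
		ReadOnlyRootFilesystem: pointer.Bool(true),
	}

	// Rejecting a user container that claims the same UID keeps the
	// traffic-redirection rules, which are keyed on the proxy's UID,
	// unambiguous.
	app := corev1.Container{
		Name:            "app",
		SecurityContext: &corev1.SecurityContext{RunAsUser: pointer.Int64(sidecarUserAndGroupID)},
	}
	if app.SecurityContext.RunAsUser != nil && *app.SecurityContext.RunAsUser == *sc.RunAsUser {
		fmt.Printf("container %q has runAsUser set to the same UID as consul-dataplane\n", app.Name)
	}
}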
-func TestHandlerConsulDataplaneSidecar_FailsWithDuplicatePodSecurityContextUID(t *testing.T) { - require := require.New(t) - w := MeshWebhook{ - ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 8502}, - } - pod := corev1.Pod{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "web", - }, - }, - SecurityContext: &corev1.PodSecurityContext{ - RunAsUser: pointer.Int64(sidecarUserAndGroupID), - }, - }, - } - _, err := w.consulDataplaneSidecar(testNS, pod, multiPortInfo{}) - require.EqualError(err, fmt.Sprintf("pod's security context cannot have the same UID as consul-dataplane: %v", sidecarUserAndGroupID)) -} - -// Test that if the user specifies a container with security context with the same uid as `sidecarUserAndGroupID` that we -// return an error to the meshWebhook. If a container using the consul-dataplane image has the same uid, we don't return an error -// because in multiport pod there can be multiple consul-dataplane sidecars. -func TestHandlerConsulDataplaneSidecar_FailsWithDuplicateContainerSecurityContextUID(t *testing.T) { - cases := []struct { - name string - pod corev1.Pod - webhook MeshWebhook - expErr bool - expErrMessage string - }{ - { - name: "fails with non consul-dataplane image", - pod: corev1.Pod{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "web", - // Setting RunAsUser: 1 should succeed. - SecurityContext: &corev1.SecurityContext{ - RunAsUser: pointer.Int64(1), - }, - }, - { - Name: "app", - // Setting RunAsUser: 5995 should fail. - SecurityContext: &corev1.SecurityContext{ - RunAsUser: pointer.Int64(sidecarUserAndGroupID), - }, - Image: "not-consul-dataplane", - }, - }, - }, - }, - webhook: MeshWebhook{}, - expErr: true, - expErrMessage: fmt.Sprintf("container \"app\" has runAsUser set to the same UID \"%d\" as consul-dataplane which is not allowed", sidecarUserAndGroupID), - }, - { - name: "doesn't fail with envoy image", - pod: corev1.Pod{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "web", - // Setting RunAsUser: 1 should succeed. - SecurityContext: &corev1.SecurityContext{ - RunAsUser: pointer.Int64(1), - }, - }, - { - Name: "sidecar", - // Setting RunAsUser: 5995 should succeed if the image matches h.ImageConsulDataplane. - SecurityContext: &corev1.SecurityContext{ - RunAsUser: pointer.Int64(sidecarUserAndGroupID), - }, - Image: "envoy", - }, - }, - }, - }, - webhook: MeshWebhook{ - ImageConsulDataplane: "envoy", - }, - expErr: false, - }, - } - - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - tc.webhook.ConsulConfig = &consul.Config{HTTPPort: 8500, GRPCPort: 8502} - _, err := tc.webhook.consulDataplaneSidecar(testNS, tc.pod, multiPortInfo{}) - if tc.expErr { - require.EqualError(t, err, tc.expErrMessage) - } else { - require.NoError(t, err) - } - }) - } -} - -// Test that we can pass extra args to envoy via the extraEnvoyArgs flag -// or via pod annotations. When arguments are passed in both ways, the -// arguments set via pod annotations are used. 
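The annotation parsing exercised by the next test leans on github.com/google/shlex, so quoted arguments survive as single tokens. A minimal sketch of the split-then-requote step; the input string mirrors the test fixtures.

package main

import (
	"fmt"
	"strconv"
	"strings"

	"github.com/google/shlex"
)

func main() {
	// shlex honors shell-style quoting, so an operator can pass an argument
	// containing spaces through a single annotation string.
	raw := `--log-level debug --admin-address-path "/tmp/consul/foo bar"`
	tokens, err := shlex.Split(raw)
	if err != nil {
		panic(err)
	}
	// Tokens that still contain spaces are re-quoted before being handed to
	// Envoy, mirroring the logic in the removed getContainerSidecarArgs.
	for i, t := range tokens {
		if strings.Contains(t, " ") {
			tokens[i] = strconv.Quote(t)
		}
	}
	fmt.Println(strings.Join(tokens, " "))
	// --log-level debug --admin-address-path "/tmp/consul/foo bar"
}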
-func TestHandlerConsulDataplaneSidecar_EnvoyExtraArgs(t *testing.T) { - cases := []struct { - name string - envoyExtraArgs string - pod *corev1.Pod - expectedExtraArgs string - }{ - { - name: "no extra options provided", - envoyExtraArgs: "", - pod: &corev1.Pod{}, - expectedExtraArgs: "", - }, - { - name: "via flag: extra log-level option", - envoyExtraArgs: "--log-level debug", - pod: &corev1.Pod{}, - expectedExtraArgs: "-- --log-level debug", - }, - { - name: "via flag: multiple arguments with quotes", - envoyExtraArgs: "--log-level debug --admin-address-path \"/tmp/consul/foo bar\"", - pod: &corev1.Pod{}, - expectedExtraArgs: "-- --log-level debug --admin-address-path \"/tmp/consul/foo bar\"", - }, - { - name: "via annotation: multiple arguments with quotes", - envoyExtraArgs: "", - pod: &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - constants.AnnotationEnvoyExtraArgs: "--log-level debug --admin-address-path \"/tmp/consul/foo bar\"", - }, - }, - }, - expectedExtraArgs: "-- --log-level debug --admin-address-path \"/tmp/consul/foo bar\"", - }, - { - name: "via flag and annotation: should prefer setting via the annotation", - envoyExtraArgs: "this should be overwritten", - pod: &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - constants.AnnotationEnvoyExtraArgs: "--log-level debug --admin-address-path \"/tmp/consul/foo bar\"", - }, - }, - }, - expectedExtraArgs: "-- --log-level debug --admin-address-path \"/tmp/consul/foo bar\"", - }, - } - - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - h := MeshWebhook{ - ImageConsul: "hashicorp/consul:latest", - ImageConsulDataplane: "hashicorp/consul-k8s:latest", - ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 8502}, - EnvoyExtraArgs: tc.envoyExtraArgs, - } - - c, err := h.consulDataplaneSidecar(testNS, *tc.pod, multiPortInfo{}) - require.NoError(t, err) - require.Contains(t, strings.Join(c.Args, " "), tc.expectedExtraArgs) - }) - } -} - -func TestHandlerConsulDataplaneSidecar_UserVolumeMounts(t *testing.T) { - cases := []struct { - name string - pod corev1.Pod - expectedContainerVolumeMounts []corev1.VolumeMount - expErr string - }{ - { - name: "able to set a sidecar container volume mount via annotation", - pod: corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - constants.AnnotationEnvoyExtraArgs: "--log-level debug --admin-address-path \"/tmp/consul/foo bar\"", - constants.AnnotationConsulSidecarUserVolumeMount: "[{\"name\": \"tls-cert\", \"mountPath\": \"/custom/path\"}, {\"name\": \"tls-ca\", \"mountPath\": \"/custom/path2\"}]", - }, - }, - }, - expectedContainerVolumeMounts: []corev1.VolumeMount{ - { - Name: "consul-connect-inject-data", - MountPath: "/consul/connect-inject", - }, - { - Name: "tls-cert", - MountPath: "/custom/path", - }, - { - Name: "tls-ca", - MountPath: "/custom/path2", - }, - }, - }, - { - name: "invalid annotation results in error", - pod: corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - constants.AnnotationEnvoyExtraArgs: "--log-level debug --admin-address-path \"/tmp/consul/foo bar\"", - constants.AnnotationConsulSidecarUserVolumeMount: "[abcdefg]", - }, - }, - }, - expErr: "invalid character 'a' looking ", - }, - } - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - h := MeshWebhook{ - ImageConsul: "hashicorp/consul:latest", - ImageConsulDataplane: "hashicorp/consul-k8s:latest", - ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 8502}, - } - 
-func TestHandlerConsulDataplaneSidecar_UserVolumeMounts(t *testing.T) {
-  cases := []struct {
-    name                          string
-    pod                           corev1.Pod
-    expectedContainerVolumeMounts []corev1.VolumeMount
-    expErr                        string
-  }{
-    {
-      name: "able to set a sidecar container volume mount via annotation",
-      pod: corev1.Pod{
-        ObjectMeta: metav1.ObjectMeta{
-          Annotations: map[string]string{
-            constants.AnnotationEnvoyExtraArgs:               "--log-level debug --admin-address-path \"/tmp/consul/foo bar\"",
-            constants.AnnotationConsulSidecarUserVolumeMount: "[{\"name\": \"tls-cert\", \"mountPath\": \"/custom/path\"}, {\"name\": \"tls-ca\", \"mountPath\": \"/custom/path2\"}]",
-          },
-        },
-      },
-      expectedContainerVolumeMounts: []corev1.VolumeMount{
-        {
-          Name:      "consul-connect-inject-data",
-          MountPath: "/consul/connect-inject",
-        },
-        {
-          Name:      "tls-cert",
-          MountPath: "/custom/path",
-        },
-        {
-          Name:      "tls-ca",
-          MountPath: "/custom/path2",
-        },
-      },
-    },
-    {
-      name: "invalid annotation results in error",
-      pod: corev1.Pod{
-        ObjectMeta: metav1.ObjectMeta{
-          Annotations: map[string]string{
-            constants.AnnotationEnvoyExtraArgs:               "--log-level debug --admin-address-path \"/tmp/consul/foo bar\"",
-            constants.AnnotationConsulSidecarUserVolumeMount: "[abcdefg]",
-          },
-        },
-      },
-      expErr: "invalid character 'a' looking ",
-    },
-  }
-  for _, tc := range cases {
-    t.Run(tc.name, func(t *testing.T) {
-      h := MeshWebhook{
-        ImageConsul:          "hashicorp/consul:latest",
-        ImageConsulDataplane: "hashicorp/consul-k8s:latest",
-        ConsulConfig:         &consul.Config{HTTPPort: 8500, GRPCPort: 8502},
-      }
-      c, err := h.consulDataplaneSidecar(testNS, tc.pod, multiPortInfo{})
-      if tc.expErr == "" {
-        require.NoError(t, err)
-        require.Equal(t, tc.expectedContainerVolumeMounts, c.VolumeMounts)
-      } else {
-        require.Error(t, err)
-        require.Contains(t, err.Error(), tc.expErr)
-      }
-    })
-  }
-}
-
-func TestHandlerConsulDataplaneSidecar_Resources(t *testing.T) {
-  mem1 := resource.MustParse("100Mi")
-  mem2 := resource.MustParse("200Mi")
-  cpu1 := resource.MustParse("100m")
-  cpu2 := resource.MustParse("200m")
-  zero := resource.MustParse("0")
-
-  cases := map[string]struct {
-    webhook      MeshWebhook
-    annotations  map[string]string
-    expResources corev1.ResourceRequirements
-    expErr       string
-  }{
-    "no defaults, no annotations": {
-      webhook:     MeshWebhook{},
-      annotations: nil,
-      expResources: corev1.ResourceRequirements{
-        Limits:   corev1.ResourceList{},
-        Requests: corev1.ResourceList{},
-      },
-    },
-    "all defaults, no annotations": {
-      webhook: MeshWebhook{
-        DefaultProxyCPURequest:    cpu1,
-        DefaultProxyCPULimit:      cpu2,
-        DefaultProxyMemoryRequest: mem1,
-        DefaultProxyMemoryLimit:   mem2,
-      },
-      annotations: nil,
-      expResources: corev1.ResourceRequirements{
-        Limits: corev1.ResourceList{
-          corev1.ResourceCPU:    cpu2,
-          corev1.ResourceMemory: mem2,
-        },
-        Requests: corev1.ResourceList{
-          corev1.ResourceCPU:    cpu1,
-          corev1.ResourceMemory: mem1,
-        },
-      },
-    },
-    "no defaults, all annotations": {
-      webhook: MeshWebhook{},
-      annotations: map[string]string{
-        constants.AnnotationSidecarProxyCPURequest:    "100m",
-        constants.AnnotationSidecarProxyMemoryRequest: "100Mi",
-        constants.AnnotationSidecarProxyCPULimit:      "200m",
-        constants.AnnotationSidecarProxyMemoryLimit:   "200Mi",
-      },
-      expResources: corev1.ResourceRequirements{
-        Limits: corev1.ResourceList{
-          corev1.ResourceCPU:    cpu2,
-          corev1.ResourceMemory: mem2,
-        },
-        Requests: corev1.ResourceList{
-          corev1.ResourceCPU:    cpu1,
-          corev1.ResourceMemory: mem1,
-        },
-      },
-    },
-    "annotations override defaults": {
-      webhook: MeshWebhook{
-        DefaultProxyCPURequest:    zero,
-        DefaultProxyCPULimit:      zero,
-        DefaultProxyMemoryRequest: zero,
-        DefaultProxyMemoryLimit:   zero,
-      },
-      annotations: map[string]string{
-        constants.AnnotationSidecarProxyCPURequest:    "100m",
-        constants.AnnotationSidecarProxyMemoryRequest: "100Mi",
-        constants.AnnotationSidecarProxyCPULimit:      "200m",
-        constants.AnnotationSidecarProxyMemoryLimit:   "200Mi",
-      },
-      expResources: corev1.ResourceRequirements{
-        Limits: corev1.ResourceList{
-          corev1.ResourceCPU:    cpu2,
-          corev1.ResourceMemory: mem2,
-        },
-        Requests: corev1.ResourceList{
-          corev1.ResourceCPU:    cpu1,
-          corev1.ResourceMemory: mem1,
-        },
-      },
-    },
-    "defaults set to zero, no annotations": {
-      webhook: MeshWebhook{
-        DefaultProxyCPURequest:    zero,
-        DefaultProxyCPULimit:      zero,
-        DefaultProxyMemoryRequest: zero,
-        DefaultProxyMemoryLimit:   zero,
-      },
-      annotations: nil,
-      expResources: corev1.ResourceRequirements{
-        Limits: corev1.ResourceList{
-          corev1.ResourceCPU:    zero,
-          corev1.ResourceMemory: zero,
-        },
-        Requests: corev1.ResourceList{
-          corev1.ResourceCPU:    zero,
-          corev1.ResourceMemory: zero,
-        },
-      },
-    },
-    "annotations set to 0": {
-      webhook: MeshWebhook{},
-      annotations: map[string]string{
-        constants.AnnotationSidecarProxyCPURequest:    "0",
-        constants.AnnotationSidecarProxyMemoryRequest: "0",
-        constants.AnnotationSidecarProxyCPULimit:      "0",
-        constants.AnnotationSidecarProxyMemoryLimit:   "0",
-      },
-      expResources: corev1.ResourceRequirements{
-        Limits: corev1.ResourceList{
-          corev1.ResourceCPU:    zero,
-          corev1.ResourceMemory: zero,
-        },
-        Requests: corev1.ResourceList{
-          corev1.ResourceCPU:    zero,
-          corev1.ResourceMemory: zero,
-        },
-      },
-    },
-    "invalid cpu request": {
-      webhook: MeshWebhook{},
-      annotations: map[string]string{
-        constants.AnnotationSidecarProxyCPURequest: "invalid",
-      },
-      expErr: "parsing annotation consul.hashicorp.com/sidecar-proxy-cpu-request:\"invalid\": quantities must match the regular expression",
-    },
-    "invalid cpu limit": {
-      webhook: MeshWebhook{},
-      annotations: map[string]string{
-        constants.AnnotationSidecarProxyCPULimit: "invalid",
-      },
-      expErr: "parsing annotation consul.hashicorp.com/sidecar-proxy-cpu-limit:\"invalid\": quantities must match the regular expression",
-    },
-    "invalid memory request": {
-      webhook: MeshWebhook{},
-      annotations: map[string]string{
-        constants.AnnotationSidecarProxyMemoryRequest: "invalid",
-      },
-      expErr: "parsing annotation consul.hashicorp.com/sidecar-proxy-memory-request:\"invalid\": quantities must match the regular expression",
-    },
-    "invalid memory limit": {
-      webhook: MeshWebhook{},
-      annotations: map[string]string{
-        constants.AnnotationSidecarProxyMemoryLimit: "invalid",
-      },
-      expErr: "parsing annotation consul.hashicorp.com/sidecar-proxy-memory-limit:\"invalid\": quantities must match the regular expression",
-    },
-  }
-
-  for name, c := range cases {
-    t.Run(name, func(tt *testing.T) {
-      c.webhook.ConsulConfig = &consul.Config{HTTPPort: 8500, GRPCPort: 8502}
-      require := require.New(tt)
-      pod := corev1.Pod{
-        ObjectMeta: metav1.ObjectMeta{
-          Annotations: c.annotations,
-        },
-
-        Spec: corev1.PodSpec{
-          Containers: []corev1.Container{
-            {
-              Name: "web",
-            },
-          },
-        },
-      }
-      container, err := c.webhook.consulDataplaneSidecar(testNS, pod, multiPortInfo{})
-      if c.expErr != "" {
-        require.NotNil(err)
-        require.Contains(err.Error(), c.expErr)
-      } else {
-        require.NoError(err)
-        require.Equal(c.expResources, container.Resources)
-      }
-    })
-  }
-}
-
-func TestHandlerConsulDataplaneSidecar_Metrics(t *testing.T) {
-  cases := []struct {
-    name       string
-    pod        corev1.Pod
-    expCmdArgs string
-    expErr     string
-  }{
-    {
-      name:       "default",
-      pod:        corev1.Pod{},
-      expCmdArgs: "",
-    },
-    {
-      name: "turning on merged metrics",
-      pod: corev1.Pod{
-        ObjectMeta: metav1.ObjectMeta{
-          Annotations: map[string]string{
-            constants.AnnotationService:              "web",
-            constants.AnnotationEnableMetrics:        "true",
-            constants.AnnotationEnableMetricsMerging: "true",
-            constants.AnnotationMergedMetricsPort:    "20100",
-            constants.AnnotationPort:                 "1234",
-            constants.AnnotationPrometheusScrapePath: "/scrape-path",
-          },
-        },
-      },
-      expCmdArgs: "-telemetry-prom-scrape-path=/scrape-path -telemetry-prom-merge-port=20100 -telemetry-prom-service-metrics-url=http://127.0.0.1:1234/metrics",
-    },
-    {
-      name: "merged metrics with TLS enabled",
-      pod: corev1.Pod{
-        ObjectMeta: metav1.ObjectMeta{
-          Annotations: map[string]string{
-            constants.AnnotationService:              "web",
-            constants.AnnotationEnableMetrics:        "true",
-            constants.AnnotationEnableMetricsMerging: "true",
-            constants.AnnotationMergedMetricsPort:    "20100",
-            constants.AnnotationPort:                 "1234",
-            constants.AnnotationPrometheusScrapePath: "/scrape-path",
-            constants.AnnotationPrometheusCAFile:     "/certs/ca.crt",
-            constants.AnnotationPrometheusCAPath:     "/certs/ca",
-            constants.AnnotationPrometheusCertFile:   "/certs/server.crt",
-            constants.AnnotationPrometheusKeyFile:    "/certs/key.pem",
-          },
-        },
-      },
-      expCmdArgs: "-telemetry-prom-scrape-path=/scrape-path -telemetry-prom-merge-port=20100 -telemetry-prom-service-metrics-url=http://127.0.0.1:1234/metrics -telemetry-prom-ca-certs-file=/certs/ca.crt -telemetry-prom-ca-certs-path=/certs/ca -telemetry-prom-cert-file=/certs/server.crt -telemetry-prom-key-file=/certs/key.pem",
-    },
-    {
-      name: "merge metrics with TLS enabled, missing CA gives an error",
-      pod: corev1.Pod{
-        ObjectMeta: metav1.ObjectMeta{
-          Annotations: map[string]string{
-            constants.AnnotationService:              "web",
-            constants.AnnotationEnableMetrics:        "true",
-            constants.AnnotationEnableMetricsMerging: "true",
-            constants.AnnotationMergedMetricsPort:    "20100",
-            constants.AnnotationPort:                 "1234",
-            constants.AnnotationPrometheusScrapePath: "/scrape-path",
-            constants.AnnotationPrometheusCertFile:   "/certs/server.crt",
-            constants.AnnotationPrometheusKeyFile:    "/certs/key.pem",
-          },
-        },
-      },
-      expCmdArgs: "",
-      expErr:     fmt.Sprintf("must set one of %q or %q when providing prometheus TLS config", constants.AnnotationPrometheusCAFile, constants.AnnotationPrometheusCAPath),
-    },
-    {
-      name: "merge metrics with TLS enabled, missing cert gives an error",
-      pod: corev1.Pod{
-        ObjectMeta: metav1.ObjectMeta{
-          Annotations: map[string]string{
-            constants.AnnotationService:              "web",
-            constants.AnnotationEnableMetrics:        "true",
-            constants.AnnotationEnableMetricsMerging: "true",
-            constants.AnnotationMergedMetricsPort:    "20100",
-            constants.AnnotationPort:                 "1234",
-            constants.AnnotationPrometheusScrapePath: "/scrape-path",
-            constants.AnnotationPrometheusCAFile:     "/certs/ca.crt",
-            constants.AnnotationPrometheusKeyFile:    "/certs/key.pem",
-          },
-        },
-      },
-      expCmdArgs: "",
-      expErr:     fmt.Sprintf("must set %q when providing prometheus TLS config", constants.AnnotationPrometheusCertFile),
-    },
-    {
-      name: "merge metrics with TLS enabled, missing key file gives an error",
-      pod: corev1.Pod{
-        ObjectMeta: metav1.ObjectMeta{
-          Annotations: map[string]string{
-            constants.AnnotationService:              "web",
-            constants.AnnotationEnableMetrics:        "true",
-            constants.AnnotationEnableMetricsMerging: "true",
-            constants.AnnotationMergedMetricsPort:    "20100",
-            constants.AnnotationPort:                 "1234",
-            constants.AnnotationPrometheusScrapePath: "/scrape-path",
-            constants.AnnotationPrometheusCAPath:     "/certs/ca",
-            constants.AnnotationPrometheusCertFile:   "/certs/server.crt",
-          },
-        },
-      },
-      expCmdArgs: "",
-      expErr:     fmt.Sprintf("must set %q when providing prometheus TLS config", constants.AnnotationPrometheusKeyFile),
-    },
-  }
-
-  for _, c := range cases {
-    t.Run(c.name, func(t *testing.T) {
-      h := MeshWebhook{
-        ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 8502},
-      }
-      container, err := h.consulDataplaneSidecar(testNS, c.pod, multiPortInfo{})
-      if c.expErr != "" {
-        require.NotNil(t, err)
-        require.Contains(t, err.Error(), c.expErr)
-      } else {
-        require.NoError(t, err)
-        require.Contains(t, strings.Join(container.Args, " "), c.expCmdArgs)
-      }
-    })
-  }
-}
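The three error cases above pin down a validation rule: a Prometheus TLS config is only accepted when a CA (file or path), a cert file, and a key file are all present. A standalone sketch of that rule, with paraphrased error messages (the real code formats them from the annotation constants):

```go
package main

import (
	"errors"
	"fmt"
)

// Sketch of the TLS-config rule the metrics tests exercise; inputs are the
// values of the four prometheus-* pod annotations ("" means unset).
func validatePrometheusTLS(caFile, caPath, certFile, keyFile string) error {
	if caFile == "" && caPath == "" {
		return errors.New("must set one of the CA file or CA path annotations when providing prometheus TLS config")
	}
	if certFile == "" {
		return errors.New("must set the cert file annotation when providing prometheus TLS config")
	}
	if keyFile == "" {
		return errors.New("must set the key file annotation when providing prometheus TLS config")
	}
	return nil
}

func main() {
	// Mirrors the "missing CA gives an error" case.
	fmt.Println(validatePrometheusTLS("", "", "/certs/server.crt", "/certs/key.pem"))
}
```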
netAdminCapability = "NET_ADMIN" -) - -type initContainerCommandData struct { - ServiceName string - ServiceAccountName string - AuthMethod string - - // MultiPort determines whether this is a multi port Pod, which configures the init container to be specific to one - // of the services on the multi port Pod. - MultiPort bool - - // Log settings for the connect-init command. - LogLevel string - LogJSON bool -} - -// containerInit returns the init container spec for connect-init that polls for the service and the connect proxy service to be registered -// so that it can save the proxy service id to the shared volume and boostrap Envoy with the proxy-id. -func (w *MeshWebhook) containerInit(namespace corev1.Namespace, pod corev1.Pod, mpi multiPortInfo) (corev1.Container, error) { - // Check if tproxy is enabled on this pod. - tproxyEnabled, err := common.TransparentProxyEnabled(namespace, pod, w.EnableTransparentProxy) - if err != nil { - return corev1.Container{}, err - } - - multiPort := mpi.serviceName != "" - - data := initContainerCommandData{ - AuthMethod: w.AuthMethod, - MultiPort: multiPort, - LogLevel: w.LogLevel, - LogJSON: w.LogJSON, - } - - // Create expected volume mounts - volMounts := []corev1.VolumeMount{ - { - Name: volumeName, - MountPath: "/consul/connect-inject", - }, - } - - if multiPort { - data.ServiceName = mpi.serviceName - } else { - data.ServiceName = pod.Annotations[constants.AnnotationService] - } - var bearerTokenFile string - if w.AuthMethod != "" { - if multiPort { - // If multi port then we require that the service account name - // matches the service name. - data.ServiceAccountName = mpi.serviceName - } else { - data.ServiceAccountName = pod.Spec.ServiceAccountName - } - // Extract the service account token's volume mount - var saTokenVolumeMount corev1.VolumeMount - saTokenVolumeMount, bearerTokenFile, err = findServiceAccountVolumeMount(pod, mpi.serviceName) - if err != nil { - return corev1.Container{}, err - } - - // Append to volume mounts - volMounts = append(volMounts, saTokenVolumeMount) - } - - // Render the command - var buf bytes.Buffer - tpl := template.Must(template.New("root").Parse(strings.TrimSpace( - initContainerCommandTpl))) - err = tpl.Execute(&buf, &data) - if err != nil { - return corev1.Container{}, err - } - - initContainerName := injectInitContainerName - if multiPort { - initContainerName = fmt.Sprintf("%s-%s", injectInitContainerName, mpi.serviceName) - } - container := corev1.Container{ - Name: initContainerName, - Image: w.ImageConsulK8S, - Env: []corev1.EnvVar{ - { - Name: "POD_NAME", - ValueFrom: &corev1.EnvVarSource{ - FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.name"}, - }, - }, - { - Name: "POD_NAMESPACE", - ValueFrom: &corev1.EnvVarSource{ - FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.namespace"}, - }, - }, - { - Name: "NODE_NAME", - ValueFrom: &corev1.EnvVarSource{ - FieldRef: &corev1.ObjectFieldSelector{ - FieldPath: "spec.nodeName", - }, - }, - }, - { - Name: "CONSUL_ADDRESSES", - Value: w.ConsulAddress, - }, - { - Name: "CONSUL_GRPC_PORT", - Value: strconv.Itoa(w.ConsulConfig.GRPCPort), - }, - { - Name: "CONSUL_HTTP_PORT", - Value: strconv.Itoa(w.ConsulConfig.HTTPPort), - }, - { - Name: "CONSUL_API_TIMEOUT", - Value: w.ConsulConfig.APITimeout.String(), - }, - { - Name: "CONSUL_NODE_NAME", - Value: "$(NODE_NAME)-virtual", - }, - }, - Resources: w.InitContainerResources, - VolumeMounts: volMounts, - Command: []string{"/bin/sh", "-ec", buf.String()}, - } - - if w.TLSEnabled { - container.Env 
= append(container.Env, - corev1.EnvVar{ - Name: "CONSUL_USE_TLS", - Value: "true", - }, - corev1.EnvVar{ - Name: "CONSUL_CACERT_PEM", - Value: w.ConsulCACert, - }, - corev1.EnvVar{ - Name: "CONSUL_TLS_SERVER_NAME", - Value: w.ConsulTLSServerName, - }) - } - - if w.AuthMethod != "" { - container.Env = append(container.Env, - corev1.EnvVar{ - Name: "CONSUL_LOGIN_AUTH_METHOD", - Value: w.AuthMethod, - }, - corev1.EnvVar{ - Name: "CONSUL_LOGIN_BEARER_TOKEN_FILE", - Value: bearerTokenFile, - }, - corev1.EnvVar{ - Name: "CONSUL_LOGIN_META", - Value: "pod=$(POD_NAMESPACE)/$(POD_NAME)", - }) - - if w.EnableNamespaces { - if w.EnableK8SNSMirroring { - container.Env = append(container.Env, - corev1.EnvVar{ - Name: "CONSUL_LOGIN_NAMESPACE", - Value: "default", - }) - } else { - container.Env = append(container.Env, - corev1.EnvVar{ - Name: "CONSUL_LOGIN_NAMESPACE", - Value: w.consulNamespace(namespace.Name), - }) - } - } - - if w.ConsulPartition != "" { - container.Env = append(container.Env, - corev1.EnvVar{ - Name: "CONSUL_LOGIN_PARTITION", - Value: w.ConsulPartition, - }) - } - } - if w.EnableNamespaces { - container.Env = append(container.Env, - corev1.EnvVar{ - Name: "CONSUL_NAMESPACE", - Value: w.consulNamespace(namespace.Name), - }) - } - - if w.ConsulPartition != "" { - container.Env = append(container.Env, - corev1.EnvVar{ - Name: "CONSUL_PARTITION", - Value: w.ConsulPartition, - }) - } - - if tproxyEnabled { - if !w.EnableCNI { - // Set redirect traffic config for the container so that we can apply iptables rules. - redirectTrafficConfig, err := w.iptablesConfigJSON(pod, namespace) - if err != nil { - return corev1.Container{}, err - } - container.Env = append(container.Env, - corev1.EnvVar{ - Name: "CONSUL_REDIRECT_TRAFFIC_CONFIG", - Value: redirectTrafficConfig, - }) - - // Running consul connect redirect-traffic with iptables - // requires both being a root user and having NET_ADMIN capability. - container.SecurityContext = &corev1.SecurityContext{ - RunAsUser: pointer.Int64(rootUserAndGroupID), - RunAsGroup: pointer.Int64(rootUserAndGroupID), - // RunAsNonRoot overrides any setting in the Pod so that we can still run as root here as required. - RunAsNonRoot: pointer.Bool(false), - Privileged: pointer.Bool(true), - Capabilities: &corev1.Capabilities{ - Add: []corev1.Capability{netAdminCapability}, - }, - } - } else { - container.SecurityContext = &corev1.SecurityContext{ - RunAsUser: pointer.Int64(initContainersUserAndGroupID), - RunAsGroup: pointer.Int64(initContainersUserAndGroupID), - RunAsNonRoot: pointer.Bool(true), - Privileged: pointer.Bool(false), - Capabilities: &corev1.Capabilities{ - Drop: []corev1.Capability{"ALL"}, - }, - } - } - } - - return container, nil -} - -// consulDNSEnabled returns true if Consul DNS should be enabled for this pod. -// It returns an error when the annotation value cannot be parsed by strconv.ParseBool or if we are unable -// to read the pod's namespace label when it exists. -func consulDNSEnabled(namespace corev1.Namespace, pod corev1.Pod, globalEnabled bool) (bool, error) { - // First check to see if the pod annotation exists to override the namespace or global settings. - if raw, ok := pod.Annotations[constants.KeyConsulDNS]; ok { - return strconv.ParseBool(raw) - } - // Next see if the namespace has been defaulted. - if raw, ok := namespace.Labels[constants.KeyConsulDNS]; ok { - return strconv.ParseBool(raw) - } - // Else fall back to the global default. 
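To see what the template above actually produces, here is a standalone sketch that renders it with sample data for a multiport service using an auth method (the template text is copied from the deleted file; the sample values are illustrative):

```go
package main

import (
	"os"
	"strings"
	"text/template"
)

// Copied from the deleted container_init.go; ${POD_NAME} etc. are expanded
// by the shell inside the init container, not by text/template.
const initContainerCommandTpl = `
consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \
  -log-level={{ .LogLevel }} \
  -log-json={{ .LogJSON }} \
  {{- if .AuthMethod }}
  -service-account-name="{{ .ServiceAccountName }}" \
  -service-name="{{ .ServiceName }}" \
  {{- end }}
  {{- if .MultiPort }}
  -multiport=true \
  -proxy-id-file=/consul/connect-inject/proxyid-{{ .ServiceName }} \
  {{- if not .AuthMethod }}
  -service-name="{{ .ServiceName }}" \
  {{- end }}
  {{- end }}
`

type commandData struct {
	ServiceName, ServiceAccountName, AuthMethod, LogLevel string
	MultiPort, LogJSON                                    bool
}

func main() {
	tpl := template.Must(template.New("root").Parse(strings.TrimSpace(initContainerCommandTpl)))
	// Sample values; in the webhook these come from flags and pod annotations.
	_ = tpl.Execute(os.Stdout, commandData{
		ServiceName:        "web",
		ServiceAccountName: "web",
		AuthMethod:         "auth-method",
		LogLevel:           "info",
		MultiPort:          true,
	})
}
```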
diff --git a/control-plane/connect-inject/webhook/container_init_test.go b/control-plane/connect-inject/webhook/container_init_test.go
deleted file mode 100644
index 8e0b551b24..0000000000
--- a/control-plane/connect-inject/webhook/container_init_test.go
+++ /dev/null
@@ -1,962 +0,0 @@
-package webhook
-
-import (
-  "fmt"
-  "strings"
-  "testing"
-  "time"
-
-  "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants"
-  "github.com/hashicorp/consul-k8s/control-plane/consul"
-  "github.com/hashicorp/consul-k8s/control-plane/namespaces"
-  "github.com/stretchr/testify/require"
-  corev1 "k8s.io/api/core/v1"
-  "k8s.io/apimachinery/pkg/api/resource"
-  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-  "k8s.io/utils/pointer"
-)
-
-const k8sNamespace = "k8snamespace"
-
-func TestHandlerContainerInit(t *testing.T) {
-  minimal := func() *corev1.Pod {
-    return &corev1.Pod{
-      ObjectMeta: metav1.ObjectMeta{
-        Name:      "test-pod",
-        Namespace: "test-namespace",
-        Annotations: map[string]string{
-          constants.AnnotationService: "foo",
-        },
-      },
-
-      Spec: corev1.PodSpec{
-        Containers: []corev1.Container{
-          {
-            Name: "web",
-          },
-          {
-            Name: "web-side",
-          },
-        },
-      },
-      Status: corev1.PodStatus{
-        HostIP: "1.1.1.1",
-        PodIP:  "2.2.2.2",
-      },
-    }
-  }
-
-  cases := []struct {
-    Name    string
-    Pod     func(*corev1.Pod) *corev1.Pod
-    Webhook MeshWebhook
-    ExpCmd  string // Strings.Contains test
-    ExpEnv  []corev1.EnvVar
-  }{
-    {
-      "default cmd and env",
-      func(pod *corev1.Pod) *corev1.Pod {
-        pod.Annotations[constants.AnnotationService] = "web"
-        return pod
-      },
-      MeshWebhook{
-        ConsulAddress: "10.0.0.0",
-        ConsulConfig:  &consul.Config{HTTPPort: 8500, GRPCPort: 8502},
-        LogLevel:      "info",
-      },
-      `/bin/sh -ec consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \
-  -log-level=info \
-  -log-json=false \`,
-      []corev1.EnvVar{
-        {
-          Name:  "CONSUL_ADDRESSES",
-          Value: "10.0.0.0",
-        },
-        {
-          Name:  "CONSUL_GRPC_PORT",
-          Value: "8502",
-        },
-        {
-          Name:  "CONSUL_HTTP_PORT",
-          Value: "8500",
-        },
-        {
-          Name:  "CONSUL_API_TIMEOUT",
-          Value: "0s",
-        },
-        {
-          Name:  "CONSUL_NODE_NAME",
-          Value: "$(NODE_NAME)-virtual",
-        },
-      },
-    },
-
-    {
-      "with auth method",
-      func(pod *corev1.Pod) *corev1.Pod {
-        pod.Annotations[constants.AnnotationService] = "web"
-        pod.Spec.ServiceAccountName = "a-service-account-name"
-        pod.Spec.Containers[0].VolumeMounts = []corev1.VolumeMount{
-          {
-            Name:      "sa",
-            MountPath: "/var/run/secrets/kubernetes.io/serviceaccount",
-          },
-        }
-        return pod
-      },
-      MeshWebhook{
-        AuthMethod:    "an-auth-method",
-        ConsulAddress: "10.0.0.0",
-        ConsulConfig:  &consul.Config{HTTPPort: 8500, GRPCPort: 8502, APITimeout: 5 * time.Second},
-        LogLevel:      "debug",
-        LogJSON:       true,
-      },
-      `/bin/sh -ec consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \
-  -log-level=debug \
-  -log-json=true \
-  -service-account-name="a-service-account-name" \
-  -service-name="web" \`,
-      []corev1.EnvVar{
-        {
-          Name:  "CONSUL_ADDRESSES",
-          Value: "10.0.0.0",
-        },
-        {
-          Name:  "CONSUL_GRPC_PORT",
-          Value: "8502",
-        },
-        {
-          Name:  "CONSUL_HTTP_PORT",
-          Value: "8500",
-        },
-        {
-          Name:  "CONSUL_API_TIMEOUT",
-          Value: "5s",
-        },
-        {
-          Name:  "CONSUL_NODE_NAME",
-          Value: "$(NODE_NAME)-virtual",
-        },
-        {
-          Name:  "CONSUL_LOGIN_AUTH_METHOD",
-          Value: "an-auth-method",
-        },
-        {
-          Name:  "CONSUL_LOGIN_BEARER_TOKEN_FILE",
-          Value: "/var/run/secrets/kubernetes.io/serviceaccount/token",
-        },
-        {
-          Name:  "CONSUL_LOGIN_META",
-          Value: "pod=$(POD_NAMESPACE)/$(POD_NAME)",
-        },
-      },
-    },
-  }
-
-  for _, tt := range cases {
-    t.Run(tt.Name, func(t *testing.T) {
-      w := tt.Webhook
-      pod := *tt.Pod(minimal())
-      container, err := w.containerInit(testNS, pod, multiPortInfo{})
-      require.NoError(t, err)
-      actual := strings.Join(container.Command, " ")
-      require.Contains(t, actual, tt.ExpCmd)
-      require.EqualValues(t, container.Env[3:], tt.ExpEnv)
-    })
-  }
-}
-
-func TestHandlerContainerInit_transparentProxy(t *testing.T) {
-  cases := map[string]struct {
-    globalEnabled    bool
-    cniEnabled       bool
-    annotations      map[string]string
-    expTproxyEnabled bool
-    namespaceLabel   map[string]string
-  }{
-    "enabled globally, ns not set, annotation not provided, cni disabled": {
-      true,
-      false,
-      nil,
-      true,
-      nil,
-    },
-    "enabled globally, ns not set, annotation is false, cni disabled": {
-      true,
-      false,
-      map[string]string{constants.KeyTransparentProxy: "false"},
-      false,
-      nil,
-    },
-    "enabled globally, ns not set, annotation is true, cni disabled": {
-      true,
-      false,
-      map[string]string{constants.KeyTransparentProxy: "true"},
-      true,
-      nil,
-    },
-    "disabled globally, ns not set, annotation not provided, cni disabled": {
-      false,
-      false,
-      nil,
-      false,
-      nil,
-    },
-    "disabled globally, ns not set, annotation is false, cni disabled": {
-      false,
-      false,
-      map[string]string{constants.KeyTransparentProxy: "false"},
-      false,
-      nil,
-    },
-    "disabled globally, ns not set, annotation is true, cni disabled": {
-      false,
-      false,
-      map[string]string{constants.KeyTransparentProxy: "true"},
-      true,
-      nil,
-    },
-    "disabled globally, ns enabled, annotation not set, cni disabled": {
-      false,
-      false,
-      nil,
-      true,
-      map[string]string{constants.KeyTransparentProxy: "true"},
-    },
-    "enabled globally, ns disabled, annotation not set, cni disabled": {
-      true,
-      false,
-      nil,
-      false,
-      map[string]string{constants.KeyTransparentProxy: "false"},
-    },
-    "disabled globally, ns enabled, annotation not set, cni enabled": {
-      false,
-      true,
-      nil,
-      false,
-      map[string]string{constants.KeyTransparentProxy: "true"},
-    },
-
-    "enabled globally, ns not set, annotation not set, cni enabled": {
-      true,
-      true,
-      nil,
-      false,
-      nil,
-    },
-  }
-  for name, c := range cases {
-    t.Run(name, func(t *testing.T) {
-      w := MeshWebhook{
-        EnableTransparentProxy: c.globalEnabled,
-        EnableCNI:              c.cniEnabled,
-        ConsulConfig:           &consul.Config{HTTPPort: 8500},
-      }
-      pod := minimal()
-      pod.Annotations = c.annotations
-
-      var expectedSecurityContext *corev1.SecurityContext
-      if c.cniEnabled {
-        expectedSecurityContext = &corev1.SecurityContext{
-          RunAsUser:    pointer.Int64(initContainersUserAndGroupID),
-          RunAsGroup:   pointer.Int64(initContainersUserAndGroupID),
-          RunAsNonRoot: pointer.Bool(true),
-          Privileged:   pointer.Bool(false),
-          Capabilities: &corev1.Capabilities{
-            Drop: []corev1.Capability{"ALL"},
-          },
-        }
-      } else if c.expTproxyEnabled {
-        expectedSecurityContext = &corev1.SecurityContext{
-          RunAsUser:    pointer.Int64(0),
-          RunAsGroup:   pointer.Int64(0),
-          RunAsNonRoot: pointer.Bool(false),
-          Privileged:   pointer.Bool(true),
-          Capabilities: &corev1.Capabilities{
-            Add: []corev1.Capability{netAdminCapability},
-          },
-        }
-      }
-      ns := testNS
-      ns.Labels = c.namespaceLabel
-      container, err := w.containerInit(ns, *pod, multiPortInfo{})
-      require.NoError(t, err)
-
-      redirectTrafficEnvVarFound := false
-      for _, ev := range container.Env {
-        if ev.Name == "CONSUL_REDIRECT_TRAFFIC_CONFIG" {
-          redirectTrafficEnvVarFound = true
-          break
-        }
-      }
-
-      require.Equal(t, c.expTproxyEnabled, redirectTrafficEnvVarFound)
-      require.Equal(t, expectedSecurityContext, container.SecurityContext)
-    })
-  }
-}
Value: "$(NODE_NAME)-virtual", - }, - { - Name: "CONSUL_NAMESPACE", - Value: "default", - }, - { - Name: "CONSUL_PARTITION", - Value: "default", - }, - }, - }, - { - "non-default namespace, no partition", - func(pod *corev1.Pod) *corev1.Pod { - pod.Annotations[constants.AnnotationService] = "web" - return pod - }, - MeshWebhook{ - EnableNamespaces: true, - ConsulDestinationNamespace: "non-default", - ConsulPartition: "", - ConsulAddress: "10.0.0.0", - ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 8502, APITimeout: 5 * time.Second}, - }, - `/bin/sh -ec consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ - -log-level=info \ - -log-json=false \`, - []corev1.EnvVar{ - { - Name: "CONSUL_ADDRESSES", - Value: "10.0.0.0", - }, - { - Name: "CONSUL_GRPC_PORT", - Value: "8502", - }, - { - Name: "CONSUL_HTTP_PORT", - Value: "8500", - }, - { - Name: "CONSUL_API_TIMEOUT", - Value: "5s", - }, - { - Name: "CONSUL_NODE_NAME", - Value: "$(NODE_NAME)-virtual", - }, - { - Name: "CONSUL_NAMESPACE", - Value: "non-default", - }, - }, - }, - { - "non-default namespace, non-default partition", - func(pod *corev1.Pod) *corev1.Pod { - pod.Annotations[constants.AnnotationService] = "web" - return pod - }, - MeshWebhook{ - EnableNamespaces: true, - ConsulDestinationNamespace: "non-default", - ConsulPartition: "non-default-part", - ConsulAddress: "10.0.0.0", - ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 8502, APITimeout: 5 * time.Second}, - }, - `/bin/sh -ec consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ - -log-level=info \ - -log-json=false \`, - []corev1.EnvVar{ - { - Name: "CONSUL_ADDRESSES", - Value: "10.0.0.0", - }, - { - Name: "CONSUL_GRPC_PORT", - Value: "8502", - }, - { - Name: "CONSUL_HTTP_PORT", - Value: "8500", - }, - { - Name: "CONSUL_API_TIMEOUT", - Value: "5s", - }, - { - Name: "CONSUL_NODE_NAME", - Value: "$(NODE_NAME)-virtual", - }, - { - Name: "CONSUL_NAMESPACE", - Value: "non-default", - }, - { - Name: "CONSUL_PARTITION", - Value: "non-default-part", - }, - }, - }, - { - "auth method, non-default namespace, mirroring disabled, default partition", - func(pod *corev1.Pod) *corev1.Pod { - pod.Annotations[constants.AnnotationService] = "" - return pod - }, - MeshWebhook{ - AuthMethod: "auth-method", - EnableNamespaces: true, - ConsulDestinationNamespace: "non-default", - ConsulPartition: "default", - ConsulAddress: "10.0.0.0", - ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 8502, APITimeout: 5 * time.Second}, - }, - `/bin/sh -ec consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ - -log-level=info \ - -log-json=false \ - -service-account-name="web" \ - -service-name="" \`, - []corev1.EnvVar{ - { - Name: "CONSUL_ADDRESSES", - Value: "10.0.0.0", - }, - { - Name: "CONSUL_GRPC_PORT", - Value: "8502", - }, - { - Name: "CONSUL_HTTP_PORT", - Value: "8500", - }, - { - Name: "CONSUL_API_TIMEOUT", - Value: "5s", - }, - { - Name: "CONSUL_NODE_NAME", - Value: "$(NODE_NAME)-virtual", - }, - { - Name: "CONSUL_LOGIN_AUTH_METHOD", - Value: "auth-method", - }, - { - Name: "CONSUL_LOGIN_BEARER_TOKEN_FILE", - Value: "/var/run/secrets/kubernetes.io/serviceaccount/token", - }, - { - Name: "CONSUL_LOGIN_META", - Value: "pod=$(POD_NAMESPACE)/$(POD_NAME)", - }, - { - Name: "CONSUL_LOGIN_NAMESPACE", - Value: "non-default", - }, - { - Name: "CONSUL_LOGIN_PARTITION", - Value: "default", - }, - { - Name: "CONSUL_NAMESPACE", - Value: "non-default", - }, - { - Name: 
"CONSUL_PARTITION", - Value: "default", - }, - }, - }, - { - "auth method, non-default namespace, mirroring enabled, non-default partition", - func(pod *corev1.Pod) *corev1.Pod { - pod.Annotations[constants.AnnotationService] = "" - return pod - }, - MeshWebhook{ - AuthMethod: "auth-method", - EnableNamespaces: true, - ConsulDestinationNamespace: "non-default", // Overridden by mirroring - EnableK8SNSMirroring: true, - ConsulPartition: "non-default", - ConsulAddress: "10.0.0.0", - ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 8502, APITimeout: 5 * time.Second}, - }, - `/bin/sh -ec consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ - -log-level=info \ - -log-json=false \ - -service-account-name="web" \ - -service-name="" \`, - []corev1.EnvVar{ - { - Name: "CONSUL_ADDRESSES", - Value: "10.0.0.0", - }, - { - Name: "CONSUL_GRPC_PORT", - Value: "8502", - }, - { - Name: "CONSUL_HTTP_PORT", - Value: "8500", - }, - { - Name: "CONSUL_API_TIMEOUT", - Value: "5s", - }, - { - Name: "CONSUL_NODE_NAME", - Value: "$(NODE_NAME)-virtual", - }, - { - Name: "CONSUL_LOGIN_AUTH_METHOD", - Value: "auth-method", - }, - { - Name: "CONSUL_LOGIN_BEARER_TOKEN_FILE", - Value: "/var/run/secrets/kubernetes.io/serviceaccount/token", - }, - { - Name: "CONSUL_LOGIN_META", - Value: "pod=$(POD_NAMESPACE)/$(POD_NAME)", - }, - { - Name: "CONSUL_LOGIN_NAMESPACE", - Value: "default", - }, - { - Name: "CONSUL_LOGIN_PARTITION", - Value: "non-default", - }, - { - Name: "CONSUL_NAMESPACE", - Value: "k8snamespace", - }, - { - Name: "CONSUL_PARTITION", - Value: "non-default", - }, - }, - }, - } - - for _, tt := range cases { - t.Run(tt.Name, func(t *testing.T) { - h := tt.Webhook - h.LogLevel = "info" - container, err := h.containerInit(testNS, *tt.Pod(minimal()), multiPortInfo{}) - require.NoError(t, err) - actual := strings.Join(container.Command, " ") - require.Equal(t, tt.Cmd, actual) - if tt.ExpEnv != nil { - require.Equal(t, tt.ExpEnv, container.Env[3:]) - } - }) - } -} - -func TestHandlerContainerInit_Multiport(t *testing.T) { - minimal := func() *corev1.Pod { - return &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - constants.AnnotationService: "web,web-admin", - }, - }, - - Spec: corev1.PodSpec{ - Volumes: []corev1.Volume{ - { - Name: "web-admin-service-account", - }, - }, - Containers: []corev1.Container{ - { - Name: "web", - }, - { - Name: "web-side", - }, - { - Name: "web-admin", - }, - { - Name: "web-admin-side", - }, - { - Name: "auth-method-secret", - VolumeMounts: []corev1.VolumeMount{ - { - Name: "service-account-secret", - MountPath: "/var/run/secrets/kubernetes.io/serviceaccount", - }, - }, - }, - }, - ServiceAccountName: "web", - }, - } - } - - cases := []struct { - Name string - Pod func(*corev1.Pod) *corev1.Pod - Webhook MeshWebhook - NumInitContainers int - MultiPortInfos []multiPortInfo - Cmd []string // Strings.Contains test - ExpEnvVars []corev1.EnvVar - }{ - { - "Whole template, multiport", - func(pod *corev1.Pod) *corev1.Pod { - return pod - }, - MeshWebhook{ - LogLevel: "info", - ConsulAddress: "10.0.0.0", - ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 8502}, - }, - 2, - []multiPortInfo{ - { - serviceIndex: 0, - serviceName: "web", - }, - { - serviceIndex: 1, - serviceName: "web-admin", - }, - }, - []string{`/bin/sh -ec consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ - -log-level=info \ - -log-json=false \ - -multiport=true \ - 
-proxy-id-file=/consul/connect-inject/proxyid-web \ - -service-name="web" \`, - - `/bin/sh -ec consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ - -log-level=info \ - -log-json=false \ - -multiport=true \ - -proxy-id-file=/consul/connect-inject/proxyid-web-admin \ - -service-name="web-admin" \`, - }, - nil, - }, - { - "Whole template, multiport, auth method", - func(pod *corev1.Pod) *corev1.Pod { - return pod - }, - MeshWebhook{ - AuthMethod: "auth-method", - ConsulAddress: "10.0.0.0", - ConsulConfig: &consul.Config{HTTPPort: 8500, GRPCPort: 8502, APITimeout: 5 * time.Second}, - LogLevel: "info", - }, - 2, - []multiPortInfo{ - { - serviceIndex: 0, - serviceName: "web", - }, - { - serviceIndex: 1, - serviceName: "web-admin", - }, - }, - []string{`/bin/sh -ec consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ - -log-level=info \ - -log-json=false \ - -service-account-name="web" \ - -service-name="web" \ - -multiport=true \ - -proxy-id-file=/consul/connect-inject/proxyid-web \`, - - `/bin/sh -ec consul-k8s-control-plane connect-init -pod-name=${POD_NAME} -pod-namespace=${POD_NAMESPACE} \ - -log-level=info \ - -log-json=false \ - -service-account-name="web-admin" \ - -service-name="web-admin" \ - -multiport=true \ - -proxy-id-file=/consul/connect-inject/proxyid-web-admin \`, - }, - []corev1.EnvVar{ - { - Name: "CONSUL_LOGIN_BEARER_TOKEN_FILE", - Value: "/var/run/secrets/kubernetes.io/serviceaccount/token", - }, - { - Name: "CONSUL_LOGIN_BEARER_TOKEN_FILE", - Value: "/consul/serviceaccount-web-admin/token", - }, - }, - }, - } - - for _, tt := range cases { - t.Run(tt.Name, func(t *testing.T) { - h := tt.Webhook - for i := 0; i < tt.NumInitContainers; i++ { - container, err := h.containerInit(testNS, *tt.Pod(minimal()), tt.MultiPortInfos[i]) - require.NoError(t, err) - actual := strings.Join(container.Command, " ") - require.Equal(t, tt.Cmd[i], actual) - if tt.ExpEnvVars != nil { - require.Contains(t, container.Env, tt.ExpEnvVars[i]) - } - } - }) - } -} - -// If TLSEnabled is set, -// Consul addresses should use HTTPS -// and CA cert should be set as env variable if provided. -// Additionally, test that the init container is correctly configured -// when http or gRPC ports are different from defaults. 
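As the two expected commands above show, a multiport pod gets one init container per service, each with a service-specific name and proxy-id file. A small sketch of the derivation (the base name and path format come straight from the deleted `container_init.go`):

```go
package main

import "fmt"

// initContainerName mirrors injectInitContainerName handling: multiport pods
// suffix the base name with the service, single-port pods keep it plain.
func initContainerName(base, serviceName string) string {
	if serviceName == "" {
		return base
	}
	return fmt.Sprintf("%s-%s", base, serviceName)
}

// proxyIDFile mirrors the -proxy-id-file path rendered by the template.
func proxyIDFile(serviceName string) string {
	return "/consul/connect-inject/proxyid-" + serviceName
}

func main() {
	for _, svc := range []string{"web", "web-admin"} {
		fmt.Println(initContainerName("consul-connect-inject-init", svc), proxyIDFile(svc))
	}
}
```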
-// If TLSEnabled is set,
-// Consul addresses should use HTTPS
-// and the CA cert should be set as an env variable if provided.
-// Additionally, test that the init container is correctly configured
-// when HTTP or gRPC ports are different from the defaults.
-func TestHandlerContainerInit_WithTLSAndCustomPorts(t *testing.T) {
-  for _, caProvided := range []bool{true, false} {
-    name := fmt.Sprintf("ca provided: %t", caProvided)
-    t.Run(name, func(t *testing.T) {
-      w := MeshWebhook{
-        ConsulAddress: "10.0.0.0",
-        TLSEnabled:    true,
-        ConsulConfig:  &consul.Config{HTTPPort: 443, GRPCPort: 8503},
-      }
-      if caProvided {
-        w.ConsulCACert = "consul-ca-cert"
-      }
-      pod := &corev1.Pod{
-        ObjectMeta: metav1.ObjectMeta{
-          Annotations: map[string]string{
-            constants.AnnotationService: "foo",
-          },
-        },
-
-        Spec: corev1.PodSpec{
-          Containers: []corev1.Container{
-            {
-              Name: "web",
-            },
-          },
-        },
-      }
-      container, err := w.containerInit(testNS, *pod, multiPortInfo{})
-      require.NoError(t, err)
-      require.Equal(t, "CONSUL_ADDRESSES", container.Env[3].Name)
-      require.Equal(t, w.ConsulAddress, container.Env[3].Value)
-      require.Equal(t, "CONSUL_GRPC_PORT", container.Env[4].Name)
-      require.Equal(t, fmt.Sprintf("%d", w.ConsulConfig.GRPCPort), container.Env[4].Value)
-      require.Equal(t, "CONSUL_HTTP_PORT", container.Env[5].Name)
-      require.Equal(t, fmt.Sprintf("%d", w.ConsulConfig.HTTPPort), container.Env[5].Value)
-      if w.TLSEnabled {
-        require.Equal(t, "CONSUL_USE_TLS", container.Env[8].Name)
-        require.Equal(t, "true", container.Env[8].Value)
-        if caProvided {
-          require.Equal(t, "CONSUL_CACERT_PEM", container.Env[9].Name)
-          require.Equal(t, "consul-ca-cert", container.Env[9].Value)
-        } else {
-          for _, ev := range container.Env {
-            if ev.Name == "CONSUL_CACERT_PEM" {
-              require.Empty(t, ev.Value)
-            }
-          }
-        }
-      }
-
-    })
-  }
-}
-
-func TestHandlerContainerInit_Resources(t *testing.T) {
-  w := MeshWebhook{
-    InitContainerResources: corev1.ResourceRequirements{
-      Requests: corev1.ResourceList{
-        corev1.ResourceCPU:    resource.MustParse("10m"),
-        corev1.ResourceMemory: resource.MustParse("10Mi"),
-      },
-      Limits: corev1.ResourceList{
-        corev1.ResourceCPU:    resource.MustParse("20m"),
-        corev1.ResourceMemory: resource.MustParse("25Mi"),
-      },
-    },
-    ConsulConfig: &consul.Config{HTTPPort: 8500, APITimeout: 5 * time.Second},
-  }
-  pod := &corev1.Pod{
-    ObjectMeta: metav1.ObjectMeta{
-      Annotations: map[string]string{
-        constants.AnnotationService: "foo",
-      },
-    },
-
-    Spec: corev1.PodSpec{
-      Containers: []corev1.Container{
-        {
-          Name: "web",
-        },
-      },
-    },
-  }
-  container, err := w.containerInit(testNS, *pod, multiPortInfo{})
-  require.NoError(t, err)
-  require.Equal(t, corev1.ResourceRequirements{
-    Limits: corev1.ResourceList{
-      corev1.ResourceCPU:    resource.MustParse("20m"),
-      corev1.ResourceMemory: resource.MustParse("25Mi"),
-    },
-    Requests: corev1.ResourceList{
-      corev1.ResourceCPU:    resource.MustParse("10m"),
-      corev1.ResourceMemory: resource.MustParse("10Mi"),
-    },
-  }, container.Resources)
-}
-
-var testNS = corev1.Namespace{
-  ObjectMeta: metav1.ObjectMeta{
-    Name: k8sNamespace,
-  },
-}
-
-func minimal() *corev1.Pod {
-  return &corev1.Pod{
-    ObjectMeta: metav1.ObjectMeta{
-      Namespace: namespaces.DefaultNamespace,
-      Name:      "minimal",
-      Annotations: map[string]string{
-        constants.AnnotationService: "foo",
-      },
-    },
-
-    Spec: corev1.PodSpec{
-      Containers: []corev1.Container{
-        {
-          Name: "web",
-        },
-        {
-          Name: "web-side",
-        },
-      },
-    },
-  }
-}
"k8s.io/utils/pointer" -) - -const ( - // These defaults are taken from the /etc/resolv.conf man page - // and are used by the dns library. - defaultDNSOptionNdots = 1 - defaultDNSOptionTimeout = 5 - defaultDNSOptionAttempts = 2 - - // defaultEtcResolvConfFile is the default location of the /etc/resolv.conf file. - defaultEtcResolvConfFile = "/etc/resolv.conf" -) - -func (w *MeshWebhook) configureDNS(pod *corev1.Pod, k8sNS string) error { - // First, we need to determine the nameservers configured in this cluster from /etc/resolv.conf. - etcResolvConf := defaultEtcResolvConfFile - if w.etcResolvFile != "" { - etcResolvConf = w.etcResolvFile - } - cfg, err := dns.ClientConfigFromFile(etcResolvConf) - if err != nil { - return err - } - - // Set DNS policy on the pod to None because we want DNS to work according to the config we will provide. - pod.Spec.DNSPolicy = corev1.DNSNone - - // Set the consul-dataplane's DNS server as the first server in the list (i.e. localhost). - // We want to do that so that when consul cannot resolve the record, we will fall back to the nameservers - // configured in our /etc/resolv.conf. It's important to add Consul DNS as the first nameserver because - // if we put kube DNS first, it will return NXDOMAIN response and a DNS client will not fall back to other nameservers. - if pod.Spec.DNSConfig == nil { - nameservers := []string{consulDataplaneDNSBindHost} - nameservers = append(nameservers, cfg.Servers...) - var options []corev1.PodDNSConfigOption - if cfg.Ndots != defaultDNSOptionNdots { - ndots := strconv.Itoa(cfg.Ndots) - options = append(options, corev1.PodDNSConfigOption{ - Name: "ndots", - Value: &ndots, - }) - } - if cfg.Timeout != defaultDNSOptionTimeout { - options = append(options, corev1.PodDNSConfigOption{ - Name: "timeout", - Value: pointer.String(strconv.Itoa(cfg.Timeout)), - }) - } - if cfg.Attempts != defaultDNSOptionAttempts { - options = append(options, corev1.PodDNSConfigOption{ - Name: "attempts", - Value: pointer.String(strconv.Itoa(cfg.Attempts)), - }) - } - - // Replace release namespace in the searches with the pod namespace. - // This is so that the searches we generate will be for the pod's namespace - // instead of the namespace of the connect-injector. E.g. instead of - // consul.svc.cluster.local it should be .svc.cluster.local. - var searches []string - // Kubernetes will add a search domain for .svc.cluster.local so we can always - // expect it to be there. See https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#namespaces-of-services. 
- consulReleaseNSSearchDomain := fmt.Sprintf("%s.svc.cluster.local", w.ReleaseNamespace) - for _, search := range cfg.Search { - if search == consulReleaseNSSearchDomain { - searches = append(searches, fmt.Sprintf("%s.svc.cluster.local", k8sNS)) - } else { - searches = append(searches, search) - } - } - - pod.Spec.DNSConfig = &corev1.PodDNSConfig{ - Nameservers: nameservers, - Searches: searches, - Options: options, - } - } else { - return fmt.Errorf("DNS redirection to Consul is not supported with an already defined DNSConfig on the pod") - } - return nil -} diff --git a/control-plane/connect-inject/webhook/dns_test.go b/control-plane/connect-inject/webhook/dns_test.go deleted file mode 100644 index d6392c5317..0000000000 --- a/control-plane/connect-inject/webhook/dns_test.go +++ /dev/null @@ -1,102 +0,0 @@ -package webhook - -import ( - "os" - "testing" - - "github.com/stretchr/testify/require" - corev1 "k8s.io/api/core/v1" - "k8s.io/utils/pointer" -) - -func TestMeshWebhook_configureDNS(t *testing.T) { - cases := map[string]struct { - etcResolv string - expDNSConfig *corev1.PodDNSConfig - }{ - "empty /etc/resolv.conf file": { - expDNSConfig: &corev1.PodDNSConfig{ - Nameservers: []string{"127.0.0.1"}, - }, - }, - "one nameserver": { - etcResolv: `nameserver 1.1.1.1`, - expDNSConfig: &corev1.PodDNSConfig{ - Nameservers: []string{"127.0.0.1", "1.1.1.1"}, - }, - }, - "mutiple nameservers, searches, and options": { - etcResolv: ` -nameserver 1.1.1.1 -nameserver 2.2.2.2 -search foo.bar bar.baz -options ndots:5 timeout:6 attempts:3`, - expDNSConfig: &corev1.PodDNSConfig{ - Nameservers: []string{"127.0.0.1", "1.1.1.1", "2.2.2.2"}, - Searches: []string{"foo.bar", "bar.baz"}, - Options: []corev1.PodDNSConfigOption{ - { - Name: "ndots", - Value: pointer.String("5"), - }, - { - Name: "timeout", - Value: pointer.String("6"), - }, - { - Name: "attempts", - Value: pointer.String("3"), - }, - }, - }, - }, - "replaces release specific search domains": { - etcResolv: ` -nameserver 1.1.1.1 -nameserver 2.2.2.2 -search consul.svc.cluster.local svc.cluster.local cluster.local -options ndots:5`, - expDNSConfig: &corev1.PodDNSConfig{ - Nameservers: []string{"127.0.0.1", "1.1.1.1", "2.2.2.2"}, - Searches: []string{"default.svc.cluster.local", "svc.cluster.local", "cluster.local"}, - Options: []corev1.PodDNSConfigOption{ - { - Name: "ndots", - Value: pointer.String("5"), - }, - }, - }, - }, - } - - for name, c := range cases { - t.Run(name, func(t *testing.T) { - etcResolvFile, err := os.CreateTemp("", "") - require.NoError(t, err) - t.Cleanup(func() { - _ = os.RemoveAll(etcResolvFile.Name()) - }) - _, err = etcResolvFile.WriteString(c.etcResolv) - require.NoError(t, err) - w := MeshWebhook{ - etcResolvFile: etcResolvFile.Name(), - ReleaseNamespace: "consul", - } - - pod := minimal() - err = w.configureDNS(pod, "default") - require.NoError(t, err) - require.Equal(t, corev1.DNSNone, pod.Spec.DNSPolicy) - require.Equal(t, c.expDNSConfig, pod.Spec.DNSConfig) - }) - } -} - -func TestMeshWebhook_configureDNS_error(t *testing.T) { - w := MeshWebhook{} - - pod := minimal() - pod.Spec.DNSConfig = &corev1.PodDNSConfig{Nameservers: []string{"1.1.1.1"}} - err := w.configureDNS(pod, "default") - require.EqualError(t, err, "DNS redirection to Consul is not supported with an already defined DNSConfig on the pod") -} diff --git a/control-plane/consul/consul.go b/control-plane/consul/consul.go index bb46308ff8..ad4feec785 100644 --- a/control-plane/consul/consul.go +++ b/control-plane/consul/consul.go @@ -6,17 +6,9 @@ import ( 
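The next hunks remove the connection-manager-based client construction from the `consul` package. For reference, callers turned a server-connection-manager watcher into a plain API client roughly like this; the sketch only compiles against the pre-change tree (where `consul.Config`, `NewClientFromConnMgr`, and `ServerConnectionManager` still exist), and the port/timeout values simply echo those used by the tests in this diff:

```go
package main

import (
	"time"

	"github.com/hashicorp/consul-k8s/control-plane/consul"
	capi "github.com/hashicorp/consul/api"
)

// newClient shows, per the helpers deleted below, how reconcilers obtained a
// Consul API client from a watcher's current server state.
func newClient(watcher consul.ServerConnectionManager) (*capi.Client, error) {
	cfg := &consul.Config{
		APIClientConfig: capi.DefaultConfig(),
		HTTPPort:        8500,
		GRPCPort:        8502,
		APITimeout:      5 * time.Second,
	}
	// Internally this calls watcher.State() and points the API client at the
	// discovered address.
	return consul.NewClientFromConnMgr(cfg, watcher)
}

func main() {}
```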
"time" "github.com/hashicorp/consul-k8s/control-plane/version" - "github.com/hashicorp/consul-server-connection-manager/discovery" capi "github.com/hashicorp/consul/api" ) -//go:generate mockery --name ServerConnectionManager --inpkg -type ServerConnectionManager interface { - State() (discovery.State, error) - Run() - Stop() -} - // NewClient returns a Consul API client. It adds a required User-Agent // header that describes the version of consul-k8s making the call. func NewClient(config *capi.Config, consulAPITimeout time.Duration) (*capi.Client, error) { @@ -57,36 +49,3 @@ func NewClient(config *capi.Config, consulAPITimeout time.Duration) (*capi.Clien client.AddHeader("User-Agent", fmt.Sprintf("consul-k8s/%s", version.GetHumanVersion())) return client, nil } - -type Config struct { - APIClientConfig *capi.Config - HTTPPort int - GRPCPort int - APITimeout time.Duration -} - -// todo (ishustava): replace all usages of this one. -// NewClientFromConnMgrState creates a new API client with an IP address from the state -// of the consul-server-connection-manager. -func NewClientFromConnMgrState(config *Config, state discovery.State) (*capi.Client, error) { - ipAddress := state.Address.IP - config.APIClientConfig.Address = fmt.Sprintf("%s:%d", ipAddress.String(), config.HTTPPort) - if state.Token != "" { - config.APIClientConfig.Token = state.Token - } - return NewClient(config.APIClientConfig, config.APITimeout) -} - -// NewClientFromConnMgr creates a new API client by first getting the state of the passed watcher. -func NewClientFromConnMgr(config *Config, watcher ServerConnectionManager) (*capi.Client, error) { - // Create a new consul client. - serverState, err := watcher.State() - if err != nil { - return nil, err - } - consulClient, err := NewClientFromConnMgrState(config, serverState) - if err != nil { - return nil, err - } - return consulClient, nil -} diff --git a/control-plane/consul/mock_ServerConnectionManager.go b/control-plane/consul/mock_ServerConnectionManager.go deleted file mode 100644 index d0189c5380..0000000000 --- a/control-plane/consul/mock_ServerConnectionManager.go +++ /dev/null @@ -1,59 +0,0 @@ -// Code generated by mockery v2.14.0. DO NOT EDIT. - -package consul - -import ( - discovery "github.com/hashicorp/consul-server-connection-manager/discovery" - mock "github.com/stretchr/testify/mock" -) - -// MockServerConnectionManager is an autogenerated mock type for the ServerConnectionManager type -type MockServerConnectionManager struct { - mock.Mock -} - -// Run provides a mock function with given fields: -func (_m *MockServerConnectionManager) Run() { - _m.Called() -} - -// State provides a mock function with given fields: -func (_m *MockServerConnectionManager) State() (discovery.State, error) { - ret := _m.Called() - - var r0 discovery.State - if rf, ok := ret.Get(0).(func() discovery.State); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(discovery.State) - } - - var r1 error - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Stop provides a mock function with given fields: -func (_m *MockServerConnectionManager) Stop() { - _m.Called() -} - -type mockConstructorTestingTNewMockServerConnectionManager interface { - mock.TestingT - Cleanup(func()) -} - -// NewMockServerConnectionManager creates a new instance of MockServerConnectionManager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewMockServerConnectionManager(t mockConstructorTestingTNewMockServerConnectionManager) *MockServerConnectionManager { - mock := &MockServerConnectionManager{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/control-plane/controller/configentry_controller.go b/control-plane/controller/configentry_controller.go index 8ae90a56a6..94206c8f4d 100644 --- a/control-plane/controller/configentry_controller.go +++ b/control-plane/controller/configentry_controller.go @@ -9,7 +9,6 @@ import ( "github.com/go-logr/logr" "github.com/hashicorp/consul-k8s/control-plane/api/common" - "github.com/hashicorp/consul-k8s/control-plane/consul" "github.com/hashicorp/consul-k8s/control-plane/namespaces" capi "github.com/hashicorp/consul/api" "golang.org/x/time/rate" @@ -51,11 +50,7 @@ type Controller interface { // all config entry types, e.g. ServiceDefaults, ServiceResolver, etc, since // they share the same reconcile behaviour. type ConfigEntryController struct { - // ConsulClientConfig is the config for the Consul API client. - ConsulClientConfig *consul.Config - - // ConsulServerConnMgr is the watcher for the Consul server addresses. - ConsulServerConnMgr consul.ServerConnectionManager + ConsulClient *capi.Client // DatacenterName indicates the Consul Datacenter name the controller is // operating in. Adds this value as metadata on managed resources. @@ -102,18 +97,6 @@ func (r *ConfigEntryController) ReconcileEntry(ctx context.Context, crdCtrl Cont return ctrl.Result{}, err } - // Create Consul client for this reconcile. - serverState, err := r.ConsulServerConnMgr.State() - if err != nil { - logger.Error(err, "failed to get Consul server state", "name", req.Name, "ns", req.Namespace) - return ctrl.Result{}, err - } - consulClient, err := consul.NewClientFromConnMgrState(r.ConsulClientConfig, serverState) - if err != nil { - logger.Error(err, "failed to create Consul API client", "name", req.Name, "ns", req.Namespace) - return ctrl.Result{}, err - } - consulEntry := configEntry.ToConsul(r.DatacenterName) if configEntry.GetDeletionTimestamp().IsZero() { @@ -131,7 +114,7 @@ func (r *ConfigEntryController) ReconcileEntry(ctx context.Context, crdCtrl Cont if containsString(configEntry.GetFinalizers(), FinalizerName) { logger.Info("deletion event") // Check to see if consul has config entry with the same name - entry, _, err := consulClient.ConfigEntries().Get(configEntry.ConsulKind(), configEntry.ConsulName(), &capi.QueryOptions{ + entry, _, err := r.ConsulClient.ConfigEntries().Get(configEntry.ConsulKind(), configEntry.ConsulName(), &capi.QueryOptions{ Namespace: r.consulNamespace(consulEntry, configEntry.ConsulMirroringNS(), configEntry.ConsulGlobalResource()), }) @@ -142,7 +125,7 @@ func (r *ConfigEntryController) ReconcileEntry(ctx context.Context, crdCtrl Cont } else if err == nil { // Only delete the resource from Consul if it is owned by our datacenter. 
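For context, tests drove the generated mock above via testify's expectation API: stub `State()` and hand the mock to anything expecting a `consul.ServerConnectionManager`. A rough sketch, again only meaningful against the pre-change tree where these types exist (a zero `discovery.State` is assumed to be enough for the illustration):

```go
package consul_test

import (
	"testing"

	"github.com/hashicorp/consul-k8s/control-plane/consul"
	"github.com/hashicorp/consul-server-connection-manager/discovery"
)

// Sketch of typical mock usage; the constructor registers AssertExpectations
// as a test cleanup, so every stubbed call must actually happen.
func TestMockWatcherSketch(t *testing.T) {
	m := consul.NewMockServerConnectionManager(t)
	m.On("State").Return(discovery.State{}, nil)

	state, err := m.State()
	if err != nil {
		t.Fatal(err)
	}
	_ = state // pass m wherever a ServerConnectionManager is needed
}
```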
diff --git a/control-plane/controller/configentry_controller.go b/control-plane/controller/configentry_controller.go
index 8ae90a56a6..94206c8f4d 100644
--- a/control-plane/controller/configentry_controller.go
+++ b/control-plane/controller/configentry_controller.go
@@ -9,7 +9,6 @@ import (
 	"github.com/go-logr/logr"
 	"github.com/hashicorp/consul-k8s/control-plane/api/common"
-	"github.com/hashicorp/consul-k8s/control-plane/consul"
 	"github.com/hashicorp/consul-k8s/control-plane/namespaces"
 	capi "github.com/hashicorp/consul/api"
 	"golang.org/x/time/rate"
@@ -51,11 +50,7 @@ type Controller interface {
 // all config entry types, e.g. ServiceDefaults, ServiceResolver, etc, since
 // they share the same reconcile behaviour.
 type ConfigEntryController struct {
-	// ConsulClientConfig is the config for the Consul API client.
-	ConsulClientConfig *consul.Config
-
-	// ConsulServerConnMgr is the watcher for the Consul server addresses.
-	ConsulServerConnMgr consul.ServerConnectionManager
+	ConsulClient *capi.Client
 
 	// DatacenterName indicates the Consul Datacenter name the controller is
 	// operating in. Adds this value as metadata on managed resources.
@@ -102,18 +97,6 @@ func (r *ConfigEntryController) ReconcileEntry(ctx context.Context, crdCtrl Cont
 		return ctrl.Result{}, err
 	}
 
-	// Create Consul client for this reconcile.
-	serverState, err := r.ConsulServerConnMgr.State()
-	if err != nil {
-		logger.Error(err, "failed to get Consul server state", "name", req.Name, "ns", req.Namespace)
-		return ctrl.Result{}, err
-	}
-	consulClient, err := consul.NewClientFromConnMgrState(r.ConsulClientConfig, serverState)
-	if err != nil {
-		logger.Error(err, "failed to create Consul API client", "name", req.Name, "ns", req.Namespace)
-		return ctrl.Result{}, err
-	}
-
 	consulEntry := configEntry.ToConsul(r.DatacenterName)
 
 	if configEntry.GetDeletionTimestamp().IsZero() {
@@ -131,7 +114,7 @@ func (r *ConfigEntryController) ReconcileEntry(ctx context.Context, crdCtrl Cont
 		if containsString(configEntry.GetFinalizers(), FinalizerName) {
 			logger.Info("deletion event")
 			// Check to see if consul has config entry with the same name
-			entry, _, err := consulClient.ConfigEntries().Get(configEntry.ConsulKind(), configEntry.ConsulName(), &capi.QueryOptions{
+			entry, _, err := r.ConsulClient.ConfigEntries().Get(configEntry.ConsulKind(), configEntry.ConsulName(), &capi.QueryOptions{
 				Namespace: r.consulNamespace(consulEntry, configEntry.ConsulMirroringNS(), configEntry.ConsulGlobalResource()),
 			})
 
@@ -142,7 +125,7 @@ func (r *ConfigEntryController) ReconcileEntry(ctx context.Context, crdCtrl Cont
 			} else if err == nil {
 				// Only delete the resource from Consul if it is owned by our datacenter.
 				if entry.GetMeta()[common.DatacenterKey] == r.DatacenterName {
-					_, err := consulClient.ConfigEntries().Delete(configEntry.ConsulKind(), configEntry.ConsulName(), &capi.WriteOptions{
+					_, err := r.ConsulClient.ConfigEntries().Delete(configEntry.ConsulKind(), configEntry.ConsulName(), &capi.WriteOptions{
 						Namespace: r.consulNamespace(consulEntry, configEntry.ConsulMirroringNS(), configEntry.ConsulGlobalResource()),
 					})
 					if err != nil {
@@ -167,7 +150,7 @@ func (r *ConfigEntryController) ReconcileEntry(ctx context.Context, crdCtrl Cont
 	}
 
 	// Check to see if consul has config entry with the same name
-	entry, _, err := consulClient.ConfigEntries().Get(configEntry.ConsulKind(), configEntry.ConsulName(), &capi.QueryOptions{
+	entry, _, err := r.ConsulClient.ConfigEntries().Get(configEntry.ConsulKind(), configEntry.ConsulName(), &capi.QueryOptions{
 		Namespace: r.consulNamespace(consulEntry, configEntry.ConsulMirroringNS(), configEntry.ConsulGlobalResource()),
 	})
 	// If a config entry with this name does not exist
@@ -178,7 +161,7 @@ func (r *ConfigEntryController) ReconcileEntry(ctx context.Context, crdCtrl Cont
 		// destination consul namespace first.
 		if r.EnableConsulNamespaces {
 			consulNS := r.consulNamespace(consulEntry, configEntry.ConsulMirroringNS(), configEntry.ConsulGlobalResource())
-			created, err := namespaces.EnsureExists(consulClient, consulNS, r.CrossNSACLPolicy)
+			created, err := namespaces.EnsureExists(r.ConsulClient, consulNS, r.CrossNSACLPolicy)
 			if err != nil {
 				return r.syncFailed(ctx, logger, crdCtrl, configEntry, ConsulAgentError,
 					fmt.Errorf("creating consul namespace %q: %w", consulNS, err))
@@ -189,7 +172,7 @@ func (r *ConfigEntryController) ReconcileEntry(ctx context.Context, crdCtrl Cont
 		}
 
 		// Create the config entry
-		_, writeMeta, err := consulClient.ConfigEntries().Set(consulEntry, &capi.WriteOptions{
+		_, writeMeta, err := r.ConsulClient.ConfigEntries().Set(consulEntry, &capi.WriteOptions{
 			Namespace: r.consulNamespace(consulEntry, configEntry.ConsulMirroringNS(), configEntry.ConsulGlobalResource()),
 		})
 		if err != nil {
@@ -237,7 +220,7 @@ func (r *ConfigEntryController) ReconcileEntry(ctx context.Context, crdCtrl Cont
 		}
 
 		logger.Info("config entry does not match consul", "modify-index", entry.GetModifyIndex())
-		_, writeMeta, err := consulClient.ConfigEntries().Set(consulEntry, &capi.WriteOptions{
+		_, writeMeta, err := r.ConsulClient.ConfigEntries().Set(consulEntry, &capi.WriteOptions{
 			Namespace: r.consulNamespace(consulEntry, configEntry.ConsulMirroringNS(), configEntry.ConsulGlobalResource()),
 		})
 		if err != nil {
@@ -251,7 +234,7 @@ func (r *ConfigEntryController) ReconcileEntry(ctx context.Context, crdCtrl Cont
 		// matches the entry in Kubernetes. We just need to update the metadata
 		// of the entry in Consul to say that it's now managed by Kubernetes.
 		logger.Info("migrating config entry to be managed by Kubernetes")
-		_, writeMeta, err := consulClient.ConfigEntries().Set(consulEntry, &capi.WriteOptions{
+		_, writeMeta, err := r.ConsulClient.ConfigEntries().Set(consulEntry, &capi.WriteOptions{
 			Namespace: r.consulNamespace(consulEntry, configEntry.ConsulMirroringNS(), configEntry.ConsulGlobalResource()),
 		})
 		if err != nil {
- err := fakeClient.Get(ctx, types.NamespacedName{ + err = fakeClient.Get(ctx, types.NamespacedName{ Namespace: c.SourceKubeNS, Name: in.KubeResource.KubernetesName(), }, in.KubeResource) req.NoError(err) // Update the resource. - err = in.UpdateResourceFunc(fakeClient, ctx, in.KubeResource) + err := in.UpdateResourceFunc(fakeClient, ctx, in.KubeResource) req.NoError(err) resp, err := r.Reconcile(ctx, ctrl.Request{ @@ -704,9 +712,14 @@ func TestConfigEntryController_deletesConfigEntry_consulNamespaces(tt *testing.T s := runtime.NewScheme() s.AddKnownTypes(v1alpha1.GroupVersion, in.KubeResource) - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - testClient.TestServer.WaitForServiceIntentions(t) - consulClient := testClient.APIClient + consul, err := testutil.NewTestServerConfigT(t, nil) + req.NoError(err) + defer consul.Stop() + consul.WaitForServiceIntentions(t) + consulClient, err := capi.NewClient(&capi.Config{ + Address: consul.HTTPAddr, + }) + req.NoError(err) fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(in.KubeResource).Build() @@ -715,8 +728,7 @@ func TestConfigEntryController_deletesConfigEntry_consulNamespaces(tt *testing.T logrtest.TestLogger{T: t}, s, &controller.ConfigEntryController{ - ConsulClientConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, + ConsulClient: consulClient, EnableConsulNamespaces: true, EnableNSMirroring: c.Mirror, NSMirroringPrefix: c.MirrorPrefix, diff --git a/control-plane/controller/configentry_controller_test.go b/control-plane/controller/configentry_controller_test.go index 83b9e3eecf..1df9364d5f 100644 --- a/control-plane/controller/configentry_controller_test.go +++ b/control-plane/controller/configentry_controller_test.go @@ -12,9 +12,8 @@ import ( "github.com/google/go-cmp/cmp/cmpopts" "github.com/hashicorp/consul-k8s/control-plane/api/common" "github.com/hashicorp/consul-k8s/control-plane/api/v1alpha1" - "github.com/hashicorp/consul-k8s/control-plane/consul" - "github.com/hashicorp/consul-k8s/control-plane/helper/test" capi "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/sdk/testutil" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -40,7 +39,7 @@ func TestConfigEntryControllers_createsConfigEntry(t *testing.T) { consulKind string consulPrereqs []capi.ConfigEntry configEntryResource common.ConfigEntryResource - reconciler func(client.Client, *consul.Config, consul.ServerConnectionManager, logr.Logger) testReconciler + reconciler func(client.Client, *capi.Client, logr.Logger) testReconciler compare func(t *testing.T, consul capi.ConfigEntry) }{ { @@ -58,14 +57,13 @@ func TestConfigEntryControllers_createsConfigEntry(t *testing.T) { LocalRequestTimeoutMs: 15000, }, }, - reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { return &ServiceDefaultsController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: cfg, - ConsulServerConnMgr: watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } }, @@ -92,14 +90,13 @@ func TestConfigEntryControllers_createsConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + 
reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { return &ServiceResolverController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: cfg, - ConsulServerConnMgr: watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } }, @@ -123,14 +120,13 @@ func TestConfigEntryControllers_createsConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { return &ProxyDefaultsController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: cfg, - ConsulServerConnMgr: watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } }, @@ -154,14 +150,13 @@ func TestConfigEntryControllers_createsConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { return &MeshController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: cfg, - ConsulServerConnMgr: watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } }, @@ -198,14 +193,13 @@ func TestConfigEntryControllers_createsConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { return &ServiceRouterController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: cfg, - ConsulServerConnMgr: watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } }, @@ -238,14 +232,13 @@ func TestConfigEntryControllers_createsConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { return &ServiceSplitterController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: cfg, - ConsulServerConnMgr: watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } }, @@ -317,14 +310,13 @@ func TestConfigEntryControllers_createsConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { return &ServiceIntentionsController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: cfg, - ConsulServerConnMgr: watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } }, @@ -366,14 +358,13 @@ func TestConfigEntryControllers_createsConfigEntry(t *testing.T) { }, }, }, - 
reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { return &IngressGatewayController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: cfg, - ConsulServerConnMgr: watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } }, @@ -406,14 +397,13 @@ func TestConfigEntryControllers_createsConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { return &TerminatingGatewayController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: cfg, - ConsulServerConnMgr: watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } }, @@ -439,17 +429,22 @@ func TestConfigEntryControllers_createsConfigEntry(t *testing.T) { s.AddKnownTypes(v1alpha1.GroupVersion, c.configEntryResource) fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(c.configEntryResource).Build() - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - testClient.TestServer.WaitForServiceIntentions(t) - consulClient := testClient.APIClient + consul, err := testutil.NewTestServerConfigT(t, nil) + req.NoError(err) + defer consul.Stop() + consul.WaitForServiceIntentions(t) + consulClient, err := capi.NewClient(&capi.Config{ + Address: consul.HTTPAddr, + }) + req.NoError(err) for _, configEntry := range c.consulPrereqs { written, _, err := consulClient.ConfigEntries().Set(configEntry, nil) req.NoError(err) req.True(written) } - r := c.reconciler(fakeClient, testClient.Cfg, testClient.Watcher, logrtest.TestLogger{T: t}) + r := c.reconciler(fakeClient, consulClient, logrtest.TestLogger{T: t}) namespacedName := types.NamespacedName{ Namespace: kubeNS, Name: c.configEntryResource.KubernetesName(), @@ -485,7 +480,7 @@ func TestConfigEntryControllers_updatesConfigEntry(t *testing.T) { consulKind string consulPrereqs []capi.ConfigEntry configEntryResource common.ConfigEntryResource - reconciler func(client.Client, *consul.Config, consul.ServerConnectionManager, logr.Logger) testReconciler + reconciler func(client.Client, *capi.Client, logr.Logger) testReconciler updateF func(common.ConfigEntryResource) compare func(t *testing.T, consul capi.ConfigEntry) }{ @@ -501,14 +496,13 @@ func TestConfigEntryControllers_updatesConfigEntry(t *testing.T) { Protocol: "http", }, }, - reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { return &ServiceDefaultsController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: cfg, - ConsulServerConnMgr: watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } }, @@ -536,14 +530,13 @@ func TestConfigEntryControllers_updatesConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + reconciler: 
func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { return &ServiceResolverController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: cfg, - ConsulServerConnMgr: watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } }, @@ -571,14 +564,13 @@ func TestConfigEntryControllers_updatesConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { return &ProxyDefaultsController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: cfg, - ConsulServerConnMgr: watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } }, @@ -606,14 +598,13 @@ func TestConfigEntryControllers_updatesConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { return &MeshController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: cfg, - ConsulServerConnMgr: watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } }, @@ -655,14 +646,13 @@ func TestConfigEntryControllers_updatesConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { return &ServiceSplitterController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: cfg, - ConsulServerConnMgr: watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } }, @@ -713,14 +703,13 @@ func TestConfigEntryControllers_updatesConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { return &ServiceRouterController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: cfg, - ConsulServerConnMgr: watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } }, @@ -787,14 +776,13 @@ func TestConfigEntryControllers_updatesConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { return &ServiceIntentionsController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: cfg, - ConsulServerConnMgr: watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } }, @@ -835,14 +823,13 @@ func TestConfigEntryControllers_updatesConfigEntry(t *testing.T) { }, }, }, - reconciler: 
func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { return &IngressGatewayController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: cfg, - ConsulServerConnMgr: watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } }, @@ -879,14 +866,13 @@ func TestConfigEntryControllers_updatesConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { return &TerminatingGatewayController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: cfg, - ConsulServerConnMgr: watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } }, @@ -916,9 +902,15 @@ func TestConfigEntryControllers_updatesConfigEntry(t *testing.T) { s.AddKnownTypes(v1alpha1.GroupVersion, c.configEntryResource) fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(c.configEntryResource).Build() - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - testClient.TestServer.WaitForServiceIntentions(t) - consulClient := testClient.APIClient + consul, err := testutil.NewTestServerConfigT(t, nil) + req.NoError(err) + defer consul.Stop() + + consul.WaitForServiceIntentions(t) + consulClient, err := capi.NewClient(&capi.Config{ + Address: consul.HTTPAddr, + }) + req.NoError(err) // Create any prereqs. for _, configEntry := range c.consulPrereqs { @@ -942,14 +934,14 @@ func TestConfigEntryControllers_updatesConfigEntry(t *testing.T) { Name: c.configEntryResource.KubernetesName(), } // First get it so we have the latest revision number. - err := fakeClient.Get(ctx, namespacedName, c.configEntryResource) + err = fakeClient.Get(ctx, namespacedName, c.configEntryResource) req.NoError(err) // Update the entry in Kube and run reconcile. 
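Note: every table-driven case in this file now takes the narrower constructor `func(client.Client, *capi.Client, logr.Logger) testReconciler`, and each test repeats the same server scaffolding near the top. A compilable sketch of just that scaffolding, with a hypothetical test name; `testutil.NewTestServerConfigT` needs a `consul` binary on PATH:

```go
package controller_test

import (
	"testing"

	capi "github.com/hashicorp/consul/api"
	"github.com/hashicorp/consul/sdk/testutil"
	"github.com/stretchr/testify/require"
)

// Hypothetical test name; mirrors the per-case setup repeated above.
func TestSketch_directClientSetup(t *testing.T) {
	// Boot a real local Consul agent and wait until config entries
	// can be written.
	consul, err := testutil.NewTestServerConfigT(t, nil)
	require.NoError(t, err)
	defer consul.Stop()
	consul.WaitForServiceIntentions(t)

	// The reconciler under test now receives this client directly.
	consulClient, err := capi.NewClient(&capi.Config{Address: consul.HTTPAddr})
	require.NoError(t, err)

	// Sanity check: listing service-defaults succeeds and is empty
	// before any reconcile has written an entry.
	entries, _, err := consulClient.ConfigEntries().List(capi.ServiceDefaults, nil)
	require.NoError(t, err)
	require.Empty(t, entries)
}
```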
c.updateF(c.configEntryResource) - err = fakeClient.Update(ctx, c.configEntryResource) + err := fakeClient.Update(ctx, c.configEntryResource) req.NoError(err) - r := c.reconciler(fakeClient, testClient.Cfg, testClient.Watcher, logrtest.TestLogger{T: t}) + r := c.reconciler(fakeClient, consulClient, logrtest.TestLogger{T: t}) resp, err := r.Reconcile(ctx, ctrl.Request{ NamespacedName: namespacedName, }) @@ -975,7 +967,7 @@ func TestConfigEntryControllers_deletesConfigEntry(t *testing.T) { consulKind string consulPrereq []capi.ConfigEntry configEntryResourceWithDeletion common.ConfigEntryResource - reconciler func(client.Client, *consul.Config, consul.ServerConnectionManager, logr.Logger) testReconciler + reconciler func(client.Client, *capi.Client, logr.Logger) testReconciler }{ { kubeKind: "ServiceDefaults", @@ -991,14 +983,13 @@ func TestConfigEntryControllers_deletesConfigEntry(t *testing.T) { Protocol: "http", }, }, - reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { return &ServiceDefaultsController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: cfg, - ConsulServerConnMgr: watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } }, @@ -1019,14 +1010,13 @@ func TestConfigEntryControllers_deletesConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { return &ServiceResolverController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: cfg, - ConsulServerConnMgr: watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } }, @@ -1047,14 +1037,13 @@ func TestConfigEntryControllers_deletesConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { return &ProxyDefaultsController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: cfg, - ConsulServerConnMgr: watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } }, @@ -1075,14 +1064,13 @@ func TestConfigEntryControllers_deletesConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { return &MeshController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: cfg, - ConsulServerConnMgr: watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } }, @@ -1117,14 +1105,13 @@ func TestConfigEntryControllers_deletesConfigEntry(t *testing.T) { }, }, - reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + reconciler: 
func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { return &ServiceRouterController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: cfg, - ConsulServerConnMgr: watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } }, @@ -1154,14 +1141,13 @@ func TestConfigEntryControllers_deletesConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { return &ServiceSplitterController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: cfg, - ConsulServerConnMgr: watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } }, @@ -1221,14 +1207,13 @@ func TestConfigEntryControllers_deletesConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { return &ServiceIntentionsController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: cfg, - ConsulServerConnMgr: watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } }, @@ -1260,14 +1245,13 @@ func TestConfigEntryControllers_deletesConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { return &IngressGatewayController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: cfg, - ConsulServerConnMgr: watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } }, @@ -1294,14 +1278,13 @@ func TestConfigEntryControllers_deletesConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, cfg *consul.Config, watcher consul.ServerConnectionManager, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { return &TerminatingGatewayController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: cfg, - ConsulServerConnMgr: watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } }, @@ -1316,9 +1299,15 @@ func TestConfigEntryControllers_deletesConfigEntry(t *testing.T) { s.AddKnownTypes(v1alpha1.GroupVersion, c.configEntryResourceWithDeletion) fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(c.configEntryResourceWithDeletion).Build() - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - testClient.TestServer.WaitForServiceIntentions(t) - consulClient := testClient.APIClient + consul, err := testutil.NewTestServerConfigT(t, nil) + req.NoError(err) + defer consul.Stop() + + consul.WaitForServiceIntentions(t) + consulClient, err := capi.NewClient(&capi.Config{ + Address: consul.HTTPAddr, + }) + req.NoError(err) // Create any prereqs. 
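Note: the delete cases above all funnel into the same Consul call once the Kubernetes resource is marked for deletion. A sketch of that path with a hypothetical `foo` entry, assuming a local agent at the default address:

```go
package main

import (
	"fmt"
	"log"

	capi "github.com/hashicorp/consul/api"
)

func main() {
	client, err := capi.NewClient(capi.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Seed a hypothetical entry, standing in for what an earlier
	// reconcile would have written.
	entry := &capi.ServiceConfigEntry{Kind: capi.ServiceDefaults, Name: "foo", Protocol: "http"}
	if _, _, err := client.ConfigEntries().Set(entry, nil); err != nil {
		log.Fatal(err)
	}

	// The delete the controller performs when the CRD carries a
	// deletion timestamp, before it drops its finalizer.
	if _, err := client.ConfigEntries().Delete(capi.ServiceDefaults, "foo", nil); err != nil {
		log.Fatal(err)
	}

	// A follow-up Get now fails with a 404, which is what the delete
	// tests assert after Reconcile runs.
	if _, _, err := client.ConfigEntries().Get(capi.ServiceDefaults, "foo", nil); err != nil {
		fmt.Println("confirmed gone:", err)
	}
}
```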
for _, configEntry := range c.consulPrereq { @@ -1341,7 +1330,7 @@ func TestConfigEntryControllers_deletesConfigEntry(t *testing.T) { Namespace: kubeNS, Name: c.configEntryResourceWithDeletion.KubernetesName(), } - r := c.reconciler(fakeClient, testClient.Cfg, testClient.Watcher, logrtest.TestLogger{T: t}) + r := c.reconciler(fakeClient, consulClient, logrtest.TestLogger{T: t}) resp, err := r.Reconcile(context.Background(), ctrl.Request{ NamespacedName: namespacedName, }) @@ -1377,22 +1366,18 @@ func TestConfigEntryControllers_errorUpdatesSyncStatus(t *testing.T) { s.AddKnownTypes(v1alpha1.GroupVersion, svcDefaults) fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(svcDefaults).Build() - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - testClient.TestServer.WaitForServiceIntentions(t) - - // Get watcher state to make sure we can get a healthy address. - _, err := testClient.Watcher.State() - require.NoError(t, err) - // Stop the server before calling reconcile imitating a server that's not running. - _ = testClient.TestServer.Stop() - + // Construct a Consul client that will error by giving it + // an unresolvable address. + consulClient, err := capi.NewClient(&capi.Config{ + Address: "incorrect-address", + }) + req.NoError(err) reconciler := &ServiceDefaultsController{ Client: fakeClient, Log: logrtest.TestLogger{T: t}, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } @@ -1406,8 +1391,7 @@ func TestConfigEntryControllers_errorUpdatesSyncStatus(t *testing.T) { }) req.Error(err) - expErr := fmt.Sprintf("Get \"http://127.0.0.1:%d/v1/config/%s/%s\": dial tcp 127.0.0.1:%d: connect: connection refused", - testClient.Cfg.HTTPPort, capi.ServiceDefaults, svcDefaults.ConsulName(), testClient.Cfg.HTTPPort) + expErr := fmt.Sprintf("Get \"http://incorrect-address/v1/config/%s/%s\": dial tcp: lookup incorrect-address", capi.ServiceDefaults, svcDefaults.ConsulName()) req.Contains(err.Error(), expErr) req.False(resp.Requeue) @@ -1450,22 +1434,27 @@ func TestConfigEntryControllers_setsSyncedToTrue(t *testing.T) { // The config entry exists in kube but its status will be nil. fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(svcDefaults).Build() - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - testClient.TestServer.WaitForServiceIntentions(t) - consulClient := testClient.APIClient + consul, err := testutil.NewTestServerConfigT(t, nil) + req.NoError(err) + defer consul.Stop() + + consul.WaitForServiceIntentions(t) + consulClient, err := capi.NewClient(&capi.Config{ + Address: consul.HTTPAddr, + }) + req.NoError(err) reconciler := &ServiceDefaultsController{ Client: fakeClient, Log: logrtest.TestLogger{T: t}, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } // Create the resource in Consul to mimic that it was created // successfully (but its status hasn't been updated). 
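Note: the `errorUpdatesSyncStatus` hunk above swaps a stopped test server for a client pointed at an unresolvable address. `capi.NewClient` does not dial, so construction succeeds and the failure only surfaces on the first request, which makes the expected error string deterministic. A minimal sketch, with a hypothetical entry name:

```go
package main

import (
	"fmt"

	capi "github.com/hashicorp/consul/api"
)

func main() {
	// Construction succeeds even though the address can never
	// resolve: NewClient only validates the configuration.
	client, err := capi.NewClient(&capi.Config{Address: "incorrect-address"})
	if err != nil {
		fmt.Println("unexpected:", err)
		return
	}

	// The first request is what fails, with a stable error of the
	// form the test asserts on:
	// Get "http://incorrect-address/v1/config/...": dial tcp: lookup incorrect-address ...
	_, _, err = client.ConfigEntries().Get(capi.ServiceDefaults, "my-service", nil)
	fmt.Println(err)
}
```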
- _, _, err := consulClient.ConfigEntries().Set(svcDefaults.ToConsul(datacenterName), nil) + _, _, err = consulClient.ConfigEntries().Set(svcDefaults.ToConsul(datacenterName), nil) require.NoError(t, err) namespacedName := types.NamespacedName{ @@ -1522,9 +1511,15 @@ func TestConfigEntryControllers_doesNotCreateUnownedConfigEntry(t *testing.T) { s.AddKnownTypes(v1alpha1.GroupVersion, svcDefaults) fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(svcDefaults).Build() - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - testClient.TestServer.WaitForServiceIntentions(t) - consulClient := testClient.APIClient + consul, err := testutil.NewTestServerConfigT(t, nil) + req.NoError(err) + defer consul.Stop() + + consul.WaitForServiceIntentions(t) + consulClient, err := capi.NewClient(&capi.Config{ + Address: consul.HTTPAddr, + }) + req.NoError(err) // We haven't run reconcile yet. We must create the config entry // in Consul ourselves in a different datacenter. @@ -1541,7 +1536,7 @@ func TestConfigEntryControllers_doesNotCreateUnownedConfigEntry(t *testing.T) { Name: svcDefaults.KubernetesName(), } // First get it so we have the latest revision number. - err := fakeClient.Get(ctx, namespacedName, svcDefaults) + err = fakeClient.Get(ctx, namespacedName, svcDefaults) req.NoError(err) // Attempt to create the entry in Kube and run reconcile. @@ -1549,9 +1544,8 @@ func TestConfigEntryControllers_doesNotCreateUnownedConfigEntry(t *testing.T) { Client: fakeClient, Log: logrtest.TestLogger{T: t}, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } resp, err := reconciler.Reconcile(ctx, ctrl.Request{ @@ -1606,16 +1600,21 @@ func TestConfigEntryControllers_doesNotDeleteUnownedConfig(t *testing.T) { s.AddKnownTypes(v1alpha1.GroupVersion, svcDefaultsWithDeletion) fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(svcDefaultsWithDeletion).Build() - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - testClient.TestServer.WaitForServiceIntentions(t) - consulClient := testClient.APIClient + consul, err := testutil.NewTestServerConfigT(t, nil) + req.NoError(err) + defer consul.Stop() + + consul.WaitForServiceIntentions(t) + consulClient, err := capi.NewClient(&capi.Config{ + Address: consul.HTTPAddr, + }) + req.NoError(err) reconciler := &ServiceDefaultsController{ Client: fakeClient, Log: logrtest.TestLogger{T: t}, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } @@ -1688,8 +1687,15 @@ func TestConfigEntryControllers_updatesStatusWhenDeleteFails(t *testing.T) { fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(defaults, splitter).Build() - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - testClient.TestServer.WaitForServiceIntentions(t) + consul, err := testutil.NewTestServerConfigT(t, nil) + require.NoError(t, err) + defer consul.Stop() + + consul.WaitForServiceIntentions(t) + consulClient, err := capi.NewClient(&capi.Config{ + Address: consul.HTTPAddr, + }) + require.NoError(t, err) logger := logrtest.TestLogger{T: t} @@ -1697,18 +1703,16 @@ func TestConfigEntryControllers_updatesStatusWhenDeleteFails(t *testing.T) { Client: fakeClient, Log: logger, 
ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } svcSplitterReconciler := ServiceSplitterController{ Client: fakeClient, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } @@ -1819,9 +1823,15 @@ func TestConfigEntryController_Migration(t *testing.T) { s.AddKnownTypes(v1alpha1.GroupVersion, &v1alpha1.ServiceDefaults{}) fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(&c.KubeResource).Build() - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - testClient.TestServer.WaitForServiceIntentions(t) - consulClient := testClient.APIClient + consul, err := testutil.NewTestServerConfigT(t, nil) + require.NoError(t, err) + defer consul.Stop() + + consul.WaitForServiceIntentions(t) + consulClient, err := capi.NewClient(&capi.Config{ + Address: consul.HTTPAddr, + }) + require.NoError(t, err) // Create the service-defaults in Consul. success, _, err := consulClient.ConfigEntries().Set(&c.ConsulResource, nil) @@ -1834,9 +1844,8 @@ func TestConfigEntryController_Migration(t *testing.T) { Client: fakeClient, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClientConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, - DatacenterName: datacenterName, + ConsulClient: consulClient, + DatacenterName: datacenterName, }, } diff --git a/control-plane/controller/exportedservices_controller_ent_test.go b/control-plane/controller/exportedservices_controller_ent_test.go index dd91c49b57..ec8f771586 100644 --- a/control-plane/controller/exportedservices_controller_ent_test.go +++ b/control-plane/controller/exportedservices_controller_ent_test.go @@ -12,8 +12,8 @@ import ( "github.com/hashicorp/consul-k8s/control-plane/api/common" "github.com/hashicorp/consul-k8s/control-plane/api/v1alpha1" "github.com/hashicorp/consul-k8s/control-plane/controller" - "github.com/hashicorp/consul-k8s/control-plane/helper/test" capi "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/sdk/testutil" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -94,9 +94,14 @@ func TestExportedServicesController_createsExportedServices(tt *testing.T) { s.AddKnownTypes(v1alpha1.GroupVersion, exportedServices) ctx := context.Background() - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - testClient.TestServer.WaitForServiceIntentions(t) - consulClient := testClient.APIClient + consul, err := testutil.NewTestServerConfigT(t, nil) + req.NoError(err) + defer consul.Stop() + consul.WaitForServiceIntentions(t) + consulClient, err := capi.NewClient(&capi.Config{ + Address: consul.HTTPAddr, + }) + req.NoError(err) fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(exportedServices).Build() @@ -105,8 +110,7 @@ func TestExportedServicesController_createsExportedServices(tt *testing.T) { Log: logrtest.TestLogger{T: t}, Scheme: s, ConfigEntryController: &controller.ConfigEntryController{ - ConsulClientConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, + ConsulClient: consulClient, EnableConsulNamespaces: true, EnableNSMirroring: c.Mirror, NSMirroringPrefix: c.MirrorPrefix, @@ -210,9 +214,15 @@ func 
TestExportedServicesController_updatesExportedServices(tt *testing.T) { s.AddKnownTypes(v1alpha1.GroupVersion, exportedServices) ctx := context.Background() - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - testClient.TestServer.WaitForServiceIntentions(t) - consulClient := testClient.APIClient + consul, err := testutil.NewTestServerConfigT(t, nil) + req.NoError(err) + defer consul.Stop() + consul.WaitForServiceIntentions(t) + consulClient, err := capi.NewClient(&capi.Config{ + Address: consul.HTTPAddr, + }) + req.NoError(err) + fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(exportedServices).Build() controller := &controller.ExportedServicesController{ @@ -220,8 +230,7 @@ func TestExportedServicesController_updatesExportedServices(tt *testing.T) { Log: logrtest.TestLogger{T: t}, Scheme: s, ConfigEntryController: &controller.ConfigEntryController{ - ConsulClientConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, + ConsulClient: consulClient, EnableConsulNamespaces: true, EnableNSMirroring: c.Mirror, NSMirroringPrefix: c.MirrorPrefix, @@ -250,7 +259,7 @@ func TestExportedServicesController_updatesExportedServices(tt *testing.T) { // Now update it. { // First get it so we have the latest revision number. - err := fakeClient.Get(ctx, types.NamespacedName{ + err = fakeClient.Get(ctx, types.NamespacedName{ Namespace: c.SourceKubeNS, Name: exportedServices.KubernetesName(), }, exportedServices) @@ -258,7 +267,7 @@ func TestExportedServicesController_updatesExportedServices(tt *testing.T) { // Update the resource. exportedServices.Spec.Services[0].Name = "backend" - err = fakeClient.Update(ctx, exportedServices) + err := fakeClient.Update(ctx, exportedServices) req.NoError(err) resp, err := controller.Reconcile(ctx, ctrl.Request{ @@ -347,9 +356,14 @@ func TestExportedServicesController_deletesExportedServices(tt *testing.T) { } s.AddKnownTypes(v1alpha1.GroupVersion, exportedServices) - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) - testClient.TestServer.WaitForServiceIntentions(t) - consulClient := testClient.APIClient + consul, err := testutil.NewTestServerConfigT(t, nil) + req.NoError(err) + defer consul.Stop() + consul.WaitForServiceIntentions(t) + consulClient, err := capi.NewClient(&capi.Config{ + Address: consul.HTTPAddr, + }) + req.NoError(err) fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(exportedServices).Build() @@ -358,8 +372,7 @@ func TestExportedServicesController_deletesExportedServices(tt *testing.T) { Log: logrtest.TestLogger{T: t}, Scheme: s, ConfigEntryController: &controller.ConfigEntryController{ - ConsulClientConfig: testClient.Cfg, - ConsulServerConnMgr: testClient.Watcher, + ConsulClient: consulClient, EnableConsulNamespaces: true, EnableNSMirroring: c.Mirror, NSMirroringPrefix: c.MirrorPrefix, diff --git a/control-plane/go.mod b/control-plane/go.mod index 51e4ad39a0..8dd66de28d 100644 --- a/control-plane/go.mod +++ b/control-plane/go.mod @@ -9,15 +9,11 @@ require ( github.com/google/go-cmp v0.5.7 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 github.com/hashicorp/consul-k8s/control-plane/cni v0.0.0-20220831174802-b8af65262de8 - github.com/hashicorp/consul-server-connection-manager v0.1.0 - github.com/hashicorp/consul/api v1.10.1-0.20230203155153-2f149d60ccbf - github.com/hashicorp/consul/sdk v0.13.0 + github.com/hashicorp/consul/api v1.10.1-0.20221005170644-13da2c5fad69 + github.com/hashicorp/consul/sdk v0.11.0 github.com/hashicorp/go-discover 
v0.0.0-20200812215701-c4b85f6ed31f github.com/hashicorp/go-hclog v1.2.2 github.com/hashicorp/go-multierror v1.1.1 - github.com/hashicorp/go-netaddrs v0.1.0 - github.com/hashicorp/go-rootcerts v1.0.2 - github.com/hashicorp/go-version v1.6.0 github.com/hashicorp/serf v0.10.1 github.com/kr/text v0.2.0 github.com/miekg/dns v1.1.41 @@ -50,12 +46,11 @@ require ( github.com/Azure/go-autorest/autorest/validation v0.3.0 // indirect github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect - github.com/armon/go-metrics v0.4.1 // indirect + github.com/armon/go-metrics v0.3.10 // indirect github.com/armon/go-radix v1.0.0 // indirect github.com/aws/aws-sdk-go v1.25.41 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bgentry/speakeasy v0.1.0 // indirect - github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/cespare/xxhash/v2 v2.1.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/denverdino/aliyungo v0.0.0-20170926055100-d3308649c661 // indirect @@ -74,10 +69,10 @@ require ( github.com/googleapis/gax-go/v2 v2.0.5 // indirect github.com/googleapis/gnostic v0.5.5 // indirect github.com/gophercloud/gophercloud v0.1.0 // indirect - github.com/hashicorp/consul/proto-public v0.1.0 // indirect github.com/hashicorp/errwrap v1.0.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-immutable-radix v1.3.0 // indirect + github.com/hashicorp/go-rootcerts v1.0.2 // indirect github.com/hashicorp/go-uuid v1.0.2 // indirect github.com/hashicorp/golang-lru v0.5.4 // indirect github.com/hashicorp/mdns v1.0.4 // indirect @@ -134,6 +129,4 @@ require ( sigs.k8s.io/yaml v1.2.0 // indirect ) -replace github.com/hashicorp/consul/sdk => github.com/hashicorp/consul/sdk v0.4.1-0.20221021205723-cc843c4be892 - -go 1.20 +go 1.18 diff --git a/control-plane/go.sum b/control-plane/go.sum index 1fbb30b7ff..54a0604111 100644 --- a/control-plane/go.sum +++ b/control-plane/go.sum @@ -96,8 +96,8 @@ github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kd github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= -github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= +github.com/armon/go-metrics v0.3.10 h1:FR+drcQStOe+32sYyJYyZ7FIdgoGGBnwLl+flodp8Uo= +github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= @@ -117,8 +117,6 @@ github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJm github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/cenkalti/backoff/v4 v4.1.3 
h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= -github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= @@ -343,15 +341,12 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/consul-k8s/control-plane/cni v0.0.0-20220831174802-b8af65262de8 h1:TQY0oKtLV15UNYWeSkTxi4McBIyLecsEtbc/VfxvbYA= github.com/hashicorp/consul-k8s/control-plane/cni v0.0.0-20220831174802-b8af65262de8/go.mod h1:aw35GB76URgbtxaSSMxbOetbG7YEHHPkIX3/SkTBaWc= -github.com/hashicorp/consul-server-connection-manager v0.1.0 h1:XCweGvMHzra88rYv2zxwwuUOjBUdcQmNKVrnQmt/muo= -github.com/hashicorp/consul-server-connection-manager v0.1.0/go.mod h1:XVVlO+Yk7aiRpspiHZkrrFVn9BJIiOPnQIzqytPxGaU= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= -github.com/hashicorp/consul/api v1.10.1-0.20230203155153-2f149d60ccbf h1:vvsHghmX3LyNUaDe7onYKHyDiny+ystdHKIEujbNj4Q= -github.com/hashicorp/consul/api v1.10.1-0.20230203155153-2f149d60ccbf/go.mod h1:c1u8FzGHcavbEtRW/p1YditvfMgn4QsKNgz2rnCDF7c= -github.com/hashicorp/consul/proto-public v0.1.0 h1:O0LSmCqydZi363hsqc6n2v5sMz3usQMXZF6ziK3SzXU= -github.com/hashicorp/consul/proto-public v0.1.0/go.mod h1:vs2KkuWwtjkIgA5ezp4YKPzQp4GitV+q/+PvksrA92k= -github.com/hashicorp/consul/sdk v0.4.1-0.20221021205723-cc843c4be892 h1:jw0NwPmNPr5CxAU04hACdj61JSaJBKZ0FdBo+kwfNp4= -github.com/hashicorp/consul/sdk v0.4.1-0.20221021205723-cc843c4be892/go.mod h1:yPkX5Q6CsxTFMjQQDJwzeNmUUF5NUGGbrDsv9wTb8cw= +github.com/hashicorp/consul/api v1.10.1-0.20221005170644-13da2c5fad69 h1:IALuDSO0f6x0txq/tjUDF3sShyDMT8dmjn9af6Ik8BA= +github.com/hashicorp/consul/api v1.10.1-0.20221005170644-13da2c5fad69/go.mod h1:T09kWtKqm8j1S9yTd1r0hVhfOyPrvLb0zb6dPKpNXxQ= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/consul/sdk v0.11.0 h1:HRzj8YSCln2yGgCumN5CL8lYlD3gBurnervJRJAZyC4= +github.com/hashicorp/consul/sdk v0.11.0/go.mod h1:yPkX5Q6CsxTFMjQQDJwzeNmUUF5NUGGbrDsv9wTb8cw= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -361,32 +356,31 @@ github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/S github.com/hashicorp/go-discover v0.0.0-20200812215701-c4b85f6ed31f h1:7WFMVeuJQp6BkzuTv9O52pzwtEFVUJubKYN+zez8eTI= github.com/hashicorp/go-discover v0.0.0-20200812215701-c4b85f6ed31f/go.mod h1:D4eo8/CN92vm9/9UDG+ldX1/fMFa4kpl8qzyTolus8o= github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v1.2.2 h1:ihRI7YFwcZdiSD7SIenIhHfQH3OuDvWerAUBZbeQS3M= github.com/hashicorp/go-hclog v1.2.2/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod 
h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.0 h1:8exGP7ego3OmkfksihtSouGMZ+hQrhxx+FVELeXpVPE= github.com/hashicorp/go-immutable-radix v1.3.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= +github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-netaddrs v0.1.0 h1:TnlYvODD4C/wO+j7cX1z69kV5gOzI87u3OcUinANaW8= -github.com/hashicorp/go-netaddrs v0.1.0/go.mod h1:33+a/emi5R5dqRspOuZKO0E+Tuz5WV1F84eRWALkedA= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= +github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= -github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -449,6 +443,7 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxv github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -489,6 +484,8 @@ github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXx 
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= @@ -588,6 +585,7 @@ github.com/rs/zerolog v1.4.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKk github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sean-/conswriter v0.0.0-20180208195008-f5ae3917a627/go.mod h1:7zjs06qF79/FKAJpBvFx3P8Ww4UTIMAe+lpNXDHziac= github.com/sean-/pager v0.0.0-20180208200047-666be9bf53b5/go.mod h1:BeybITEsBEg6qbIiqJ6/Bqeq25bCLbL7YFmpaFfJDuM= @@ -796,6 +794,7 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= diff --git a/control-plane/helper/test/test_util.go b/control-plane/helper/test/test_util.go index 0ad4601fde..08b28b00fd 100644 --- a/control-plane/helper/test/test_util.go +++ b/control-plane/helper/test/test_util.go @@ -2,7 +2,6 @@ package test import ( "fmt" - "net" "net/http" "net/http/httptest" "os" @@ -10,11 +9,8 @@ import ( "testing" "time" - "github.com/hashicorp/consul-k8s/control-plane/consul" "github.com/hashicorp/consul-k8s/control-plane/helper/cert" - "github.com/hashicorp/consul-server-connection-manager/discovery" "github.com/hashicorp/consul/api" - "github.com/hashicorp/consul/sdk/testutil" "github.com/stretchr/testify/require" ) @@ -22,99 +18,44 @@ const ( componentAuthMethod = "consul-k8s-component-auth-method" ) -type TestServerClient struct { - TestServer *testutil.TestServer - APIClient *api.Client - Cfg *consul.Config - Watcher consul.ServerConnectionManager -} - -func TestServerWithMockConnMgrWatcher(t *testing.T, callback testutil.ServerConfigCallback) *TestServerClient { - t.Helper() - - var cfg *testutil.TestServerConfig - consulServer, err := 
testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { - if callback != nil { - callback(c) - } - cfg = c - }) - require.NoError(t, err) - t.Cleanup(func() { - _ = consulServer.Stop() - }) - consulServer.WaitForSerfCheck(t) - - consulConfig := &consul.Config{ - APIClientConfig: &api.Config{Address: consulServer.HTTPAddr}, - HTTPPort: cfg.Ports.HTTP, - } - if cfg.ACL.Tokens.InitialManagement != "" { - consulConfig.APIClientConfig.Token = cfg.ACL.Tokens.InitialManagement - } - client, err := api.NewClient(consulConfig.APIClientConfig) - require.NoError(t, err) - - return &TestServerClient{ - TestServer: consulServer, - APIClient: client, - Cfg: consulConfig, - Watcher: MockConnMgrForIPAndPort("127.0.0.1", cfg.Ports.GRPC), - } -} - -func MockConnMgrForIPAndPort(ip string, port int) *consul.MockServerConnectionManager { - parsedIP := net.ParseIP(ip) - connMgr := &consul.MockServerConnectionManager{} - mockState := discovery.State{ - Address: discovery.Addr{ - TCPAddr: net.TCPAddr{ - IP: parsedIP, - Port: port, - }, - }} - connMgr.On("State").Return(mockState, nil) - connMgr.On("Run").Return(nil) - connMgr.On("Stop").Return(nil) - return connMgr -} - // GenerateServerCerts generates Consul CA // and a server certificate and saves them to temp files. // It returns file names in this order: // CA certificate, server certificate, and server key. func GenerateServerCerts(t *testing.T) (string, string, string) { + require := require.New(t) + caFile, err := os.CreateTemp("", "ca") - require.NoError(t, err) + require.NoError(err) certFile, err := os.CreateTemp("", "cert") - require.NoError(t, err) + require.NoError(err) certKeyFile, err := os.CreateTemp("", "key") - require.NoError(t, err) + require.NoError(err) // Generate CA signer, _, caCertPem, caCertTemplate, err := cert.GenerateCA("Consul Agent CA - Test") - require.NoError(t, err) + require.NoError(err) // Generate Server Cert name := "server.dc1.consul" hosts := []string{name, "localhost", "127.0.0.1"} certPem, keyPem, err := cert.GenerateCert(name, 1*time.Hour, caCertTemplate, signer, hosts) - require.NoError(t, err) + require.NoError(err) // Write certs and key to files _, err = caFile.WriteString(caCertPem) - require.NoError(t, err) + require.NoError(err) _, err = certFile.WriteString(certPem) - require.NoError(t, err) + require.NoError(err) _, err = certKeyFile.WriteString(keyPem) - require.NoError(t, err) + require.NoError(err) t.Cleanup(func() { - _ = os.RemoveAll(caFile.Name()) - _ = os.RemoveAll(certFile.Name()) - _ = os.RemoveAll(certKeyFile.Name()) + os.Remove(caFile.Name()) + os.Remove(certFile.Name()) + os.Remove(certKeyFile.Name()) }) return caFile.Name(), certFile.Name(), certKeyFile.Name() } diff --git a/control-plane/subcommand/acl-init/command.go b/control-plane/subcommand/acl-init/command.go index af85128ea8..f045dda076 100644 --- a/control-plane/subcommand/acl-init/command.go +++ b/control-plane/subcommand/acl-init/command.go @@ -6,7 +6,6 @@ import ( "errors" "flag" "fmt" - "net" "os" "path/filepath" "strings" @@ -14,14 +13,13 @@ import ( "text/template" "time" - "github.com/cenkalti/backoff" "github.com/hashicorp/consul-k8s/control-plane/consul" "github.com/hashicorp/consul-k8s/control-plane/subcommand" "github.com/hashicorp/consul-k8s/control-plane/subcommand/common" "github.com/hashicorp/consul-k8s/control-plane/subcommand/flags" "github.com/hashicorp/consul/api" + "github.com/hashicorp/go-discover" "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-netaddrs" "github.com/mitchellh/cli" corev1 
"k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -36,24 +34,36 @@ const ( type Command struct { UI cli.Ui - flags *flag.FlagSet - k8s *flags.K8SFlags - consul *flags.ConsulFlags + flags *flag.FlagSet + k8s *flags.K8SFlags + http *flags.HTTPFlags - flagSecretName string - flagInitType string - flagACLDir string - flagTokenSinkFile string - flagK8sNamespace string + flagSecretName string + flagInitType string + flagNamespace string + flagPrimaryDatacenter string + flagACLDir string + flagTokenSinkFile string - flagLogLevel string - flagLogJSON bool + flagACLAuthMethod string // Auth Method to use for ACLs. + flagLogLevel string + flagLogJSON bool + + bearerTokenFile string // Location of the bearer token. Default is defaultBearerTokenFile. + flagComponentName string // Name of the component to be used as metadata to ACL Login. + + // Flags to configure Consul connection + flagServerAddresses []string + flagServerPort uint + flagConsulCACert string + flagUseHTTPS bool k8sClient kubernetes.Interface - once sync.Once - help string - logger hclog.Logger + once sync.Once + help string + logger hclog.Logger + providers map[string]discover.Provider ctx context.Context consulClient *api.Client @@ -72,8 +82,19 @@ func (c *Command) init() { "Optional filepath to write acl token") // Flags related to using consul login to fetch the ACL token. - c.flags.StringVar(&c.flagK8sNamespace, "k8s-namespace", "", - "Name of Kubernetes namespace where the token Kubernetes secret is stored.") + c.flags.StringVar(&c.flagNamespace, "k8s-namespace", "", "Name of Kubernetes namespace where the token Kubernetes secret is stored.") + c.flags.StringVar(&c.flagPrimaryDatacenter, "primary-datacenter", "", "Name of the primary datacenter when federation is enabled and the command is run in a secondary datacenter.") + c.flags.StringVar(&c.flagACLAuthMethod, "acl-auth-method", "", "Name of the auth method to login with.") + c.flags.StringVar(&c.flagComponentName, "component-name", "", + "Name of the component to pass to ACL Login as metadata.") + c.flags.Var((*flags.AppendSliceValue)(&c.flagServerAddresses), "server-address", + "The IP, DNS name or the cloud auto-join string of the Consul server(s). If providing IPs or DNS names, may be specified multiple times. "+ + "At least one value is required.") + c.flags.UintVar(&c.flagServerPort, "server-port", 8500, "The HTTP or HTTPS port of the Consul server. Defaults to 8500.") + c.flags.StringVar(&c.flagConsulCACert, "consul-ca-cert", "", + "Path to the PEM-encoded CA certificate of the Consul cluster.") + c.flags.BoolVar(&c.flagUseHTTPS, "use-https", false, + "Toggle for using HTTPS for all API calls to Consul.") c.flags.StringVar(&c.flagLogLevel, "log-level", "info", "Log verbosity level. 
Supported values (in order of detail) are \"trace\", "+ "\"debug\", \"info\", \"warn\", and \"error\".") @@ -81,9 +102,9 @@ func (c *Command) init() { "Enable or disable JSON output format for logging.") c.k8s = &flags.K8SFlags{} - c.consul = &flags.ConsulFlags{} + c.http = &flags.HTTPFlags{} flags.Merge(c.flags, c.k8s.Flags()) - flags.Merge(c.flags, c.consul.Flags()) + flags.Merge(c.flags, c.http.Flags()) c.help = flags.Usage(help, c.flags) } @@ -99,18 +120,18 @@ func (c *Command) Run(args []string) int { return 1 } - if c.consul.ConsulLogin.BearerTokenFile == "" { - c.consul.ConsulLogin.BearerTokenFile = defaultBearerTokenFile + if c.bearerTokenFile == "" { + c.bearerTokenFile = defaultBearerTokenFile } // This allows us to utilize the default path of `/consul/login/acl-token` for the ACL token // but only in the case of when we're using ACL.Login. If flagACLAuthMethod is not set and // the tokenSinkFile is also unset it means we do not want to write an ACL token in the case // of the client token. - if c.flagTokenSinkFile == "" { + if c.flagTokenSinkFile == "" && c.flagACLAuthMethod != "" { c.flagTokenSinkFile = defaultTokenSinkFile } - if c.flagK8sNamespace == "" { - c.flagK8sNamespace = corev1.NamespaceDefault + if c.flagNamespace == "" { + c.flagNamespace = corev1.NamespaceDefault } if c.ctx == nil { @@ -141,40 +162,45 @@ func (c *Command) Run(args []string) int { } var secret string - if c.consul.ConsulLogin.AuthMethod != "" { - var ipAddrs []net.IPAddr - if err := backoff.Retry(func() error { - ipAddrs, err = netaddrs.IPAddrs(c.ctx, c.consul.Addresses, c.logger) + if c.flagACLAuthMethod != "" { + cfg := api.DefaultConfig() + c.http.MergeOntoConfig(cfg) + + if len(c.flagServerAddresses) > 0 { + serverAddresses, err := common.GetResolvedServerAddresses(c.flagServerAddresses, c.providers, c.logger) if err != nil { - c.logger.Error("Error resolving IP Address", "err", err) - return err + c.UI.Error(fmt.Sprintf("Unable to discover any Consul addresses from %q: %s", c.flagServerAddresses[0], err)) + return 1 } - return nil - }, exponentialBackoffWithMaxInterval()); err != nil { - c.UI.Error(err.Error()) - return 1 - } - firstServerAddr := fmt.Sprintf("%s:%d", ipAddrs[0].IP.String(), c.consul.HTTPPort) - config := c.consul.ConsulClientConfig().APIClientConfig - config.Address = firstServerAddr + scheme := "http" + if c.flagUseHTTPS { + scheme = "https" + } + + serverAddr := fmt.Sprintf("%s:%d", serverAddresses[0], c.flagServerPort) + cfg.Address = serverAddr + cfg.Scheme = scheme + } - c.consulClient, err = consul.NewClient(config, c.consul.APITimeout) + c.consulClient, err = consul.NewClient(cfg, c.http.ConsulAPITimeout()) if err != nil { - c.logger.Error("Failed to create Consul client", "error", err) + c.logger.Error("Unable to get client connection", "error", err) return 1 } loginParams := common.LoginParams{ - AuthMethod: c.consul.ConsulLogin.AuthMethod, - Datacenter: c.consul.ConsulLogin.Datacenter, - BearerTokenFile: c.consul.ConsulLogin.BearerTokenFile, + AuthMethod: c.flagACLAuthMethod, + Datacenter: c.flagPrimaryDatacenter, + BearerTokenFile: c.bearerTokenFile, TokenSinkFile: c.flagTokenSinkFile, - Meta: c.consul.ConsulLogin.Meta, + Meta: map[string]string{ + "component": c.flagComponentName, + }, } secret, err = common.ConsulLogin(c.consulClient, loginParams, c.logger) if err != nil { - c.logger.Error("Failed to login to Consul", "error", err) + c.logger.Error("Consul login failed", "error", err) return 1 } c.logger.Info("Successfully read ACL token from the server") @@ -232,7 
+258,7 @@ func (c *Command) Run(args []string) int { } func (c *Command) getSecret(secretName string) (string, error) { - secret, err := c.k8sClient.CoreV1().Secrets(c.flagK8sNamespace).Get(c.ctx, secretName, metav1.GetOptions{}) + secret, err := c.k8sClient.CoreV1().Secrets(c.flagNamespace).Get(c.ctx, secretName, metav1.GetOptions{}) if err != nil { return "", err } @@ -245,23 +271,13 @@ func (c *Command) validateFlags() error { if len(c.flags.Args()) > 0 { return errors.New("Should have no non-flag arguments.") } - if c.consul.APITimeout <= 0 { + if c.http.ConsulAPITimeout() <= 0 { return errors.New("-consul-api-timeout must be set to a value greater than 0") } return nil } -// exponentialBackoffWithMaxInterval creates an exponential backoff but limits the -// maximum backoff to 10 seconds so that we don't find ourselves in a situation -// where we are waiting for minutes before retries. -func exponentialBackoffWithMaxInterval() *backoff.ExponentialBackOff { - backoff := backoff.NewExponentialBackOff() - backoff.MaxInterval = 10 * time.Second - backoff.Reset() - return backoff -} - func (c *Command) Synopsis() string { return synopsis } func (c *Command) Help() string { c.once.Do(c.init) diff --git a/control-plane/subcommand/acl-init/command_test.go b/control-plane/subcommand/acl-init/command_test.go index c9f5703459..f069b5ec98 100644 --- a/control-plane/subcommand/acl-init/command_test.go +++ b/control-plane/subcommand/acl-init/command_test.go @@ -12,6 +12,7 @@ import ( "github.com/hashicorp/consul-k8s/control-plane/helper/test" "github.com/hashicorp/consul-k8s/control-plane/subcommand/common" + "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/sdk/testutil" "github.com/mitchellh/cli" "github.com/stretchr/testify/require" @@ -24,6 +25,30 @@ const ( componentAuthMethod = "consul-k8s-component-auth-method" ) +func TestRun_FlagValidation(t *testing.T) { + t.Parallel() + cases := []struct { + flags []string + expErr string + }{ + { + flags: []string{}, + expErr: "-consul-api-timeout must be set to a value greater than 0", + }, + } + for _, c := range cases { + t.Run(c.expErr, func(t *testing.T) { + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + } + code := cmd.Run(c.flags) + require.Equal(t, 1, code) + require.Contains(t, ui.ErrorWriter.String(), c.expErr) + }) + } +} + // Test that we write the secret data to a file. func TestRun_TokenSinkFile(t *testing.T) { t.Parallel() @@ -61,6 +86,7 @@ func TestRun_TokenSinkFile(t *testing.T) { code := cmd.Run([]string{ "-token-sink-file", sinkFile, "-secret-name", secretName, + "-consul-api-timeout", "5s", }) require.Equal(0, code, ui.ErrorWriter.String()) bytes, err := os.ReadFile(sinkFile) @@ -101,6 +127,7 @@ func TestRun_TokenSinkFileErr(t *testing.T) { code := cmd.Run([]string{ "-token-sink-file", "/this/filepath/does/not/exist", "-secret-name", secretName, + "-consul-api-timeout", "5s", }) require.Equal(1, code) @@ -148,6 +175,7 @@ func TestRun_TokenSinkFileTwice(t *testing.T) { code := cmd.Run([]string{ "-token-sink-file", sinkFile, "-secret-name", secretName, + "-consul-api-timeout", "5s", }) require.Equal(0, code, ui.ErrorWriter.String()) @@ -168,36 +196,49 @@ func TestRun_PerformsConsulLogin(t *testing.T) { // Start Consul server with ACLs enabled and default deny policy. 
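// ----- aside (illustrative sketch, not part of this patch) -----
// The exponentialBackoffWithMaxInterval helper deleted above capped retry
// waits at 10 seconds so address resolution never stalls for minutes. A
// minimal standalone equivalent using github.com/cenkalti/backoff looks
// like this; resolveAddr is a hypothetical stand-in for the netaddrs lookup.
package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/cenkalti/backoff"
)

// cappedBackoff returns an exponential backoff whose wait interval never
// exceeds 10 seconds, mirroring the removed helper.
func cappedBackoff() *backoff.ExponentialBackOff {
	b := backoff.NewExponentialBackOff()
	b.MaxInterval = 10 * time.Second
	b.Reset()
	return b
}

func main() {
	attempts := 0
	// Fails twice, then succeeds, standing in for server-address resolution.
	resolveAddr := func() error {
		attempts++
		if attempts < 3 {
			return errors.New("not ready")
		}
		return nil
	}
	if err := backoff.Retry(resolveAddr, cappedBackoff()); err != nil {
		fmt.Println("gave up:", err)
		return
	}
	fmt.Println("resolved after", attempts, "attempts")
}
// ----- end aside -----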
masterToken := "b78d37c7-0ca7-5f4d-99ee-6d9975ce4586" - server := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { + server, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { c.ACL.Enabled = true c.ACL.DefaultPolicy = "deny" c.ACL.Tokens.InitialManagement = masterToken }) + require.NoError(t, err) + defer server.Stop() + server.WaitForLeader(t) + cfg := &api.Config{ + Scheme: "http", + Address: server.HTTPAddr, + Token: masterToken, + } + consulClient, err := api.NewClient(cfg) + require.NoError(t, err) + // Set up the Component Auth Method, this pre-loads Consul with bindingrule, roles and an acl:write policy so we // can issue an ACL.Login(). - client := server.APIClient - test.SetupK8sComponentAuthMethod(t, client, "test-sa", "default") + test.SetupK8sComponentAuthMethod(t, consulClient, "test-sa", "default") ui := cli.NewMockUi() cmd := Command{ - UI: ui, - k8sClient: k8s, + UI: ui, + k8sClient: k8s, + bearerTokenFile: bearerFile, } code := cmd.Run([]string{ "-token-sink-file", tokenFile, - "-consul-login-bearer-token-file", bearerFile, - "-auth-method-name", componentAuthMethod, - "-consul-login-meta", "component-name=foo", - "-addresses", strings.Split(server.TestServer.HTTPAddr, ":")[0], - "-http-port", strings.Split(server.TestServer.HTTPAddr, ":")[1], - "-grpc-port", strings.Split(server.TestServer.GRPCAddr, ":")[1], + "-acl-auth-method", componentAuthMethod, + "-component-name", "foo", + "-http-addr", fmt.Sprintf("%s://%s", cfg.Scheme, cfg.Address), + "-consul-api-timeout", "5s", }) require.Equal(t, 0, code, ui.ErrorWriter.String()) // Validate the Token got written. tokenBytes, err := os.ReadFile(tokenFile) require.NoError(t, err) require.Equal(t, 36, len(tokenBytes)) + // Validate the Token and its Description. + tok, _, err := consulClient.ACL().TokenReadSelf(&api.QueryOptions{Token: string(tokenBytes)}) + require.NoError(t, err) + require.Equal(t, "token created via login: {\"component\":\"foo\"}", tok.Description) } // TestRun_WithAclAuthMethodDefinedWritesConfigJsonWithTokenMatchingSinkFile @@ -209,7 +250,7 @@ func TestRun_WithAclAuthMethodDefined_WritesConfigJson_WithTokenMatchingSinkFile tmpDir, err := os.MkdirTemp("", "") require.NoError(t, err) t.Cleanup(func() { - os.RemoveAll(tokenFile) + os.Remove(tokenFile) os.RemoveAll(tmpDir) }) @@ -218,12 +259,21 @@ func TestRun_WithAclAuthMethodDefined_WritesConfigJson_WithTokenMatchingSinkFile // Start Consul server with ACLs enabled and default deny policy. masterToken := "b78d37c7-0ca7-5f4d-99ee-6d9975ce4586" - server := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { + server, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { c.ACL.Enabled = true c.ACL.DefaultPolicy = "deny" c.ACL.Tokens.InitialManagement = masterToken }) - consulClient := server.APIClient + require.NoError(t, err) + defer server.Stop() + server.WaitForLeader(t) + cfg := &api.Config{ + Scheme: "http", + Address: server.HTTPAddr, + Token: masterToken, + } + consulClient, err := api.NewClient(cfg) + require.NoError(t, err) // Set up the Component Auth Method, this pre-loads Consul with bindingrule, // roles and an acl:write policy so we can issue an ACL.Login(). 
@@ -231,20 +281,19 @@ func TestRun_WithAclAuthMethodDefined_WritesConfigJson_WithTokenMatchingSinkFile ui := cli.NewMockUi() cmd := Command{ - UI: ui, - k8sClient: k8s, + UI: ui, + k8sClient: k8s, + bearerTokenFile: bearerFile, } code := cmd.Run([]string{ + "-token-sink-file", tokenFile, + "-acl-auth-method", componentAuthMethod, + "-component-name", "foo", + "-http-addr", fmt.Sprintf("%s://%s", cfg.Scheme, cfg.Address), "-init-type", "client", "-acl-dir", tmpDir, - "-token-sink-file", tokenFile, - "-consul-login-bearer-token-file", bearerFile, - "-auth-method-name", componentAuthMethod, - "-consul-login-meta", "component-name=foo", - "-addresses", strings.Split(server.TestServer.HTTPAddr, ":")[0], - "-http-port", strings.Split(server.TestServer.HTTPAddr, ":")[1], - "-grpc-port", strings.Split(server.TestServer.GRPCAddr, ":")[1], + "-consul-api-timeout", "5s", }) require.Equal(t, 0, code, ui.ErrorWriter.String()) // Validate the ACL Config file got written. @@ -308,6 +357,7 @@ func TestRun_WithoutAclAuthMethodDefined_WritesConfigJsonWithTokenMatchingSinkFi "-secret-name", secretName, "-init-type", "client", "-acl-dir", tmpDir, + "-consul-api-timeout", "5s", }) // Validate the ACL Config file got written. aclConfigBytes, err := os.ReadFile(fmt.Sprintf("%s/acl-config.json", tmpDir)) diff --git a/control-plane/subcommand/common/common.go b/control-plane/subcommand/common/common.go index e3a569ddf6..5b8479c9ab 100644 --- a/control-plane/subcommand/common/common.go +++ b/control-plane/subcommand/common/common.go @@ -42,11 +42,6 @@ const ( // Logger returns an hclog instance with log level set and JSON logging enabled/disabled, or an error if level is invalid. func Logger(level string, jsonLogging bool) (hclog.Logger, error) { - return NamedLogger(level, jsonLogging, "") -} - -// NamedLogger Logger returns a named hclog instance with log level set and JSON logging enabled/disabled, or an error if level is invalid. -func NamedLogger(level string, jsonLogging bool, name string) (hclog.Logger, error) { parsedLevel := hclog.LevelFromString(level) if parsedLevel == hclog.NoLevel { return nil, fmt.Errorf("unknown log level: %s", level) @@ -55,7 +50,7 @@ func NamedLogger(level string, jsonLogging bool, name string) (hclog.Logger, err JSONFormat: jsonLogging, Level: parsedLevel, Output: os.Stderr, - }).Named(name), nil + }), nil } // ZapLogger returns a logr.Logger instance with log level set and JSON logging enabled/disabled, or an error if the level is invalid. @@ -104,8 +99,8 @@ type LoginParams struct { // Meta is the metadata to set on the token. Meta map[string]string - // NumRetries is the number of times to try to log in. - NumRetries uint64 + // numRetries is only used in tests to make them run faster. + numRetries uint64 } // ConsulLogin issues an ACL().Login to Consul and writes out the token to tokenSinkFile. 
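// ----- aside (illustrative sketch, not part of this patch) -----
// For reference, the NamedLogger wrapper removed from common.go below did the
// following: parse the level, build an hclog logger, and attach a sub-name.
// A self-contained sketch of that pattern:
package main

import (
	"fmt"
	"os"

	"github.com/hashicorp/go-hclog"
)

func namedLogger(level string, json bool, name string) (hclog.Logger, error) {
	parsed := hclog.LevelFromString(level)
	if parsed == hclog.NoLevel {
		return nil, fmt.Errorf("unknown log level: %s", level)
	}
	return hclog.New(&hclog.LoggerOptions{
		JSONFormat: json,
		Level:      parsed,
		Output:     os.Stderr,
	}).Named(name), nil // Named() prefixes every log line with the sub-name
}

func main() {
	log, err := namedLogger("info", false, "acl-init")
	if err != nil {
		panic(err)
	}
	log.Info("logger ready")
}
// ----- end aside -----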
@@ -121,8 +116,8 @@ func ConsulLogin(client *api.Client, params LoginParams, log hclog.Logger) (stri return "", fmt.Errorf("no bearer token found in %q", params.BearerTokenFile) } - if params.NumRetries == 0 { - params.NumRetries = numLoginRetries + if params.numRetries == 0 { + params.numRetries = numLoginRetries } var token *api.ACLToken err = backoff.Retry(func() error { @@ -149,7 +144,7 @@ func ConsulLogin(client *api.Client, params LoginParams, log hclog.Logger) (stri } } return err - }, backoff.WithMaxRetries(backoff.NewConstantBackOff(1*time.Second), params.NumRetries)) + }, backoff.WithMaxRetries(backoff.NewConstantBackOff(1*time.Second), params.numRetries)) if err != nil { log.Error("Hit maximum retries for consul login", "error", err) return "", err @@ -207,7 +202,7 @@ func WriteFileWithPerms(outputFile, payload string, mode os.FileMode) error { // os.WriteFile truncates existing files and overwrites them, but only if they are writable. // If the file exists it will already likely be read-only. Remove it first. if _, err := os.Stat(outputFile); err == nil { - if err = os.RemoveAll(outputFile); err != nil { + if err = os.Remove(outputFile); err != nil { return fmt.Errorf("unable to delete existing file: %s", err) } } diff --git a/control-plane/subcommand/common/common_test.go b/control-plane/subcommand/common/common_test.go index 6021bd0b49..9bab362560 100644 --- a/control-plane/subcommand/common/common_test.go +++ b/control-plane/subcommand/common/common_test.go @@ -8,6 +8,7 @@ import ( "net/url" "os" "testing" + "time" "github.com/hashicorp/consul-k8s/control-plane/helper/go-discover/mocks" "github.com/hashicorp/consul/api" @@ -162,50 +163,54 @@ func TestConsulLogin_TokenNotReplicated(t *testing.T) { func TestConsulLogin_EmptyBearerTokenFile(t *testing.T) { t.Parallel() + require := require.New(t) bearerTokenFile := WriteTempFile(t, "") params := LoginParams{ BearerTokenFile: bearerTokenFile, } _, err := ConsulLogin(nil, params, hclog.NewNullLogger()) - require.EqualError(t, err, fmt.Sprintf("no bearer token found in %q", bearerTokenFile)) + require.EqualError(err, fmt.Sprintf("no bearer token found in %q", bearerTokenFile)) } func TestConsulLogin_BearerTokenFileDoesNotExist(t *testing.T) { t.Parallel() + require := require.New(t) randFileName := fmt.Sprintf("/foo/%d/%d", rand.Int(), rand.Int()) params := LoginParams{ BearerTokenFile: randFileName, } _, err := ConsulLogin(nil, params, hclog.NewNullLogger()) - require.Error(t, err) - require.Contains(t, err.Error(), "unable to read bearer token file") + require.Error(err) + require.Contains(err.Error(), "unable to read bearer token file") } func TestConsulLogin_TokenFileUnwritable(t *testing.T) { t.Parallel() + require := require.New(t) bearerTokenFile := WriteTempFile(t, "foo") client := startMockServer(t) // This is a common.Logger. 
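// ----- aside (illustrative sketch, not part of this patch) -----
// WriteFileWithPerms (changed below) relies on a delete-then-write pattern:
// os.WriteFile cannot truncate a read-only file, so any existing file is
// removed first and then rewritten with the requested mode. A minimal,
// runnable sketch of that pattern with a hypothetical sink path:
package main

import (
	"fmt"
	"log"
	"os"
)

func writeFileWithPerms(path, payload string, mode os.FileMode) error {
	// If the file exists it is likely already read-only; remove it first.
	if _, err := os.Stat(path); err == nil {
		if err := os.Remove(path); err != nil {
			return fmt.Errorf("unable to delete existing file: %s", err)
		}
	}
	return os.WriteFile(path, []byte(payload), mode)
}

func main() {
	path := os.TempDir() + "/acl-token-example" // hypothetical sink file
	if err := writeFileWithPerms(path, "first", 0444); err != nil {
		log.Fatal(err)
	}
	// The second write succeeds even though the file is now read-only.
	if err := writeFileWithPerms(path, "second", 0444); err != nil {
		log.Fatal(err)
	}
	fmt.Println("wrote", path)
}
// ----- end aside -----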
log, err := Logger("INFO", false) - require.NoError(t, err) + require.NoError(err) randFileName := fmt.Sprintf("/foo/%d/%d", rand.Int(), rand.Int()) params := LoginParams{ AuthMethod: testAuthMethod, BearerTokenFile: bearerTokenFile, TokenSinkFile: randFileName, - NumRetries: 2, + numRetries: 2, } _, err = ConsulLogin(client, params, log) - require.Error(t, err) - require.Contains(t, err.Error(), "error writing token to file sink") + require.Error(err) + require.Contains(err.Error(), "error writing token to file sink") } func TestWriteFileWithPerms_InvalidOutputFile(t *testing.T) { t.Parallel() + rand.Seed(time.Now().UnixNano()) randFileName := fmt.Sprintf("/tmp/tmp/tmp/%d", rand.Int()) t.Cleanup(func() { - os.RemoveAll(randFileName) + os.Remove(randFileName) }) err := WriteFileWithPerms(randFileName, "", os.FileMode(0444)) require.Errorf(t, err, "unable to create file: %s", randFileName) @@ -213,11 +218,12 @@ func TestWriteFileWithPerms_InvalidOutputFile(t *testing.T) { func TestWriteFileWithPerms_OutputFileExists(t *testing.T) { t.Parallel() + rand.Seed(time.Now().UnixNano()) randFileName := fmt.Sprintf("/tmp/%d", rand.Int()) err := os.WriteFile(randFileName, []byte("foo"), os.FileMode(0444)) require.NoError(t, err) t.Cleanup(func() { - os.RemoveAll(randFileName) + os.Remove(randFileName) }) payload := "abcd" err = WriteFileWithPerms(randFileName, payload, os.FileMode(0444)) @@ -230,9 +236,10 @@ func TestWriteFileWithPerms_OutputFileExists(t *testing.T) { func TestWriteFileWithPerms(t *testing.T) { t.Parallel() payload := "foo-foo-foo-foo" + rand.Seed(time.Now().UnixNano()) randFileName := fmt.Sprintf("/tmp/%d", rand.Int()) t.Cleanup(func() { - os.RemoveAll(randFileName) + os.Remove(randFileName) }) // Issue the write. mode := os.FileMode(0444) diff --git a/control-plane/subcommand/common/test_util.go b/control-plane/subcommand/common/test_util.go index 13d9017fe4..ff73e62ae3 100644 --- a/control-plane/subcommand/common/test_util.go +++ b/control-plane/subcommand/common/test_util.go @@ -17,7 +17,7 @@ func WriteTempFile(t *testing.T, contents string) string { require.NoError(t, err) t.Cleanup(func() { - os.RemoveAll(file.Name()) + os.Remove(file.Name()) }) return file.Name() } diff --git a/control-plane/subcommand/connect-init/command.go b/control-plane/subcommand/connect-init/command.go index 4750e9455c..77d79221d2 100644 --- a/control-plane/subcommand/connect-init/command.go +++ b/control-plane/subcommand/connect-init/command.go @@ -1,34 +1,27 @@ package connectinit import ( - "context" - "encoding/json" "errors" "flag" "fmt" - "net" "os" - "os/signal" "sync" - "syscall" "time" "github.com/cenkalti/backoff" - "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" + connectinject "github.com/hashicorp/consul-k8s/control-plane/connect-inject" "github.com/hashicorp/consul-k8s/control-plane/consul" - "github.com/hashicorp/consul-k8s/control-plane/namespaces" "github.com/hashicorp/consul-k8s/control-plane/subcommand/common" "github.com/hashicorp/consul-k8s/control-plane/subcommand/flags" - "github.com/hashicorp/consul-server-connection-manager/discovery" "github.com/hashicorp/consul/api" - "github.com/hashicorp/consul/sdk/iptables" "github.com/hashicorp/go-hclog" "github.com/mitchellh/cli" - "github.com/mitchellh/mapstructure" ) const ( - defaultProxyIDFile = "/consul/connect-inject/proxyid" + defaultBearerTokenFile = "/var/run/secrets/kubernetes.io/serviceaccount/token" + defaultTokenSinkFile = "/consul/connect-inject/acl-token" + defaultProxyIDFile = 
"/consul/connect-inject/proxyid" // The number of times to attempt to read this service (120s). defaultServicePollingRetries = 120 @@ -37,48 +30,43 @@ const ( type Command struct { UI cli.Ui - flagConsulNodeName string - flagPodName string // Pod name. - flagPodNamespace string // Pod namespace. - flagServiceAccountName string // Service account name. - flagServiceName string // Service name. - flagGatewayKind string - flagRedirectTrafficConfig string - flagLogLevel string - flagLogJSON bool - - flagProxyIDFile string // Location to write the output proxyID. Default is defaultProxyIDFile. - flagMultiPort bool - + flagACLAuthMethod string // Auth Method to use for ACLs, if enabled. + flagPodName string // Pod name. + flagPodNamespace string // Pod namespace. + flagAuthMethodNamespace string // Consul namespace the auth-method is defined in. + flagConsulServiceNamespace string // Consul destination namespace for the service. + flagServiceAccountName string // Service account name. + flagServiceName string // Service name. + flagLogLevel string + flagLogJSON bool + + flagBearerTokenFile string // Location of the bearer token. Default is /var/run/secrets/kubernetes.io/serviceaccount/token. + flagACLTokenSink string // Location to write the output token. Default is defaultTokenSinkFile. + flagProxyIDFile string // Location to write the output proxyID. Default is defaultProxyIDFile. + flagMultiPort bool serviceRegistrationPollingAttempts uint64 // Number of times to poll for this service to be registered. flagSet *flag.FlagSet - consul *flags.ConsulFlags + http *flags.HTTPFlags once sync.Once help string logger hclog.Logger - - watcher *discovery.Watcher - - nonRetryableError error - - // Only used in tests. - iptablesProvider iptables.Provider - iptablesConfig iptables.Config } func (c *Command) init() { c.flagSet = flag.NewFlagSet("", flag.ContinueOnError) + c.flagSet.StringVar(&c.flagACLAuthMethod, "acl-auth-method", "", "Name of the auth method to login to.") c.flagSet.StringVar(&c.flagPodName, "pod-name", "", "Name of the pod.") - c.flagSet.StringVar(&c.flagConsulNodeName, "consul-node-name", os.Getenv("CONSUL_NODE_NAME"), "Name of the Consul node where services are registered.") c.flagSet.StringVar(&c.flagPodNamespace, "pod-namespace", "", "Name of the pod namespace.") + c.flagSet.StringVar(&c.flagAuthMethodNamespace, "auth-method-namespace", "", "Consul namespace the auth-method is defined in") + c.flagSet.StringVar(&c.flagConsulServiceNamespace, "consul-service-namespace", "", "Consul destination namespace of the service.") c.flagSet.StringVar(&c.flagServiceAccountName, "service-account-name", "", "Service account name on the pod.") c.flagSet.StringVar(&c.flagServiceName, "service-name", "", "Service name as specified via the pod annotation.") + c.flagSet.StringVar(&c.flagBearerTokenFile, "bearer-token-file", defaultBearerTokenFile, "Path to service account token file.") + c.flagSet.StringVar(&c.flagACLTokenSink, "acl-token-sink", defaultTokenSinkFile, "File name where where ACL token should be saved.") c.flagSet.StringVar(&c.flagProxyIDFile, "proxy-id-file", defaultProxyIDFile, "File name where proxy's Consul service ID should be saved.") c.flagSet.BoolVar(&c.flagMultiPort, "multiport", false, "If the pod is a multi port pod.") - c.flagSet.StringVar(&c.flagGatewayKind, "gateway-kind", "", "Kind of gateway that is being registered: ingress-gateway, terminating-gateway, or mesh-gateway.") - c.flagSet.StringVar(&c.flagRedirectTrafficConfig, "redirect-traffic-config", 
os.Getenv("CONSUL_REDIRECT_TRAFFIC_CONFIG"), "Config (in JSON format) to configure iptables for this pod.") c.flagSet.StringVar(&c.flagLogLevel, "log-level", "info", "Log verbosity level. Supported values (in order of detail) are \"trace\", "+ "\"debug\", \"info\", \"warn\", and \"error\".") @@ -89,12 +77,13 @@ func (c *Command) init() { c.serviceRegistrationPollingAttempts = defaultServicePollingRetries } - c.consul = &flags.ConsulFlags{} - flags.Merge(c.flagSet, c.consul.Flags()) + c.http = &flags.HTTPFlags{} + flags.Merge(c.flagSet, c.http.Flags()) c.help = flags.Usage(help, c.flagSet) } func (c *Command) Run(args []string) int { + var err error c.once.Do(c.init) if err := c.flagSet.Parse(args); err != nil { @@ -115,112 +104,68 @@ func (c *Command) Run(args []string) int { return 1 } } - - // Create Consul API config object. - consulConfig := c.consul.ConsulClientConfig() - - // Create a context to be used by the processes started in this command. - ctx, cancelFunc := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM) - defer cancelFunc() - - // Start Consul server Connection manager. - serverConnMgrCfg, err := c.consul.ConsulServerConnMgrConfig() - // Disable server watch because we only need to get server IPs once. - serverConnMgrCfg.ServerWatchDisabled = true + cfg := api.DefaultConfig() + cfg.Namespace = c.flagConsulServiceNamespace + c.http.MergeOntoConfig(cfg) + consulClient, err := consul.NewClient(cfg, c.http.ConsulAPITimeout()) if err != nil { - c.UI.Error(fmt.Sprintf("unable to create config for consul-server-connection-manager: %s", err)) - return 1 - } - if c.watcher == nil { - c.watcher, err = discovery.NewWatcher(ctx, serverConnMgrCfg, c.logger.Named("consul-server-connection-manager")) - if err != nil { - c.UI.Error(fmt.Sprintf("unable to create Consul server watcher: %s", err)) - return 1 - } - go c.watcher.Run() - defer c.watcher.Stop() - } - - state, err := c.watcher.State() - if err != nil { - c.logger.Error("Unable to get state from consul-server-connection-manager", "error", err) - return 1 - } - - consulClient, err := consul.NewClientFromConnMgrState(consulConfig, state) - if err != nil { - if c.flagServiceAccountName == "default" { - c.logger.Warn("The service account name for this Pod is \"default\"." + - " In default installations this is not a supported service account name." 
+ - " The service account name must match the name of the Kubernetes Service" + - " or the consul.hashicorp.com/connect-service annotation.") - } c.logger.Error("Unable to get client connection", "error", err) return 1 } - proxyService := &api.AgentService{} - if c.flagGatewayKind != "" { - err = backoff.Retry(c.getGatewayRegistration(consulClient), backoff.WithMaxRetries(backoff.NewConstantBackOff(1*time.Second), c.serviceRegistrationPollingAttempts)) - if err != nil { - c.logger.Error("Timed out waiting for gateway registration", "error", err) - return 1 - } - if c.nonRetryableError != nil { - c.logger.Error("Error processing gateway registration", "error", c.nonRetryableError) - return 1 - } - } else { - var err = backoff.Retry(c.getConnectServiceRegistrations(consulClient, proxyService), backoff.WithMaxRetries(backoff.NewConstantBackOff(1*time.Second), c.serviceRegistrationPollingAttempts)) - if err != nil { - c.logger.Error("Timed out waiting for service registration", "error", err) - return 1 - } - if c.nonRetryableError != nil { - c.logger.Error("Error processing service registration", "error", c.nonRetryableError) - return 1 - } - } - // todo (agentless): this should eventually be passed to consul-dataplane as a string so we don't need to write it to file. - if c.consul.UseTLS && c.consul.CACertPEM != "" { - if err = common.WriteFileWithPerms(constants.ConsulCAFile, c.consul.CACertPEM, 0444); err != nil { - c.logger.Error("error writing CA cert file", "error", err) - return 1 + // First do the ACL Login, if necessary. + if c.flagACLAuthMethod != "" { + // loginMeta is the default metadata that we pass to the consul login API. + loginMeta := map[string]string{"pod": fmt.Sprintf("%s/%s", c.flagPodNamespace, c.flagPodName)} + loginParams := common.LoginParams{ + AuthMethod: c.flagACLAuthMethod, + Namespace: c.flagAuthMethodNamespace, + BearerTokenFile: c.flagBearerTokenFile, + TokenSinkFile: c.flagACLTokenSink, + Meta: loginMeta, } - } - - if c.flagRedirectTrafficConfig != "" { - err = c.applyTrafficRedirectionRules(proxyService) + token, err := common.ConsulLogin(consulClient, loginParams, c.logger) if err != nil { - c.logger.Error("error applying traffic redirection rules", "err", err) + if c.flagServiceAccountName == "default" { + c.logger.Warn("The service account name for this Pod is \"default\"." + + " In default installations this is not a supported service account name." + + " The service account name must match the name of the Kubernetes Service" + + " or the consul.hashicorp.com/connect-service annotation.") + } + c.logger.Error("unable to complete login", "error", err) return 1 } + cfg.Token = token } - c.logger.Info("Connect initialization completed") - return 0 -} - -func (c *Command) getConnectServiceRegistrations(consulClient *api.Client, proxyService *api.AgentService) backoff.Operation { + // Now wait for the service to be registered. Do this by querying the Agent for a service + // which maps to this pod+namespace. var proxyID string registrationRetryCount := 0 - return func() error { + var errServiceNameMismatch error + // We need a new client so that we can use the ACL token that was fetched during login to do the next bit, + // otherwise `consulClient` will still be using the bearerToken that was passed in. 
+ consulClient, err = consul.NewClient(cfg, c.http.ConsulAPITimeout()) + if err != nil { + c.logger.Error("Unable to update client connection", "error", err) + return 1 + } + err = backoff.Retry(func() error { registrationRetryCount++ filter := fmt.Sprintf("Meta[%q] == %q and Meta[%q] == %q ", - constants.MetaKeyPodName, c.flagPodName, constants.MetaKeyKubeNS, c.flagPodNamespace) + connectinject.MetaKeyPodName, c.flagPodName, connectinject.MetaKeyKubeNS, c.flagPodNamespace) if c.flagMultiPort && c.flagServiceName != "" { // If the service name is set and this is a multi-port pod there may be multiple services registered for // this one Pod. If so, we want to ensure the service and proxy matching our expected name is registered. filter += fmt.Sprintf(` and (Service == %q or Service == "%s-sidecar-proxy")`, c.flagServiceName, c.flagServiceName) } - serviceList, _, err := consulClient.Catalog().NodeServiceList(c.flagConsulNodeName, - &api.QueryOptions{Filter: filter, MergeCentralConfig: true}) + serviceList, err := consulClient.Agent().ServicesWithFilter(filter) if err != nil { - c.logger.Error("Unable to get services", "error", err) + c.logger.Error("Unable to get Agent services", "error", err) return err } // Wait for the service and the connect-proxy service to be registered. - if len(serviceList.Services) != 2 { + if len(serviceList) != 2 { c.logger.Info("Unable to find registered services; retrying") // Once every 10 times we're going to print this informational message to the pod logs so that // it is not "lost" to the user at the end of the retries when the pod enters a CrashLoop. @@ -228,33 +173,32 @@ func (c *Command) getConnectServiceRegistrations(consulClient *api.Client, proxy c.logger.Info("Check to ensure a Kubernetes service has been created for this application." + " If your pod is not starting also check the connect-inject deployment logs.") } - if len(serviceList.Services) > 2 { + if len(serviceList) > 2 { c.logger.Error("There are multiple Consul services registered for this pod when there must only be one." + " Check if there are multiple Kubernetes services selecting this pod and add the label" + " `consul.hashicorp.com/service-ignore: \"true\"` to all services except the one used by Consul for handling requests.") } - return fmt.Errorf("did not find correct number of services, found: %d, services: %+v", len(serviceList.Services), serviceList) + return fmt.Errorf("did not find correct number of services, found: %d, services: %+v", len(serviceList), serviceList) } - for _, svc := range serviceList.Services { + for _, svc := range serviceList { c.logger.Info("Registered service has been detected", "service", svc.Service) - if c.consul.ConsulLogin.AuthMethod != "" { + if c.flagACLAuthMethod != "" { if c.flagServiceName != "" && c.flagServiceAccountName != c.flagServiceName { - // Save an error but return nil so that we don't retry this step. - c.nonRetryableError = fmt.Errorf("service account name %s doesn't match annotation service name %s", c.flagServiceAccountName, c.flagServiceName) + // Set the error but return nil so we don't retry. + errServiceNameMismatch = fmt.Errorf("service account name %s doesn't match annotation service name %s", c.flagServiceAccountName, c.flagServiceName) return nil } if c.flagServiceName == "" && svc.Kind != api.ServiceKindConnectProxy && c.flagServiceAccountName != svc.Service { - // Save an error but return nil so that we don't retry this step. 
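// ----- aside (illustrative sketch, not part of this patch) -----
// The polling loop above queries the agent with a metadata filter. A minimal
// standalone version of that query; the literal "pod-name"/"k8s-namespace"
// keys are assumptions standing in for connectinject.MetaKeyPodName and
// connectinject.MetaKeyKubeNS, and the pod values are placeholders.
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	filter := fmt.Sprintf("Meta[%q] == %q and Meta[%q] == %q",
		"pod-name", "counting-pod", "k8s-namespace", "default-ns")
	svcs, err := client.Agent().ServicesWithFilter(filter)
	if err != nil {
		log.Fatal(err)
	}
	// A fully injected pod yields exactly two entries: the service itself
	// and its connect-proxy sidecar, which is what the loop waits for.
	for id, svc := range svcs {
		fmt.Println(id, svc.Service, svc.Kind)
	}
}
// ----- end aside -----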
- c.nonRetryableError = fmt.Errorf("service account name %s doesn't match Consul service name %s", c.flagServiceAccountName, svc.Service) + // Set the error but return nil so we don't retry. + errServiceNameMismatch = fmt.Errorf("service account name %s doesn't match Consul service name %s", c.flagServiceAccountName, svc.Service) return nil } } if svc.Kind == api.ServiceKindConnectProxy { // This is the proxy service ID. proxyID = svc.ID - *proxyService = *svc } } @@ -262,78 +206,26 @@ func (c *Command) getConnectServiceRegistrations(consulClient *api.Client, proxy // In theory we can't reach this point unless we have 2 services registered against // this pod and neither are the connect-proxy. We don't support this case anyway, but it // is necessary to return from the function. - c.logger.Error("Unable to write proxy ID to file", "error", err) return fmt.Errorf("unable to find registered connect-proxy service") } - - // Write the proxy ID to the shared volume so `consul connect envoy` can use it for bootstrapping. - if err = common.WriteFileWithPerms(c.flagProxyIDFile, proxyID, os.FileMode(0444)); err != nil { - // Save an error but return nil so that we don't retry this step. - c.nonRetryableError = err - return nil - } - return nil + }, backoff.WithMaxRetries(backoff.NewConstantBackOff(1*time.Second), c.serviceRegistrationPollingAttempts)) + if err != nil { + c.logger.Error("Timed out waiting for service registration", "error", err) + return 1 } -} - -func (c *Command) getGatewayRegistration(client *api.Client) backoff.Operation { - var proxyID string - registrationRetryCount := 0 - return func() error { - registrationRetryCount++ - var gatewayList *api.CatalogNodeServiceList - var err error - filter := fmt.Sprintf("Meta[%q] == %q and Meta[%q] == %q ", - constants.MetaKeyPodName, c.flagPodName, constants.MetaKeyKubeNS, c.flagPodNamespace) - if c.consul.Namespace != "" { - gatewayList, _, err = client.Catalog().NodeServiceList(c.flagConsulNodeName, &api.QueryOptions{Filter: filter, Namespace: namespaces.WildcardNamespace}) - } else { - gatewayList, _, err = client.Catalog().NodeServiceList(c.flagConsulNodeName, &api.QueryOptions{Filter: filter}) - } - if err != nil { - c.logger.Error("Unable to get gateway", "error", err) - return err - } - // Wait for the service and the connect-proxy service to be registered. - if len(gatewayList.Services) != 1 { - c.logger.Info("Unable to find registered gateway; retrying") - // Once every 10 times we're going to print this informational message to the pod logs so that - // it is not "lost" to the user at the end of the retries when the pod enters a CrashLoop. - if registrationRetryCount%10 == 0 { - c.logger.Info("Check to ensure a Kubernetes service has been created for this application." + - " If your pod is not starting also check the connect-inject deployment logs.") - } - if len(gatewayList.Services) > 1 { - c.logger.Error("There are multiple Consul gateway services registered for this pod when there must only be one." 
+ - " Check if there are multiple Kubernetes services selecting this gateway pod and add the label" + - " `consul.hashicorp.com/service-ignore: \"true\"` to all services except the one used by Consul for handling requests.") - } - return fmt.Errorf("did not find correct number of gateways, found: %d, services: %+v", len(gatewayList.Services), gatewayList) - } - for _, gateway := range gatewayList.Services { - switch gateway.Kind { - case api.ServiceKindMeshGateway, api.ServiceKindIngressGateway, api.ServiceKindTerminatingGateway: - proxyID = gateway.ID - } - } - if proxyID == "" { - // In theory we can't reach this point unless we have a service registered against - // this pod but it isnt a Connect Gateway. We don't support this case, but it - // is necessary to return from the function. - c.nonRetryableError = fmt.Errorf("unable to find registered connect-proxy service") - return nil - } - - // Write the proxy ID to the shared volume so the consul-dataplane can use it for bootstrapping. - if err := common.WriteFileWithPerms(c.flagProxyIDFile, proxyID, os.FileMode(0444)); err != nil { - // Save an error but return nil so that we don't retry this step. - c.nonRetryableError = err - return nil - } - - return nil + if errServiceNameMismatch != nil { + c.logger.Error(errServiceNameMismatch.Error()) + return 1 + } + // Write the proxy ID to the shared volume so `consul connect envoy` can use it for bootstrapping. + err = common.WriteFileWithPerms(c.flagProxyIDFile, proxyID, os.FileMode(0444)) + if err != nil { + c.logger.Error("Unable to write proxy ID to file", "error", err) + return 1 } + c.logger.Info("Connect initialization completed") + return 0 } func (c *Command) validateFlags() error { @@ -343,13 +235,13 @@ func (c *Command) validateFlags() error { if c.flagPodNamespace == "" { return errors.New("-pod-namespace must be set") } - if c.consul.ConsulLogin.AuthMethod != "" && c.flagServiceAccountName == "" && c.flagGatewayKind == "" { + if c.flagACLAuthMethod != "" && c.flagServiceAccountName == "" { return errors.New("-service-account-name must be set when ACLs are enabled") } - if c.flagConsulNodeName == "" { - return errors.New("-consul-node-name must be set") - } + if c.http.ConsulAPITimeout() <= 0 { + return errors.New("-consul-api-timeout must be set to a value greater than 0") + } return nil } @@ -359,58 +251,6 @@ func (c *Command) Help() string { return c.help } -// This below implementation is loosely based on -// https://github.com/hashicorp/consul/blob/fe2d41ddad9ba2b8ff86cbdebbd8f05855b1523c/command/connect/redirecttraffic/redirect_traffic.go#L136. - -// trafficRedirectProxyConfig is a snippet of xds/config.go -// with only the configuration values that we need to parse from Proxy.Config -// to apply traffic redirection rules. -type trafficRedirectProxyConfig struct { - BindPort int `mapstructure:"bind_port"` - StatsBindAddr string `mapstructure:"envoy_stats_bind_addr"` -} - -func (c *Command) applyTrafficRedirectionRules(svc *api.AgentService) error { - err := json.Unmarshal([]byte(c.flagRedirectTrafficConfig), &c.iptablesConfig) - if err != nil { - return err - } - if c.iptablesProvider != nil { - c.iptablesConfig.IptablesProvider = c.iptablesProvider - } - - if svc.Proxy.TransparentProxy != nil && svc.Proxy.TransparentProxy.OutboundListenerPort != 0 { - c.iptablesConfig.ProxyOutboundPort = svc.Proxy.TransparentProxy.OutboundListenerPort - } - - // Decode proxy's opaque config so that we can use it later to configure - // traffic redirection with iptables. 
- var trCfg trafficRedirectProxyConfig - if err = mapstructure.WeakDecode(svc.Proxy.Config, &trCfg); err != nil { - return fmt.Errorf("failed parsing Proxy.Config: %s", err) - } - if trCfg.BindPort != 0 { - c.iptablesConfig.ProxyInboundPort = trCfg.BindPort - } - - if trCfg.StatsBindAddr != "" { - _, port, err := net.SplitHostPort(trCfg.StatsBindAddr) - if err != nil { - return fmt.Errorf("failed parsing host and port from envoy_stats_bind_addr: %s", err) - } - - c.iptablesConfig.ExcludeInboundPorts = append(c.iptablesConfig.ExcludeInboundPorts, port) - } - - // Configure any relevant information from the proxy service - err = iptables.Setup(c.iptablesConfig) - if err != nil { - return err - } - c.logger.Info("Successfully applied traffic redirection rules") - return nil -} - const synopsis = "Inject connect init command." const help = ` Usage: consul-k8s-control-plane connect-init [options] diff --git a/control-plane/subcommand/connect-init/command_ent_test.go b/control-plane/subcommand/connect-init/command_ent_test.go index ecdc34122e..4e23fb2244 100644 --- a/control-plane/subcommand/connect-init/command_ent_test.go +++ b/control-plane/subcommand/connect-init/command_ent_test.go @@ -6,55 +6,162 @@ import ( "fmt" "math/rand" "os" - "strconv" "testing" + "github.com/hashicorp/consul-k8s/control-plane/helper/test" "github.com/hashicorp/consul-k8s/control-plane/namespaces" + "github.com/hashicorp/consul-k8s/control-plane/subcommand/common" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/sdk/testutil" "github.com/mitchellh/cli" "github.com/stretchr/testify/require" ) -func TestRun_WithNamespaces(t *testing.T) { +func TestRun_ServicePollingWithACLsAndTLSWithNamespaces(t *testing.T) { t.Parallel() cases := []struct { name string + tls bool consulServiceNamespace string + acls bool + authMethodNamespace string + adminPartition string }{ { - name: "serviceNS=default", + name: "ACLs enabled, no tls, serviceNS=default, authMethodNS=default, partition=default", + tls: false, consulServiceNamespace: "default", + authMethodNamespace: "default", + acls: true, + adminPartition: "default", }, { - name: "serviceNS=default-ns", + name: "ACLs enabled, tls, serviceNS=default, authMethodNS=default, partition=default", + tls: true, + consulServiceNamespace: "default", + authMethodNamespace: "default", + acls: true, + adminPartition: "default", + }, + { + name: "ACLs enabled, no tls, serviceNS=default-ns, authMethodNS=default, partition=default", + tls: false, + consulServiceNamespace: "default-ns", + authMethodNamespace: "default", + acls: true, + adminPartition: "default", + }, + { + name: "ACLs enabled, tls, serviceNS=default-ns, authMethodNS=default, partition=default", + tls: true, consulServiceNamespace: "default-ns", + authMethodNamespace: "default", + acls: true, + adminPartition: "default", }, { - name: "serviceNS=other", + name: "ACLs enabled, no tls, serviceNS=other, authMethodNS=other, partition=default", + tls: false, consulServiceNamespace: "other", + authMethodNamespace: "other", + acls: true, + adminPartition: "default", + }, + { + name: "ACLs enabled, tls, serviceNS=other, authMethodNS=other, partition=default", + tls: true, + consulServiceNamespace: "other", + authMethodNamespace: "other", + acls: true, + adminPartition: "default", + }, + { + name: "ACLs disabled, no tls, serviceNS=default, authMethodNS=default, partition=default", + tls: false, + consulServiceNamespace: "default", + authMethodNamespace: "default", + adminPartition: "default", + }, + { + name: "ACLs disabled, 
tls, serviceNS=default, authMethodNS=default, partition=default", + tls: true, + consulServiceNamespace: "default", + authMethodNamespace: "default", + adminPartition: "default", + }, + { + name: "ACLs disabled, no tls, serviceNS=default-ns, authMethodNS=default, partition=default", + tls: false, + consulServiceNamespace: "default-ns", + authMethodNamespace: "default", + adminPartition: "default", + }, + { + name: "ACLs disabled, tls, serviceNS=default-ns, authMethodNS=default, partition=default", + tls: true, + consulServiceNamespace: "default-ns", + authMethodNamespace: "default", + adminPartition: "default", + }, + { + name: "ACLs disabled, no tls, serviceNS=other, authMethodNS=other, partition=default", + tls: false, + consulServiceNamespace: "other", + authMethodNamespace: "other", + adminPartition: "default", + }, + { + name: "ACLs disabled, tls, serviceNS=other, authMethodNS=other, partition=default", + tls: true, + consulServiceNamespace: "other", + authMethodNamespace: "other", + adminPartition: "default", }, } for _, c := range cases { t.Run(c.name, func(t *testing.T) { + bearerFile := common.WriteTempFile(t, test.ServiceAccountJWTToken) tokenFile := fmt.Sprintf("/tmp/%d1", rand.Int()) proxyFile := fmt.Sprintf("/tmp/%d2", rand.Int()) t.Cleanup(func() { - _ = os.RemoveAll(proxyFile) - _ = os.RemoveAll(tokenFile) + os.Remove(proxyFile) + os.Remove(tokenFile) }) + var caFile, certFile, keyFile string // Start Consul server with ACLs enabled and default deny policy. - var serverCfg *testutil.TestServerConfig + masterToken := "b78d37c7-0ca7-5f4d-99ee-6d9975ce4586" server, err := testutil.NewTestServerConfigT(t, func(cfg *testutil.TestServerConfig) { - serverCfg = cfg + if c.acls { + cfg.ACL.Enabled = true + cfg.ACL.DefaultPolicy = "deny" + cfg.ACL.Tokens.InitialManagement = masterToken + } + if c.tls { + caFile, certFile, keyFile = test.GenerateServerCerts(t) + cfg.CAFile = caFile + cfg.CertFile = certFile + cfg.KeyFile = keyFile + } }) require.NoError(t, err) defer server.Stop() server.WaitForLeader(t) cfg := &api.Config{ + Scheme: "http", Address: server.HTTPAddr, Namespace: c.consulServiceNamespace, + Partition: c.adminPartition, + } + if c.acls { + cfg.Token = masterToken + } + if c.tls { + cfg.Address = server.HTTPSAddr + cfg.Scheme = "https" + cfg.TLSConfig = api.TLSConfig{ + CAFile: caFile, + } } consulClient, err := api.NewClient(cfg) @@ -63,16 +170,14 @@ func TestRun_WithNamespaces(t *testing.T) { _, err = namespaces.EnsureExists(consulClient, c.consulServiceNamespace, "") require.NoError(t, err) + if c.acls { + test.SetupK8sAuthMethodWithNamespaces(t, consulClient, testServiceAccountName, "default-ns", c.authMethodNamespace, c.authMethodNamespace != c.consulServiceNamespace, "") + } + // Register Consul services. 
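// ----- aside (illustrative sketch, not part of this patch) -----
// These tests now register fixtures through the agent endpoint rather than
// the catalog. A minimal service-plus-sidecar registration in the same
// shape; IDs, names, ports, and meta values are hypothetical stand-ins for
// the consulCountingSvc fixtures.
package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	meta := map[string]string{"pod-name": "counting-pod", "k8s-namespace": "default-ns"}
	svc := &api.AgentServiceRegistration{
		ID: "counting-counting", Name: "counting",
		Port: 9001, Address: "127.0.0.1", Meta: meta,
	}
	sidecar := &api.AgentServiceRegistration{
		ID: "counting-counting-sidecar-proxy", Name: "counting-sidecar-proxy",
		Kind: api.ServiceKindConnectProxy,
		Port: 20000, Address: "127.0.0.1", Meta: meta,
		Proxy: &api.AgentServiceConnectProxyConfig{DestinationServiceName: "counting"},
	}
	for _, r := range []*api.AgentServiceRegistration{svc, sidecar} {
		if err := client.Agent().ServiceRegister(r); err != nil {
			log.Fatal(err)
		}
	}
}
// ----- end aside -----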
- testConsulServices := []api.AgentService{consulCountingSvc, consulCountingSvcSidecar} + testConsulServices := []api.AgentServiceRegistration{consulCountingSvc, consulCountingSvcSidecar} for _, svc := range testConsulServices { - serviceRegistration := &api.CatalogRegistration{ - Node: nodeName, - Address: "127.0.0.1", - Service: &svc, - } - _, err = consulClient.Catalog().Register(serviceRegistration, nil) - require.NoError(t, err) + require.NoError(t, consulClient.Agent().ServiceRegister(&svc)) } ui := cli.NewMockUi() @@ -80,22 +185,43 @@ func TestRun_WithNamespaces(t *testing.T) { UI: ui, serviceRegistrationPollingAttempts: 5, } - // We build the consul-addr because normally it's defined by the init container setting + // We build the http-addr because normally it's defined by the init container setting // CONSUL_HTTP_ADDR when it processes the command template. flags := []string{"-pod-name", testPodName, "-pod-namespace", testPodNamespace, - "-addresses", "127.0.0.1", - "-http-port", strconv.Itoa(serverCfg.Ports.HTTP), - "-grpc-port", strconv.Itoa(serverCfg.Ports.GRPC), - "-namespace", c.consulServiceNamespace, + "-service-account-name", testServiceAccountName, + "-http-addr", fmt.Sprintf("%s://%s", cfg.Scheme, cfg.Address), + "-consul-service-namespace", c.consulServiceNamespace, + "-acl-token-sink", tokenFile, + "-bearer-token-file", bearerFile, "-proxy-id-file", proxyFile, - "-consul-node-name", nodeName, + "-consul-api-timeout", "5s", + } + if c.acls { + flags = append(flags, "-acl-auth-method", test.AuthMethod, "-auth-method-namespace", c.authMethodNamespace) + } + // Add the CA File if necessary since we're not setting CONSUL_CACERT in test ENV. + if c.tls { + flags = append(flags, "-ca-file", caFile) } - // Run the command. code := cmd.Run(flags) require.Equal(t, 0, code, ui.ErrorWriter.String()) + if c.acls { + // Validate the ACL token was written. + tokenData, err := os.ReadFile(tokenFile) + require.NoError(t, err) + require.NotEmpty(t, tokenData) + + // Check that the token has the metadata with pod name and pod namespace. + consulClient, err = api.NewClient(&api.Config{Address: server.HTTPAddr, Token: string(tokenData), Namespace: c.consulServiceNamespace}) + require.NoError(t, err) + token, _, err := consulClient.ACL().TokenReadSelf(&api.QueryOptions{Namespace: c.authMethodNamespace}) + require.NoError(t, err) + require.Equal(t, "token created via login: {\"pod\":\"default-ns/counting-pod\"}", token.Description) + } + // Validate contents of proxyFile. 
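// ----- aside (illustrative sketch, not part of this patch) -----
// When the TLS cases above run, the client must target the HTTPS address and
// trust the generated CA (the same effect the -ca-file flag has on the
// command). A minimal client configuration in that shape; the address and CA
// path are placeholders for server.HTTPSAddr and test.GenerateServerCerts output.
package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	cfg := api.DefaultConfig()
	cfg.Scheme = "https"
	cfg.Address = "127.0.0.1:8501"                        // placeholder for server.HTTPSAddr
	cfg.TLSConfig = api.TLSConfig{CAFile: "/tmp/ca.pem"} // placeholder CA path
	client, err := api.NewClient(cfg)
	if err != nil {
		log.Fatal(err)
	}
	_ = client // API calls now verify the server cert against the CA
}
// ----- end aside -----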
data, err := os.ReadFile(proxyFile) require.NoError(t, err) diff --git a/control-plane/subcommand/connect-init/command_test.go b/control-plane/subcommand/connect-init/command_test.go index 14bdc5280c..58514d98d9 100644 --- a/control-plane/subcommand/connect-init/command_test.go +++ b/control-plane/subcommand/connect-init/command_test.go @@ -1,27 +1,24 @@ package connectinit import ( - "encoding/json" "fmt" "math/rand" + "net/http" + "net/http/httptest" + "net/url" "os" "strconv" - "strings" - "sync" "testing" "time" "github.com/hashicorp/consul-k8s/control-plane/helper/test" "github.com/hashicorp/consul-k8s/control-plane/subcommand/common" "github.com/hashicorp/consul/api" - "github.com/hashicorp/consul/sdk/iptables" "github.com/hashicorp/consul/sdk/testutil" "github.com/mitchellh/cli" "github.com/stretchr/testify/require" ) -const nodeName = "test-node" - func TestRun_FlagValidation(t *testing.T) { t.Parallel() cases := []struct { @@ -40,24 +37,25 @@ func TestRun_FlagValidation(t *testing.T) { flags: []string{ "-pod-name", testPodName, "-pod-namespace", testPodNamespace, - "-auth-method-name", test.AuthMethod}, + "-acl-auth-method", test.AuthMethod}, expErr: "-service-account-name must be set when ACLs are enabled", }, { flags: []string{ "-pod-name", testPodName, - "-pod-namespace", testPodNamespace}, - expErr: "-consul-node-name must be set", + "-pod-namespace", testPodNamespace, + "-acl-auth-method", test.AuthMethod, + "-service-account-name", "foo"}, + expErr: "-consul-api-timeout must be set to a value greater than 0", }, { flags: []string{ "-pod-name", testPodName, "-pod-namespace", testPodNamespace, - "-auth-method-name", test.AuthMethod, + "-acl-auth-method", test.AuthMethod, "-service-account-name", "foo", - "-log-level", "invalid", - "-consul-node-name", "bar", - }, + "-consul-api-timeout", "5s", + "-log-level", "invalid"}, expErr: "unknown log level: invalid", }, } @@ -74,15 +72,14 @@ func TestRun_FlagValidation(t *testing.T) { } } -// TestRun_ConnectServices tests that the command can log in to Consul (if ACLs are enabled) using a kubernetes -// auth method and using the obtained token find the services for the provided pod name -// and namespace provided and write the proxy ID of the proxy service to a file. -func TestRun_ConnectServices(t *testing.T) { +// TestRun_ServicePollingWithACLsAndTLS bootstraps and starts a consul server using a mock +// kubernetes server to provide responses for setting up the consul AuthMethod +// then validates that the command runs end to end successfully. Also tests with TLS on/off. 
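// ----- aside (illustrative sketch, not part of this patch) -----
// The test below repeats a bootstrap pattern used throughout this diff:
// start a test server with ACLs in default-deny, wait for a leader, then
// build a client with the bootstrap token. Distilled into one minimal test;
// the token value matches the fixture used in these tests.
package example

import (
	"testing"

	"github.com/hashicorp/consul/api"
	"github.com/hashicorp/consul/sdk/testutil"
	"github.com/stretchr/testify/require"
)

func TestSketch_ACLServerBootstrap(t *testing.T) {
	masterToken := "b78d37c7-0ca7-5f4d-99ee-6d9975ce4586"
	server, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) {
		c.ACL.Enabled = true
		c.ACL.DefaultPolicy = "deny"
		c.ACL.Tokens.InitialManagement = masterToken
	})
	require.NoError(t, err)
	defer server.Stop()
	server.WaitForLeader(t)

	client, err := api.NewClient(&api.Config{Address: server.HTTPAddr, Token: masterToken})
	require.NoError(t, err)
	// With default-deny ACLs, this only succeeds because the token is set.
	_, _, err = client.ACL().TokenReadSelf(nil)
	require.NoError(t, err)
}
// ----- end aside -----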
+func TestRun_ServicePollingWithACLsAndTLS(t *testing.T) { t.Parallel() - cases := []struct { name string - aclsEnabled bool + tls bool serviceAccountName string serviceName string includeServiceAccountName bool @@ -91,25 +88,44 @@ func TestRun_ConnectServices(t *testing.T) { multiport bool }{ { - name: "service-name not provided", + name: "ACLs enabled, no tls", + tls: false, serviceAccountName: "counting", }, { - name: "multi-port service", + name: "ACLs enabled, tls", + tls: true, + serviceAccountName: "counting", + }, + { + name: "ACLs enabled, K8s service name matches service account name", + tls: false, + serviceAccountName: "counting", + serviceName: "", + }, + { + name: "ACLs enabled, service name annotation matches service account name", + tls: false, + serviceAccountName: "web", + serviceName: "web", + }, + { + name: "ACLs enabled, multiport service", + tls: false, serviceAccountName: "counting-admin", serviceName: "counting-admin", multiport: true, }, { - name: "acls enabled; service name annotation doesn't match service account name", - aclsEnabled: true, + name: "ACLs enabled, service name annotation doesn't match service account name", + tls: false, serviceAccountName: "not-a-match", serviceName: "web", expFail: true, }, { - name: "acls enabled; K8s service name doesn't match service account name", - aclsEnabled: true, + name: "ACLs enabled, K8s service name doesn't match service account name", + tls: false, serviceAccountName: "not-a-match", serviceName: "", expFail: true, @@ -117,17 +133,27 @@ func TestRun_ConnectServices(t *testing.T) { } for _, tt := range cases { t.Run(tt.name, func(t *testing.T) { + bearerFile := common.WriteTempFile(t, test.ServiceAccountJWTToken) tokenFile := fmt.Sprintf("/tmp/%d1", rand.Int()) proxyFile := fmt.Sprintf("/tmp/%d2", rand.Int()) t.Cleanup(func() { - _ = os.RemoveAll(proxyFile) - _ = os.RemoveAll(tokenFile) + os.Remove(proxyFile) + os.Remove(tokenFile) }) + var caFile, certFile, keyFile string // Start Consul server with ACLs enabled and default deny policy. - var serverCfg *testutil.TestServerConfig + masterToken := "b78d37c7-0ca7-5f4d-99ee-6d9975ce4586" server, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { - serverCfg = c + c.ACL.Enabled = true + c.ACL.DefaultPolicy = "deny" + c.ACL.Tokens.InitialManagement = masterToken + if tt.tls { + caFile, certFile, keyFile = test.GenerateServerCerts(t) + c.CAFile = caFile + c.CertFile = certFile + c.KeyFile = keyFile + } }) require.NoError(t, err) defer server.Stop() @@ -135,23 +161,27 @@ func TestRun_ConnectServices(t *testing.T) { cfg := &api.Config{ Scheme: "http", Address: server.HTTPAddr, + Token: masterToken, + } + if tt.tls { + cfg.Address = server.HTTPSAddr + cfg.Scheme = "https" + cfg.TLSConfig = api.TLSConfig{ + CAFile: caFile, + } } consulClient, err := api.NewClient(cfg) require.NoError(t, err) + test.SetupK8sAuthMethod(t, consulClient, testServiceAccountName, "default") + // Register Consul services. 
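// ----- aside (illustrative sketch, not part of this patch) -----
// The proxyFile assertions in the tests below ultimately depend on one
// selection rule in the command: of the services registered for the pod, the
// entry with Kind == connect-proxy carries the ID written to the proxy-id
// file. A minimal standalone version of that selection; the output path is a
// placeholder.
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	svcs, err := client.Agent().Services()
	if err != nil {
		log.Fatal(err)
	}
	var proxyID string
	for _, svc := range svcs {
		if svc.Kind == api.ServiceKindConnectProxy {
			proxyID = svc.ID
		}
	}
	if proxyID == "" {
		log.Fatal("unable to find registered connect-proxy service")
	}
	// 0444 matches the command: the file only needs to be readable.
	if err := os.WriteFile("/tmp/proxyid", []byte(proxyID), 0444); err != nil {
		log.Fatal(err)
	}
	fmt.Println("proxy ID:", proxyID)
}
// ----- end aside -----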
- testConsulServices := []api.AgentService{consulCountingSvc, consulCountingSvcSidecar} + testConsulServices := []api.AgentServiceRegistration{consulCountingSvc, consulCountingSvcSidecar} if tt.multiport { testConsulServices = append(testConsulServices, consulCountingSvcMultiport, consulCountingSvcSidecarMultiport) } for _, svc := range testConsulServices { - serviceRegistration := &api.CatalogRegistration{ - Node: nodeName, - Address: "127.0.0.1", - Service: &svc, - } - _, err := consulClient.Catalog().Register(serviceRegistration, nil) - require.NoError(t, err) + require.NoError(t, consulClient.Agent().ServiceRegister(&svc)) } ui := cli.NewMockUi() @@ -160,24 +190,24 @@ func TestRun_ConnectServices(t *testing.T) { serviceRegistrationPollingAttempts: 3, } - // We build the consul-addr because normally it's defined by the init container setting + // We build the http-addr because normally it's defined by the init container setting // CONSUL_HTTP_ADDR when it processes the command template. flags := []string{"-pod-name", testPodName, "-pod-namespace", testPodNamespace, + "-acl-auth-method", test.AuthMethod, + "-service-account-name", tt.serviceAccountName, "-service-name", tt.serviceName, - "-addresses", "127.0.0.1", - "-http-port", strconv.Itoa(serverCfg.Ports.HTTP), - "-grpc-port", strconv.Itoa(serverCfg.Ports.GRPC), + "-http-addr", fmt.Sprintf("%s://%s", cfg.Scheme, cfg.Address), + "-bearer-token-file", bearerFile, + "-acl-token-sink", tokenFile, "-proxy-id-file", proxyFile, "-multiport=" + strconv.FormatBool(tt.multiport), - "-consul-node-name", nodeName, + "-consul-api-timeout=5s", } - if tt.aclsEnabled { - flags = append(flags, "-auth-method-name", test.AuthMethod, - "-service-account-name", tt.serviceAccountName, - "-acl-token-sink", tokenFile) + // Add the CA File if necessary since we're not setting CONSUL_CACERT in tt ENV. + if tt.tls { + flags = append(flags, "-ca-file", caFile) } - // Run the command. code := cmd.Run(flags) if tt.expFail { @@ -186,19 +216,17 @@ func TestRun_ConnectServices(t *testing.T) { } require.Equal(t, 0, code, ui.ErrorWriter.String()) - if tt.aclsEnabled { - // Validate the ACL token was written. - tokenData, err := os.ReadFile(tokenFile) - require.NoError(t, err) - require.NotEmpty(t, tokenData) - - // Check that the token has the metadata with pod name and pod namespace. - consulClient, err = api.NewClient(&api.Config{Address: server.HTTPAddr, Token: string(tokenData)}) - require.NoError(t, err) - token, _, err := consulClient.ACL().TokenReadSelf(nil) - require.NoError(t, err) - require.Equal(t, "token created via login: {\"pod\":\"default-ns/counting-pod\"}", token.Description) - } + // Validate the ACL token was written. + tokenData, err := os.ReadFile(tokenFile) + require.NoError(t, err) + require.NotEmpty(t, tokenData) + + // Check that the token has the metadata with pod name and pod namespace. + consulClient, err = api.NewClient(&api.Config{Address: server.HTTPAddr, Token: string(tokenData)}) + require.NoError(t, err) + token, _, err := consulClient.ACL().TokenReadSelf(nil) + require.NoError(t, err) + require.Equal(t, "token created via login: {\"pod\":\"default-ns/counting-pod\"}", token.Description) // Validate contents of proxyFile. 
data, err := os.ReadFile(proxyFile) @@ -212,100 +240,73 @@ func TestRun_ConnectServices(t *testing.T) { } } -// TestRun_Gateways tests that the command can log in to Consul (if ACLs are enabled) using a kubernetes -// auth method and using the obtained token find the service for the provided gateway -// and namespace provided and write the proxy ID of the gateway service to a file. -func TestRun_Gateways(t *testing.T) { +// This test validates service polling works in a happy case scenario with and without TLS. +func TestRun_ServicePollingOnly(t *testing.T) { t.Parallel() - cases := []struct { - name string - gatewayKind string - agentService api.AgentService - serviceName string - expFail bool + name string + tls bool + serviceName string + multiport bool }{ { - name: "mesh-gateway", - gatewayKind: "mesh-gateway", - agentService: api.AgentService{ - ID: "mesh-gateway", - Service: "mesh-gateway", - Kind: api.ServiceKindMeshGateway, - Port: 4444, - Address: "127.0.0.1", - Meta: map[string]string{ - "component": "mesh-gateway", - metaKeyPodName: testGatewayName, - metaKeyKubeNS: "default-ns", - }, - }, + name: "ACLs disabled, no tls", + tls: false, }, { - name: "ingress-gateway", - gatewayKind: "ingress-gateway", - agentService: api.AgentService{ - ID: "ingress-gateway", - Service: "ingress-gateway", - Kind: api.ServiceKindMeshGateway, - Port: 4444, - Address: "127.0.0.1", - Meta: map[string]string{ - "component": "ingress-gateway", - metaKeyPodName: testGatewayName, - metaKeyKubeNS: "default-ns", - }, - }, + name: "ACLs disabled, tls", + tls: true, }, { - name: "terminating-gateway", - gatewayKind: "terminating-gateway", - agentService: api.AgentService{ - ID: "terminating-gateway", - Service: "terminating-gateway", - Kind: api.ServiceKindMeshGateway, - Port: 4444, - Address: "127.0.0.1", - Meta: map[string]string{ - "component": "terminating-gateway", - metaKeyPodName: testGatewayName, - metaKeyKubeNS: "default-ns", - }, - }, + name: "Multiport, ACLs disabled, no tls", + tls: false, + serviceName: "counting-admin", + multiport: true, }, } for _, tt := range cases { t.Run(tt.name, func(t *testing.T) { - proxyFile := fmt.Sprintf("/tmp/%d2", rand.Int()) + proxyFile := fmt.Sprintf("/tmp/%d", rand.Int()) t.Cleanup(func() { - _ = os.RemoveAll(proxyFile) + os.Remove(proxyFile) }) - // Start Consul server with ACLs enabled and default deny policy. - var serverCfg *testutil.TestServerConfig + var caFile, certFile, keyFile string + // Start Consul server with TLS enabled if required. server, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { - serverCfg = c + if tt.tls { + caFile, certFile, keyFile = test.GenerateServerCerts(t) + c.CAFile = caFile + c.CertFile = certFile + c.KeyFile = keyFile + } }) require.NoError(t, err) defer server.Stop() server.WaitForLeader(t) + + // Get the Consul Client. cfg := &api.Config{ Scheme: "http", Address: server.HTTPAddr, } + if tt.tls { + cfg.Address = server.HTTPSAddr + cfg.Scheme = "https" + cfg.TLSConfig = api.TLSConfig{ + CAFile: caFile, + } + } consulClient, err := api.NewClient(cfg) require.NoError(t, err) // Register Consul services. 
- testConsulServices := []api.AgentService{tt.agentService} + testConsulServices := []api.AgentServiceRegistration{consulCountingSvc, consulCountingSvcSidecar} + if tt.multiport { + testConsulServices = append(testConsulServices, consulCountingSvcMultiport, consulCountingSvcSidecarMultiport) + } for _, svc := range testConsulServices { - serviceRegistration := &api.CatalogRegistration{ - Node: nodeName, - Address: "127.0.0.1", - Service: &svc, - } - _, err = consulClient.Catalog().Register(serviceRegistration, nil) - require.NoError(t, err) + require.NoError(t, consulClient.Agent().ServiceRegister(&svc)) } ui := cli.NewMockUi() @@ -313,38 +314,46 @@ func TestRun_Gateways(t *testing.T) { UI: ui, serviceRegistrationPollingAttempts: 3, } - // We build the http-addr because normally it's defined by the init container setting // CONSUL_HTTP_ADDR when it processes the command template. - flags := []string{"-pod-name", testGatewayName, + flags := []string{ + "-pod-name", testPodName, "-pod-namespace", testPodNamespace, - "-gateway-kind", tt.gatewayKind, - "-addresses", "127.0.0.1", - "-http-port", strconv.Itoa(serverCfg.Ports.HTTP), - "-grpc-port", strconv.Itoa(serverCfg.Ports.GRPC), "-proxy-id-file", proxyFile, - "-consul-node-name", nodeName, + "-multiport=" + strconv.FormatBool(tt.multiport), + "-http-addr", fmt.Sprintf("%s://%s", cfg.Scheme, cfg.Address), + "-consul-api-timeout", "5s"} + + // In a multiport case, the service name will be passed in to the test. + if tt.serviceName != "" { + flags = append(flags, "-service-name", tt.serviceName) + } + + // Add the CA File if necessary since we're not setting CONSUL_CACERT in tt ENV. + if tt.tls { + flags = append(flags, "-ca-file", caFile) } // Run the command. code := cmd.Run(flags) - if tt.expFail { - require.Equal(t, 1, code) - return - } require.Equal(t, 0, code, ui.ErrorWriter.String()) // Validate contents of proxyFile. data, err := os.ReadFile(proxyFile) require.NoError(t, err) - require.Contains(t, string(data), tt.gatewayKind) + if tt.multiport { + require.Contains(t, string(data), "counting-admin-sidecar-proxy-id") + } else { + require.Contains(t, string(data), "counting-counting-sidecar-proxy") + } }) } + } -// TestRun_ConnectServices_Errors tests that when registered services could not be found, +// TestRun_ServicePollingErrors tests that when registered services could not be found, // we error out. -func TestRun_ConnectServices_Errors(t *testing.T) { +func TestRun_ServicePollingErrors(t *testing.T) { t.Parallel() cases := []struct { @@ -489,103 +498,7 @@ func TestRun_ConnectServices_Errors(t *testing.T) { t.Run(c.name, func(t *testing.T) { proxyFile := fmt.Sprintf("/tmp/%d", rand.Int()) t.Cleanup(func() { - os.RemoveAll(proxyFile) - }) - - // Start Consul server. - var serverCfg *testutil.TestServerConfig - server, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { - serverCfg = c - }) - require.NoError(t, err) - defer server.Stop() - server.WaitForLeader(t) - consulClient, err := api.NewClient(&api.Config{Address: server.HTTPAddr}) - require.NoError(t, err) - - // Register Consul services. 
- for _, svc := range c.services { - require.NoError(t, consulClient.Agent().ServiceRegister(&svc)) - } - - ui := cli.NewMockUi() - cmd := Command{ - UI: ui, - serviceRegistrationPollingAttempts: 1, - } - flags := []string{ - "-addresses", "127.0.0.1", - "-http-port", strconv.Itoa(serverCfg.Ports.HTTP), - "-grpc-port", strconv.Itoa(serverCfg.Ports.GRPC), - "-pod-name", testPodName, - "-pod-namespace", testPodNamespace, - "-proxy-id-file", proxyFile, - "-consul-node-name", nodeName, - } - - code := cmd.Run(flags) - require.Equal(t, 1, code) - }) - } -} - -// TestRun_Gateways_Errors tests that when registered services could not be found, -// we error out. -func TestRun_Gateways_Errors(t *testing.T) { - t.Parallel() - - cases := []struct { - name string - services []api.AgentServiceRegistration - }{ - { - name: "gateway without pod-name or k8s-namespace meta", - services: []api.AgentServiceRegistration{ - { - ID: "mesh-gateway", - Name: "mesh-gateway", - Kind: "mesh-gateway", - Port: 9999, - Address: "127.0.0.1", - }, - }, - }, - { - name: "gateway with pod-name meta but without k8s-namespace meta", - services: []api.AgentServiceRegistration{ - { - ID: "mesh-gateway", - Name: "mesh-gateway", - Kind: "mesh-gateway", - Port: 9999, - Address: "127.0.0.1", - Meta: map[string]string{ - metaKeyPodName: "mesh-gateway", - }, - }, - }, - }, - { - name: "service and proxy with k8s-namespace meta but pod-name meta", - services: []api.AgentServiceRegistration{ - { - ID: "mesh-gateway", - Name: "mesh-gateway", - Kind: "mesh-gateway", - Port: 9999, - Address: "127.0.0.1", - Meta: map[string]string{ - metaKeyKubeNS: "default-ns", - }, - }, - }}, - } - - for _, c := range cases { - t.Run(c.name, func(t *testing.T) { - proxyFile := fmt.Sprintf("/tmp/%d", rand.Int()) - t.Cleanup(func() { - os.RemoveAll(proxyFile) + os.Remove(proxyFile) }) // Start Consul server. @@ -608,12 +521,10 @@ func TestRun_Gateways_Errors(t *testing.T) { } flags := []string{ "-http-addr", server.HTTPAddr, - "-gateway-kind", "mesh-gateway", "-pod-name", testPodName, "-pod-namespace", testPodNamespace, "-proxy-id-file", proxyFile, "-consul-api-timeout", "5s", - "-consul-node-name", nodeName, } code := cmd.Run(flags) @@ -629,10 +540,7 @@ func TestRun_RetryServicePolling(t *testing.T) { proxyFile := common.WriteTempFile(t, "") // Start Consul server. - var serverCfg *testutil.TestServerConfig - server, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { - serverCfg = c - }) + server, err := testutil.NewTestServerConfigT(t, nil) require.NoError(t, err) defer server.Stop() server.WaitForLeader(t) @@ -641,24 +549,14 @@ func TestRun_RetryServicePolling(t *testing.T) { // Start the consul service registration in a go func and delay it so that it runs // after the cmd.Run() starts. - var wg sync.WaitGroup - wg.Add(1) go func() { - defer wg.Done() // Wait a moment, this ensures that we are already in the retry logic. time.Sleep(time.Second * 2) // Register counting service. - serviceRegistration := &api.CatalogRegistration{ - Node: nodeName, - Address: "127.0.0.1", - Service: &consulCountingSvc, - } - _, err = consulClient.Catalog().Register(serviceRegistration, nil) - require.NoError(t, err) + require.NoError(t, consulClient.Agent().ServiceRegister(&consulCountingSvc)) + time.Sleep(time.Second * 2) // Register proxy sidecar service. 
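// Registering the sidecar separately, after a further delay, also exercises
// the window where the service exists but its connect-proxy does not yet, so
// the poll loop has to keep retrying past a partial result.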
- serviceRegistration.Service = &consulCountingSvcSidecar - _, err = consulClient.Catalog().Register(serviceRegistration, nil) - require.NoError(t, err) + require.NoError(t, consulClient.Agent().ServiceRegister(&consulCountingSvcSidecar)) }() ui := cli.NewMockUi() @@ -669,14 +567,11 @@ func TestRun_RetryServicePolling(t *testing.T) { flags := []string{ "-pod-name", testPodName, "-pod-namespace", testPodNamespace, - "-addresses", "127.0.0.1", - "-http-port", strconv.Itoa(serverCfg.Ports.HTTP), - "-grpc-port", strconv.Itoa(serverCfg.Ports.GRPC), + "-http-addr", server.HTTPAddr, "-proxy-id-file", proxyFile, - "-consul-node-name", nodeName, + "-consul-api-timeout", "5s", } code := cmd.Run(flags) - wg.Wait() require.Equal(t, 0, code) // Validate contents of proxyFile. @@ -693,10 +588,7 @@ func TestRun_InvalidProxyFile(t *testing.T) { randFileName := fmt.Sprintf("/foo/%d/%d", rand.Int(), rand.Int()) // Start Consul server. - var serverCfg *testutil.TestServerConfig - server, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { - serverCfg = c - }) + server, err := testutil.NewTestServerConfigT(t, nil) require.NoError(t, err) defer server.Stop() server.WaitForLeader(t) @@ -704,15 +596,9 @@ func TestRun_InvalidProxyFile(t *testing.T) { require.NoError(t, err) // Register Consul services. - testConsulServices := []api.AgentService{consulCountingSvc, consulCountingSvcSidecar} + testConsulServices := []api.AgentServiceRegistration{consulCountingSvc, consulCountingSvcSidecar} for _, svc := range testConsulServices { - serviceRegistration := &api.CatalogRegistration{ - Node: nodeName, - Address: "127.0.0.1", - Service: &svc, - } - _, err = consulClient.Catalog().Register(serviceRegistration, nil) - require.NoError(t, err) + require.NoError(t, consulClient.Agent().ServiceRegister(&svc)) } ui := cli.NewMockUi() cmd := Command{ @@ -722,9 +608,7 @@ func TestRun_InvalidProxyFile(t *testing.T) { flags := []string{ "-pod-name", testPodName, "-pod-namespace", testPodNamespace, - "-addresses", "127.0.0.1", - "-http-port", strconv.Itoa(serverCfg.Ports.HTTP), - "-grpc-port", strconv.Itoa(serverCfg.Ports.GRPC), + "-http-addr", server.HTTPAddr, "-proxy-id-file", randFileName, "-consul-api-timeout", "5s", } @@ -734,155 +618,234 @@ func TestRun_InvalidProxyFile(t *testing.T) { require.Error(t, err) } -func TestRun_TrafficRedirection(t *testing.T) { - cases := map[string]struct { - proxyConfig map[string]interface{} - tproxyConfig api.TransparentProxyConfig - registerProxyDefaults bool - expIptablesParamsFunc func(actual iptables.Config) (bool, string) +// TestRun_FailsWithBadServerResponses tests error handling with invalid server responses. 
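+// Rather than a real Consul test server, it stands up net/http/httptest so each
+// ACL and service endpoint can deterministically return a malformed (empty) body,
+// along these lines (a minimal sketch, not the exact handler used below):
+//
+//	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+//		if r.URL.Path == "/v1/acl/login" {
+//			w.Write([]byte("")) // empty body: the client's JSON decode fails
+//		}
+//	}))
+//	defer srv.Close()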
+func TestRun_FailsWithBadServerResponses(t *testing.T) { + t.Parallel() + const servicesGetRetries int = 2 + cases := []struct { + name string + loginResponse string + getServicesListResponse string + expectedServiceGets int }{ - "no extra proxy config provided": {}, - "envoy bind port is provided in service proxy config": { - proxyConfig: map[string]interface{}{"bind_port": "21000"}, - expIptablesParamsFunc: func(actual iptables.Config) (bool, string) { - if actual.ProxyInboundPort == 21000 { - return true, "" - } else { - return false, fmt.Sprintf("ProxyInboundPort in iptables.Config was %d, but should be 21000", actual.ProxyInboundPort) - } - }, + { + name: "acls enabled, acl login response invalid", + loginResponse: "", + expectedServiceGets: 0, }, - // This test is to make sure that we use merge-central-config parameter when we query the service - // so that we get all config merged into the proxy configuration on the service. - "envoy bind port is provided in a config entry": { - proxyConfig: map[string]interface{}{"bind_port": "21000"}, - registerProxyDefaults: true, - expIptablesParamsFunc: func(actual iptables.Config) (bool, string) { - if actual.ProxyInboundPort == 21000 { - return true, "" - } else { - return false, fmt.Sprintf("ProxyInboundPort in iptables.Config was %d, but should be 21000", actual.ProxyInboundPort) - } - }, + { + name: "acls enabled, get service response invalid", + loginResponse: testLoginResponse, + getServicesListResponse: "", + expectedServiceGets: servicesGetRetries + 1, // Plus 1 because we RETRY after an initial attempt. }, - "tproxy outbound listener port is provided in service proxy config": { - tproxyConfig: api.TransparentProxyConfig{OutboundListenerPort: 16000}, - expIptablesParamsFunc: func(actual iptables.Config) (bool, string) { - if actual.ProxyOutboundPort == 16000 { - return true, "" - } else { - return false, fmt.Sprintf("ProxyOutboundPort in iptables.Config was %d, but should be 16000", actual.ProxyOutboundPort) + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + bearerFile := common.WriteTempFile(t, "bearerTokenFile") + tokenFile := common.WriteTempFile(t, "") + + servicesGetCounter := 0 + // Start the mock Consul server. + consulServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // ACL login request. + if r != nil && r.URL.Path == "/v1/acl/login" && r.Method == "POST" { + w.Write([]byte(c.loginResponse)) } - }, - }, - "tproxy outbound listener port is provided in a config entry": { - tproxyConfig: api.TransparentProxyConfig{OutboundListenerPort: 16000}, - registerProxyDefaults: true, - expIptablesParamsFunc: func(actual iptables.Config) (bool, string) { - if actual.ProxyOutboundPort == 16000 { - return true, "" - } else { - return false, fmt.Sprintf("ProxyOutboundPort in iptables.Config was %d, but should be 16000", actual.ProxyOutboundPort) + // Token read request. + if r != nil && r.URL.Path == "/v1/acl/token/self" && r.Method == "GET" { + w.Write([]byte(testTokenReadSelfResponse)) } - }, - }, - "envoy stats addr is provided in service proxy config": { - proxyConfig: map[string]interface{}{"envoy_stats_bind_addr": "0.0.0.0:9090"}, - expIptablesParamsFunc: func(actual iptables.Config) (bool, string) { - if len(actual.ExcludeInboundPorts) == 1 && actual.ExcludeInboundPorts[0] == "9090" { - return true, "" - } else { - return false, fmt.Sprintf("ExcludeInboundPorts in iptables.Config was %v, but should be [9090]", actual.ExcludeInboundPorts) + // Agent Services get. 
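+				// An empty body here is not valid JSON, so the client's agent
+				// services call fails and the command retries; servicesGetCounter
+				// is what expectedServiceGets is asserted against.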
+ if r != nil && r.URL.Path == "/v1/agent/services" && r.Method == "GET" { + servicesGetCounter++ + w.Write([]byte(c.getServicesListResponse)) } - }, + })) + defer consulServer.Close() + + // Set up the Command. + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + flagBearerTokenFile: bearerFile, + flagACLTokenSink: tokenFile, + serviceRegistrationPollingAttempts: uint64(servicesGetRetries), + } + + serverURL, err := url.Parse(consulServer.URL) + require.NoError(t, err) + flags := []string{ + "-pod-name", testPodName, "-pod-namespace", testPodNamespace, + "-acl-auth-method", test.AuthMethod, + "-service-account-name", testServiceAccountName, + "-bearer-token-file", bearerFile, + "-acl-token-sink", tokenFile, + "-http-addr", serverURL.String(), + "-consul-api-timeout", "5s", + } + code := cmd.Run(flags) + require.Equal(t, 1, code) + // We use the counter to ensure we failed at ACL Login (when counter = 0) or proceeded to the service get portion of the command. + require.Equal(t, c.expectedServiceGets, servicesGetCounter) + }) + } +} + +// Tests ACL Login with Retries. +func TestRun_LoginWithRetries(t *testing.T) { + t.Parallel() + cases := []struct { + Description string + TestRetry bool + LoginAttemptsCount int + ExpCode int + }{ + { + Description: "Login succeeds without retries", + TestRetry: false, + LoginAttemptsCount: 1, // 1 because we dont actually retry. + ExpCode: 0, }, - "envoy stats addr is provided in a config entry": { - proxyConfig: map[string]interface{}{"envoy_stats_bind_addr": "0.0.0.0:9090"}, - registerProxyDefaults: true, - expIptablesParamsFunc: func(actual iptables.Config) (bool, string) { - if len(actual.ExcludeInboundPorts) == 1 && actual.ExcludeInboundPorts[0] == "9090" { - return true, "" - } else { - return false, fmt.Sprintf("ExcludeInboundPorts in iptables.Config was %v, but should be [9090]", actual.ExcludeInboundPorts) - } - }, + { + Description: "Login succeeds after 1 retry", + TestRetry: true, + LoginAttemptsCount: 2, + ExpCode: 0, }, } + for _, c := range cases { + t.Run(c.Description, func(t *testing.T) { + // Create a fake input bearer token file and an output file. + bearerFile := common.WriteTempFile(t, "bearerTokenFile") + tokenFile := common.WriteTempFile(t, "") + proxyFile := common.WriteTempFile(t, "") - for name, c := range cases { - t.Run(name, func(t *testing.T) { - proxyFile := fmt.Sprintf("/tmp/%d", rand.Int()) - t.Cleanup(func() { - _ = os.RemoveAll(proxyFile) - }) + // Start the mock Consul server. + counter := 0 + consulServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // ACL Login. + if r != nil && r.URL.Path == "/v1/acl/login" && r.Method == "POST" { + counter++ + if !c.TestRetry || (c.TestRetry && c.LoginAttemptsCount == counter) { + w.Write([]byte(testLoginResponse)) + } + } + // Token read request. + if r != nil && r.URL.Path == "/v1/acl/token/self" && r.Method == "GET" { + w.Write([]byte(testTokenReadSelfResponse)) + } + // Agent Services get. + if r != nil && r.URL.Path == "/v1/agent/services" && r.Method == "GET" { + w.Write([]byte(testServiceListResponse)) + } + })) + defer consulServer.Close() - // Start Consul server. 
- var serverCfg *testutil.TestServerConfig - server, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { - serverCfg = c - }) + serverURL, err := url.Parse(consulServer.URL) require.NoError(t, err) - t.Cleanup(func() { - _ = server.Stop() + + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + } + code := cmd.Run([]string{ + "-pod-name", testPodName, + "-pod-namespace", testPodNamespace, + "-acl-auth-method", test.AuthMethod, + "-service-account-name", testServiceAccountName, + "-acl-token-sink", tokenFile, + "-bearer-token-file", bearerFile, + "-proxy-id-file", proxyFile, + "-http-addr", serverURL.String(), + "-consul-api-timeout", "5s", }) - server.WaitForLeader(t) - consulClient, err := api.NewClient(&api.Config{Address: server.HTTPAddr}) + fmt.Println(ui.ErrorWriter.String()) + require.Equal(t, c.ExpCode, code) + // Cmd will return 1 after numACLLoginRetries, so bound LoginAttemptsCount if we exceeded it. + require.Equal(t, c.LoginAttemptsCount, counter) + // Validate that the token was written to disk if we succeeded. + tokenData, err := os.ReadFile(tokenFile) require.NoError(t, err) + require.Equal(t, "b78d37c7-0ca7-5f4d-99ee-6d9975ce4586", string(tokenData)) + // Validate contents of proxyFile. + proxydata, err := os.ReadFile(proxyFile) + require.NoError(t, err) + require.Equal(t, "counting-counting-sidecar-proxy", string(proxydata)) + }) + } +} - // Add additional proxy configuration either to a config entry or to the service itself. - if c.registerProxyDefaults { - _, _, err = consulClient.ConfigEntries().Set(&api.ProxyConfigEntry{ - Name: api.ProxyConfigGlobal, - Kind: api.ProxyDefaults, - TransparentProxy: &c.tproxyConfig, - Config: c.proxyConfig, - }, nil) - require.NoError(t, err) - } else { - consulCountingSvcSidecar.Proxy.TransparentProxy = &c.tproxyConfig - consulCountingSvcSidecar.Proxy.Config = c.proxyConfig - } - // Register Consul services. - testConsulServices := []api.AgentService{consulCountingSvc, consulCountingSvcSidecar} - for _, svc := range testConsulServices { - serviceRegistration := &api.CatalogRegistration{ - Node: nodeName, - Address: "127.0.0.1", - Service: &svc, +// Test that we check token exists when reading it in the stale consistency mode. +func TestRun_EnsureTokenExists(t *testing.T) { + t.Parallel() + + cases := map[string]struct { + neverSucceed bool + }{ + "succeed after first retry": {neverSucceed: false}, + "never succeed": {neverSucceed: true}, + } + + for name, c := range cases { + t.Run(name, func(t *testing.T) { + // Create a fake input bearer token file and an output file. + bearerFile := common.WriteTempFile(t, "bearerTokenFile") + tokenFile := common.WriteTempFile(t, "") + proxyFile := common.WriteTempFile(t, "") + + // Start the mock Consul server. + counter := 0 + consulServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // ACL Login. + if r != nil && r.URL.Path == "/v1/acl/login" && r.Method == "POST" { + w.Write([]byte(testLoginResponse)) } - _, err = consulClient.Catalog().Register(serviceRegistration, nil) - require.NoError(t, err) - } - ui := cli.NewMockUi() + // Token read request. + if r != nil && + r.URL.Path == "/v1/acl/token/self" && + r.Method == "GET" && + r.URL.Query().Has("stale") { - iptablesProvider := &fakeIptablesProvider{} - iptablesCfg := iptables.Config{ - ProxyUserID: "5995", - ProxyInboundPort: 20000, - } + // Fail the first request but succeed on the next. 
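+					// Returning 403 "ACL not found" presumably mimics a server that
+					// has not yet replicated the freshly created token, which is the
+					// race the stale-mode read below is retried to cover.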
+ if counter == 0 || c.neverSucceed { + counter++ + w.WriteHeader(http.StatusForbidden) + w.Write([]byte("ACL not found")) + } else { + w.Write([]byte(testTokenReadSelfResponse)) + } + } + // Agent Services get. + if r != nil && r.URL.Path == "/v1/agent/services" && r.Method == "GET" { + w.Write([]byte(testServiceListResponse)) + } + })) + defer consulServer.Close() + + serverURL, err := url.Parse(consulServer.URL) + require.NoError(t, err) + + ui := cli.NewMockUi() cmd := Command{ - UI: ui, - serviceRegistrationPollingAttempts: 3, - iptablesProvider: iptablesProvider, + UI: ui, } - iptablesCfgJSON, err := json.Marshal(iptablesCfg) - require.NoError(t, err) - flags := []string{ + code := cmd.Run([]string{ "-pod-name", testPodName, "-pod-namespace", testPodNamespace, - "-consul-node-name", nodeName, - "-addresses", "127.0.0.1", - "-http-port", strconv.Itoa(serverCfg.Ports.HTTP), - "-grpc-port", strconv.Itoa(serverCfg.Ports.GRPC), + "-acl-auth-method", test.AuthMethod, + "-service-account-name", testServiceAccountName, + "-acl-token-sink", tokenFile, + "-bearer-token-file", bearerFile, "-proxy-id-file", proxyFile, - "-redirect-traffic-config", string(iptablesCfgJSON), - } - code := cmd.Run(flags) - require.Equal(t, 0, code, ui.ErrorWriter.String()) - require.Truef(t, iptablesProvider.applyCalled, "redirect traffic rules were not applied") - if c.expIptablesParamsFunc != nil { - actualIptablesConfigParamsEqualExpected, errMsg := c.expIptablesParamsFunc(cmd.iptablesConfig) - require.Truef(t, actualIptablesConfigParamsEqualExpected, errMsg) + "-http-addr", serverURL.String(), + "-consul-api-timeout", "5s", + }) + if c.neverSucceed { + require.Equal(t, 1, code) + } else { + require.Equal(t, 0, code) + require.Equal(t, 1, counter) } }) } @@ -894,13 +857,129 @@ const ( metaKeyKubeServiceName = "k8s-service-name" testPodNamespace = "default-ns" testPodName = "counting-pod" - testGatewayName = "gateway-pod" + testServiceAccountName = "counting" + + // Sample response from https://consul.io/api-docs/acl#sample-response. + testLoginResponse = `{ + "AccessorID": "926e2bd2-b344-d91b-0c83-ae89f372cd9b", + "SecretID": "b78d37c7-0ca7-5f4d-99ee-6d9975ce4586", + "Description": "token created via login", + "Roles": [ + { + "ID": "3356c67c-5535-403a-ad79-c1d5f9df8fc7", + "Name": "demo" + } + ], + "ServiceIdentities": [ + { + "ServiceName": "example" + } + ], + "Local": true, + "AuthMethod": "minikube", + "CreateTime": "2019-04-29T10:08:08.404370762-05:00", + "Hash": "nLimyD+7l6miiHEBmN/tvCelAmE/SbIXxcnTzG3pbGY=", + "CreateIndex": 36, + "ModifyIndex": 36 +}` + + // Sample response from https://www.consul.io/api-docs/acl/tokens#read-self-token. 
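+	// Note: the SecretID below is only what the mock returns for the token/self
+	// read; the token persisted to -acl-token-sink is the login response's
+	// SecretID (b78d37c7-...), as TestRun_LoginWithRetries asserts.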
+ testTokenReadSelfResponse = ` +{ + "AccessorID": "6a1253d2-1785-24fd-91c2-f8e78c745511", + "SecretID": "45a3bd52-07c7-47a4-52fd-0745e0cfe967", + "Description": "Agent token for 'node1'", + "Policies": [ + { + "ID": "165d4317-e379-f732-ce70-86278c4558f7", + "Name": "node1-write" + }, + { + "ID": "e359bd81-baca-903e-7e64-1ccd9fdc78f5", + "Name": "node-read" + } + ], + "Local": false, + "CreateTime": "2018-10-24T12:25:06.921933-04:00", + "Hash": "UuiRkOQPRCvoRZHRtUxxbrmwZ5crYrOdZ0Z1FTFbTbA=", + "CreateIndex": 59, + "ModifyIndex": 59 +} +` + + testServiceListResponse = `{ + "counting-counting": { + "ID": "counting-counting", + "Service": "counting", + "Tags": [], + "Meta": { + "k8s-namespace": "default", + "pod-name": "counting-pod", + "k8s-service-name": "counting" + }, + "Port": 9001, + "Address": "10.32.3.26", + "TaggedAddresses": { + "lan_ipv4": { + "Address": "10.32.3.26", + "Port": 9001 + }, + "wan_ipv4": { + "Address": "10.32.3.26", + "Port": 9001 + } + }, + "Weights": { + "Passing": 1, + "Warning": 1 + }, + "EnableTagOverride": false, + "Datacenter": "dc1" + }, + "counting-counting-sidecar-proxy": { + "Kind": "connect-proxy", + "ID": "counting-counting-sidecar-proxy", + "Service": "counting-sidecar-proxy", + "Tags": [], + "Meta": { + "k8s-namespace": "default", + "pod-name": "counting-pod", + "k8s-service-name": "counting" + }, + "Port": 20000, + "Address": "10.32.3.26", + "TaggedAddresses": { + "lan_ipv4": { + "Address": "10.32.3.26", + "Port": 20000 + }, + "wan_ipv4": { + "Address": "10.32.3.26", + "Port": 20000 + } + }, + "Weights": { + "Passing": 1, + "Warning": 1 + }, + "EnableTagOverride": false, + "Proxy": { + "DestinationServiceName": "counting", + "DestinationServiceID": "counting-counting", + "LocalServiceAddress": "127.0.0.1", + "LocalServicePort": 9001, + "MeshGateway": {}, + "Expose": {} + }, + "Datacenter": "dc1" + } +}` ) var ( - consulCountingSvc = api.AgentService{ + consulCountingSvc = api.AgentServiceRegistration{ ID: "counting-counting", - Service: "counting", + Name: "counting", Address: "127.0.0.1", Meta: map[string]string{ metaKeyPodName: "counting-pod", @@ -908,13 +987,15 @@ var ( metaKeyKubeServiceName: "counting", }, } - consulCountingSvcSidecar = api.AgentService{ - ID: "counting-counting-sidecar-proxy", - Service: "counting-sidecar-proxy", - Kind: "connect-proxy", + consulCountingSvcSidecar = api.AgentServiceRegistration{ + ID: "counting-counting-sidecar-proxy", + Name: "counting-sidecar-proxy", + Kind: "connect-proxy", Proxy: &api.AgentServiceConnectProxyConfig{ DestinationServiceName: "counting", DestinationServiceID: "counting-counting", + Config: nil, + Upstreams: nil, }, Port: 9999, Address: "127.0.0.1", @@ -924,9 +1005,9 @@ var ( metaKeyKubeServiceName: "counting", }, } - consulCountingSvcMultiport = api.AgentService{ + consulCountingSvcMultiport = api.AgentServiceRegistration{ ID: "counting-admin-id", - Service: "counting-admin", + Name: "counting-admin", Address: "127.0.0.1", Meta: map[string]string{ metaKeyPodName: "counting-pod", @@ -934,13 +1015,15 @@ var ( metaKeyKubeServiceName: "counting-admin", }, } - consulCountingSvcSidecarMultiport = api.AgentService{ - ID: "counting-admin-sidecar-proxy-id", - Service: "counting-admin-sidecar-proxy", - Kind: "connect-proxy", + consulCountingSvcSidecarMultiport = api.AgentServiceRegistration{ + ID: "counting-admin-sidecar-proxy-id", + Name: "counting-admin-sidecar-proxy", + Kind: "connect-proxy", Proxy: &api.AgentServiceConnectProxyConfig{ DestinationServiceName: "counting-admin", DestinationServiceID: 
"counting-admin-id", + Config: nil, + Upstreams: nil, }, Port: 9999, Address: "127.0.0.1", @@ -951,21 +1034,3 @@ var ( }, } ) - -type fakeIptablesProvider struct { - applyCalled bool - rules []string -} - -func (f *fakeIptablesProvider) AddRule(_ string, args ...string) { - f.rules = append(f.rules, strings.Join(args, " ")) -} - -func (f *fakeIptablesProvider) ApplyRules() error { - f.applyCalled = true - return nil -} - -func (f *fakeIptablesProvider) Rules() []string { - return f.rules -} diff --git a/control-plane/subcommand/consul-logout/command_test.go b/control-plane/subcommand/consul-logout/command_test.go index 22412ea752..877898056d 100644 --- a/control-plane/subcommand/consul-logout/command_test.go +++ b/control-plane/subcommand/consul-logout/command_test.go @@ -63,7 +63,7 @@ func TestRun_InvalidSinkFile(t *testing.T) { func Test_UnableToLogoutDueToInvalidToken(t *testing.T) { tokenFile := fmt.Sprintf("/tmp/%d1", rand.Int()) t.Cleanup(func() { - os.RemoveAll(tokenFile) + os.Remove(tokenFile) }) var caFile, certFile, keyFile string @@ -118,7 +118,7 @@ func Test_RunUsingLogin(t *testing.T) { // This is the test file that we will write the token to so consul-logout can read it. tokenFile := fmt.Sprintf("/tmp/%d1", rand.Int()) t.Cleanup(func() { - os.RemoveAll(tokenFile) + os.Remove(tokenFile) }) // Start Consul server with ACLs enabled and default deny policy. diff --git a/control-plane/subcommand/consul-sidecar/command.go b/control-plane/subcommand/consul-sidecar/command.go new file mode 100644 index 0000000000..6ea77f635c --- /dev/null +++ b/control-plane/subcommand/consul-sidecar/command.go @@ -0,0 +1,427 @@ +package consulsidecar + +import ( + "context" + "errors" + "flag" + "fmt" + "io" + "net/http" + "os" + "os/exec" + "os/signal" + "strings" + "sync" + "syscall" + "time" + + "github.com/hashicorp/consul-k8s/control-plane/subcommand/common" + "github.com/hashicorp/consul-k8s/control-plane/subcommand/flags" + "github.com/hashicorp/go-hclog" + "github.com/mitchellh/cli" +) + +const ( + metricsServerShutdownTimeout = 5 * time.Second + envoyMetricsAddr = "http://127.0.0.1:19000/stats/prometheus" + // prometheusServiceMetricsSuccessKey is the key of the prometheus metric used to + // indicate if service metrics were scraped successfully. + prometheusServiceMetricsSuccessKey = "consul_merged_service_metrics_success" +) + +type Command struct { + UI cli.Ui + + http *flags.HTTPFlags + flagEnableServiceRegistration bool + flagServiceConfig string + flagConsulBinary string + flagSyncPeriod time.Duration + flagSet *flag.FlagSet + flagLogLevel string + flagLogJSON bool + + // Flags to configure metrics merging + flagEnableMetricsMerging bool + flagMergedMetricsPort string + flagServiceMetricsPort string + flagServiceMetricsPath string + + envoyMetricsGetter metricsGetter + serviceMetricsGetter metricsGetter + + consulCommand []string + + logger hclog.Logger + once sync.Once + help string + sigCh chan os.Signal +} + +// metricsGetter abstracts the function of retrieving metrics. It is used to +// enable easier unit testing. +type metricsGetter interface { + Get(url string) (resp *http.Response, err error) +} + +func (c *Command) init() { + c.flagSet = flag.NewFlagSet("", flag.ContinueOnError) + c.flagSet.BoolVar(&c.flagEnableServiceRegistration, "enable-service-registration", true, "Enables consul sidecar to register the service with consul every sync period. 
Defaults to true.") + c.flagSet.StringVar(&c.flagServiceConfig, "service-config", "", "Path to the service config file") + c.flagSet.StringVar(&c.flagConsulBinary, "consul-binary", "consul", "Path to a consul binary") + c.flagSet.DurationVar(&c.flagSyncPeriod, "sync-period", 10*time.Second, "Time between syncing the service registration. Defaults to 10s.") + c.flagSet.StringVar(&c.flagLogLevel, "log-level", "info", + "Log verbosity level. Supported values (in order of detail) are \"trace\", "+ + "\"debug\", \"info\", \"warn\", and \"error\". Defaults to info.") + c.flagSet.BoolVar(&c.flagLogJSON, "log-json", false, + "Enable or disable JSON output format for logging.") + + c.flagSet.BoolVar(&c.flagEnableMetricsMerging, "enable-metrics-merging", false, "Enables consul sidecar to run a merged metrics endpoint. Defaults to false.") + // -merged-metrics-port, -service-metrics-port, and -service-metrics-path + // are only used if metrics merging is enabled. -merged-metrics-port and + // -service-metrics-path have defaults, and -service-metrics-port is + // expected to be set by the connect-inject handler to a valid value. The + // connect-inject handler will only enable metrics merging in the consul + // sidecar if it finds a service metrics port greater than 0. + c.flagSet.StringVar(&c.flagMergedMetricsPort, "merged-metrics-port", "20100", "Port to serve merged Envoy and application metrics. Defaults to 20100.") + c.flagSet.StringVar(&c.flagServiceMetricsPort, "service-metrics-port", "0", "Port where application metrics are being served. Defaults to 0.") + c.flagSet.StringVar(&c.flagServiceMetricsPath, "service-metrics-path", "/metrics", "Path where application metrics are being served. Defaults to /metrics.") + c.help = flags.Usage(help, c.flagSet) + c.http = &flags.HTTPFlags{} + flags.Merge(c.flagSet, c.http.Flags()) + c.help = flags.Usage(help, c.flagSet) + + // Wait on an interrupt or terminate to exit. This channel must be initialized before + // Run() is called so that there are no race conditions where the channel + // is not defined. + if c.sigCh == nil { + c.sigCh = make(chan os.Signal, 1) + signal.Notify(c.sigCh, syscall.SIGINT, syscall.SIGTERM) + } +} + +// Run continually re-registers the service with Consul. +// This is needed because if the Consul Client pod is restarted, it loses all +// its service registrations. +// This command expects to be run as a sidecar and to be injected by the +// mutating webhook. +func (c *Command) Run(args []string) int { + c.once.Do(c.init) + if err := c.flagSet.Parse(args); err != nil { + return 1 + } + + err := c.validateFlags() + if err != nil { + c.UI.Error("Error: " + err.Error()) + return 1 + } + + logger, err := common.Logger(c.flagLogLevel, c.flagLogJSON) + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + c.logger = logger + + // Log initial configuration + c.logger.Info("Command configuration", "enable-service-registration", c.flagEnableServiceRegistration, + "service-config", c.flagServiceConfig, + "consul-binary", c.flagConsulBinary, + "sync-period", c.flagSyncPeriod, + "log-level", c.flagLogLevel, + "enable-metrics-merging", c.flagEnableMetricsMerging, + "merged-metrics-port", c.flagMergedMetricsPort, + "service-metrics-port", c.flagServiceMetricsPort, + "service-metrics-path", c.flagServiceMetricsPath, + ) + + // signalCtx that we pass in to the main work loop, signal handling is handled in another thread + // due to the length of time it can take for the cmd to complete causing synchronization issues + // on shutdown. 
Also passing a context in so that it can interrupt the cmd and exit cleanly. + signalCtx, cancelFunc := context.WithCancel(context.Background()) + go func() { + sig := <-c.sigCh + c.logger.Info(fmt.Sprintf("%s received, shutting down", sig)) + cancelFunc() + }() + + // If metrics merging is enabled, run a merged metrics server in a goroutine + // that serves Envoy sidecar metrics and Connect service metrics. The merged + // metrics server will be shut down when a signal is received by the main + // for loop using shutdownMetricsServer(). + var server *http.Server + srvExitCh := make(chan error) + if c.flagEnableMetricsMerging { + c.logger.Info("Metrics is enabled, creating merged metrics server.") + server = c.createMergedMetricsServer() + + // Run the merged metrics server. + c.logger.Info("Running merged metrics server.") + go func() { + if err = server.ListenAndServe(); err != nil && err != http.ErrServerClosed { + srvExitCh <- err + } + }() + } + + // The work loop for re-registering the service. We continually re-register + // our service every syncPeriod. Consul is smart enough to know when the + // service hasn't changed and so won't update any indices. This means we + // won't be causing a lot of traffic within the cluster. We tolerate Consul + // Clients going down and will simply re-register once it's back up. + if c.flagEnableServiceRegistration { + c.consulCommand = []string{"services", "register"} + c.consulCommand = append(c.consulCommand, c.parseConsulFlags()...) + c.consulCommand = append(c.consulCommand, c.flagServiceConfig) + + go func() { + for { + start := time.Now() + cmd := exec.CommandContext(signalCtx, c.flagConsulBinary, c.consulCommand...) + + // Run the command and record the stdout and stderr output. + output, err := cmd.CombinedOutput() + if err != nil { + c.logger.Error("failed to sync service", "output", strings.TrimSpace(string(output)), "err", err, "duration", time.Since(start)) + } else { + c.logger.Info("successfully synced service", "output", strings.TrimSpace(string(output)), "duration", time.Since(start)) + } + select { + // Re-loop after syncPeriod or exit if we receive interrupt or terminate signals. + case <-time.After(c.flagSyncPeriod): + continue + case <-signalCtx.Done(): + return + } + } + }() + } + + // Block and wait for a signal or for the metrics server to exit. + select { + case <-signalCtx.Done(): + // After the signal is received, wait for the merged metrics server + // to gracefully shutdown as well if it has been enabled. This can + // take up to metricsServerShutdownTimeout seconds. + if c.flagEnableMetricsMerging { + c.logger.Info("Attempting to shut down metrics server.") + c.shutdownMetricsServer(server) + } + return 0 + case err := <-srvExitCh: + c.logger.Error(fmt.Sprintf("Metrics server error: %v", err)) + return 1 + } + +} + +// shutdownMetricsServer handles gracefully shutting down the server. This will +// call server.Shutdown(), which will indefinitely wait for connections to turn +// idle. To avoid potentially waiting forever, we pass a context to +// server.Shutdown() that will timeout in metricsServerShutdownTimeout (5) seconds. +func (c *Command) shutdownMetricsServer(server *http.Server) { + // The shutdownCancelFunc will be unused since it is unnecessary to call it as we + // are already about to call shutdown with a timeout. We'd only need to + // shutdownCancelFunc if we needed to trigger something to happen when the + // shutdownCancelFunc is called, which we do not. 
The reason for not + // discarding it with _ is for the go vet check. + shutdownCtx, shutdownCancelFunc := context.WithTimeout(context.Background(), metricsServerShutdownTimeout) + defer shutdownCancelFunc() + + c.logger.Info("Merged metrics server exists, attempting to gracefully shut down server") + if err := server.Shutdown(shutdownCtx); err != nil { + c.logger.Error(fmt.Sprintf("Server shutdown failed: %s", err)) + return + } + c.logger.Info("Server has been shut down") +} + +// createMergedMetricsServer sets up the merged metrics server. +func (c *Command) createMergedMetricsServer() *http.Server { + mux := http.NewServeMux() + mux.HandleFunc("/stats/prometheus", c.mergedMetricsHandler) + + mergedMetricsServerAddr := fmt.Sprintf("127.0.0.1:%s", c.flagMergedMetricsPort) + server := &http.Server{Addr: mergedMetricsServerAddr, Handler: mux} + + // http.Client satisfies the metricsGetter interface. + // The default http.Client timeout is indefinite, so adding a timeout makes + // sure that requests don't hang. + client := &http.Client{ + Timeout: time.Second * 10, + } + + // During tests these may already be set to mocks. + if c.envoyMetricsGetter == nil { + c.envoyMetricsGetter = client + } + if c.serviceMetricsGetter == nil { + c.serviceMetricsGetter = client + } + + return server +} + +// mergedMetricsHandler has the logic to append both Envoy and service metrics +// together, logging if it's unsuccessful at either. +// If the Envoy scrape fails, we respond with a 500 code which follows the Prometheus +// exporter guidelines. If the service scrape fails, we respond with a 200 so +// that the Envoy metrics are still scraped. +// We also include a metric line in each response indicating the success or +// failure of the service metric scraping. +func (c *Command) mergedMetricsHandler(rw http.ResponseWriter, _ *http.Request) { + envoyMetrics, err := c.envoyMetricsGetter.Get(envoyMetricsAddr) + if err != nil { + c.logger.Error("Error scraping Envoy proxy metrics", "err", err) + http.Error(rw, fmt.Sprintf("Error scraping Envoy proxy metrics: %s", err), http.StatusInternalServerError) + return + } + + // Write Envoy metrics to the response. + defer func() { + err = envoyMetrics.Body.Close() + if err != nil { + c.logger.Error(fmt.Sprintf("Error closing envoy metrics body: %s", err.Error())) + } + }() + envoyMetricsBody, err := io.ReadAll(envoyMetrics.Body) + if err != nil { + c.logger.Error("Could not read Envoy proxy metrics", "err", err) + http.Error(rw, fmt.Sprintf("Could not read Envoy proxy metrics: %s", err), http.StatusInternalServerError) + return + } + if non2xxCode(envoyMetrics.StatusCode) { + c.logger.Error("Received non-2xx status code scraping Envoy proxy metrics", "code", envoyMetrics.StatusCode, "response", string(envoyMetricsBody)) + http.Error(rw, fmt.Sprintf("Received non-2xx status code scraping Envoy proxy metrics: %d: %s", envoyMetrics.StatusCode, string(envoyMetricsBody)), http.StatusInternalServerError) + return + } + writeResponse(rw, envoyMetricsBody, "envoy metrics", c.logger) + + serviceMetricsAddr := fmt.Sprintf("http://127.0.0.1:%s%s", c.flagServiceMetricsPort, c.flagServiceMetricsPath) + serviceMetrics, err := c.serviceMetricsGetter.Get(serviceMetricsAddr) + if err != nil { + c.logger.Warn("Error scraping service metrics", "err", err) + writeResponse(rw, serviceMetricSuccess(false), "service metrics success", c.logger) + // Since we've already written the Envoy metrics to the response, we can + // return at this point if we were unable to get service metrics. 
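+		// On this path the merged output ends with
+		// "consul_merged_service_metrics_success 0"; on the happy path it is the
+		// Envoy body, then the service body, then
+		// "consul_merged_service_metrics_success 1" (see the expected outputs in
+		// TestMergedMetricsServer).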
+ return + } + + // Since serviceMetrics will be non-nil if there are no errors, write the + // service metrics to the response as well. + defer func() { + err = serviceMetrics.Body.Close() + if err != nil { + c.logger.Error(fmt.Sprintf("Error closing service metrics body: %s", err.Error())) + } + }() + serviceMetricsBody, err := io.ReadAll(serviceMetrics.Body) + if err != nil { + c.logger.Error("Could not read service metrics", "err", err) + writeResponse(rw, serviceMetricSuccess(false), "service metrics success", c.logger) + return + } + if non2xxCode(serviceMetrics.StatusCode) { + c.logger.Error("Received non-2xx status code scraping service metrics", "code", serviceMetrics.StatusCode, "response", string(serviceMetricsBody)) + writeResponse(rw, serviceMetricSuccess(false), "service metrics success", c.logger) + return + } + writeResponse(rw, serviceMetricsBody, "service metrics", c.logger) + writeResponse(rw, serviceMetricSuccess(true), "service metrics success", c.logger) +} + +// writeResponse is a helper method to write resp to rw and log if there is an error writing. +// respName is the name of this response that will be used in the error log. +func writeResponse(rw http.ResponseWriter, resp []byte, respName string, logger hclog.Logger) { + _, err := rw.Write(resp) + if err != nil { + logger.Error(fmt.Sprintf("Error writing %s: %s", respName, err.Error())) + } +} + +// validateFlags validates the flags. +func (c *Command) validateFlags() error { + if !c.flagEnableServiceRegistration && !c.flagEnableMetricsMerging { + return errors.New("at least one of -enable-service-registration or -enable-metrics-merging must be true") + } + if c.flagEnableServiceRegistration { + if c.flagSyncPeriod == 0 { + // if sync period is 0, then the select loop will + // always pick the first case, and it'll be impossible + // to terminate the command gracefully with SIGINT. + return errors.New("-sync-period must be greater than 0") + } + if c.flagServiceConfig == "" { + return errors.New("-service-config must be set") + } + if c.flagConsulBinary == "" { + return errors.New("-consul-binary must be set") + } + if c.http.ConsulAPITimeout() <= 0 { + return errors.New("-consul-api-timeout must be set to a value greater than 0") + } + _, err := os.Stat(c.flagServiceConfig) + if os.IsNotExist(err) { + return fmt.Errorf("-service-config file %q not found", c.flagServiceConfig) + } + _, err = exec.LookPath(c.flagConsulBinary) + if err != nil { + return fmt.Errorf("-consul-binary %q not found: %s", c.flagConsulBinary, err) + } + } + return nil +} + +// non2xxCode returns true if code is not in the range of 200-299 inclusive. +func non2xxCode(code int) bool { + return code < 200 || code >= 300 +} + +// serviceMetricSuccess returns a prometheus metric line indicating +// the success of the metrics merging. +func serviceMetricSuccess(success bool) []byte { + boolAsInt := 0 + if success { + boolAsInt = 1 + } + return []byte(fmt.Sprintf("%s %d\n", prometheusServiceMetricsSuccessKey, boolAsInt)) +} + +// parseConsulFlags creates Consul client command flags +// from command's HTTP flags and returns them as an array of strings. 
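+// For example (a hypothetical invocation, matching TestRun_ConsulCommandFlags):
+// parsing -token=abc -ca-file=/ca/file -consul-api-timeout=5s yields roughly
+//
+//	[]string{"-token=abc", "-ca-file=/ca/file"}
+//
+// because empty-valued flags are skipped and -consul-api-timeout is dropped.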
+func (c *Command) parseConsulFlags() []string { + var consulCommandFlags []string + c.http.Flags().VisitAll(func(f *flag.Flag) { + // not adding -consul-api-timeout since consul does not use this flag + if f.Value.String() != "" && f.Name != "consul-api-timeout" { + consulCommandFlags = append(consulCommandFlags, fmt.Sprintf("-%s=%s", f.Name, f.Value.String())) + } + }) + return consulCommandFlags +} + +// interrupt sends os.Interrupt signal to the command +// so it can exit gracefully. This function is needed for tests. +func (c *Command) interrupt() { + c.sendSignal(syscall.SIGINT) +} + +func (c *Command) sendSignal(sig os.Signal) { + c.sigCh <- sig +} + +func (c *Command) Synopsis() string { return synopsis } +func (c *Command) Help() string { + c.once.Do(c.init) + return c.help +} + +const synopsis = "Consul sidecar for Connect." +const help = ` +Usage: consul-k8s-control-plane consul-sidecar [options] + + Run as a sidecar to your Connect service. Ensures that your service + is registered with the local Consul client. + +` diff --git a/control-plane/subcommand/consul-sidecar/command_ent_test.go b/control-plane/subcommand/consul-sidecar/command_ent_test.go new file mode 100644 index 0000000000..d3a198d59a --- /dev/null +++ b/control-plane/subcommand/consul-sidecar/command_ent_test.go @@ -0,0 +1,90 @@ +//go:build enterprise + +package consulsidecar + +import ( + "os" + "testing" + "time" + + "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/sdk/testutil" + "github.com/hashicorp/consul/sdk/testutil/retry" + "github.com/mitchellh/cli" + "github.com/stretchr/testify/require" +) + +// Test that we register the services with namespaces. +func TestRun_ServicesRegistration_Namespaces(t *testing.T) { + t.Parallel() + tmpDir, configFile := createServicesTmpFile(t, servicesRegistrationWithNamespaces) + defer os.RemoveAll(tmpDir) + + a, err := testutil.NewTestServerConfigT(t, nil) + require.NoError(t, err) + defer a.Stop() + + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + } + + // Run async because we need to kill it when the test is over. 
+ exitChan := runCommandAsynchronously(&cmd, []string{ + "-http-addr", a.HTTPAddr, + "-service-config", configFile, + "-sync-period", "100ms", + "-consul-api-timeout", "5s", + }) + defer stopCommand(t, &cmd, exitChan) + + client, err := api.NewClient(&api.Config{ + Address: a.HTTPAddr, + }) + require.NoError(t, err) + + // create necessary namespaces first + _, _, err = client.Namespaces().Create(&api.Namespace{Name: "namespace"}, nil) + require.NoError(t, err) + + timer := &retry.Timer{Timeout: 1 * time.Second, Wait: 100 * time.Millisecond} + retry.RunWith(timer, t, func(r *retry.R) { + svc, _, err := client.Agent().Service("service-id", &api.QueryOptions{Namespace: "namespace"}) + require.NoError(r, err) + require.Equal(r, 80, svc.Port) + require.Equal(r, "namespace", svc.Namespace) + + svcProxy, _, err := client.Agent().Service("service-id-sidecar-proxy", &api.QueryOptions{Namespace: "namespace"}) + require.NoError(r, err) + require.Equal(r, 2000, svcProxy.Port) + require.Equal(r, svcProxy.Namespace, "namespace") + require.Len(r, svcProxy.Proxy.Upstreams, 1) + require.Equal(r, svcProxy.Proxy.Upstreams[0].DestinationNamespace, "dest-namespace") + }) +} + +const servicesRegistrationWithNamespaces = ` +services { + id = "service-id" + name = "service" + port = 80 + namespace = "namespace" +} +services { + id = "service-id-sidecar-proxy" + name = "service-sidecar-proxy" + namespace = "namespace" + port = 2000 + kind = "connect-proxy" + proxy { + destination_service_name = "service" + destination_service_id = "service-id" + local_service_port = 80 + upstreams { + destination_type = "service" + destination_name = "dest-name" + destination_namespace = "dest-namespace" + local_bind_port = 1234 + } + } +}` diff --git a/control-plane/subcommand/consul-sidecar/command_test.go b/control-plane/subcommand/consul-sidecar/command_test.go new file mode 100644 index 0000000000..cd2d024ec5 --- /dev/null +++ b/control-plane/subcommand/consul-sidecar/command_test.go @@ -0,0 +1,643 @@ +package consulsidecar + +import ( + "bytes" + "fmt" + "io" + + "net" + "net/http" + "os" + "path/filepath" + "syscall" + "testing" + "time" + + "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/sdk/freeport" + "github.com/hashicorp/consul/sdk/testutil" + "github.com/hashicorp/consul/sdk/testutil/retry" + "github.com/hashicorp/go-hclog" + "github.com/mitchellh/cli" + "github.com/stretchr/testify/require" +) + +func TestRun_Defaults(t *testing.T) { + t.Parallel() + var cmd Command + cmd.init() + require.Equal(t, 10*time.Second, cmd.flagSyncPeriod) + require.Equal(t, "info", cmd.flagLogLevel) + require.Equal(t, "consul", cmd.flagConsulBinary) +} + +func TestRunSignalHandlingRegistrationOnly(t *testing.T) { + cases := map[string]os.Signal{ + "SIGINT": syscall.SIGINT, + "SIGTERM": syscall.SIGTERM, + } + for name, signal := range cases { + t.Run(name, func(t *testing.T) { + + tmpDir, configFile := createServicesTmpFile(t, servicesRegistration) + defer os.RemoveAll(tmpDir) + + a, err := testutil.NewTestServerConfigT(t, nil) + require.NoError(t, err) + defer a.Stop() + + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + } + + client, err := api.NewClient(&api.Config{ + Address: a.HTTPAddr, + }) + require.NoError(t, err) + // Run async because we need to kill it when the test is over. 
+ exitChan := runCommandAsynchronously(&cmd, []string{ + "-service-config", configFile, + "-http-addr", a.HTTPAddr, + "-sync-period", "1s", + "-consul-api-timeout", "5s", + }) + cmd.sendSignal(signal) + + // Assert that it exits cleanly or timeout. + select { + case exitCode := <-exitChan: + require.Equal(t, 0, exitCode, ui.ErrorWriter.String()) + case <-time.After(time.Second * 1): + // Fail if the signal was not caught. + require.Fail(t, "timeout waiting for command to exit") + } + // Assert that the services were not created because the cmd has exited. + _, _, err = client.Agent().Service("service-id", nil) + require.Error(t, err) + _, _, err = client.Agent().Service("service-id-sidecar-proxy", nil) + require.Error(t, err) + }) + } +} + +func TestRunSignalHandlingMetricsOnly(t *testing.T) { + cases := map[string]os.Signal{ + "SIGINT": syscall.SIGINT, + "SIGTERM": syscall.SIGTERM, + } + for name, signal := range cases { + t.Run(name, func(t *testing.T) { + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + } + + randomPorts := freeport.GetN(t, 1) + // Run async because we need to kill it when the test is over. + exitChan := runCommandAsynchronously(&cmd, []string{ + "-enable-service-registration=false", + "-enable-metrics-merging=true", + "-merged-metrics-port", fmt.Sprint(randomPorts[0]), + "-service-metrics-port", "8080", + "-service-metrics-path", "/metrics", + "-consul-api-timeout", "5s", + }) + + // Keep an open connection to the server by continuously sending bytes + // on the connection so it will have to be drained. + var conn net.Conn + var err error + retry.Run(t, func(r *retry.R) { + conn, err = net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", randomPorts[0])) + if err != nil { + require.NoError(r, err) + } + }) + go func() { + for { + _, err := conn.Write([]byte("hello")) + // Once the server has been shut down there will be an error writing to that connection. So, this + // will break out of the for loop and the goroutine will exit (and be cleaned up). + if err != nil { + break + } + } + }() + + // Send a signal to consul-sidecar. The merged metrics server can take + // up to metricsServerShutdownTimeout to finish cleaning up. + cmd.sendSignal(signal) + + // Will need to wait for slightly longer than the shutdown timeout to + // make sure that the command has exited shortly after the timeout. + waitForShutdown := metricsServerShutdownTimeout + 100*time.Millisecond + + // Assert that it exits cleanly or timeout. + select { + case exitCode := <-exitChan: + require.Equal(t, 0, exitCode, ui.ErrorWriter.String()) + case <-time.After(waitForShutdown): + // Fail if the signal was not caught. + require.Fail(t, "timeout waiting for command to exit") + } + }) + } +} + +func TestRunSignalHandlingAllProcessesEnabled(t *testing.T) { + cases := map[string]os.Signal{ + "SIGINT": syscall.SIGINT, + "SIGTERM": syscall.SIGTERM, + } + for name, signal := range cases { + t.Run(name, func(t *testing.T) { + tmpDir, configFile := createServicesTmpFile(t, servicesRegistration) + defer os.RemoveAll(tmpDir) + + a, err := testutil.NewTestServerConfigT(t, nil) + require.NoError(t, err) + defer a.Stop() + + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + } + + require.NoError(t, err) + + randomPorts := freeport.GetN(t, 1) + // Run async because we need to kill it when the test is over. 
+ exitChan := runCommandAsynchronously(&cmd, []string{ + "-service-config", configFile, + "-http-addr", a.HTTPAddr, + "-enable-metrics-merging=true", + "-merged-metrics-port", fmt.Sprint(randomPorts[0]), + "-service-metrics-port", "8080", + "-service-metrics-path", "/metrics", + "-consul-api-timeout", "5s", + }) + + // Keep an open connection to the server by continuously sending bytes + // on the connection so it will have to be drained. + var conn net.Conn + retry.Run(t, func(r *retry.R) { + conn, err = net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", randomPorts[0])) + if err != nil { + require.NoError(r, err) + } + }) + go func() { + for { + _, err := conn.Write([]byte("hello")) + // Once the server has been shut down there will be an error writing to that connection. So, this + // will break out of the for loop and the goroutine will exit (and be cleaned up). + if err != nil { + break + } + } + }() + + // Send a signal to consul-sidecar. The merged metrics server can take + // up to metricsServerShutdownTimeout to finish cleaning up. + cmd.sendSignal(signal) + + // Will need to wait for slightly longer than the shutdown timeout to + // make sure that the command has exited shortly after the timeout. + waitForShutdown := metricsServerShutdownTimeout + 100*time.Millisecond + + // Assert that it exits cleanly or timeout. + select { + case exitCode := <-exitChan: + require.Equal(t, 0, exitCode, ui.ErrorWriter.String()) + case <-time.After(waitForShutdown): + // Fail if the signal was not caught. + require.Fail(t, "timeout waiting for command to exit") + } + }) + } +} + +type mockEnvoyMetricsGetter struct { + respStatusCode int +} + +func (em *mockEnvoyMetricsGetter) Get(_ string) (resp *http.Response, err error) { + response := &http.Response{} + response.StatusCode = em.respStatusCode + response.Body = io.NopCloser(bytes.NewReader([]byte("envoy metrics\n"))) + return response, nil +} + +// mockServiceMetricsGetter. +type mockServiceMetricsGetter struct { + // reqURL is the last URL that was passed to Get(url) + reqURL string + + // respStatusCode is the status code to use for the response. + respStatusCode int +} + +func (sm *mockServiceMetricsGetter) Get(url string) (resp *http.Response, err error) { + // Record the URL that we were called with. 
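+	// TestMergedMetricsServer reads this back to assert that the merged
+	// handler built the service scrape URL from -service-metrics-port and
+	// -service-metrics-path.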
+ sm.reqURL = url + + response := &http.Response{} + response.Body = io.NopCloser(bytes.NewReader([]byte("service metrics\n"))) + response.StatusCode = sm.respStatusCode + + return response, nil +} + +func TestMergedMetricsServer(t *testing.T) { + cases := []struct { + name string + envoyMetricsGetter *mockEnvoyMetricsGetter + serviceMetricsGetter *mockServiceMetricsGetter + expectedStatusCode int + expectedOutput string + }{ + { + name: "happy path: envoy and service metrics are merged", + envoyMetricsGetter: &mockEnvoyMetricsGetter{ + respStatusCode: 200, + }, + serviceMetricsGetter: &mockServiceMetricsGetter{ + respStatusCode: 200, + }, + expectedStatusCode: 200, + expectedOutput: "envoy metrics\nservice metrics\nconsul_merged_service_metrics_success 1\n", + }, + { + name: "service metrics non-200", + envoyMetricsGetter: &mockEnvoyMetricsGetter{ + respStatusCode: 200, + }, + serviceMetricsGetter: &mockServiceMetricsGetter{ + respStatusCode: 404, + }, + expectedStatusCode: 200, + expectedOutput: "envoy metrics\nconsul_merged_service_metrics_success 0\n", + }, + { + name: "envoy metrics non-200", + envoyMetricsGetter: &mockEnvoyMetricsGetter{ + respStatusCode: 404, + }, + serviceMetricsGetter: &mockServiceMetricsGetter{ + respStatusCode: 200, + }, + expectedStatusCode: 500, + expectedOutput: "Received non-2xx status code scraping Envoy proxy metrics: 404: envoy metrics\n\n", + }, + { + name: "envoy and service metrics non-200", + envoyMetricsGetter: &mockEnvoyMetricsGetter{ + respStatusCode: 500, + }, + serviceMetricsGetter: &mockServiceMetricsGetter{ + respStatusCode: 500, + }, + expectedStatusCode: 500, + expectedOutput: "Received non-2xx status code scraping Envoy proxy metrics: 500: envoy metrics\n\n", + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + randomPorts := freeport.GetN(t, 2) + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + flagEnableMetricsMerging: true, + flagMergedMetricsPort: fmt.Sprint(randomPorts[0]), + flagServiceMetricsPort: fmt.Sprint(randomPorts[1]), + flagServiceMetricsPath: "/metrics", + logger: hclog.Default(), + envoyMetricsGetter: c.envoyMetricsGetter, + serviceMetricsGetter: c.serviceMetricsGetter, + } + + server := cmd.createMergedMetricsServer() + go func() { + _ = server.ListenAndServe() + }() + defer server.Close() + + // Call the merged metrics endpoint and make assertions on the + // output. retry.Run times out in 7 seconds, which should give the + // merged metrics server enough time to come up. + retry.Run(t, func(r *retry.R) { + resp, err := http.Get(fmt.Sprintf("http://127.0.0.1:%d/stats/prometheus", randomPorts[0])) + require.NoError(r, err) + bytes, err := io.ReadAll(resp.Body) + require.NoError(r, err) + require.Equal(r, c.expectedOutput, string(bytes)) + // Verify the correct service metrics url was used. The service + // metrics endpoint is only called if the Envoy metrics endpoint + // call succeeds. 
+ if c.envoyMetricsGetter.respStatusCode == 200 { + require.Equal(r, fmt.Sprintf("http://127.0.0.1:%d%s", randomPorts[1], "/metrics"), c.serviceMetricsGetter.reqURL) + } + }) + }) + } +} + +func TestRun_FlagValidation(t *testing.T) { + t.Parallel() + cases := []struct { + Flags []string + ExpErr string + }{ + { + Flags: []string{""}, + ExpErr: "-service-config must be set", + }, + { + Flags: []string{ + "-service-config=/config.hcl", + "-consul-binary=", + }, + ExpErr: "-consul-binary must be set", + }, + { + Flags: []string{ + "-service-config=/config.hcl", + "-consul-binary=consul", + "-sync-period=0s", + }, + ExpErr: "-sync-period must be greater than 0", + }, + { + Flags: []string{ + "-enable-service-registration=false", + "-enable-metrics-merging=false", + }, + ExpErr: " at least one of -enable-service-registration or -enable-metrics-merging must be true", + }, + { + Flags: []string{ + "-service-config=/config.hcl", + "-consul-binary=consul", + "-sync-period=5s", + "-enable-service-registration=true", + }, + ExpErr: "-consul-api-timeout must be set to a value greater than 0", + }, + } + + for _, c := range cases { + t.Run(c.ExpErr, func(t *testing.T) { + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + } + responseCode := cmd.Run(c.Flags) + require.Equal(t, 1, responseCode, ui.ErrorWriter.String()) + require.Contains(t, ui.ErrorWriter.String(), c.ExpErr) + }) + } +} + +func TestRun_FlagValidation_ServiceConfigFileMissing(t *testing.T) { + t.Parallel() + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + } + responseCode := cmd.Run([]string{"-service-config=/does/not/exist", "-consul-binary=/not/a/valid/path", "-consul-api-timeout=5s"}) + require.Equal(t, 1, responseCode, ui.ErrorWriter.String()) + require.Contains(t, ui.ErrorWriter.String(), "-service-config file \"/does/not/exist\" not found") +} + +func TestRun_FlagValidation_ConsulBinaryMissing(t *testing.T) { + t.Parallel() + + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + } + + tmpDir, configFile := createServicesTmpFile(t, servicesRegistration) + defer os.RemoveAll(tmpDir) + + configFlag := "-service-config=" + configFile + + responseCode := cmd.Run([]string{configFlag, "-consul-binary=/not/a/valid/path", "-consul-api-timeout=5s"}) + require.Equal(t, 1, responseCode, ui.ErrorWriter.String()) + require.Contains(t, ui.ErrorWriter.String(), "-consul-binary \"/not/a/valid/path\" not found") +} + +func TestRun_FlagValidation_InvalidLogLevel(t *testing.T) { + t.Parallel() + + tmpDir, configFile := createServicesTmpFile(t, servicesRegistration) + defer os.RemoveAll(tmpDir) + + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + } + responseCode := cmd.Run([]string{"-service-config", configFile, "-consul-binary=consul", "-log-level=foo", "-consul-api-timeout=5s"}) + require.Equal(t, 1, responseCode, ui.ErrorWriter.String()) + require.Contains(t, ui.ErrorWriter.String(), "unknown log level: foo") +} + +// Test that we register the services. +func TestRun_ServicesRegistration(t *testing.T) { + t.Parallel() + + tmpDir, configFile := createServicesTmpFile(t, servicesRegistration) + defer os.RemoveAll(tmpDir) + + a, err := testutil.NewTestServerConfigT(t, nil) + require.NoError(t, err) + defer a.Stop() + + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + } + + // Run async because we need to kill it when the test is over. 
+ exitChan := runCommandAsynchronously(&cmd, []string{ + "-http-addr", a.HTTPAddr, + "-service-config", configFile, + "-sync-period", "100ms", + "-consul-api-timeout", "5s", + }) + defer stopCommand(t, &cmd, exitChan) + + client, err := api.NewClient(&api.Config{ + Address: a.HTTPAddr, + }) + require.NoError(t, err) + + retry.Run(t, func(r *retry.R) { + svc, _, err := client.Agent().Service("service-id", nil) + require.NoError(r, err) + require.Equal(r, 80, svc.Port) + + svcProxy, _, err := client.Agent().Service("service-id-sidecar-proxy", nil) + require.NoError(r, err) + require.Equal(r, 2000, svcProxy.Port) + }) +} + +// Test that we register services when the Consul agent is down at first. +func TestRun_ServicesRegistration_ConsulDown(t *testing.T) { + t.Parallel() + + tmpDir, configFile := createServicesTmpFile(t, servicesRegistration) + defer os.RemoveAll(tmpDir) + + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + } + + // We need to reserve all 6 ports to avoid potential + // port collisions with other tests. + randomPorts := freeport.GetN(t, 6) + + // Run async because we need to kill it when the test is over. + exitChan := runCommandAsynchronously(&cmd, []string{ + "-http-addr", fmt.Sprintf("127.0.0.1:%d", randomPorts[1]), + "-service-config", configFile, + "-sync-period", "100ms", + "-consul-api-timeout", "5s", + }) + defer stopCommand(t, &cmd, exitChan) + + // Start the Consul agent after 500ms. + time.Sleep(500 * time.Millisecond) + a, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { + c.Ports = &testutil.TestPortConfig{ + DNS: randomPorts[0], + HTTP: randomPorts[1], + HTTPS: randomPorts[2], + SerfLan: randomPorts[3], + SerfWan: randomPorts[4], + Server: randomPorts[5], + } + }) + require.NoError(t, err) + defer a.Stop() + + client, err := api.NewClient(&api.Config{ + Address: a.HTTPAddr, + }) + require.NoError(t, err) + + // The services should be registered when the Consul agent comes up. + retry.Run(t, func(r *retry.R) { + svc, _, err := client.Agent().Service("service-id", nil) + require.NoError(r, err) + require.Equal(r, 80, svc.Port) + + svcProxy, _, err := client.Agent().Service("service-id-sidecar-proxy", nil) + require.NoError(r, err) + require.Equal(r, 2000, svcProxy.Port) + }) +} + +// Test that we parse all flags and pass them down to the underlying Consul command. +func TestRun_ConsulCommandFlags(t *testing.T) { + t.Parallel() + tmpDir, configFile := createServicesTmpFile(t, servicesRegistration) + defer os.RemoveAll(tmpDir) + + a, err := testutil.NewTestServerConfigT(t, nil) + require.NoError(t, err) + defer a.Stop() + + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + } + + // Run async because we need to kill it when the test is over. + exitChan := runCommandAsynchronously(&cmd, []string{ + "-http-addr", a.HTTPAddr, + "-service-config", configFile, + "-sync-period", "1s", + "-consul-binary", "consul", + "-token=abc", + "-token-file=/token/file", + "-ca-file=/ca/file", + "-ca-path=/ca/path", + "-consul-api-timeout", "5s", + }) + defer stopCommand(t, &cmd, exitChan) + + expectedCommand := []string{ + "services", + "register", + "-http-addr=" + a.HTTPAddr, + "-token=abc", + "-token-file=/token/file", + "-ca-file=/ca/file", + "-ca-path=/ca/path", + configFile, + } + retry.Run(t, func(r *retry.R) { + require.ElementsMatch(r, expectedCommand, cmd.consulCommand) + }) +} + +// This function starts the command asynchronously and returns a non-blocking chan. +// When finished, the command will send its exit code to the channel.
+// Note that it's the responsibility of the caller to terminate the command by calling stopCommand, +// otherwise it can run forever. +func runCommandAsynchronously(cmd *Command, args []string) chan int { + // We have to run cmd.init() to ensure that the channel the command is + // using to watch for os interrupts is initialized. If we don't do this, + // then if stopCommand is called immediately, it will block forever + // because it calls interrupt() which will attempt to send on a nil channel. + cmd.init() + exitChan := make(chan int, 1) + go func() { + exitChan <- cmd.Run(args) + }() + return exitChan +} + +func stopCommand(t *testing.T, cmd *Command, exitChan chan int) { + if len(exitChan) == 0 { + cmd.interrupt() + } + c := <-exitChan + require.Equal(t, 0, c, string(cmd.UI.(*cli.MockUi).ErrorWriter.Bytes())) +} + +// createServicesTmpFile creates a temp directory +// and writes servicesRegistration as an HCL file there. +func createServicesTmpFile(t *testing.T, serviceHCL string) (string, string) { + tmpDir, err := os.MkdirTemp("", "") + require.NoError(t, err) + + configFile := filepath.Join(tmpDir, "svc.hcl") + err = os.WriteFile(configFile, []byte(serviceHCL), 0600) + require.NoError(t, err) + + return tmpDir, configFile +} + +const servicesRegistration = ` +services { + id = "service-id" + name = "service" + port = 80 +} +services { + id = "service-id-sidecar-proxy" + name = "service-sidecar-proxy" + port = 2000 + kind = "connect-proxy" + proxy { + destination_service_name = "service" + destination_service_id = "service-id" + local_service_port = 80 + } +}` diff --git a/control-plane/subcommand/controller/command.go b/control-plane/subcommand/controller/command.go new file mode 100644 index 0000000000..083eb72711 --- /dev/null +++ b/control-plane/subcommand/controller/command.go @@ -0,0 +1,410 @@ +package controller + +import ( + "context" + "errors" + "flag" + "fmt" + "os" + "sync" + + "github.com/hashicorp/consul-k8s/control-plane/api/common" + "github.com/hashicorp/consul-k8s/control-plane/api/v1alpha1" + "github.com/hashicorp/consul-k8s/control-plane/consul" + "github.com/hashicorp/consul-k8s/control-plane/controller" + mutatingwebhookconfiguration "github.com/hashicorp/consul-k8s/control-plane/helper/mutating-webhook-configuration" + cmdCommon "github.com/hashicorp/consul-k8s/control-plane/subcommand/common" + "github.com/hashicorp/consul-k8s/control-plane/subcommand/flags" + "github.com/hashicorp/consul/api" + "github.com/mitchellh/cli" + "go.uber.org/zap/zapcore" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/kubernetes" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "k8s.io/klog/v2" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/webhook" +) + +const WebhookCAFilename = "ca.crt" + +type Command struct { + UI cli.Ui + + flagSet *flag.FlagSet + httpFlags *flags.HTTPFlags + + flagWebhookTLSCertDir string + flagEnableLeaderElection bool + flagEnableWebhooks bool + flagDatacenter string + flagLogLevel string + flagLogJSON bool + flagResourcePrefix string + flagEnableWebhookCAUpdate bool + + // Flags to support Consul Enterprise namespaces. 
+ flagEnableNamespaces bool + flagConsulDestinationNamespace string + flagEnableNSMirroring bool + flagNSMirroringPrefix string + flagCrossNSACLPolicy string + + once sync.Once + help string +} + +var ( + scheme = runtime.NewScheme() + setupLog = ctrl.Log.WithName("setup") +) + +func init() { + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + utilruntime.Must(v1alpha1.AddToScheme(scheme)) + // +kubebuilder:scaffold:scheme +} + +func (c *Command) init() { + c.flagSet = flag.NewFlagSet("", flag.ContinueOnError) + c.flagSet.BoolVar(&c.flagEnableLeaderElection, "enable-leader-election", false, + "Enable leader election for controller. "+ + "Enabling this will ensure there is only one active controller manager.") + c.flagSet.StringVar(&c.flagDatacenter, "datacenter", "", + "Name of the Consul datacenter the controller is operating in. This is added as metadata on managed custom resources.") + c.flagSet.BoolVar(&c.flagEnableNamespaces, "enable-namespaces", false, + "[Enterprise Only] Enables Consul Enterprise namespaces, in either a single Consul namespace or mirrored.") + c.flagSet.StringVar(&c.flagConsulDestinationNamespace, "consul-destination-namespace", "default", + "[Enterprise Only] Defines which Consul namespace to create all config entries in, regardless of their source Kubernetes namespace."+ + " If '-enable-k8s-namespace-mirroring' is true, this is not used.") + c.flagSet.BoolVar(&c.flagEnableNSMirroring, "enable-k8s-namespace-mirroring", false, "[Enterprise Only] Enables "+ + "k8s namespace mirroring.") + c.flagSet.StringVar(&c.flagNSMirroringPrefix, "k8s-namespace-mirroring-prefix", "", + "[Enterprise Only] Prefix that will be added to all k8s namespaces mirrored into Consul if mirroring is enabled.") + c.flagSet.StringVar(&c.flagCrossNSACLPolicy, "consul-cross-namespace-acl-policy", "", + "[Enterprise Only] Name of the ACL policy to attach to all created Consul namespaces to allow service "+ + "discovery across Consul namespaces. Only necessary if ACLs are enabled.") + c.flagSet.StringVar(&c.flagWebhookTLSCertDir, "webhook-tls-cert-dir", "", + "Directory that contains the TLS cert and key required for the webhook. The cert and key files must be named 'tls.crt' and 'tls.key' respectively.") + c.flagSet.BoolVar(&c.flagEnableWebhooks, "enable-webhooks", true, + "Enable webhooks. Disable when running locally since Kube API server won't be able to route to local server.") + c.flagSet.StringVar(&c.flagResourcePrefix, "resource-prefix", "", + "Release prefix of the Consul installation used to prepend on the webhook name that will have its CA bundle updated.") + c.flagSet.BoolVar(&c.flagEnableWebhookCAUpdate, "enable-webhook-ca-update", false, + "Enables updating the CABundle on the webhook within this controller rather than using the webhook-cert-manager.") + c.flagSet.StringVar(&c.flagLogLevel, "log-level", zapcore.InfoLevel.String(), + fmt.Sprintf("Log verbosity level. 
Supported values (in order of detail) are "+ + "%q, %q, %q, and %q.", zapcore.DebugLevel.String(), zapcore.InfoLevel.String(), zapcore.WarnLevel.String(), zapcore.ErrorLevel.String())) + c.flagSet.BoolVar(&c.flagLogJSON, "log-json", false, + "Enable or disable JSON output format for logging.") + + c.httpFlags = &flags.HTTPFlags{} + flags.Merge(c.flagSet, c.httpFlags.Flags()) + c.help = flags.Usage(help, c.flagSet) +} + +func (c *Command) Run(args []string) int { + c.once.Do(c.init) + if err := c.flagSet.Parse(args); err != nil { + c.UI.Error(fmt.Sprintf("Parsing flagset: %s", err.Error())) + return 1 + } + // Validate flags + if err := c.validateFlags(); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + zapLogger, err := cmdCommon.ZapLogger(c.flagLogLevel, c.flagLogJSON) + if err != nil { + c.UI.Error(fmt.Sprintf("Error setting up logging: %s", err.Error())) + return 1 + } + ctrl.SetLogger(zapLogger) + klog.SetLogger(zapLogger) + + mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ + Scheme: scheme, + Port: 9443, + LeaderElection: c.flagEnableLeaderElection, + LeaderElectionID: "consul.hashicorp.com", + Logger: zapLogger, + }) + if err != nil { + setupLog.Error(err, "unable to start manager") + return 1 + } + + cfg := api.DefaultConfig() + c.httpFlags.MergeOntoConfig(cfg) + consulClient, err := consul.NewClient(cfg, c.httpFlags.ConsulAPITimeout()) + if err != nil { + setupLog.Error(err, "connecting to Consul agent") + return 1 + } + + partitionsEnabled := c.httpFlags.Partition() != "" + consulMeta := common.ConsulMeta{ + PartitionsEnabled: partitionsEnabled, + Partition: c.httpFlags.Partition(), + NamespacesEnabled: c.flagEnableNamespaces, + DestinationNamespace: c.flagConsulDestinationNamespace, + Mirroring: c.flagEnableNSMirroring, + Prefix: c.flagNSMirroringPrefix, + } + + configEntryReconciler := &controller.ConfigEntryController{ + ConsulClient: consulClient, + DatacenterName: c.flagDatacenter, + EnableConsulNamespaces: c.flagEnableNamespaces, + ConsulDestinationNamespace: c.flagConsulDestinationNamespace, + EnableNSMirroring: c.flagEnableNSMirroring, + NSMirroringPrefix: c.flagNSMirroringPrefix, + CrossNSACLPolicy: c.flagCrossNSACLPolicy, + } + if err = (&controller.ServiceDefaultsController{ + ConfigEntryController: configEntryReconciler, + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controller").WithName(common.ServiceDefaults), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", common.ServiceDefaults) + return 1 + } + if err = (&controller.ServiceResolverController{ + ConfigEntryController: configEntryReconciler, + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controller").WithName(common.ServiceResolver), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", common.ServiceResolver) + return 1 + } + if err = (&controller.ProxyDefaultsController{ + ConfigEntryController: configEntryReconciler, + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controller").WithName(common.ProxyDefaults), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", common.ProxyDefaults) + return 1 + } + if err = (&controller.MeshController{ + ConfigEntryController: configEntryReconciler, + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controller").WithName(common.Mesh), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); 
err != nil { + setupLog.Error(err, "unable to create controller", "controller", common.Mesh) + return 1 + } + if err = (&controller.ExportedServicesController{ + ConfigEntryController: configEntryReconciler, + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controller").WithName(common.ExportedServices), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", common.ExportedServices) + return 1 + } + if err = (&controller.ServiceRouterController{ + ConfigEntryController: configEntryReconciler, + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controller").WithName(common.ServiceRouter), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", common.ServiceRouter) + return 1 + } + if err = (&controller.ServiceSplitterController{ + ConfigEntryController: configEntryReconciler, + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controller").WithName(common.ServiceSplitter), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", common.ServiceSplitter) + return 1 + } + if err = (&controller.ServiceIntentionsController{ + ConfigEntryController: configEntryReconciler, + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controller").WithName(common.ServiceIntentions), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", common.ServiceIntentions) + return 1 + } + if err = (&controller.IngressGatewayController{ + ConfigEntryController: configEntryReconciler, + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controller").WithName(common.IngressGateway), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", common.IngressGateway) + return 1 + } + if err = (&controller.TerminatingGatewayController{ + ConfigEntryController: configEntryReconciler, + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controller").WithName(common.TerminatingGateway), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", common.TerminatingGateway) + return 1 + } + + if c.flagEnableWebhooks { + // This webhook server sets up a Cert Watcher on the CertDir. This watches for file changes and updates the webhook certificates + // automatically when new certificates are available. + mgr.GetWebhookServer().CertDir = c.flagWebhookTLSCertDir + + // Note: The path here should be identical to the one on the kubebuilder + // annotation in each webhook file. 
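// Editor's sketch (hedged): the Cert Watcher behavior described above, in
// miniature. controller-runtime's webhook server watches CertDir itself; a
// standard-library stand-in would re-load the keypair on each TLS handshake
// so rotated certificates are served without a restart (certDir is a
// stand-in name):
//
//	srv := &http.Server{
//	    Addr: ":9443",
//	    TLSConfig: &tls.Config{
//	        GetCertificate: func(*tls.ClientHelloInfo) (*tls.Certificate, error) {
//	            cert, err := tls.LoadX509KeyPair(certDir+"/tls.crt", certDir+"/tls.key")
//	            return &cert, err
//	        },
//	    },
//	}
//	log.Fatal(srv.ListenAndServeTLS("", "")) // empty paths: certs come from GetCertificate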
+ mgr.GetWebhookServer().Register("/mutate-v1alpha1-servicedefaults", + &webhook.Admission{Handler: &v1alpha1.ServiceDefaultsWebhook{ + Client: mgr.GetClient(), + ConsulClient: consulClient, + Logger: ctrl.Log.WithName("webhooks").WithName(common.ServiceDefaults), + ConsulMeta: consulMeta, + }}) + mgr.GetWebhookServer().Register("/mutate-v1alpha1-serviceresolver", + &webhook.Admission{Handler: &v1alpha1.ServiceResolverWebhook{ + Client: mgr.GetClient(), + ConsulClient: consulClient, + Logger: ctrl.Log.WithName("webhooks").WithName(common.ServiceResolver), + ConsulMeta: consulMeta, + }}) + mgr.GetWebhookServer().Register("/mutate-v1alpha1-proxydefaults", + &webhook.Admission{Handler: &v1alpha1.ProxyDefaultsWebhook{ + Client: mgr.GetClient(), + ConsulClient: consulClient, + Logger: ctrl.Log.WithName("webhooks").WithName(common.ProxyDefaults), + ConsulMeta: consulMeta, + }}) + mgr.GetWebhookServer().Register("/mutate-v1alpha1-mesh", + &webhook.Admission{Handler: &v1alpha1.MeshWebhook{ + Client: mgr.GetClient(), + ConsulClient: consulClient, + Logger: ctrl.Log.WithName("webhooks").WithName(common.Mesh), + }}) + mgr.GetWebhookServer().Register("/mutate-v1alpha1-exportedservices", + &webhook.Admission{Handler: &v1alpha1.ExportedServicesWebhook{ + Client: mgr.GetClient(), + ConsulClient: consulClient, + Logger: ctrl.Log.WithName("webhooks").WithName(common.ExportedServices), + ConsulMeta: consulMeta, + }}) + mgr.GetWebhookServer().Register("/mutate-v1alpha1-servicerouter", + &webhook.Admission{Handler: &v1alpha1.ServiceRouterWebhook{ + Client: mgr.GetClient(), + ConsulClient: consulClient, + Logger: ctrl.Log.WithName("webhooks").WithName(common.ServiceRouter), + ConsulMeta: consulMeta, + }}) + mgr.GetWebhookServer().Register("/mutate-v1alpha1-servicesplitter", + &webhook.Admission{Handler: &v1alpha1.ServiceSplitterWebhook{ + Client: mgr.GetClient(), + ConsulClient: consulClient, + Logger: ctrl.Log.WithName("webhooks").WithName(common.ServiceSplitter), + ConsulMeta: consulMeta, + }}) + mgr.GetWebhookServer().Register("/mutate-v1alpha1-serviceintentions", + &webhook.Admission{Handler: &v1alpha1.ServiceIntentionsWebhook{ + Client: mgr.GetClient(), + ConsulClient: consulClient, + Logger: ctrl.Log.WithName("webhooks").WithName(common.ServiceIntentions), + ConsulMeta: consulMeta, + }}) + mgr.GetWebhookServer().Register("/mutate-v1alpha1-ingressgateway", + &webhook.Admission{Handler: &v1alpha1.IngressGatewayWebhook{ + Client: mgr.GetClient(), + ConsulClient: consulClient, + Logger: ctrl.Log.WithName("webhooks").WithName(common.IngressGateway), + ConsulMeta: consulMeta, + }}) + mgr.GetWebhookServer().Register("/mutate-v1alpha1-terminatinggateway", + &webhook.Admission{Handler: &v1alpha1.TerminatingGatewayWebhook{ + Client: mgr.GetClient(), + ConsulClient: consulClient, + Logger: ctrl.Log.WithName("webhooks").WithName(common.TerminatingGateway), + ConsulMeta: consulMeta, + }}) + } + // +kubebuilder:scaffold:builder + + if c.flagEnableWebhookCAUpdate { + err := c.updateWebhookCABundle() + if err != nil { + setupLog.Error(err, "problem getting CA Cert") + return 1 + } + } + + setupLog.Info("starting manager") + if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { + setupLog.Error(err, "problem running manager") + return 1 + } + return 0 +} + +func (c *Command) updateWebhookCABundle() error { + // Create a context to be used by the processes started in this command. 
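// Editor's sketch (hedged): UpdateWithCABundle, called below, is assumed to
// patch the caBundle on the webhook's clientConfig via client-go, roughly:
//
//	patch := []byte(fmt.Sprintf(
//	    `[{"op": "add", "path": "/webhooks/0/clientConfig/caBundle", "value": %q}]`,
//	    base64.StdEncoding.EncodeToString(caCert)))
//	_, err := clientset.AdmissionregistrationV1().MutatingWebhookConfigurations().
//	    Patch(ctx, webhookConfigName, types.JSONPatchType, patch, metav1.PatchOptions{})
//
// The real helper lives in control-plane/helper/mutating-webhook-configuration.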
+ ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + config, err := rest.InClusterConfig() + if err != nil { + return err + } + clientset, err := kubernetes.NewForConfig(config) + if err != nil { + return err + } + + webhookConfigName := fmt.Sprintf("%s-controller", c.flagResourcePrefix) + caPath := fmt.Sprintf("%s/%s", c.flagWebhookTLSCertDir, WebhookCAFilename) + caCert, err := os.ReadFile(caPath) + if err != nil { + return err + } + err = mutatingwebhookconfiguration.UpdateWithCABundle(ctx, clientset, webhookConfigName, caCert) + if err != nil { + return err + } + return nil +} + +func (c *Command) validateFlags() error { + if len(c.flagSet.Args()) > 0 { + return errors.New("Invalid arguments: should have no non-flag arguments") + } + if c.flagEnableWebhooks && c.flagWebhookTLSCertDir == "" { + return errors.New("Invalid arguments: -webhook-tls-cert-dir must be set") + } + if c.flagDatacenter == "" { + return errors.New("Invalid arguments: -datacenter must be set") + } + if c.httpFlags.ConsulAPITimeout() <= 0 { + return errors.New("-consul-api-timeout must be set to a value greater than 0") + } + + return nil +} +func (c *Command) Help() string { + c.once.Do(c.init) + return c.help +} + +func (c *Command) Synopsis() string { + return synopsis +} + +const synopsis = "Starts the Consul Kubernetes controller" +const help = ` +Usage: consul-k8s-control-plane controller [options] + + Starts the Consul Kubernetes controller that manages Consul Custom Resource Definitions + +` diff --git a/control-plane/subcommand/controller/command_test.go b/control-plane/subcommand/controller/command_test.go new file mode 100644 index 0000000000..016299d125 --- /dev/null +++ b/control-plane/subcommand/controller/command_test.go @@ -0,0 +1,49 @@ +package controller + +import ( + "testing" + + "github.com/mitchellh/cli" + "github.com/stretchr/testify/require" +) + +func TestRun_FlagValidation(t *testing.T) { + t.Parallel() + + cases := []struct { + flags []string + expErr string + }{ + { + flags: nil, + expErr: "-webhook-tls-cert-dir must be set", + }, + { + flags: []string{"-datacenter", "foo"}, + expErr: "-webhook-tls-cert-dir must be set", + }, + { + flags: []string{"-webhook-tls-cert-dir", "/foo"}, + expErr: "-datacenter must be set", + }, + { + flags: []string{"-webhook-tls-cert-dir", "/foo", "-datacenter", "foo"}, + expErr: "-consul-api-timeout must be set to a value greater than 0", + }, + { + flags: []string{"-webhook-tls-cert-dir", "/foo", "-datacenter", "foo", + "-consul-api-timeout", "5s", "-log-level", "invalid"}, + expErr: `unknown log level "invalid": unrecognized level: "invalid"`, + }, + } + + for _, c := range cases { + t.Run(c.expErr, func(tt *testing.T) { + ui := cli.NewMockUi() + cmd := Command{UI: ui} + exitCode := cmd.Run(c.flags) + require.Equal(tt, 1, exitCode, ui.ErrorWriter.String()) + require.Contains(tt, ui.ErrorWriter.String(), c.expErr) + }) + } +} diff --git a/control-plane/subcommand/create-federation-secret/command_test.go b/control-plane/subcommand/create-federation-secret/command_test.go index d0f85fa686..dca207d8f1 100644 --- a/control-plane/subcommand/create-federation-secret/command_test.go +++ b/control-plane/subcommand/create-federation-secret/command_test.go @@ -28,7 +28,7 @@ func TestRun_FlagValidation(t *testing.T) { t.Parallel() f, err := os.CreateTemp("", "") require.NoError(t, err) - defer os.RemoveAll(f.Name()) + defer os.Remove(f.Name()) cases := []struct { flags []string @@ -101,7 +101,7 @@ func TestRun_CAFileMissing(t *testing.T) 
{ t.Parallel() f, err := os.CreateTemp("", "") require.NoError(t, err) - defer os.RemoveAll(f.Name()) + defer os.Remove(f.Name()) ui := cli.NewMockUi() cmd := Command{ @@ -124,7 +124,7 @@ func TestRun_ServerCACertFileMissing(t *testing.T) { t.Parallel() f, err := os.CreateTemp("", "") require.NoError(t, err) - defer os.RemoveAll(f.Name()) + defer os.Remove(f.Name()) ui := cli.NewMockUi() cmd := Command{ @@ -147,7 +147,7 @@ func TestRun_ServerCAKeyFileMissing(t *testing.T) { t.Parallel() f, err := os.CreateTemp("", "") require.NoError(t, err) - defer os.RemoveAll(f.Name()) + defer os.Remove(f.Name()) ui := cli.NewMockUi() cmd := Command{ @@ -170,7 +170,7 @@ func TestRun_GossipEncryptionKeyFileMissing(t *testing.T) { t.Parallel() f, err := os.CreateTemp("", "") require.NoError(t, err) - defer os.RemoveAll(f.Name()) + defer os.Remove(f.Name()) ui := cli.NewMockUi() cmd := Command{ @@ -194,7 +194,7 @@ func TestRun_GossipEncryptionKeyFileEmpty(t *testing.T) { t.Parallel() f, err := os.CreateTemp("", "") require.NoError(t, err) - defer os.RemoveAll(f.Name()) + defer os.Remove(f.Name()) ui := cli.NewMockUi() cmd := Command{ @@ -220,7 +220,7 @@ func TestRun_ReplicationTokenMissingExpectedKey(t *testing.T) { t.Parallel() f, err := os.CreateTemp("", "") require.NoError(t, err) - defer os.RemoveAll(f.Name()) + defer os.Remove(f.Name()) ui := cli.NewMockUi() k8s := fake.NewSimpleClientset() diff --git a/control-plane/subcommand/flags/consul.go b/control-plane/subcommand/flags/consul.go deleted file mode 100644 index 1294729a6b..0000000000 --- a/control-plane/subcommand/flags/consul.go +++ /dev/null @@ -1,276 +0,0 @@ -package flags - -import ( - "crypto/tls" - "flag" - "os" - "strconv" - "strings" - "time" - - "github.com/hashicorp/consul-k8s/control-plane/consul" - "github.com/hashicorp/consul-server-connection-manager/discovery" - "github.com/hashicorp/consul/api" - "github.com/hashicorp/go-rootcerts" -) - -const ( - AddressesEnvVar = "CONSUL_ADDRESSES" - GRPCPortEnvVar = "CONSUL_GRPC_PORT" - HTTPPortEnvVar = "CONSUL_HTTP_PORT" - - NamespaceEnvVar = "CONSUL_NAMESPACE" - PartitionEnvVar = "CONSUL_PARTITION" - DatacenterEnvVar = "CONSUL_DATACENTER" - - UseTLSEnvVar = "CONSUL_USE_TLS" - CACertFileEnvVar = "CONSUL_CACERT_FILE" - CACertPEMEnvVar = "CONSUL_CACERT_PEM" - TLSServerNameEnvVar = "CONSUL_TLS_SERVER_NAME" - - ACLTokenEnvVar = "CONSUL_ACL_TOKEN" - ACLTokenFileEnvVar = "CONSUL_ACL_TOKEN_FILE" - - LoginAuthMethodEnvVar = "CONSUL_LOGIN_AUTH_METHOD" - LoginBearerTokenFileEnvVar = "CONSUL_LOGIN_BEARER_TOKEN_FILE" - LoginDatacenterEnvVar = "CONSUL_LOGIN_DATACENTER" - LoginPartitionEnvVar = "CONSUL_LOGIN_PARTITION" - LoginNamespaceEnvVar = "CONSUL_LOGIN_NAMESPACE" - LoginMetaEnvVar = "CONSUL_LOGIN_META" - - SkipServerWatchEnvVar = "CONSUL_SKIP_SERVER_WATCH" - - APITimeoutEnvVar = "CONSUL_API_TIMEOUT" -) - -// ConsulFlags is a set of flags used to connect to Consul (servers). 
-type ConsulFlags struct { - Addresses string - GRPCPort int - HTTPPort int - APITimeout time.Duration - - Namespace string - Partition string - Datacenter string - - SkipServerWatch bool - - ConsulTLSFlags - ConsulACLFlags -} - -type ConsulTLSFlags struct { - UseTLS bool - CACertFile string - CACertPEM string - TLSServerName string -} - -type ConsulACLFlags struct { - ConsulLogin ConsulLoginFlags - - Token string - TokenFile string -} - -type ConsulLoginFlags struct { - AuthMethod string - BearerTokenFile string - Datacenter string - Namespace string - Partition string - Meta map[string]string -} - -func (f *ConsulFlags) Flags() *flag.FlagSet { - fs := flag.NewFlagSet("consul", flag.ContinueOnError) - - // Ignore parsing errors below because if we can't parse env variable because we want to - // behave as if that env variable is not provided. - grpcPort, _ := strconv.Atoi(os.Getenv(GRPCPortEnvVar)) - httpPort, _ := strconv.Atoi(os.Getenv(HTTPPortEnvVar)) - useTLS, _ := strconv.ParseBool(os.Getenv(UseTLSEnvVar)) - skipServerWatch, _ := strconv.ParseBool(os.Getenv(SkipServerWatchEnvVar)) - consulLoginMetaFromEnv := os.Getenv(LoginMetaEnvVar) - if consulLoginMetaFromEnv != "" { - // Parse meta from env var. - metaKeyValuePairs := strings.Split(consulLoginMetaFromEnv, ",") - for _, metaKeyValue := range metaKeyValuePairs { - kvList := strings.Split(metaKeyValue, "=") - // We want to skip setting meta from env var if the key-value pairs are not provided correctly. - if len(kvList) == 2 { - if f.ConsulLogin.Meta == nil { - f.ConsulLogin.Meta = make(map[string]string) - } - f.ConsulLogin.Meta[kvList[0]] = kvList[1] - } - } - } - - defaultConsulLoginBearerTokenFile := "/var/run/secrets/kubernetes.io/serviceaccount/token" - if bearerTokenFileEnvVar := os.Getenv(LoginBearerTokenFileEnvVar); bearerTokenFileEnvVar != "" { - defaultConsulLoginBearerTokenFile = bearerTokenFileEnvVar - } - - defaultAPITimeout := 5 * time.Second - if apiTimeoutEnv := os.Getenv(APITimeoutEnvVar); apiTimeoutEnv != "" { - parsedAPITimeout, _ := time.ParseDuration(apiTimeoutEnv) - if parsedAPITimeout != 0 { - defaultAPITimeout = parsedAPITimeout - } - } - - fs.StringVar(&f.Addresses, "addresses", os.Getenv(AddressesEnvVar), - "Consul server addresses. Can also be provided via CONSUL_ADDRESSES environment variable. "+ - "Value can be:\n"+ - "1. DNS name (that resolves to servers or DNS name of a load-balancer front of Consul servers) or an IP address; OR\n"+ - "2.'exec='. The executable\n"+ - " a) on success - should exit 0 and print to stdout whitespace delimited IP (v4/v6) addresses\n"+ - " b) on failure - exit with a non-zero code and optionally print an error message of upto 1024 bytes to stderr.\n"+ - " Refer to https://github.com/hashicorp/go-netaddrs#summary for more details and examples.") - fs.IntVar(&f.GRPCPort, "grpc-port", grpcPort, - "gRPC port to use when connecting to Consul servers.") - fs.IntVar(&f.HTTPPort, "http-port", httpPort, - "HTTP or HTTPs port to use when connecting to Consul servers.") - fs.StringVar(&f.Namespace, "namespace", os.Getenv(NamespaceEnvVar), - "[Enterprise only] Consul namespace.") - fs.StringVar(&f.Partition, "partition", os.Getenv(PartitionEnvVar), - "[Enterprise only] Consul admin partition. 
Default to \"default\" if Admin Partitions are enabled.") - fs.StringVar(&f.Datacenter, "datacenter", os.Getenv(DatacenterEnvVar), - "Consul datacenter.") - fs.StringVar(&f.CACertFile, "ca-cert-file", os.Getenv(CACertFileEnvVar), - "Path to a CA certificate to use for TLS when communicating with Consul.") - fs.StringVar(&f.CACertPEM, "ca-cert-pem", os.Getenv(CACertPEMEnvVar), - "CA certificate PEM to use for TLS when communicating with Consul.") - fs.StringVar(&f.TLSServerName, "tls-server-name", os.Getenv(TLSServerNameEnvVar), - "The server name to use as the SNI host when connecting via TLS. "+ - "This can also be specified via the CONSUL_TLS_SERVER_NAME environment variable.") - fs.BoolVar(&f.UseTLS, "use-tls", useTLS, "If true, use TLS for connections to Consul.") - fs.StringVar(&f.Token, "token", os.Getenv(ACLTokenEnvVar), - "ACL token to use for connection to Consul."+ - "This can also be specified via the CONSUL_ACL_TOKEN environment variable.") - fs.StringVar(&f.TokenFile, "token-file", os.Getenv(ACLTokenFileEnvVar), - "ACL token file to use for connection to Consul."+ - "This can also be specified via the CONSUL_ACL_TOKEN_FILE environment variable.") - fs.StringVar(&f.ConsulLogin.AuthMethod, "auth-method-name", os.Getenv(LoginAuthMethodEnvVar), - "Auth method name to use for login to Consul."+ - "This can also be specified via the CONSUL_LOGIN_AUTH_METHOD environment variable.") - fs.StringVar(&f.ConsulLogin.BearerTokenFile, "consul-login-bearer-token-file", defaultConsulLoginBearerTokenFile, - "Bearer token file to use for login to Consul."+ - "This can also be specified via the CONSUL_LOGIN_BEARER_TOKEN_FILE environment variable.") - fs.StringVar(&f.ConsulLogin.Datacenter, "consul-login-datacenter", os.Getenv(LoginDatacenterEnvVar), - "Auth method datacenter to use for login to Consul."+ - "This can also be specified via the CONSUL_LOGIN_DATACENTER environment variable.") - fs.StringVar(&f.ConsulLogin.Partition, "consul-login-partition", os.Getenv(LoginPartitionEnvVar), - "Auth method partition to use for login to Consul."+ - "This can also be specified via the CONSUL_LOGIN_PARTITION environment variable.") - fs.StringVar(&f.ConsulLogin.Namespace, "consul-login-namespace", os.Getenv(LoginNamespaceEnvVar), - "Auth method namespace to use for login to Consul."+ - "This can also be specified via the CONSUL_LOGIN_NAMESPACE environment variable.") - fs.Var((*FlagMapValue)(&f.ConsulLogin.Meta), "consul-login-meta", - "Metadata to set on the token, formatted as key=value. 
This flag "+ - "may be specified multiple times to set multiple meta fields.") - fs.DurationVar(&f.APITimeout, "api-timeout", defaultAPITimeout, - "The time in seconds that the consul API client will wait for a response from the API before cancelling the request.") - fs.BoolVar(&f.SkipServerWatch, "skip-server-watch", skipServerWatch, "If true, skip watching server upstream."+ - "This can also be specified via the CONSUL_SKIP_SERVER_WATCH environment variable.") - return fs -} - -func (f *ConsulFlags) ConsulServerConnMgrConfig() (discovery.Config, error) { - cfg := discovery.Config{ - Addresses: f.Addresses, - GRPCPort: f.GRPCPort, - } - - if f.UseTLS { - tlsConfig := &tls.Config{} - if f.CACertFile != "" { - err := rootcerts.ConfigureTLS(tlsConfig, &rootcerts.Config{ - CAFile: f.CACertFile, - }) - if err != nil { - return discovery.Config{}, err - } - } else if f.CACertPEM != "" { - err := rootcerts.ConfigureTLS(tlsConfig, &rootcerts.Config{ - CACertificate: []byte(f.CACertPEM), - }) - if err != nil { - return discovery.Config{}, err - } - } - tlsConfig.ServerName = f.TLSServerName - cfg.TLS = tlsConfig - } - - if f.ConsulLogin.AuthMethod != "" { - cfg.Credentials.Type = discovery.CredentialsTypeLogin - cfg.Credentials.Login.AuthMethod = f.ConsulLogin.AuthMethod - cfg.Credentials.Login.Namespace = f.ConsulLogin.Namespace - cfg.Credentials.Login.Partition = f.ConsulLogin.Partition - cfg.Credentials.Login.Datacenter = f.ConsulLogin.Datacenter - cfg.Credentials.Login.Meta = f.ConsulLogin.Meta - - bearerToken, err := os.ReadFile(f.ConsulLogin.BearerTokenFile) - if err != nil { - return discovery.Config{}, err - } - cfg.Credentials.Login.BearerToken = string(bearerToken) - } else if f.Token != "" { - cfg.Credentials.Type = discovery.CredentialsTypeStatic - cfg.Credentials.Static.Token = f.Token - } else if f.TokenFile != "" { - token, err := os.ReadFile(f.TokenFile) - if err != nil { - return discovery.Config{}, err - } - cfg.Credentials.Type = discovery.CredentialsTypeStatic - cfg.Credentials.Static.Token = string(token) - } - - if f.SkipServerWatch { - cfg.ServerWatchDisabled = true - } - - return cfg, nil -} - -func (f *ConsulFlags) ConsulClientConfig() *consul.Config { - cfg := &api.Config{ - Namespace: f.Namespace, - Partition: f.Partition, - Datacenter: f.Datacenter, - Scheme: "http", - } - - if f.UseTLS { - cfg.Scheme = "https" - if f.CACertFile != "" { - cfg.TLSConfig.CAFile = f.CACertFile - } else if f.CACertPEM != "" { - cfg.TLSConfig.CAPem = []byte(f.CACertPEM) - } - - // Infer TLS server name from addresses. 
- if f.TLSServerName == "" && !strings.HasPrefix(f.Addresses, "exec=") { - cfg.TLSConfig.Address = f.Addresses - } else if f.TLSServerName != "" { - cfg.TLSConfig.Address = f.TLSServerName - } - } - - if f.Token != "" { - cfg.Token = f.Token - } else if f.TokenFile != "" { - cfg.TokenFile = f.TokenFile - } - - return &consul.Config{ - APIClientConfig: cfg, - HTTPPort: f.HTTPPort, - GRPCPort: f.GRPCPort, - APITimeout: f.APITimeout, - } -} diff --git a/control-plane/subcommand/flags/consul_test.go b/control-plane/subcommand/flags/consul_test.go deleted file mode 100644 index b53025daa6..0000000000 --- a/control-plane/subcommand/flags/consul_test.go +++ /dev/null @@ -1,450 +0,0 @@ -package flags - -import ( - "crypto/tls" - "os" - "testing" - "time" - - "github.com/hashicorp/consul-server-connection-manager/discovery" - "github.com/hashicorp/consul/api" - "github.com/stretchr/testify/require" -) - -func TestConsulFlags_Flags(t *testing.T) { - cases := map[string]struct { - env map[string]string - expFlags *ConsulFlags - }{ - "env vars": { - env: map[string]string{ - AddressesEnvVar: "consul.address", - GRPCPortEnvVar: "8503", - HTTPPortEnvVar: "8501", - NamespaceEnvVar: "test-ns", - PartitionEnvVar: "test-partition", - DatacenterEnvVar: "test-dc", - APITimeoutEnvVar: "10s", - - UseTLSEnvVar: "true", - CACertFileEnvVar: "path/to/ca.pem", - CACertPEMEnvVar: "test-ca-pem", - TLSServerNameEnvVar: "server.consul", - - ACLTokenEnvVar: "test-token", - ACLTokenFileEnvVar: "/path/to/token", - LoginAuthMethodEnvVar: "test-auth-method", - LoginBearerTokenFileEnvVar: "path/to/token", - LoginDatacenterEnvVar: "other-test-dc", - LoginPartitionEnvVar: "other-test-partition", - LoginNamespaceEnvVar: "other-test-ns", - LoginMetaEnvVar: "key1=value1,key2=value2", - SkipServerWatchEnvVar: "true", - }, - expFlags: &ConsulFlags{ - Addresses: "consul.address", - GRPCPort: 8503, - HTTPPort: 8501, - Namespace: "test-ns", - Partition: "test-partition", - Datacenter: "test-dc", - APITimeout: 10 * time.Second, - ConsulTLSFlags: ConsulTLSFlags{ - UseTLS: true, - CACertFile: "path/to/ca.pem", - CACertPEM: "test-ca-pem", - TLSServerName: "server.consul", - }, - ConsulACLFlags: ConsulACLFlags{ - Token: "test-token", - TokenFile: "/path/to/token", - ConsulLogin: ConsulLoginFlags{ - AuthMethod: "test-auth-method", - BearerTokenFile: "path/to/token", - Datacenter: "other-test-dc", - Partition: "other-test-partition", - Namespace: "other-test-ns", - Meta: map[string]string{"key1": "value1", "key2": "value2"}, - }, - }, - SkipServerWatch: true, - }, - }, - "defaults": { - expFlags: &ConsulFlags{ - APITimeout: 5 * time.Second, - ConsulACLFlags: ConsulACLFlags{ - ConsulLogin: ConsulLoginFlags{ - BearerTokenFile: "/var/run/secrets/kubernetes.io/serviceaccount/token", - }, - }, - }, - }, - "ignore invalid env vars": { - env: map[string]string{ - GRPCPortEnvVar: "not-int-grpc-port", - HTTPPortEnvVar: "not-int-http-port", - APITimeoutEnvVar: "10sec", - - UseTLSEnvVar: "not-a-bool", - - LoginMetaEnvVar: "key1:value1;key2:value2", - }, - expFlags: &ConsulFlags{ - APITimeout: 5 * time.Second, - ConsulACLFlags: ConsulACLFlags{ - ConsulLogin: ConsulLoginFlags{ - BearerTokenFile: "/var/run/secrets/kubernetes.io/serviceaccount/token", - }, - }, - }, - }, - } - - for name, c := range cases { - t.Run(name, func(t *testing.T) { - for k, v := range c.env { - err := os.Setenv(k, v) - require.NoError(t, err) - } - t.Cleanup(func() { - for k := range c.env { - _ = os.Unsetenv(k) - } - }) - - cf := &ConsulFlags{} - consulFlags := cf.Flags() - err 
:= consulFlags.Parse(nil) - require.NoError(t, err) - require.Equal(t, c.expFlags, cf) - }) - } -} - -func TestConsulFlags_ConsulServerConnMgrConfig(t *testing.T) { - cases := map[string]struct { - flags ConsulFlags - expConfig discovery.Config - }{ - "basic flags without TLS or ACLs": { - flags: ConsulFlags{ - Addresses: "consul.address", - GRPCPort: 8502, - }, - expConfig: discovery.Config{ - Addresses: "consul.address", - GRPCPort: 8502, - }, - }, - "default TLS": { - flags: ConsulFlags{ - Addresses: "consul.address", - ConsulTLSFlags: ConsulTLSFlags{ - UseTLS: true, - }, - }, - expConfig: discovery.Config{ - Addresses: "consul.address", - TLS: &tls.Config{}, - }, - }, - "ACL Auth method": { - flags: ConsulFlags{ - Addresses: "consul.address", - ConsulACLFlags: ConsulACLFlags{ - ConsulLogin: ConsulLoginFlags{ - AuthMethod: "test-auth-method", - Namespace: "test-ns", - Partition: "test-partition", - Datacenter: "test-dc", - Meta: map[string]string{"key1": "value1", "key2": "value2"}, - }, - }, - }, - expConfig: discovery.Config{ - Addresses: "consul.address", - Credentials: discovery.Credentials{ - Type: discovery.CredentialsTypeLogin, - Login: discovery.LoginCredential{ - AuthMethod: "test-auth-method", - Namespace: "test-ns", - Partition: "test-partition", - Datacenter: "test-dc", - BearerToken: "bearer-token", - Meta: map[string]string{"key1": "value1", "key2": "value2"}, - }, - }, - }, - }, - "Static ACL token": { - flags: ConsulFlags{ - Addresses: "consul.address", - ConsulACLFlags: ConsulACLFlags{ - Token: "test-token", - }, - }, - expConfig: discovery.Config{ - Addresses: "consul.address", - Credentials: discovery.Credentials{ - Type: discovery.CredentialsTypeStatic, - Static: discovery.StaticTokenCredential{ - Token: "test-token", - }, - }, - }, - }, - "Static ACL token file": { - flags: ConsulFlags{ - Addresses: "consul.address", - ConsulACLFlags: ConsulACLFlags{ - // This is the content of the token that we will - // write to a temp file and expect the config to have this in its contents - TokenFile: "test-token", - }, - }, - expConfig: discovery.Config{ - Addresses: "consul.address", - Credentials: discovery.Credentials{ - Type: discovery.CredentialsTypeStatic, - Static: discovery.StaticTokenCredential{ - Token: "test-token", - }, - }, - }, - }, - "skip server watch to server watch disabled": { - flags: ConsulFlags{ - Addresses: "consul.address", - GRPCPort: 8502, - SkipServerWatch: true, - }, - expConfig: discovery.Config{ - Addresses: "consul.address", - GRPCPort: 8502, - ServerWatchDisabled: true, - }, - }, - } - - for name, c := range cases { - t.Run(name, func(t *testing.T) { - if c.flags.ConsulLogin.AuthMethod != "" { - tokenFile, err := os.CreateTemp("", "") - require.NoError(t, err) - t.Cleanup(func() { - _ = os.RemoveAll(tokenFile.Name()) - }) - _, err = tokenFile.WriteString("bearer-token") - require.NoError(t, err) - c.flags.ConsulLogin.BearerTokenFile = tokenFile.Name() - } else if c.flags.TokenFile != "" { - tokenFile, err := os.CreateTemp("", "") - require.NoError(t, err) - t.Cleanup(func() { - _ = os.RemoveAll(tokenFile.Name()) - }) - _, err = tokenFile.WriteString(c.flags.TokenFile) - require.NoError(t, err) - c.flags.TokenFile = tokenFile.Name() - } - cfg, err := c.flags.ConsulServerConnMgrConfig() - require.NoError(t, err) - require.Equal(t, c.expConfig, cfg) - }) - } -} - -func TestConsulFlags_ConsulServerConnMgrConfig_TLS(t *testing.T) { - caFile, err := os.CreateTemp("", "") - t.Cleanup(func() { - _ = os.RemoveAll(caFile.Name()) - }) - require.NoError(t, 
err) - _, err = caFile.WriteString(testCA) - require.NoError(t, err) - - cases := map[string]struct { - flags ConsulFlags - }{ - "default TLS": { - flags: ConsulFlags{ - Addresses: "consul.address", - ConsulTLSFlags: ConsulTLSFlags{ - UseTLS: true, - }, - }, - }, - "TLS with CA File": { - flags: ConsulFlags{ - Addresses: "consul.address", - ConsulTLSFlags: ConsulTLSFlags{ - UseTLS: true, - CACertFile: caFile.Name(), - }, - }, - }, - "TLS with CA Pem": { - flags: ConsulFlags{ - Addresses: "consul.address", - ConsulTLSFlags: ConsulTLSFlags{ - UseTLS: true, - CACertPEM: testCA, - }, - }, - }, - "TLS server name": { - flags: ConsulFlags{ - Addresses: "consul.address", - ConsulTLSFlags: ConsulTLSFlags{ - UseTLS: true, - TLSServerName: "server.consul", - }, - }, - }, - } - - for name, c := range cases { - t.Run(name, func(t *testing.T) { - cfg, err := c.flags.ConsulServerConnMgrConfig() - require.NoError(t, err) - require.NotNil(t, cfg.TLS) - if c.flags.CACertFile != "" || c.flags.CACertPEM != "" { - require.NotNil(t, cfg.TLS.RootCAs) - } - require.Equal(t, c.flags.TLSServerName, cfg.TLS.ServerName) - }) - } -} - -func TestConsulFlags_ConsulAPIClientConfig(t *testing.T) { - cases := map[string]struct { - flags ConsulFlags - expConfig *api.Config - }{ - "basic config": { - flags: ConsulFlags{ - Namespace: "test-ns", - Partition: "test-partition", - Datacenter: "test-dc", - }, - expConfig: &api.Config{ - Namespace: "test-ns", - Partition: "test-partition", - Datacenter: "test-dc", - Scheme: "http", - }, - }, - "with TLS": { - flags: ConsulFlags{ - ConsulTLSFlags: ConsulTLSFlags{ - UseTLS: true, - }, - }, - expConfig: &api.Config{ - Scheme: "https", - }, - }, - "TLS: infer TLS server name when addresses is not an executable": { - flags: ConsulFlags{ - Addresses: "consul", - ConsulTLSFlags: ConsulTLSFlags{ - UseTLS: true, - }, - }, - expConfig: &api.Config{ - Scheme: "https", - TLSConfig: api.TLSConfig{ - Address: "consul", - }, - }, - }, - "TLS: doesn't infer TLS server name when addresses is an executable": { - flags: ConsulFlags{ - Addresses: "exec=echo 1.1.1.1", - ConsulTLSFlags: ConsulTLSFlags{ - UseTLS: true, - }, - }, - expConfig: &api.Config{ - Scheme: "https", - }, - }, - "TLS CA File provided": { - flags: ConsulFlags{ - ConsulTLSFlags: ConsulTLSFlags{ - UseTLS: true, - CACertFile: "path/to/ca", - }, - }, - expConfig: &api.Config{ - Scheme: "https", - TLSConfig: api.TLSConfig{ - CAFile: "path/to/ca", - }, - }, - }, - "TLS CA PEM provided": { - flags: ConsulFlags{ - ConsulTLSFlags: ConsulTLSFlags{ - UseTLS: true, - CACertPEM: testCA, - }, - }, - expConfig: &api.Config{ - Scheme: "https", - TLSConfig: api.TLSConfig{ - CAPem: []byte(testCA), - }, - }, - }, - "ACL token provided": { - flags: ConsulFlags{ - ConsulACLFlags: ConsulACLFlags{ - Token: "test-token", - }, - }, - expConfig: &api.Config{ - Scheme: "http", - Token: "test-token", - }, - }, - "ACL token file provided": { - flags: ConsulFlags{ - ConsulACLFlags: ConsulACLFlags{ - TokenFile: "/path/to/token", - }, - }, - expConfig: &api.Config{ - Scheme: "http", - TokenFile: "/path/to/token", - }, - }, - } - - for name, c := range cases { - t.Run(name, func(t *testing.T) { - require.Equal(t, c.expConfig, c.flags.ConsulClientConfig().APIClientConfig) - }) - } -} - -const testCA = ` ------BEGIN CERTIFICATE----- -MIIC7TCCApOgAwIBAgIQbHoocPoQq7qR3MTNUXdLVDAKBggqhkjOPQQDAjCBuTEL -MAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRYwFAYDVQQHEw1TYW4gRnJhbmNpc2Nv -MRowGAYDVQQJExExMDEgU2Vjb25kIFN0cmVldDEOMAwGA1UEERMFOTQxMDUxFzAV 
-BgNVBAoTDkhhc2hpQ29ycCBJbmMuMUAwPgYDVQQDEzdDb25zdWwgQWdlbnQgQ0Eg -MTQ0MTkwOTA0MDA4ODQxOTE3MTQzNDM4MjEzMTEzMjA0NjU2OTgwMB4XDTIyMDkx -NjE4NDUwNloXDTI3MDkxNTE4NDUwNlowgbkxCzAJBgNVBAYTAlVTMQswCQYDVQQI -EwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEaMBgGA1UECRMRMTAxIFNlY29u -ZCBTdHJlZXQxDjAMBgNVBBETBTk0MTA1MRcwFQYDVQQKEw5IYXNoaUNvcnAgSW5j -LjFAMD4GA1UEAxM3Q29uc3VsIEFnZW50IENBIDE0NDE5MDkwNDAwODg0MTkxNzE0 -MzQzODIxMzExMzIwNDY1Njk4MDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABA9w -J9aqbpdoVXQLdYTfUpBM2bgElznRYQP/GcNQUtvopvVywPjC7obFuZP1oM7YX7Wy -hGyeudV4pvF1lz9nVeOjezB5MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD -AQH/MCkGA1UdDgQiBCA9dZuoEX3yrbebyEEzsN4L2rr7FJd6FsjIioR6KbMIhTAr -BgNVHSMEJDAigCA9dZuoEX3yrbebyEEzsN4L2rr7FJd6FsjIioR6KbMIhTAKBggq -hkjOPQQDAgNIADBFAiARhJR88w9EXLsq5A932auHvLFAw+uQ0a2TLSaJF54fyAIh -APQczkCoIFiLlGp0GYeHEfjvrdm2g8Q3BUDjeAUfZPaW ------END CERTIFICATE-----` diff --git a/control-plane/subcommand/flags/http.go b/control-plane/subcommand/flags/http.go index 74db3c26dc..c9232b26e2 100644 --- a/control-plane/subcommand/flags/http.go +++ b/control-plane/subcommand/flags/http.go @@ -90,10 +90,6 @@ func (f *HTTPFlags) SetTokenFile(v string) error { return f.tokenFile.Set(v) } -func (f *HTTPFlags) TLSServerName() string { - return f.tlsServerName.String() -} - func (f *HTTPFlags) ReadTokenFile() (string, error) { tokenFile := f.tokenFile.String() if tokenFile == "" { diff --git a/control-plane/subcommand/get-consul-client-ca/command_test.go b/control-plane/subcommand/get-consul-client-ca/command_test.go index 9c48e63712..37eecd5434 100644 --- a/control-plane/subcommand/get-consul-client-ca/command_test.go +++ b/control-plane/subcommand/get-consul-client-ca/command_test.go @@ -76,7 +76,7 @@ func TestRun(t *testing.T) { t.Parallel() outputFile, err := os.CreateTemp("", "ca") require.NoError(t, err) - defer os.RemoveAll(outputFile.Name()) + defer os.Remove(outputFile.Name()) caFile, certFile, keyFile := test.GenerateServerCerts(t) @@ -138,7 +138,7 @@ func TestRun_ConsulServerAvailableLater(t *testing.T) { t.Parallel() outputFile, err := os.CreateTemp("", "ca") require.NoError(t, err) - defer os.RemoveAll(outputFile.Name()) + defer os.Remove(outputFile.Name()) caFile, certFile, keyFile := test.GenerateServerCerts(t) @@ -225,7 +225,7 @@ func TestRun_GetsOnlyActiveRoot(t *testing.T) { t.Parallel() outputFile, err := os.CreateTemp("", "ca") require.NoError(t, err) - defer os.RemoveAll(outputFile.Name()) + defer os.Remove(outputFile.Name()) caFile, certFile, keyFile := test.GenerateServerCerts(t) @@ -308,7 +308,7 @@ func TestRun_WithProvider(t *testing.T) { t.Parallel() outputFile, err := os.CreateTemp("", "ca") require.NoError(t, err) - defer os.RemoveAll(outputFile.Name()) + defer os.Remove(outputFile.Name()) ui := cli.NewMockUi() diff --git a/control-plane/subcommand/inject-connect/command.go b/control-plane/subcommand/inject-connect/command.go index 93566387cd..71e6473297 100644 --- a/control-plane/subcommand/inject-connect/command.go +++ b/control-plane/subcommand/inject-connect/command.go @@ -5,24 +5,19 @@ import ( "errors" "flag" "fmt" + "net/url" "os" - "os/signal" "strconv" "strings" "sync" - "syscall" - apicommon "github.com/hashicorp/consul-k8s/control-plane/api/common" "github.com/hashicorp/consul-k8s/control-plane/api/v1alpha1" - "github.com/hashicorp/consul-k8s/control-plane/connect-inject/controllers/endpoints" - "github.com/hashicorp/consul-k8s/control-plane/connect-inject/controllers/peering" - "github.com/hashicorp/consul-k8s/control-plane/connect-inject/metrics" - 
"github.com/hashicorp/consul-k8s/control-plane/connect-inject/webhook" - "github.com/hashicorp/consul-k8s/control-plane/controller" + connectinject "github.com/hashicorp/consul-k8s/control-plane/connect-inject" + "github.com/hashicorp/consul-k8s/control-plane/consul" mutatingwebhookconfiguration "github.com/hashicorp/consul-k8s/control-plane/helper/mutating-webhook-configuration" "github.com/hashicorp/consul-k8s/control-plane/subcommand/common" "github.com/hashicorp/consul-k8s/control-plane/subcommand/flags" - "github.com/hashicorp/consul-server-connection-manager/discovery" + "github.com/hashicorp/consul/api" "github.com/mitchellh/cli" "go.uber.org/zap/zapcore" corev1 "k8s.io/api/core/v1" @@ -34,7 +29,7 @@ import ( "k8s.io/client-go/rest" "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" - ctrlRuntimeWebhook "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook" ) const WebhookCAFilename = "ca.crt" @@ -46,9 +41,12 @@ type Command struct { flagCertDir string // Directory with TLS certs for listening (PEM) flagDefaultInject bool // True to inject by default flagConsulImage string // Docker image for Consul - flagConsulDataplaneImage string // Docker image for Envoy + flagEnvoyImage string // Docker image for Envoy flagConsulK8sImage string // Docker image for consul-k8s flagACLAuthMethod string // Auth Method to use for ACLs, if enabled + flagWriteServiceDefaults bool // True to enable central config injection + flagDefaultProtocol string // Default protocol for use with central config + flagConsulCACert string // [Deprecated] Path to CA Certificate to use when communicating with Consul clients flagEnvoyExtraArgs string // Extra envoy args when starting envoy flagEnableWebhookCAUpdate bool flagLogLevel string @@ -79,18 +77,27 @@ type Command struct { // Metrics settings. flagDefaultEnableMetrics bool - flagEnableGatewayMetrics bool flagDefaultEnableMetricsMerging bool flagDefaultMergedMetricsPort string flagDefaultPrometheusScrapePort string flagDefaultPrometheusScrapePath string + // Consul sidecar resource settings. + flagDefaultConsulSidecarCPULimit string + flagDefaultConsulSidecarCPURequest string + flagDefaultConsulSidecarMemoryLimit string + flagDefaultConsulSidecarMemoryRequest string + // Init container resource settings. flagInitContainerCPULimit string flagInitContainerCPURequest string flagInitContainerMemoryLimit string flagInitContainerMemoryRequest string + // Server address flags. + flagReadServerExposeService bool + flagTokenServerAddresses []string + // Transparent proxy flags. flagDefaultEnableTransparentProxy bool flagTransparentProxyDefaultOverwriteProbes bool @@ -98,17 +105,9 @@ type Command struct { // CNI flag. flagEnableCNI bool - // Additional metadata to get applied to nodes. - flagNodeMeta map[string]string - // Peering flags. flagEnablePeering bool - // WAN Federation flags. - flagEnableFederation bool - - flagEnableAutoEncrypt bool - // Consul DNS flags. 
flagEnableConsulDNS bool flagResourcePrefix string @@ -116,9 +115,10 @@ type Command struct { flagEnableOpenShift bool flagSet *flag.FlagSet - consul *flags.ConsulFlags + http *flags.HTTPFlags - clientset kubernetes.Interface + consulClient *api.Client + clientset kubernetes.Interface once sync.Once help string @@ -139,23 +139,26 @@ func init() { func (c *Command) init() { c.flagSet = flag.NewFlagSet("", flag.ContinueOnError) c.flagSet.StringVar(&c.flagListen, "listen", ":8080", "Address to bind listener to.") - c.flagSet.Var((*flags.FlagMapValue)(&c.flagNodeMeta), "node-meta", - "Metadata to set on the node, formatted as key=value. This flag may be specified multiple times to set multiple meta fields.") c.flagSet.BoolVar(&c.flagDefaultInject, "default-inject", true, "Inject by default.") c.flagSet.StringVar(&c.flagCertDir, "tls-cert-dir", "", "Directory with PEM-encoded TLS certificate and key to serve.") c.flagSet.StringVar(&c.flagConsulImage, "consul-image", "", "Docker image for Consul.") - c.flagSet.StringVar(&c.flagConsulDataplaneImage, "consul-dataplane-image", "", - "Docker image for Consul Dataplane.") + c.flagSet.StringVar(&c.flagEnvoyImage, "envoy-image", "", + "Docker image for Envoy.") c.flagSet.StringVar(&c.flagConsulK8sImage, "consul-k8s-image", "", "Docker image for consul-k8s. Used for the connect sidecar.") c.flagSet.BoolVar(&c.flagEnablePeering, "enable-peering", false, "Enable cluster peering controllers.") - c.flagSet.BoolVar(&c.flagEnableFederation, "enable-federation", false, "Enable Consul WAN Federation.") c.flagSet.StringVar(&c.flagEnvoyExtraArgs, "envoy-extra-args", "", "Extra envoy command line args to be set when starting envoy (e.g \"--log-level debug --disable-hot-restart\").") c.flagSet.StringVar(&c.flagACLAuthMethod, "acl-auth-method", "", "The name of the Kubernetes Auth Method to use for connectInjection if ACLs are enabled.") + c.flagSet.BoolVar(&c.flagWriteServiceDefaults, "enable-central-config", false, + "Write a service-defaults config for every Connect service using protocol from -default-protocol or Pod annotation.") + c.flagSet.StringVar(&c.flagDefaultProtocol, "default-protocol", "", + "The default protocol to use in central config registrations.") + c.flagSet.StringVar(&c.flagConsulCACert, "consul-ca-cert", "", + "[Deprecated] Please use '-ca-file' flag instead. Path to CA certificate to use if communicating with Consul clients over HTTPS.") c.flagSet.Var((*flags.AppendSliceValue)(&c.flagAllowK8sNamespacesList), "allow-k8s-namespace", "K8s namespaces to explicitly allow. May be specified multiple times.") c.flagSet.Var((*flags.AppendSliceValue)(&c.flagDenyK8sNamespacesList), "deny-k8s-namespace", @@ -190,13 +193,15 @@ func (c *Command) init() { "Indicates that the command runs in an OpenShift cluster.") c.flagSet.BoolVar(&c.flagEnableWebhookCAUpdate, "enable-webhook-ca-update", false, "Enables updating the CABundle on the webhook within this controller rather than using the web cert manager.") - c.flagSet.BoolVar(&c.flagEnableAutoEncrypt, "enable-auto-encrypt", false, - "Indicates whether TLS with auto-encrypt should be used when talking to Consul clients.") c.flagSet.StringVar(&c.flagLogLevel, "log-level", zapcore.InfoLevel.String(), fmt.Sprintf("Log verbosity level. 
Supported values (in order of detail) are "+ "%q, %q, %q, and %q.", zapcore.DebugLevel.String(), zapcore.InfoLevel.String(), zapcore.WarnLevel.String(), zapcore.ErrorLevel.String())) c.flagSet.BoolVar(&c.flagLogJSON, "log-json", false, "Enable or disable JSON output format for logging.") + c.flagSet.BoolVar(&c.flagReadServerExposeService, "read-server-expose-service", false, + "Enables polling the Consul servers' external service for its IP(s).") + c.flagSet.Var((*flags.AppendSliceValue)(&c.flagTokenServerAddresses), "token-server-address", + "An address of the Consul server(s) as saved in the peering token, formatted host:port, where host may be an IP or DNS name and port must be a gRPC port. May be specified multiple times for multiple addresses.") // Proxy sidecar resource setting flags. c.flagSet.StringVar(&c.flagDefaultSidecarProxyCPURequest, "default-sidecar-proxy-cpu-request", "", "Default sidecar proxy CPU request.") @@ -206,7 +211,6 @@ func (c *Command) init() { // Metrics setting flags. c.flagSet.BoolVar(&c.flagDefaultEnableMetrics, "default-enable-metrics", false, "Default for enabling connect service metrics.") - c.flagSet.BoolVar(&c.flagEnableGatewayMetrics, "enable-gateway-metrics", false, "Allows enabling Consul gateway metrics.") c.flagSet.BoolVar(&c.flagDefaultEnableMetricsMerging, "default-enable-metrics-merging", false, "Default for enabling merging of connect service metrics and envoy proxy metrics.") c.flagSet.StringVar(&c.flagDefaultMergedMetricsPort, "default-merged-metrics-port", "20100", "Default port for merged metrics endpoint on the consul-sidecar.") c.flagSet.StringVar(&c.flagDefaultPrometheusScrapePort, "default-prometheus-scrape-port", "20200", "Default port where Prometheus scrapes connect metrics from.") @@ -218,11 +222,16 @@ func (c *Command) init() { c.flagSet.StringVar(&c.flagInitContainerMemoryRequest, "init-container-memory-request", "25Mi", "Init container memory request.") c.flagSet.StringVar(&c.flagInitContainerMemoryLimit, "init-container-memory-limit", "150Mi", "Init container memory limit.") + // Consul sidecar resource setting flags. + c.flagSet.StringVar(&c.flagDefaultConsulSidecarCPURequest, "default-consul-sidecar-cpu-request", "20m", "Default consul sidecar CPU request.") + c.flagSet.StringVar(&c.flagDefaultConsulSidecarCPULimit, "default-consul-sidecar-cpu-limit", "20m", "Default consul sidecar CPU limit.") + c.flagSet.StringVar(&c.flagDefaultConsulSidecarMemoryRequest, "default-consul-sidecar-memory-request", "25Mi", "Default consul sidecar memory request.") + c.flagSet.StringVar(&c.flagDefaultConsulSidecarMemoryLimit, "default-consul-sidecar-memory-limit", "50Mi", "Default consul sidecar memory limit.") c.flagSet.IntVar(&c.flagDefaultEnvoyProxyConcurrency, "default-envoy-proxy-concurrency", 2, "Default Envoy proxy concurrency.") - c.consul = &flags.ConsulFlags{} + c.http = &flags.HTTPFlags{} - flags.Merge(c.flagSet, c.consul.Flags()) + flags.Merge(c.flagSet, c.http.Flags()) // flag.CommandLine is a package level variable representing the default flagSet. The init() function in // "sigs.k8s.io/controller-runtime/pkg/client/config", which is imported by ctrl, registers the flag --kubeconfig to // the default flagSet. That's why we need to merge it to have access with our flagSet. 
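// Editor's sketch (hedged): flags.Merge, used above to fold the HTTP flags
// and the kubeconfig flag into one FlagSet, is assumed to re-register every
// flag from one FlagSet onto another, along these lines:
func mergeFlagSets(dst, src *flag.FlagSet) {
	// Copy each flag of src onto dst so a single FlagSet can parse both.
	src.VisitAll(func(f *flag.Flag) {
		dst.Var(f.Value, f.Name, f.Usage)
	})
}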
@@ -252,7 +261,6 @@ func (c *Command) Run(args []string) int {
 			return 1
 		}
 	}
-
 	if c.flagDefaultSidecarProxyCPULimit != "" {
 		sidecarProxyCPULimit, err = resource.ParseQuantity(c.flagDefaultSidecarProxyCPULimit)
 		if err != nil {
@@ -301,7 +309,7 @@ func (c *Command) Run(args []string) int {
 	}

 	// Validate resource request/limit flags and parse into corev1.ResourceRequirements
-	initResources, err := c.parseAndValidateResourceFlags()
+	initResources, consulSidecarResources, err := c.parseAndValidateResourceFlags()
 	if err != nil {
 		c.UI.Error(err.Error())
 		return 1
@@ -321,73 +329,68 @@ func (c *Command) Run(args []string) int {
 		}
 	}

-	// Convert allow/deny lists to sets.
-	allowK8sNamespaces := flags.ToSet(c.flagAllowK8sNamespacesList)
-	denyK8sNamespaces := flags.ToSet(c.flagDenyK8sNamespacesList)
-
-	zapLogger, err := common.ZapLogger(c.flagLogLevel, c.flagLogJSON)
-	if err != nil {
-		c.UI.Error(fmt.Sprintf("Error setting up logging: %s", err.Error()))
-		return 1
-	}
-	ctrl.SetLogger(zapLogger)
-	klog.SetLogger(zapLogger)
-
-	// TODO (agentless): find a way to integrate zap logger (via having a generic logger interface in connection manager).
-	hcLog, err := common.NamedLogger(c.flagLogLevel, c.flagLogJSON, "consul-server-connection-manager")
-	if err != nil {
-		c.UI.Error(fmt.Sprintf("Error setting up logging: %s", err.Error()))
-		return 1
+	// Create Consul API config object.
+	cfg := api.DefaultConfig()
+	c.http.MergeOntoConfig(cfg)
+	if cfg.TLSConfig.CAFile == "" && c.flagConsulCACert != "" {
+		cfg.TLSConfig.CAFile = c.flagConsulCACert
 	}
-
-	listenSplits := strings.SplitN(c.flagListen, ":", 2)
-	if len(listenSplits) < 2 {
-		c.UI.Error(fmt.Sprintf("missing port in address: %s", c.flagListen))
-		return 1
+	consulURLRaw := cfg.Address
+	// cfg.Address may or may not be prefixed with scheme.
+	if !strings.Contains(cfg.Address, "://") {
+		consulURLRaw = fmt.Sprintf("%s://%s", cfg.Scheme, cfg.Address)
 	}
-	port, err := strconv.Atoi(listenSplits[1])
+	consulURL, err := url.Parse(consulURLRaw)
 	if err != nil {
-		c.UI.Error(fmt.Sprintf("unable to parse port string: %s", err))
+		c.UI.Error(fmt.Sprintf("error parsing consul address %q: %s", consulURLRaw, err))
 		return 1
 	}

-	// Create Consul API config object.
-	consulConfig := c.consul.ConsulClientConfig()
+	// Load CA file contents.
+	var consulCACert []byte
+	if cfg.TLSConfig.CAFile != "" {
+		var err error
+		consulCACert, err = os.ReadFile(cfg.TLSConfig.CAFile)
+		if err != nil {
+			c.UI.Error(fmt.Sprintf("error reading Consul's CA cert file %q: %s", cfg.TLSConfig.CAFile, err))
+			return 1
+		}
+	}

-	var caCertPem []byte
-	if c.consul.CACertFile != "" {
+	// Set up Consul client.
+	if c.consulClient == nil {
 		var err error
-		caCertPem, err = os.ReadFile(c.consul.CACertFile)
+		c.consulClient, err = consul.NewClient(cfg, c.http.ConsulAPITimeout())
 		if err != nil {
-			c.UI.Error(fmt.Sprintf("error reading Consul's CA cert file %q", c.consul.CACertFile))
+			c.UI.Error(fmt.Sprintf("error connecting to Consul agent: %s", err))
 			return 1
 		}
 	}

 	// Create a context to be used by the processes started in this command.
-	ctx, cancelFunc := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
+	ctx, cancelFunc := context.WithCancel(context.Background())
 	defer cancelFunc()

-	// Start Consul server Connection manager.
-	serverConnMgrCfg, err := c.consul.ConsulServerConnMgrConfig()
+	// Convert allow/deny lists to sets.
+	allowK8sNamespaces := flags.ToSet(c.flagAllowK8sNamespacesList)
+	denyK8sNamespaces := flags.ToSet(c.flagDenyK8sNamespacesList)
+
+	zapLogger, err := common.ZapLogger(c.flagLogLevel, c.flagLogJSON)
 	if err != nil {
-		c.UI.Error(fmt.Sprintf("unable to create config for consul-server-connection-manager: %s", err))
+		c.UI.Error(fmt.Sprintf("Error setting up logging: %s", err.Error()))
 		return 1
 	}
-	watcher, err := discovery.NewWatcher(ctx, serverConnMgrCfg, hcLog)
-	if err != nil {
-		c.UI.Error(fmt.Sprintf("unable to create Consul server watcher: %s", err))
+	ctrl.SetLogger(zapLogger)
+	klog.SetLogger(zapLogger)
+
+	listenSplits := strings.SplitN(c.flagListen, ":", 2)
+	if len(listenSplits) < 2 {
+		c.UI.Error(fmt.Sprintf("missing port in address: %s", c.flagListen))
 		return 1
 	}
-
-	go watcher.Run()
-	defer watcher.Stop()
-
-	// This is a blocking command that is run in order to ensure we only start the
-	// connect-inject controllers only after we have access to the Consul server.
-	_, err = watcher.State()
+	port, err := strconv.Atoi(listenSplits[1])
 	if err != nil {
-		c.UI.Error(fmt.Sprintf("unable to start Consul server watcher: %s", err))
+		c.UI.Error(fmt.Sprintf("unable to parse port string: %s", err))
 		return 1
 	}

@@ -406,22 +409,23 @@ func (c *Command) Run(args []string) int {
 		return 1
 	}

-	metricsConfig := metrics.Config{
+	metricsConfig := connectinject.MetricsConfig{
 		DefaultEnableMetrics:        c.flagDefaultEnableMetrics,
-		EnableGatewayMetrics:        c.flagEnableGatewayMetrics,
 		DefaultEnableMetricsMerging: c.flagDefaultEnableMetricsMerging,
 		DefaultMergedMetricsPort:    c.flagDefaultMergedMetricsPort,
 		DefaultPrometheusScrapePort: c.flagDefaultPrometheusScrapePort,
 		DefaultPrometheusScrapePath: c.flagDefaultPrometheusScrapePath,
 	}

-	if err = (&endpoints.Controller{
+	if err = (&connectinject.EndpointsController{
 		Client:                     mgr.GetClient(),
-		ConsulClientConfig:         consulConfig,
-		ConsulServerConnMgr:        watcher,
+		ConsulClient:               c.consulClient,
+		ConsulScheme:               consulURL.Scheme,
+		ConsulPort:                 consulURL.Port(),
 		AllowK8sNamespacesSet:      allowK8sNamespaces,
 		DenyK8sNamespacesSet:       denyK8sNamespaces,
 		MetricsConfig:              metricsConfig,
+		ConsulClientCfg:            cfg,
 		EnableConsulPartitions:     c.flagEnablePartitions,
 		EnableConsulNamespaces:     c.flagEnableNamespaces,
 		ConsulDestinationNamespace: c.flagConsulDestinationNamespace,
@@ -429,290 +433,114 @@ func (c *Command) Run(args []string) int {
 		NSMirroringPrefix:          c.flagK8SNSMirroringPrefix,
 		CrossNSACLPolicy:           c.flagCrossNamespaceACLPolicy,
 		EnableTransparentProxy:     c.flagDefaultEnableTransparentProxy,
-		EnableWANFederation:        c.flagEnableFederation,
 		TProxyOverwriteProbes:      c.flagTransparentProxyDefaultOverwriteProbes,
 		AuthMethod:                 c.flagACLAuthMethod,
-		NodeMeta:                   c.flagNodeMeta,
 		Log:                        ctrl.Log.WithName("controller").WithName("endpoints"),
 		Scheme:                     mgr.GetScheme(),
 		ReleaseName:                c.flagReleaseName,
 		ReleaseNamespace:           c.flagReleaseNamespace,
-		EnableAutoEncrypt:          c.flagEnableAutoEncrypt,
 		Context:                    ctx,
+		ConsulAPITimeout:           c.http.ConsulAPITimeout(),
 	}).SetupWithManager(mgr); err != nil {
-		setupLog.Error(err, "unable to create controller", "controller", endpoints.Controller{})
-		return 1
-	}
-
-	consulMeta := apicommon.ConsulMeta{
-		PartitionsEnabled:    c.flagEnablePartitions,
-		Partition:            c.consul.Partition,
-		NamespacesEnabled:    c.flagEnableNamespaces,
-		DestinationNamespace: c.flagConsulDestinationNamespace,
-		Mirroring:            c.flagEnableK8SNSMirroring,
-		Prefix:               c.flagK8SNSMirroringPrefix,
-	}
-
-	configEntryReconciler := &controller.ConfigEntryController{
-		ConsulClientConfig:         c.consul.ConsulClientConfig(),
-		ConsulServerConnMgr:        watcher,
-		DatacenterName:             c.consul.Datacenter,
-		EnableConsulNamespaces:     c.flagEnableNamespaces,
-		ConsulDestinationNamespace: c.flagConsulDestinationNamespace,
-		EnableNSMirroring:          c.flagEnableK8SNSMirroring,
-		NSMirroringPrefix:          c.flagK8SNSMirroringPrefix,
-		CrossNSACLPolicy:           c.flagCrossNamespaceACLPolicy,
-	}
-	if err = (&controller.ServiceDefaultsController{
-		ConfigEntryController: configEntryReconciler,
-		Client:                mgr.GetClient(),
-		Log:                   ctrl.Log.WithName("controller").WithName(apicommon.ServiceDefaults),
-		Scheme:                mgr.GetScheme(),
-	}).SetupWithManager(mgr); err != nil {
-		setupLog.Error(err, "unable to create controller", "controller", apicommon.ServiceDefaults)
-		return 1
-	}
-	if err = (&controller.ServiceResolverController{
-		ConfigEntryController: configEntryReconciler,
-		Client:                mgr.GetClient(),
-		Log:                   ctrl.Log.WithName("controller").WithName(apicommon.ServiceResolver),
-		Scheme:                mgr.GetScheme(),
-	}).SetupWithManager(mgr); err != nil {
-		setupLog.Error(err, "unable to create controller", "controller", apicommon.ServiceResolver)
-		return 1
-	}
-	if err = (&controller.ProxyDefaultsController{
-		ConfigEntryController: configEntryReconciler,
-		Client:                mgr.GetClient(),
-		Log:                   ctrl.Log.WithName("controller").WithName(apicommon.ProxyDefaults),
-		Scheme:                mgr.GetScheme(),
-	}).SetupWithManager(mgr); err != nil {
-		setupLog.Error(err, "unable to create controller", "controller", apicommon.ProxyDefaults)
-		return 1
-	}
-	if err = (&controller.MeshController{
-		ConfigEntryController: configEntryReconciler,
-		Client:                mgr.GetClient(),
-		Log:                   ctrl.Log.WithName("controller").WithName(apicommon.Mesh),
-		Scheme:                mgr.GetScheme(),
-	}).SetupWithManager(mgr); err != nil {
-		setupLog.Error(err, "unable to create controller", "controller", apicommon.Mesh)
-		return 1
-	}
-	if err = (&controller.ExportedServicesController{
-		ConfigEntryController: configEntryReconciler,
-		Client:                mgr.GetClient(),
-		Log:                   ctrl.Log.WithName("controller").WithName(apicommon.ExportedServices),
-		Scheme:                mgr.GetScheme(),
-	}).SetupWithManager(mgr); err != nil {
-		setupLog.Error(err, "unable to create controller", "controller", apicommon.ExportedServices)
-		return 1
-	}
-	if err = (&controller.ServiceRouterController{
-		ConfigEntryController: configEntryReconciler,
-		Client:                mgr.GetClient(),
-		Log:                   ctrl.Log.WithName("controller").WithName(apicommon.ServiceRouter),
-		Scheme:                mgr.GetScheme(),
-	}).SetupWithManager(mgr); err != nil {
-		setupLog.Error(err, "unable to create controller", "controller", apicommon.ServiceRouter)
-		return 1
-	}
-	if err = (&controller.ServiceSplitterController{
-		ConfigEntryController: configEntryReconciler,
-		Client:                mgr.GetClient(),
-		Log:                   ctrl.Log.WithName("controller").WithName(apicommon.ServiceSplitter),
-		Scheme:                mgr.GetScheme(),
-	}).SetupWithManager(mgr); err != nil {
-		setupLog.Error(err, "unable to create controller", "controller", apicommon.ServiceSplitter)
-		return 1
-	}
-	if err = (&controller.ServiceIntentionsController{
-		ConfigEntryController: configEntryReconciler,
-		Client:                mgr.GetClient(),
-		Log:                   ctrl.Log.WithName("controller").WithName(apicommon.ServiceIntentions),
-		Scheme:                mgr.GetScheme(),
-	}).SetupWithManager(mgr); err != nil {
-		setupLog.Error(err, "unable to create controller", "controller", apicommon.ServiceIntentions)
-		return 1
-	}
-	if err = (&controller.IngressGatewayController{
-		ConfigEntryController: configEntryReconciler,
-		Client:                mgr.GetClient(),
-		Log:                   ctrl.Log.WithName("controller").WithName(apicommon.IngressGateway),
-		Scheme:                mgr.GetScheme(),
-	}).SetupWithManager(mgr); err != nil {
-		setupLog.Error(err, "unable to create controller", "controller", apicommon.IngressGateway)
-		return 1
-	}
-	if err = (&controller.TerminatingGatewayController{
-		ConfigEntryController: configEntryReconciler,
-		Client:                mgr.GetClient(),
-		Log:                   ctrl.Log.WithName("controller").WithName(apicommon.TerminatingGateway),
-		Scheme:                mgr.GetScheme(),
-	}).SetupWithManager(mgr); err != nil {
-		setupLog.Error(err, "unable to create controller", "controller", apicommon.TerminatingGateway)
+		setupLog.Error(err, "unable to create controller", "controller", connectinject.EndpointsController{})
 		return 1
 	}

-	if err = mgr.AddReadyzCheck("ready", webhook.ReadinessCheck{CertDir: c.flagCertDir}.Ready); err != nil {
-		setupLog.Error(err, "unable to create readiness check", "controller", endpoints.Controller{})
+	if err = mgr.AddReadyzCheck("ready", connectinject.ReadinessCheck{CertDir: c.flagCertDir}.Ready); err != nil {
+		setupLog.Error(err, "unable to create readiness check", "controller", connectinject.EndpointsController{})
 		return 1
 	}

 	if c.flagEnablePeering {
-		if err = (&peering.AcceptorController{
-			Client:                   mgr.GetClient(),
-			ConsulClientConfig:       consulConfig,
-			ConsulServerConnMgr:      watcher,
-			ExposeServersServiceName: c.flagResourcePrefix + "-expose-servers",
-			ReleaseNamespace:         c.flagReleaseNamespace,
-			Log:                      ctrl.Log.WithName("controller").WithName("peering-acceptor"),
-			Scheme:                   mgr.GetScheme(),
-			Context:                  ctx,
+		if err = (&connectinject.PeeringAcceptorController{
+			Client:                    mgr.GetClient(),
+			ConsulClient:              c.consulClient,
+			ExposeServersServiceName:  c.flagResourcePrefix + "-expose-servers",
+			ReadServerExternalService: c.flagReadServerExposeService,
+			TokenServerAddresses:      c.flagTokenServerAddresses,
+			ReleaseNamespace:          c.flagReleaseNamespace,
+			Log:                       ctrl.Log.WithName("controller").WithName("peering-acceptor"),
+			Scheme:                    mgr.GetScheme(),
+			Context:                   ctx,
 		}).SetupWithManager(mgr); err != nil {
 			setupLog.Error(err, "unable to create controller", "controller", "peering-acceptor")
 			return 1
 		}
-		if err = (&peering.PeeringDialerController{
-			Client:              mgr.GetClient(),
-			ConsulClientConfig:  consulConfig,
-			ConsulServerConnMgr: watcher,
-			Log:                 ctrl.Log.WithName("controller").WithName("peering-dialer"),
-			Scheme:              mgr.GetScheme(),
-			Context:             ctx,
+		if err = (&connectinject.PeeringDialerController{
+			Client:       mgr.GetClient(),
+			ConsulClient: c.consulClient,
+			Log:          ctrl.Log.WithName("controller").WithName("peering-dialer"),
+			Scheme:       mgr.GetScheme(),
+			Context:      ctx,
 		}).SetupWithManager(mgr); err != nil {
 			setupLog.Error(err, "unable to create controller", "controller", "peering-dialer")
 			return 1
 		}
 		mgr.GetWebhookServer().Register("/mutate-v1alpha1-peeringacceptors",
-			&ctrlRuntimeWebhook.Admission{Handler: &v1alpha1.PeeringAcceptorWebhook{
-				Client: mgr.GetClient(),
-				Logger: ctrl.Log.WithName("webhooks").WithName("peering-acceptor"),
+			&webhook.Admission{Handler: &v1alpha1.PeeringAcceptorWebhook{
+				Client:       mgr.GetClient(),
+				ConsulClient: c.consulClient,
+				Logger:       ctrl.Log.WithName("webhooks").WithName("peering-acceptor"),
 			}})
 		mgr.GetWebhookServer().Register("/mutate-v1alpha1-peeringdialers",
-			&ctrlRuntimeWebhook.Admission{Handler: &v1alpha1.PeeringDialerWebhook{
-				Client: mgr.GetClient(),
-				Logger: ctrl.Log.WithName("webhooks").WithName("peering-dialer"),
+			&webhook.Admission{Handler: &v1alpha1.PeeringDialerWebhook{
+				Client:       mgr.GetClient(),
+				ConsulClient: c.consulClient,
+				Logger:       ctrl.Log.WithName("webhooks").WithName("peering-dialer"),
 			}})
 	}
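
With the server connection manager gone, the restored Run above derives the endpoints controller's scheme and port from cfg.Address and validates the -listen value by hand. A small self-contained sketch of both steps, assuming only the standard library (the function names here are illustrative, not from the codebase):

package main

import (
	"fmt"
	"net/url"
	"strconv"
	"strings"
)

// normalizeConsulAddr prepends a scheme when the configured address lacks
// one, mirroring the cfg.Address handling in Run above.
func normalizeConsulAddr(scheme, address string) (*url.URL, error) {
	raw := address
	if !strings.Contains(address, "://") {
		raw = fmt.Sprintf("%s://%s", scheme, address)
	}
	return url.Parse(raw)
}

// parseListenPort validates a -listen value of the form [host]:port.
func parseListenPort(listen string) (int, error) {
	parts := strings.SplitN(listen, ":", 2)
	if len(parts) < 2 {
		return 0, fmt.Errorf("missing port in address: %s", listen)
	}
	return strconv.Atoi(parts[1])
}

func main() {
	u, err := normalizeConsulAddr("https", "consul.example.com:8501")
	if err != nil {
		panic(err)
	}
	port, err := parseListenPort(":8080")
	if err != nil {
		panic(err)
	}
	fmt.Println(u.Scheme, u.Port(), port) // https 8501 8080
}
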
 	mgr.GetWebhookServer().CertDir = c.flagCertDir

 	mgr.GetWebhookServer().Register("/mutate",
-		&ctrlRuntimeWebhook.Admission{Handler: &webhook.MeshWebhook{
-			Clientset:                    c.clientset,
-			ReleaseNamespace:             c.flagReleaseNamespace,
-			ConsulConfig:                 consulConfig,
-			ConsulServerConnMgr:          watcher,
-			ImageConsul:                  c.flagConsulImage,
-			ImageConsulDataplane:         c.flagConsulDataplaneImage,
-			EnvoyExtraArgs:               c.flagEnvoyExtraArgs,
-			ImageConsulK8S:               c.flagConsulK8sImage,
-			RequireAnnotation:            !c.flagDefaultInject,
-			AuthMethod:                   c.flagACLAuthMethod,
-			ConsulCACert:                 string(caCertPem),
-			TLSEnabled:                   c.consul.UseTLS,
-			ConsulAddress:                c.consul.Addresses,
-			SkipServerWatch:              c.consul.SkipServerWatch,
-			ConsulTLSServerName:          c.consul.TLSServerName,
-			DefaultProxyCPURequest:       sidecarProxyCPURequest,
-			DefaultProxyCPULimit:         sidecarProxyCPULimit,
-			DefaultProxyMemoryRequest:    sidecarProxyMemoryRequest,
-			DefaultProxyMemoryLimit:      sidecarProxyMemoryLimit,
-			DefaultEnvoyProxyConcurrency: c.flagDefaultEnvoyProxyConcurrency,
-			MetricsConfig:                metricsConfig,
-			InitContainerResources:       initResources,
-			ConsulPartition:              c.consul.Partition,
-			AllowK8sNamespacesSet:        allowK8sNamespaces,
-			DenyK8sNamespacesSet:         denyK8sNamespaces,
-			EnableNamespaces:             c.flagEnableNamespaces,
-			ConsulDestinationNamespace:   c.flagConsulDestinationNamespace,
-			EnableK8SNSMirroring:         c.flagEnableK8SNSMirroring,
-			K8SNSMirroringPrefix:         c.flagK8SNSMirroringPrefix,
-			CrossNamespaceACLPolicy:      c.flagCrossNamespaceACLPolicy,
-			EnableTransparentProxy:       c.flagDefaultEnableTransparentProxy,
-			EnableCNI:                    c.flagEnableCNI,
-			TProxyOverwriteProbes:        c.flagTransparentProxyDefaultOverwriteProbes,
-			EnableConsulDNS:              c.flagEnableConsulDNS,
-			EnableOpenShift:              c.flagEnableOpenShift,
-			Log:                          ctrl.Log.WithName("handler").WithName("connect"),
-			LogLevel:                     c.flagLogLevel,
-			LogJSON:                      c.flagLogJSON,
-		}})
-
-	// Note: The path here should be identical to the one on the kubebuilder
-	// annotation in each webhook file.
-	mgr.GetWebhookServer().Register("/mutate-v1alpha1-servicedefaults",
-		&ctrlRuntimeWebhook.Admission{Handler: &v1alpha1.ServiceDefaultsWebhook{
-			Client:     mgr.GetClient(),
-			Logger:     ctrl.Log.WithName("webhooks").WithName(apicommon.ServiceDefaults),
-			ConsulMeta: consulMeta,
-		}})
-	mgr.GetWebhookServer().Register("/mutate-v1alpha1-serviceresolver",
-		&ctrlRuntimeWebhook.Admission{Handler: &v1alpha1.ServiceResolverWebhook{
-			Client:     mgr.GetClient(),
-			Logger:     ctrl.Log.WithName("webhooks").WithName(apicommon.ServiceResolver),
-			ConsulMeta: consulMeta,
-		}})
-	mgr.GetWebhookServer().Register("/mutate-v1alpha1-proxydefaults",
-		&ctrlRuntimeWebhook.Admission{Handler: &v1alpha1.ProxyDefaultsWebhook{
-			Client:     mgr.GetClient(),
-			Logger:     ctrl.Log.WithName("webhooks").WithName(apicommon.ProxyDefaults),
-			ConsulMeta: consulMeta,
-		}})
-	mgr.GetWebhookServer().Register("/mutate-v1alpha1-mesh",
-		&ctrlRuntimeWebhook.Admission{Handler: &v1alpha1.MeshWebhook{
-			Client:     mgr.GetClient(),
-			Logger:     ctrl.Log.WithName("webhooks").WithName(apicommon.Mesh),
-			ConsulMeta: consulMeta,
-		}})
-	mgr.GetWebhookServer().Register("/mutate-v1alpha1-exportedservices",
-		&ctrlRuntimeWebhook.Admission{Handler: &v1alpha1.ExportedServicesWebhook{
-			Client:     mgr.GetClient(),
-			Logger:     ctrl.Log.WithName("webhooks").WithName(apicommon.ExportedServices),
-			ConsulMeta: consulMeta,
-		}})
-	mgr.GetWebhookServer().Register("/mutate-v1alpha1-servicerouter",
-		&ctrlRuntimeWebhook.Admission{Handler: &v1alpha1.ServiceRouterWebhook{
-			Client:     mgr.GetClient(),
-			Logger:     ctrl.Log.WithName("webhooks").WithName(apicommon.ServiceRouter),
-			ConsulMeta: consulMeta,
-		}})
-	mgr.GetWebhookServer().Register("/mutate-v1alpha1-servicesplitter",
-		&ctrlRuntimeWebhook.Admission{Handler: &v1alpha1.ServiceSplitterWebhook{
-			Client:     mgr.GetClient(),
-			Logger:     ctrl.Log.WithName("webhooks").WithName(apicommon.ServiceSplitter),
-			ConsulMeta: consulMeta,
-		}})
-	mgr.GetWebhookServer().Register("/mutate-v1alpha1-serviceintentions",
-		&ctrlRuntimeWebhook.Admission{Handler: &v1alpha1.ServiceIntentionsWebhook{
-			Client:     mgr.GetClient(),
-			Logger:     ctrl.Log.WithName("webhooks").WithName(apicommon.ServiceIntentions),
-			ConsulMeta: consulMeta,
-		}})
-	mgr.GetWebhookServer().Register("/mutate-v1alpha1-ingressgateway",
-		&ctrlRuntimeWebhook.Admission{Handler: &v1alpha1.IngressGatewayWebhook{
-			Client:     mgr.GetClient(),
-			Logger:     ctrl.Log.WithName("webhooks").WithName(apicommon.IngressGateway),
-			ConsulMeta: consulMeta,
-		}})
-	mgr.GetWebhookServer().Register("/mutate-v1alpha1-terminatinggateway",
-		&ctrlRuntimeWebhook.Admission{Handler: &v1alpha1.TerminatingGatewayWebhook{
-			Client:     mgr.GetClient(),
-			Logger:     ctrl.Log.WithName("webhooks").WithName(apicommon.TerminatingGateway),
-			ConsulMeta: consulMeta,
+		&webhook.Admission{Handler: &connectinject.MeshWebhook{
+			Clientset:                     c.clientset,
+			ConsulClient:                  c.consulClient,
+			ImageConsul:                   c.flagConsulImage,
+			ImageEnvoy:                    c.flagEnvoyImage,
+			EnvoyExtraArgs:                c.flagEnvoyExtraArgs,
+			ImageConsulK8S:                c.flagConsulK8sImage,
+			RequireAnnotation:             !c.flagDefaultInject,
+			AuthMethod:                    c.flagACLAuthMethod,
+			ConsulCACert:                  string(consulCACert),
+			DefaultProxyCPURequest:        sidecarProxyCPURequest,
+			DefaultProxyCPULimit:          sidecarProxyCPULimit,
+			DefaultProxyMemoryRequest:     sidecarProxyMemoryRequest,
+			DefaultProxyMemoryLimit:       sidecarProxyMemoryLimit,
+			DefaultEnvoyProxyConcurrency:  c.flagDefaultEnvoyProxyConcurrency,
+			MetricsConfig:                 metricsConfig,
+			InitContainerResources:        initResources,
+			DefaultConsulSidecarResources: consulSidecarResources,
+			ConsulPartition:               c.http.Partition(),
+			AllowK8sNamespacesSet:         allowK8sNamespaces,
+			DenyK8sNamespacesSet:          denyK8sNamespaces,
+			EnableNamespaces:              c.flagEnableNamespaces,
+			ConsulDestinationNamespace:    c.flagConsulDestinationNamespace,
+			EnableK8SNSMirroring:          c.flagEnableK8SNSMirroring,
+			K8SNSMirroringPrefix:          c.flagK8SNSMirroringPrefix,
+			CrossNamespaceACLPolicy:       c.flagCrossNamespaceACLPolicy,
+			EnableTransparentProxy:        c.flagDefaultEnableTransparentProxy,
+			EnableCNI:                     c.flagEnableCNI,
+			TProxyOverwriteProbes:         c.flagTransparentProxyDefaultOverwriteProbes,
+			EnableConsulDNS:               c.flagEnableConsulDNS,
+			ResourcePrefix:                c.flagResourcePrefix,
+			EnableOpenShift:               c.flagEnableOpenShift,
+			Log:                           ctrl.Log.WithName("handler").WithName("connect"),
+			LogLevel:                      c.flagLogLevel,
+			LogJSON:                       c.flagLogJSON,
+			ConsulAPITimeout:              c.http.ConsulAPITimeout(),
 		}})

 	if c.flagEnableWebhookCAUpdate {
-		err = c.updateWebhookCABundle(ctx)
+		err := c.updateWebhookCABundle(ctx)
 		if err != nil {
 			setupLog.Error(err, "problem getting CA Cert")
 			return 1
 		}
 	}

-	if err = mgr.Start(ctx); err != nil {
+	if err := mgr.Start(ctx); err != nil {
 		setupLog.Error(err, "problem running manager")
 		return 1
 	}
@@ -733,66 +561,74 @@ func (c *Command) updateWebhookCABundle(ctx context.Context) error {
 	}
 	return nil
 }
-
 func (c *Command) validateFlags() error {
 	if c.flagConsulK8sImage == "" {
 		return errors.New("-consul-k8s-image must be set")
+	}
 	if c.flagConsulImage == "" {
 		return errors.New("-consul-image must be set")
 	}
-	if c.flagConsulDataplaneImage == "" {
-		return errors.New("-consul-dataplane-image must be set")
+	if c.flagEnvoyImage == "" {
+		return errors.New("-envoy-image must be set")
+	}
+	if c.flagWriteServiceDefaults {
+		return errors.New("-enable-central-config is no longer supported")
+	}
+	if c.flagDefaultProtocol != "" {
+		return errors.New("-default-protocol is no longer supported")
 	}
-	if c.flagEnablePartitions && c.consul.Partition == "" {
-		return errors.New("-partition must set if -enable-partitions is set to 'true'")
+	if c.flagEnablePartitions && c.http.Partition() == "" {
+		return errors.New("-partition-name must be set if -enable-partitions is set to 'true'")
 	}
-	if c.consul.Partition != "" && !c.flagEnablePartitions {
-		return errors.New("-enable-partitions must be set to 'true' if -partition is set")
+	if c.http.Partition() != "" && !c.flagEnablePartitions {
+		return errors.New("-enable-partitions must be set to 'true' if -partition-name is set")
 	}
 	if c.flagDefaultEnvoyProxyConcurrency < 0 {
 		return errors.New("-default-envoy-proxy-concurrency must be >= 0 if set")
 	}
+	if c.http.ConsulAPITimeout() <= 0 {
+		return errors.New("-consul-api-timeout must be set to a value greater than 0")
+	}
 	return nil
 }
-
-func (c *Command) parseAndValidateResourceFlags() (corev1.ResourceRequirements, error) {
+func (c *Command) parseAndValidateResourceFlags() (corev1.ResourceRequirements, corev1.ResourceRequirements, error) {
 	// Init container
 	var initContainerCPULimit, initContainerCPURequest, initContainerMemoryLimit, initContainerMemoryRequest resource.Quantity

 	// Parse and validate the initContainer resources.
 	initContainerCPURequest, err := resource.ParseQuantity(c.flagInitContainerCPURequest)
 	if err != nil {
-		return corev1.ResourceRequirements{},
+		return corev1.ResourceRequirements{}, corev1.ResourceRequirements{},
 			fmt.Errorf("-init-container-cpu-request '%s' is invalid: %s", c.flagInitContainerCPURequest, err)
 	}
 	initContainerCPULimit, err = resource.ParseQuantity(c.flagInitContainerCPULimit)
 	if err != nil {
-		return corev1.ResourceRequirements{},
+		return corev1.ResourceRequirements{}, corev1.ResourceRequirements{},
 			fmt.Errorf("-init-container-cpu-limit '%s' is invalid: %s", c.flagInitContainerCPULimit, err)
 	}
 	if initContainerCPULimit.Value() != 0 && initContainerCPURequest.Cmp(initContainerCPULimit) > 0 {
-		return corev1.ResourceRequirements{}, fmt.Errorf(
+		return corev1.ResourceRequirements{}, corev1.ResourceRequirements{}, fmt.Errorf(
 			"request must be <= limit: -init-container-cpu-request value of %q is greater than the -init-container-cpu-limit value of %q",
 			c.flagInitContainerCPURequest, c.flagInitContainerCPULimit)
 	}

 	initContainerMemoryRequest, err = resource.ParseQuantity(c.flagInitContainerMemoryRequest)
 	if err != nil {
-		return corev1.ResourceRequirements{},
+		return corev1.ResourceRequirements{}, corev1.ResourceRequirements{},
 			fmt.Errorf("-init-container-memory-request '%s' is invalid: %s", c.flagInitContainerMemoryRequest, err)
 	}
 	initContainerMemoryLimit, err = resource.ParseQuantity(c.flagInitContainerMemoryLimit)
 	if err != nil {
-		return corev1.ResourceRequirements{},
+		return corev1.ResourceRequirements{}, corev1.ResourceRequirements{},
 			fmt.Errorf("-init-container-memory-limit '%s' is invalid: %s", c.flagInitContainerMemoryLimit, err)
 	}
 	if initContainerMemoryLimit.Value() != 0 && initContainerMemoryRequest.Cmp(initContainerMemoryLimit) > 0 {
-		return corev1.ResourceRequirements{}, fmt.Errorf(
+		return corev1.ResourceRequirements{}, corev1.ResourceRequirements{}, fmt.Errorf(
 			"request must be <= limit: -init-container-memory-request value of %q is greater than the -init-container-memory-limit value of %q",
 			c.flagInitContainerMemoryRequest, c.flagInitContainerMemoryLimit)
 	}
@@ -809,7 +645,55 @@ func (c *Command) parseAndValidateResourceFlags() (corev1.ResourceRequirements,
 		},
 	}

-	return initResources, nil
+	// Consul sidecar
+	var consulSidecarCPULimit, consulSidecarCPURequest, consulSidecarMemoryLimit, consulSidecarMemoryRequest resource.Quantity
+
+	// Parse and validate the Consul sidecar resources
+	consulSidecarCPURequest, err = resource.ParseQuantity(c.flagDefaultConsulSidecarCPURequest)
+	if err != nil {
+		return corev1.ResourceRequirements{}, corev1.ResourceRequirements{},
+			fmt.Errorf("-default-consul-sidecar-cpu-request '%s' is invalid: %s", c.flagDefaultConsulSidecarCPURequest, err)
+	}
+	consulSidecarCPULimit, err = resource.ParseQuantity(c.flagDefaultConsulSidecarCPULimit)
+	if err != nil {
+		return corev1.ResourceRequirements{}, corev1.ResourceRequirements{},
+			fmt.Errorf("-default-consul-sidecar-cpu-limit '%s' is invalid: %s", c.flagDefaultConsulSidecarCPULimit, err)
+	}
+	if consulSidecarCPULimit.Value() != 0 && consulSidecarCPURequest.Cmp(consulSidecarCPULimit) > 0 {
+		return corev1.ResourceRequirements{}, corev1.ResourceRequirements{}, fmt.Errorf(
+			"request must be <= limit: -default-consul-sidecar-cpu-request value of %q is greater than the -default-consul-sidecar-cpu-limit value of %q",
+			c.flagDefaultConsulSidecarCPURequest, c.flagDefaultConsulSidecarCPULimit)
+	}
+
+	consulSidecarMemoryRequest, err = resource.ParseQuantity(c.flagDefaultConsulSidecarMemoryRequest)
+	if err != nil {
+		return corev1.ResourceRequirements{}, corev1.ResourceRequirements{},
+			fmt.Errorf("-default-consul-sidecar-memory-request '%s' is invalid: %s", c.flagDefaultConsulSidecarMemoryRequest, err)
+	}
+	consulSidecarMemoryLimit, err = resource.ParseQuantity(c.flagDefaultConsulSidecarMemoryLimit)
+	if err != nil {
+		return corev1.ResourceRequirements{}, corev1.ResourceRequirements{},
+			fmt.Errorf("-default-consul-sidecar-memory-limit '%s' is invalid: %s", c.flagDefaultConsulSidecarMemoryLimit, err)
+	}
+	if consulSidecarMemoryLimit.Value() != 0 && consulSidecarMemoryRequest.Cmp(consulSidecarMemoryLimit) > 0 {
+		return corev1.ResourceRequirements{}, corev1.ResourceRequirements{}, fmt.Errorf(
+			"request must be <= limit: -default-consul-sidecar-memory-request value of %q is greater than the -default-consul-sidecar-memory-limit value of %q",
+			c.flagDefaultConsulSidecarMemoryRequest, c.flagDefaultConsulSidecarMemoryLimit)
+	}
+
+	// Put into corev1.ResourceRequirements form
+	consulSidecarResources := corev1.ResourceRequirements{
+		Requests: corev1.ResourceList{
+			corev1.ResourceCPU:    consulSidecarCPURequest,
+			corev1.ResourceMemory: consulSidecarMemoryRequest,
+		},
+		Limits: corev1.ResourceList{
+			corev1.ResourceCPU:    consulSidecarCPULimit,
+			corev1.ResourceMemory: consulSidecarMemoryLimit,
+		},
+	}
+
+	return initResources, consulSidecarResources, nil
 }

 func (c *Command) Synopsis() string { return synopsis }
@@ -818,12 +702,11 @@ func (c *Command) Help() string {
 	return c.help
 }

-const (
-	synopsis = "Inject the proxy sidecar, run endpoints controller and peering controllers."
-	help     = `
+const synopsis = "Inject Connect proxy sidecar."
+const help = `
 Usage: consul-k8s-control-plane inject-connect [options]

-  Run the admission webhook server for injecting the sidecar proxy,
-  the endpoints controller, and the peering controllers.
+  Run the admission webhook server for injecting the Consul Connect
+  proxy sidecar. The sidecar uses Envoy by default.
+
 `
-)
diff --git a/control-plane/subcommand/inject-connect/command_test.go b/control-plane/subcommand/inject-connect/command_test.go
index 5f067cf7c2..93fff95a94 100644
--- a/control-plane/subcommand/inject-connect/command_test.go
+++ b/control-plane/subcommand/inject-connect/command_test.go
@@ -1,8 +1,10 @@
 package connectinject

 import (
+	"os"
 	"testing"

+	"github.com/hashicorp/consul/api"
 	"github.com/mitchellh/cli"
 	"github.com/stretchr/testify/require"
 	"k8s.io/client-go/kubernetes/fake"
@@ -23,109 +25,161 @@ func TestRun_FlagValidation(t *testing.T) {
 		},
 		{
 			flags:  []string{"-consul-k8s-image", "foo", "-consul-image", "foo"},
-			expErr: "-consul-dataplane-image must be set",
+			expErr: "-envoy-image must be set",
 		},
 		{
-			flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-consul-dataplane-image", "consul-dataplane:1.14.0",
-				"-log-level", "invalid"},
+			flags:  []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0"},
+			expErr: "-consul-api-timeout must be set to a value greater than 0",
+		},
+		{
+			flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0",
+				"-consul-api-timeout", "5s", "-log-level", "invalid"},
 			expErr: "unknown log level \"invalid\": unrecognized level: \"invalid\"",
 		},
 		{
-			flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-consul-dataplane-image", "consul-dataplane:1.14.0",
-				"-ca-cert-file", "bar"},
+			flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0",
+				"-consul-api-timeout", "5s", "-enable-central-config", "true"},
+			expErr: "-enable-central-config is no longer supported",
+		},
+		{
+			flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0",
+				"-consul-api-timeout", "5s", "-default-protocol", "http"},
+			expErr: "-default-protocol is no longer supported",
+		},
+		{
+			flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0",
+				"-consul-api-timeout", "5s", "-ca-file", "bar"},
 			expErr: "error reading Consul's CA cert file \"bar\"",
 		},
 		{
-			flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-consul-dataplane-image", "consul-dataplane:1.14.0",
-				"-enable-partitions", "true"},
-			expErr: "-partition must set if -enable-partitions is set to 'true'",
+			flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0",
+				"-consul-api-timeout", "5s", "-enable-partitions", "true"},
+			expErr: "-partition-name must be set if -enable-partitions is set to 'true'",
 		},
 		{
-			flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-consul-dataplane-image", "consul-dataplane:1.14.0",
-				"-partition", "default"},
-			expErr: "-enable-partitions must be set to 'true' if -partition is set",
+			flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0",
+				"-consul-api-timeout", "5s", "-partition", "default"},
+			expErr: "-enable-partitions must be set to 'true' if -partition-name is set",
 		},
 		{
-			flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-consul-dataplane-image", "consul-dataplane:1.14.0",
-				"-default-sidecar-proxy-cpu-limit=unparseable"},
+			flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0",
+				"-consul-api-timeout", "5s", "-default-sidecar-proxy-cpu-limit=unparseable"},
 			expErr: "-default-sidecar-proxy-cpu-limit is invalid",
 		},
 		{
-			flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-consul-dataplane-image", "consul-dataplane:1.14.0",
-				"-default-sidecar-proxy-cpu-request=unparseable"},
+			flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0",
+				"-consul-api-timeout", "5s", "-default-sidecar-proxy-cpu-request=unparseable"},
 			expErr: "-default-sidecar-proxy-cpu-request is invalid",
 		},
 		{
-			flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-consul-dataplane-image", "consul-dataplane:1.14.0",
-				"-default-sidecar-proxy-memory-limit=unparseable"},
+			flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0",
+				"-consul-api-timeout", "5s", "-default-sidecar-proxy-memory-limit=unparseable"},
 			expErr: "-default-sidecar-proxy-memory-limit is invalid",
 		},
 		{
-			flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-consul-dataplane-image", "consul-dataplane:1.14.0",
-				"-default-sidecar-proxy-memory-request=unparseable"},
+			flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0",
+				"-consul-api-timeout", "5s", "-default-sidecar-proxy-memory-request=unparseable"},
 			expErr: "-default-sidecar-proxy-memory-request is invalid",
 		},
 		{
-			flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-consul-dataplane-image", "consul-dataplane:1.14.0",
+			flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0",
+				"-consul-api-timeout", "5s",
 				"-default-sidecar-proxy-memory-request=50Mi",
 				"-default-sidecar-proxy-memory-limit=25Mi",
 			},
 			expErr: "request must be <= limit: -default-sidecar-proxy-memory-request value of \"50Mi\" is greater than the -default-sidecar-proxy-memory-limit value of \"25Mi\"",
 		},
 		{
-			flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-consul-dataplane-image", "consul-dataplane:1.14.0",
+			flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0",
+				"-consul-api-timeout", "5s",
 				"-default-sidecar-proxy-cpu-request=50m",
 				"-default-sidecar-proxy-cpu-limit=25m",
 			},
 			expErr: "request must be <= limit: -default-sidecar-proxy-cpu-request value of \"50m\" is greater than the -default-sidecar-proxy-cpu-limit value of \"25m\"",
 		},
 		{
-			flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-consul-dataplane-image", "consul-dataplane:1.14.0",
-				"-init-container-cpu-limit=unparseable"},
+			flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0",
+				"-consul-api-timeout", "5s", "-init-container-cpu-limit=unparseable"},
 			expErr: "-init-container-cpu-limit 'unparseable' is invalid",
 		},
 		{
-			flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-consul-dataplane-image", "consul-dataplane:1.14.0",
-				"-init-container-cpu-request=unparseable"},
+			flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0",
+				"-consul-api-timeout", "5s", "-init-container-cpu-request=unparseable"},
 			expErr: "-init-container-cpu-request 'unparseable' is invalid",
 		},
 		{
-			flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-consul-dataplane-image", "consul-dataplane:1.14.0",
-				"-init-container-memory-limit=unparseable"},
+			flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0",
+				"-consul-api-timeout", "5s", "-init-container-memory-limit=unparseable"},
 			expErr: "-init-container-memory-limit 'unparseable' is invalid",
 		},
 		{
-			flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-consul-dataplane-image", "consul-dataplane:1.14.0",
-				"-init-container-memory-request=unparseable"},
+			flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0",
+				"-consul-api-timeout", "5s", "-init-container-memory-request=unparseable"},
 			expErr: "-init-container-memory-request 'unparseable' is invalid",
 		},
 		{
-			flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-consul-dataplane-image", "consul-dataplane:1.14.0",
-				"-init-container-memory-request=50Mi",
+			flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0",
+				"-consul-api-timeout", "5s", "-init-container-memory-request=50Mi",
 				"-init-container-memory-limit=25Mi",
 			},
 			expErr: "request must be <= limit: -init-container-memory-request value of \"50Mi\" is greater than the -init-container-memory-limit value of \"25Mi\"",
 		},
 		{
-			flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-consul-dataplane-image", "consul-dataplane:1.14.0",
-				"-init-container-cpu-request=50m",
+			flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0",
+				"-consul-api-timeout", "5s", "-init-container-cpu-request=50m",
 				"-init-container-cpu-limit=25m",
 			},
 			expErr: "request must be <= limit: -init-container-cpu-request value of \"50m\" is greater than the -init-container-cpu-limit value of \"25m\"",
 		},
 		{
-			flags: []string{"-consul-k8s-image", "hashicorp/consul-k8s", "-consul-image", "foo", "-consul-dataplane-image", "consul-dataplane:1.14.0",
+			flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0",
+				"-consul-api-timeout", "5s", "-default-consul-sidecar-cpu-limit=unparseable"},
+			expErr: "-default-consul-sidecar-cpu-limit 'unparseable' is invalid",
+		},
+		{
+			flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0",
+				"-consul-api-timeout", "5s", "-default-consul-sidecar-cpu-request=unparseable"},
+			expErr: "-default-consul-sidecar-cpu-request 'unparseable' is invalid",
+		},
+		{
+			flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0",
+				"-consul-api-timeout", "5s", "-default-consul-sidecar-memory-limit=unparseable"},
+			expErr: "-default-consul-sidecar-memory-limit 'unparseable' is invalid",
+		},
+		{
+			flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0",
+				"-consul-api-timeout", "5s", "-default-consul-sidecar-memory-request=unparseable"},
+			expErr: "-default-consul-sidecar-memory-request 'unparseable' is invalid",
+		},
+		{
+			flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0",
+				"-consul-api-timeout", "5s", "-default-consul-sidecar-memory-request=50Mi",
+				"-default-consul-sidecar-memory-limit=25Mi",
+			},
+			expErr: "request must be <= limit: -default-consul-sidecar-memory-request value of \"50Mi\" is greater than the -default-consul-sidecar-memory-limit value of \"25Mi\"",
+		},
+		{
+			flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0",
+				"-consul-api-timeout", "5s", "-default-consul-sidecar-cpu-request=50m",
+				"-default-consul-sidecar-cpu-limit=25m",
+			},
+			expErr: "request must be <= limit: -default-consul-sidecar-cpu-request value of \"50m\" is greater than the -default-consul-sidecar-cpu-limit value of \"25m\"",
+		},
+		{
+			flags: []string{"-consul-k8s-image", "hashicorp/consul-k8s", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0",
+				"-consul-api-timeout", "5s",
 				"-http-addr=http://0.0.0.0:9999", "-listen", "999999"},
 			expErr: "missing port in address: 999999",
 		},
 		{
-			flags: []string{"-consul-k8s-image", "hashicorp/consul-k8s", "-consul-image", "foo", "-consul-dataplane-image", "consul-dataplane:1.14.0",
+			flags: []string{"-consul-k8s-image", "hashicorp/consul-k8s", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0",
+				"-consul-api-timeout", "5s",
 				"-http-addr=http://0.0.0.0:9999", "-listen", ":foobar"},
 			expErr: "unable to parse port string: strconv.Atoi: parsing \"foobar\": invalid syntax",
 		},
 		{
-			flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-consul-dataplane-image", "consul-dataplane:1.14.0",
-				"-default-envoy-proxy-concurrency=-42",
+			flags: []string{"-consul-k8s-image", "foo", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0",
+				"-consul-api-timeout", "5s", "-default-envoy-proxy-concurrency=-42",
 			},
 			expErr: "-default-envoy-proxy-concurrency must be >= 0 if set",
 		},
@@ -155,4 +209,32 @@ func TestRun_ResourceLimitDefaults(t *testing.T) {
 	require.Equal(t, cmd.flagInitContainerCPULimit, "50m")
 	require.Equal(t, cmd.flagInitContainerMemoryRequest, "25Mi")
 	require.Equal(t, cmd.flagInitContainerMemoryLimit, "150Mi")
+
+	// Consul sidecar container defaults
+	require.Equal(t, cmd.flagDefaultConsulSidecarCPURequest, "20m")
+	require.Equal(t, cmd.flagDefaultConsulSidecarCPULimit, "20m")
+	require.Equal(t, cmd.flagDefaultConsulSidecarMemoryRequest, "25Mi")
+	require.Equal(t, cmd.flagDefaultConsulSidecarMemoryLimit, "50Mi")
+}
+
+func TestRun_ValidationConsulHTTPAddr(t *testing.T) {
+	k8sClient := fake.NewSimpleClientset()
+	ui := cli.NewMockUi()
+	cmd := Command{
+		UI:        ui,
+		clientset: k8sClient,
+	}
+	flags := []string{
+		"-consul-k8s-image", "hashicorp/consul-k8s",
+		"-consul-image", "foo",
+		"-envoy-image", "envoy:1.16.0",
+		"-consul-api-timeout", "5s",
+	}
+
+	os.Setenv(api.HTTPAddrEnvName, "%")
+	code := cmd.Run(flags)
+	os.Unsetenv(api.HTTPAddrEnvName)
+
+	require.Equal(t, 1, code)
+	require.Contains(t, ui.ErrorWriter.String(), "error parsing consul address \"http://%\": parse \"http://%\": invalid URL escape \"%")
+}
diff --git a/control-plane/subcommand/install-cni/binary.go b/control-plane/subcommand/install-cni/binary.go
index 2429770109..472c8bece7 100644
--- a/control-plane/subcommand/install-cni/binary.go
+++ b/control-plane/subcommand/install-cni/binary.go
@@ -49,7 +49,7 @@ func removeFile(path string) error {
 		return nil
 	}

-	if err := os.RemoveAll(path); err != nil {
+	if err := os.Remove(path); err != nil {
 		return fmt.Errorf("error removing file %s: %w", path, err)
 	}
 	return nil
diff --git a/control-plane/subcommand/install-cni/cniconfig.go b/control-plane/subcommand/install-cni/cniconfig.go
index 922d7283dd..e4d2078ad7 100644
--- a/control-plane/subcommand/install-cni/cniconfig.go
+++ b/control-plane/subcommand/install-cni/cniconfig.go
@@ -111,19 +111,18 @@ func confListFileFromConfFile(cfgFile string) (string, error) {
 // The format of the main cni config file is unstructured json consisting of a header and list of plugins
 //
-//	{
-//	  "cniVersion": "0.3.1",
-//	  "name": "kindnet",
-//	  "plugins": [
-//	  {
-//
-//	  },
-//	  {
-//
-//	  }
-//	  ]
-//	}
-//
+// {
+//    "cniVersion": "0.3.1",
+//    "name": "kindnet",
+//    "plugins": [
+//    {
+//
+//    },
+//    {
+//
+//    }
+//    ]
+// }
 // appendCNIConfig appends the consul-cni configuration to the main configuration file.
 func appendCNIConfig(consulCfg *config.CNIConfig, cfgFile string) error {
 	// Read the config file and convert it to a map.
diff --git a/control-plane/subcommand/install-cni/cniconfig_test.go b/control-plane/subcommand/install-cni/cniconfig_test.go
index b6e2154adb..640b9d93cb 100644
--- a/control-plane/subcommand/install-cni/cniconfig_test.go
+++ b/control-plane/subcommand/install-cni/cniconfig_test.go
@@ -2,7 +2,7 @@ package installcni

 import (
 	"fmt"
-	"os"
+	"io/ioutil"
 	"path/filepath"
 	"testing"

@@ -93,10 +93,10 @@ func TestConfListFromConfFile(t *testing.T) {
 	actualFile, err := confListFileFromConfFile(tempCfgFile)
 	require.NoError(t, err)

-	actual, err := os.ReadFile(actualFile)
+	actual, err := ioutil.ReadFile(actualFile)
 	require.NoError(t, err)

-	expected, err := os.ReadFile(expectedCfgFile)
+	expected, err := ioutil.ReadFile(expectedCfgFile)
 	require.NoError(t, err)

 	require.Equal(t, string(expected), string(actual))
@@ -168,10 +168,10 @@ func TestAppendCNIConfig(t *testing.T) {
 			err = appendCNIConfig(c.consulConfig, tempDestFile)
 			require.NoError(t, err)

-			actual, err := os.ReadFile(tempDestFile)
+			actual, err := ioutil.ReadFile(tempDestFile)
 			require.NoError(t, err)

-			expected, err := os.ReadFile(c.goldenFile)
+			expected, err := ioutil.ReadFile(c.goldenFile)
 			require.NoError(t, err)

 			require.Equal(t, string(expected), string(actual))
@@ -298,10 +298,10 @@ func TestRemoveCNIConfig(t *testing.T) {
 				t.Fatal(err)
 			}

-			actual, err := os.ReadFile(tempDestFile)
+			actual, err := ioutil.ReadFile(tempDestFile)
 			require.NoError(t, err)

-			expected, err := os.ReadFile(c.cfgFile)
+			expected, err := ioutil.ReadFile(c.cfgFile)
 			require.NoError(t, err)

 			require.Equal(t, string(expected), string(actual))
diff --git a/control-plane/subcommand/install-cni/command_test.go b/control-plane/subcommand/install-cni/command_test.go
index 5cb9bea91e..a7e97a4aa9 100644
--- a/control-plane/subcommand/install-cni/command_test.go
+++ b/control-plane/subcommand/install-cni/command_test.go
@@ -3,6 +3,7 @@ package installcni
 import (
 	"context"
 	"fmt"
+	"io/ioutil"
 	"os"
 	"path/filepath"
 	"testing"
@@ -64,11 +65,11 @@ func TestRun_DirectoryWatcher(t *testing.T) {
 	require.NoError(t, err)
 	time.Sleep(50 * time.Millisecond)
 	// The golden file contains the consul config.
-	expected, err := os.ReadFile(goldenFile)
+	expected, err := ioutil.ReadFile(goldenFile)
 	require.NoError(t, err)
 	// Get the name of the config file in the tempDir and read it.
 	tempDestFile := filepath.Join(tempDir, configFile)
-	actual, err := os.ReadFile(tempDestFile)
+	actual, err := ioutil.ReadFile(tempDestFile)
 	require.NoError(t, err)
 	// Filewatcher should have detected a change and appended to the config file. Make sure
 	// files match.
@@ -81,7 +82,7 @@ func TestRun_DirectoryWatcher(t *testing.T) {
 	require.NoError(t, err)
 	time.Sleep(50 * time.Millisecond)
 	// Re-read the config file so we can compare the updated config file.
-	actual, err = os.ReadFile(tempDestFile)
+	actual, err = ioutil.ReadFile(tempDestFile)
 	require.NoError(t, err)
 	// Filewatcher should have detected change, fixed and appended to the config file. Make sure
 	// files match.
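
The cniconfig.go changes above operate on the unstructured conflist format described in the doc comment: unmarshal the file into a map, append the consul-cni entry to the plugins array, and marshal it back. A rough sketch of that append step under the same map[string]interface{} assumption (appendPlugin is a hypothetical name, not the real appendCNIConfig signature):

package main

import (
	"encoding/json"
	"fmt"
)

// appendPlugin adds a plugin entry to a CNI conflist document and returns
// the re-marshalled bytes, the same shape of work appendCNIConfig performs.
func appendPlugin(conflist []byte, plugin map[string]interface{}) ([]byte, error) {
	var cfg map[string]interface{}
	if err := json.Unmarshal(conflist, &cfg); err != nil {
		return nil, fmt.Errorf("error unmarshalling CNI config: %w", err)
	}
	plugins, ok := cfg["plugins"].([]interface{})
	if !ok {
		return nil, fmt.Errorf("CNI config has no plugins list")
	}
	cfg["plugins"] = append(plugins, plugin)
	return json.MarshalIndent(cfg, "", "  ")
}

func main() {
	in := []byte(`{"cniVersion":"0.3.1","name":"kindnet","plugins":[{"type":"ptp"}]}`)
	out, err := appendPlugin(in, map[string]interface{}{"type": "consul-cni", "log_level": "info"})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
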
diff --git a/control-plane/subcommand/install-cni/kubeconfig.go b/control-plane/subcommand/install-cni/kubeconfig.go
index ca93759578..e611828e36 100644
--- a/control-plane/subcommand/install-cni/kubeconfig.go
+++ b/control-plane/subcommand/install-cni/kubeconfig.go
@@ -3,6 +3,7 @@ package installcni
 import (
 	"errors"
 	"fmt"
+	"io/ioutil"
 	"os"
 	"path/filepath"

@@ -119,7 +120,7 @@ func serviceAccountToken(tokenPath string) (string, error) {
 	if _, err := os.Stat(tokenPath); errors.Is(err, os.ErrNotExist) {
 		return "", fmt.Errorf("tokenPath does not exist: %w", err)
 	}
-	token, err := os.ReadFile(tokenPath)
+	token, err := ioutil.ReadFile(tokenPath)
 	if err != nil {
 		return "", fmt.Errorf("could not read service account token: %w", err)
 	}
diff --git a/control-plane/subcommand/install-cni/kubeconfig_test.go b/control-plane/subcommand/install-cni/kubeconfig_test.go
index 899ad3f600..22a7eae9b1 100644
--- a/control-plane/subcommand/install-cni/kubeconfig_test.go
+++ b/control-plane/subcommand/install-cni/kubeconfig_test.go
@@ -1,7 +1,7 @@
 package installcni

 import (
-	"os"
+	"io/ioutil"
 	"path/filepath"
 	"testing"

@@ -39,7 +39,7 @@ func TestKubeConfigYaml(t *testing.T) {
 	require.NoError(t, err)

 	golden := filepath.Join("testdata", c.goldenFile)
-	expected, err := os.ReadFile(golden)
+	expected, err := ioutil.ReadFile(golden)
 	require.NoError(t, err)

 	require.Equal(t, string(expected), string(actual))
diff --git a/control-plane/subcommand/partition-init/command.go b/control-plane/subcommand/partition-init/command.go
index 7ca70b50a7..f539b4c62a 100644
--- a/control-plane/subcommand/partition-init/command.go
+++ b/control-plane/subcommand/partition-init/command.go
@@ -11,8 +11,9 @@ import (
 	"github.com/hashicorp/consul-k8s/control-plane/consul"
 	"github.com/hashicorp/consul-k8s/control-plane/subcommand/common"
 	"github.com/hashicorp/consul-k8s/control-plane/subcommand/flags"
-	"github.com/hashicorp/consul-server-connection-manager/discovery"
+	k8sflags "github.com/hashicorp/consul-k8s/control-plane/subcommand/flags"
 	"github.com/hashicorp/consul/api"
+	"github.com/hashicorp/go-discover"
 	"github.com/hashicorp/go-hclog"
 	"github.com/mitchellh/cli"
 )
@@ -20,8 +21,16 @@ import (
 type Command struct {
 	UI cli.Ui

-	flags  *flag.FlagSet
-	consul *flags.ConsulFlags
+	flags *flag.FlagSet
+	k8s   *k8sflags.K8SFlags
+	http  *flags.HTTPFlags
+
+	flagPartitionName string
+
+	// Flags to configure Consul connection
+	flagServerAddresses []string
+	flagServerPort      uint
+	flagUseHTTPS        bool

 	flagLogLevel string
 	flagLogJSON  bool
@@ -36,11 +45,21 @@ type Command struct {
 	once sync.Once
 	help string
+
+	providers map[string]discover.Provider
 }

 func (c *Command) init() {
 	c.flags = flag.NewFlagSet("", flag.ContinueOnError)
+	c.flags.StringVar(&c.flagPartitionName, "partition-name", "", "The name of the partition being created.")
+
+	c.flags.Var((*flags.AppendSliceValue)(&c.flagServerAddresses), "server-address",
+		"The IP, DNS name or the cloud auto-join string of the Consul server(s). If providing IPs or DNS names, may be specified multiple times. "+
+			"At least one value is required.")
+	c.flags.UintVar(&c.flagServerPort, "server-port", 8500, "The HTTP or HTTPS port of the Consul server. Defaults to 8500.")
+	c.flags.BoolVar(&c.flagUseHTTPS, "use-https", false,
+		"Toggle for using HTTPS for all API calls to Consul.")
 	c.flags.DurationVar(&c.flagTimeout, "timeout", 10*time.Minute,
 		"How long we'll try to bootstrap Partitions for before timing out, e.g. 1ms, 2s, 3m")
 	c.flags.StringVar(&c.flagLogLevel, "log-level", "info",
@@ -49,8 +68,10 @@ func (c *Command) init() {
 	c.flags.BoolVar(&c.flagLogJSON, "log-json", false,
 		"Enable or disable JSON output format for logging.")

-	c.consul = &flags.ConsulFlags{}
-	flags.Merge(c.flags, c.consul.Flags())
+	c.k8s = &k8sflags.K8SFlags{}
+	c.http = &flags.HTTPFlags{}
+	flags.Merge(c.flags, c.k8s.Flags())
+	flags.Merge(c.flags, c.http.Flags())
 	c.help = flags.Usage(help, c.flags)

 	// Default retry to 1s. This is exposed for setting in tests.
@@ -95,52 +116,45 @@ func (c *Command) Run(args []string) int {
 		return 1
 	}

-	// Start Consul server Connection manager
-	serverConnMgrCfg, err := c.consul.ConsulServerConnMgrConfig()
-	serverConnMgrCfg.ServerWatchDisabled = true
-	if err != nil {
-		c.UI.Error(fmt.Sprintf("unable to create config for consul-server-connection-manager: %s", err))
-		return 1
-	}
-	watcher, err := discovery.NewWatcher(c.ctx, serverConnMgrCfg, c.log.Named("consul-server-connection-manager"))
+	serverAddresses, err := common.GetResolvedServerAddresses(c.flagServerAddresses, c.providers, c.log)
 	if err != nil {
-		c.UI.Error(fmt.Sprintf("unable to create Consul server watcher: %s", err))
+		c.UI.Error(fmt.Sprintf("Unable to discover any Consul addresses from %q: %s", c.flagServerAddresses[0], err))
 		return 1
 	}
-	go watcher.Run()
-	defer watcher.Stop()
-
-	state, err := watcher.State()
-	if err != nil {
-		c.UI.Error(fmt.Sprintf("unable to get Consul server addresses from watcher: %s", err))
-		return 1
+	scheme := "http"
+	if c.flagUseHTTPS {
+		scheme = "https"
 	}
-
-	consulClient, err := consul.NewClientFromConnMgrState(c.consul.ConsulClientConfig(), state)
+	// For all of the next operations we'll need a Consul client.
+	serverAddr := fmt.Sprintf("%s:%d", serverAddresses[0], c.flagServerPort)
+	cfg := api.DefaultConfig()
+	cfg.Address = serverAddr
+	cfg.Scheme = scheme
+	c.http.MergeOntoConfig(cfg)
+	consulClient, err := consul.NewClient(cfg, c.http.ConsulAPITimeout())
 	if err != nil {
-		c.UI.Error(fmt.Sprintf("unable to create Consul client: %s", err))
+		c.UI.Error(fmt.Sprintf("Error creating Consul client for addr %q: %s", serverAddr, err))
 		return 1
 	}
-
 	for {
-		partition, _, err := consulClient.Partitions().Read(c.ctx, c.consul.Partition, nil)
+		partition, _, err := consulClient.Partitions().Read(c.ctx, c.flagPartitionName, nil)
 		// The API does not return an error if the Partition does not exist. It returns a nil Partition.
 		if err != nil {
-			c.log.Error("Error reading Partition from Consul", "name", c.consul.Partition, "error", err.Error())
+			c.log.Error("Error reading Partition from Consul", "name", c.flagPartitionName, "error", err.Error())
 		} else if partition == nil {
 			// Retry Admin Partition creation until it succeeds, or we reach the command timeout.
 			_, _, err = consulClient.Partitions().Create(c.ctx, &api.Partition{
-				Name:        c.consul.Partition,
+				Name:        c.flagPartitionName,
 				Description: "Created by Helm installation",
 			}, nil)
 			if err == nil {
-				c.log.Info("Successfully created Admin Partition", "name", c.consul.Partition)
+				c.log.Info("Successfully created Admin Partition", "name", c.flagPartitionName)
 				return 0
 			}
-			c.log.Error("Error creating partition", "name", c.consul.Partition, "error", err.Error())
+			c.log.Error("Error creating partition", "name", c.flagPartitionName, "error", err.Error())
 		} else {
-			c.log.Info("Admin Partition already exists", "name", c.consul.Partition)
+			c.log.Info("Admin Partition already exists", "name", c.flagPartitionName)
 			return 0
 		}
 		// Wait on either the retry duration (in which case we continue) or the
@@ -150,28 +164,28 @@ func (c *Command) Run(args []string) int {
 		case <-time.After(c.retryDuration):
 			continue
 		case <-c.ctx.Done():
-			c.log.Error("Timed out attempting to create partition", "name", c.consul.Partition)
+			c.log.Error("Timed out attempting to create partition", "name", c.flagPartitionName)
 			return 1
 		}
 	}
 }

 func (c *Command) validateFlags() error {
-	if len(c.consul.Addresses) == 0 {
-		return errors.New("-addresses must be set")
+	if len(c.flagServerAddresses) == 0 {
+		return errors.New("-server-address must be set at least once")
 	}
-	if c.consul.Partition == "" {
-		return errors.New("-partition must be set")
+	if c.flagPartitionName == "" {
+		return errors.New("-partition-name must be set")
 	}
-	if c.consul.APITimeout <= 0 {
-		return errors.New("-api-timeout must be set to a value greater than 0")
+	if c.http.ConsulAPITimeout() <= 0 {
+		return errors.New("-consul-api-timeout must be set to a value greater than 0")
 	}

 	return nil
 }

-const synopsis = "Initialize an Admin Partition in Consul."
+const synopsis = "Initialize an Admin Partition on Consul."
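
partition-init's Run loop above retries partition creation on a fixed interval until it succeeds or the command-timeout context fires. That select-on-timer-or-context shape can be isolated as below (a sketch with illustrative names, not the command's actual code):

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// retryUntil runs op every interval until it succeeds or ctx is cancelled,
// matching the select-on-time.After-or-ctx.Done pattern in Run above.
func retryUntil(ctx context.Context, interval time.Duration, op func() error) error {
	for {
		if err := op(); err == nil {
			return nil
		}
		select {
		case <-time.After(interval):
			continue
		case <-ctx.Done():
			return fmt.Errorf("timed out: %w", ctx.Err())
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 45*time.Millisecond)
	defer cancel()

	attempts := 0
	err := retryUntil(ctx, 10*time.Millisecond, func() error {
		attempts++
		return errors.New("partition not ready")
	})
	fmt.Println(attempts, err)
}
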
 const help = `
 Usage: consul-k8s-control-plane partition-init [options]

diff --git a/control-plane/subcommand/partition-init/command_ent_test.go b/control-plane/subcommand/partition-init/command_ent_test.go
index 5bb1868b39..1e833430f9 100644
--- a/control-plane/subcommand/partition-init/command_ent_test.go
+++ b/control-plane/subcommand/partition-init/command_ent_test.go
@@ -23,21 +23,22 @@ func TestRun_FlagValidation(t *testing.T) {
 	}{
 		{
 			flags:  nil,
-			expErr: "addresses must be set",
+			expErr: "-server-address must be set at least once",
 		},
 		{
-			flags:  []string{"-addresses", "foo"},
-			expErr: "-partition must be set",
+			flags:  []string{"-server-address", "foo"},
+			expErr: "-partition-name must be set",
 		},
 		{
 			flags: []string{
-				"-addresses", "foo", "-partition", "bar", "-api-timeout", "0s"},
-			expErr: "-api-timeout must be set to a value greater than 0",
+				"-server-address", "foo", "-partition-name", "bar"},
+			expErr: "-consul-api-timeout must be set to a value greater than 0",
 		},
 		{
 			flags: []string{
-				"-addresses", "foo",
-				"-partition", "bar",
+				"-server-address", "foo",
+				"-partition-name", "bar",
+				"-consul-api-timeout", "5s",
 				"-log-level", "invalid",
 			},
 			expErr: "unknown log level: invalid",
@@ -74,10 +75,10 @@ func TestRun_PartitionCreate(t *testing.T) {
 	}
 	cmd.init()
 	args := []string{
-		"-addresses=" + "127.0.0.1",
-		"-http-port=" + strings.Split(server.HTTPAddr, ":")[1],
-		"-grpc-port=" + strings.Split(server.GRPCAddr, ":")[1],
-		"-partition", partitionName,
+		"-server-address=" + strings.Split(server.HTTPAddr, ":")[0],
+		"-server-port=" + strings.Split(server.HTTPAddr, ":")[1],
+		"-partition-name", partitionName,
+		"-consul-api-timeout", "5s",
 	}

 	responseCode := cmd.Run(args)
@@ -113,10 +114,10 @@ func TestRun_PartitionExists(t *testing.T) {
 	}
 	cmd.init()
 	args := []string{
-		"-addresses=" + "127.0.0.1",
-		"-http-port=" + strings.Split(server.HTTPAddr, ":")[1],
-		"-grpc-port=" + strings.Split(server.GRPCAddr, ":")[1],
-		"-partition", partitionName,
+		"-server-address=" + strings.Split(server.HTTPAddr, ":")[0],
+		"-server-port=" + strings.Split(server.HTTPAddr, ":")[1],
+		"-partition-name", partitionName,
+		"-consul-api-timeout", "5s",
 	}

 	responseCode := cmd.Run(args)
@@ -142,11 +143,11 @@ func TestRun_ExitsAfterTimeout(t *testing.T) {
 	}
 	cmd.init()
 	args := []string{
-		"-addresses=" + "127.0.0.1",
-		"-http-port=" + strings.Split(server.HTTPAddr, ":")[1],
-		"-grpc-port=" + strings.Split(server.GRPCAddr, ":")[1],
+		"-server-address=" + strings.Split(server.HTTPAddr, ":")[0],
+		"-server-port=" + strings.Split(server.HTTPAddr, ":")[1],
+		"-partition-name", partitionName,
 		"-timeout", "500ms",
-		"-partition", partitionName,
+		"-consul-api-timeout", "5s",
 	}
 	server.Stop()

 	startTime := time.Now()
@@ -158,3 +159,5 @@ func TestRun_ExitsAfterTimeout(t *testing.T) {
 	// some buffer time required for the task to run and assignments to occur.
require.WithinDuration(t, completeTime, startTime, 1*time.Second) } + +// TODO: Write tests with ACLs enabled diff --git a/control-plane/subcommand/server-acl-init/command.go b/control-plane/subcommand/server-acl-init/command.go index 698da2a25c..9f54feac20 100644 --- a/control-plane/subcommand/server-acl-init/command.go +++ b/control-plane/subcommand/server-acl-init/command.go @@ -5,23 +5,20 @@ import ( "errors" "flag" "fmt" - "net" "os" "regexp" "strings" "sync" "time" - "github.com/cenkalti/backoff" "github.com/hashicorp/consul-k8s/control-plane/consul" "github.com/hashicorp/consul-k8s/control-plane/subcommand" "github.com/hashicorp/consul-k8s/control-plane/subcommand/common" "github.com/hashicorp/consul-k8s/control-plane/subcommand/flags" k8sflags "github.com/hashicorp/consul-k8s/control-plane/subcommand/flags" - "github.com/hashicorp/consul-server-connection-manager/discovery" "github.com/hashicorp/consul/api" + "github.com/hashicorp/go-discover" "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-netaddrs" "github.com/mitchellh/cli" "github.com/mitchellh/mapstructure" "golang.org/x/text/cases" @@ -34,9 +31,8 @@ import ( type Command struct { UI cli.Ui - flags *flag.FlagSet - k8s *k8sflags.K8SFlags - consulFlags *flags.ConsulFlags + flags *flag.FlagSet + k8s *k8sflags.K8SFlags flagResourcePrefix string flagK8sNamespace string @@ -54,6 +50,8 @@ type Command struct { flagAuthMethodHost string flagBindingRuleSelector string + flagController bool + flagCreateEntLicenseToken bool flagSnapshotAgent bool @@ -65,13 +63,20 @@ type Command struct { flagAPIGatewayController bool // Flags to configure Consul connection. - flagServerPort uint + flagServerAddresses []string + flagServerPort uint + flagConsulCACert string + flagConsulTLSServerName string + flagUseHTTPS bool + flagConsulAPITimeout time.Duration // Flags for ACL replication. flagCreateACLReplicationToken bool flagACLReplicationTokenFile string // Flags to support partitions. + flagEnablePartitions bool // true if Admin Partitions are enabled + flagPartitionName string // name of the Admin Partition flagPartitionTokenFile string // Flags to support peering. @@ -100,8 +105,6 @@ type Command struct { clientset kubernetes.Interface - watcher consul.ServerConnectionManager - // ctx is cancelled when the command timeout is reached. ctx context.Context retryDuration time.Duration @@ -109,10 +112,10 @@ type Command struct { // log log hclog.Logger - state discovery.State - once sync.Once help string + + providers map[string]discover.Provider } func (c *Command) init() { @@ -143,6 +146,9 @@ func (c *Command) init() { c.flags.StringVar(&c.flagBindingRuleSelector, "acl-binding-rule-selector", "", "Selector string for connectInject ACL Binding Rule.") + c.flags.BoolVar(&c.flagController, "controller", false, + "Toggle for configuring ACL login for the controller.") + c.flags.BoolVar(&c.flagCreateEntLicenseToken, "create-enterprise-license-token", false, "Toggle for creating a token for the enterprise license job.") c.flags.BoolVar(&c.flagSnapshotAgent, "snapshot-agent", false, @@ -160,8 +166,21 @@ func (c *Command) init() { c.flags.BoolVar(&c.flagAPIGatewayController, "api-gateway-controller", false, "Toggle for configuring ACL login for the API gateway controller.") + c.flags.Var((*flags.AppendSliceValue)(&c.flagServerAddresses), "server-address", + "The IP, DNS name or the cloud auto-join string of the Consul server(s). If providing IPs or DNS names, may be specified multiple times. 
"+ + "At least one value is required.") c.flags.UintVar(&c.flagServerPort, "server-port", 8500, "The HTTP or HTTPS port of the Consul server. Defaults to 8500.") - + c.flags.StringVar(&c.flagConsulCACert, "consul-ca-cert", "", + "Path to the PEM-encoded CA certificate of the Consul cluster.") + c.flags.StringVar(&c.flagConsulTLSServerName, "consul-tls-server-name", "", + "The server name to set as the SNI header when sending HTTPS requests to Consul.") + c.flags.BoolVar(&c.flagUseHTTPS, "use-https", false, + "Toggle for using HTTPS for all API calls to Consul.") + + c.flags.BoolVar(&c.flagEnablePartitions, "enable-partitions", false, + "[Enterprise Only] Enables Admin Partitions") + c.flags.StringVar(&c.flagPartitionName, "partition", "", + "[Enterprise Only] Name of the Admin Partition") c.flags.StringVar(&c.flagPartitionTokenFile, "partition-token-file", "", "[Enterprise Only] Path to file containing ACL token to be used in non-default partitions.") @@ -206,10 +225,11 @@ func (c *Command) init() { c.flags.BoolVar(&c.flagLogJSON, "log-json", false, "Enable or disable JSON output format for logging.") + c.flags.DurationVar(&c.flagConsulAPITimeout, "consul-api-timeout", 0, + "The time in seconds that the consul API client will wait for a response from the API before cancelling the request.") + c.k8s = &k8sflags.K8SFlags{} - c.consulFlags = &flags.ConsulFlags{} flags.Merge(c.flags, c.k8s.Flags()) - flags.Merge(c.flags, c.consulFlags.Flags()) c.help = flags.Usage(help, c.flags) // Default retry to 1s. This is exposed for setting in tests. @@ -294,16 +314,14 @@ func (c *Command) Run(args []string) int { } } - var ipAddrs []net.IPAddr - if err := backoff.Retry(func() error { - ipAddrs, err = netaddrs.IPAddrs(c.ctx, c.consulFlags.Addresses, c.log) - if err != nil { - c.log.Error("Error resolving IP Address", "err", err) - return err - } - return nil - }, exponentialBackoffWithMaxInterval()); err != nil { - c.UI.Error(err.Error()) + serverAddresses, err := common.GetResolvedServerAddresses(c.flagServerAddresses, c.providers, c.log) + if err != nil { + c.UI.Error(fmt.Sprintf("Unable to discover any Consul addresses from %q: %s", c.flagServerAddresses[0], err)) + return 1 + } + scheme := "http" + if c.flagUseHTTPS { + scheme = "https" } var bootstrapToken string @@ -330,44 +348,30 @@ func (c *Command) Run(args []string) int { } } - bootstrapToken, err = c.bootstrapServers(ipAddrs, bootstrapToken, bootTokenSecretName) + bootstrapToken, err = c.bootstrapServers(serverAddresses, bootstrapToken, bootTokenSecretName, scheme) if err != nil { c.log.Error(err.Error()) return 1 } } - // Start Consul server Connection manager - var watcher consul.ServerConnectionManager - serverConnMgrCfg, err := c.consulFlags.ConsulServerConnMgrConfig() - if err != nil { - c.UI.Error(fmt.Sprintf("unable to create config for consul-server-connection-manager: %s", err)) - return 1 - } - serverConnMgrCfg.Credentials.Type = discovery.CredentialsTypeStatic - serverConnMgrCfg.Credentials.Static = discovery.StaticTokenCredential{Token: bootstrapToken} - if c.watcher == nil { - watcher, err = discovery.NewWatcher(c.ctx, serverConnMgrCfg, c.log.Named("consul-server-connection-manager")) - if err != nil { - c.UI.Error(fmt.Sprintf("unable to create Consul server watcher: %s", err)) - return 1 - } - } else { - watcher = c.watcher + // For all of the next operations we'll need a Consul client. 
+ serverAddr := fmt.Sprintf("%s:%d", serverAddresses[0], c.flagServerPort) + clientConfig := api.DefaultConfig() + clientConfig.Address = serverAddr + clientConfig.Scheme = scheme + clientConfig.Token = bootstrapToken + clientConfig.TLSConfig = api.TLSConfig{ + Address: c.flagConsulTLSServerName, + CAFile: c.flagConsulCACert, } - go watcher.Run() - defer watcher.Stop() - - c.state, err = watcher.State() - if err != nil { - c.UI.Error(fmt.Sprintf("unable to get Consul server addresses from watcher: %s", err)) - return 1 + if c.flagEnablePartitions { + clientConfig.Partition = c.flagPartitionName } - - consulClient, err := consul.NewClientFromConnMgrState(c.consulFlags.ConsulClientConfig(), c.state) + consulClient, err := consul.NewClient(clientConfig, c.flagConsulAPITimeout) if err != nil { - c.log.Error(fmt.Sprintf("Error creating Consul client for addr %q: %s", c.state.Address, err)) + c.log.Error(fmt.Sprintf("Error creating Consul client for addr %q: %s", serverAddr, err)) return 1 } consulDC, primaryDC, err := c.consulDatacenterList(consulClient) @@ -378,7 +382,7 @@ func (c *Command) Run(args []string) int { c.log.Info("Current datacenter", "datacenter", consulDC, "primaryDC", primaryDC) primary := consulDC == primaryDC - if c.consulFlags.Partition == consulDefaultPartition && primary { + if c.flagEnablePartitions && c.flagPartitionName == consulDefaultPartition && primary { // Partition token is local because only the Primary datacenter can have Admin Partitions. if c.flagPartitionTokenFile != "" { err = c.createACLWithSecretID("partitions", partitionRules, consulDC, primary, consulClient, partitionToken, true) @@ -478,11 +482,11 @@ func (c *Command) Run(args []string) int { // DNS lookups. The anonymous policy in the default partition needs to be updated in order to // support this use-case. Creating a separate anonymous token client that updates the anonymous // policy and token in the default partition ensures this works. - anonTokenConfig := c.consulFlags.ConsulClientConfig() - if c.consulFlags.Partition != "" { - anonTokenConfig.APIClientConfig.Partition = consulDefaultPartition + anonTokenConfig := clientConfig + if c.flagEnablePartitions { + anonTokenConfig.Partition = consulDefaultPartition } - anonTokenClient, err := consul.NewClientFromConnMgrState(anonTokenConfig, c.state) + anonTokenClient, err := consul.NewClient(anonTokenConfig, c.flagConsulAPITimeout) if err != nil { c.log.Error(err.Error()) return 1 @@ -541,13 +545,19 @@ func (c *Command) Run(args []string) int { serviceAccountName := c.withPrefix("connect-injector") componentAuthMethodName := localComponentAuthMethodName - // Create the connect-inject ACL Policy, Role and BindingRule but do not issue any ACLTokens or create Kube Secrets. - // ConnectInjector token must be global. This means secondary datacenters need - // a token that is known by the primary datacenters. - if !primary { - componentAuthMethodName = globalComponentAuthMethodName + // If namespaces are enabled, the policy and token need to be global + // to be allowed to create namespaces. + if c.flagEnableNamespaces { + // Create the connect-inject ACL Policy, Role and BindingRule but do not issue any ACLTokens or create Kube Secrets. + // ConnectInjector token must be global when namespaces are enabled. This means secondary datacenters need + // a token that is known by the primary datacenters. 
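Another aside (an illustrative sketch, not part of the patch): the client construction above is plain github.com/hashicorp/consul/api configuration, with the scheme, token, TLS settings, and optional partition filled in from the new flags. A self-contained equivalent, with hypothetical address, token, and CA path:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	cfg := api.DefaultConfig()
	cfg.Address = "consul.example.com:8501" // hypothetical -server-address:-server-port
	cfg.Scheme = "https"                    // what -use-https toggles
	cfg.Token = "root"                      // hypothetical bootstrap token
	cfg.TLSConfig = api.TLSConfig{
		Address: "server.dc1.consul",  // SNI name to verify, as set by -consul-tls-server-name
		CAFile:  "/consul/tls/ca.pem", // hypothetical path, as set by -consul-ca-cert
	}
	client, err := api.NewClient(cfg)
	if err != nil {
		log.Fatal(err)
	}
	leader, err := client.Status().Leader()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("leader:", leader)
}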
+ if !primary { + componentAuthMethodName = globalComponentAuthMethodName + } + err = c.createACLPolicyRoleAndBindingRule("connect-inject", injectRules, consulDC, primaryDC, globalPolicy, primary, componentAuthMethodName, serviceAccountName, consulClient) + } else { + err = c.createACLPolicyRoleAndBindingRule("connect-inject", injectRules, consulDC, primaryDC, localPolicy, primary, componentAuthMethodName, serviceAccountName, consulClient) } - err = c.createACLPolicyRoleAndBindingRule("connect-inject", injectRules, consulDC, primaryDC, globalPolicy, primary, componentAuthMethodName, serviceAccountName, consulClient) if err != nil { c.log.Error(err.Error()) return 1 @@ -556,7 +566,7 @@ func (c *Command) Run(args []string) int { if c.flagCreateEntLicenseToken { var err error - if c.consulFlags.Partition != "" { + if c.flagEnablePartitions { err = c.createLocalACL("enterprise-license", entPartitionLicenseRules, consulDC, primary, consulClient) } else { err = c.createLocalACL("enterprise-license", entLicenseRules, consulDC, primary, consulClient) @@ -568,7 +578,7 @@ } if c.flagSnapshotAgent { - serviceAccountName := c.withPrefix("server") + serviceAccountName := c.withPrefix("snapshot-agent") if err := c.createACLPolicyRoleAndBindingRule("snapshot-agent", snapshotAgentRules, consulDC, primaryDC, localPolicy, primary, localComponentAuthMethodName, serviceAccountName, consulClient); err != nil { c.log.Error(err.Error()) return 1 @@ -671,20 +681,33 @@ } } + if c.flagController { + rules, err := c.controllerRules() + if err != nil { + c.log.Error("Error templating controller token rules", "err", err) + return 1 + } + + serviceAccountName := c.withPrefix("controller") + + // Create the controller ACL Policy, Role and BindingRule but do not issue any ACLTokens or create Kube Secrets. + // Controller token must be global because config entry writes all + // go to the primary datacenter. This means secondary datacenters need + // a token that is known by the primary datacenters. + authMethodName := localComponentAuthMethodName + if !primary { + authMethodName = globalComponentAuthMethodName + } + err = c.createACLPolicyRoleAndBindingRule("controller", rules, consulDC, primaryDC, globalPolicy, primary, authMethodName, serviceAccountName, consulClient) + if err != nil { + c.log.Error(err.Error()) + return 1 + } + } c.log.Info("server-acl-init completed successfully") return 0 } -// exponentialBackoffWithMaxInterval creates an exponential backoff but limits the -// maximum backoff to 10 seconds so that we don't find ourselves in a situation -// where we are waiting for minutes before retries. -func exponentialBackoffWithMaxInterval() *backoff.ExponentialBackOff { - backoff := backoff.NewExponentialBackOff() - backoff.MaxInterval = 10 * time.Second - backoff.Reset() - return backoff -} - // configureGlobalComponentAuthMethod sets up an AuthMethod in the primary datacenter, // that the Consul components will use to issue global ACL tokens with. func (c *Command) configureGlobalComponentAuthMethod(consulClient *api.Client, authMethodName, primaryDC string) error { @@ -795,7 +818,7 @@ func (c *Command) configureGateway(gatewayParams ConfigureGatewayParams, consulC // the words "ingress-gateway" or "terminating-gateway". We need to create unique names for tokens // across all gateway types and so must suffix with either `-ingress-gateway` or `-terminating-gateway`. 
serviceAccountName := c.withPrefix(name) - err = c.createACLPolicyRoleAndBindingRule(name, rules, + err = c.createACLPolicyRoleAndBindingRule(serviceAccountName, rules, gatewayParams.ConsulDC, gatewayParams.PrimaryDC, localPolicy, gatewayParams.Primary, gatewayParams.AuthMethodName, serviceAccountName, consulClient) if err != nil { @@ -929,8 +952,8 @@ func (c *Command) createAnonymousPolicy(isPrimary bool) bool { } func (c *Command) validateFlags() error { - if c.consulFlags.Addresses == "" { - return errors.New("-addresses must be set") + if len(c.flagServerAddresses) == 0 { + return errors.New("-server-address must be set at least once") } if c.flagResourcePrefix == "" { @@ -958,7 +981,14 @@ ) } - if c.consulFlags.APITimeout <= 0 { + if c.flagEnablePartitions && c.flagPartitionName == "" { + return errors.New("-partition must be set if -enable-partitions is true") + } + if !c.flagEnablePartitions && c.flagPartitionName != "" { + return errors.New("-enable-partitions must be 'true' if -partition is set") + } + + if c.flagConsulAPITimeout <= 0 { return errors.New("-consul-api-timeout must be set to a value greater than 0") } diff --git a/control-plane/subcommand/server-acl-init/command_ent_test.go b/control-plane/subcommand/server-acl-init/command_ent_test.go index e31b787e4b..27c18f82a4 100644 --- a/control-plane/subcommand/server-acl-init/command_ent_test.go +++ b/control-plane/subcommand/server-acl-init/command_ent_test.go @@ -33,7 +33,9 @@ func TestRun_ConnectInject_SingleDestinationNamespace(t *testing.T) { for _, consulDestNamespace := range consulDestNamespaces { t.Run(consulDestNamespace, func(tt *testing.T) { k8s, testAgent := completeSetup(tt) + defer testAgent.Stop() setUpK8sServiceAccount(tt, k8s, ns) + require := require.New(tt) ui := cli.NewMockUi() cmd := Command{ @@ -42,90 +44,91 @@ } cmd.init() args := []string{ - "-addresses=" + strings.Split(testAgent.TestServer.HTTPAddr, ":")[0], - "-http-port=" + strings.Split(testAgent.TestServer.HTTPAddr, ":")[1], - "-grpc-port=" + strings.Split(testAgent.TestServer.GRPCAddr, ":")[1], + "-server-address=" + strings.Split(testAgent.HTTPAddr, ":")[0], + "-server-port=" + strings.Split(testAgent.HTTPAddr, ":")[1], "-resource-prefix=" + resourcePrefix, "-k8s-namespace=" + ns, "-connect-inject", + "-enable-partitions", "-partition=default", "-enable-namespaces", "-consul-inject-destination-namespace", consulDestNamespace, "-acl-binding-rule-selector=serviceaccount.name!=default", + "-consul-api-timeout=5s", } responseCode := cmd.Run(args) - require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) + require.Equal(0, responseCode, ui.ErrorWriter.String()) bootToken := getBootToken(t, k8s, resourcePrefix, ns) consul, err := api.NewClient(&api.Config{ - Address: testAgent.TestServer.HTTPAddr, + Address: testAgent.HTTPAddr, Token: bootToken, }) - require.NoError(t, err) + require.NoError(err) // Ensure there's only one auth method. namespaceQuery := &api.QueryOptions{ Namespace: consulDestNamespace, } methods, _, err := consul.ACL().AuthMethodList(namespaceQuery) - require.NoError(t, err) + require.NoError(err) if consulDestNamespace == "default" { // If the destination namespace is default then AuthMethodList // will return the component-auth-method as well. 
- require.Len(t, methods, 2) + require.Len(methods, 2) } else { - require.Len(t, methods, 1) + require.Len(methods, 1) } // Check the ACL auth method is created in the expected namespace. authMethodName := resourcePrefix + "-k8s-auth-method" actMethod, _, err := consul.ACL().AuthMethodRead(authMethodName, namespaceQuery) - require.NoError(t, err) - require.NotNil(t, actMethod) - require.Equal(t, "kubernetes", actMethod.Type) - require.Equal(t, "Kubernetes Auth Method", actMethod.Description) - require.NotContains(t, actMethod.Config, "MapNamespaces") - require.NotContains(t, actMethod.Config, "ConsulNamespacePrefix") + require.NoError(err) + require.NotNil(actMethod) + require.Equal("kubernetes", actMethod.Type) + require.Equal("Kubernetes Auth Method", actMethod.Description) + require.NotContains(actMethod.Config, "MapNamespaces") + require.NotContains(actMethod.Config, "ConsulNamespacePrefix") // Check the binding rule is as expected. rules, _, err := consul.ACL().BindingRuleList(authMethodName, namespaceQuery) - require.NoError(t, err) - require.Len(t, rules, 1) + require.NoError(err) + require.Len(rules, 1) actRule, _, err := consul.ACL().BindingRuleRead(rules[0].ID, namespaceQuery) - require.NoError(t, err) - require.NotNil(t, actRule) - require.Equal(t, "Kubernetes binding rule", actRule.Description) - require.Equal(t, api.BindingRuleBindTypeService, actRule.BindType) - require.Equal(t, "${serviceaccount.name}", actRule.BindName) - require.Equal(t, "serviceaccount.name!=default", actRule.Selector) + require.NoError(err) + require.NotNil(actRule) + require.Equal("Kubernetes binding rule", actRule.Description) + require.Equal(api.BindingRuleBindTypeService, actRule.BindType) + require.Equal("${serviceaccount.name}", actRule.BindName) + require.Equal("serviceaccount.name!=default", actRule.Selector) // Check that the default namespace got an attached ACL policy defNamespace, _, err := consul.Namespaces().Read("default", &api.QueryOptions{}) - require.NoError(t, err) - require.NotNil(t, defNamespace) - require.NotNil(t, defNamespace.ACLs) - require.Len(t, defNamespace.ACLs.PolicyDefaults, 1) - require.Equal(t, "cross-namespace-policy", defNamespace.ACLs.PolicyDefaults[0].Name) + require.NoError(err) + require.NotNil(defNamespace) + require.NotNil(defNamespace.ACLs) + require.Len(defNamespace.ACLs.PolicyDefaults, 1) + require.Equal("cross-namespace-policy", defNamespace.ACLs.PolicyDefaults[0].Name) if consulDestNamespace != "default" { // Check that only one namespace was created besides the // already existing `default` namespace namespaces, _, err := consul.Namespaces().List(&api.QueryOptions{}) - require.NoError(t, err) - require.Len(t, namespaces, 2) + require.NoError(err) + require.Len(namespaces, 2) // Check the created namespace properties actNamespace, _, err := consul.Namespaces().Read(consulDestNamespace, &api.QueryOptions{}) - require.NoError(t, err) - require.NotNil(t, actNamespace) - require.Equal(t, consulDestNamespace, actNamespace.Name) - require.Equal(t, "Auto-generated by consul-k8s", actNamespace.Description) - require.NotNil(t, actNamespace.ACLs) - require.Len(t, actNamespace.ACLs.PolicyDefaults, 1) - require.Equal(t, "cross-namespace-policy", actNamespace.ACLs.PolicyDefaults[0].Name) - require.Contains(t, actNamespace.Meta, "external-source") - require.Equal(t, "kubernetes", actNamespace.Meta["external-source"]) + require.NoError(err) + require.NotNil(actNamespace) + require.Equal(consulDestNamespace, actNamespace.Name) + require.Equal("Auto-generated by consul-k8s", 
actNamespace.Description) + require.NotNil(actNamespace.ACLs) + require.Len(actNamespace.ACLs.PolicyDefaults, 1) + require.Equal("cross-namespace-policy", actNamespace.ACLs.PolicyDefaults[0].Name) + require.Contains(actNamespace.Meta, "external-source") + require.Equal("kubernetes", actNamespace.Meta["external-source"]) } }) } @@ -159,7 +162,9 @@ func TestRun_ConnectInject_NamespaceMirroring(t *testing.T) { for name, c := range cases { t.Run(name, func(tt *testing.T) { k8s, testAgent := completeSetup(tt) + defer testAgent.Stop() setUpK8sServiceAccount(tt, k8s, ns) + require := require.New(tt) ui := cli.NewMockUi() cmd := Command{ @@ -168,52 +173,53 @@ func TestRun_ConnectInject_NamespaceMirroring(t *testing.T) { } cmd.init() args := []string{ - "-addresses=" + strings.Split(testAgent.TestServer.HTTPAddr, ":")[0], - "-http-port=" + strings.Split(testAgent.TestServer.HTTPAddr, ":")[1], - "-grpc-port=" + strings.Split(testAgent.TestServer.GRPCAddr, ":")[1], + "-server-address=" + strings.Split(testAgent.HTTPAddr, ":")[0], + "-server-port=" + strings.Split(testAgent.HTTPAddr, ":")[1], "-resource-prefix=" + resourcePrefix, "-k8s-namespace=" + ns, "-connect-inject", + "-enable-partitions", "-partition=default", "-enable-namespaces", "-enable-inject-k8s-namespace-mirroring", "-inject-k8s-namespace-mirroring-prefix", c.MirroringPrefix, "-acl-binding-rule-selector=serviceaccount.name!=default", + "-consul-api-timeout=5s", } args = append(args, c.ExtraFlags...) responseCode := cmd.Run(args) - require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) + require.Equal(0, responseCode, ui.ErrorWriter.String()) bootToken := getBootToken(tt, k8s, resourcePrefix, ns) consul, err := api.NewClient(&api.Config{ - Address: testAgent.TestServer.HTTPAddr, + Address: testAgent.HTTPAddr, Token: bootToken, }) - require.NoError(t, err) + require.NoError(err) // Check the ACL auth method is as expected. authMethodName := resourcePrefix + "-k8s-auth-method" method, _, err := consul.ACL().AuthMethodRead(authMethodName, nil) - require.NoError(t, err) - require.NotNil(t, method, authMethodName+" not found") - require.Equal(t, "kubernetes", method.Type) - require.Equal(t, "Kubernetes Auth Method", method.Description) - require.Contains(t, method.Config, "MapNamespaces") - require.Contains(t, method.Config, "ConsulNamespacePrefix") - require.Equal(t, true, method.Config["MapNamespaces"]) - require.Equal(t, c.MirroringPrefix, method.Config["ConsulNamespacePrefix"]) + require.NoError(err) + require.NotNil(method, authMethodName+" not found") + require.Equal("kubernetes", method.Type) + require.Equal("Kubernetes Auth Method", method.Description) + require.Contains(method.Config, "MapNamespaces") + require.Contains(method.Config, "ConsulNamespacePrefix") + require.Equal(true, method.Config["MapNamespaces"]) + require.Equal(c.MirroringPrefix, method.Config["ConsulNamespacePrefix"]) // Check the binding rule is as expected. 
rules, _, err := consul.ACL().BindingRuleList(authMethodName, nil) - require.NoError(t, err) - require.Len(t, rules, 1) + require.NoError(err) + require.Len(rules, 1) actRule, _, err := consul.ACL().BindingRuleRead(rules[0].ID, nil) - require.NoError(t, err) - require.NotNil(t, actRule) - require.Equal(t, "Kubernetes binding rule", actRule.Description) - require.Equal(t, api.BindingRuleBindTypeService, actRule.BindType) - require.Equal(t, "${serviceaccount.name}", actRule.BindName) - require.Equal(t, "serviceaccount.name!=default", actRule.Selector) + require.NoError(err) + require.NotNil(actRule) + require.Equal("Kubernetes binding rule", actRule.Description) + require.Equal(api.BindingRuleBindTypeService, actRule.BindType) + require.Equal("${serviceaccount.name}", actRule.BindName) + require.Equal("serviceaccount.name!=default", actRule.Selector) }) } } @@ -223,7 +229,8 @@ func TestRun_ConnectInject_NamespaceMirroring(t *testing.T) { func TestRun_AnonymousToken_CreatedFromNonDefaultPartition(t *testing.T) { bootToken := "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee" tokenFile := common.WriteTempFile(t, bootToken) - server := partitionedSetup(t, bootToken, "test") + server, stopFn := partitionedSetup(t, bootToken, "test") + defer stopFn() k8s := fake.NewSimpleClientset() setUpK8sServiceAccount(t, k8s, ns) @@ -234,15 +241,16 @@ func TestRun_AnonymousToken_CreatedFromNonDefaultPartition(t *testing.T) { } cmd.init() args := []string{ - "-addresses=" + strings.Split(server.HTTPAddr, ":")[0], - "-http-port=" + strings.Split(server.HTTPAddr, ":")[1], - "-grpc-port=" + strings.Split(server.GRPCAddr, ":")[1], + "-server-address=" + strings.Split(server.HTTPAddr, ":")[0], + "-server-port=" + strings.Split(server.HTTPAddr, ":")[1], "-resource-prefix=" + resourcePrefix, "-k8s-namespace=" + ns, "-bootstrap-token-file", tokenFile, + "-enable-partitions", "-allow-dns", "-partition=test", "-enable-namespaces", + "-consul-api-timeout=5s", } responseCode := cmd.Run(args) require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) @@ -274,12 +282,13 @@ func TestRun_ACLPolicyUpdates(t *testing.T) { t.Run(k8sNamespaceFlag, func(t *testing.T) { k8s, testAgent := completeSetup(t) setUpK8sServiceAccount(t, k8s, k8sNamespaceFlag) + defer testAgent.Stop() + require := require.New(t) ui := cli.NewMockUi() firstRunArgs := []string{ - "-addresses=" + strings.Split(testAgent.TestServer.HTTPAddr, ":")[0], - "-http-port=" + strings.Split(testAgent.TestServer.HTTPAddr, ":")[1], - "-grpc-port=" + strings.Split(testAgent.TestServer.GRPCAddr, ":")[1], + "-server-address=" + strings.Split(testAgent.HTTPAddr, ":")[0], + "-server-port=" + strings.Split(testAgent.HTTPAddr, ":")[1], "-resource-prefix=" + resourcePrefix, "-k8s-namespace", k8sNamespaceFlag, "-client", @@ -293,10 +302,13 @@ func TestRun_ACLPolicyUpdates(t *testing.T) { "-ingress-gateway-name=anotherigw", "-terminating-gateway-name=tgw", "-terminating-gateway-name=anothertgw", + "-controller", + "-consul-api-timeout=5s", } // Our second run, we're going to update from partitions and namespaces disabled to // namespaces enabled with a single destination ns and partitions enabled. 
secondRunArgs := append(firstRunArgs, + "-enable-partitions", "-partition=default", "-enable-namespaces", "-consul-sync-destination-namespace=sync", @@ -308,14 +320,14 @@ clientset: k8s, } responseCode := cmd.Run(firstRunArgs) - require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) + require.Equal(0, responseCode, ui.ErrorWriter.String()) bootToken := getBootToken(t, k8s, resourcePrefix, k8sNamespaceFlag) consul, err := api.NewClient(&api.Config{ - Address: testAgent.TestServer.HTTPAddr, + Address: testAgent.HTTPAddr, Token: bootToken, }) - require.NoError(t, err) + require.NoError(err) // Check that the expected policies were created. firstRunExpectedPolicies := []string{ @@ -325,36 +337,37 @@ "mesh-gateway-policy", "snapshot-agent-policy", "enterprise-license-token", - "igw-policy", - "anotherigw-policy", - "tgw-policy", - "anothertgw-policy", + resourcePrefix + "-igw-policy", + resourcePrefix + "-anotherigw-policy", + resourcePrefix + "-tgw-policy", + resourcePrefix + "-anothertgw-policy", "connect-inject-policy", + "controller-policy", } policies, _, err := consul.ACL().PolicyList(nil) - require.NoError(t, err) + require.NoError(err) // Check that we have the right number of policies. The actual // policies will have two more than expected because of the // global management and namespace management policies that // are automatically created, the latter in consul-ent v1.7+. - require.Equal(t, len(firstRunExpectedPolicies), len(policies)-2) + require.Equal(len(firstRunExpectedPolicies), len(policies)-2) // Collect the actual policies into a map to make it easier to assert // on their existence and contents. actualPolicies := make(map[string]string) for _, p := range policies { policy, _, err := consul.ACL().PolicyRead(p.ID, nil) - require.NoError(t, err) + require.NoError(err) actualPolicies[p.Name] = policy.Rules } for _, expected := range firstRunExpectedPolicies { actRules, ok := actualPolicies[expected] - require.True(t, ok, "Did not find policy %s", expected) + require.True(ok, "Did not find policy %s", expected) // We assert that the policy doesn't have any namespace config // in it because later that's what we're using to test that it // got updated. - require.NotContains(t, actRules, "namespace") + require.NotContains(actRules, "namespace") } // Re-run the command with namespace flags. The policies should be updated. @@ -365,7 +378,7 @@ clientset: k8s, } responseCode = cmd.Run(secondRunArgs) - require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) + require.Equal(0, responseCode, ui.ErrorWriter.String()) // Check that the policies have all been updated. secondRunExpectedPolicies := []string{ @@ -377,49 +390,50 @@ "snapshot-agent-policy", "enterprise-license-token", "cross-namespace-policy", - "igw-policy", - "anotherigw-policy", - "tgw-policy", - "anothertgw-policy", + resourcePrefix + "-igw-policy", + resourcePrefix + "-anotherigw-policy", + resourcePrefix + "-tgw-policy", + resourcePrefix + "-anothertgw-policy", + "controller-policy", "partitions-token", } policies, _, err = consul.ACL().PolicyList(nil) - require.NoError(t, err) + require.NoError(err) // Check that we have the right number of policies. 
The actual // policies will have two more than expected because of the // global management and namespace management policies that // are automatically created, the latter in consul-ent v1.7+. - require.Equal(t, len(secondRunExpectedPolicies), len(policies)-2) + require.Equal(len(secondRunExpectedPolicies), len(policies)-2) // Collect the actual policies into a map to make it easier to assert // on their existence and contents. actualPolicies = make(map[string]string) for _, p := range policies { policy, _, err := consul.ACL().PolicyRead(p.ID, nil) - require.NoError(t, err) + require.NoError(err) actualPolicies[p.Name] = policy.Rules } for _, expected := range secondRunExpectedPolicies { actRules, ok := actualPolicies[expected] - require.True(t, ok, "Did not find policy %s", expected) + require.True(ok, "Did not find policy %s", expected) switch expected { case "connect-inject-policy": // The connect inject token doesn't have namespace config, // but does change to operator:write from an empty string. - require.Contains(t, actRules, "policy = \"write\"") + require.Contains(actRules, "policy = \"write\"") case "snapshot-agent-policy", "enterprise-license-token": // The snapshot agent and enterprise license tokens shouldn't change. - require.NotContains(t, actRules, "namespace") - require.Contains(t, actRules, "acl = \"write\"") + require.NotContains(actRules, "namespace") + require.Contains(actRules, "acl = \"write\"") case "partitions-token": - require.Contains(t, actRules, "operator = \"write\"") + require.Contains(actRules, "operator = \"write\"") default: // Assert that the policies have the word namespace in them. This // tests that they were updated. The actual contents are tested // in rules_test.go. - require.Contains(t, actRules, "namespace") + require.Contains(actRules, "namespace") } } }) @@ -580,18 +594,21 @@ for name, c := range cases { t.Run(name, func(tt *testing.T) { + require := require.New(tt) k8s, testAgent := completeSetup(tt) + defer testAgent.Stop() setUpK8sServiceAccount(tt, k8s, ns) ui := cli.NewMockUi() defaultArgs := []string{ - "-addresses=" + strings.Split(testAgent.TestServer.HTTPAddr, ":")[0], - "-http-port=" + strings.Split(testAgent.TestServer.HTTPAddr, ":")[1], - "-grpc-port=" + strings.Split(testAgent.TestServer.GRPCAddr, ":")[1], + "-server-address=" + strings.Split(testAgent.HTTPAddr, ":")[0], + "-server-port=" + strings.Split(testAgent.HTTPAddr, ":")[1], "-resource-prefix=" + resourcePrefix, "-k8s-namespace=" + ns, + "-enable-partitions", "-partition=default", "-connect-inject", + "-consul-api-timeout=5s", } // First run. NOTE: we don't assert anything here since we've @@ -602,7 +619,7 @@ clientset: k8s, } responseCode := cmd.Run(append(defaultArgs, c.FirstRunArgs...)) - require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) + require.Equal(0, responseCode, ui.ErrorWriter.String()) // Second run. // NOTE: We're redefining the command so that the old flag values are @@ -612,39 +629,39 @@ clientset: k8s, } responseCode = cmd.Run(append(defaultArgs, c.SecondRunArgs...)) - require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) + require.Equal(0, responseCode, ui.ErrorWriter.String()) // Now check that everything is as expected. 
bootToken := getBootToken(tt, k8s, resourcePrefix, ns) consul, err := api.NewClient(&api.Config{ - Address: testAgent.TestServer.HTTPAddr, + Address: testAgent.HTTPAddr, Token: bootToken, }) - require.NoError(t, err) + require.NoError(err) // Check the ACL auth method is as expected. authMethodName := resourcePrefix + "-k8s-auth-method" method, _, err := consul.ACL().AuthMethodRead(authMethodName, &api.QueryOptions{ Namespace: c.AuthMethodExpectedNS, }) - require.NoError(t, err) - require.NotNil(t, method, authMethodName+" not found") + require.NoError(err) + require.NotNil(method, authMethodName+" not found") if c.AuthMethodExpectMapNamespacesConfig { - require.Contains(t, method.Config, "MapNamespaces") - require.Contains(t, method.Config, "ConsulNamespacePrefix") - require.Equal(t, true, method.Config["MapNamespaces"]) - require.Equal(t, c.AuthMethodExpectedNamespacePrefixConfig, method.Config["ConsulNamespacePrefix"]) + require.Contains(method.Config, "MapNamespaces") + require.Contains(method.Config, "ConsulNamespacePrefix") + require.Equal(true, method.Config["MapNamespaces"]) + require.Equal(c.AuthMethodExpectedNamespacePrefixConfig, method.Config["ConsulNamespacePrefix"]) } else { - require.NotContains(t, method.Config, "MapNamespaces") - require.NotContains(t, method.Config, "ConsulNamespacePrefix") + require.NotContains(method.Config, "MapNamespaces") + require.NotContains(method.Config, "ConsulNamespacePrefix") } // Check the binding rule is as expected. rules, _, err := consul.ACL().BindingRuleList(authMethodName, &api.QueryOptions{ Namespace: c.BindingRuleExpectedNS, }) - require.NoError(t, err) - require.Len(t, rules, 1) + require.NoError(err) + require.Len(rules, 1) }) } } @@ -675,7 +692,7 @@ func TestRun_TokensWithNamespacesEnabled(t *testing.T) { LocalToken: false, }, "partitions token": { - TokenFlags: []string{"-partition=default"}, + TokenFlags: []string{"-enable-partitions", "-partition=default"}, PolicyNames: []string{"partitions-token"}, PolicyDCs: []string{"dc1"}, SecretNames: []string{resourcePrefix + "-partitions-acl-token"}, @@ -686,6 +703,8 @@ func TestRun_TokensWithNamespacesEnabled(t *testing.T) { t.Run(testName, func(t *testing.T) { k8s, testSvr := completeSetup(t) setUpK8sServiceAccount(t, k8s, ns) + defer testSvr.Stop() + require := require.New(t) // Run the command. ui := cli.NewMockUi() @@ -695,41 +714,42 @@ func TestRun_TokensWithNamespacesEnabled(t *testing.T) { } cmd.init() cmdArgs := append([]string{ - "-addresses", strings.Split(testSvr.TestServer.HTTPAddr, ":")[0], - "-http-port", strings.Split(testSvr.TestServer.HTTPAddr, ":")[1], - "-grpc-port", strings.Split(testSvr.TestServer.GRPCAddr, ":")[1], + "-server-address", strings.Split(testSvr.HTTPAddr, ":")[0], + "-server-port", strings.Split(testSvr.HTTPAddr, ":")[1], "-resource-prefix=" + resourcePrefix, "-k8s-namespace=" + ns, + "-enable-partitions", "-partition=default", "-enable-namespaces", + "-consul-api-timeout=5s", }, c.TokenFlags...) responseCode := cmd.Run(cmdArgs) - require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) + require.Equal(0, responseCode, ui.ErrorWriter.String()) // Check that the expected policy was created. bootToken := getBootToken(t, k8s, resourcePrefix, ns) consul, err := api.NewClient(&api.Config{ - Address: testSvr.TestServer.HTTPAddr, + Address: testSvr.HTTPAddr, Token: bootToken, }) - require.NoError(t, err) + require.NoError(err) // Check that the expected policy was created. 
for i := range c.PolicyNames { policy := policyExists(t, c.PolicyNames[i], consul) - require.Equal(t, c.PolicyDCs, policy.Datacenters) + require.Equal(c.PolicyDCs, policy.Datacenters) // Test that the token was created as a Kubernetes Secret. tokenSecret, err := k8s.CoreV1().Secrets(ns).Get(context.Background(), c.SecretNames[i], metav1.GetOptions{}) - require.NoError(t, err) - require.NotNil(t, tokenSecret) + require.NoError(err) + require.NotNil(tokenSecret) token, ok := tokenSecret.Data["token"] - require.True(t, ok) + require.True(ok) // Test that the token has the expected policies in Consul. tokenData, _, err := consul.ACL().TokenReadSelf(&api.QueryOptions{Token: string(token)}) - require.NoError(t, err) - require.Equal(t, c.PolicyNames[i], tokenData.Policies[0].Name) - require.Equal(t, c.LocalToken, tokenData.Local) + require.NoError(err) + require.Equal(c.PolicyNames[i], tokenData.Policies[0].Name) + require.Equal(c.LocalToken, tokenData.Local) } // Test that if the same command is run again, it doesn't error. @@ -741,7 +761,7 @@ func TestRun_TokensWithNamespacesEnabled(t *testing.T) { } cmd.init() responseCode := cmd.Run(cmdArgs) - require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) + require.Equal(0, responseCode, ui.ErrorWriter.String()) }) }) } @@ -762,9 +782,9 @@ func TestRun_GatewayNamespaceParsing(t *testing.T) { TokenFlags: []string{"-ingress-gateway-name=ingress", "-ingress-gateway-name=gateway", "-ingress-gateway-name=another-gateway"}, - PolicyNames: []string{"ingress-policy", - "gateway-policy", - "another-gateway-policy"}, + PolicyNames: []string{resourcePrefix + "-ingress-policy", + resourcePrefix + "-gateway-policy", + resourcePrefix + "-another-gateway-policy"}, ExpectedPolicies: []string{` partition "default" { namespace "default" { @@ -811,9 +831,9 @@ partition "default" { TokenFlags: []string{"-ingress-gateway-name=ingress.", "-ingress-gateway-name=gateway.namespace1", "-ingress-gateway-name=another-gateway.namespace2"}, - PolicyNames: []string{"ingress-policy", - "gateway-policy", - "another-gateway-policy"}, + PolicyNames: []string{resourcePrefix + "-ingress-policy", + resourcePrefix + "-gateway-policy", + resourcePrefix + "-another-gateway-policy"}, ExpectedPolicies: []string{` partition "default" { namespace "default" { @@ -860,9 +880,9 @@ partition "default" { TokenFlags: []string{"-terminating-gateway-name=terminating", "-terminating-gateway-name=gateway", "-terminating-gateway-name=another-gateway"}, - PolicyNames: []string{"terminating-policy", - "gateway-policy", - "another-gateway-policy"}, + PolicyNames: []string{resourcePrefix + "-terminating-policy", + resourcePrefix + "-gateway-policy", + resourcePrefix + "-another-gateway-policy"}, ExpectedPolicies: []string{` partition "default" { namespace "default" { @@ -900,9 +920,9 @@ partition "default" { TokenFlags: []string{"-terminating-gateway-name=terminating.", "-terminating-gateway-name=gateway.namespace1", "-terminating-gateway-name=another-gateway.namespace2"}, - PolicyNames: []string{"terminating-policy", - "gateway-policy", - "another-gateway-policy"}, + PolicyNames: []string{resourcePrefix + "-terminating-policy", + resourcePrefix + "-gateway-policy", + resourcePrefix + "-another-gateway-policy"}, ExpectedPolicies: []string{` partition "default" { namespace "default" { @@ -939,7 +959,9 @@ partition "default" { for _, c := range cases { t.Run(c.TestName, func(t *testing.T) { k8s, testSvr := completeSetup(t) + defer testSvr.Stop() setUpK8sServiceAccount(t, k8s, ns) + require := require.New(t) 
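Aside (an illustrative sketch, not part of the patch): the repeated -ingress-gateway-name and -terminating-gateway-name flags in the cases above work because they are registered through a flag.Value that appends on every occurrence, along the lines of the flags.AppendSliceValue type used for -server-address earlier in this patch. A minimal standalone version:

package main

import (
	"flag"
	"fmt"
	"strings"
)

// appendSliceValue implements flag.Value; each occurrence of the
// flag appends to the underlying slice instead of overwriting it.
type appendSliceValue []string

func (s *appendSliceValue) String() string { return strings.Join(*s, ",") }

func (s *appendSliceValue) Set(v string) error {
	*s = append(*s, v)
	return nil
}

func main() {
	var names appendSliceValue
	fs := flag.NewFlagSet("demo", flag.ExitOnError)
	fs.Var(&names, "ingress-gateway-name", "may be repeated")
	_ = fs.Parse([]string{"-ingress-gateway-name=ingress.", "-ingress-gateway-name=gateway.namespace1"})
	fmt.Println(names) // [ingress. gateway.namespace1]
}

The `name.namespace` inputs in the test cases suggest the command then splits each value on the first dot to separate the gateway name from its Consul namespace, with an empty suffix falling back to the default namespace.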
// Run the command. ui := cli.NewMockUi() @@ -950,31 +972,32 @@ partition "default" { cmd.init() cmdArgs := append([]string{ "-k8s-namespace=" + ns, - "-addresses", strings.Split(testSvr.TestServer.HTTPAddr, ":")[0], - "-http-port", strings.Split(testSvr.TestServer.HTTPAddr, ":")[1], - "-grpc-port", strings.Split(testSvr.TestServer.GRPCAddr, ":")[1], + "-server-address", strings.Split(testSvr.HTTPAddr, ":")[0], + "-server-port", strings.Split(testSvr.HTTPAddr, ":")[1], "-resource-prefix=" + resourcePrefix, "-enable-namespaces=true", + "-enable-partitions", "-partition=default", + "-consul-api-timeout=5s", }, c.TokenFlags...) responseCode := cmd.Run(cmdArgs) - require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) + require.Equal(0, responseCode, ui.ErrorWriter.String()) // Check that the expected policy was created. bootToken := getBootToken(t, k8s, resourcePrefix, ns) consul, err := api.NewClient(&api.Config{ - Address: testSvr.TestServer.HTTPAddr, + Address: testSvr.HTTPAddr, Token: bootToken, }) - require.NoError(t, err) + require.NoError(err) for i := range c.PolicyNames { policy := policyExists(t, c.PolicyNames[i], consul) fullPolicy, _, err := consul.ACL().PolicyRead(policy.ID, nil) - require.NoError(t, err) - require.Equal(t, c.ExpectedPolicies[i], fullPolicy.Rules) + require.NoError(err) + require.Equal(c.ExpectedPolicies[i], fullPolicy.Rules) } // Test that if the same command is run again, it doesn't error. @@ -986,7 +1009,7 @@ partition "default" { } cmd.init() responseCode := cmd.Run(cmdArgs) - require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) + require.Equal(0, responseCode, ui.ErrorWriter.String()) }) }) } @@ -1008,7 +1031,7 @@ func TestRun_NamespaceEnabled_ValidateLoginToken_PrimaryDatacenter(t *testing.T) { ComponentName: "connect-injector", TokenFlags: []string{"-connect-inject"}, - Roles: []string{resourcePrefix + "-connect-inject-acl-role"}, + Roles: []string{resourcePrefix + "-connect-injector-acl-role"}, Namespace: ns, GlobalToken: false, }, @@ -1026,6 +1049,7 @@ func TestRun_NamespaceEnabled_ValidateLoginToken_PrimaryDatacenter(t *testing.T) serviceAccountName := fmt.Sprintf("%s-%s", resourcePrefix, c.ComponentName) k8s, testSvr := completeSetup(t) + defer testSvr.Stop() _, jwtToken := setUpK8sServiceAccount(t, k8s, c.Namespace) k8sMockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -1054,16 +1078,16 @@ func TestRun_NamespaceEnabled_ValidateLoginToken_PrimaryDatacenter(t *testing.T) "-enable-namespaces", "-consul-inject-destination-namespace", c.Namespace, "-auth-method-host=" + k8sMockServer.URL, - "-addresses", strings.Split(testSvr.TestServer.HTTPAddr, ":")[0], - "-http-port", strings.Split(testSvr.TestServer.HTTPAddr, ":")[1], - "-grpc-port", strings.Split(testSvr.TestServer.GRPCAddr, ":")[1], + "-server-address", strings.Split(testSvr.HTTPAddr, ":")[0], + "-server-port", strings.Split(testSvr.HTTPAddr, ":")[1], + "-consul-api-timeout=5s", }, c.TokenFlags...) cmd.init() responseCode := cmd.Run(cmdArgs) require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) client, err := api.NewClient(&api.Config{ - Address: testSvr.TestServer.HTTPAddr, + Address: testSvr.HTTPAddr, }) require.NoError(t, err) @@ -1099,7 +1123,7 @@ func TestRun_NamespaceEnabled_ValidateLoginToken_SecondaryDatacenter(t *testing. 
{ ComponentName: "connect-injector", TokenFlags: []string{"-connect-inject"}, - Roles: []string{resourcePrefix + "-connect-inject-acl-role-dc2"}, + Roles: []string{resourcePrefix + "-connect-injector-acl-role-dc2"}, Namespace: ns, GlobalToken: true, }, @@ -1118,7 +1142,8 @@ func TestRun_NamespaceEnabled_ValidateLoginToken_SecondaryDatacenter(t *testing. authMethodName := fmt.Sprintf("%s-%s-%s", resourcePrefix, componentAuthMethod, "dc2") serviceAccountName := fmt.Sprintf("%s-%s", resourcePrefix, c.ComponentName) - k8s, _, consulHTTPAddr, consulGRPCAddr := mockReplicatedSetup(t, bootToken) + k8s, _, consulHTTPAddr, cleanup := mockReplicatedSetup(t, bootToken) + defer cleanup() _, jwtToken := setUpK8sServiceAccount(t, k8s, c.Namespace) k8sMockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -1149,9 +1174,9 @@ func TestRun_NamespaceEnabled_ValidateLoginToken_SecondaryDatacenter(t *testing. "-consul-inject-destination-namespace", c.Namespace, "-acl-replication-token-file", tokenFile, "-auth-method-host=" + k8sMockServer.URL, - "-addresses", strings.Split(consulHTTPAddr, ":")[0], - "-http-port", strings.Split(consulHTTPAddr, ":")[1], - "-grpc-port", strings.Split(consulGRPCAddr, ":")[1], + "-server-address", strings.Split(consulHTTPAddr, ":")[0], + "-server-port", strings.Split(consulHTTPAddr, ":")[1], + "-consul-api-timeout=5s", }, c.TokenFlags...) cmd.init() responseCode := cmd.Run(cmdArgs) @@ -1186,12 +1211,13 @@ func TestRun_PartitionTokenDefaultPartition_WithProvidedSecretID(t *testing.T) { t.Parallel() k8s, testSvr := completeSetup(t) + defer testSvr.Stop() setUpK8sServiceAccount(t, k8s, ns) partitionToken := "123e4567-e89b-12d3-a456-426614174000" partitionTokenFile, err := os.CreateTemp("", "partitiontoken") require.NoError(t, err) - defer os.RemoveAll(partitionTokenFile.Name()) + defer os.Remove(partitionTokenFile.Name()) partitionTokenFile.WriteString(partitionToken) // Run the command. @@ -1204,12 +1230,13 @@ func TestRun_PartitionTokenDefaultPartition_WithProvidedSecretID(t *testing.T) { cmdArgs := []string{ "-timeout=1m", "-k8s-namespace=" + ns, - "-addresses", strings.Split(testSvr.TestServer.HTTPAddr, ":")[0], - "-http-port", strings.Split(testSvr.TestServer.HTTPAddr, ":")[1], - "-grpc-port", strings.Split(testSvr.TestServer.GRPCAddr, ":")[1], + "-server-address", strings.Split(testSvr.HTTPAddr, ":")[0], + "-server-port", strings.Split(testSvr.HTTPAddr, ":")[1], "-resource-prefix=" + resourcePrefix, + "-enable-partitions", "-partition=default", "-partition-token-file", partitionTokenFile.Name(), + "-consul-api-timeout=5s", } responseCode := cmd.Run(cmdArgs) @@ -1217,7 +1244,7 @@ func TestRun_PartitionTokenDefaultPartition_WithProvidedSecretID(t *testing.T) { // Check that this token is created. consul, err := api.NewClient(&api.Config{ - Address: testSvr.TestServer.HTTPAddr, + Address: testSvr.HTTPAddr, Token: partitionToken, }) require.NoError(t, err) @@ -1250,18 +1277,35 @@ func TestRun_PartitionTokenDefaultPartition_WithProvidedSecretID(t *testing.T) { // a client in the provided partitionName. The bootToken is the token used as the bootstrap token // for both the client and the server. The helper creates a server, then creates a partition with // the provided partitionName and then creates a client in said partition. 
-func partitionedSetup(t *testing.T, bootToken string, partitionName string) *testutil.TestServer { - server := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { +func partitionedSetup(t *testing.T, bootToken string, partitionName string) (*testutil.TestServer, func()) { + server, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { c.ACL.Enabled = true c.ACL.Tokens.InitialManagement = bootToken }) + require.NoError(t, err) + server.WaitForLeader(t) - server.Cfg.APIClientConfig.Token = bootToken - serverAPIClient, err := consul.NewClient(server.Cfg.APIClientConfig, 5*time.Second) + serverAPIClient, err := consul.NewClient(&api.Config{ + Address: server.HTTPAddr, + Token: bootToken, + }, 5*time.Second) require.NoError(t, err) _, _, err = serverAPIClient.Partitions().Create(context.Background(), &api.Partition{Name: partitionName}, &api.WriteOptions{}) require.NoError(t, err) - return server.TestServer + partitionedClient, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { + c.Server = false + c.Bootstrap = false + c.Partition = partitionName + c.RetryJoin = []string{server.LANAddr} + c.ACL.Enabled = true + c.ACL.Tokens.Agent = bootToken + }) + require.NoError(t, err) + + return server, func() { + server.Stop() + partitionedClient.Stop() + } } diff --git a/control-plane/subcommand/server-acl-init/command_test.go b/control-plane/subcommand/server-acl-init/command_test.go index 3111f58820..83fa50b3b7 100644 --- a/control-plane/subcommand/server-acl-init/command_test.go +++ b/control-plane/subcommand/server-acl-init/command_test.go @@ -16,14 +16,17 @@ import ( "time" "github.com/hashicorp/consul-k8s/control-plane/helper/cert" + "github.com/hashicorp/consul-k8s/control-plane/helper/go-discover/mocks" "github.com/hashicorp/consul-k8s/control-plane/helper/test" "github.com/hashicorp/consul-k8s/control-plane/subcommand/common" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/sdk/freeport" "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/sdk/testutil/retry" + "github.com/hashicorp/go-discover" "github.com/hashicorp/go-hclog" "github.com/mitchellh/cli" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" v1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" @@ -47,29 +50,38 @@ func TestRun_FlagValidation(t *testing.T) { }{ { Flags: []string{}, - ExpErr: "-addresses must be set", + ExpErr: "-server-address must be set at least once", }, { - Flags: []string{"-addresses=localhost"}, + Flags: []string{"-server-address=localhost"}, ExpErr: "-resource-prefix must be set", }, { Flags: []string{ "-acl-replication-token-file=/notexist", - "-addresses=localhost", + "-server-address=localhost", "-resource-prefix=prefix"}, + ExpErr: "-consul-api-timeout must be set to a value greater than 0", + }, + { + Flags: []string{ + "-acl-replication-token-file=/notexist", + "-server-address=localhost", + "-resource-prefix=prefix", + "-consul-api-timeout=5s"}, ExpErr: "unable to read token from file \"/notexist\": open /notexist: no such file or directory", }, { Flags: []string{ "-bootstrap-token-file=/notexist", - "-addresses=localhost", - "-resource-prefix=prefix"}, + "-server-address=localhost", + "-resource-prefix=prefix", + "-consul-api-timeout=5s"}, ExpErr: "unable to read token from file \"/notexist\": open /notexist: no such file or directory", }, { Flags: []string{ - "-addresses=localhost", + "-server-address=localhost", "-resource-prefix=prefix", 
"-sync-consul-node-name=Speci@l_Chars", }, @@ -78,7 +90,7 @@ func TestRun_FlagValidation(t *testing.T) { }, { Flags: []string{ - "-addresses=localhost", + "-server-address=localhost", "-resource-prefix=prefix", "-sync-consul-node-name=5r9OPGfSRXUdGzNjBdAwmhCBrzHDNYs4XjZVR4wp7lSLIzqwS0ta51nBLIN0TMPV-too-long", }, @@ -106,7 +118,8 @@ func TestRun_FlagValidation(t *testing.T) { func TestRun_Defaults(t *testing.T) { t.Parallel() - k8s, testClient := completeSetup(t) + k8s, testSvr := completeSetup(t) + defer testSvr.Stop() setUpK8sServiceAccount(t, k8s, ns) // Run the command. @@ -118,10 +131,10 @@ func TestRun_Defaults(t *testing.T) { args := []string{ "-timeout=1m", "-k8s-namespace=" + ns, - "-addresses", strings.Split(testClient.TestServer.HTTPAddr, ":")[0], - "-http-port", strings.Split(testClient.TestServer.HTTPAddr, ":")[1], - "-grpc-port", strings.Split(testClient.TestServer.GRPCAddr, ":")[1], + "-server-address", strings.Split(testSvr.HTTPAddr, ":")[0], + "-server-port", strings.Split(testSvr.HTTPAddr, ":")[1], "-resource-prefix=" + resourcePrefix, + "-consul-api-timeout", "5s", } responseCode := cmd.Run(args) require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) @@ -130,8 +143,10 @@ func TestRun_Defaults(t *testing.T) { bootToken := getBootToken(t, k8s, resourcePrefix, ns) // Check that it has the right policies. - testClient.Cfg.APIClientConfig.Token = bootToken - consul, err := api.NewClient(testClient.Cfg.APIClientConfig) + consul, err := api.NewClient(&api.Config{ + Address: testSvr.HTTPAddr, + Token: bootToken, + }) require.NoError(t, err) tokenData, _, err := consul.ACL().TokenReadSelf(nil) require.NoError(t, err) @@ -181,8 +196,9 @@ func TestRun_TokensPrimaryDC(t *testing.T) { } for _, c := range cases { t.Run(c.TestName, func(t *testing.T) { - k8s, testClient := completeSetup(t) + k8s, testSvr := completeSetup(t) setUpK8sServiceAccount(t, k8s, ns) + defer testSvr.Stop() // Run the command. ui := cli.NewMockUi() @@ -194,10 +210,10 @@ func TestRun_TokensPrimaryDC(t *testing.T) { cmdArgs := append([]string{ "-timeout=1m", "-k8s-namespace=" + ns, - "-addresses", strings.Split(testClient.TestServer.HTTPAddr, ":")[0], - "-http-port", strings.Split(testClient.TestServer.HTTPAddr, ":")[1], - "-grpc-port", strings.Split(testClient.TestServer.GRPCAddr, ":")[1], + "-server-address", strings.Split(testSvr.HTTPAddr, ":")[0], + "-server-port", strings.Split(testSvr.HTTPAddr, ":")[1], "-resource-prefix=" + resourcePrefix, + "-consul-api-timeout", "5s", }, c.TokenFlags...) responseCode := cmd.Run(cmdArgs) @@ -205,9 +221,10 @@ func TestRun_TokensPrimaryDC(t *testing.T) { // Check that the expected policy was created. 
bootToken := getBootToken(t, k8s, resourcePrefix, ns) - testClient.Cfg.APIClientConfig.Token = bootToken - consul, err := api.NewClient(testClient.Cfg.APIClientConfig) - require.NoError(t, err) + consul, err := api.NewClient(&api.Config{ + Address: testSvr.HTTPAddr, + Token: bootToken, + }) require.NoError(t, err) for i := range c.PolicyNames { @@ -246,13 +263,14 @@ func TestRun_TokensPrimaryDC(t *testing.T) { func TestRun_ReplicationTokenPrimaryDC_WithProvidedSecretID(t *testing.T) { t.Parallel() - k8s, testClient := completeSetup(t) + k8s, testSvr := completeSetup(t) + defer testSvr.Stop() setUpK8sServiceAccount(t, k8s, ns) replicationToken := "123e4567-e89b-12d3-a456-426614174000" replicationTokenFile, err := os.CreateTemp("", "replicationtoken") require.NoError(t, err) - defer os.RemoveAll(replicationTokenFile.Name()) + defer os.Remove(replicationTokenFile.Name()) replicationTokenFile.WriteString(replicationToken) // Run the command. @@ -265,12 +283,12 @@ func TestRun_ReplicationTokenPrimaryDC_WithProvidedSecretID(t *testing.T) { cmdArgs := []string{ "-timeout=1m", "-k8s-namespace=" + ns, - "-addresses", strings.Split(testClient.TestServer.HTTPAddr, ":")[0], - "-http-port", strings.Split(testClient.TestServer.HTTPAddr, ":")[1], - "-grpc-port", strings.Split(testClient.TestServer.GRPCAddr, ":")[1], + "-server-address", strings.Split(testSvr.HTTPAddr, ":")[0], + "-server-port", strings.Split(testSvr.HTTPAddr, ":")[1], "-resource-prefix=" + resourcePrefix, "-create-acl-replication-token", "-acl-replication-token-file", replicationTokenFile.Name(), + "-consul-api-timeout", "5s", } responseCode := cmd.Run(cmdArgs) @@ -278,7 +296,7 @@ func TestRun_ReplicationTokenPrimaryDC_WithProvidedSecretID(t *testing.T) { // Check that this token is created. consul, err := api.NewClient(&api.Config{ - Address: testClient.TestServer.HTTPAddr, + Address: testSvr.HTTPAddr, Token: replicationToken, }) require.NoError(t, err) @@ -333,8 +351,9 @@ func TestRun_TokensReplicatedDC(t *testing.T) { bootToken := "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee" tokenFile := common.WriteTempFile(t, bootToken) - k8s, consul, secondaryAddr, secondaryGRPCAddr := mockReplicatedSetup(t, bootToken) + k8s, consul, secondaryAddr, cleanup := mockReplicatedSetup(t, bootToken) setUpK8sServiceAccount(t, k8s, ns) + defer cleanup() // Run the command. ui := cli.NewMockUi() @@ -348,10 +367,10 @@ func TestRun_TokensReplicatedDC(t *testing.T) { "-timeout=1m", "-k8s-namespace=" + ns, "-acl-replication-token-file", tokenFile, - "-addresses", strings.Split(secondaryAddr, ":")[0], - "-http-port", strings.Split(secondaryAddr, ":")[1], - "-grpc-port", strings.Split(secondaryGRPCAddr, ":")[1], + "-server-address", strings.Split(secondaryAddr, ":")[0], + "-server-port", strings.Split(secondaryAddr, ":")[1], "-resource-prefix=" + resourcePrefix, + "-consul-api-timeout", "5s", }, c.TokenFlags...) responseCode := cmd.Run(cmdArgs) @@ -411,6 +430,7 @@ func TestRun_TokensWithProvidedBootstrapToken(t *testing.T) { k8s, testAgent := completeBootstrappedSetup(t, bootToken) setUpK8sServiceAccount(t, k8s, ns) + defer testAgent.Stop() // Run the command. 
ui := cli.NewMockUi() @@ -422,17 +442,17 @@ func TestRun_TokensWithProvidedBootstrapToken(t *testing.T) { "-timeout=1m", "-k8s-namespace", ns, "-bootstrap-token-file", tokenFile, - "-addresses", strings.Split(testAgent.TestServer.HTTPAddr, ":")[0], - "-http-port", strings.Split(testAgent.TestServer.HTTPAddr, ":")[1], - "-grpc-port", strings.Split(testAgent.TestServer.GRPCAddr, ":")[1], + "-server-address", strings.Split(testAgent.HTTPAddr, ":")[0], + "-server-port", strings.Split(testAgent.HTTPAddr, ":")[1], "-resource-prefix", resourcePrefix, + "-consul-api-timeout", "5s", }, c.TokenFlags...) responseCode := cmd.Run(cmdArgs) require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) consul, err := api.NewClient(&api.Config{ - Address: testAgent.TestServer.HTTPAddr, + Address: testAgent.HTTPAddr, Token: bootToken, }) require.NoError(t, err) @@ -495,12 +515,13 @@ func TestRun_AnonymousTokenPolicy(t *testing.T) { flags := c.Flags var k8s *fake.Clientset var consulHTTPAddr string - var consulGRPCAddr string var consul *api.Client if c.SecondaryDC { + var cleanup func() bootToken := "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee" - k8s, consul, consulHTTPAddr, consulGRPCAddr = mockReplicatedSetup(t, bootToken) + k8s, consul, consulHTTPAddr, cleanup = mockReplicatedSetup(t, bootToken) + defer cleanup() tmp, err := os.CreateTemp("", "") require.NoError(t, err) @@ -508,10 +529,10 @@ func TestRun_AnonymousTokenPolicy(t *testing.T) { require.NoError(t, err) flags = append(flags, "-acl-replication-token-file", tmp.Name()) } else { - var testClient *test.TestServerClient - k8s, testClient = completeSetup(t) - consulHTTPAddr = testClient.TestServer.HTTPAddr - consulGRPCAddr = testClient.TestServer.GRPCAddr + var testSvr *testutil.TestServer + k8s, testSvr = completeSetup(t) + defer testSvr.Stop() + consulHTTPAddr = testSvr.HTTPAddr } setUpK8sServiceAccount(t, k8s, ns) @@ -526,10 +547,9 @@ func TestRun_AnonymousTokenPolicy(t *testing.T) { "-timeout=1m", "-resource-prefix=" + resourcePrefix, "-k8s-namespace=" + ns, - "-auth-method-host=https://my-kube.com", - "-addresses", strings.Split(consulHTTPAddr, ":")[0], - "-http-port", strings.Split(consulHTTPAddr, ":")[1], - "-grpc-port", strings.Split(consulGRPCAddr, ":")[1], + "-server-address", strings.Split(consulHTTPAddr, ":")[0], + "-server-port", strings.Split(consulHTTPAddr, ":")[1], + "-consul-api-timeout", "5s", }, flags...) responseCode := cmd.Run(cmdArgs) require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) @@ -602,7 +622,8 @@ func TestRun_ConnectInjectAuthMethod(t *testing.T) { for testName, c := range cases { t.Run(testName, func(t *testing.T) { - k8s, testClient := completeSetup(t) + k8s, testSvr := completeSetup(t) + defer testSvr.Stop() caCert, jwtToken := setUpK8sServiceAccount(t, k8s, ns) // Run the command. @@ -617,10 +638,10 @@ func TestRun_ConnectInjectAuthMethod(t *testing.T) { "-timeout=1m", "-resource-prefix=" + resourcePrefix, "-k8s-namespace=" + ns, - "-addresses", strings.Split(testClient.TestServer.HTTPAddr, ":")[0], - "-http-port", strings.Split(testClient.TestServer.HTTPAddr, ":")[1], - "-grpc-port", strings.Split(testClient.TestServer.GRPCAddr, ":")[1], + "-server-address", strings.Split(testSvr.HTTPAddr, ":")[0], + "-server-port", strings.Split(testSvr.HTTPAddr, ":")[1], "-acl-binding-rule-selector=" + bindingRuleSelector, + "-consul-api-timeout", "5s", } cmdArgs = append(cmdArgs, c.flags...) 
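Aside (an illustrative sketch, not part of the patch): completeSetup and completeBootstrappedSetup, used throughout these tests, start a real Consul agent through the SDK's testutil package (which shells out to a local consul binary), presumably along the same lines as the reworked partitionedSetup above. Reduced to its core:

package example

import (
	"testing"

	"github.com/hashicorp/consul/sdk/testutil"
)

func TestWithConsulServer(t *testing.T) {
	server, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) {
		c.ACL.Enabled = true
		c.ACL.Tokens.InitialManagement = "root" // hypothetical bootstrap token
	})
	if err != nil {
		t.Fatal(err)
	}
	defer server.Stop()

	// Block until the agent elects itself leader so that subsequent
	// API calls behave deterministically.
	server.WaitForLeader(t)
	t.Log("test Consul server listening at", server.HTTPAddr)
}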
responseCode := cmd.Run(cmdArgs) @@ -628,8 +649,10 @@ func TestRun_ConnectInjectAuthMethod(t *testing.T) { // Check that the auth method was created. bootToken := getBootToken(t, k8s, resourcePrefix, ns) - consul := testClient.APIClient - + consul, err := api.NewClient(&api.Config{ + Address: testSvr.HTTPAddr, + }) + require.NoError(t, err) authMethodName := resourcePrefix + "-k8s-auth-method" authMethod, _, err := consul.ACL().AuthMethodRead(authMethodName, &api.QueryOptions{Token: bootToken}) @@ -669,7 +692,8 @@ func TestRun_ConnectInjectAuthMethod(t *testing.T) { func TestRun_ConnectInjectAuthMethodUpdates(t *testing.T) { t.Parallel() - k8s, testClient := completeSetup(t) + k8s, testSvr := completeSetup(t) + defer testSvr.Stop() caCert, jwtToken := setUpK8sServiceAccount(t, k8s, ns) ui := cli.NewMockUi() @@ -685,18 +709,20 @@ func TestRun_ConnectInjectAuthMethodUpdates(t *testing.T) { "-timeout=1m", "-resource-prefix=" + resourcePrefix, "-k8s-namespace=" + ns, - "-addresses", strings.Split(testClient.TestServer.HTTPAddr, ":")[0], - "-http-port", strings.Split(testClient.TestServer.HTTPAddr, ":")[1], - "-grpc-port", strings.Split(testClient.TestServer.GRPCAddr, ":")[1], + "-server-address", strings.Split(testSvr.HTTPAddr, ":")[0], + "-server-port", strings.Split(testSvr.HTTPAddr, ":")[1], "-connect-inject", "-acl-binding-rule-selector=" + bindingRuleSelector, + "-consul-api-timeout", "5s", }) require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) // Check that the auth method was created. bootToken := getBootToken(t, k8s, resourcePrefix, ns) - consul := testClient.APIClient - + consul, err := api.NewClient(&api.Config{ + Address: testSvr.HTTPAddr, + }) + require.NoError(t, err) authMethodName := resourcePrefix + "-k8s-auth-method" authMethod, _, err := consul.ACL().AuthMethodRead(authMethodName, &api.QueryOptions{Token: bootToken}) @@ -727,11 +753,12 @@ func TestRun_ConnectInjectAuthMethodUpdates(t *testing.T) { "-timeout=1m", "-resource-prefix=" + resourcePrefix, "-k8s-namespace=" + ns, - "-addresses", strings.Split(testClient.TestServer.HTTPAddr, ":")[0], - "-http-port", strings.Split(testClient.TestServer.HTTPAddr, ":")[1], + "-server-address", strings.Split(testSvr.HTTPAddr, ":")[0], + "-server-port", strings.Split(testSvr.HTTPAddr, ":")[1], "-acl-binding-rule-selector=" + bindingRuleSelector, "-connect-inject", "-auth-method-host=" + kubernetesHost, + "-consul-api-timeout", "5s", }) require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) @@ -750,11 +777,12 @@ func TestRun_ConnectInjectAuthMethodUpdates(t *testing.T) { // Test that ACL binding rules are updated if the rule selector changes. 
func TestRun_BindingRuleUpdates(t *testing.T) { - k8s, testClient := completeSetup(t) + k8s, testSvr := completeSetup(t) setUpK8sServiceAccount(t, k8s, ns) + defer testSvr.Stop() consul, err := api.NewClient(&api.Config{ - Address: testClient.TestServer.HTTPAddr, + Address: testSvr.HTTPAddr, }) require.NoError(t, err) @@ -762,10 +790,10 @@ func TestRun_BindingRuleUpdates(t *testing.T) { commonArgs := []string{ "-resource-prefix=" + resourcePrefix, "-k8s-namespace=" + ns, - "-addresses", strings.Split(testClient.TestServer.HTTPAddr, ":")[0], - "-http-port", strings.Split(testClient.TestServer.HTTPAddr, ":")[1], - "-grpc-port", strings.Split(testClient.TestServer.GRPCAddr, ":")[1], + "-server-address", strings.Split(testSvr.HTTPAddr, ":")[0], + "-server-port", strings.Split(testSvr.HTTPAddr, ":")[1], "-connect-inject", + "-consul-api-timeout", "5s", } firstRunArgs := append(commonArgs, "-acl-binding-rule-selector=serviceaccount.name!=default", @@ -829,17 +857,18 @@ func TestRun_BindingRuleUpdates(t *testing.T) { // Test that the catalog sync policy is updated if the Consul node name changes. func TestRun_SyncPolicyUpdates(t *testing.T) { t.Parallel() - k8s, testClient := completeSetup(t) + k8s, testSvr := completeSetup(t) + defer testSvr.Stop() setUpK8sServiceAccount(t, k8s, ns) ui := cli.NewMockUi() commonArgs := []string{ "-resource-prefix=" + resourcePrefix, "-k8s-namespace=" + ns, - "-addresses", strings.Split(testClient.TestServer.HTTPAddr, ":")[0], - "-http-port", strings.Split(testClient.TestServer.HTTPAddr, ":")[1], - "-grpc-port", strings.Split(testClient.TestServer.GRPCAddr, ":")[1], + "-server-address", strings.Split(testSvr.HTTPAddr, ":")[0], + "-server-port", strings.Split(testSvr.HTTPAddr, ":")[1], "-sync-catalog", + "-consul-api-timeout", "5s", } firstRunArgs := append(commonArgs, "-sync-consul-node-name=k8s-sync", @@ -860,7 +889,7 @@ func TestRun_SyncPolicyUpdates(t *testing.T) { // Create consul client bootToken := getBootToken(t, k8s, resourcePrefix, ns) consul, err := api.NewClient(&api.Config{ - Address: testClient.TestServer.HTTPAddr, + Address: testSvr.HTTPAddr, Token: bootToken, }) require.NoError(t, err) @@ -918,9 +947,10 @@ func TestRun_ErrorsOnDuplicateACLPolicy(t *testing.T) { tokenFile := common.WriteTempFile(t, bootToken) k8s, testAgent := completeBootstrappedSetup(t, bootToken) setUpK8sServiceAccount(t, k8s, ns) + defer testAgent.Stop() consul, err := api.NewClient(&api.Config{ - Address: testAgent.TestServer.HTTPAddr, + Address: testAgent.HTTPAddr, Token: bootToken, }) require.NoError(t, err) @@ -945,10 +975,10 @@ func TestRun_ErrorsOnDuplicateACLPolicy(t *testing.T) { "-bootstrap-token-file", tokenFile, "-resource-prefix=" + resourcePrefix, "-k8s-namespace=" + ns, - "-addresses", strings.Split(testAgent.TestServer.HTTPAddr, ":")[0], - "-http-port", strings.Split(testAgent.TestServer.HTTPAddr, ":")[1], - "-grpc-port", strings.Split(testAgent.TestServer.GRPCAddr, ":")[1], + "-server-address", strings.Split(testAgent.HTTPAddr, ":")[0], + "-server-port", strings.Split(testAgent.HTTPAddr, ":")[1], "-sync-catalog", + "-consul-api-timeout", "5s", } responseCode := cmd.Run(cmdArgs) @@ -970,7 +1000,7 @@ func TestRun_DelayedServers(t *testing.T) { t.Parallel() k8s := fake.NewSimpleClientset() setUpK8sServiceAccount(t, k8s, ns) - randomPorts := freeport.GetN(t, 7) + randomPorts := freeport.GetN(t, 6) ui := cli.NewMockUi() cmd := Command{ @@ -987,16 +1017,16 @@ func TestRun_DelayedServers(t *testing.T) { "-timeout=1m", "-resource-prefix=" + resourcePrefix, 
"-k8s-namespace=" + ns, - "-addresses=127.0.0.1", - "-http-port=" + strconv.Itoa(randomPorts[1]), - "-grpc-port=" + strconv.Itoa(randomPorts[2]), + "-server-address=127.0.0.1", + "-server-port=" + strconv.Itoa(randomPorts[1]), + "-consul-api-timeout", "5s", }) close(done) }() // Asynchronously start the test server after a delay. testServerReady := make(chan bool) - var srv *test.TestServerClient + var srv *testutil.TestServer go func() { // Start the servers after a delay between 100 and 500ms. // It's randomized to ensure we're not relying on specific timing. @@ -1004,17 +1034,16 @@ func TestRun_DelayedServers(t *testing.T) { time.Sleep(time.Duration(delay) * time.Millisecond) var err error - srv = test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { + srv, err = testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { c.ACL.Enabled = true c.Ports = &testutil.TestPortConfig{ DNS: randomPorts[0], HTTP: randomPorts[1], - GRPC: randomPorts[2], - HTTPS: randomPorts[3], - SerfLan: randomPorts[4], - SerfWan: randomPorts[5], - Server: randomPorts[6], + HTTPS: randomPorts[2], + SerfLan: randomPorts[3], + SerfWan: randomPorts[4], + Server: randomPorts[5], } }) require.NoError(t, err) @@ -1024,7 +1053,7 @@ func TestRun_DelayedServers(t *testing.T) { // Wait for server to come up select { case <-testServerReady: - defer srv.TestServer.Stop() + defer srv.Stop() case <-time.After(5 * time.Second): require.FailNow(t, "test server took longer than 5s to come up") } @@ -1042,7 +1071,7 @@ func TestRun_DelayedServers(t *testing.T) { // Check that it has the right policies. consul, err := api.NewClient(&api.Config{ - Address: srv.TestServer.HTTPAddr, + Address: srv.HTTPAddr, Token: bootToken, }) require.NoError(t, err) @@ -1117,15 +1146,12 @@ func TestRun_NoLeader(t *testing.T) { serverURL, err := url.Parse(consulServer.URL) require.NoError(t, err) - port, err := strconv.Atoi(serverURL.Port()) - require.NoError(t, err) // Run the command. ui := cli.NewMockUi() cmd := Command{ UI: ui, clientset: k8s, - watcher: test.MockConnMgrForIPAndPort(serverURL.Hostname(), port), } done := make(chan bool) @@ -1135,8 +1161,9 @@ func TestRun_NoLeader(t *testing.T) { "-timeout=1m", "-resource-prefix=" + resourcePrefix, "-k8s-namespace=" + ns, - "-addresses=" + serverURL.Hostname(), - "-http-port=" + serverURL.Port(), + "-server-address=" + serverURL.Hostname(), + "-server-port=" + serverURL.Port(), + "-consul-api-timeout", "5s", }) close(done) }() @@ -1144,8 +1171,8 @@ func TestRun_NoLeader(t *testing.T) { select { case <-done: require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) - case <-time.After(15 * time.Second): - require.FailNow(t, "command did not complete within 15s") + case <-time.After(5 * time.Second): + require.FailNow(t, "command did not complete within 5s") } // Test that the bootstrap kube secret is created. @@ -1373,22 +1400,20 @@ func TestRun_ClientPolicyAndBindingRuleRetry(t *testing.T) { serverURL, err := url.Parse(consulServer.URL) require.NoError(t, err) - port, err := strconv.Atoi(serverURL.Port()) - require.NoError(t, err) // Run the command. 
ui := cli.NewMockUi() cmd := Command{ UI: ui, clientset: k8s, - watcher: test.MockConnMgrForIPAndPort(serverURL.Hostname(), port), } responseCode := cmd.Run([]string{ "-timeout=1m", "-resource-prefix=" + resourcePrefix, "-k8s-namespace=" + ns, - "-addresses=" + serverURL.Hostname(), - "-http-port=" + serverURL.Port(), + "-server-address=" + serverURL.Hostname(), + "-server-port=" + serverURL.Port(), + "-consul-api-timeout", "5s", }) require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) @@ -1506,16 +1531,15 @@ func TestRun_AlreadyBootstrapped(t *testing.T) { serverURL, err := url.Parse(consulServer.URL) require.NoError(t, err) - port, err := strconv.Atoi(serverURL.Port()) - require.NoError(t, err) setUpK8sServiceAccount(t, k8s, ns) cmdArgs := []string{ "-timeout=500ms", "-resource-prefix=" + resourcePrefix, "-k8s-namespace=" + ns, - "-addresses=" + serverURL.Hostname(), - "-http-port=" + serverURL.Port(), + "-server-address=" + serverURL.Hostname(), + "-server-port=" + serverURL.Port(), + "-consul-api-timeout", "5s", } // Create the bootstrap secret. @@ -1537,7 +1561,7 @@ func TestRun_AlreadyBootstrapped(t *testing.T) { // Write token to a file. bootTokenFile, err := os.CreateTemp("", "") require.NoError(t, err) - defer os.RemoveAll(bootTokenFile.Name()) + defer os.Remove(bootTokenFile.Name()) _, err = bootTokenFile.WriteString("old-token") require.NoError(t, err) @@ -1551,7 +1575,6 @@ func TestRun_AlreadyBootstrapped(t *testing.T) { cmd := Command{ UI: ui, clientset: k8s, - watcher: test.MockConnMgrForIPAndPort(serverURL.Hostname(), port), } responseCode := cmd.Run(cmdArgs) @@ -1638,14 +1661,15 @@ func TestRun_AlreadyBootstrapped_ServerTokenExists(t *testing.T) { bootToken := "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee" k8s, testAgent := completeBootstrappedSetup(t, bootToken) setUpK8sServiceAccount(t, k8s, ns) + defer testAgent.Stop() cmdArgs := []string{ "-timeout=1m", "-k8s-namespace", ns, - "-addresses", strings.Split(testAgent.TestServer.HTTPAddr, ":")[0], - "-http-port", strings.Split(testAgent.TestServer.HTTPAddr, ":")[1], - "-grpc-port", strings.Split(testAgent.TestServer.GRPCAddr, ":")[1], + "-server-address", strings.Split(testAgent.HTTPAddr, ":")[0], + "-server-port", strings.Split(testAgent.HTTPAddr, ":")[1], "-resource-prefix", resourcePrefix, + "-consul-api-timeout", "5s", } if tokenInK8sSecret { @@ -1662,7 +1686,7 @@ func TestRun_AlreadyBootstrapped_ServerTokenExists(t *testing.T) { // Write token to a file. bootTokenFile, err := os.CreateTemp("", "") require.NoError(t, err) - defer os.RemoveAll(bootTokenFile.Name()) + defer os.Remove(bootTokenFile.Name()) _, err = bootTokenFile.WriteString(bootToken) require.NoError(t, err) @@ -1672,7 +1696,7 @@ func TestRun_AlreadyBootstrapped_ServerTokenExists(t *testing.T) { } consulClient, err := api.NewClient(&api.Config{ - Address: testAgent.TestServer.HTTPAddr, + Address: testAgent.HTTPAddr, Token: bootToken, }) require.NoError(t, err) @@ -1682,7 +1706,6 @@ func TestRun_AlreadyBootstrapped_ServerTokenExists(t *testing.T) { clientset: k8s, } - cmd.init() // Create the server policy and token _before_ we run the command. 
agentPolicyRules, err := cmd.agentRules() require.NoError(t, err) @@ -1693,7 +1716,7 @@ func TestRun_AlreadyBootstrapped_ServerTokenExists(t *testing.T) { }, nil) require.NoError(t, err) _, _, err = consulClient.ACL().TokenCreate(&api.ACLToken{ - Description: fmt.Sprintf("Server Token for %s", strings.Split(testAgent.TestServer.HTTPAddr, ":")[0]), + Description: fmt.Sprintf("Server Token for %s", strings.Split(testAgent.HTTPAddr, ":")[0]), Policies: []*api.ACLTokenPolicyLink{ { Name: policy.Name, @@ -1757,25 +1780,23 @@ func TestRun_SkipBootstrapping_WhenServersAreDisabled(t *testing.T) { serverURL, err := url.Parse(consulServer.URL) require.NoError(t, err) - port, err := strconv.Atoi(serverURL.Port()) - require.NoError(t, err) // Run the command. ui := cli.NewMockUi() cmd := Command{ UI: ui, clientset: k8s, - watcher: test.MockConnMgrForIPAndPort(serverURL.Hostname(), port), } responseCode := cmd.Run([]string{ "-timeout=500ms", "-resource-prefix=" + resourcePrefix, "-k8s-namespace=" + ns, - "-addresses=" + serverURL.Hostname(), - "-http-port=" + serverURL.Port(), + "-server-address=" + serverURL.Hostname(), + "-server-port=" + serverURL.Port(), "-bootstrap-token-file=" + tokenFile, "-set-server-tokens=false", "-client=false", // disable client token, so there are fewer calls + "-consul-api-timeout", "5s", }) require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) @@ -1797,30 +1818,69 @@ func TestRun_SkipBootstrapping_WhenServersAreDisabled(t *testing.T) { // Test that we exit after timeout. func TestRun_Timeout(t *testing.T) { t.Parallel() - k8s, testClient := completeSetup(t) + k8s := fake.NewSimpleClientset() + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + clientset: k8s, + } + + responseCode := cmd.Run([]string{ + "-timeout=500ms", + "-resource-prefix=" + resourcePrefix, + "-k8s-namespace=" + ns, + "-server-address=foo", + "-consul-api-timeout", "5s", + }) + require.Equal(t, 1, responseCode, ui.ErrorWriter.String()) +} + +// Test that the bootstrapping process can make calls to Consul API over HTTPS +// when the consul agent is configured with HTTPS. +func TestRun_HTTPS(t *testing.T) { + t.Parallel() + k8s := fake.NewSimpleClientset() setUpK8sServiceAccount(t, k8s, ns) - _, err := api.NewClient(&api.Config{ - Address: testClient.TestServer.HTTPAddr, + caFile, certFile, keyFile := test.GenerateServerCerts(t) + + srv, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { + c.ACL.Enabled = true + + c.CAFile = caFile + c.CertFile = certFile + c.KeyFile = keyFile }) require.NoError(t, err) - ui := cli.NewMockUi() + defer srv.Stop() + // Run the command. 
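	// To reach the TLS-enabled test server, the invocation below must dial
	// the HTTPS port and trust the CA that signed the server certificate,
	// hence the -use-https, -consul-tls-server-name, and -consul-ca-cert
	// flags paired with the host and port split out of srv.HTTPSAddr.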
+ ui := cli.NewMockUi() cmd := Command{ UI: ui, clientset: k8s, - watcher: test.MockConnMgrForIPAndPort("localhost", 12345), } responseCode := cmd.Run([]string{ - "-timeout=500ms", + "-timeout=1m", "-resource-prefix=" + resourcePrefix, "-k8s-namespace=" + ns, - "-addresses", strings.Split(testClient.TestServer.HTTPAddr, ":")[0], - "-http-port", strings.Split(testClient.TestServer.HTTPAddr, ":")[1], - "-grpc-port", strings.Split(testClient.TestServer.GRPCAddr, ":")[1], + "-use-https", + "-consul-tls-server-name", "server.dc1.consul", + "-consul-ca-cert", caFile, + "-server-address=" + strings.Split(srv.HTTPSAddr, ":")[0], + "-server-port=" + strings.Split(srv.HTTPSAddr, ":")[1], + "-consul-api-timeout", "5s", }) - require.Equal(t, 1, responseCode, ui.ErrorWriter.String()) + require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) + + // Test that the bootstrap token is created to make sure the bootstrapping succeeded. + // The presence of the bootstrap token tells us that the API calls to Consul have been successful. + tokenSecret, err := k8s.CoreV1().Secrets(ns).Get(context.Background(), resourcePrefix+"-bootstrap-acl-token", metav1.GetOptions{}) + require.NoError(t, err) + require.NotNil(t, tokenSecret) + _, ok := tokenSecret.Data["token"] + require.True(t, ok) } // Test that the ACL replication token created from the primary DC can be used @@ -1828,7 +1888,8 @@ func TestRun_Timeout(t *testing.T) { func TestRun_ACLReplicationTokenValid(t *testing.T) { t.Parallel() - secondaryK8s, secondaryConsulClient, secondaryAddr, secondaryGRPCAddr, aclReplicationToken := completeReplicatedSetup(t) + secondaryK8s, secondaryConsulClient, secondaryAddr, aclReplicationToken, clean := completeReplicatedSetup(t) + defer clean() setUpK8sServiceAccount(t, secondaryK8s, ns) // completeReplicatedSetup ran the command in our primary dc so now we @@ -1844,14 +1905,14 @@ func TestRun_ACLReplicationTokenValid(t *testing.T) { "-federation", "-timeout=1m", "-k8s-namespace=" + ns, - "-addresses", strings.Split(secondaryAddr, ":")[0], - "-http-port", strings.Split(secondaryAddr, ":")[1], - "-grpc-port", strings.Split(secondaryGRPCAddr, ":")[1], + "-server-address", strings.Split(secondaryAddr, ":")[0], + "-server-port", strings.Split(secondaryAddr, ":")[1], "-resource-prefix=" + resourcePrefix, "-acl-replication-token-file", tokenFile, "-auth-method-host=" + "https://my-kube.com", "-client", "-mesh-gateway", + "-consul-api-timeout", "5s", } responseCode := secondaryCmd.Run(secondaryCmdArgs) require.Equal(t, 0, responseCode, secondaryUI.ErrorWriter.String()) @@ -1886,8 +1947,9 @@ func TestRun_AnonPolicy_IgnoredWithReplication(t *testing.T) { t.Run(flag, func(t *testing.T) { bootToken := "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee" tokenFile := common.WriteTempFile(t, bootToken) - k8s, consul, serverAddr, serverGRPCAddr := mockReplicatedSetup(t, bootToken) + k8s, consul, serverAddr, cleanup := mockReplicatedSetup(t, bootToken) setUpK8sServiceAccount(t, k8s, ns) + defer cleanup() // Run the command. 
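	// Because -acl-replication-token-file marks this datacenter as a
	// secondary, the anonymous-token flag under test should be accepted but
	// produce no anonymous-token policy here, which is what this test
	// asserts.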
ui := cli.NewMockUi() @@ -1899,12 +1961,11 @@ func TestRun_AnonPolicy_IgnoredWithReplication(t *testing.T) { cmdArgs := append([]string{ "-timeout=1m", "-k8s-namespace=" + ns, - "-auth-method-host=https://my-kube.com", "-acl-replication-token-file", tokenFile, - "-addresses", strings.Split(serverAddr, ":")[0], - "-http-port", strings.Split(serverAddr, ":")[1], - "-grpc-port", strings.Split(serverGRPCAddr, ":")[1], + "-server-address", strings.Split(serverAddr, ":")[0], + "-server-port", strings.Split(serverAddr, ":")[1], "-resource-prefix=" + resourcePrefix, + "-consul-api-timeout", "5s", }, flag) responseCode := cmd.Run(cmdArgs) require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) @@ -1921,6 +1982,63 @@ func TestRun_AnonPolicy_IgnoredWithReplication(t *testing.T) { } } +// Test that when the -server-address contains a cloud-auto join string, +// we are still able to bootstrap ACLs. +func TestRun_CloudAutoJoin(t *testing.T) { + t.Parallel() + + k8s, testSvr := completeSetup(t) + defer testSvr.Stop() + setUpK8sServiceAccount(t, k8s, ns) + + // create a mock provider + // that always returns the server address + // provided through the cloud-auto join string + provider := new(mocks.MockProvider) + // create stubs for our MockProvider so that it returns + // the address of the test agent + provider.On("Addrs", mock.Anything, mock.Anything).Return([]string{"127.0.0.1"}, nil) + + // Run the command. + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + clientset: k8s, + providers: map[string]discover.Provider{"mock": provider}, + } + args := []string{ + "-timeout=1m", + "-k8s-namespace=" + ns, + "-resource-prefix=" + resourcePrefix, + "-server-address", "provider=mock", + "-server-port", strings.Split(testSvr.HTTPAddr, ":")[1], + "-consul-api-timeout", "5s", + } + responseCode := cmd.Run(args) + require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) + + // check that the provider has been called + provider.AssertNumberOfCalls(t, "Addrs", 1) + + // Test that the bootstrap kube secret is created. + bootToken := getBootToken(t, k8s, resourcePrefix, ns) + + // Check that it has the right policies. + consul, err := api.NewClient(&api.Config{ + Address: testSvr.HTTPAddr, + Token: bootToken, + }) + require.NoError(t, err) + tokenData, _, err := consul.ACL().TokenReadSelf(nil) + require.NoError(t, err) + require.Equal(t, "global-management", tokenData.Policies[0].Name) + + // Check that the agent policy was created. + agentPolicy := policyExists(t, "agent-token", consul) + // Should be a global policy. + require.Len(t, agentPolicy.Datacenters, 0) +} + func TestRun_GatewayErrors(t *testing.T) { t.Parallel() @@ -1949,7 +2067,8 @@ func TestRun_GatewayErrors(t *testing.T) { for testName, c := range cases { t.Run(testName, func(tt *testing.T) { - k8s, testClient := completeSetup(tt) + k8s, testSvr := completeSetup(tt) + defer testSvr.Stop() setUpK8sServiceAccount(t, k8s, ns) require := require.New(tt) @@ -1963,9 +2082,9 @@ func TestRun_GatewayErrors(t *testing.T) { "-timeout=500ms", "-resource-prefix=" + resourcePrefix, "-k8s-namespace=" + ns, - "-addresses", strings.Split(testClient.TestServer.HTTPAddr, ":")[0], - "-http-port", strings.Split(testClient.TestServer.HTTPAddr, ":")[1], - "-grpc-port", strings.Split(testClient.TestServer.GRPCAddr, ":")[1], + "-server-address", strings.Split(testSvr.HTTPAddr, ":")[0], + "-server-port", strings.Split(testSvr.HTTPAddr, ":")[1], + "-consul-api-timeout", "5s", } cmdArgs = append(cmdArgs, c.flags...) 
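// The cloud auto-join path above resolves the "provider=mock" string in
// -server-address by calling Addrs on the registered provider. A hand-rolled
// stand-in for the testify-based mocks.MockProvider, assuming go-discover's
// standard Provider interface, could look like the sketch below (illustrative
// only; the diff itself uses the generated mock).

package main

import (
	"fmt"
	"log"

	discover "github.com/hashicorp/go-discover"
)

// staticProvider always returns a fixed address list, which is all the test
// needs: go-discover hands it the parsed key/value args from the join string.
type staticProvider struct{ addrs []string }

func (p *staticProvider) Addrs(args map[string]string, l *log.Logger) ([]string, error) {
	return p.addrs, nil
}

func (p *staticProvider) Help() string { return "static: returns fixed addresses" }

func main() {
	d := discover.Discover{
		Providers: map[string]discover.Provider{
			"mock": &staticProvider{addrs: []string{"127.0.0.1"}},
		},
	}
	addrs, err := d.Addrs("provider=mock", log.Default())
	fmt.Println(addrs, err) // [127.0.0.1] <nil>
}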
responseCode := cmd.Run(cmdArgs) @@ -1988,11 +2107,17 @@ func TestRun_PoliciesAndBindingRulesForACLLogin_PrimaryDatacenter(t *testing.T) PolicyNames []string Roles []string }{ + { + TestName: "Controller", + TokenFlags: []string{"-controller"}, + PolicyNames: []string{"controller-policy"}, + Roles: []string{resourcePrefix + "-controller-acl-role"}, + }, { TestName: "Connect Inject", TokenFlags: []string{"-connect-inject"}, PolicyNames: []string{"connect-inject-policy"}, - Roles: []string{resourcePrefix + "-connect-inject-acl-role"}, + Roles: []string{resourcePrefix + "-connect-injector-acl-role"}, }, { TestName: "Sync Catalog", @@ -2029,9 +2154,9 @@ func TestRun_PoliciesAndBindingRulesForACLLogin_PrimaryDatacenter(t *testing.T) TokenFlags: []string{"-terminating-gateway-name=terminating", "-terminating-gateway-name=gateway", "-terminating-gateway-name=another-gateway"}, - PolicyNames: []string{"terminating-policy", - "gateway-policy", - "another-gateway-policy"}, + PolicyNames: []string{resourcePrefix + "-terminating-policy", + resourcePrefix + "-gateway-policy", + resourcePrefix + "-another-gateway-policy"}, Roles: []string{resourcePrefix + "-terminating-acl-role", resourcePrefix + "-gateway-acl-role", resourcePrefix + "-another-gateway-acl-role"}, @@ -2041,9 +2166,9 @@ func TestRun_PoliciesAndBindingRulesForACLLogin_PrimaryDatacenter(t *testing.T) TokenFlags: []string{"-ingress-gateway-name=ingress", "-ingress-gateway-name=gateway", "-ingress-gateway-name=another-gateway"}, - PolicyNames: []string{"ingress-policy", - "gateway-policy", - "another-gateway-policy"}, + PolicyNames: []string{resourcePrefix + "-ingress-policy", + resourcePrefix + "-gateway-policy", + resourcePrefix + "-another-gateway-policy"}, Roles: []string{resourcePrefix + "-ingress-acl-role", resourcePrefix + "-gateway-acl-role", resourcePrefix + "-another-gateway-acl-role"}, @@ -2051,7 +2176,8 @@ func TestRun_PoliciesAndBindingRulesForACLLogin_PrimaryDatacenter(t *testing.T) } for _, c := range cases { t.Run(c.TestName, func(t *testing.T) { - k8s, testClient := completeSetup(t) + k8s, testSvr := completeSetup(t) + defer testSvr.Stop() setUpK8sServiceAccount(t, k8s, ns) // Run the command. @@ -2064,9 +2190,9 @@ func TestRun_PoliciesAndBindingRulesForACLLogin_PrimaryDatacenter(t *testing.T) "-timeout=500ms", "-resource-prefix=" + resourcePrefix, "-k8s-namespace=" + ns, - "-addresses", strings.Split(testClient.TestServer.HTTPAddr, ":")[0], - "-http-port", strings.Split(testClient.TestServer.HTTPAddr, ":")[1], - "-grpc-port", strings.Split(testClient.TestServer.GRPCAddr, ":")[1], + "-server-address", strings.Split(testSvr.HTTPAddr, ":")[0], + "-server-port", strings.Split(testSvr.HTTPAddr, ":")[1], + "-consul-api-timeout", "5s", }, c.TokenFlags...) 
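// The case tables above pin down the naming scheme these assertions rely on:
// roles are "<resource-prefix>-<component>-acl-role", gateway policies now
// carry the resource prefix too, and federated secondary datacenters get a
// "-<dc>" suffix so names stay globally unique. A small illustrative helper
// (not from the diff) capturing that convention:

package main

import "fmt"

// aclRoleName reproduces the naming convention asserted in the test cases.
func aclRoleName(resourcePrefix, component, dc string, primary bool) string {
	name := fmt.Sprintf("%s-%s-acl-role", resourcePrefix, component)
	if !primary {
		name = fmt.Sprintf("%s-%s", name, dc)
	}
	return name
}

func main() {
	fmt.Println(aclRoleName("release-name-consul", "controller", "", true))
	// release-name-consul-controller-acl-role
	fmt.Println(aclRoleName("release-name-consul", "controller", "dc2", false))
	// release-name-consul-controller-acl-role-dc2
}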
cmd.init() responseCode := cmd.Run(cmdArgs) @@ -2074,7 +2200,7 @@ func TestRun_PoliciesAndBindingRulesForACLLogin_PrimaryDatacenter(t *testing.T) bootToken := getBootToken(t, k8s, resourcePrefix, ns) consul, err := api.NewClient(&api.Config{ - Address: testClient.TestServer.HTTPAddr, + Address: testSvr.HTTPAddr, Token: bootToken, }) require.NoError(t, err) @@ -2139,12 +2265,19 @@ func TestRun_PoliciesAndBindingRulesACLLogin_SecondaryDatacenter(t *testing.T) { Roles []string GlobalAuthMethod bool }{ + { + TestName: "Controller", + TokenFlags: []string{"-controller"}, + PolicyNames: []string{"controller-policy-" + secondaryDatacenter}, + Roles: []string{resourcePrefix + "-controller-acl-role-" + secondaryDatacenter}, + GlobalAuthMethod: true, + }, { TestName: "Connect Inject", TokenFlags: []string{"-connect-inject"}, PolicyNames: []string{"connect-inject-policy-" + secondaryDatacenter}, - Roles: []string{resourcePrefix + "-connect-inject-acl-role-" + secondaryDatacenter}, - GlobalAuthMethod: true, + Roles: []string{resourcePrefix + "-connect-injector-acl-role-" + secondaryDatacenter}, + GlobalAuthMethod: false, }, { TestName: "Sync Catalog", @@ -2186,9 +2319,9 @@ func TestRun_PoliciesAndBindingRulesACLLogin_SecondaryDatacenter(t *testing.T) { TokenFlags: []string{"-terminating-gateway-name=terminating", "-terminating-gateway-name=gateway", "-terminating-gateway-name=another-gateway"}, - PolicyNames: []string{"terminating-policy-" + secondaryDatacenter, - "gateway-policy-" + secondaryDatacenter, - "another-gateway-policy-" + secondaryDatacenter}, + PolicyNames: []string{resourcePrefix + "-terminating-policy-" + secondaryDatacenter, + resourcePrefix + "-gateway-policy-" + secondaryDatacenter, + resourcePrefix + "-another-gateway-policy-" + secondaryDatacenter}, Roles: []string{resourcePrefix + "-terminating-acl-role-" + secondaryDatacenter, resourcePrefix + "-gateway-acl-role-" + secondaryDatacenter, resourcePrefix + "-another-gateway-acl-role-" + secondaryDatacenter}, @@ -2199,9 +2332,9 @@ func TestRun_PoliciesAndBindingRulesACLLogin_SecondaryDatacenter(t *testing.T) { TokenFlags: []string{"-ingress-gateway-name=ingress", "-ingress-gateway-name=gateway", "-ingress-gateway-name=another-gateway"}, - PolicyNames: []string{"ingress-policy-" + secondaryDatacenter, - "gateway-policy-" + secondaryDatacenter, - "another-gateway-policy-" + secondaryDatacenter}, + PolicyNames: []string{resourcePrefix + "-ingress-policy-" + secondaryDatacenter, + resourcePrefix + "-gateway-policy-" + secondaryDatacenter, + resourcePrefix + "-another-gateway-policy-" + secondaryDatacenter}, Roles: []string{resourcePrefix + "-ingress-acl-role-" + secondaryDatacenter, resourcePrefix + "-gateway-acl-role-" + secondaryDatacenter, resourcePrefix + "-another-gateway-acl-role-" + secondaryDatacenter}, @@ -2212,8 +2345,9 @@ func TestRun_PoliciesAndBindingRulesACLLogin_SecondaryDatacenter(t *testing.T) { t.Run(c.TestName, func(t *testing.T) { bootToken := "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee" tokenFile := common.WriteTempFile(t, bootToken) - k8s, consul, consulHTTPAddr, consulGRPCAddr := mockReplicatedSetup(t, bootToken) + k8s, consul, consulHTTPAddr, cleanup := mockReplicatedSetup(t, bootToken) setUpK8sServiceAccount(t, k8s, ns) + defer cleanup() // Run the command. 
ui := cli.NewMockUi() @@ -2228,9 +2362,9 @@ func TestRun_PoliciesAndBindingRulesACLLogin_SecondaryDatacenter(t *testing.T) { "-k8s-namespace=" + ns, "-auth-method-host=" + "https://my-kube.com", "-acl-replication-token-file", tokenFile, - "-addresses", strings.Split(consulHTTPAddr, ":")[0], - "-http-port", strings.Split(consulHTTPAddr, ":")[1], - "-grpc-port", strings.Split(consulGRPCAddr, ":")[1], + "-server-address", strings.Split(consulHTTPAddr, ":")[0], + "-server-port", strings.Split(consulHTTPAddr, ":")[1], + "-consul-api-timeout", "5s", }, c.TokenFlags...) cmd.init() responseCode := cmd.Run(cmdArgs) @@ -2299,10 +2433,16 @@ func TestRun_ValidateLoginToken_PrimaryDatacenter(t *testing.T) { ServiceAccountName string GlobalToken bool }{ + { + ComponentName: "controller", + TokenFlags: []string{"-controller"}, + Roles: []string{resourcePrefix + "-controller-acl-role"}, + GlobalToken: false, + }, { ComponentName: "connect-injector", TokenFlags: []string{"-connect-inject"}, - Roles: []string{resourcePrefix + "-connect-inject-acl-role"}, + Roles: []string{resourcePrefix + "-connect-injector-acl-role"}, GlobalToken: false, }, { @@ -2318,11 +2458,10 @@ func TestRun_ValidateLoginToken_PrimaryDatacenter(t *testing.T) { GlobalToken: false, }, { - ComponentName: "snapshot-agent", - TokenFlags: []string{"-snapshot-agent"}, - Roles: []string{resourcePrefix + "-snapshot-agent-acl-role"}, - GlobalToken: false, - ServiceAccountName: resourcePrefix + "-server", + ComponentName: "snapshot-agent", + TokenFlags: []string{"-snapshot-agent"}, + Roles: []string{resourcePrefix + "-snapshot-agent-acl-role"}, + GlobalToken: false, }, { ComponentName: "mesh-gateway", @@ -2365,7 +2504,8 @@ func TestRun_ValidateLoginToken_PrimaryDatacenter(t *testing.T) { serviceAccountName = c.ServiceAccountName } - k8s, testClient := completeSetup(t) + k8s, testSvr := completeSetup(t) + defer testSvr.Stop() _, jwtToken := setUpK8sServiceAccount(t, k8s, ns) k8sMockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -2391,16 +2531,16 @@ func TestRun_ValidateLoginToken_PrimaryDatacenter(t *testing.T) { "-resource-prefix=" + resourcePrefix, "-k8s-namespace=" + ns, "-auth-method-host=" + k8sMockServer.URL, - "-addresses", strings.Split(testClient.TestServer.HTTPAddr, ":")[0], - "-http-port", strings.Split(testClient.TestServer.HTTPAddr, ":")[1], - "-grpc-port", strings.Split(testClient.TestServer.GRPCAddr, ":")[1], + "-server-address", strings.Split(testSvr.HTTPAddr, ":")[0], + "-server-port", strings.Split(testSvr.HTTPAddr, ":")[1], + "-consul-api-timeout", "5s", }, c.TokenFlags...) 
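// What these token-validation cases ultimately exercise is a Consul ACL
// login: the component's ServiceAccount JWT is exchanged for an ACL token
// through the Kubernetes auth method. A minimal sketch using the public API
// client; the address, auth method name, and JWT are placeholders mirroring
// the test fixtures, not values from the diff.

package main

import (
	"fmt"

	"github.com/hashicorp/consul/api"
)

// loginWithJWT exchanges a ServiceAccount JWT for a Consul ACL token via the
// named auth method.
func loginWithJWT(addr, authMethod, jwt, component string) (*api.ACLToken, error) {
	client, err := api.NewClient(&api.Config{Address: addr})
	if err != nil {
		return nil, err
	}
	tok, _, err := client.ACL().Login(&api.ACLLoginParams{
		AuthMethod:  authMethod,
		BearerToken: jwt,
		Meta:        map[string]string{"component": component},
	}, nil)
	return tok, err
}

func main() {
	tok, err := loginWithJWT("127.0.0.1:8500",
		"release-name-consul-k8s-component-auth-method",
		"<serviceaccount-jwt>", "sync-catalog")
	fmt.Println(tok, err)
}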
cmd.init() responseCode := cmd.Run(cmdArgs) require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) client, err := api.NewClient(&api.Config{ - Address: testClient.TestServer.HTTPAddr, + Address: testSvr.HTTPAddr, }) require.NoError(t, err) @@ -2435,12 +2575,19 @@ func TestRun_ValidateLoginToken_SecondaryDatacenter(t *testing.T) { GlobalToken bool }{ { - ComponentName: "connect-injector", - TokenFlags: []string{"-connect-inject"}, - Roles: []string{resourcePrefix + "-connect-inject-acl-role-dc2"}, + ComponentName: "controller", + TokenFlags: []string{"-controller"}, + Roles: []string{resourcePrefix + "-controller-acl-role-dc2"}, GlobalAuthMethod: true, GlobalToken: true, }, + { + ComponentName: "connect-injector", + TokenFlags: []string{"-connect-inject"}, + Roles: []string{resourcePrefix + "-connect-injector-acl-role-dc2"}, + GlobalAuthMethod: false, + GlobalToken: false, + }, { ComponentName: "sync-catalog", TokenFlags: []string{"-sync-catalog"}, @@ -2456,12 +2603,11 @@ func TestRun_ValidateLoginToken_SecondaryDatacenter(t *testing.T) { GlobalToken: true, }, { - ComponentName: "snapshot-agent", - TokenFlags: []string{"-snapshot-agent"}, - Roles: []string{resourcePrefix + "-snapshot-agent-acl-role-dc2"}, - GlobalAuthMethod: false, - GlobalToken: false, - ServiceAccountName: resourcePrefix + "-server", + ComponentName: "snapshot-agent", + TokenFlags: []string{"-snapshot-agent"}, + Roles: []string{resourcePrefix + "-snapshot-agent-acl-role-dc2"}, + GlobalAuthMethod: false, + GlobalToken: false, }, { ComponentName: "mesh-gateway", @@ -2513,7 +2659,8 @@ func TestRun_ValidateLoginToken_SecondaryDatacenter(t *testing.T) { serviceAccountName = c.ServiceAccountName } - k8s, _, consulHTTPAddr, consulGRPCAddr := mockReplicatedSetup(t, bootToken) + k8s, _, consulHTTPAddr, cleanup := mockReplicatedSetup(t, bootToken) + defer cleanup() _, jwtToken := setUpK8sServiceAccount(t, k8s, ns) k8sMockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -2541,9 +2688,9 @@ func TestRun_ValidateLoginToken_SecondaryDatacenter(t *testing.T) { "-k8s-namespace=" + ns, "-acl-replication-token-file", tokenFile, "-auth-method-host=" + k8sMockServer.URL, - "-addresses", strings.Split(consulHTTPAddr, ":")[0], - "-http-port", strings.Split(consulHTTPAddr, ":")[1], - "-grpc-port", strings.Split(consulGRPCAddr, ":")[1], + "-server-address", strings.Split(consulHTTPAddr, ":")[0], + "-server-port", strings.Split(consulHTTPAddr, ":")[1], + "-consul-api-timeout", "5s", }, c.TokenFlags...) cmd.init() responseCode := cmd.Run(cmdArgs) @@ -2581,8 +2728,9 @@ func TestRun_ValidateLoginToken_SecondaryDatacenter(t *testing.T) { func TestRun_PrimaryDatacenter_ComponentAuthMethod(t *testing.T) { t.Parallel() - k8s, testClient := completeSetup(t) + k8s, testSvr := completeSetup(t) setUpK8sServiceAccount(t, k8s, ns) + defer testSvr.Stop() // Run the command. 
ui := cli.NewMockUi() @@ -2594,10 +2742,10 @@ func TestRun_PrimaryDatacenter_ComponentAuthMethod(t *testing.T) { cmdArgs := []string{ "-timeout=1m", "-k8s-namespace=" + ns, - "-addresses", strings.Split(testClient.TestServer.HTTPAddr, ":")[0], - "-http-port", strings.Split(testClient.TestServer.HTTPAddr, ":")[1], - "-grpc-port", strings.Split(testClient.TestServer.GRPCAddr, ":")[1], + "-server-address", strings.Split(testSvr.HTTPAddr, ":")[0], + "-server-port", strings.Split(testSvr.HTTPAddr, ":")[1], "-resource-prefix=" + resourcePrefix, + "-consul-api-timeout", "5s", } responseCode := cmd.Run(cmdArgs) @@ -2605,9 +2753,10 @@ func TestRun_PrimaryDatacenter_ComponentAuthMethod(t *testing.T) { // Check that the expected policy was created. bootToken := getBootToken(t, k8s, resourcePrefix, ns) - consulConfig := testClient.Cfg - consulConfig.APIClientConfig.Token = bootToken - consulClient, err := api.NewClient(consulConfig.APIClientConfig) + consulClient, err := api.NewClient(&api.Config{ + Address: testSvr.HTTPAddr, + Token: bootToken, + }) require.NoError(t, err) authMethod, _, err := consulClient.ACL().AuthMethodRead(resourcePrefix+"-k8s-component-auth-method", &api.QueryOptions{}) require.NoError(t, err) @@ -2621,8 +2770,9 @@ func TestRun_SecondaryDatacenter_ComponentAuthMethod(t *testing.T) { bootToken := "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee" tokenFile := common.WriteTempFile(t, bootToken) - k8s, consul, consulHTTPAddr, consulGRPCAddr := mockReplicatedSetup(t, bootToken) + k8s, consul, consulHTTPAddr, cleanup := mockReplicatedSetup(t, bootToken) setUpK8sServiceAccount(t, k8s, ns) + defer cleanup() // Run the command. ui := cli.NewMockUi() @@ -2637,10 +2787,10 @@ func TestRun_SecondaryDatacenter_ComponentAuthMethod(t *testing.T) { "-k8s-namespace=" + ns, "-auth-method-host=" + "https://my-kube.com", "-acl-replication-token-file", tokenFile, - "-addresses", strings.Split(consulHTTPAddr, ":")[0], - "-http-port", strings.Split(consulHTTPAddr, ":")[1], - "-grpc-port", strings.Split(consulGRPCAddr, ":")[1], + "-server-address", strings.Split(consulHTTPAddr, ":")[0], + "-server-port", strings.Split(consulHTTPAddr, ":")[1], "-resource-prefix=" + resourcePrefix, + "-consul-api-timeout", "5s", } responseCode := cmd.Run(cmdArgs) @@ -2657,25 +2807,29 @@ func TestRun_SecondaryDatacenter_ComponentAuthMethod(t *testing.T) { } // Set up test consul agent and kubernetes cluster. -func completeSetup(t *testing.T) (*fake.Clientset, *test.TestServerClient) { +func completeSetup(t *testing.T) (*fake.Clientset, *testutil.TestServer) { k8s := fake.NewSimpleClientset() - testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { + svr, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { c.ACL.Enabled = true }) + require.NoError(t, err) + svr.WaitForLeader(t) - return k8s, testClient + return k8s, svr } // Set up test consul agent and kubernetes cluster. // The consul agent is bootstrapped with the master token. 
-func completeBootstrappedSetup(t *testing.T, masterToken string) (*fake.Clientset, *test.TestServerClient) { +func completeBootstrappedSetup(t *testing.T, masterToken string) (*fake.Clientset, *testutil.TestServer) { k8s := fake.NewSimpleClientset() - svr := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { + svr, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { c.ACL.Enabled = true c.ACL.Tokens.InitialManagement = masterToken }) + require.NoError(t, err) + svr.WaitForActiveCARoot(t) return k8s, svr } @@ -2687,7 +2841,7 @@ func completeBootstrappedSetup(t *testing.T, masterToken string) (*fake.Clientse // the address of the secondary Consul server, // the replication token generated and a cleanup function // that should be called at the end of the test that cleans up resources. -func completeReplicatedSetup(t *testing.T) (*fake.Clientset, *api.Client, string, string, string) { +func completeReplicatedSetup(t *testing.T) (*fake.Clientset, *api.Client, string, string, func()) { return replicatedSetup(t, "") } @@ -2700,9 +2854,9 @@ func completeReplicatedSetup(t *testing.T) (*fake.Clientset, *api.Client, string // the address of the secondary Consul server, and a // cleanup function that should be called at the end of the test that cleans // up resources. -func mockReplicatedSetup(t *testing.T, bootToken string) (*fake.Clientset, *api.Client, string, string) { - k8sClient, consulClient, serverHTTPAddr, serverGRPCAddr, _ := replicatedSetup(t, bootToken) - return k8sClient, consulClient, serverHTTPAddr, serverGRPCAddr +func mockReplicatedSetup(t *testing.T, bootToken string) (*fake.Clientset, *api.Client, string, func()) { + k8sClient, consulClient, serverAddr, _, cleanup := replicatedSetup(t, bootToken) + return k8sClient, consulClient, serverAddr, cleanup } // replicatedSetup is a helper function for completeReplicatedSetup and @@ -2713,16 +2867,20 @@ func mockReplicatedSetup(t *testing.T, bootToken string) (*fake.Clientset, *api. // the address of the secondary Consul server, ACL replication token, and a // cleanup function that should be called at the end of the test that cleans // up resources. 
-func replicatedSetup(t *testing.T, bootToken string) (*fake.Clientset, *api.Client, string, string, string) { - primarySvr := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { +func replicatedSetup(t *testing.T, bootToken string) (*fake.Clientset, *api.Client, string, string, func()) { + primarySvr, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { c.ACL.Enabled = true if bootToken != "" { c.ACL.Tokens.InitialManagement = bootToken } }) + require.NoError(t, err) + primarySvr.WaitForLeader(t) + var aclReplicationToken string if bootToken == "" { primaryK8s := fake.NewSimpleClientset() + require.NoError(t, err) setUpK8sServiceAccount(t, primaryK8s, ns) // Run the command to bootstrap ACLs @@ -2735,11 +2893,11 @@ func replicatedSetup(t *testing.T, bootToken string) (*fake.Clientset, *api.Clie primaryCmdArgs := []string{ "-federation", "-k8s-namespace=" + ns, - "-addresses", strings.Split(primarySvr.TestServer.HTTPAddr, ":")[0], - "-http-port", strings.Split(primarySvr.TestServer.HTTPAddr, ":")[1], - "-grpc-port", strings.Split(primarySvr.TestServer.GRPCAddr, ":")[1], + "-server-address", strings.Split(primarySvr.HTTPAddr, ":")[0], + "-server-port", strings.Split(primarySvr.HTTPAddr, ":")[1], "-resource-prefix=" + resourcePrefix, "-create-acl-replication-token", + "-consul-api-timeout", "5s", } responseCode := primaryCmd.Run(primaryCmdArgs) require.Equal(t, 0, responseCode, primaryUI.ErrorWriter.String()) @@ -2754,7 +2912,7 @@ func replicatedSetup(t *testing.T, bootToken string) (*fake.Clientset, *api.Clie } // Set up the secondary server that will federate with the primary. - secondarySvr := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { + secondarySvr, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { c.Datacenter = "dc2" c.ACL.Enabled = true c.ACL.TokenReplication = true @@ -2767,6 +2925,7 @@ func replicatedSetup(t *testing.T, bootToken string) (*fake.Clientset, *api.Clie c.ACL.Tokens.Replication = bootToken } }) + require.NoError(t, err) // Our consul client will use the secondary dc. clientToken := bootToken @@ -2780,29 +2939,32 @@ func replicatedSetup(t *testing.T, bootToken string) (*fake.Clientset, *api.Clie // until ACL replication has started, and ACL replication cannot // be started because we haven't told the secondary where the primary // server is yet. - consulConfig := primarySvr.Cfg - consulConfig.APIClientConfig.Token = bootToken - consulConfig.APIClientConfig.Address = primarySvr.TestServer.HTTPAddr - consul, err := api.NewClient(consulConfig.APIClientConfig) + consul, err := api.NewClient(&api.Config{ + Address: primarySvr.HTTPAddr, + Token: bootToken, + }) require.NoError(t, err) // WAN join primary to the secondary - err = consul.Agent().Join(secondarySvr.TestServer.WANAddr, true) + err = consul.Agent().Join(secondarySvr.WANAddr, true) require.NoError(t, err) - secondarySvr.TestServer.WaitForLeader(t) + secondarySvr.WaitForLeader(t) // Overwrite consul client, pointing it to the secondary DC - consulConfig = secondarySvr.Cfg - consulConfig.APIClientConfig.Token = clientToken - consulConfig.APIClientConfig.Address = secondarySvr.TestServer.HTTPAddr - consul, err = api.NewClient(consulConfig.APIClientConfig) + consul, err = api.NewClient(&api.Config{ + Address: secondarySvr.HTTPAddr, + Token: clientToken, + }) require.NoError(t, err) // Finally, set up our kube cluster. It will use the secondary dc. 
k8s := fake.NewSimpleClientset() - return k8s, consul, secondarySvr.TestServer.HTTPAddr, secondarySvr.TestServer.GRPCAddr, aclReplicationToken + return k8s, consul, secondarySvr.HTTPAddr, aclReplicationToken, func() { + primarySvr.Stop() + secondarySvr.Stop() + } } // getBootToken gets the bootstrap token from the Kubernetes secret. It will diff --git a/control-plane/subcommand/server-acl-init/connect_inject.go b/control-plane/subcommand/server-acl-init/connect_inject.go index e732dae452..0160efd0a1 100644 --- a/control-plane/subcommand/server-acl-init/connect_inject.go +++ b/control-plane/subcommand/server-acl-init/connect_inject.go @@ -96,13 +96,16 @@ func (c *Command) createAuthMethodTmpl(authMethodName string, useNS bool) (api.A var saSecret *apiv1.Secret var secretNames []string - // In Kube 1.24+ there is no automatically generated long term JWT token for a ServiceAccount. - // Furthermore, there is no reference to a Secret in the ServiceAccount. Instead we have deployed - // a Secret in Helm which references the ServiceAccount and contains a permanent JWT token. - secretNames = append(secretNames, c.withPrefix("auth-method")) - // ServiceAccounts always have a SecretRef in Kubernetes < 1.24. The Secret contains the JWT token. - for _, secretRef := range authMethodServiceAccount.Secrets { - secretNames = append(secretNames, secretRef.Name) + if len(authMethodServiceAccount.Secrets) == 0 { + // In Kube 1.24+ there is no automatically generated long term JWT token for a ServiceAccount. + // Furthermore, there is no reference to a Secret in the ServiceAccount. Instead we have deployed + // a Secret in Helm which references the ServiceAccount and contains a permanent JWT token. + secretNames = append(secretNames, c.withPrefix("auth-method")) + } else { + // ServiceAccounts always have a SecretRef in Kubernetes < 1.24. The Secret contains the JWT token. + for _, secretRef := range authMethodServiceAccount.Secrets { + secretNames = append(secretNames, secretRef.Name) + } } // Because there could be multiple secrets attached to the service account, // we need pick the first one of type corev1.SecretTypeServiceAccountToken. diff --git a/control-plane/subcommand/server-acl-init/connect_inject_test.go b/control-plane/subcommand/server-acl-init/connect_inject_test.go index e7144146b7..e3166442af 100644 --- a/control-plane/subcommand/server-acl-init/connect_inject_test.go +++ b/control-plane/subcommand/server-acl-init/connect_inject_test.go @@ -30,20 +30,6 @@ func TestCommand_createAuthMethodTmpl_SecretNotFound(t *testing.T) { ctx: ctx, } - // create the auth method secret since it is always deployed by helm chart. - authMethodSecretName := resourcePrefix + "-auth-method" - secret := &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: authMethodSecretName, - Labels: map[string]string{common.CLILabelKey: common.CLILabelValue}, - }, - Data: map[string][]byte{}, - // Make it not a service-account-token so the test can pass through to checking the other secrets. - Type: v1.SecretTypeOpaque, - } - _, err := k8s.CoreV1().Secrets(ns).Create(ctx, secret, metav1.CreateOptions{}) - require.NoError(t, err) - serviceAccountName := resourcePrefix + "-auth-method" secretName := resourcePrefix + "-connect-injector" @@ -67,7 +53,7 @@ func TestCommand_createAuthMethodTmpl_SecretNotFound(t *testing.T) { } // Create a secret of non service-account-token type (we're using the opaque type). 
- secret = &v1.Secret{ + secret := &v1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: secretName, Labels: map[string]string{common.CLILabelKey: common.CLILabelValue}, @@ -75,7 +61,7 @@ func TestCommand_createAuthMethodTmpl_SecretNotFound(t *testing.T) { Data: map[string][]byte{}, Type: v1.SecretTypeOpaque, } - _, err = k8s.CoreV1().Secrets(ns).Create(ctx, secret, metav1.CreateOptions{}) + _, err := k8s.CoreV1().Secrets(ns).Create(ctx, secret, metav1.CreateOptions{}) require.NoError(t, err) _, err = cmd.createAuthMethodTmpl("test", true) diff --git a/control-plane/subcommand/server-acl-init/create_or_update.go b/control-plane/subcommand/server-acl-init/create_or_update.go index 833b923b90..085372827b 100644 --- a/control-plane/subcommand/server-acl-init/create_or_update.go +++ b/control-plane/subcommand/server-acl-init/create_or_update.go @@ -43,18 +43,18 @@ func (c *Command) createACLPolicyRoleAndBindingRule(componentName, rules, dc, pr ap := &api.ACLRolePolicyLink{ Name: policyName, } - var apl []*api.ACLRolePolicyLink + apl := []*api.ACLRolePolicyLink{} apl = append(apl, ap) // Add the ACLRole and ACLBindingRule. - return c.addRoleAndBindingRule(client, componentName, serviceAccountName, authMethodName, apl, global, primary, primaryDC, dc) + return c.addRoleAndBindingRule(client, serviceAccountName, authMethodName, apl, global, primary, primaryDC, dc) } // addRoleAndBindingRule adds an ACLRole and ACLBindingRule which reference the authMethod. -func (c *Command) addRoleAndBindingRule(client *api.Client, componentName, serviceAccountName, authMethodName string, policies []*api.ACLRolePolicyLink, global, primary bool, primaryDC, dc string) error { +func (c *Command) addRoleAndBindingRule(client *api.Client, serviceAccountName string, authMethodName string, policies []*api.ACLRolePolicyLink, global, primary bool, primaryDC, dc string) error { // This is the ACLRole which will allow the component which uses the serviceaccount // to be able to do a consul login. - aclRoleName := c.withPrefix(fmt.Sprintf("%s-acl-role", componentName)) + aclRoleName := fmt.Sprintf("%s-acl-role", serviceAccountName) if c.flagFederation && !primary { // If performing ACL replication, we must ensure policy names are // globally unique so we append the datacenter name but only in secondary datacenters. diff --git a/control-plane/subcommand/server-acl-init/create_or_update_test.go b/control-plane/subcommand/server-acl-init/create_or_update_test.go index 259707f85d..5cd01fac25 100644 --- a/control-plane/subcommand/server-acl-init/create_or_update_test.go +++ b/control-plane/subcommand/server-acl-init/create_or_update_test.go @@ -33,7 +33,6 @@ func TestCreateOrUpdateACLPolicy_ErrorsIfDescriptionDoesNotMatch(t *testing.T) { c.ACL.Tokens.InitialManagement = bootToken }) require.NoError(err) - defer svr.Stop() svr.WaitForLeader(t) // Get a Consul client. diff --git a/control-plane/subcommand/server-acl-init/rules.go b/control-plane/subcommand/server-acl-init/rules.go index ee6ae41e40..8b2dec7a14 100644 --- a/control-plane/subcommand/server-acl-init/rules.go +++ b/control-plane/subcommand/server-acl-init/rules.go @@ -53,9 +53,6 @@ agent_prefix "" { partition_prefix "" { namespace_prefix "" { acl = "write" - service_prefix "" { - policy = "write" - } } }` @@ -175,20 +172,13 @@ namespace_prefix "" { // This assumes users are using the default name for the service, i.e. // "mesh-gateway". 
func (c *Command) meshGatewayRules() (string, error) { - // Mesh gateways can only act as a proxy for services that its ACL token has access to. So, in the case of Consul - // namespaces, it needs access to all namespaces. For peering, it requires the ability to list all peers which in - // enterprise requires peering:read on all partitions or in OSS requires a top level peering:read. Since we cannot - // determine whether we are using an enterprise or OSS consul image based on whether peering is enabled, we include - // both permissions here. - meshGatewayRulesTpl := `mesh = "write" -{{- if .EnablePeering }} -peering = "read" -{{- if eq .PartitionName "default" }} -partition_prefix "" { - peering = "read" -} -{{- end }} -{{- end }} + // Mesh gateways can only act as a proxy for services + // that its ACL token has access to. So, in the case of + // Consul namespaces, it needs access to all namespaces. + meshGatewayRulesTpl := ` + agent_prefix "" { + policy = "read" + } {{- if .EnableNamespaces }} namespace "default" { {{- end }} @@ -322,11 +312,10 @@ func (c *Command) injectRules() (string, error) { injectRulesTpl := ` {{- if .EnablePartitions }} partition "{{ .PartitionName }}" { - mesh = "write" - acl = "write" {{- else }} +{{- if .EnableNamespaces }} operator = "write" - acl = "write" +{{- end }} {{- end }} {{- if .EnablePeering }} peering = "write" @@ -343,7 +332,6 @@ partition "{{ .PartitionName }}" { acl = "write" service_prefix "" { policy = "write" - intentions = "write" } {{- if .EnableNamespaces }} } @@ -378,7 +366,7 @@ partition "default" { {{- end }} acl = "write" service_prefix "" { - policy = "write" + policy = "read" intentions = "read" } {{- if .EnableNamespaces }} @@ -391,11 +379,49 @@ partition "default" { return c.renderRules(aclReplicationRulesTpl) } +// policy = "write" is required when creating namespaces within a partition. +// acl = "write" is required when creating namespace with a default policy. +// Attaching a default ACL policy to a namespace requires acl = "write" in the +// namespace that the policy is defined in, which in our case is "default". 
+func (c *Command) controllerRules() (string, error) { + controllerRules := ` +{{- if .EnablePartitions }} +partition "{{ .PartitionName }}" { + mesh = "write" + acl = "write" +{{- else }} + operator = "write" + acl = "write" +{{- end }} +{{- if .EnableNamespaces }} +{{- if .InjectEnableNSMirroring }} + namespace_prefix "{{ .InjectNSMirroringPrefix }}" { +{{- else }} + namespace "{{ .InjectConsulDestNS }}" { +{{- end }} +{{- end }} +{{- if .EnablePartitions }} + policy = "write" +{{- end }} + service_prefix "" { + policy = "write" + intentions = "write" + } +{{- if .EnableNamespaces }} + } +{{- end }} +{{- if .EnablePartitions }} +} +{{- end }} +` + return c.renderRules(controllerRules) +} + func (c *Command) rulesData() rulesData { return rulesData{ - EnablePartitions: c.consulFlags.Partition != "", + EnablePartitions: c.flagEnablePartitions, EnablePeering: c.flagEnablePeering, - PartitionName: c.consulFlags.Partition, + PartitionName: c.flagPartitionName, EnableNamespaces: c.flagEnableNamespaces, SyncConsulDestNS: c.flagConsulSyncDestinationNamespace, SyncEnableNSMirroring: c.flagEnableSyncK8SNSMirroring, diff --git a/control-plane/subcommand/server-acl-init/rules_test.go b/control-plane/subcommand/server-acl-init/rules_test.go index 22e63ed0ce..1e736f9a95 100644 --- a/control-plane/subcommand/server-acl-init/rules_test.go +++ b/control-plane/subcommand/server-acl-init/rules_test.go @@ -5,7 +5,6 @@ import ( "strings" "testing" - "github.com/hashicorp/consul-k8s/control-plane/subcommand/flags" "github.com/stretchr/testify/require" ) @@ -62,7 +61,8 @@ partition "part-1" { for _, tt := range cases { t.Run(tt.Name, func(t *testing.T) { cmd := Command{ - consulFlags: &flags.ConsulFlags{Partition: tt.PartitionName}, + flagEnablePartitions: tt.EnablePartitions, + flagPartitionName: tt.PartitionName, flagEnableNamespaces: tt.EnableNamespaces, } @@ -127,7 +127,8 @@ partition_prefix "" { for _, tt := range cases { t.Run(tt.Name, func(t *testing.T) { cmd := Command{ - consulFlags: &flags.ConsulFlags{Partition: tt.PartitionName}, + flagEnablePartitions: tt.EnablePartitions, + flagPartitionName: tt.PartitionName, flagEnableNamespaces: tt.EnableNamespaces, } @@ -180,7 +181,6 @@ namespace_prefix "" { t.Run(tt.Name, func(t *testing.T) { cmd := Command{ flagEnableNamespaces: tt.EnableNamespaces, - consulFlags: &flags.ConsulFlags{}, } meshGatewayRules, err := cmd.apiGatewayControllerRules() @@ -195,65 +195,13 @@ func TestMeshGatewayRules(t *testing.T) { cases := []struct { Name string EnableNamespaces bool - EnablePeering bool - PartitionName string Expected string }{ { - Name: "Namespaces and peering are disabled", - Expected: `mesh = "write" - service "mesh-gateway" { - policy = "write" - } - node_prefix "" { - policy = "read" - } - service_prefix "" { - policy = "read" - }`, - }, - { - Name: "Namespaces are enabled", - EnableNamespaces: true, - Expected: `mesh = "write" -namespace "default" { - service "mesh-gateway" { - policy = "write" - } -} -namespace_prefix "" { - node_prefix "" { - policy = "read" - } - service_prefix "" { - policy = "read" - } -}`, - }, - { - Name: "Peering is enabled with unspecified partition name (oss case)", - EnablePeering: true, - Expected: `mesh = "write" -peering = "read" - service "mesh-gateway" { - policy = "write" - } - node_prefix "" { + Name: "Namespaces are disabled", + Expected: `agent_prefix "" { policy = "read" } - service_prefix "" { - policy = "read" - }`, - }, - { - Name: "Peering is enabled with partition explicitly specified as default (ent default case)", 
- EnablePeering: true, - PartitionName: "default", - Expected: `mesh = "write" -peering = "read" -partition_prefix "" { - peering = "read" -} service "mesh-gateway" { policy = "write" } @@ -265,27 +213,11 @@ partition_prefix "" { }`, }, { - Name: "Peering is enabled with partition explicitly specified as non-default (ent non-default case)", - EnablePeering: true, - PartitionName: "non-default", - Expected: `mesh = "write" -peering = "read" - service "mesh-gateway" { - policy = "write" - } - node_prefix "" { + Name: "Namespaces are enabled", + EnableNamespaces: true, + Expected: `agent_prefix "" { policy = "read" } - service_prefix "" { - policy = "read" - }`, - }, - { - Name: "Peering and namespaces are enabled", - EnablePeering: true, - EnableNamespaces: true, - Expected: `mesh = "write" -peering = "read" namespace "default" { service "mesh-gateway" { policy = "write" @@ -306,10 +238,6 @@ namespace_prefix "" { t.Run(tt.Name, func(t *testing.T) { cmd := Command{ flagEnableNamespaces: tt.EnableNamespaces, - flagEnablePeering: tt.EnablePeering, - consulFlags: &flags.ConsulFlags{ - Partition: tt.PartitionName, - }, } meshGatewayRules, err := cmd.meshGatewayRules() @@ -429,7 +357,8 @@ partition "default" { for _, tt := range cases { t.Run(tt.Name, func(t *testing.T) { cmd := Command{ - consulFlags: &flags.ConsulFlags{Partition: tt.PartitionName}, + flagEnablePartitions: tt.EnablePartitions, + flagPartitionName: tt.PartitionName, flagEnableNamespaces: tt.EnableNamespaces, } @@ -535,7 +464,8 @@ partition "default" { for _, tt := range cases { t.Run(tt.Name, func(t *testing.T) { cmd := Command{ - consulFlags: &flags.ConsulFlags{Partition: tt.PartitionName}, + flagEnablePartitions: tt.EnablePartitions, + flagPartitionName: tt.PartitionName, flagEnableNamespaces: tt.EnableNamespaces, } @@ -896,7 +826,8 @@ partition "foo" { for _, tt := range cases { t.Run(tt.Name, func(t *testing.T) { cmd := Command{ - consulFlags: &flags.ConsulFlags{Partition: tt.PartitionName}, + flagEnablePartitions: tt.EnablePartitions, + flagPartitionName: tt.PartitionName, flagEnableNamespaces: tt.EnableNamespaces, flagConsulSyncDestinationNamespace: tt.ConsulSyncDestinationNamespace, flagEnableSyncK8SNSMirroring: tt.EnableSyncK8SNSMirroring, @@ -926,15 +857,12 @@ func TestInjectRules(t *testing.T) { EnablePartitions: false, EnablePeering: false, Expected: ` - operator = "write" - acl = "write" node_prefix "" { policy = "write" } acl = "write" service_prefix "" { policy = "write" - intentions = "write" }`, }, { @@ -943,7 +871,6 @@ func TestInjectRules(t *testing.T) { EnablePeering: false, Expected: ` operator = "write" - acl = "write" node_prefix "" { policy = "write" } @@ -951,7 +878,6 @@ func TestInjectRules(t *testing.T) { acl = "write" service_prefix "" { policy = "write" - intentions = "write" } }`, }, @@ -961,7 +887,6 @@ func TestInjectRules(t *testing.T) { EnablePeering: true, Expected: ` operator = "write" - acl = "write" peering = "write" node_prefix "" { policy = "write" @@ -970,7 +895,6 @@ func TestInjectRules(t *testing.T) { acl = "write" service_prefix "" { policy = "write" - intentions = "write" } }`, }, @@ -981,8 +905,6 @@ func TestInjectRules(t *testing.T) { PartitionName: "part-1", Expected: ` partition "part-1" { - mesh = "write" - acl = "write" node_prefix "" { policy = "write" } @@ -991,7 +913,6 @@ partition "part-1" { acl = "write" service_prefix "" { policy = "write" - intentions = "write" } } }`, @@ -1003,8 +924,6 @@ partition "part-1" { PartitionName: "part-1", Expected: ` partition "part-1" { - mesh = 
"write" - acl = "write" peering = "write" node_prefix "" { policy = "write" @@ -1014,7 +933,6 @@ partition "part-1" { acl = "write" service_prefix "" { policy = "write" - intentions = "write" } } }`, @@ -1026,7 +944,8 @@ partition "part-1" { t.Run(caseName, func(t *testing.T) { cmd := Command{ - consulFlags: &flags.ConsulFlags{Partition: tt.PartitionName}, + flagEnablePartitions: tt.EnablePartitions, + flagPartitionName: tt.PartitionName, flagEnableNamespaces: tt.EnableNamespaces, flagEnablePeering: tt.EnablePeering, } @@ -1059,7 +978,7 @@ func TestReplicationTokenRules(t *testing.T) { } acl = "write" service_prefix "" { - policy = "write" + policy = "read" intentions = "read" }`, }, @@ -1077,7 +996,7 @@ func TestReplicationTokenRules(t *testing.T) { namespace_prefix "" { acl = "write" service_prefix "" { - policy = "write" + policy = "read" intentions = "read" } }`, @@ -1099,7 +1018,7 @@ partition "default" { namespace_prefix "" { acl = "write" service_prefix "" { - policy = "write" + policy = "read" intentions = "read" } } @@ -1110,7 +1029,8 @@ partition "default" { for _, tt := range cases { t.Run(tt.Name, func(t *testing.T) { cmd := Command{ - consulFlags: &flags.ConsulFlags{Partition: tt.PartitionName}, + flagEnablePartitions: tt.EnablePartitions, + flagPartitionName: tt.PartitionName, flagEnableNamespaces: tt.EnableNamespaces, } replicationTokenRules, err := cmd.aclReplicationRules() @@ -1119,3 +1039,146 @@ partition "default" { }) } } + +func TestControllerRules(t *testing.T) { + cases := []struct { + Name string + EnablePartitions bool + PartitionName string + EnableNamespaces bool + DestConsulNS string + Mirroring bool + MirroringPrefix string + Expected string + }{ + { + Name: "namespaces=disabled, partitions=disabled", + Expected: ` + operator = "write" + acl = "write" + service_prefix "" { + policy = "write" + intentions = "write" + }`, + }, + { + Name: "namespaces=enabled, consulDestNS=consul, partitions=disabled", + EnableNamespaces: true, + DestConsulNS: "consul", + Expected: ` + operator = "write" + acl = "write" + namespace "consul" { + service_prefix "" { + policy = "write" + intentions = "write" + } + }`, + }, + { + Name: "namespaces=enabled, mirroring=true, partitions=disabled", + EnableNamespaces: true, + Mirroring: true, + Expected: ` + operator = "write" + acl = "write" + namespace_prefix "" { + service_prefix "" { + policy = "write" + intentions = "write" + } + }`, + }, + { + Name: "namespaces=enabled, mirroring=true, mirroringPrefix=prefix-, partitions=disabled", + EnableNamespaces: true, + Mirroring: true, + MirroringPrefix: "prefix-", + Expected: ` + operator = "write" + acl = "write" + namespace_prefix "prefix-" { + service_prefix "" { + policy = "write" + intentions = "write" + } + }`, + }, + { + Name: "namespaces=enabled, consulDestNS=consul, partitions=enabled", + EnablePartitions: true, + PartitionName: "part-1", + EnableNamespaces: true, + DestConsulNS: "consul", + Expected: ` +partition "part-1" { + mesh = "write" + acl = "write" + namespace "consul" { + policy = "write" + service_prefix "" { + policy = "write" + intentions = "write" + } + } +}`, + }, + { + Name: "namespaces=enabled, mirroring=true, partitions=enabled", + EnablePartitions: true, + PartitionName: "part-1", + EnableNamespaces: true, + Mirroring: true, + Expected: ` +partition "part-1" { + mesh = "write" + acl = "write" + namespace_prefix "" { + policy = "write" + service_prefix "" { + policy = "write" + intentions = "write" + } + } +}`, + }, + { + Name: "namespaces=enabled, mirroring=true, 
mirroringPrefix=prefix-, partitions=enabled", + EnablePartitions: true, + PartitionName: "part-1", + EnableNamespaces: true, + Mirroring: true, + MirroringPrefix: "prefix-", + Expected: ` +partition "part-1" { + mesh = "write" + acl = "write" + namespace_prefix "prefix-" { + policy = "write" + service_prefix "" { + policy = "write" + intentions = "write" + } + } +}`, + }, + } + + for _, tt := range cases { + t.Run(tt.Name, func(t *testing.T) { + cmd := Command{ + flagEnableNamespaces: tt.EnableNamespaces, + flagConsulInjectDestinationNamespace: tt.DestConsulNS, + flagEnableInjectK8SNSMirroring: tt.Mirroring, + flagInjectK8SNSMirroringPrefix: tt.MirroringPrefix, + flagEnablePartitions: tt.EnablePartitions, + flagPartitionName: tt.PartitionName, + } + + rules, err := cmd.controllerRules() + + require.NoError(t, err) + require.Equal(t, tt.Expected, rules) + }) + } +} diff --git a/control-plane/subcommand/server-acl-init/servers.go b/control-plane/subcommand/server-acl-init/servers.go index 2dc8f8ab67..7b4f0ef527 100644 --- a/control-plane/subcommand/server-acl-init/servers.go +++ b/control-plane/subcommand/server-acl-init/servers.go @@ -3,7 +3,6 @@ package serveraclinit import ( "errors" "fmt" - "net" "net/http" "strings" "time" @@ -18,15 +17,15 @@ import ( // bootstrapServers bootstraps ACLs and ensures each server has an ACL token. // If bootstrapToken is not empty then ACLs are already bootstrapped. -func (c *Command) bootstrapServers(serverAddresses []net.IPAddr, bootstrapToken, bootTokenSecretName string) (string, error) { +func (c *Command) bootstrapServers(serverAddresses []string, bootstrapToken, bootTokenSecretName, scheme string) (string, error) { // Pick the first server address to connect to for bootstrapping and set up connection. - firstServerAddr := fmt.Sprintf("%s:%d", serverAddresses[0].IP.String(), c.consulFlags.HTTPPort) + firstServerAddr := fmt.Sprintf("%s:%d", serverAddresses[0], c.flagServerPort) if bootstrapToken == "" { c.log.Info("No bootstrap token from previous installation found, continuing on to bootstrapping") var err error - bootstrapToken, err = c.bootstrapACLs(firstServerAddr, bootTokenSecretName) + bootstrapToken, err = c.bootstrapACLs(firstServerAddr, scheme, bootTokenSecretName) if err != nil { return "", err } @@ -37,8 +36,26 @@ func (c *Command) bootstrapServers(serverAddresses []net.IPAddr, bootstrapToken, // We should only create and set server tokens when servers are running within this cluster. if c.flagSetServerTokens { c.log.Info("Setting Consul server tokens") + + // Override our original client with a new one that has the bootstrap token + // set. + clientConfig := api.DefaultConfig() + clientConfig.Address = firstServerAddr + clientConfig.Scheme = scheme + clientConfig.Token = bootstrapToken + clientConfig.TLSConfig = api.TLSConfig{ + Address: c.flagConsulTLSServerName, + CAFile: c.flagConsulCACert, + } + + consulClient, err := consul.NewClient(clientConfig, + c.flagConsulAPITimeout) + if err != nil { + return "", fmt.Errorf("creating Consul client for address %s: %s", firstServerAddr, err) + } + // Create new tokens for each server and apply them. - if err := c.setServerTokens(serverAddresses, bootstrapToken); err != nil { + if err = c.setServerTokens(consulClient, serverAddresses, bootstrapToken, scheme); err != nil { return "", err } } @@ -47,9 +64,14 @@ func (c *Command) bootstrapServers(serverAddresses []net.IPAddr, bootstrapToken, // bootstrapACLs makes the ACL bootstrap API call and writes the bootstrap token // to a kube secret. 
-func (c *Command) bootstrapACLs(firstServerAddr, bootTokenSecretName string) (string, error) { - config := c.consulFlags.ConsulClientConfig().APIClientConfig - config.Address = firstServerAddr +func (c *Command) bootstrapACLs(firstServerAddr string, scheme string, bootTokenSecretName string) (string, error) { + clientConfig := api.DefaultConfig() + clientConfig.Address = firstServerAddr + clientConfig.Scheme = scheme + clientConfig.TLSConfig = api.TLSConfig{ + Address: c.flagConsulTLSServerName, + CAFile: c.flagConsulCACert, + } // Exempting this particular use of the http client from using global.consulAPITimeout // which defaults to 5 seconds. In acceptance tests, we saw that the call // to /v1/acl/bootstrap taking 5-7 seconds and when it does, the request times @@ -58,8 +80,11 @@ func (c *Command) bootstrapACLs(firstServerAddr, bootTokenSecretName string) (st // already bootstrapped and would not be able to complete. // Since this is an area where we have to wait and can't retry, we are setting it // to a large number like 5 minutes since previously this had no timeout. - config.HttpClient = &http.Client{Timeout: 5 * time.Minute} - consulClient, err := consul.NewClient(config, c.consulFlags.APITimeout) + clientConfig.HttpClient = &http.Client{ + Timeout: 5 * time.Minute, + } + consulClient, err := consul.NewClient(clientConfig, + c.flagConsulAPITimeout) if err != nil { return "", fmt.Errorf("creating Consul client for address %s: %s", firstServerAddr, err) @@ -118,22 +143,13 @@ func (c *Command) bootstrapACLs(firstServerAddr, bootTokenSecretName string) (st // setServerTokens creates policies and associated ACL token for each server // and then provides the token to the server. -func (c *Command) setServerTokens(serverAddresses []net.IPAddr, bootstrapToken string) error { - // server specifically. - clientConfig := c.consulFlags.ConsulClientConfig().APIClientConfig - clientConfig.Address = fmt.Sprintf("%s:%d", serverAddresses[0].IP.String(), c.consulFlags.HTTPPort) - clientConfig.Token = bootstrapToken - serverClient, err := consul.NewClient(clientConfig, - c.consulFlags.APITimeout) - if err != nil { - return err - } - agentPolicy, err := c.setServerPolicy(serverClient) +func (c *Command) setServerTokens(consulClient *api.Client, serverAddresses []string, bootstrapToken, scheme string) error { + agentPolicy, err := c.setServerPolicy(consulClient) if err != nil { return err } - existingTokens, _, err := serverClient.ACL().TokenList(nil) + existingTokens, _, err := consulClient.ACL().TokenList(nil) if err != nil { return err } @@ -144,16 +160,22 @@ func (c *Command) setServerTokens(serverAddresses []net.IPAddr, bootstrapToken s // We create a new client for each server because we need to call each // server specifically. - clientConfig := c.consulFlags.ConsulClientConfig().APIClientConfig - clientConfig.Address = fmt.Sprintf("%s:%d", host.IP.String(), c.consulFlags.HTTPPort) + clientConfig := api.DefaultConfig() + clientConfig.Address = fmt.Sprintf("%s:%d", host, c.flagServerPort) + clientConfig.Scheme = scheme clientConfig.Token = bootstrapToken + clientConfig.TLSConfig = api.TLSConfig{ + Address: c.flagConsulTLSServerName, + CAFile: c.flagConsulCACert, + } + serverClient, err := consul.NewClient(clientConfig, - c.consulFlags.APITimeout) + c.flagConsulAPITimeout) if err != nil { return err } - tokenDescription := fmt.Sprintf("Server Token for %s", host.IP.String()) + tokenDescription := fmt.Sprintf("Server Token for %s", host) // Check if the token was already created. 
We're matching on the description // since that's the only part that's unique. diff --git a/control-plane/subcommand/service-address/command.go b/control-plane/subcommand/service-address/command.go new file mode 100644 index 0000000000..91bc600191 --- /dev/null +++ b/control-plane/subcommand/service-address/command.go @@ -0,0 +1,224 @@ +package serviceaddress + +import ( + "context" + "errors" + "flag" + "fmt" + "net" + "os" + "sync" + "time" + + "github.com/cenkalti/backoff" + "github.com/hashicorp/consul-k8s/control-plane/subcommand" + "github.com/hashicorp/consul-k8s/control-plane/subcommand/common" + "github.com/hashicorp/consul-k8s/control-plane/subcommand/flags" + k8sflags "github.com/hashicorp/consul-k8s/control-plane/subcommand/flags" + "github.com/hashicorp/go-hclog" + "github.com/mitchellh/cli" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" +) + +type Command struct { + UI cli.Ui + + flags *flag.FlagSet + k8sFlags *k8sflags.K8SFlags + + flagNamespace string + flagServiceName string + flagOutputFile string + flagResolveHostnames bool + flagLogLevel string + flagLogJSON bool + + retryDuration time.Duration + k8sClient kubernetes.Interface + once sync.Once + help string + + ctx context.Context +} + +func (c *Command) init() { + c.flags = flag.NewFlagSet("", flag.ContinueOnError) + c.flags.StringVar(&c.flagNamespace, "k8s-namespace", "", + "Kubernetes namespace where service is created") + c.flags.StringVar(&c.flagServiceName, "name", "", + "Name of the service") + c.flags.StringVar(&c.flagOutputFile, "output-file", "", + "Path to file to write load balancer address") + c.flags.BoolVar(&c.flagResolveHostnames, "resolve-hostnames", false, + "If true we will resolve any hostnames and use their first IP address") + c.flags.StringVar(&c.flagLogLevel, "log-level", "info", + "Log verbosity level. Supported values (in order of detail) are \"trace\", "+ + "\"debug\", \"info\", \"warn\", and \"error\".") + c.flags.BoolVar(&c.flagLogJSON, "log-json", false, + "Enable or disable JSON output format for logging.") + + c.k8sFlags = &k8sflags.K8SFlags{} + flags.Merge(c.flags, c.k8sFlags.Flags()) + c.help = flags.Usage(help, c.flags) +} + +// Run waits until a Kubernetes service has an ingress address and then writes +// it to an output file. +func (c *Command) Run(args []string) int { + c.once.Do(c.init) + if err := c.validateFlags(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + if c.k8sClient == nil { + config, err := subcommand.K8SConfig(c.k8sFlags.KubeConfig()) + if err != nil { + c.UI.Error(fmt.Sprintf("Error retrieving Kubernetes auth: %s", err)) + return 1 + } + c.k8sClient, err = kubernetes.NewForConfig(config) + if err != nil { + c.UI.Error(fmt.Sprintf("Error initializing Kubernetes client: %s", err)) + return 1 + } + } + if c.retryDuration == 0 { + c.retryDuration = 1 * time.Second + } + logger, err := common.Logger(c.flagLogLevel, c.flagLogJSON) + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + + if c.ctx == nil { + c.ctx = context.Background() + } + + // Run until we get an address from the service. 
+	var address string +	var unretryableErr error +	err = backoff.Retry(withErrLogger(logger, func() error { +		svc, err := c.k8sClient.CoreV1().Services(c.flagNamespace).Get(c.ctx, c.flagServiceName, metav1.GetOptions{}) +		if err != nil { +			return fmt.Errorf("getting service %s: %s", c.flagServiceName, err) +		} +		switch svc.Spec.Type { +		case v1.ServiceTypeClusterIP: +			address = svc.Spec.ClusterIP +			return nil +		case v1.ServiceTypeNodePort: +			unretryableErr = errors.New("services of type NodePort are not supported") +			return nil +		case v1.ServiceTypeExternalName: +			unretryableErr = errors.New("services of type ExternalName are not supported") +			return nil +		case v1.ServiceTypeLoadBalancer: +			for _, ingr := range svc.Status.LoadBalancer.Ingress { +				if ingr.IP != "" { +					address = ingr.IP +					return nil +				} else if ingr.Hostname != "" { +					if c.flagResolveHostnames { +						address, unretryableErr = resolveHostname(ingr.Hostname) +					} else { +						address = ingr.Hostname +					} +					return nil +				} +			} +			return fmt.Errorf("service %s has no ingress IP or hostname", c.flagServiceName) +		default: +			unretryableErr = fmt.Errorf("unknown service type %q", svc.Spec.Type) +			return nil +		} +	}), backoff.NewConstantBackOff(c.retryDuration)) + +	if err != nil || unretryableErr != nil { +		c.UI.Error(fmt.Sprintf("Unable to get service address: %s, err: %s", unretryableErr.Error(), err)) +		return 1 +	} + +	// Write the address to file. +	err = os.WriteFile(c.flagOutputFile, []byte(address), 0600) +	if err != nil { +		c.UI.Error(fmt.Sprintf("Unable to write address to file: %s", err)) +		return 1 +	} + +	c.UI.Info(fmt.Sprintf("Address %q written to %s successfully", address, c.flagOutputFile)) +	return 0 +} + +func (c *Command) validateFlags(args []string) error { +	if err := c.flags.Parse(args); err != nil { +		return err +	} +	if len(c.flags.Args()) > 0 { +		return errors.New("should have no non-flag arguments") +	} +	if c.flagNamespace == "" { +		return errors.New("-k8s-namespace must be set") +	} +	if c.flagServiceName == "" { +		return errors.New("-name must be set") +	} +	if c.flagOutputFile == "" { +		return errors.New("-output-file must be set") +	} +	return nil +} + +// resolveHostname returns the first ipv4 address for host. +func resolveHostname(host string) (string, error) { +	ips, err := net.LookupIP(host) +	if err != nil { +		return "", fmt.Errorf("unable to resolve hostname: %s", err) +	} +	if len(ips) < 1 { +		return "", fmt.Errorf("hostname %q had no resolvable IPs", host) +	} + +	for _, ip := range ips { +		v4 := ip.To4() +		if v4 == nil { +			continue +		} +		return ip.String(), nil +	} +	return "", fmt.Errorf("hostname %q had no ipv4 IPs", host) +} + +// withErrLogger runs op and logs if op returns an error. +// It returns the result of op. +func withErrLogger(log hclog.Logger, op func() error) func() error { +	return func() error { +		err := op() +		if err != nil { +			log.Error(err.Error()) +		} +		return err +	} +} + +func (c *Command) Synopsis() string { return synopsis } +func (c *Command) Help() string { +	c.once.Do(c.init) +	return c.help +} + +const synopsis = "Output Kubernetes Service address to file" +const help = ` +Usage: consul-k8s-control-plane service-address [options] + +  Waits until the Kubernetes service specified by -name in namespace +  -k8s-namespace is created, then writes its address to -output-file. 
+ The address written depends on the service type: + ClusterIP - Cluster IP + NodePort - Not supported + LoadBalancer - Load balancer's IP or hostname + ExternalName - Not supported +` diff --git a/control-plane/subcommand/service-address/command_test.go b/control-plane/subcommand/service-address/command_test.go new file mode 100644 index 0000000000..24e83551b6 --- /dev/null +++ b/control-plane/subcommand/service-address/command_test.go @@ -0,0 +1,397 @@ +package serviceaddress + +import ( +	"context" +	"fmt" +	"os" +	"path/filepath" +	"testing" +	"time" + +	"github.com/mitchellh/cli" +	"github.com/stretchr/testify/require" +	v1 "k8s.io/api/core/v1" +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +	"k8s.io/apimachinery/pkg/util/intstr" +	"k8s.io/client-go/kubernetes/fake" +) + +// Test that flags are validated. +func TestRun_FlagValidation(t *testing.T) { +	cases := []struct { +		Flags  []string +		ExpErr string +	}{ +		{ +			Flags:  []string{}, +			ExpErr: "-k8s-namespace must be set", +		}, +		{ +			Flags:  []string{"-k8s-namespace=default"}, +			ExpErr: "-name must be set", +		}, +		{ +			Flags:  []string{"-k8s-namespace=default", "-name=name"}, +			ExpErr: "-output-file must be set", +		}, +	} +	for _, c := range cases { +		t.Run(c.ExpErr, func(t *testing.T) { +			ui := cli.NewMockUi() +			cmd := Command{ +				UI: ui, +			} +			responseCode := cmd.Run(c.Flags) +			require.Equal(t, 1, responseCode, ui.ErrorWriter.String()) +			require.Contains(t, ui.ErrorWriter.String(), c.ExpErr) +		}) +	} +} + +// Test that if the file can't be written to we return an error. +func TestRun_UnableToWriteToFile(t *testing.T) { +	t.Parallel() +	require := require.New(t) + +	k8sNS := "default" +	svcName := "service-name" +	expAddress := "1.2.3.4" + +	// Create the service. +	k8s := fake.NewSimpleClientset() +	_, err := k8s.CoreV1().Services(k8sNS).Create(context.Background(), kubeLoadBalancerSvc(svcName, expAddress, ""), metav1.CreateOptions{}) +	require.NoError(err) + +	// Run command with an unwriteable file. +	ui := cli.NewMockUi() +	cmd := Command{ +		UI:        ui, +		k8sClient: k8s, +	} +	responseCode := cmd.Run([]string{ +		"-k8s-namespace", k8sNS, +		"-name", svcName, +		"-output-file", "/this/filepath/does/not/exist", +	}) +	require.Equal(1, responseCode, ui.ErrorWriter.String()) +	require.Contains(ui.ErrorWriter.String(), +		"Unable to write address to file: open /this/filepath/does/not/exist: no such file or directory") +} + +func TestRun_UnresolvableHostname(t *testing.T) { +	t.Parallel() +	require := require.New(t) + +	k8sNS := "default" +	svcName := "service-name" + +	// Create the service. +	k8s := fake.NewSimpleClientset() +	_, err := k8s.CoreV1().Services(k8sNS).Create(context.Background(), kubeLoadBalancerSvc(svcName, "", "unresolvable"), metav1.CreateOptions{}) +	require.NoError(err) + +	// Run command. +	ui := cli.NewMockUi() +	cmd := Command{ +		UI:        ui, +		k8sClient: k8s, +	} +	tmpDir, err := os.MkdirTemp("", "") +	require.NoError(err) +	defer os.RemoveAll(tmpDir) +	outputFile := filepath.Join(tmpDir, "address.txt") + +	responseCode := cmd.Run([]string{ +		"-k8s-namespace", k8sNS, +		"-name", svcName, +		"-output-file", outputFile, +		"-resolve-hostnames=true", +	}) +	require.Equal(1, responseCode) +	require.Contains(ui.ErrorWriter.String(), "Unable to get service address: unable to resolve hostname:") +} + +// Test running with different service types. 
+func TestRun_ServiceTypes(t *testing.T) { + t.Parallel() + + // All services will have the name "service-name" + cases := map[string]struct { + Service *v1.Service + ServiceModificationF func(*v1.Service) + ResolveHostnames bool + ExpErr string + ExpAddress string + }{ + "ClusterIP": { + Service: kubeClusterIPSvc("service-name"), + ExpAddress: "5.6.7.8", + }, + "NodePort": { + Service: kubeNodePortSvc("service-name"), + ExpErr: "services of type NodePort are not supported", + }, + "LoadBalancer IP": { + Service: kubeLoadBalancerSvc("service-name", "1.2.3.4", ""), + ExpAddress: "1.2.3.4", + }, + "LoadBalancer hostname": { + Service: kubeLoadBalancerSvc("service-name", "", "localhost"), + ExpAddress: "localhost", + }, + "LoadBalancer hostname with resolve-hostnames=true": { + Service: kubeLoadBalancerSvc("service-name", "", "localhost"), + ResolveHostnames: true, + ExpAddress: "127.0.0.1", + }, + "LoadBalancer IP and hostname": { + Service: kubeLoadBalancerSvc("service-name", "1.2.3.4", "example.com"), + ExpAddress: "1.2.3.4", + }, + "LoadBalancer first ingress empty": { + Service: kubeLoadBalancerSvc("service-name", "1.2.3.4", "example.com"), + ServiceModificationF: func(svc *v1.Service) { + svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{ + {}, + { + IP: "5.6.7.8", + }, + } + }, + ExpAddress: "5.6.7.8", + }, + "ExternalName": { + Service: kubeExternalNameSvc("service-name"), + ExpErr: "services of type ExternalName are not supported", + }, + "invalid name": { + Service: &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "service-name", + }, + Spec: v1.ServiceSpec{ + Type: "invalid", + }, + }, + ExpErr: "unknown service type \"invalid\"", + }, + } + + for name, c := range cases { + t.Run(name, func(tt *testing.T) { + require := require.New(tt) + k8sNS := "default" + svcName := "service-name" + + // Create the service. + k8s := fake.NewSimpleClientset() + if c.ServiceModificationF != nil { + c.ServiceModificationF(c.Service) + } + _, err := k8s.CoreV1().Services(k8sNS).Create(context.Background(), c.Service, metav1.CreateOptions{}) + require.NoError(err) + + // Run command. + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + k8sClient: k8s, + } + tmpDir, err := os.MkdirTemp("", "") + require.NoError(err) + defer os.RemoveAll(tmpDir) + outputFile := filepath.Join(tmpDir, "address.txt") + + args := []string{ + "-k8s-namespace", k8sNS, + "-name", svcName, + "-output-file", outputFile, + } + if c.ResolveHostnames { + args = append(args, "-resolve-hostnames=true") + } + responseCode := cmd.Run(args) + if c.ExpErr != "" { + require.Equal(1, responseCode) + require.Contains(ui.ErrorWriter.String(), c.ExpErr) + } else { + require.Equal(0, responseCode, ui.ErrorWriter.String()) + actAddressBytes, err := os.ReadFile(outputFile) + require.NoError(err) + require.Equal(c.ExpAddress, string(actAddressBytes)) + } + }) + } +} + +// Test that we write the address to file successfully, even when we have to retry +// looking up the service. This mimics what happens in Kubernetes when a +// service gets an ingress address after a cloud provider provisions a +// load balancer. +func TestRun_FileWrittenAfterRetry(t *testing.T) { + t.Parallel() + cases := map[string]struct { + // InitialService controls whether a service with that name will have + // already been created. The service won't have an address yet. + InitialService bool + // UpdateDelay controls how long we wait before updating the service + // with the UpdateIP address. NOTE: the retry duration for this + // test is set to 10ms. 
+ UpdateDelay time.Duration + }{ + "initial service exists": { + InitialService: true, + UpdateDelay: 50 * time.Millisecond, + }, + "initial service does not exist, immediate update": { + InitialService: false, + UpdateDelay: 0, + }, + "initial service does not exist, 50ms delay": { + InitialService: false, + UpdateDelay: 50 * time.Millisecond, + }, + } + + for name, c := range cases { + t.Run(name, func(t *testing.T) { + k8sNS := "default" + svcName := "service-name" + ip := "1.2.3.4" + k8s := fake.NewSimpleClientset() + + if c.InitialService { + svc := kubeLoadBalancerSvc(svcName, "", "") + // Reset the status to nothing. + svc.Status = v1.ServiceStatus{} + _, err := k8s.CoreV1().Services(k8sNS).Create(context.Background(), svc, metav1.CreateOptions{}) + require.NoError(t, err) + } + + // Create/update the service after delay. + go func() { + time.Sleep(c.UpdateDelay) + svc := kubeLoadBalancerSvc(svcName, ip, "") + var err error + if c.InitialService { + _, err = k8s.CoreV1().Services(k8sNS).Update(context.Background(), svc, metav1.UpdateOptions{}) + } else { + _, err = k8s.CoreV1().Services(k8sNS).Create(context.Background(), svc, metav1.CreateOptions{}) + } + require.NoError(t, err) + }() + + // Run command. + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + k8sClient: k8s, + retryDuration: 10 * time.Millisecond, + } + tmpDir, err := os.MkdirTemp("", "") + require.NoError(t, err) + defer os.RemoveAll(tmpDir) + outputFile := filepath.Join(tmpDir, "address.txt") + + responseCode := cmd.Run([]string{ + "-k8s-namespace", k8sNS, + "-name", svcName, + "-output-file", outputFile, + }) + require.Equal(t, 0, responseCode, ui.ErrorWriter.String()) + actAddressBytes, err := os.ReadFile(outputFile) + require.NoError(t, err) + require.Equal(t, ip, string(actAddressBytes)) + }) + } +} + +func kubeLoadBalancerSvc(name string, ip string, hostname string) *v1.Service { + return &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: v1.ServiceSpec{ + Type: "LoadBalancer", + ClusterIP: "9.0.1.2", + Ports: []v1.ServicePort{ + { + Name: "http", + Protocol: "TCP", + Port: 80, + TargetPort: intstr.IntOrString{ + IntVal: 8080, + }, + NodePort: 32001, + }, + }, + }, + Status: v1.ServiceStatus{ + LoadBalancer: v1.LoadBalancerStatus{ + Ingress: []v1.LoadBalancerIngress{ + { + IP: ip, + Hostname: hostname, + }, + }, + }, + }, + } +} + +func kubeNodePortSvc(name string) *v1.Service { + return &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: v1.ServiceSpec{ + Type: "NodePort", + ClusterIP: "1.2.3.4", + Ports: []v1.ServicePort{ + { + Name: "http", + Protocol: "TCP", + Port: 80, + TargetPort: intstr.IntOrString{ + IntVal: 8080, + }, + NodePort: 32000, + }, + }, + }, + } +} + +func kubeClusterIPSvc(name string) *v1.Service { + return &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: v1.ServiceSpec{ + Type: "ClusterIP", + ClusterIP: "5.6.7.8", + Ports: []v1.ServicePort{ + { + Name: "http", + Protocol: "TCP", + Port: 80, + TargetPort: intstr.IntOrString{ + IntVal: 8080, + }, + }, + }, + }, + } +} + +func kubeExternalNameSvc(name string) *v1.Service { + return &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: v1.ServiceSpec{ + Type: "ExternalName", + ExternalName: fmt.Sprintf("%s.example.com", name), + }, + } +} diff --git a/control-plane/subcommand/sync-catalog/command.go b/control-plane/subcommand/sync-catalog/command.go index f890b44f34..105ce6619c 100644 --- a/control-plane/subcommand/sync-catalog/command.go +++ 
b/control-plane/subcommand/sync-catalog/command.go @@ -15,12 +15,11 @@ import ( mapset "github.com/deckarep/golang-set" catalogtoconsul "github.com/hashicorp/consul-k8s/control-plane/catalog/to-consul" catalogtok8s "github.com/hashicorp/consul-k8s/control-plane/catalog/to-k8s" - "github.com/hashicorp/consul-k8s/control-plane/consul" "github.com/hashicorp/consul-k8s/control-plane/helper/controller" "github.com/hashicorp/consul-k8s/control-plane/subcommand" "github.com/hashicorp/consul-k8s/control-plane/subcommand/common" "github.com/hashicorp/consul-k8s/control-plane/subcommand/flags" - "github.com/hashicorp/consul-server-connection-manager/discovery" + "github.com/hashicorp/consul/api" "github.com/hashicorp/go-hclog" "github.com/mitchellh/cli" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -34,7 +33,7 @@ type Command struct { UI cli.Ui flags *flag.FlagSet - consul *flags.ConsulFlags + http *flags.HTTPFlags k8s *flags.K8SFlags flagListen string flagToConsul bool @@ -64,17 +63,13 @@ type Command struct { flagK8SNSMirroringPrefix string // Prefix added to Consul namespaces created when mirroring flagCrossNamespaceACLPolicy string // The name of the ACL policy to add to every created namespace if ACLs are enabled - clientset kubernetes.Interface + consulClient *api.Client + clientset kubernetes.Interface - // ready indicates whether this controller is ready to sync services. This will be changed to true once the - // consul-server-connection-manager has finished initial initialization. - ready bool - - once sync.Once - sigCh chan os.Signal - help string - logger hclog.Logger - connMgr consul.ServerConnectionManager + once sync.Once + sigCh chan os.Signal + help string + logger hclog.Logger } func (c *Command) init() { @@ -148,9 +143,9 @@ func (c *Command) init() { "[Enterprise Only] Name of the ACL policy to attach to all created Consul namespaces to allow service "+ "discovery across Consul namespaces. Only necessary if ACLs are enabled.") - c.consul = &flags.ConsulFlags{} + c.http = &flags.HTTPFlags{} c.k8s = &flags.K8SFlags{} - flags.Merge(c.flags, c.consul.Flags()) + flags.Merge(c.flags, c.http.Flags()) flags.Merge(c.flags, c.k8s.Flags()) c.help = flags.Usage(help, c.flags) @@ -195,49 +190,26 @@ func (c *Command) Run(args []string) int { } } - // Set up logging - if c.logger == nil { + // Setup Consul client + if c.consulClient == nil { var err error - c.logger, err = common.Logger(c.flagLogLevel, c.flagLogJSON) + c.consulClient, err = c.http.APIClient() if err != nil { - c.UI.Error(err.Error()) + c.UI.Error(fmt.Sprintf("Error connecting to Consul agent: %s", err)) return 1 } } - // Create Consul API config object. - consulConfig := c.consul.ConsulClientConfig() - - // Create a context to be used by the processes started in this command. - ctx, cancelFunc := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM) - defer cancelFunc() - - if c.connMgr == nil { - // Start Consul server Connection manager. 
- serverConnMgrCfg, err := c.consul.ConsulServerConnMgrConfig() - if err != nil { - c.UI.Error(fmt.Sprintf("unable to create config for consul-server-connection-manager: %s", err)) - return 1 - } - c.connMgr, err = discovery.NewWatcher(ctx, serverConnMgrCfg, c.logger.Named("consul-server-connection-manager")) + // Set up logging + if c.logger == nil { + var err error + c.logger, err = common.Logger(c.flagLogLevel, c.flagLogJSON) if err != nil { - c.UI.Error(fmt.Sprintf("unable to create Consul server watcher: %s", err)) + c.UI.Error(err.Error()) return 1 } - - go c.connMgr.Run() - defer c.connMgr.Stop() } - // This is a blocking command that is run in order to ensure we only start the - // sync-catalog controllers only after we have access to the Consul server. - _, err := c.connMgr.State() - if err != nil { - c.UI.Error(fmt.Sprintf("unable to start Consul server watcher: %s", err)) - return 1 - } - c.ready = true - // Convert allow/deny lists to sets allowSet := flags.ToSet(c.flagAllowK8sNamespacesList) denySet := flags.ToSet(c.flagDenyK8sNamespacesList) @@ -255,17 +227,31 @@ func (c *Command) Run(args []string) int { // Start the K8S-to-Consul syncer var toConsulCh chan struct{} if c.flagToConsul { + // If namespaces are enabled we need to use a new Consul API endpoint + // to list node services. This endpoint is only available in Consul + // 1.7+. To preserve backwards compatibility, when namespaces are not + // enabled we use a client that queries the older API endpoint. + var svcsClient catalogtoconsul.ConsulNodeServicesClient + if c.flagEnableNamespaces { + svcsClient = &catalogtoconsul.NamespacesNodeServicesClient{ + Client: c.consulClient, + } + } else { + svcsClient = &catalogtoconsul.PreNamespacesNodeServicesClient{ + Client: c.consulClient, + } + } // Build the Consul sync and start it syncer := &catalogtoconsul.ConsulSyncer{ - ConsulClientConfig: consulConfig, - ConsulServerConnMgr: c.connMgr, - Log: c.logger.Named("to-consul/sink"), - EnableNamespaces: c.flagEnableNamespaces, - CrossNamespaceACLPolicy: c.flagCrossNamespaceACLPolicy, - SyncPeriod: c.flagConsulWritePeriod, - ServicePollPeriod: c.flagConsulWritePeriod * 2, - ConsulK8STag: c.flagConsulK8STag, - ConsulNodeName: c.flagConsulNodeName, + Client: c.consulClient, + Log: c.logger.Named("to-consul/sink"), + EnableNamespaces: c.flagEnableNamespaces, + CrossNamespaceACLPolicy: c.flagCrossNamespaceACLPolicy, + SyncPeriod: c.flagConsulWritePeriod, + ServicePollPeriod: c.flagConsulWritePeriod * 2, + ConsulK8STag: c.flagConsulK8STag, + ConsulNodeName: c.flagConsulNodeName, + ConsulNodeServicesClient: svcsClient, } go syncer.Run(ctx) @@ -312,13 +298,12 @@ func (c *Command) Run(args []string) int { } source := &catalogtok8s.Source{ - ConsulClientConfig: consulConfig, - ConsulServerConnMgr: c.connMgr, - Domain: c.flagConsulDomain, - Sink: sink, - Prefix: c.flagK8SServicePrefix, - Log: c.logger.Named("to-k8s/source"), - ConsulK8STag: c.flagConsulK8STag, + Client: c.consulClient, + Domain: c.flagConsulDomain, + Sink: sink, + Prefix: c.flagK8SServicePrefix, + Log: c.logger.Named("to-k8s/source"), + ConsulK8STag: c.flagConsulK8STag, } go source.Run(ctx) @@ -378,9 +363,12 @@ func (c *Command) Run(args []string) int { } } -func (c *Command) handleReady(rw http.ResponseWriter, _ *http.Request) { - if !c.ready { - c.UI.Error("[GET /health/ready] sync catalog controller is not yet ready") +func (c *Command) handleReady(rw http.ResponseWriter, req *http.Request) { + // The main readiness check is whether sync can talk to + // the consul 
cluster, in this case querying for the leader + _, err := c.consulClient.Status().Leader() + if err != nil { + c.UI.Error(fmt.Sprintf("[GET /health/ready] Error getting leader status: %s", err)) rw.WriteHeader(500) return } diff --git a/control-plane/subcommand/sync-catalog/command_ent_test.go b/control-plane/subcommand/sync-catalog/command_ent_test.go index fac330c557..4e5ba14e93 100644 --- a/control-plane/subcommand/sync-catalog/command_ent_test.go +++ b/control-plane/subcommand/sync-catalog/command_ent_test.go @@ -5,12 +5,10 @@ package synccatalog import ( "context" "fmt" - "strconv" "strings" "testing" "time" - "github.com/hashicorp/consul-k8s/control-plane/helper/test" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/sdk/testutil/retry" @@ -45,15 +43,20 @@ func TestRun_ToConsulSingleDestinationNamespace(t *testing.T) { for _, c := range cases { t.Run(c.Name, func(tt *testing.T) { - k8s, testClient := completeSetup(tt) - consulClient := testClient.APIClient + k8s, testServer := completeSetupEnterprise(tt) + defer testServer.Stop() // Run the command. ui := cli.NewMockUi() + consulClient, err := api.NewClient(&api.Config{ + Address: testServer.HTTPAddr, + }) + require.NoError(tt, err) + cmd := Command{ - UI: ui, - clientset: k8s, - connMgr: testClient.Watcher, + UI: ui, + clientset: k8s, + consulClient: consulClient, logger: hclog.New(&hclog.LoggerOptions{ Name: tt.Name(), Level: hclog.Debug, @@ -61,7 +64,7 @@ func TestRun_ToConsulSingleDestinationNamespace(t *testing.T) { } // Create two services in k8s in default and foo namespaces. - _, err := k8s.CoreV1().Services(metav1.NamespaceDefault).Create(context.Background(), lbService("default", "1.1.1.1"), metav1.CreateOptions{}) + _, err = k8s.CoreV1().Services(metav1.NamespaceDefault).Create(context.Background(), lbService("default", "1.1.1.1"), metav1.CreateOptions{}) require.NoError(tt, err) _, err = k8s.CoreV1().Namespaces().Create( context.Background(), @@ -76,8 +79,6 @@ func TestRun_ToConsulSingleDestinationNamespace(t *testing.T) { require.NoError(tt, err) exitChan := runCommandAsynchronously(&cmd, []string{ - "-addresses", "127.0.0.1", - "-http-port", strconv.Itoa(testClient.Cfg.HTTPPort), "-consul-write-interval", "500ms", "-add-k8s-namespace-suffix", "-log-level=debug", @@ -180,16 +181,20 @@ func TestRun_ToConsulMirroringNamespaces(t *testing.T) { for name, c := range cases { t.Run(name, func(tt *testing.T) { - k8s, testClient := completeSetup(tt) - - consulClient := testClient.APIClient + k8s, testServer := completeSetupEnterprise(tt) + defer testServer.Stop() // Run the command. ui := cli.NewMockUi() + consulClient, err := api.NewClient(&api.Config{ + Address: testServer.HTTPAddr, + }) + require.NoError(tt, err) + cmd := Command{ - UI: ui, - clientset: k8s, - connMgr: testClient.Watcher, + UI: ui, + clientset: k8s, + consulClient: consulClient, logger: hclog.New(&hclog.LoggerOptions{ Name: tt.Name(), Level: hclog.Debug, @@ -197,7 +202,7 @@ func TestRun_ToConsulMirroringNamespaces(t *testing.T) { } // Create two services in k8s in default and foo namespaces. 
- _, err := k8s.CoreV1().Services(metav1.NamespaceDefault).Create(context.Background(), lbService("default", "1.1.1.1"), metav1.CreateOptions{}) + _, err = k8s.CoreV1().Services(metav1.NamespaceDefault).Create(context.Background(), lbService("default", "1.1.1.1"), metav1.CreateOptions{}) require.NoError(tt, err) _, err = k8s.CoreV1().Namespaces().Create( context.Background(), @@ -212,8 +217,6 @@ require.NoError(tt, err) args := append([]string{ - "-addresses", "127.0.0.1", - "-http-port", strconv.Itoa(testClient.Cfg.HTTPPort), "-consul-write-interval", "500ms", "-add-k8s-namespace-suffix", "-log-level=debug", @@ -452,13 +455,15 @@ for name, c := range cases { t.Run(name, func(tt *testing.T) { - k8s, testClient := completeSetup(tt) - consulClient := testClient.APIClient - + k8s, testServer := completeSetupEnterprise(tt) + defer testServer.Stop() ui := cli.NewMockUi() + consulClient, err := api.NewClient(&api.Config{ + Address: testServer.HTTPAddr, + }) + require.NoError(tt, err) + commonArgs := []string{ - "-addresses", "127.0.0.1", - "-http-port", strconv.Itoa(testClient.Cfg.HTTPPort), "-consul-write-interval", "500ms", "-log-level=debug", "-allow-k8s-namespace=*", @@ -466,7 +471,7 @@ // Create two services in k8s in default and foo namespaces. { - _, err := k8s.CoreV1().Services(metav1.NamespaceDefault).Create(context.Background(), lbService("default", "1.1.1.1"), metav1.CreateOptions{}) + _, err = k8s.CoreV1().Services(metav1.NamespaceDefault).Create(context.Background(), lbService("default", "1.1.1.1"), metav1.CreateOptions{}) require.NoError(tt, err) _, err = k8s.CoreV1().Namespaces().Create( context.Background(), @@ -484,9 +489,9 @@ // Run the first command. { firstCmd := Command{ - UI: ui, - clientset: k8s, - connMgr: testClient.Watcher, + UI: ui, + clientset: k8s, + consulClient: consulClient, logger: hclog.New(&hclog.LoggerOptions{ Name: tt.Name() + "-firstrun", Level: hclog.Debug, @@ -514,9 +519,9 @@ // Run the second command. { secondCmd := Command{ - UI: ui, - clientset: k8s, - connMgr: testClient.Watcher, + UI: ui, + clientset: k8s, + consulClient: consulClient, logger: hclog.New(&hclog.LoggerOptions{ Name: tt.Name() + "-secondrun", Level: hclog.Debug, @@ -559,7 +564,7 @@ } // Tests that the cross-namespace ACL policy is correctly -// attached to all created namespaces. Specific test for +// attached to all created namespaces. Specific tests for // services and their destinations are covered in other tests. 
func TestRun_ToConsulNamespacesACLs(t *testing.T) { cases := []struct { @@ -617,17 +622,37 @@ func TestRun_ToConsulNamespacesACLs(t *testing.T) { require.NoError(tt, err) // Set up consul server - bootToken := "74044c72-03c8-42b0-b57f-728bb22ca7fb" - testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { - c.ACL.Enabled = true - c.ACL.Tokens.InitialManagement = bootToken + a, err := testutil.NewTestServerConfigT(tt, func(client *testutil.TestServerConfig) { + client.ACL.Enabled = true }) + require.NoError(tt, err) + defer a.Stop() + + // Set up a client for bootstrapping + bootClient, err := api.NewClient(&api.Config{ + Address: a.HTTPAddr, + }) + require.NoError(tt, err) + + // Bootstrap the server and get the bootstrap token + var bootstrapResp *api.ACLToken + timer := &retry.Timer{Timeout: 10 * time.Second, Wait: 500 * time.Millisecond} + retry.RunWith(timer, tt, func(r *retry.R) { + bootstrapResp, _, err = bootClient.ACL().Bootstrap() + require.NoError(r, err) + }) + bootstrapToken := bootstrapResp.SecretID + require.NotEmpty(tt, bootstrapToken) // Set up consul client - client := testClient.APIClient + client, err := api.NewClient(&api.Config{ + Address: a.HTTPAddr, + Token: bootstrapToken, + }) + require.NoError(tt, err) // Create cross namespace policy - // This would have been created by the server-acl-init in the + // This would have been created by the acl bootstrapper in the // default namespace to be attached to all created namespaces. crossNamespaceRules := `namespace_prefix "" { service_prefix "" { @@ -650,9 +675,9 @@ func TestRun_ToConsulNamespacesACLs(t *testing.T) { // Set up the sync command ui := cli.NewMockUi() cmd := Command{ - UI: ui, - clientset: k8s, - connMgr: testClient.Watcher, + UI: ui, + clientset: k8s, + consulClient: client, logger: hclog.New(&hclog.LoggerOptions{ Name: tt.Name(), Level: hclog.Debug, @@ -661,9 +686,6 @@ func TestRun_ToConsulNamespacesACLs(t *testing.T) { // Set flags and run the command commonArgs := []string{ - "-addresses", "127.0.0.1", - "-http-port", strconv.Itoa(testClient.Cfg.HTTPPort), - "-token", bootToken, "-consul-write-interval", "500ms", "-log-level=debug", "-allow-k8s-namespace=*", @@ -674,7 +696,7 @@ func TestRun_ToConsulNamespacesACLs(t *testing.T) { defer stopCommand(tt, &cmd, exitChan) // Check the namespaces are created correctly - timer := &retry.Timer{Timeout: 10 * time.Second, Wait: 500 * time.Millisecond} + timer = &retry.Timer{Timeout: 10 * time.Second, Wait: 500 * time.Millisecond} retry.RunWith(timer, tt, func(r *retry.R) { // Check that we have the right number of namespaces namespaces, _, err := client.Namespaces().List(&api.QueryOptions{}) @@ -707,7 +729,16 @@ func TestRun_ToConsulNamespacesACLs(t *testing.T) { } } + }) }) } } + +// Set up test consul agent and fake kubernetes cluster client +func completeSetupEnterprise(t *testing.T) (*fake.Clientset, *testutil.TestServer) { + k8s := fake.NewSimpleClientset() + svr, err := testutil.NewTestServerConfigT(t, nil) + require.NoError(t, err) + return k8s, svr +} diff --git a/control-plane/subcommand/sync-catalog/command_test.go b/control-plane/subcommand/sync-catalog/command_test.go index 8228986d00..c4e892c834 100644 --- a/control-plane/subcommand/sync-catalog/command_test.go +++ b/control-plane/subcommand/sync-catalog/command_test.go @@ -3,12 +3,12 @@ package synccatalog import ( "context" "os" - "strconv" "syscall" "testing" "time" - "github.com/hashicorp/consul-k8s/control-plane/helper/test" + "github.com/hashicorp/consul/api" + 
"github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/hashicorp/go-hclog" "github.com/mitchellh/cli" @@ -55,7 +55,8 @@ func TestRun_FlagValidation(t *testing.T) { func TestRun_Defaults_SyncsConsulServiceToK8s(t *testing.T) { t.Parallel() - k8s, testClient := completeSetup(t) + k8s, testServer := completeSetup(t) + defer testServer.Stop() // Run the command. ui := cli.NewMockUi() @@ -66,12 +67,11 @@ func TestRun_Defaults_SyncsConsulServiceToK8s(t *testing.T) { Name: t.Name(), Level: hclog.Debug, }), - connMgr: testClient.Watcher, } exitChan := runCommandAsynchronously(&cmd, []string{ - "-addresses", "127.0.0.1", - "-http-port", strconv.Itoa(testClient.Cfg.HTTPPort), + "-http-addr", testServer.HTTPAddr, + "-consul-api-timeout", "5s", }) defer stopCommand(t, &cmd, exitChan) @@ -92,7 +92,8 @@ func TestRun_ExitCleanlyOnSignals(t *testing.T) { func testSignalHandling(sig os.Signal) func(*testing.T) { return func(t *testing.T) { - k8s, testClient := completeSetup(t) + k8s, testServer := completeSetup(t) + defer testServer.Stop() // Run the command. ui := cli.NewMockUi() @@ -103,12 +104,11 @@ func testSignalHandling(sig os.Signal) func(*testing.T) { Name: t.Name(), Level: hclog.Debug, }), - connMgr: testClient.Watcher, } exitChan := runCommandAsynchronously(&cmd, []string{ - "-addresses", "127.0.0.1", - "-http-port", strconv.Itoa(testClient.Cfg.HTTPPort), + "-http-addr", testServer.HTTPAddr, + "-consul-api-timeout", "5s", }) cmd.sendSignal(sig) @@ -132,29 +132,32 @@ func testSignalHandling(sig os.Signal) func(*testing.T) { func TestRun_ToConsulWithAddK8SNamespaceSuffix(t *testing.T) { t.Parallel() - k8s, testClient := completeSetup(t) - consulClient := testClient.APIClient + k8s, testServer := completeSetup(t) + defer testServer.Stop() + + consulClient, err := api.NewClient(&api.Config{ + Address: testServer.HTTPAddr, + }) + require.NoError(t, err) // Run the command. ui := cli.NewMockUi() cmd := Command{ - UI: ui, - clientset: k8s, + UI: ui, + clientset: k8s, + consulClient: consulClient, logger: hclog.New(&hclog.LoggerOptions{ Name: t.Name(), Level: hclog.Debug, }), flagAllowK8sNamespacesList: []string{"*"}, - connMgr: testClient.Watcher, } // create a service in k8s - _, err := k8s.CoreV1().Services(metav1.NamespaceDefault).Create(context.Background(), lbService("foo", "1.1.1.1"), metav1.CreateOptions{}) + _, err = k8s.CoreV1().Services(metav1.NamespaceDefault).Create(context.Background(), lbService("foo", "1.1.1.1"), metav1.CreateOptions{}) require.NoError(t, err) exitChan := runCommandAsynchronously(&cmd, []string{ - "-addresses", "127.0.0.1", - "-http-port", strconv.Itoa(testClient.Cfg.HTTPPort), // change the write interval, so we can see changes in Consul quicker "-consul-write-interval", "100ms", "-add-k8s-namespace-suffix", @@ -174,30 +177,32 @@ func TestRun_ToConsulWithAddK8SNamespaceSuffix(t *testing.T) { func TestCommand_Run_ToConsulChangeAddK8SNamespaceSuffixToTrue(t *testing.T) { t.Parallel() - k8s, testClient := completeSetup(t) + k8s, testServer := completeSetup(t) + defer testServer.Stop() - consulClient := testClient.APIClient + consulClient, err := api.NewClient(&api.Config{ + Address: testServer.HTTPAddr, + }) + require.NoError(t, err) // Run the command. 
ui := cli.NewMockUi() cmd := Command{ - UI: ui, - clientset: k8s, + UI: ui, + clientset: k8s, + consulClient: consulClient, logger: hclog.New(&hclog.LoggerOptions{ Name: t.Name(), Level: hclog.Debug, }), flagAllowK8sNamespacesList: []string{"*"}, - connMgr: testClient.Watcher, } // create a service in k8s - _, err := k8s.CoreV1().Services(metav1.NamespaceDefault).Create(context.Background(), lbService("foo", "1.1.1.1"), metav1.CreateOptions{}) + _, err = k8s.CoreV1().Services(metav1.NamespaceDefault).Create(context.Background(), lbService("foo", "1.1.1.1"), metav1.CreateOptions{}) require.NoError(t, err) exitChan := runCommandAsynchronously(&cmd, []string{ - "-addresses", "127.0.0.1", - "-http-port", strconv.Itoa(testClient.Cfg.HTTPPort), "-consul-write-interval", "100ms", }) @@ -212,8 +217,6 @@ func TestCommand_Run_ToConsulChangeAddK8SNamespaceSuffixToTrue(t *testing.T) { // restart sync with -add-k8s-namespace-suffix exitChan = runCommandAsynchronously(&cmd, []string{ - "-addresses", "127.0.0.1", - "-http-port", strconv.Itoa(testClient.Cfg.HTTPPort), "-consul-write-interval", "100ms", "-add-k8s-namespace-suffix", }) @@ -234,33 +237,35 @@ func TestCommand_Run_ToConsulChangeAddK8SNamespaceSuffixToTrue(t *testing.T) { func TestCommand_Run_ToConsulTwoServicesSameNameDifferentNamespace(t *testing.T) { t.Parallel() - k8s, testClient := completeSetup(t) + k8s, testServer := completeSetup(t) + defer testServer.Stop() - consulClient := testClient.APIClient + consulClient, err := api.NewClient(&api.Config{ + Address: testServer.HTTPAddr, + }) + require.NoError(t, err) // Run the command. ui := cli.NewMockUi() cmd := Command{ - UI: ui, - clientset: k8s, + UI: ui, + clientset: k8s, + consulClient: consulClient, logger: hclog.New(&hclog.LoggerOptions{ Name: t.Name(), Level: hclog.Debug, }), flagAllowK8sNamespacesList: []string{"*"}, - connMgr: testClient.Watcher, } // create two services in k8s - _, err := k8s.CoreV1().Services("bar").Create(context.Background(), lbService("foo", "1.1.1.1"), metav1.CreateOptions{}) + _, err = k8s.CoreV1().Services("bar").Create(context.Background(), lbService("foo", "1.1.1.1"), metav1.CreateOptions{}) require.NoError(t, err) _, err = k8s.CoreV1().Services("baz").Create(context.Background(), lbService("foo", "2.2.2.2"), metav1.CreateOptions{}) require.NoError(t, err) exitChan := runCommandAsynchronously(&cmd, []string{ - "-addresses", "127.0.0.1", - "-http-port", strconv.Itoa(testClient.Cfg.HTTPPort), "-consul-write-interval", "100ms", "-add-k8s-namespace-suffix", }) @@ -326,13 +331,17 @@ func TestRun_ToConsulAllowDenyLists(t *testing.T) { for name, c := range cases { t.Run(name, func(tt *testing.T) { - k8s, testClient := completeSetup(tt) + k8s, testServer := completeSetup(tt) + defer testServer.Stop() - consulClient := testClient.APIClient + consulClient, err := api.NewClient(&api.Config{ + Address: testServer.HTTPAddr, + }) + require.NoError(t, err) // Create two services in k8s in default and foo namespaces. 
{ - _, err := k8s.CoreV1().Services(metav1.NamespaceDefault).Create(context.Background(), lbService("default", "1.1.1.1"), metav1.CreateOptions{}) + _, err = k8s.CoreV1().Services(metav1.NamespaceDefault).Create(context.Background(), lbService("default", "1.1.1.1"), metav1.CreateOptions{}) require.NoError(tt, err) _, err = k8s.CoreV1().Namespaces().Create( context.Background(), @@ -348,8 +357,6 @@ func TestRun_ToConsulAllowDenyLists(t *testing.T) { } flags := []string{ - "-addresses", "127.0.0.1", - "-http-port", strconv.Itoa(testClient.Cfg.HTTPPort), "-consul-write-interval", "100ms", "-log-level=debug", } @@ -363,13 +370,13 @@ func TestRun_ToConsulAllowDenyLists(t *testing.T) { // Run the command ui := cli.NewMockUi() cmd := Command{ - UI: ui, - clientset: k8s, + UI: ui, + clientset: k8s, + consulClient: consulClient, logger: hclog.New(&hclog.LoggerOptions{ Name: tt.Name(), Level: hclog.Debug, }), - connMgr: testClient.Watcher, } exitChan := runCommandAsynchronously(&cmd, flags) defer stopCommand(tt, &cmd, exitChan) @@ -473,15 +480,17 @@ func TestRun_ToConsulChangingFlags(t *testing.T) { for name, c := range cases { t.Run(name, func(tt *testing.T) { - k8s, testClient := completeSetup(tt) + k8s, testServer := completeSetup(tt) + defer testServer.Stop() - consulClient := testClient.APIClient + consulClient, err := api.NewClient(&api.Config{ + Address: testServer.HTTPAddr, + }) + require.NoError(t, err) ui := cli.NewMockUi() commonArgs := []string{ - "-addresses", "127.0.0.1", - "-http-port", strconv.Itoa(testClient.Cfg.HTTPPort), "-consul-write-interval", "100ms", "-log-level=debug", } @@ -506,13 +515,13 @@ func TestRun_ToConsulChangingFlags(t *testing.T) { // Run the first command. { firstCmd := Command{ - UI: ui, - clientset: k8s, + UI: ui, + clientset: k8s, + consulClient: consulClient, logger: hclog.New(&hclog.LoggerOptions{ Name: tt.Name() + "-firstrun", Level: hclog.Debug, }), - connMgr: testClient.Watcher, } exitChan := runCommandAsynchronously(&firstCmd, append(commonArgs, c.FirstRunFlags...)) @@ -532,13 +541,13 @@ func TestRun_ToConsulChangingFlags(t *testing.T) { // Run the second command. { secondCmd := Command{ - UI: ui, - clientset: k8s, + UI: ui, + clientset: k8s, + consulClient: consulClient, logger: hclog.New(&hclog.LoggerOptions{ Name: tt.Name() + "-secondrun", Level: hclog.Debug, }), - connMgr: testClient.Watcher, } exitChan := runCommandAsynchronously(&secondCmd, append(commonArgs, c.SecondRunFlags...)) defer stopCommand(tt, &secondCmd, exitChan) @@ -568,12 +577,13 @@ func TestRun_ToConsulChangingFlags(t *testing.T) { } // Set up test consul agent and fake kubernetes cluster client. -func completeSetup(t *testing.T) (*fake.Clientset, *test.TestServerClient) { +func completeSetup(t *testing.T) (*fake.Clientset, *testutil.TestServer) { k8s := fake.NewSimpleClientset() - testClient := test.TestServerWithMockConnMgrWatcher(t, nil) + svr, err := testutil.NewTestServerConfigT(t, nil) + require.NoError(t, err) - return k8s, testClient + return k8s, svr } // This function starts the command asynchronously and returns a non-blocking chan. 
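A minimal sketch of the test-server setup pattern the sync-catalog tests above revert to, assuming only the testutil, api, and require packages already imported in those tests; the test name and the leader check are illustrative, not part of this patch:

package synccatalog

import (
	"testing"

	"github.com/hashicorp/consul/api"
	"github.com/hashicorp/consul/sdk/testutil"
	"github.com/stretchr/testify/require"
)

// TestExample_ServerSetup (illustrative) shows the completeSetup flow used
// above: start a throwaway in-process Consul server, then point an API
// client at its HTTP address so it can be injected as
// Command{consulClient: client}.
func TestExample_ServerSetup(t *testing.T) {
	// NewTestServerConfigT starts a real consul agent in test mode and
	// waits until it is ready; callers are responsible for Stop().
	svr, err := testutil.NewTestServerConfigT(t, nil)
	require.NoError(t, err)
	defer svr.Stop()

	// The client talks plain HTTP to the test server's address.
	client, err := api.NewClient(&api.Config{Address: svr.HTTPAddr})
	require.NoError(t, err)

	// Sanity check mirroring the readiness probe above: ask for the leader.
	_, err = client.Status().Leader()
	require.NoError(t, err)
}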
diff --git a/control-plane/subcommand/webhook-cert-manager/command_test.go b/control-plane/subcommand/webhook-cert-manager/command_test.go index 7e302d5261..1841abf802 100644 --- a/control-plane/subcommand/webhook-cert-manager/command_test.go +++ b/control-plane/subcommand/webhook-cert-manager/command_test.go @@ -89,7 +89,7 @@ func testSignalHandling(sig os.Signal) func(*testing.T) { file, err := os.CreateTemp("", "config.json") require.NoError(t, err) - defer os.RemoveAll(file.Name()) + defer os.Remove(file.Name()) _, err = file.Write([]byte(configFile)) require.NoError(t, err) @@ -212,7 +212,7 @@ func TestRun_SecretDoesNotExist(t *testing.T) { file, err := os.CreateTemp("", "config.json") require.NoError(t, err) - defer os.RemoveAll(file.Name()) + defer os.Remove(file.Name()) _, err = file.Write([]byte(configFile)) require.NoError(t, err) @@ -340,7 +340,7 @@ func TestRun_SecretExists(t *testing.T) { file, err := os.CreateTemp("", "config.json") require.NoError(t, err) - defer os.RemoveAll(file.Name()) + defer os.Remove(file.Name()) _, err = file.Write([]byte(configFile)) require.NoError(t, err) @@ -440,7 +440,7 @@ func TestRun_SecretUpdates(t *testing.T) { file, err := os.CreateTemp("", "config.json") require.NoError(t, err) - defer os.RemoveAll(file.Name()) + defer os.Remove(file.Name()) _, err = file.Write([]byte(configFileUpdates)) require.NoError(t, err) @@ -630,7 +630,7 @@ func TestCertWatcher(t *testing.T) { file, err := os.CreateTemp("", "config.json") require.NoError(t, err) - defer os.RemoveAll(file.Name()) + defer os.Remove(file.Name()) _, err = file.Write([]byte(configFileUpdates)) require.NoError(t, err) diff --git a/control-plane/version/version.go b/control-plane/version/version.go index 933f072f35..f89582cd9d 100644 --- a/control-plane/version/version.go +++ b/control-plane/version/version.go @@ -14,7 +14,7 @@ var ( // // Version must conform to the format expected by // github.com/hashicorp/go-version for tests to work. - Version = "1.1.0" + Version = "0.49.5" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release diff --git a/hack/aws-acceptance-test-cleanup/go.mod b/hack/aws-acceptance-test-cleanup/go.mod index ac4f7b0d1a..a266c1a7e8 100644 --- a/hack/aws-acceptance-test-cleanup/go.mod +++ b/hack/aws-acceptance-test-cleanup/go.mod @@ -1,6 +1,6 @@ module github.com/hashicorp/consul-helm/hack/aws-acceptance-test-cleanup -go 1.20 +go 1.18 require ( github.com/aws/aws-sdk-go v1.38.63 diff --git a/hack/copy-crds-to-chart/go.mod b/hack/copy-crds-to-chart/go.mod index c224f8f244..5456f28ce2 100644 --- a/hack/copy-crds-to-chart/go.mod +++ b/hack/copy-crds-to-chart/go.mod @@ -1,3 +1,3 @@ module github.com/hashicorp/consul-k8s/hack/copy-crds-to-chart -go 1.20 +go 1.18 diff --git a/hack/copy-crds-to-chart/main.go b/hack/copy-crds-to-chart/main.go index 7085bdb9e6..5feee4e229 100644 --- a/hack/copy-crds-to-chart/main.go +++ b/hack/copy-crds-to-chart/main.go @@ -9,14 +9,6 @@ import ( "strings" ) -var ( - // HACK IT! - requiresPeering = map[string]struct{}{ - "consul.hashicorp.com_peeringacceptors.yaml": {}, - "consul.hashicorp.com_peeringdialers.yaml": {}, - } -) - func main() { if len(os.Args) != 1 { fmt.Println("Usage: go run ./...") @@ -51,13 +43,8 @@ func realMain(helmPath string) error { // Strip leading newline. 
contents = strings.TrimPrefix(contents, "\n") - if _, ok := requiresPeering[info.Name()]; ok { - // Add {{- if and .Values.connectInject.enabled .Values.global.peering.enabled }} {{- end }} wrapper. - contents = fmt.Sprintf("{{- if and .Values.connectInject.enabled .Values.global.peering.enabled }}\n%s{{- end }}\n", contents) - } else { - // Add {{- if .Values.connectInject.enabled }} {{- end }} wrapper. - contents = fmt.Sprintf("{{- if .Values.connectInject.enabled }}\n%s{{- end }}\n", contents) - } + // Add {{- if .Values.controller.enabled }} {{- end }} wrapper. + contents = fmt.Sprintf("{{- if .Values.controller.enabled }}\n%s{{- end }}\n", contents) // Add labels, this is hacky because we're relying on the line number // but it means we don't need to regex or yaml parse. diff --git a/hack/helm-reference-gen/go.mod b/hack/helm-reference-gen/go.mod index 36e1ff3a8d..8595831de1 100644 --- a/hack/helm-reference-gen/go.mod +++ b/hack/helm-reference-gen/go.mod @@ -1,6 +1,6 @@ module github.com/hashicorp/consul-k8s/hack/helm-reference-gen -go 1.20 +go 1.18 require ( github.com/stretchr/testify v1.6.1
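A note on the webhook-cert-manager test cleanup above: os.CreateTemp returns a single file, so os.Remove is the precise cleanup call, while os.RemoveAll happens to accept a plain file but is intended for directory trees. A minimal standalone sketch of the pattern, with the file name and contents illustrative only:

package main

import (
	"log"
	"os"
)

func main() {
	// Create a temp file, as the tests above do for their config.
	file, err := os.CreateTemp("", "config.json")
	if err != nil {
		log.Fatal(err)
	}
	// Remove exactly this one file on exit. Remove fails on non-empty
	// directories, so it also guards against deleting more than intended.
	defer os.Remove(file.Name())

	if _, err := file.Write([]byte(`{"illustrative": true}`)); err != nil {
		log.Fatal(err)
	}
	log.Printf("wrote %s", file.Name())
}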