Merge pull request #904 from hashicorp/consul-vault-base
Add support for Vault as a secrets backend for Gossip Encryption, Server TLS certs and ConnectCA
kschoche committed Dec 7, 2021
2 parents 2e9da0d + 5df0004 commit 053af17
Showing 54 changed files with 3,645 additions and 147 deletions.
20 changes: 17 additions & 3 deletions CHANGELOG.md
@@ -3,11 +3,25 @@
BREAKING CHANGES:
* Control Plane
* Update minimum go version for project to 1.17 [[GH-878](https://github.com/hashicorp/consul-k8s/pull/878)]
* Add boolean metric to merged metrics response `consul_merged_service_metrics_success` to indicate if service metrics were
scraped successfully. [[GH-551](https://github.com/hashicorp/consul-k8s/pull/551)]
* Add boolean metric to merged metrics response `consul_merged_service_metrics_success` to indicate if service metrics
were scraped successfully. [[GH-551](https://github.com/hashicorp/consul-k8s/pull/551)]

FEATURES:
* Vault as a Secrets Backend: Add support for Vault as a secrets backend for Gossip Encryption, Server TLS certs and Service Mesh TLS certificates,
removing the existing usage of Kubernetes Secrets for the respective secrets. [[GH-904](https://github.com/hashicorp/consul-k8s/pull/904/)]

See the [Consul Kubernetes and Vault documentation](https://www.consul.io/docs/k8s/installation/vault)
for full install instructions.

Requirements:
* Consul 1.11+
* Vault 1.9+ and Vault-K8s 0.14+ must be installed with the Vault Agent Injector enabled (`injector.enabled=true`)
  in the Kubernetes cluster that Consul is installed into.
* `global.tls.enableAutoEncryption=true` is required for TLS support.
* If TLS is enabled in Vault, `global.secretsBackend.vault.ca` must be provided and should reference a Kube secret
which holds a copy of the Vault CA cert.

IMPROVEMENTS:
* CLI
* Pre-check in the `install` command to verify the correct license secret exists when using an enterprise Consul image. [[GH-875](https://github.com/hashicorp/consul-k8s/pull/875)]
* Control Plane
* Add a label "managed-by" to every secret the control-plane creates. Only delete said secrets on an uninstall. [[GH-835](https://github.com/hashicorp/consul-k8s/pull/835)]
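To make the new Vault-backed configuration concrete, here is a minimal sketch of how an acceptance test could wire the relevant Helm values through terratest's `helm.Options`. It is illustrative only: the value keys under `global.secretsBackend.vault.*`, the reuse of `global.gossipEncryption.secretName`/`secretKey` for Vault paths, and every role name, secret name, and Vault path shown are assumptions based on the changelog entry above, not values taken from this diff.

```go
package example

import (
	"testing"

	"github.com/gruntwork-io/terratest/modules/helm"
	terratestk8s "github.com/gruntwork-io/terratest/modules/k8s"
)

// installWithVaultBackend sketches a Consul Helm install that sources its
// secrets from Vault instead of Kubernetes Secrets. The value keys, roles,
// and Vault paths are illustrative assumptions, not taken from this commit.
func installWithVaultBackend(t *testing.T) {
	options := &helm.Options{
		KubectlOptions: terratestk8s.NewKubectlOptions("", "", "default"),
		SetValues: map[string]string{
			// Enable the Vault secrets backend (assumed key names).
			"global.secretsBackend.vault.enabled":          "true",
			"global.secretsBackend.vault.consulServerRole": "consul-server",
			"global.secretsBackend.vault.consulClientRole": "consul-client",

			// If Vault itself serves TLS, point at a Kube secret holding the Vault CA,
			// per the requirement in the changelog (assumed sub-keys).
			"global.secretsBackend.vault.ca.secretName": "vault-ca",
			"global.secretsBackend.vault.ca.secretKey":  "tls.crt",

			// TLS: enableAutoEncryption is required per the changelog.
			"global.tls.enabled":              "true",
			"global.tls.enableAutoEncryption": "true",

			// Gossip encryption key read from a Vault KV path (assumed reuse of
			// the existing secretName/secretKey fields for Vault paths).
			"global.gossipEncryption.secretName": "consul/data/secret/gossip",
			"global.gossipEncryption.secretKey":  "key",
		},
	}

	// Install the chart with the Vault-backed values.
	helm.Install(t, options, "hashicorp/consul", "consul")
}
```

The authoritative list of supported values and setup steps is in the Consul Kubernetes and Vault documentation linked in the changelog entry above.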
46 changes: 1 addition & 45 deletions acceptance/framework/consul/consul_cluster.go
@@ -2,7 +2,6 @@ package consul

import (
"context"
"encoding/json"
"fmt"
"strings"
"testing"
@@ -115,7 +114,7 @@ func (h *HelmCluster) Create(t *testing.T) {
	})

	// Fail if there are any existing installations of the Helm chart.
	h.checkForPriorInstallations(t)
	helpers.CheckForPriorInstallations(t, h.kubernetesClient, h.helmOptions, "consul-helm", "chart=consul-helm")

	helm.Install(t, h.helmOptions, config.HelmChartPath, h.releaseName)

@@ -281,49 +280,6 @@ func (h *HelmCluster) SetupConsulClient(t *testing.T, secure bool) *api.Client {
	return consulClient
}

// checkForPriorInstallations checks if there is an existing Helm release
// for this Helm chart already installed. If there is, it fails the tests.
func (h *HelmCluster) checkForPriorInstallations(t *testing.T) {
	t.Helper()

	var helmListOutput string
	// Check if there's an existing cluster and fail if there is one.
	// We may need to retry since this is the first command run once the Kube
	// cluster is created and sometimes the API server returns errors.
	retry.RunWith(&retry.Counter{Wait: 1 * time.Second, Count: 3}, t, func(r *retry.R) {
		var err error
		// NOTE: It's okay to pass in `t` to RunHelmCommandAndGetOutputE despite being in a retry
		// because we're using RunHelmCommandAndGetOutputE (not RunHelmCommandAndGetOutput) so the `t` won't
		// get used to fail the test, just for logging.
		helmListOutput, err = helm.RunHelmCommandAndGetOutputE(t, h.helmOptions, "list", "--output", "json")
		require.NoError(r, err)
	})

	var installedReleases []map[string]string

	err := json.Unmarshal([]byte(helmListOutput), &installedReleases)
	require.NoError(t, err, "unmarshalling %q", helmListOutput)

	for _, r := range installedReleases {
		require.NotContains(t, r["chart"], "consul", fmt.Sprintf("detected an existing installation of Consul %s, release name: %s", r["chart"], r["name"]))
	}

	// Wait for all pods in the "default" namespace to exit. A previous
	// release may not be listed by Helm but its pods may still be terminating.
	retry.RunWith(&retry.Counter{Wait: 1 * time.Second, Count: 60}, t, func(r *retry.R) {
		consulPods, err := h.kubernetesClient.CoreV1().Pods(h.helmOptions.KubectlOptions.Namespace).List(context.Background(), metav1.ListOptions{})
		require.NoError(r, err)
		if len(consulPods.Items) > 0 {
			var podNames []string
			for _, p := range consulPods.Items {
				podNames = append(podNames, p.Name)
			}
			r.Errorf("pods from previous installation still running: %s", strings.Join(podNames, ", "))
		}
	})
}

// configurePodSecurityPolicies creates a simple pod security policy, a cluster role to allow access to the PSP,
// and a role binding that binds the default service account in the helm installation namespace to the cluster role.
// We bind the default service account for tests that are spinning up pods without a service account set so that
50 changes: 48 additions & 2 deletions acceptance/framework/helpers/helpers.go
@@ -2,6 +2,7 @@ package helpers

import (
"context"
"encoding/json"
"fmt"
"os"
"os/signal"
@@ -10,6 +11,8 @@ import (
"testing"
"time"

"github.com/gruntwork-io/terratest/modules/helm"

terratestk8s "github.com/gruntwork-io/terratest/modules/k8s"
"github.com/gruntwork-io/terratest/modules/random"
"github.com/hashicorp/consul-k8s/acceptance/framework/logger"
@@ -25,14 +28,56 @@ func RandomName() string {
	return fmt.Sprintf("test-%s", strings.ToLower(random.UniqueId()))
}

// CheckForPriorInstallations checks if there is an existing Helm release
// for this Helm chart already installed. If there is, it fails the tests.
func CheckForPriorInstallations(t *testing.T, client kubernetes.Interface, options *helm.Options, chartName, labelSelector string) {
	t.Helper()

	var helmListOutput string
	// Check if there's an existing cluster and fail if there is one.
	// We may need to retry since this is the first command run once the Kube
	// cluster is created and sometimes the API server returns errors.
	retry.RunWith(&retry.Counter{Wait: 1 * time.Second, Count: 3}, t, func(r *retry.R) {
		var err error
		// NOTE: It's okay to pass in `t` to RunHelmCommandAndGetOutputE despite being in a retry
		// because we're using RunHelmCommandAndGetOutputE (not RunHelmCommandAndGetOutput) so the `t` won't
		// get used to fail the test, just for logging.
		helmListOutput, err = helm.RunHelmCommandAndGetOutputE(t, options, "list", "--output", "json")
		require.NoError(r, err)
	})

	var installedReleases []map[string]string

	err := json.Unmarshal([]byte(helmListOutput), &installedReleases)
	require.NoError(t, err, "unmarshalling %q", helmListOutput)

	for _, r := range installedReleases {
		require.NotContains(t, r["chart"], chartName, fmt.Sprintf("detected an existing installation of %s %s, release name: %s", chartName, r["chart"], r["name"]))
	}

	// Wait for all pods in the "default" namespace to exit. A previous
	// release may not be listed by Helm but its pods may still be terminating.
	retry.RunWith(&retry.Counter{Wait: 1 * time.Second, Count: 60}, t, func(r *retry.R) {
		pods, err := client.CoreV1().Pods(options.KubectlOptions.Namespace).List(context.Background(), metav1.ListOptions{LabelSelector: labelSelector})
		require.NoError(r, err)
		if len(pods.Items) > 0 {
			var podNames []string
			for _, p := range pods.Items {
				podNames = append(podNames, p.Name)
			}
			r.Errorf("pods from previous installation still running: %s", strings.Join(podNames, ", "))
		}
	})
}

// WaitForAllPodsToBeReady waits until all pods with the provided podLabelSelector
// are in the ready status. It checks every 5 seconds for a total of 20 tries.
// If there is at least one container in a pod that isn't ready after that,
// it fails the test.
func WaitForAllPodsToBeReady(t *testing.T, client kubernetes.Interface, namespace, podLabelSelector string) {
	t.Helper()

	logger.Log(t, "Waiting for pods to be ready.")
	logger.Logf(t, "Waiting for pods with label %q to be ready.", podLabelSelector)

	// Wait up to 10m.
	// On Azure, volume provisioning can sometimes take close to 5 min,
@@ -41,6 +86,7 @@ func WaitForAllPodsToBeReady(t *testing.T, client kubernetes.Interface, namespac
	retry.RunWith(counter, t, func(r *retry.R) {
		pods, err := client.CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{LabelSelector: podLabelSelector})
		require.NoError(r, err)
		require.NotEmpty(r, pods.Items)

		var notReadyPods []string
		for _, pod := range pods.Items {
@@ -55,7 +101,7 @@ func WaitForAllPodsToBeReady(t *testing.T, client kubernetes.Interface, namespac
	logger.Log(t, "Finished waiting for pods to be ready.")
}

// Sets up a goroutine that will wait for interrupt signals
// SetupInterruptHandler sets up a goroutine that will wait for interrupt signals
// and call cleanup function when it catches it.
func SetupInterruptHandler(cleanup func()) {
	c := make(chan os.Signal, 1)
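Moving `checkForPriorInstallations` off `HelmCluster` and exporting it from the `helpers` package lets other test fixtures run the same pre-flight check with their own chart name and label selector. The sketch below shows one hypothetical caller for a Vault installation; the package, function, chart name, release name, and label selector are illustrative assumptions, not code from this commit.

```go
package vault

import (
	"testing"

	"github.com/gruntwork-io/terratest/modules/helm"
	terratestk8s "github.com/gruntwork-io/terratest/modules/k8s"
	"github.com/hashicorp/consul-k8s/acceptance/framework/helpers"
	"k8s.io/client-go/kubernetes"
)

// installVault sketches how a non-Consul fixture could reuse the shared
// CheckForPriorInstallations helper. The chart name, label selector, and
// chart/release names are illustrative assumptions.
func installVault(t *testing.T, client kubernetes.Interface, releaseName string) {
	t.Helper()

	options := &helm.Options{
		KubectlOptions: terratestk8s.NewKubectlOptions("", "", "default"),
	}

	// Fail fast if a previous vault-helm release, or its leftover pods, still exists.
	helpers.CheckForPriorInstallations(t, client, options, "vault-helm", "app.kubernetes.io/name=vault")

	helm.Install(t, options, "hashicorp/vault", releaseName)
}
```

The design choice is visible in the new signature: the chart name and label selector that the old method hard-coded to Consul are now parameters.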
