
Commit ff24495

Adding support for Enterprise and other improvements on the Customizing Vault Version for WanFed Test (#2481)

* Adding support for Enterprise and other improvements on the Customizing Vault Version for WanFed Test

This is an extension of PR #2043.

In this PR, the following were addressed:

1. The Vault Enterprise version can now be provided in the CLI command; the previous PR only addressed Vault OSS.
2. Two flags, "-no-cleanup-wan-fed" and "-test-duration", were introduced to skip cleanup of the test environment after a successful setup, leaving time to manually test features or reproduce customer issues. The default duration is 1 hour.
3. This was tested in a kind environment and works fine. The following check was removed so the "use-kind" option can be used for the WanFed test:

    //if cfg.UseKind {
    //  t.Skipf("Skipping this test because it's currently flaky on kind")
    //}

* Fix indentation

* Fix unit test for deleting gateway w/ consul services

* Remove redundant service deregistration code

* Exit loop early once registration is found for service

* Fix import blocking

* Set status on pods added to test

* Apply suggestions from code review

* Reduce count of test gateways to 10 from 100

---------

Co-authored-by: Nathan Coleman <[email protected]>
Co-authored-by: Sarah Alsmiller <[email protected]>


* Removing the changes in vault_namespaces_test.go

* Introducing a new flag, -no-cleanup

* Removed "go 1.20" from go.work file

* The cfg.UseKind check is added back

* Removed the previously added -test-duration flag

* Some changes

* Some changes
20sr20 committed Jul 20, 2023
1 parent 414554c commit ff24495
Showing 45 changed files with 626 additions and 200 deletions.
1 change: 1 addition & 0 deletions acceptance/framework/config/config.go
@@ -94,6 +94,7 @@ type TestConfig struct {
VaultServerVersion string

NoCleanupOnFailure bool
NoCleanup bool
DebugDirectory string

UseAKS bool
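
For orientation, a minimal standalone sketch (illustrative only, not framework code) of the difference between the two knobs; NoCleanupOnFailure keeps resources only for failed tests, while the new NoCleanup keeps them unconditionally:

    package main

    import "fmt"

    // cleanupConfig mirrors the two cleanup fields on TestConfig in this diff.
    type cleanupConfig struct {
        NoCleanupOnFailure bool // keep resources only when the test failed
        NoCleanup          bool // keep resources unconditionally (new in this PR)
    }

    func main() {
        cfg := cleanupConfig{NoCleanupOnFailure: true}
        fmt.Printf("%+v\n", cfg)
    }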
8 changes: 4 additions & 4 deletions acceptance/framework/connhelper/connect_helper.go
@@ -108,11 +108,11 @@ func (c *ConnectHelper) DeployClientAndServer(t *testing.T) {

logger.Log(t, "creating static-server and static-client deployments")

k8s.DeployKustomize(t, c.Ctx.KubectlOptions(t), c.Cfg.NoCleanupOnFailure, c.Cfg.DebugDirectory, "../fixtures/cases/static-server-inject")
k8s.DeployKustomize(t, c.Ctx.KubectlOptions(t), c.Cfg.NoCleanupOnFailure, c.Cfg.NoCleanup, c.Cfg.DebugDirectory, "../fixtures/cases/static-server-inject")
if c.Cfg.EnableTransparentProxy {
k8s.DeployKustomize(t, c.Ctx.KubectlOptions(t), c.Cfg.NoCleanupOnFailure, c.Cfg.DebugDirectory, "../fixtures/cases/static-client-tproxy")
k8s.DeployKustomize(t, c.Ctx.KubectlOptions(t), c.Cfg.NoCleanupOnFailure, c.Cfg.NoCleanup, c.Cfg.DebugDirectory, "../fixtures/cases/static-client-tproxy")
} else {
k8s.DeployKustomize(t, c.Ctx.KubectlOptions(t), c.Cfg.NoCleanupOnFailure, c.Cfg.DebugDirectory, "../fixtures/cases/static-client-inject")
k8s.DeployKustomize(t, c.Ctx.KubectlOptions(t), c.Cfg.NoCleanupOnFailure, c.Cfg.NoCleanup, c.Cfg.DebugDirectory, "../fixtures/cases/static-client-inject")
}

// Check that both static-server and static-client have been injected and
@@ -140,7 +140,7 @@ func (c *ConnectHelper) CreateResolverRedirect(t *testing.T) {
kustomizeDir := "../fixtures/cases/resolver-redirect-virtualip"
k8s.KubectlApplyK(t, options, kustomizeDir)

helpers.Cleanup(t, c.Cfg.NoCleanupOnFailure, func() {
helpers.Cleanup(t, c.Cfg.NoCleanupOnFailure, c.Cfg.NoCleanup, func() {
k8s.KubectlDeleteK(t, options, kustomizeDir)
})
}
4 changes: 3 additions & 1 deletion acceptance/framework/consul/cli_cluster.go
@@ -45,6 +45,7 @@ type CLICluster struct {
kubeConfig string
kubeContext string
noCleanupOnFailure bool
noCleanup bool
debugDirectory string
logger terratestLogger.TestLogger
cli cli.CLI
@@ -109,6 +110,7 @@ func NewCLICluster(
kubeConfig: cfg.GetPrimaryKubeEnv().KubeConfig,
kubeContext: cfg.GetPrimaryKubeEnv().KubeContext,
noCleanupOnFailure: cfg.NoCleanupOnFailure,
noCleanup: cfg.NoCleanup,
debugDirectory: cfg.DebugDirectory,
logger: logger,
cli: *cli,
@@ -122,7 +124,7 @@ func (c *CLICluster) Create(t *testing.T) {

// Make sure we delete the cluster if we receive an interrupt signal and
// register cleanup so that we delete the cluster when test finishes.
helpers.Cleanup(t, c.noCleanupOnFailure, func() {
helpers.Cleanup(t, c.noCleanupOnFailure, c.noCleanup, func() {
c.Destroy(t)
})

10 changes: 6 additions & 4 deletions acceptance/framework/consul/helm_cluster.go
@@ -52,6 +52,7 @@ type HelmCluster struct {
runtimeClient client.Client
kubernetesClient kubernetes.Interface
noCleanupOnFailure bool
noCleanup bool
debugDirectory string
logger terratestLogger.TestLogger
}
@@ -107,6 +108,7 @@ func NewHelmCluster(
runtimeClient: ctx.ControllerRuntimeClient(t),
kubernetesClient: ctx.KubernetesClient(t),
noCleanupOnFailure: cfg.NoCleanupOnFailure,
noCleanup: cfg.NoCleanup,
debugDirectory: cfg.DebugDirectory,
logger: logger,
}
@@ -117,7 +119,7 @@ func (h *HelmCluster) Create(t *testing.T) {

// Make sure we delete the cluster if we receive an interrupt signal and
// register cleanup so that we delete the cluster when test finishes.
helpers.Cleanup(t, h.noCleanupOnFailure, func() {
helpers.Cleanup(t, h.noCleanupOnFailure, h.noCleanup, func() {
h.Destroy(t)
})

@@ -508,7 +510,7 @@ func configurePodSecurityPolicies(t *testing.T, client kubernetes.Interface, cfg
}
}

helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() {
helpers.Cleanup(t, cfg.NoCleanupOnFailure, cfg.NoCleanup, func() {
_ = client.PolicyV1beta1().PodSecurityPolicies().Delete(context.Background(), pspName, metav1.DeleteOptions{})
_ = client.RbacV1().ClusterRoles().Delete(context.Background(), pspName, metav1.DeleteOptions{})
_ = client.RbacV1().RoleBindings(namespace).Delete(context.Background(), pspName, metav1.DeleteOptions{})
@@ -559,7 +561,7 @@ func configureSCCs(t *testing.T, client kubernetes.Interface, cfg *config.TestCo
}
}

helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() {
helpers.Cleanup(t, cfg.NoCleanupOnFailure, cfg.NoCleanup, func() {
_ = client.RbacV1().RoleBindings(namespace).Delete(context.Background(), anyuidRoleBinding, metav1.DeleteOptions{})
_ = client.RbacV1().RoleBindings(namespace).Delete(context.Background(), privilegedRoleBinding, metav1.DeleteOptions{})
})
@@ -601,7 +603,7 @@ func CreateK8sSecret(t *testing.T, client kubernetes.Interface, cfg *config.Test
}
})

helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() {
helpers.Cleanup(t, cfg.NoCleanupOnFailure, cfg.NoCleanup, func() {
_ = client.CoreV1().Secrets(namespace).Delete(context.Background(), secretName, metav1.DeleteOptions{})
})
}
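
CreateK8sSecret now registers its cleanup with both knobs; the Vault changes later in this diff call it to stage the enterprise license, e.g. (call copied from NewVaultCluster below):

    consul.CreateK8sSecret(t, k8sClient, cfg, ns, vaultLicenseSecretName, vaultLicenseSecretKey, vaultEnterpriseLicense)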
6 changes: 6 additions & 0 deletions acceptance/framework/flags/flags.go
@@ -45,6 +45,7 @@ type TestFlags struct {
flagHCPResourceID string

flagNoCleanupOnFailure bool
flagNoCleanup bool

flagDebugDirectory string

@@ -124,6 +125,9 @@ func (t *TestFlags) init() {
"If true, the tests will not cleanup Kubernetes resources they create when they finish running."+
"Note this flag must be run with -failfast flag, otherwise subsequent tests will fail.")

flag.BoolVar(&t.flagNoCleanup, "no-cleanup", false,
"If true, the tests will not clean up Kubernetes resources for Vault tests.")

flag.StringVar(&t.flagDebugDirectory, "debug-directory", "", "The directory where to write debug information about failed test runs, "+
"such as logs and pod definitions. If not provided, a temporary directory will be created by the tests.")

@@ -185,6 +189,7 @@ func (t *TestFlags) TestConfigFromFlags() *config.TestConfig {
kubeEnvs := config.NewKubeTestConfigList(t.flagKubeconfigs, t.flagKubecontexts, t.flagKubeNamespaces)

c := &config.TestConfig{

EnableEnterprise: t.flagEnableEnterprise,
EnterpriseLicense: t.flagEnterpriseLicense,

Expand Down Expand Up @@ -215,6 +220,7 @@ func (t *TestFlags) TestConfigFromFlags() *config.TestConfig {
HCPResourceID: t.flagHCPResourceID,

NoCleanupOnFailure: t.flagNoCleanupOnFailure,
NoCleanup: t.flagNoCleanup,
DebugDirectory: tempDir,
UseAKS: t.flagUseAKS,
UseEKS: t.flagUseEKS,
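
A standalone sketch of the flag-to-config pattern used in this file, using only the standard library (the -no-cleanup-on-failure name is registered just above this hunk and is an assumption here):

    package main

    import (
        "flag"
        "fmt"
    )

    func main() {
        // Both knobs default to false and are opt-in via the CLI.
        noCleanupOnFailure := flag.Bool("no-cleanup-on-failure", false,
            "If true, do not clean up Kubernetes resources when a test fails.")
        noCleanup := flag.Bool("no-cleanup", false,
            "If true, do not clean up Kubernetes resources at all.")
        flag.Parse()

        fmt.Println("NoCleanupOnFailure:", *noCleanupOnFailure, "NoCleanup:", *noCleanup)
    }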
4 changes: 2 additions & 2 deletions acceptance/framework/helpers/helpers.go
@@ -87,7 +87,7 @@ func SetupInterruptHandler(cleanup func()) {
// Cleanup will both register a cleanup function with t
// and SetupInterruptHandler to make sure resources get cleaned up
// if an interrupt signal is caught.
func Cleanup(t *testing.T, noCleanupOnFailure bool, cleanup func()) {
func Cleanup(t *testing.T, noCleanupOnFailure bool, noCleanup bool, cleanup func()) {
t.Helper()

// Always clean up when an interrupt signal is caught.
@@ -97,7 +97,7 @@ func Cleanup(t *testing.T, noCleanupOnFailure bool, cleanup func()) {
// We need to wrap the cleanup function because t that is passed in to this function
// might not have the information on whether the test has failed yet.
wrappedCleanupFunc := func() {
if !(noCleanupOnFailure && t.Failed()) {
if !((noCleanupOnFailure && t.Failed()) || noCleanup) {
logger.Logf(t, "cleaning up resources for %s", t.Name())
cleanup()
} else {
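
The updated skip condition, restated as a self-contained sketch with its truth table (not the framework code itself):

    package main

    import "fmt"

    // shouldCleanup restates the condition in the updated Cleanup helper:
    // resources are removed unless the test failed with noCleanupOnFailure set,
    // or noCleanup was set unconditionally.
    func shouldCleanup(noCleanupOnFailure, noCleanup, failed bool) bool {
        return !((noCleanupOnFailure && failed) || noCleanup)
    }

    func main() {
        cases := []struct{ nof, nc, failed bool }{
            {false, false, true}, // cleans up even after a failure
            {true, false, false}, // cleans up: the test passed
            {true, false, true},  // keeps resources: failure with -no-cleanup-on-failure
            {false, true, false}, // keeps resources: -no-cleanup always wins
        }
        for _, c := range cases {
            fmt.Printf("%+v -> cleanup=%v\n", c, shouldCleanup(c.nof, c.nc, c.failed))
        }
    }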
8 changes: 4 additions & 4 deletions acceptance/framework/k8s/deploy.go
@@ -21,7 +21,7 @@ import (

// Deploy creates a Kubernetes deployment by applying configuration stored at filepath,
// sets up a cleanup function and waits for the deployment to become available.
func Deploy(t *testing.T, options *k8s.KubectlOptions, noCleanupOnFailure bool, debugDirectory string, filepath string) {
func Deploy(t *testing.T, options *k8s.KubectlOptions, noCleanupOnFailure bool, noCleanup bool, debugDirectory string, filepath string) {
t.Helper()

KubectlApply(t, options, filepath)
@@ -33,7 +33,7 @@ func Deploy(t *testing.T, options *k8s.KubectlOptions, noCleanupOnFailure bool,
err = yaml.NewYAMLOrJSONDecoder(file, 1024).Decode(&deployment)
require.NoError(t, err)

helpers.Cleanup(t, noCleanupOnFailure, func() {
helpers.Cleanup(t, noCleanupOnFailure, noCleanup, func() {
// Note: this delete command won't wait for pods to be fully terminated.
// This shouldn't cause any test pollution because the underlying
// objects are deployments, and so when other tests create these
@@ -47,7 +47,7 @@

// DeployKustomize creates a Kubernetes deployment by applying the kustomize directory stored at kustomizeDir,
// sets up a cleanup function and waits for the deployment to become available.
func DeployKustomize(t *testing.T, options *k8s.KubectlOptions, noCleanupOnFailure bool, debugDirectory string, kustomizeDir string) {
func DeployKustomize(t *testing.T, options *k8s.KubectlOptions, noCleanupOnFailure bool, noCleanup bool, debugDirectory string, kustomizeDir string) {
t.Helper()

KubectlApplyK(t, options, kustomizeDir)
@@ -59,7 +59,7 @@ func DeployKustomize(t *testing.T, options *k8s.KubectlOptions, noCleanupOnFailu
err = yaml.NewYAMLOrJSONDecoder(strings.NewReader(output), 1024).Decode(&deployment)
require.NoError(t, err)

helpers.Cleanup(t, noCleanupOnFailure, func() {
helpers.Cleanup(t, noCleanupOnFailure, noCleanup, func() {
// Note: this delete command won't wait for pods to be fully terminated.
// This shouldn't cause any test pollution because the underlying
// objects are deployments, and so when other tests create these
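
Every call site gains one argument; a representative call after the signature change (cfg, ctx, and the fixture path all appear elsewhere in this diff):

    k8s.DeployKustomize(t, ctx.KubectlOptions(t), cfg.NoCleanupOnFailure, cfg.NoCleanup,
        cfg.DebugDirectory, "../fixtures/cases/static-server-inject")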
31 changes: 28 additions & 3 deletions acceptance/framework/vault/vault_cluster.go
@@ -6,13 +6,16 @@ package vault
import (
"context"
"fmt"
"os"
"strings"
"testing"
"time"

"github.com/gruntwork-io/terratest/modules/helm"
terratestk8s "github.com/gruntwork-io/terratest/modules/k8s"
terratestLogger "github.com/gruntwork-io/terratest/modules/logger"
"github.com/hashicorp/consul-k8s/acceptance/framework/config"
"github.com/hashicorp/consul-k8s/acceptance/framework/consul"
"github.com/hashicorp/consul-k8s/acceptance/framework/environment"
"github.com/hashicorp/consul-k8s/acceptance/framework/helpers"
"github.com/hashicorp/consul-k8s/acceptance/framework/k8s"
@@ -44,6 +47,7 @@ type VaultCluster struct {
kubernetesClient kubernetes.Interface

noCleanupOnFailure bool
noCleanup bool
debugDirectory string
logger terratestLogger.TestLogger
}
@@ -54,12 +58,32 @@ func NewVaultCluster(t *testing.T, ctx environment.TestContext, cfg *config.Test
logger := terratestLogger.New(logger.TestLogger{})

kopts := ctx.KubectlOptions(t)
ns := ctx.KubectlOptions(t).Namespace

entstr := "-ent"

values := defaultHelmValues(releaseName)
if cfg.EnablePodSecurityPolicies {
values["global.psp.enable"] = "true"
}
vaultReleaseName := helpers.RandomName()
k8sClient := environment.KubernetesClientFromOptions(t, ctx.KubectlOptions(t))
vaultLicenseSecretName := fmt.Sprintf("%s-enterprise-license", vaultReleaseName)
vaultLicenseSecretKey := "license"

vaultEnterpriseLicense := os.Getenv("VAULT_LICENSE")

if cfg.VaultServerVersion != "" {

if strings.Contains(cfg.VaultServerVersion, entstr) {

logger.Logf(t, "Creating secret for Vault license")
consul.CreateK8sSecret(t, k8sClient, cfg, ns, vaultLicenseSecretName, vaultLicenseSecretKey, vaultEnterpriseLicense)

values["server.image.repository"] = "docker.mirror.hashicorp.services/hashicorp/vault-enterprise"
values["server.enterpriseLicense.secretName"] = vaultLicenseSecretName
values["server.enterpriseLicense.secretKey"] = vaultLicenseSecretKey
}
values["server.image.tag"] = cfg.VaultServerVersion
}
vaultHelmChartVersion := defaultVaultHelmChartVersion
@@ -89,6 +113,7 @@
kubectlOptions: kopts,
kubernetesClient: ctx.KubernetesClient(t),
noCleanupOnFailure: cfg.NoCleanupOnFailure,
noCleanup: cfg.NoCleanup,
debugDirectory: cfg.DebugDirectory,
logger: logger,
releaseName: releaseName,
@@ -224,7 +249,7 @@ func (v *VaultCluster) Create(t *testing.T, ctx environment.TestContext, vaultNa

// Make sure we delete the cluster if we receive an interrupt signal and
// register cleanup so that we delete the cluster when test finishes.
helpers.Cleanup(t, v.noCleanupOnFailure, func() {
helpers.Cleanup(t, v.noCleanupOnFailure, v.noCleanup, func() {
v.Destroy(t)
})

@@ -346,7 +371,7 @@ func (v *VaultCluster) createTLSCerts(t *testing.T) {
require.NoError(t, err)

t.Cleanup(func() {
if !v.noCleanupOnFailure {
if !(v.noCleanupOnFailure || v.noCleanup) {
// We're ignoring error here because secret deletion is best-effort.
_ = v.kubernetesClient.CoreV1().Secrets(namespace).Delete(context.Background(), certSecretName(v.releaseName), metav1.DeleteOptions{})
_ = v.kubernetesClient.CoreV1().Secrets(namespace).Delete(context.Background(), CASecretName(v.releaseName), metav1.DeleteOptions{})
@@ -419,7 +444,7 @@ func (v *VaultCluster) initAndUnseal(t *testing.T) {
rootTokenSecret := fmt.Sprintf("%s-vault-root-token", v.releaseName)
v.logger.Logf(t, "saving Vault root token to %q Kubernetes secret", rootTokenSecret)

helpers.Cleanup(t, v.noCleanupOnFailure, func() {
helpers.Cleanup(t, v.noCleanupOnFailure, v.noCleanup, func() {
_ = v.kubernetesClient.CoreV1().Secrets(namespace).Delete(context.Background(), rootTokenSecret, metav1.DeleteOptions{})
})
_, err := v.kubernetesClient.CoreV1().Secrets(namespace).Create(context.Background(), &corev1.Secret{
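
The enterprise path in NewVaultCluster keys off an "-ent" suffix in the requested version; a minimal standalone sketch of that selection logic (the version tag and secret name here are illustrative; the real secret name is derived from the release name):

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        values := map[string]string{}
        version := "1.14.0-ent" // illustrative; supplied via the CLI in the real test

        if version != "" {
            if strings.Contains(version, "-ent") {
                // Enterprise image plus license-secret wiring, as in this diff.
                values["server.image.repository"] = "docker.mirror.hashicorp.services/hashicorp/vault-enterprise"
                values["server.enterpriseLicense.secretName"] = "example-enterprise-license"
                values["server.enterpriseLicense.secretKey"] = "license"
            }
            values["server.image.tag"] = version
        }
        fmt.Println(values)
    }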
@@ -60,8 +60,8 @@ func TestAPIGateway_ExternalServers(t *testing.T) {
consulCluster.Create(t)

logger.Log(t, "creating static-server and static-client deployments")
k8s.DeployKustomize(t, ctx.KubectlOptions(t), cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-server-inject")
k8s.DeployKustomize(t, ctx.KubectlOptions(t), cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-inject")
k8s.DeployKustomize(t, ctx.KubectlOptions(t), cfg.NoCleanupOnFailure, cfg.NoCleanup, cfg.DebugDirectory, "../fixtures/cases/static-server-inject")
k8s.DeployKustomize(t, ctx.KubectlOptions(t), cfg.NoCleanupOnFailure, cfg.NoCleanup, cfg.DebugDirectory, "../fixtures/cases/static-client-inject")

// Override the default proxy config settings for this test
consulClient, _ := consulCluster.SetupConsulClient(t, true, serverReleaseName)
@@ -79,7 +79,7 @@
logger.Log(t, "creating api-gateway resources")
out, err := k8s.RunKubectlAndGetOutputE(t, ctx.KubectlOptions(t), "apply", "-k", "../fixtures/bases/api-gateway")
require.NoError(t, err, out)
helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() {
helpers.Cleanup(t, cfg.NoCleanupOnFailure, cfg.NoCleanup, func() {
// Ignore errors here because if the test ran as expected
// the custom resources will have been deleted.
k8s.RunKubectlAndGetOutputE(t, ctx.KubectlOptions(t), "delete", "-k", "../fixtures/bases/api-gateway")
@@ -63,7 +63,23 @@ func TestAPIGateway_GatewayClassConfig(t *testing.T) {

k8sClient := ctx.ControllerRuntimeClient(t)

// Create a GatewayClassConfig.
// create a clean namespace
err = k8sClient.Create(context.Background(), &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: namespace,
},
})
require.NoError(t, err)
helpers.Cleanup(t, cfg.NoCleanupOnFailure, cfg.NoCleanup, func() {
logger.Log(t, "deleting gateway namespace")
k8sClient.Delete(context.Background(), &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: namespace,
},
})
})

// create a GatewayClassConfig with configuration set
gatewayClassConfigName := "gateway-class-config"
gatewayClassConfig := &v1alpha1.GatewayClassConfig{
ObjectMeta: metav1.ObjectMeta{
@@ -80,7 +96,7 @@
logger.Log(t, "creating gateway class config")
err = k8sClient.Create(context.Background(), gatewayClassConfig)
require.NoError(t, err)
helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() {
helpers.Cleanup(t, cfg.NoCleanupOnFailure, cfg.NoCleanup, func() {
logger.Log(t, "deleting all gateway class configs")
k8sClient.DeleteAllOf(context.Background(), &v1alpha1.GatewayClassConfig{})
})
@@ -94,7 +110,7 @@
// Create gateway class referencing gateway-class-config.
logger.Log(t, "creating controlled gateway class")
createGatewayClass(t, k8sClient, gatewayClassName, gatewayClassControllerName, gatewayParametersRef)
helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() {
helpers.Cleanup(t, cfg.NoCleanupOnFailure, cfg.NoCleanup, func() {
logger.Log(t, "deleting all gateway classes")
k8sClient.DeleteAllOf(context.Background(), &gwv1beta1.GatewayClass{})
})
@@ -119,15 +135,20 @@
logger.Log(t, "creating certificate")
err = k8sClient.Create(context.Background(), certificate)
require.NoError(t, err)
helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() {
helpers.Cleanup(t, cfg.NoCleanupOnFailure, cfg.NoCleanup, func() {
k8sClient.Delete(context.Background(), certificate)
})

// Create gateway referencing gateway class.
gatewayName := "gcctestgateway" + namespace
logger.Log(t, "creating controlled gateway")
gateway := createGateway(t, k8sClient, gatewayName, namespace, gatewayClassName, certificateName)
helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() {

// make sure it exists
logger.Log(t, "checking that gateway one is synchronized to Consul")
checkConsulExists(t, consulClient, api.APIGateway, gatewayName)

helpers.Cleanup(t, cfg.NoCleanupOnFailure, cfg.NoCleanup, func() {
logger.Log(t, "deleting all gateways")
k8sClient.DeleteAllOf(context.Background(), &gwv1beta1.Gateway{}, client.InNamespace(namespace))
})
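
checkConsulExists is a helper not shown in this excerpt; one plausible shape, assuming Consul's config-entries API and the sdk retry package (the real implementation may differ):

    import (
        "testing"

        "github.com/hashicorp/consul/api"
        "github.com/hashicorp/consul/sdk/testutil/retry"
    )

    // Assumed shape of the helper used above.
    func checkConsulExists(t *testing.T, client *api.Client, kind, name string) {
        t.Helper()
        retry.Run(t, func(r *retry.R) {
            _, _, err := client.ConfigEntries().Get(kind, name, nil)
            r.Check(err) // retry until the config entry is readable
        })
    }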