Commit

Make assertions more robust, clean up code/comments
nathancoleman committed Nov 13, 2024
1 parent df79cf0 commit d0c0ba7
Showing 5 changed files with 124 additions and 126 deletions.
2 changes: 1 addition & 1 deletion acceptance/framework/flags/flags.go
@@ -157,7 +157,7 @@ func (t *TestFlags) init() {
"If true, the tests will assume they are running against a local kind cluster(s).")

flag.BoolVar(&t.flagUseOpenshift, "use-openshift", false,
"If true, the tests will assume they are running against a openshift cluster(s).")
"If true, the tests will assume they are running against an openshift cluster(s).")

flag.BoolVar(&t.flagDisablePeering, "disable-peering", false,
"If true, the peering tests will not run.")
Expand Down
@@ -56,4 +56,4 @@ metadata:
name: backend
namespace: backend
spec:
-  protocol: http
+  protocol: http
@@ -59,4 +59,4 @@ metadata:
name: frontend
namespace: frontend
spec:
-  protocol: http
+  protocol: http
2 changes: 1 addition & 1 deletion acceptance/tests/fixtures/cases/openshift/basic/route.yaml
@@ -27,4 +27,4 @@ spec:
to:
- group: ""
kind: Service
-      name: frontend
+      name: frontend
242 changes: 120 additions & 122 deletions acceptance/tests/openshift/basic_openshift_test.go
@@ -6,143 +6,141 @@ package openshift
import (
"context"
"crypto/tls"
"fmt"
"encoding/json"
"log"
"net/http"
"os/exec"
"testing"
"time"

"github.com/go-logr/logr"
"github.com/hashicorp/consul-k8s/acceptance/framework/helpers"
"github.com/hashicorp/consul-k8s/acceptance/framework/logger"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/stretchr/testify/assert"
"k8s.io/apimachinery/pkg/types"
"log"
"net/http"
"os/exec"
logf "sigs.k8s.io/controller-runtime/pkg/log"
gwv1beta1 "sigs.k8s.io/gateway-api/apis/v1beta1"
"testing"
"time"

"github.com/hashicorp/consul-k8s/acceptance/framework/helpers"
"github.com/hashicorp/consul-k8s/acceptance/framework/logger"

"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

const (
StaticClientName = "static-client"
gatewayClassControllerName = "consul.hashicorp.com/gateway-controller"
gatewayClassFinalizer = "gateway-exists-finalizer.consul.hashicorp.com"
gatewayFinalizer = "gateway-finalizer.consul.hashicorp.com"
)

// Test that api gateway basic functionality works in a default installation and a secure installation.
func TestOpenshift_Basic(t *testing.T) {
cases := []struct {
secure bool
}{
{
secure: true,
},
cfg := suite.Config()

// namespaceName := helpers.RandomName()
// FUTURE for some reason NewHelmCluster creates a consul server pod that runs as root which
// isn't allowed in OpenShift. In order to test OpenShift properly, we have to call helm and k8s
// directly to bypass. Ideally we would just fix the framework that is running the pod as root.
cmd := exec.Command("helm", "upgrade", "--install", "consul", "hashicorp/consul", "--create-namespace",
"--namespace", "consul",
"--set", "global.name=consul",
"--set", "connectInject.enabled=true",
"--set", "connectInject.transparentProxy.defaultEnabled=false",
"--set", "connectInject.apiGateway.managedGatewayClass.mapPrivilegedContainerPorts=8000",
"--set", "global.acls.manageSystemACLs=true",
"--set", "global.tls.enabled=true",
"--set", "global.tls.enableAutoEncrypt=true",
"--set", "global.openshift.enabled=true",
"--set", "global.image=docker.mirror.hashicorp.services/hashicorppreview/consul:1.21-dev",
"--set", "global.imageK8S=docker.mirror.hashicorp.services/hashicorppreview/consul-k8s-control-plane:1.7-dev",
)
output, err := cmd.CombinedOutput()
if err != nil {
log.Fatal(string(output))
require.NoError(t, err)
}

helpers.Cleanup(t, cfg.NoCleanupOnFailure, cfg.NoCleanup, func() {
cmd := exec.Command("helm", "uninstall", "consul", "--namespace", "consul")
output, err := cmd.CombinedOutput()
require.NoErrorf(t, err, "failed to uninstall consul: %s", string(output))

cmd = exec.Command("kubectl", "delete", "namespace", "consul")
output, err = cmd.CombinedOutput()
assert.NoErrorf(t, err, "failed to delete namespace: %s", string(output))
})

// this is normally called by the environment, but because we have to bypass we have to call it explicitly
logf.SetLogger(logr.New(nil))
logger.Log(t, "creating resources for OpenShift test")

kubectlCmd := exec.Command("kubectl", "apply", "-f", "../fixtures/cases/openshift/basic")

output, err = kubectlCmd.CombinedOutput()
if err != nil {
log.Fatal(string(output))
require.NoError(t, err)
}
for _, c := range cases {
name := fmt.Sprintf("secure: %t", c.secure)
t.Run(name, func(t *testing.T) {
cfg := suite.Config()

//namespaceName := helpers.RandomName()
//TODO for some reason NewHelmCluster creates consul server pod that runs as root which
// isn't allowed in openshift. In order to test openshift properly we have to call helm and k8s directly to bypass
// but ideally we would just fix the helper function running the pod as root
cmd := exec.Command("helm", "upgrade", "--install", "consul", "hashicorp/consul", "--create-namespace",
"--namespace", "consul",
"--set", "connectInject.enabled=true",
"--set", "connectInject.transparentProxy.defaultEnabled=false",
"--set", "connectInject.apiGateway.managedGatewayClass.mapPrivilegedContainerPorts=8000",
"--set", "global.acls.manageSystemACLs=true",
"--set", "global.tls.enabled=true",
"--set", "global.tls.enableAutoEncrypt=true",
"--set", "global.openshift.enabled=true",
"--set", "global.image=docker.mirror.hashicorp.services/hashicorppreview/consul:1.21-dev",
"--set", "global.imageK8S=docker.mirror.hashicorp.services/hashicorppreview/consul-k8s-control-plane:1.7-dev",
)
output, err := cmd.CombinedOutput()
if err != nil {
log.Fatal(string(output))
require.NoError(t, err)
}

helpers.Cleanup(t, cfg.NoCleanupOnFailure, cfg.NoCleanup, func() {
cmd := exec.Command("helm", "uninstall", "consul",
"--namespace", "consul",
)
output, err := cmd.CombinedOutput()
if err != nil {
log.Fatal(string(output))
}
kubectlCmd := exec.Command("kubectl", "delete", "namespace", "consul")

output, err = kubectlCmd.CombinedOutput()
if err != nil {
log.Fatal(string(output))
require.NoError(t, err)
}
})
//this is normally called by the environment, but because we have to bypass we have to call it explicitly
logf.SetLogger(logr.New(nil))
logger.Log(t, "creating api-gateway resources")

kubectlCmd := exec.Command("kubectl", "apply", "-f", "../fixtures/cases/openshift/basic")

output, err = kubectlCmd.CombinedOutput()
if err != nil {
log.Fatal(string(output))
require.NoError(t, err)
}

helpers.Cleanup(t, cfg.NoCleanupOnFailure, cfg.NoCleanup, func() {
kubectlCmd := exec.Command("kubectl", "delete", "-f", "../fixtures/cases/openshift/basic")

output, err := kubectlCmd.CombinedOutput()
if err != nil {
log.Fatal(string(output))
require.NoError(t, err)
}
})

//// Grab a kubernetes client so that we can verify binding
//// behavior prior to issuing requests through the gateway.
ctx := suite.Environment().DefaultContext(t)
k8sClient := ctx.ControllerRuntimeClient(t)
//
//// On startup, the controller can take upwards of 1m to perform
//// leader election so we may need to wait a long time for
//// the reconcile loop to run (hence the timeout here).
var gatewayAddress string
counter := &retry.Counter{Count: 120, Wait: 2 * time.Second}
retry.RunWith(counter, t, func(r *retry.R) {
var gateway gwv1beta1.Gateway
err := k8sClient.Get(context.Background(), types.NamespacedName{Name: "api-gateway", Namespace: "consul"}, &gateway)
require.NoError(r, err)

// check that we have an address to use
require.Len(r, gateway.Status.Addresses, 1)
// now we know we have an address, set it so we can use it
gatewayAddress = gateway.Status.Addresses[0].Value
})
fmt.Println(gatewayAddress)

// now that we've satisfied those assertions, we know reconciliation is done
// so we can run assertions on the routes and the other objects

//// finally we check that we can actually route to the service via the gateway
//k8sOptions := ctx.KubectlOptions(t)
tr := &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
}
client := &http.Client{Transport: tr}
targetHTTPSAddress := fmt.Sprintf("https://%s", gatewayAddress)
resp, err := client.Get(targetHTTPSAddress)
require.NoError(t, err)
require.Equal(t, resp.StatusCode, http.StatusOK)
})

helpers.Cleanup(t, cfg.NoCleanupOnFailure, cfg.NoCleanup, func() {
cmd := exec.Command("kubectl", "delete", "-f", "../fixtures/cases/openshift/basic")
output, err := cmd.CombinedOutput()
assert.NoErrorf(t, err, "failed to delete resources: %s", string(output))
})

// Grab a kubernetes client so that we can verify binding
// behavior prior to issuing requests through the gateway.
ctx := suite.Environment().DefaultContext(t)
k8sClient := ctx.ControllerRuntimeClient(t)

// Get the public IP address of the API gateway that we created from its status.
//
// On startup, the controller can take upwards of 1m to perform leader election,
// so we may need to wait a long time for the reconcile loop to run (hence the timeout).
var gatewayIP string
counter := &retry.Counter{Count: 120, Wait: 2 * time.Second}
retry.RunWith(counter, t, func(r *retry.R) {
var gateway gwv1beta1.Gateway
err := k8sClient.Get(context.Background(), types.NamespacedName{Name: "api-gateway", Namespace: "consul"}, &gateway)
require.NoError(r, err)

require.Len(r, gateway.Status.Addresses, 1)
gatewayIP = gateway.Status.Addresses[0].Value
})
logger.Log(t, "API gateway is reachable at:", gatewayIP)

// Verify that we can reach the services that we created in the mesh
// via the API gateway that we created.
//
// The request goes Gateway --> Frontend --> Backend
client := &http.Client{Transport: &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
}}

resp, err := client.Get("https://" + gatewayIP)
require.NoError(t, err)
assert.Equal(t, resp.StatusCode, http.StatusOK)

var body struct {
Body string `json:"body"`
Code int `json:"code"`
Name string `json:"name"`
UpstreamCalls map[string]struct {
Body string `json:"body"`
Code int `json:"code"`
Name string `json:"name"`
} `json:"upstream_calls"`
URI string `json:"uri"`
}

require.NoError(t, json.NewDecoder(resp.Body).Decode(&body))
assert.Equal(t, "Hello World", body.Body)
assert.Equal(t, 200, body.Code)
assert.Equal(t, "frontend", body.Name)
assert.Equal(t, "/", body.URI)

require.Len(t, body.UpstreamCalls, 1)
require.Contains(t, body.UpstreamCalls, "http://backend.backend:8080")

backend := body.UpstreamCalls["http://backend.backend:8080"]
assert.Equal(t, "Hello World", body.Body)
assert.Equal(t, 200, backend.Code)
assert.Equal(t, "backend", backend.Name)
}

func checkStatusCondition(t require.TestingT, conditions []metav1.Condition, toCheck metav1.Condition) {

Check failure on line 146 in acceptance/tests/openshift/basic_openshift_test.go (GitHub Actions / golangci-lint): func `checkStatusCondition` is unused (unused)
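For context on the strengthened assertions: the test now decodes the gateway's HTTP response as a fake-service-style JSON payload and inspects the upstream_calls map to confirm the request traversed Gateway --> Frontend --> Backend. The sketch below shows that decoding pattern in isolation, as a minimal standalone program. The sample payload is hypothetical, shaped after the values asserted in the diff ("frontend", "Hello World", the "http://backend.backend:8080" upstream key); it is not output captured from a real cluster.

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// response mirrors the subset of the fake-service payload that the test asserts on:
// the top-level service plus any upstream calls it made.
type response struct {
	Body          string `json:"body"`
	Code          int    `json:"code"`
	Name          string `json:"name"`
	UpstreamCalls map[string]struct {
		Body string `json:"body"`
		Code int    `json:"code"`
		Name string `json:"name"`
	} `json:"upstream_calls"`
	URI string `json:"uri"`
}

func main() {
	// Hypothetical payload resembling what the frontend service could return
	// when it successfully reaches the backend through the mesh.
	sample := `{
		"name": "frontend",
		"uri": "/",
		"body": "Hello World",
		"code": 200,
		"upstream_calls": {
			"http://backend.backend:8080": {"name": "backend", "body": "Hello World", "code": 200}
		}
	}`

	var r response
	if err := json.NewDecoder(strings.NewReader(sample)).Decode(&r); err != nil {
		panic(err)
	}

	// Check the top-level service and the single expected upstream call,
	// mirroring the assertions added in this commit.
	backend, ok := r.UpstreamCalls["http://backend.backend:8080"]
	fmt.Println(r.Name, r.Code, r.URI, ok)                // frontend 200 / true
	fmt.Println(backend.Name, backend.Code, backend.Body) // backend 200 Hello World
}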