Enable specifying a ClientsAuthFile for user authorization data #179

Merged: 6 commits, Apr 29, 2019
2 changes: 1 addition & 1 deletion Makefile
@@ -38,7 +38,7 @@ e2e: NAMESPACE ?= default
e2e:
FEATURE_GATE_CLUSTER_SCOPED=$(FEATURE_GATE_CLUSTER_SCOPED) MODE=run NAMESPACE=$(NAMESPACE) PROFILE=local TARGET=operator $(MAKE) run
FEATURE_GATE_CLUSTER_SCOPED=$(FEATURE_GATE_CLUSTER_SCOPED) MODE=run NAMESPACE=$(NAMESPACE) PROFILE=local TARGET=e2e $(MAKE) run
@go test -tags e2e -v ./test/e2e/main_test.go -feature-gates=ClusterScoped=$(FEATURE_GATE_CLUSTER_SCOPED) -kubeconfig $(KUBECONFIG) -namespace $(NAMESPACE) -wait
@go test -timeout 20m -tags e2e -v ./test/e2e/main_test.go -feature-gates=ClusterScoped=$(FEATURE_GATE_CLUSTER_SCOPED) -kubeconfig $(KUBECONFIG) -namespace $(NAMESPACE) -wait

# run deploys either nats-operator or nats-operator-e2e to the Kubernetes cluster targeted by the current kubeconfig.
.PHONY: run
40 changes: 40 additions & 0 deletions example/example-nats-cluster-authfile.yaml
@@ -0,0 +1,40 @@
# This is an example NatsCluster manifest which uses a third-party initContainer
# to fetch the authorization credentials from outside Kubernetes.
#
apiVersion: "nats.io/v1alpha2"
kind: "NatsCluster"
metadata:
name: nats-auth-file-example
namespace: default
spec:
size: 1
version: "1.4.1"

natsConfig:
maxPayload: 20971520

pod:
enableConfigReload: true

volumeMounts:
- name: authconfig
mountPath: /etc/nats-config/authconfig

auth:
# Needs to be under /etc/nats-config where nats looks
# for its config file, or it won't be able to be included
# by /etc/nats-config/gnatsd.conf
clientsAuthFile: "authconfig/auth.json"

template:
spec:
initContainers:
- name: secret-getter
image: "busybox"
command: ["sh", "-c", "echo 'users = [ { user: 'foo', pass: 'bar' } ]' > /etc/nats-config/authconfig/auth.json"]
volumeMounts:
- name: authconfig
mountPath: /etc/nats-config/authconfig
volumes:
- name: authconfig
emptyDir: {}
4 changes: 4 additions & 0 deletions pkg/apis/nats/v1alpha2/cluster.go
@@ -287,6 +287,10 @@ type AuthConfig struct {
// configuration in JSON.
ClientsAuthSecret string `json:"clientsAuthSecret,omitempty"`

// ClientsAuthFile is the path that nats-operator should read
// auth secrets from on disk.
ClientsAuthFile string `json:"clientsAuthFile,omitempty"`

// ClientsAuthTimeout is the time in seconds that the NATS server will
// allow to clients to send their auth credentials.
ClientsAuthTimeout int `json:"clientsAuthTimeout,omitempty"`
1 change: 1 addition & 0 deletions pkg/conf/natsconf.go
@@ -52,6 +52,7 @@ type AuthorizationConfig struct {
Timeout int `json:"timeout,omitempty"`
Users []*User `json:"users,omitempty"`
DefaultPermissions *Permissions `json:"default_permissions,omitempty"`
Include string `json:"include,omitempty"`
}

type User struct {
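For context (not part of this PR's diff): the generated server configuration is marshalled as JSON, so the new Include field comes out as a quoted "include" key, which the NATS server ignores (see the FIXME below). The bytes.Replace calls added in pkg/util/kubernetes/kubernetes.go rewrite it into a bare include directive. A minimal, standalone Go sketch of that behaviour, using plain encoding/json rather than the natsconf serializer:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

// Hypothetical stand-ins for the natsconf types, reduced to the include field.
type authorization struct {
	Include string `json:"include,omitempty"`
}

type serverConfig struct {
	Authorization *authorization `json:"authorization,omitempty"`
}

func main() {
	raw, err := json.MarshalIndent(&serverConfig{
		Authorization: &authorization{Include: "authconfig/auth.json"},
	}, "", "  ")
	if err != nil {
		panic(err)
	}
	// The key is emitted as a quoted JSON string: "include": "authconfig/auth.json"
	fmt.Println(string(raw))

	// Rewriting the quoted key yields a bare include directive,
	// mirroring the bytes.Replace added to CreateConfigSecret and UpdateConfigSecret.
	fixed := bytes.Replace(raw, []byte(`"include":`), []byte("include "), -1)
	fmt.Println(string(fixed))
}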
22 changes: 13 additions & 9 deletions pkg/util/kubernetes/kubernetes.go
@@ -321,6 +321,11 @@ func addAuthConfig(
break
}
return nil
} else if cs.Auth.ClientsAuthFile != "" {
sconfig.Authorization = &natsconf.AuthorizationConfig{
Include: cs.Auth.ClientsAuthFile,
}
return nil
}
return nil
}
@@ -426,10 +431,7 @@ func CreateConfigSecret(kubecli corev1client.CoreV1Interface, operatorcli natsal
}

// FIXME: Quoted "include" causes include to be ignored.
// Remove once using NATS v2.0 as the default container image.
if cluster.Pod != nil && cluster.Pod.AdvertiseExternalIP {
rawConfig = bytes.Replace(rawConfig, []byte(`"include":`), []byte("include "), 1)
}
rawConfig = bytes.Replace(rawConfig, []byte(`"include":`), []byte("include "), -1)

labels := LabelsForCluster(clusterName)
cm := &v1.Secret{
@@ -536,10 +538,7 @@ func UpdateConfigSecret(
}

// FIXME: Quoted "include" causes include to be ignored.
// Remove once using NATS v2.0 as the default container image.
if cluster.Pod != nil && cluster.Pod.AdvertiseExternalIP {
rawConfig = bytes.Replace(rawConfig, []byte(`"include":`), []byte("include "), 1)
}
rawConfig = bytes.Replace(rawConfig, []byte(`"include":`), []byte("include "), -1)

cm, err := kubecli.Secrets(ns).Get(clusterName, metav1.GetOptions{})
if err != nil {
@@ -840,7 +839,12 @@ func NewNatsPodSpec(namespace, name, clusterName string, cs v1alpha2.ClusterSpec
imagePullPolicy = cs.Pod.ReloaderImagePullPolicy
}

reloaderContainer := natsPodReloaderContainer(image, imageTag, imagePullPolicy)
authFilePath := ""
if cs.Auth != nil {
authFilePath = cs.Auth.ClientsAuthFile
}

reloaderContainer := natsPodReloaderContainer(image, imageTag, imagePullPolicy, authFilePath)
reloaderContainer.VolumeMounts = volumeMounts
containers = append(containers, reloaderContainer)
}
13 changes: 10 additions & 3 deletions pkg/util/kubernetes/pod.go
@@ -18,9 +18,10 @@ import (
"context"
"encoding/json"
"fmt"
"path/filepath"

appsv1 "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
@@ -80,8 +81,8 @@ func natsPodContainer(clusterName, version string, serverImage string, enableCli
}

// natsPodReloaderContainer returns a NATS server pod container spec for configuration reloader.
func natsPodReloaderContainer(image, tag, pullPolicy string) v1.Container {
return v1.Container{
func natsPodReloaderContainer(image, tag, pullPolicy, authFilePath string) v1.Container {
container := v1.Container{
Name: "reloader",
Image: fmt.Sprintf("%s:%s", image, tag),
ImagePullPolicy: v1.PullPolicy(pullPolicy),
@@ -93,6 +94,12 @@ func natsPodReloaderContainer(image, tag, pullPolicy string) v1.Container {
constants.PidFilePath,
},
}
if authFilePath != "" {
// The volume is mounted as a subdirectory under the NATS config.
af := filepath.Join(constants.ConfigMapMountPath, authFilePath)
container.Command = append(container.Command, "-config", af)
}
return container
}

// natsPodMetricsContainer returns a NATS server pod container spec for prometheus metrics exporter.
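For reference (not part of this PR's diff): clientsAuthFile is interpreted relative to the NATS config directory, and the reloader is pointed at the joined path via the extra -config argument so that changes to the included auth file are picked up as well. A minimal sketch, assuming constants.ConfigMapMountPath is "/etc/nats-config" as the comments in the example manifest suggest:

package main

import (
	"fmt"
	"path/filepath"
)

// Assumed value of constants.ConfigMapMountPath (see the example manifest above).
const configMapMountPath = "/etc/nats-config"

func main() {
	// Value taken from spec.auth.clientsAuthFile in the example manifest.
	authFilePath := "authconfig/auth.json"
	fmt.Println(filepath.Join(configMapMountPath, authFilePath))
	// Prints: /etc/nats-config/authconfig/auth.json
}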
120 changes: 102 additions & 18 deletions test/e2e/config_reload_test.go
@@ -17,16 +17,17 @@
package e2e

import (
"bytes"
"context"
"encoding/json"
"strings"
"testing"

"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"

"github.com/nats-io/go-nats"
nats "github.com/nats-io/go-nats"
natsv1alpha2 "github.com/nats-io/nats-operator/pkg/apis/nats/v1alpha2"
"github.com/nats-io/nats-operator/pkg/conf"
natsconf "github.com/nats-io/nats-operator/pkg/conf"
"github.com/nats-io/nats-operator/pkg/util/kubernetes"
"github.com/nats-io/nats-operator/test/e2e/framework"
)
@@ -101,13 +102,78 @@ func TestConfigReloadOnClientAuthSecretChange(t *testing.T) {
// Skip the test if "ShareProcessNamespace" is not enabled.
f.Require(t, framework.ShareProcessNamespace)

// Create a NatsCluster resource with a single member, having configuration reloading enabled and using the secret above for client authentication.
ConfigReloadTestHelper(t, func(natsCluster *natsv1alpha2.NatsCluster, cas *v1.Secret) {
natsCluster.Spec.Auth = &natsv1alpha2.AuthConfig{
// Use the secret created above for client authentication.
ClientsAuthSecret: cas.Name,
}
natsCluster.Spec.Pod = &natsv1alpha2.PodPolicy{
// Enable configuration reloading.
EnableConfigReload: true,
}
})
}

// TestConfigReloadOnClientAuthFileChange creates a secret containing authentication data for a NATS cluster and mounts it into the pods as a file referenced via ".spec.auth.clientsAuthFile".
// This file initially contains two users ("user-1" and "user-2") and their corresponding passwords.
// Then, the test creates a NatsCluster resource that includes this file for authentication, and makes sure that "user-1" can connect to the NATS cluster.
// Finally, it removes the entry that corresponds to "user-1" from the authentication file, and makes sure that "user-1" cannot connect to the NATS cluster anymore.
func TestConfigReloadOnClientAuthFileChange(t *testing.T) {
// Skip the test if "ShareProcessNamespace" is not enabled.
f.Require(t, framework.ShareProcessNamespace)

ConfigReloadTestHelper(t, func(natsCluster *natsv1alpha2.NatsCluster, cas *v1.Secret) {
natsCluster.Spec.Auth = &natsv1alpha2.AuthConfig{
// Use the auth file mounted from the secret created above for client authentication.
ClientsAuthFile: "authconfig/auth.json",
}
natsCluster.Spec.Pod = &natsv1alpha2.PodPolicy{
// Enable configuration reloading.
EnableConfigReload: true,
ReloaderImage: "wallyqs/nats-server-config-reloader",
ReloaderImageTag: "0.4.5-v1alpha2",
ReloaderImagePullPolicy: "Always",
VolumeMounts: []v1.VolumeMount{
v1.VolumeMount{
Name: "authconfig",
MountPath: "/etc/nats-config/authconfig",
},
},
}
natsCluster.Spec.PodTemplate = &v1.PodTemplateSpec{
Spec: v1.PodSpec{
Volumes: []v1.Volume{
v1.Volume{
Name: "authconfig",
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: cas.Name,
Items: []v1.KeyToPath{
v1.KeyToPath{
Key: "data",
Path: "auth.json",
},
},
},
},
},
},
},
}
})
}

type NatsClusterCustomizerWSecret func(natsCluster *natsv1alpha2.NatsCluster, cas *v1.Secret)

func ConfigReloadTestHelper(t *testing.T, customizer NatsClusterCustomizerWSecret) {
var (
username1 = "user-1"
username2 = "user-2"
password1 = "pass-1"
password2 = "pass-2"
size = 1
version = "1.3.0"
version = "1.4.0"
)

var (
@@ -148,11 +214,24 @@ func TestConfigReloadOnClientAuthSecretChange(t *testing.T) {
},
},
}
// Serialize the object containing authentication data.
if d, err = json.Marshal(auth); err != nil {
// Serialize the object containing authentication data.
// The auth data uses wildcards, so we must disable the HTML
// escaping that the JSON encoder performs by default.
buf := &bytes.Buffer{}
encoder := json.NewEncoder(buf)
encoder.SetEscapeHTML(false)
err = encoder.Encode(auth)
if err != nil {
t.Fatal(err)
}
buf2 := &bytes.Buffer{}
err = json.Indent(buf2, buf.Bytes(), "", " ")
if err != nil {
t.Fatal(err)
}

// Create a secret containing authentication data.
d = buf2.Bytes()
if cas, err = f.CreateSecret(f.Namespace, "data", d); err != nil {
t.Fatal(err)
}
@@ -164,16 +243,10 @@ func TestConfigReloadOnClientAuthSecretChange(t *testing.T) {
}()

// Create a NatsCluster resource with a single member, having configuration reloading enabled and using the secret above for client authentication.
natsCluster, err = f.CreateCluster(f.Namespace, "test-nats-", size, version, func(natsCluster *natsv1alpha2.NatsCluster) {
natsCluster.Spec.Auth = &natsv1alpha2.AuthConfig{
// Use the secret created above for client authentication.
ClientsAuthSecret: cas.Name,
}
natsCluster.Spec.Pod = &natsv1alpha2.PodPolicy{
// Enable configuration reloading.
EnableConfigReload: true,
}
natsCluster, err = f.CreateCluster(f.Namespace, "test-nats-reload-", size, version, func(natsCluster *natsv1alpha2.NatsCluster) {
customizer(natsCluster, cas)
})

if err != nil {
t.Fatal(err)
}
@@ -201,12 +274,23 @@

// Remove "user1" from the list of allowed users.
auth.Users = auth.Users[1:]
// Serialize the object containing authentication data.
if d, err = json.Marshal(auth); err != nil {

// Serialize the object containing authentication data again.
buf = &bytes.Buffer{}
encoder = json.NewEncoder(buf)
encoder.SetEscapeHTML(false)
err = encoder.Encode(auth)
if err != nil {
t.Fatal(err)
}
buf2 = &bytes.Buffer{}
err = json.Indent(buf2, buf.Bytes(), "", " ")
if err != nil {
t.Fatal(err)
}

// Update the client authentication secret with the new contents.
cas.Data["data"] = d
cas.Data["data"] = buf2.Bytes()
if cas, err = f.PatchSecret(cas); err != nil {
t.Fatal(err)
}
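Aside (not part of this PR's diff): the switch from json.Marshal to a json.Encoder with SetEscapeHTML(false) matters because NATS wildcards such as ">" would otherwise be escaped to \u003e in the rendered auth data. A small standalone sketch with hypothetical permission data:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	// Hypothetical permissions entry containing the NATS full wildcard.
	perms := map[string][]string{"subscribe": {">"}}

	// json.Marshal escapes <, > and & so the output is safe to embed in HTML.
	escaped, err := json.Marshal(perms)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(escaped)) // {"subscribe":["\u003e"]}

	// An Encoder with SetEscapeHTML(false) leaves the wildcard intact.
	buf := &bytes.Buffer{}
	enc := json.NewEncoder(buf)
	enc.SetEscapeHTML(false)
	if err := enc.Encode(perms); err != nil {
		panic(err)
	}
	fmt.Print(buf.String()) // {"subscribe":[">"]}
}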
11 changes: 6 additions & 5 deletions test/e2e/upgrade_test.go
@@ -23,12 +23,14 @@ import (
natsv1alpha2 "github.com/nats-io/nats-operator/pkg/apis/nats/v1alpha2"
)

// TestUpgradeCluster creates a NatsCluster resource with version 1.2.0 and waits for the full mesh to be formed.
// Then, it updates the ".spec.version" field of the NatsCluster resource to 1.3.0 and waits for the upgrade to be performed.
// TestUpgradeCluster creates a NatsCluster resource with version
// 1.3.0 and waits for the full mesh to be formed. Then, it updates
// the ".spec.version" field of the NatsCluster resource to 1.4.0 and
// waits for the upgrade to be performed.
func TestUpgradeCluster(t *testing.T) {
var (
initialVersion = "1.2.0"
finalVersion = "1.3.0"
initialVersion = "1.3.0"
finalVersion = "1.4.0"
size = 2
)

@@ -55,7 +57,6 @@ func TestUpgradeCluster(t *testing.T) {
t.Fatal(err)
}

// Upgrade the cluster's version to 1.3.0.
natsCluster.Spec.Version = finalVersion
if natsCluster, err = f.PatchCluster(natsCluster); err != nil {
t.Fatal(err)
6 changes: 4 additions & 2 deletions test/reloader/reloader_test.go
@@ -97,7 +97,9 @@ func TestReloader(t *testing.T) {
t.Fatal(err)
}
// We should have gotten only one signal for each configuration file
if signals != len(configFiles) {
t.Fatalf("Wrong number of signals received.")
got := signals
expected := len(configFiles)
if got != expected {
t.Fatalf("Wrong number of signals received. Expected: %v, got: %v", expected, got)
}
}