From cf90a617b8f405b5717ce24d2b44328911d95802 Mon Sep 17 00:00:00 2001 From: Joao Luna Date: Wed, 15 Feb 2023 22:35:36 +0000 Subject: [PATCH 01/17] feat: allow cert-manager annotations on ingress based on environment variables --- cluster/kube/client.go | 14 ++++++++++++++ cluster/kube/client_ingress.go | 16 ++++++++++++++-- 2 files changed, 28 insertions(+), 2 deletions(-) diff --git a/cluster/kube/client.go b/cluster/kube/client.go index 1369a6e0..687d328a 100644 --- a/cluster/kube/client.go +++ b/cluster/kube/client.go @@ -3,6 +3,7 @@ package kube import ( "context" "fmt" + "os" "strings" "github.com/pkg/errors" @@ -56,6 +57,7 @@ type client struct { ns string log log.Logger kubeContentConfig *restclient.Config + env map[string]string } func (c *client) String() string { @@ -98,6 +100,7 @@ func NewClient(ctx context.Context, log log.Logger, ns string, configPath string ns: ns, log: log.With("client", "kube"), kubeContentConfig: config, + env: environmentVariablesToMap(), }, nil } @@ -767,3 +770,14 @@ func (c *client) deploymentsForLease(ctx context.Context, lid mtypes.LeaseID) (m func (c *client) KubeVersion() (*version.Info, error) { return c.kc.Discovery().ServerVersion() } + +func environmentVariablesToMap() map[string]string { + m := make(map[string]string) + for _, e := range os.Environ() { + if i := strings.Index(e, "="); i >= 0 { + m[e[:i]] = e[i+1:] + } + } + + return m +} diff --git a/cluster/kube/client_ingress.go b/cluster/kube/client_ingress.go index e4adecac..d6e5691a 100644 --- a/cluster/kube/client_ingress.go +++ b/cluster/kube/client_ingress.go @@ -26,7 +26,7 @@ const ( akashIngressClassName = "akash-ingress-class" ) -func kubeNginxIngressAnnotations(directive ctypes.ConnectHostnameToDeploymentDirective) map[string]string { +func kubeNginxIngressAnnotations(directive ctypes.ConnectHostnameToDeploymentDirective, env map[string]string) map[string]string { // For kubernetes/ingress-nginx // https://github.com/kubernetes/ingress-nginx const root = "nginx.ingress.kubernetes.io" @@ -66,6 +66,12 @@ func kubeNginxIngressAnnotations(directive ctypes.ConnectHostnameToDeploymentDir } } + if env["AKASH_PROVIDER_ISSUER_TYPE"] == "cluster-issuer" { + result["cert-manager.io/cluster-issuer"] = env["AKASH_PROVIDER_ISSUER_NAME"] + } else if env["AKASH_PROVIDER_ISSUER_TYPE"] == "issuer" { + result["cert-manager.io/issuer"] = env["AKASH_PROVIDER_ISSUER_NAME"] + } + result[fmt.Sprintf("%s/proxy-next-upstream", root)] = strBuilder.String() return result } @@ -87,11 +93,17 @@ func (c *client) ConnectHostnameToDeployment(ctx context.Context, directive ctyp ObjectMeta: metav1.ObjectMeta{ Name: ingressName, Labels: labels, - Annotations: kubeNginxIngressAnnotations(directive), + Annotations: kubeNginxIngressAnnotations(directive, c.env), }, Spec: netv1.IngressSpec{ IngressClassName: &ingressClassName, Rules: rules, + TLS: []netv1.IngressTLS{ + { + Hosts: []string{directive.Hostname}, + SecretName: fmt.Sprintf("%s-tls", ingressName), + }, + }, }, } From c2f8518e6ade793923262f74ffdabae63ee32f38 Mon Sep 17 00:00:00 2001 From: Joao Luna Date: Wed, 22 Feb 2023 19:16:22 +0000 Subject: [PATCH 02/17] refactor: improve code --- cluster/kube/client.go | 15 ++------------- cluster/kube/client_ingress.go | 5 +++-- 2 files changed, 5 insertions(+), 15 deletions(-) diff --git a/cluster/kube/client.go b/cluster/kube/client.go index 687d328a..8eb2604d 100644 --- a/cluster/kube/client.go +++ b/cluster/kube/client.go @@ -3,7 +3,7 @@ package kube import ( "context" "fmt" - "os" + 
"github.com/akash-network/provider/cluster/util" "strings" "github.com/pkg/errors" @@ -100,7 +100,7 @@ func NewClient(ctx context.Context, log log.Logger, ns string, configPath string ns: ns, log: log.With("client", "kube"), kubeContentConfig: config, - env: environmentVariablesToMap(), + env: util.EnvironmentVariablesToMap(), }, nil } @@ -770,14 +770,3 @@ func (c *client) deploymentsForLease(ctx context.Context, lid mtypes.LeaseID) (m func (c *client) KubeVersion() (*version.Info, error) { return c.kc.Discovery().ServerVersion() } - -func environmentVariablesToMap() map[string]string { - m := make(map[string]string) - for _, e := range os.Environ() { - if i := strings.Index(e, "="); i >= 0 { - m[e[:i]] = e[i+1:] - } - } - - return m -} diff --git a/cluster/kube/client_ingress.go b/cluster/kube/client_ingress.go index d6e5691a..8a3ec562 100644 --- a/cluster/kube/client_ingress.go +++ b/cluster/kube/client_ingress.go @@ -30,6 +30,7 @@ func kubeNginxIngressAnnotations(directive ctypes.ConnectHostnameToDeploymentDir // For kubernetes/ingress-nginx // https://github.com/kubernetes/ingress-nginx const root = "nginx.ingress.kubernetes.io" + const certManager = "cert-manager.io" readTimeout := math.Ceil(float64(directive.ReadTimeout) / 1000.0) sendTimeout := math.Ceil(float64(directive.SendTimeout) / 1000.0) @@ -67,9 +68,9 @@ func kubeNginxIngressAnnotations(directive ctypes.ConnectHostnameToDeploymentDir } if env["AKASH_PROVIDER_ISSUER_TYPE"] == "cluster-issuer" { - result["cert-manager.io/cluster-issuer"] = env["AKASH_PROVIDER_ISSUER_NAME"] + result[fmt.Sprintf("%s/cluster-issuer", certManager)] = env["AKASH_PROVIDER_ISSUER_NAME"] } else if env["AKASH_PROVIDER_ISSUER_TYPE"] == "issuer" { - result["cert-manager.io/issuer"] = env["AKASH_PROVIDER_ISSUER_NAME"] + result[fmt.Sprintf("%s/issuer", certManager)] = env["AKASH_PROVIDER_ISSUER_NAME"] } result[fmt.Sprintf("%s/proxy-next-upstream", root)] = strBuilder.String() From 99734339d9237d99b9859b5c22eb22ac11917b8b Mon Sep 17 00:00:00 2001 From: Joao Luna Date: Wed, 22 Feb 2023 19:16:37 +0000 Subject: [PATCH 03/17] refactor: improve code --- cluster/util/environment.go | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) create mode 100644 cluster/util/environment.go diff --git a/cluster/util/environment.go b/cluster/util/environment.go new file mode 100644 index 00000000..292e28e2 --- /dev/null +++ b/cluster/util/environment.go @@ -0,0 +1,17 @@ +package util + +import ( + "os" + "strings" +) + +func EnvironmentVariablesToMap() map[string]string { + m := make(map[string]string) + for _, e := range os.Environ() { + if i := strings.Index(e, "="); i >= 0 { + m[e[:i]] = e[i+1:] + } + } + + return m +} From 53ae03567302bab0cfdd93fb930474cd8ef19a59 Mon Sep 17 00:00:00 2001 From: Joao Luna Date: Mon, 27 Feb 2023 18:01:05 +0000 Subject: [PATCH 04/17] refactor: improve code --- cluster/kube/client_ingress.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/cluster/kube/client_ingress.go b/cluster/kube/client_ingress.go index 8a3ec562..b1c3e14d 100644 --- a/cluster/kube/client_ingress.go +++ b/cluster/kube/client_ingress.go @@ -67,10 +67,13 @@ func kubeNginxIngressAnnotations(directive ctypes.ConnectHostnameToDeploymentDir } } - if env["AKASH_PROVIDER_ISSUER_TYPE"] == "cluster-issuer" { + switch env["AKASH_PROVIDER_ISSUER_TYPE"] { + case "cluster-issuer": result[fmt.Sprintf("%s/cluster-issuer", certManager)] = env["AKASH_PROVIDER_ISSUER_NAME"] - } else if env["AKASH_PROVIDER_ISSUER_TYPE"] == "issuer" { + break + case "issuer": 
result[fmt.Sprintf("%s/issuer", certManager)] = env["AKASH_PROVIDER_ISSUER_NAME"] + break } result[fmt.Sprintf("%s/proxy-next-upstream", root)] = strBuilder.String() From 4a36fc622a81c00fd9b35ce37bc17dd2ff273319 Mon Sep 17 00:00:00 2001 From: Joao Luna Date: Mon, 27 Feb 2023 18:19:31 +0000 Subject: [PATCH 05/17] fix: pre-allocate environment variables map --- cluster/util/environment.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cluster/util/environment.go b/cluster/util/environment.go index 292e28e2..0d11b118 100644 --- a/cluster/util/environment.go +++ b/cluster/util/environment.go @@ -6,7 +6,7 @@ import ( ) func EnvironmentVariablesToMap() map[string]string { - m := make(map[string]string) + m := make(map[string]string, len(os.Environ())) for _, e := range os.Environ() { if i := strings.Index(e, "="); i >= 0 { m[e[:i]] = e[i+1:] From 9c32db5501766c1bcdab2778ce6e9d30a1455e23 Mon Sep 17 00:00:00 2001 From: Joao Luna Date: Thu, 13 Apr 2023 21:53:36 +0100 Subject: [PATCH 06/17] fix: disable ingress by default and omit tls rules on ingress if disabled --- cluster/client.go | 4 ++-- cluster/kube/client_ingress.go | 19 ++++++++++++------- .../hostnameoperator/hostname_operator.go | 7 +++++-- 3 files changed, 19 insertions(+), 11 deletions(-) diff --git a/cluster/client.go b/cluster/client.go index 9fd075b0..4d47021f 100644 --- a/cluster/client.go +++ b/cluster/client.go @@ -82,7 +82,7 @@ type Client interface { tsq remotecommand.TerminalSizeQueue) (ctypes.ExecResult, error) // ConnectHostnameToDeployment Connect a given hostname to a deployment - ConnectHostnameToDeployment(ctx context.Context, directive ctypes.ConnectHostnameToDeploymentDirective) error + ConnectHostnameToDeployment(ctx context.Context, directive ctypes.ConnectHostnameToDeploymentDirective, tlsEnabled bool) error // RemoveHostnameFromDeployment Remove a given hostname from a deployment RemoveHostnameFromDeployment(ctx context.Context, hostname string, leaseID mtypes.LeaseID, allowMissing bool) error @@ -415,7 +415,7 @@ func (c *nullClient) GetHostnameDeploymentConnections(_ context.Context) ([]ctyp return nil, errNotImplemented } -func (c *nullClient) ConnectHostnameToDeployment(_ context.Context, _ ctypes.ConnectHostnameToDeploymentDirective) error { +func (c *nullClient) ConnectHostnameToDeployment(_ context.Context, _ ctypes.ConnectHostnameToDeploymentDirective, _ bool) error { return errNotImplemented } diff --git a/cluster/kube/client_ingress.go b/cluster/kube/client_ingress.go index b1c3e14d..69e293bd 100644 --- a/cluster/kube/client_ingress.go +++ b/cluster/kube/client_ingress.go @@ -80,7 +80,7 @@ func kubeNginxIngressAnnotations(directive ctypes.ConnectHostnameToDeploymentDir return result } -func (c *client) ConnectHostnameToDeployment(ctx context.Context, directive ctypes.ConnectHostnameToDeploymentDirective) error { +func (c *client) ConnectHostnameToDeployment(ctx context.Context, directive ctypes.ConnectHostnameToDeploymentDirective, tlsEnabled bool) error { ingressName := directive.Hostname ns := builder.LidNS(directive.LeaseID) rules := ingressRules(directive.Hostname, directive.ServiceName, directive.ServicePort) @@ -92,6 +92,16 @@ func (c *client) ConnectHostnameToDeployment(ctx context.Context, directive ctyp labels[builder.AkashManagedLabelName] = "true" builder.AppendLeaseLabels(directive.LeaseID, labels) + var tls []netv1.IngressTLS + if tlsEnabled { + tls = []netv1.IngressTLS{ + { + Hosts: []string{directive.Hostname}, + SecretName: fmt.Sprintf("%s-tls", ingressName), + }, + } + } 
+ ingressClassName := akashIngressClassName obj := &netv1.Ingress{ ObjectMeta: metav1.ObjectMeta{ @@ -102,12 +112,7 @@ func (c *client) ConnectHostnameToDeployment(ctx context.Context, directive ctyp Spec: netv1.IngressSpec{ IngressClassName: &ingressClassName, Rules: rules, - TLS: []netv1.IngressTLS{ - { - Hosts: []string{directive.Hostname}, - SecretName: fmt.Sprintf("%s-tls", ingressName), - }, - }, + TLS: tls, }, } diff --git a/operator/hostnameoperator/hostname_operator.go b/operator/hostnameoperator/hostname_operator.go index f0b6eee3..4822a5c8 100644 --- a/operator/hostnameoperator/hostname_operator.go +++ b/operator/hostnameoperator/hostname_operator.go @@ -47,6 +47,8 @@ type hostnameOperator struct { flagHostnamesData operatorcommon.PrepareFlagFn flagIgnoreListData operatorcommon.PrepareFlagFn + + env map[string]string } func (op *hostnameOperator) run(parentCtx context.Context) error { @@ -389,7 +391,7 @@ func (op *hostnameOperator) applyAddOrUpdateEvent(ctx context.Context, ev ctypes if shouldConnect { op.log.Debug("Updating ingress") // Update or create the existing ingress - err = op.client.ConnectHostnameToDeployment(ctx, directive) + err = op.client.ConnectHostnameToDeployment(ctx, directive, op.env["AKASH_SSL_ENABLED"] != "") } } else { op.log.Debug("Swapping ingress to new deployment") @@ -398,7 +400,7 @@ func (op *hostnameOperator) applyAddOrUpdateEvent(ctx context.Context, ev ctypes if err == nil { // Remove the current entry, if the next action succeeds then it gets inserted below delete(op.hostnames, ev.GetHostname()) - err = op.client.ConnectHostnameToDeployment(ctx, directive) + err = op.client.ConnectHostnameToDeployment(ctx, directive, op.env["AKASH_SSL_ENABLED"] != "") } } @@ -427,6 +429,7 @@ func newHostnameOperator(logger log.Logger, client cluster.Client, config operat cfg: config, server: opHTTP, leasesIgnored: operatorcommon.NewIgnoreList(ilc), + env: clusterutil.EnvironmentVariablesToMap(), } op.flagIgnoreListData = op.server.AddPreparedEndpoint("/ignore-list", op.prepareIgnoreListData) From 2f9f0687836dfa2c1cd6305ca54b234d712e220e Mon Sep 17 00:00:00 2001 From: Joao Luna Date: Thu, 13 Apr 2023 22:15:11 +0100 Subject: [PATCH 07/17] fix: implement interface on mock client --- cluster/mocks/client.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cluster/mocks/client.go b/cluster/mocks/client.go index 784da12c..8273c286 100644 --- a/cluster/mocks/client.go +++ b/cluster/mocks/client.go @@ -90,7 +90,7 @@ func (_c *Client_AllHostnames_Call) RunAndReturn(run func(context.Context) ([]v1 } // ConnectHostnameToDeployment provides a mock function with given fields: ctx, directive -func (_m *Client) ConnectHostnameToDeployment(ctx context.Context, directive v1beta3.ConnectHostnameToDeploymentDirective) error { +func (_m *Client) ConnectHostnameToDeployment(ctx context.Context, directive v1beta3.ConnectHostnameToDeploymentDirective, tlsEnabled bool) error { ret := _m.Called(ctx, directive) var r0 error From 17eb4248ffd36ab4a65a539fd511235ae21a84f2 Mon Sep 17 00:00:00 2001 From: Joao Luna Date: Fri, 14 Apr 2023 17:42:39 +0100 Subject: [PATCH 08/17] refactor: Apply suggestions on #86 --- cluster/kube/client_ingress.go | 14 +++++++------- cluster/mocks/client.go | 19 ++++++++++--------- .../hostname_operator_test.go | 8 ++++---- 3 files changed, 21 insertions(+), 20 deletions(-) diff --git a/cluster/kube/client_ingress.go b/cluster/kube/client_ingress.go index 69e293bd..ffe4d0dd 100644 --- a/cluster/kube/client_ingress.go +++ 
b/cluster/kube/client_ingress.go @@ -24,13 +24,13 @@ import ( const ( akashIngressClassName = "akash-ingress-class" + root = "nginx.ingress.kubernetes.io" + certManager = "cert-manager.io" ) -func kubeNginxIngressAnnotations(directive ctypes.ConnectHostnameToDeploymentDirective, env map[string]string) map[string]string { +func (c *client) kubeNginxIngressAnnotations(directive ctypes.ConnectHostnameToDeploymentDirective) map[string]string { // For kubernetes/ingress-nginx // https://github.com/kubernetes/ingress-nginx - const root = "nginx.ingress.kubernetes.io" - const certManager = "cert-manager.io" readTimeout := math.Ceil(float64(directive.ReadTimeout) / 1000.0) sendTimeout := math.Ceil(float64(directive.SendTimeout) / 1000.0) @@ -67,12 +67,12 @@ func kubeNginxIngressAnnotations(directive ctypes.ConnectHostnameToDeploymentDir } } - switch env["AKASH_PROVIDER_ISSUER_TYPE"] { + switch c.env["AKASH_PROVIDER_ISSUER_TYPE"] { case "cluster-issuer": - result[fmt.Sprintf("%s/cluster-issuer", certManager)] = env["AKASH_PROVIDER_ISSUER_NAME"] + result[fmt.Sprintf("%s/cluster-issuer", certManager)] = c.env["AKASH_PROVIDER_ISSUER_NAME"] break case "issuer": - result[fmt.Sprintf("%s/issuer", certManager)] = env["AKASH_PROVIDER_ISSUER_NAME"] + result[fmt.Sprintf("%s/issuer", certManager)] = c.env["AKASH_PROVIDER_ISSUER_NAME"] break } @@ -107,7 +107,7 @@ func (c *client) ConnectHostnameToDeployment(ctx context.Context, directive ctyp ObjectMeta: metav1.ObjectMeta{ Name: ingressName, Labels: labels, - Annotations: kubeNginxIngressAnnotations(directive, c.env), + Annotations: c.kubeNginxIngressAnnotations(directive), }, Spec: netv1.IngressSpec{ IngressClassName: &ingressClassName, diff --git a/cluster/mocks/client.go b/cluster/mocks/client.go index 8273c286..3b82bc7a 100644 --- a/cluster/mocks/client.go +++ b/cluster/mocks/client.go @@ -89,13 +89,13 @@ func (_c *Client_AllHostnames_Call) RunAndReturn(run func(context.Context) ([]v1 return _c } -// ConnectHostnameToDeployment provides a mock function with given fields: ctx, directive +// ConnectHostnameToDeployment provides a mock function with given fields: ctx, directive, tlsEnabled func (_m *Client) ConnectHostnameToDeployment(ctx context.Context, directive v1beta3.ConnectHostnameToDeploymentDirective, tlsEnabled bool) error { - ret := _m.Called(ctx, directive) + ret := _m.Called(ctx, directive, tlsEnabled) var r0 error - if rf, ok := ret.Get(0).(func(context.Context, v1beta3.ConnectHostnameToDeploymentDirective) error); ok { - r0 = rf(ctx, directive) + if rf, ok := ret.Get(0).(func(context.Context, v1beta3.ConnectHostnameToDeploymentDirective, bool) error); ok { + r0 = rf(ctx, directive, tlsEnabled) } else { r0 = ret.Error(0) } @@ -111,13 +111,14 @@ type Client_ConnectHostnameToDeployment_Call struct { // ConnectHostnameToDeployment is a helper method to define mock.On call // - ctx context.Context // - directive v1beta3.ConnectHostnameToDeploymentDirective -func (_e *Client_Expecter) ConnectHostnameToDeployment(ctx interface{}, directive interface{}) *Client_ConnectHostnameToDeployment_Call { - return &Client_ConnectHostnameToDeployment_Call{Call: _e.mock.On("ConnectHostnameToDeployment", ctx, directive)} +// - tlsEnabled bool +func (_e *Client_Expecter) ConnectHostnameToDeployment(ctx interface{}, directive interface{}, tlsEnabled interface{}) *Client_ConnectHostnameToDeployment_Call { + return &Client_ConnectHostnameToDeployment_Call{Call: _e.mock.On("ConnectHostnameToDeployment", ctx, directive, tlsEnabled)} } -func (_c 
*Client_ConnectHostnameToDeployment_Call) Run(run func(ctx context.Context, directive v1beta3.ConnectHostnameToDeploymentDirective)) *Client_ConnectHostnameToDeployment_Call { +func (_c *Client_ConnectHostnameToDeployment_Call) Run(run func(ctx context.Context, directive v1beta3.ConnectHostnameToDeploymentDirective, tlsEnabled bool)) *Client_ConnectHostnameToDeployment_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(v1beta3.ConnectHostnameToDeploymentDirective)) + run(args[0].(context.Context), args[1].(v1beta3.ConnectHostnameToDeploymentDirective), args[2].(bool)) }) return _c } @@ -127,7 +128,7 @@ func (_c *Client_ConnectHostnameToDeployment_Call) Return(_a0 error) *Client_Con return _c } -func (_c *Client_ConnectHostnameToDeployment_Call) RunAndReturn(run func(context.Context, v1beta3.ConnectHostnameToDeploymentDirective) error) *Client_ConnectHostnameToDeployment_Call { +func (_c *Client_ConnectHostnameToDeployment_Call) RunAndReturn(run func(context.Context, v1beta3.ConnectHostnameToDeploymentDirective, bool) error) *Client_ConnectHostnameToDeployment_Call { _c.Call.Return(run) return _c } diff --git a/operator/hostnameoperator/hostname_operator_test.go b/operator/hostnameoperator/hostname_operator_test.go index 6e857401..e86047d0 100644 --- a/operator/hostnameoperator/hostname_operator_test.go +++ b/operator/hostnameoperator/hostname_operator_test.go @@ -424,7 +424,7 @@ func TestHostnameOperatorApplyAdd(t *testing.T) { } s.client.On("GetManifestGroup", mock.Anything, leaseID).Return(true, mg, nil) directive := buildDirective(ev, serviceExpose) // result tested in other unit tests - s.client.On("ConnectHostnameToDeployment", mock.Anything, directive).Return(nil) + s.client.On("ConnectHostnameToDeployment", mock.Anything, directive, mock.Anything).Return(nil) managed := grabManagedHostnames(t, s.op.server.GetRouter().ServeHTTP) require.Empty(t, managed) @@ -511,7 +511,7 @@ func TestHostnameOperatorApplyAddMultipleServices(t *testing.T) { } s.client.On("GetManifestGroup", mock.Anything, leaseID).Return(true, mg, nil) directive := buildDirective(ev, serviceExpose) // result tested in other unit tests - s.client.On("ConnectHostnameToDeployment", mock.Anything, directive).Return(nil) + s.client.On("ConnectHostnameToDeployment", mock.Anything, directive, mock.Anything).Return(nil) err := s.op.applyEvent(s.ctx, ev) require.NoError(t, err) @@ -596,9 +596,9 @@ func TestHostnameOperatorApplyUpdate(t *testing.T) { s.client.On("GetManifestGroup", mock.Anything, secondLeaseID).Return(true, mg2, nil) directive := buildDirective(ev, serviceExpose) // result tested in other unit tests - s.client.On("ConnectHostnameToDeployment", mock.Anything, directive).Return(nil) + s.client.On("ConnectHostnameToDeployment", mock.Anything, directive, mock.Anything).Return(nil) secondDirective := buildDirective(secondEv, secondServiceExpose) // result tested in other unit tests - s.client.On("ConnectHostnameToDeployment", mock.Anything, secondDirective).Return(nil) + s.client.On("ConnectHostnameToDeployment", mock.Anything, secondDirective, mock.Anything).Return(nil) s.client.On("RemoveHostnameFromDeployment", mock.Anything, hostname, leaseID, false).Return(nil) From 6cc3494a8228462499babe792c556b03bb9c4163 Mon Sep 17 00:00:00 2001 From: Joao Luna Date: Tue, 18 Apr 2023 22:33:49 +0200 Subject: [PATCH 09/17] refactor: use config struct instead of environment variables --- cluster/kube/client.go | 10 ++++-- cluster/kube/client_ingress.go | 10 +++--- cluster/kube/config.go | 37 
++++++++++++++++++++++ cluster/kube/config_test.go | 58 ++++++++++++++++++++++++++++++++++ 4 files changed, 107 insertions(+), 8 deletions(-) create mode 100644 cluster/kube/config.go create mode 100644 cluster/kube/config_test.go diff --git a/cluster/kube/client.go b/cluster/kube/client.go index 8eb2604d..b70bab7b 100644 --- a/cluster/kube/client.go +++ b/cluster/kube/client.go @@ -3,7 +3,6 @@ package kube import ( "context" "fmt" - "github.com/akash-network/provider/cluster/util" "strings" "github.com/pkg/errors" @@ -57,7 +56,7 @@ type client struct { ns string log log.Logger kubeContentConfig *restclient.Config - env map[string]string + cfg clientConfig } func (c *client) String() string { @@ -93,6 +92,11 @@ func NewClient(ctx context.Context, log log.Logger, ns string, configPath string return nil, errors.Wrap(err, "kube: error creating metrics client") } + ccfg, err := configFromEnv() + if err != nil { + return nil, errors.Wrap(err, "kube: error creating client configuration") + } + return &client{ kc: kc, ac: mc, @@ -100,7 +104,7 @@ func NewClient(ctx context.Context, log log.Logger, ns string, configPath string ns: ns, log: log.With("client", "kube"), kubeContentConfig: config, - env: util.EnvironmentVariablesToMap(), + cfg: *ccfg, }, nil } diff --git a/cluster/kube/client_ingress.go b/cluster/kube/client_ingress.go index ffe4d0dd..e2222e82 100644 --- a/cluster/kube/client_ingress.go +++ b/cluster/kube/client_ingress.go @@ -67,12 +67,12 @@ func (c *client) kubeNginxIngressAnnotations(directive ctypes.ConnectHostnameToD } } - switch c.env["AKASH_PROVIDER_ISSUER_TYPE"] { - case "cluster-issuer": - result[fmt.Sprintf("%s/cluster-issuer", certManager)] = c.env["AKASH_PROVIDER_ISSUER_NAME"] + switch c.cfg.issuerType { + case clusterIssuer: + result[fmt.Sprintf("%s/cluster-issuer", certManager)] = c.cfg.issuerName break - case "issuer": - result[fmt.Sprintf("%s/issuer", certManager)] = c.env["AKASH_PROVIDER_ISSUER_NAME"] + case issuer: + result[fmt.Sprintf("%s/issuer", certManager)] = c.cfg.issuerName break } diff --git a/cluster/kube/config.go b/cluster/kube/config.go new file mode 100644 index 00000000..3762f58a --- /dev/null +++ b/cluster/kube/config.go @@ -0,0 +1,37 @@ +package kube + +import ( + "errors" + "fmt" + "os" +) + +const ( + issuer = "issuer" + clusterIssuer = "cluster-issuer" + akashProviderIssuerTypeStr = "AKASH_PROVIDER_ISSUER_TYPE" + akashProviderIssuerNameStr = "AKASH_PROVIDER_ISSUER_NAME" +) + +type clientConfig struct { + issuerType string + issuerName string +} + +// configFromEnv creates a new clientConfig from environment variables. 
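+// It requires AKASH_PROVIDER_ISSUER_TYPE to be set to either "issuer" or
+// "cluster-issuer", and AKASH_PROVIDER_ISSUER_NAME to carry the name of the
+// cert-manager issuer to reference; otherwise an error is returned.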
+func configFromEnv() (*clientConfig, error) { + issuerType, ok := os.LookupEnv(akashProviderIssuerTypeStr) + if !ok || (issuerType != issuer && issuerType != clusterIssuer) { + return nil, errors.New(fmt.Sprintf("Invalid value for %s: %s", akashProviderIssuerTypeStr, issuerType)) + } + + issuerName, ok := os.LookupEnv(akashProviderIssuerNameStr) + if !ok { + return nil, errors.New(fmt.Sprintf("Value for %s not set", akashProviderIssuerNameStr)) + } + + return &clientConfig{ + issuerType: issuerType, + issuerName: issuerName, + }, nil +} diff --git a/cluster/kube/config_test.go b/cluster/kube/config_test.go new file mode 100644 index 00000000..8adea8c4 --- /dev/null +++ b/cluster/kube/config_test.go @@ -0,0 +1,58 @@ +package kube + +import ( + "os" + "testing" +) + +func TestConfigFromEnv(t *testing.T) { + t.Run("should create if environment variables are set correctly", func(t *testing.T) { + os.Setenv(akashProviderIssuerTypeStr, "cluster-issuer") + os.Setenv(akashProviderIssuerNameStr, "letsencrypt") + ccfg, err := configFromEnv() + + if err != nil { + t.Fatalf("Did not expect an error: %s", err) + } + + if ccfg.issuerType != "cluster-issuer" { + t.Errorf("Expected cluster-issuer, got %s", ccfg.issuerType) + } + + if ccfg.issuerName != "letsencrypt" { + t.Errorf("Expected letsencrypt, got %s", ccfg.issuerName) + } + }) + + t.Run("should return error if type not set", func(t *testing.T) { + os.Clearenv() + os.Setenv(akashProviderIssuerNameStr, "letsencrypt") + _, err := configFromEnv() + + if err == nil { + t.Fatalf("Expected an error") + } + }) + + t.Run("should return error if name not set", func(t *testing.T) { + os.Clearenv() + os.Setenv(akashProviderIssuerTypeStr, "cluster-issuer") + _, err := configFromEnv() + + if err == nil { + t.Fatalf("Expected an error") + } + }) + + t.Run("should return error if type is invalid", func(t *testing.T) { + os.Clearenv() + os.Setenv(akashProviderIssuerTypeStr, "fake-issuer-type") + os.Setenv(akashProviderIssuerNameStr, "letsencrypt") + + _, err := configFromEnv() + + if err == nil { + t.Fatalf("Expected an error") + } + }) +} From 73a34d53f1d40b31096f2e3f1e88f1ebb439c114 Mon Sep 17 00:00:00 2001 From: Joao Luna Date: Tue, 18 Apr 2023 23:56:10 +0200 Subject: [PATCH 10/17] fix: pipeline e2e and lint failures --- cluster/kube/client_ingress.go | 6 +++--- cluster/kube/config.go | 35 ++++++++++++++++++++++++---------- cluster/kube/config_test.go | 14 ++++++++++---- 3 files changed, 38 insertions(+), 17 deletions(-) diff --git a/cluster/kube/client_ingress.go b/cluster/kube/client_ingress.go index e2222e82..f0fc795e 100644 --- a/cluster/kube/client_ingress.go +++ b/cluster/kube/client_ingress.go @@ -67,12 +67,12 @@ func (c *client) kubeNginxIngressAnnotations(directive ctypes.ConnectHostnameToD } } - switch c.cfg.issuerType { + switch c.cfg.ssl.issuerType { case clusterIssuer: - result[fmt.Sprintf("%s/cluster-issuer", certManager)] = c.cfg.issuerName + result[fmt.Sprintf("%s/cluster-issuer", certManager)] = c.cfg.ssl.issuerName break case issuer: - result[fmt.Sprintf("%s/issuer", certManager)] = c.cfg.issuerName + result[fmt.Sprintf("%s/issuer", certManager)] = c.cfg.ssl.issuerName break } diff --git a/cluster/kube/config.go b/cluster/kube/config.go index 3762f58a..f1e56a1c 100644 --- a/cluster/kube/config.go +++ b/cluster/kube/config.go @@ -1,8 +1,9 @@ package kube import ( - "errors" "fmt" + kubeclienterrors "github.com/akash-network/provider/cluster/kube/errors" + "github.com/pkg/errors" "os" ) @@ -11,27 +12,41 @@ const ( clusterIssuer = 
"cluster-issuer" akashProviderIssuerTypeStr = "AKASH_PROVIDER_ISSUER_TYPE" akashProviderIssuerNameStr = "AKASH_PROVIDER_ISSUER_NAME" + akashProviderSslEnabledStr = "AKASH_PROVIDER_SSL_ENABLED" ) type clientConfig struct { + ssl ssl +} + +type ssl struct { issuerType string issuerName string } // configFromEnv creates a new clientConfig from environment variables. func configFromEnv() (*clientConfig, error) { - issuerType, ok := os.LookupEnv(akashProviderIssuerTypeStr) - if !ok || (issuerType != issuer && issuerType != clusterIssuer) { - return nil, errors.New(fmt.Sprintf("Invalid value for %s: %s", akashProviderIssuerTypeStr, issuerType)) - } + sslEnabled := os.Getenv(akashProviderSslEnabledStr) + var sslCfg ssl + + if sslEnabled != "" && sslEnabled != "0" { + issuerType, ok := os.LookupEnv(akashProviderIssuerTypeStr) + if !ok || (issuerType != issuer && issuerType != clusterIssuer) { + return nil, errors.Wrap(kubeclienterrors.ErrInternalError, fmt.Sprintf("Invalid value for %s: %s", akashProviderIssuerTypeStr, issuerType)) + } + + issuerName, ok := os.LookupEnv(akashProviderIssuerNameStr) + if !ok { + return nil, errors.Wrap(kubeclienterrors.ErrInternalError, fmt.Sprintf("Value for %s not set", akashProviderIssuerNameStr)) + } - issuerName, ok := os.LookupEnv(akashProviderIssuerNameStr) - if !ok { - return nil, errors.New(fmt.Sprintf("Value for %s not set", akashProviderIssuerNameStr)) + sslCfg = ssl{ + issuerType: issuerType, + issuerName: issuerName, + } } return &clientConfig{ - issuerType: issuerType, - issuerName: issuerName, + ssl: sslCfg, }, nil } diff --git a/cluster/kube/config_test.go b/cluster/kube/config_test.go index 8adea8c4..5a5bfbc4 100644 --- a/cluster/kube/config_test.go +++ b/cluster/kube/config_test.go @@ -9,24 +9,27 @@ func TestConfigFromEnv(t *testing.T) { t.Run("should create if environment variables are set correctly", func(t *testing.T) { os.Setenv(akashProviderIssuerTypeStr, "cluster-issuer") os.Setenv(akashProviderIssuerNameStr, "letsencrypt") + os.Setenv(akashProviderSslEnabledStr, "1") ccfg, err := configFromEnv() if err != nil { t.Fatalf("Did not expect an error: %s", err) } - if ccfg.issuerType != "cluster-issuer" { - t.Errorf("Expected cluster-issuer, got %s", ccfg.issuerType) + if ccfg.ssl.issuerType != "cluster-issuer" { + t.Errorf("Expected cluster-issuer, got %s", ccfg.ssl.issuerType) } - if ccfg.issuerName != "letsencrypt" { - t.Errorf("Expected letsencrypt, got %s", ccfg.issuerName) + if ccfg.ssl.issuerName != "letsencrypt" { + t.Errorf("Expected letsencrypt, got %s", ccfg.ssl.issuerName) } }) t.Run("should return error if type not set", func(t *testing.T) { os.Clearenv() os.Setenv(akashProviderIssuerNameStr, "letsencrypt") + os.Setenv(akashProviderSslEnabledStr, "1") + _, err := configFromEnv() if err == nil { @@ -37,6 +40,8 @@ func TestConfigFromEnv(t *testing.T) { t.Run("should return error if name not set", func(t *testing.T) { os.Clearenv() os.Setenv(akashProviderIssuerTypeStr, "cluster-issuer") + os.Setenv(akashProviderSslEnabledStr, "1") + _, err := configFromEnv() if err == nil { @@ -48,6 +53,7 @@ func TestConfigFromEnv(t *testing.T) { os.Clearenv() os.Setenv(akashProviderIssuerTypeStr, "fake-issuer-type") os.Setenv(akashProviderIssuerNameStr, "letsencrypt") + os.Setenv(akashProviderSslEnabledStr, "1") _, err := configFromEnv() From 3ba2fbef1df66819045e5623ca86a95bc7e17d22 Mon Sep 17 00:00:00 2001 From: Joao Luna Date: Wed, 19 Apr 2023 22:53:02 +0200 Subject: [PATCH 11/17] refactor: use viper for client configuration --- cluster/kube/client.go 
| 11 +--- cluster/kube/config.go | 51 ++------------- cluster/kube/config_test.go | 64 ------------------- cmd/provider-services/cmd/run.go | 19 +++++- .../hostnameoperator/hostname_operator.go | 3 +- operator/ipoperator/ip_operator.go | 8 +-- operator/operatorcommon/operator_config.go | 13 ++++ 7 files changed, 46 insertions(+), 123 deletions(-) delete mode 100644 cluster/kube/config_test.go diff --git a/cluster/kube/client.go b/cluster/kube/client.go index b70bab7b..95a3cfd0 100644 --- a/cluster/kube/client.go +++ b/cluster/kube/client.go @@ -56,7 +56,7 @@ type client struct { ns string log log.Logger kubeContentConfig *restclient.Config - cfg clientConfig + cfg ClientConfig } func (c *client) String() string { @@ -65,7 +65,7 @@ func (c *client) String() string { // NewClient returns new Kubernetes Client instance with provided logger, host and ns. Returns error in-case of failure // configPath may be the empty string -func NewClient(ctx context.Context, log log.Logger, ns string, configPath string) (Client, error) { +func NewClient(ctx context.Context, log log.Logger, ns string, configPath string, ccfg ClientConfig) (Client, error) { config, err := clientcommon.OpenKubeConfig(configPath, log) if err != nil { return nil, errors.Wrap(err, "kube: error building config flags") @@ -92,11 +92,6 @@ func NewClient(ctx context.Context, log log.Logger, ns string, configPath string return nil, errors.Wrap(err, "kube: error creating metrics client") } - ccfg, err := configFromEnv() - if err != nil { - return nil, errors.Wrap(err, "kube: error creating client configuration") - } - return &client{ kc: kc, ac: mc, @@ -104,7 +99,7 @@ func NewClient(ctx context.Context, log log.Logger, ns string, configPath string ns: ns, log: log.With("client", "kube"), kubeContentConfig: config, - cfg: *ccfg, + cfg: ccfg, }, nil } diff --git a/cluster/kube/config.go b/cluster/kube/config.go index f1e56a1c..d254daaf 100644 --- a/cluster/kube/config.go +++ b/cluster/kube/config.go @@ -1,52 +1,15 @@ package kube -import ( - "fmt" - kubeclienterrors "github.com/akash-network/provider/cluster/kube/errors" - "github.com/pkg/errors" - "os" -) - const ( - issuer = "issuer" - clusterIssuer = "cluster-issuer" - akashProviderIssuerTypeStr = "AKASH_PROVIDER_ISSUER_TYPE" - akashProviderIssuerNameStr = "AKASH_PROVIDER_ISSUER_NAME" - akashProviderSslEnabledStr = "AKASH_PROVIDER_SSL_ENABLED" + issuer = "issuer" + clusterIssuer = "cluster-issuer" ) -type clientConfig struct { - ssl ssl -} - -type ssl struct { - issuerType string - issuerName string +type ClientConfig struct { + Ssl Ssl } -// configFromEnv creates a new clientConfig from environment variables. 
-func configFromEnv() (*clientConfig, error) { - sslEnabled := os.Getenv(akashProviderSslEnabledStr) - var sslCfg ssl - - if sslEnabled != "" && sslEnabled != "0" { - issuerType, ok := os.LookupEnv(akashProviderIssuerTypeStr) - if !ok || (issuerType != issuer && issuerType != clusterIssuer) { - return nil, errors.Wrap(kubeclienterrors.ErrInternalError, fmt.Sprintf("Invalid value for %s: %s", akashProviderIssuerTypeStr, issuerType)) - } - - issuerName, ok := os.LookupEnv(akashProviderIssuerNameStr) - if !ok { - return nil, errors.Wrap(kubeclienterrors.ErrInternalError, fmt.Sprintf("Value for %s not set", akashProviderIssuerNameStr)) - } - - sslCfg = ssl{ - issuerType: issuerType, - issuerName: issuerName, - } - } - - return &clientConfig{ - ssl: sslCfg, - }, nil +type Ssl struct { + IssuerType string + IssuerName string } diff --git a/cluster/kube/config_test.go b/cluster/kube/config_test.go deleted file mode 100644 index 5a5bfbc4..00000000 --- a/cluster/kube/config_test.go +++ /dev/null @@ -1,64 +0,0 @@ -package kube - -import ( - "os" - "testing" -) - -func TestConfigFromEnv(t *testing.T) { - t.Run("should create if environment variables are set correctly", func(t *testing.T) { - os.Setenv(akashProviderIssuerTypeStr, "cluster-issuer") - os.Setenv(akashProviderIssuerNameStr, "letsencrypt") - os.Setenv(akashProviderSslEnabledStr, "1") - ccfg, err := configFromEnv() - - if err != nil { - t.Fatalf("Did not expect an error: %s", err) - } - - if ccfg.ssl.issuerType != "cluster-issuer" { - t.Errorf("Expected cluster-issuer, got %s", ccfg.ssl.issuerType) - } - - if ccfg.ssl.issuerName != "letsencrypt" { - t.Errorf("Expected letsencrypt, got %s", ccfg.ssl.issuerName) - } - }) - - t.Run("should return error if type not set", func(t *testing.T) { - os.Clearenv() - os.Setenv(akashProviderIssuerNameStr, "letsencrypt") - os.Setenv(akashProviderSslEnabledStr, "1") - - _, err := configFromEnv() - - if err == nil { - t.Fatalf("Expected an error") - } - }) - - t.Run("should return error if name not set", func(t *testing.T) { - os.Clearenv() - os.Setenv(akashProviderIssuerTypeStr, "cluster-issuer") - os.Setenv(akashProviderSslEnabledStr, "1") - - _, err := configFromEnv() - - if err == nil { - t.Fatalf("Expected an error") - } - }) - - t.Run("should return error if type is invalid", func(t *testing.T) { - os.Clearenv() - os.Setenv(akashProviderIssuerTypeStr, "fake-issuer-type") - os.Setenv(akashProviderIssuerNameStr, "letsencrypt") - os.Setenv(akashProviderSslEnabledStr, "1") - - _, err := configFromEnv() - - if err == nil { - t.Fatalf("Expected an error") - } - }) -} diff --git a/cmd/provider-services/cmd/run.go b/cmd/provider-services/cmd/run.go index 935aae0e..63bbfcdd 100644 --- a/cmd/provider-services/cmd/run.go +++ b/cmd/provider-services/cmd/run.go @@ -94,6 +94,9 @@ const ( FlagBidPriceIPScale = "bid-price-ip-scale" FlagEnableIPOperator = "ip-operator" FlagTxBroadcastTimeout = "tx-broadcast-timeout" + FlagSslEnabled = "ssl" + FlagSslIssuerType = "ssl-issuer-type" + FlagSslIssuerName = "ssl-issuer-name" ) const ( @@ -350,6 +353,11 @@ func RunCmd() *cobra.Command { return nil } + cmd.Flags().Bool(FlagSslEnabled, false, "enable issuing of SSL certificates on the provider's ingress controller. 
defaults to false") + if err := viper.BindPFlag(FlagSslEnabled, cmd.Flags().Lookup(FlagSslEnabled)); err != nil { + return nil + } + return cmd } @@ -760,7 +768,16 @@ func createClusterClient(ctx context.Context, log log.Logger, _ *cobra.Command, if ns == "" { return nil, fmt.Errorf("%w: --%s required", errInvalidConfig, providerflags.FlagK8sManifestNS) } - return kube.NewClient(ctx, log, ns, configPath) + + var sslCfg kube.Ssl + if viper.GetBool(FlagSslEnabled) { + sslCfg = kube.Ssl{ + IssuerName: viper.GetString(FlagSslIssuerName), + IssuerType: viper.GetString(FlagSslIssuerType), + } + } + ccfg := kube.ClientConfig{Ssl: sslCfg} + return kube.NewClient(ctx, log, ns, configPath, ccfg) } func showErrorToUser(err error) error { diff --git a/operator/hostnameoperator/hostname_operator.go b/operator/hostnameoperator/hostname_operator.go index 4822a5c8..cbaa0b18 100644 --- a/operator/hostnameoperator/hostname_operator.go +++ b/operator/hostnameoperator/hostname_operator.go @@ -429,7 +429,6 @@ func newHostnameOperator(logger log.Logger, client cluster.Client, config operat cfg: config, server: opHTTP, leasesIgnored: operatorcommon.NewIgnoreList(ilc), - env: clusterutil.EnvironmentVariablesToMap(), } op.flagIgnoreListData = op.server.AddPreparedEndpoint("/ignore-list", op.prepareIgnoreListData) @@ -448,7 +447,7 @@ func doHostnameOperator(cmd *cobra.Command) error { logger := operatorcommon.OpenLogger().With("op", "hostname") logger.Info("HTTP listening", "address", listenAddr) - client, err := clusterClient.NewClient(cmd.Context(), logger, ns, configPath) + client, err := clusterClient.NewClient(cmd.Context(), logger, ns, configPath, config.ClientConfig) if err != nil { return err } diff --git a/operator/ipoperator/ip_operator.go b/operator/ipoperator/ip_operator.go index eee9674e..315d5a96 100644 --- a/operator/ipoperator/ip_operator.go +++ b/operator/ipoperator/ip_operator.go @@ -579,13 +579,13 @@ func doIPOperator(cmd *cobra.Command) error { poolName := viper.GetString(flagMetalLbPoolName) logger := operatorcommon.OpenLogger().With("operator", "ip") - opcfg := operatorcommon.GetOperatorConfigFromViper() - _, err := sdk.AccAddressFromBech32(opcfg.ProviderAddress) + config := operatorcommon.GetOperatorConfigFromViper() + _, err := sdk.AccAddressFromBech32(config.ProviderAddress) if err != nil { return fmt.Errorf("%w: provider address must valid bech32", err) } - client, err := clusterClient.NewClient(cmd.Context(), logger, ns, configPath) + client, err := clusterClient.NewClient(cmd.Context(), logger, ns, configPath, config.ClientConfig) if err != nil { return err } @@ -603,7 +603,7 @@ func doIPOperator(cmd *cobra.Command) error { logger.Info("clients", "kube", client, "metallb", mllbc) logger.Info("HTTP listening", "address", listenAddr) - op, err := newIPOperator(logger, client, opcfg, operatorcommon.IgnoreListConfigFromViper(), mllbc) + op, err := newIPOperator(logger, client, config, operatorcommon.IgnoreListConfigFromViper(), mllbc) if err != nil { return err } diff --git a/operator/operatorcommon/operator_config.go b/operator/operatorcommon/operator_config.go index 2d3a78db..987ff01e 100644 --- a/operator/operatorcommon/operator_config.go +++ b/operator/operatorcommon/operator_config.go @@ -1,6 +1,8 @@ package operatorcommon import ( + "github.com/akash-network/provider/cluster/kube" + providerCmd "github.com/akash-network/provider/cmd/provider-services/cmd" "time" "github.com/spf13/viper" @@ -13,13 +15,24 @@ type OperatorConfig struct { WebRefreshInterval time.Duration RetryDelay 
time.Duration ProviderAddress string + ClientConfig kube.ClientConfig } func GetOperatorConfigFromViper() OperatorConfig { + var sslCfg kube.Ssl + if viper.GetBool(providerCmd.FlagSslEnabled) { + sslCfg = kube.Ssl{ + IssuerName: viper.GetString(providerCmd.FlagSslIssuerName), + IssuerType: viper.GetString(providerCmd.FlagSslIssuerType), + } + } + ccfg := kube.ClientConfig{Ssl: sslCfg} + return OperatorConfig{ PruneInterval: viper.GetDuration(providerflags.FlagPruneInterval), WebRefreshInterval: viper.GetDuration(providerflags.FlagWebRefreshInterval), RetryDelay: viper.GetDuration(providerflags.FlagRetryDelay), ProviderAddress: viper.GetString(flagProviderAddress), + ClientConfig: ccfg, } } From 1e65d9cf00994a8f8b31135aa1028118a9638579 Mon Sep 17 00:00:00 2001 From: Joao Luna Date: Wed, 19 Apr 2023 23:24:31 +0200 Subject: [PATCH 12/17] fix: cycle import --- cmd/provider-services/cmd/flags/flags.go | 5 ++++- cmd/provider-services/cmd/run.go | 13 +++++-------- operator/operatorcommon/operator_config.go | 7 +++---- 3 files changed, 12 insertions(+), 13 deletions(-) diff --git a/cmd/provider-services/cmd/flags/flags.go b/cmd/provider-services/cmd/flags/flags.go index c4620464..d8a35b24 100644 --- a/cmd/provider-services/cmd/flags/flags.go +++ b/cmd/provider-services/cmd/flags/flags.go @@ -9,5 +9,8 @@ const ( FlagWebRefreshInterval = "web-refresh-interval" FlagRetryDelay = "retry-delay" - FlagKubeConfig = "kubeconfig" + FlagKubeConfig = "kubeconfig" + FlagSslEnabled = "ssl" + FlagSslIssuerType = "ssl-issuer-type" + FlagSslIssuerName = "ssl-issuer-name" ) diff --git a/cmd/provider-services/cmd/run.go b/cmd/provider-services/cmd/run.go index 63bbfcdd..74d6655f 100644 --- a/cmd/provider-services/cmd/run.go +++ b/cmd/provider-services/cmd/run.go @@ -94,9 +94,6 @@ const ( FlagBidPriceIPScale = "bid-price-ip-scale" FlagEnableIPOperator = "ip-operator" FlagTxBroadcastTimeout = "tx-broadcast-timeout" - FlagSslEnabled = "ssl" - FlagSslIssuerType = "ssl-issuer-type" - FlagSslIssuerName = "ssl-issuer-name" ) const ( @@ -353,8 +350,8 @@ func RunCmd() *cobra.Command { return nil } - cmd.Flags().Bool(FlagSslEnabled, false, "enable issuing of SSL certificates on the provider's ingress controller. defaults to false") - if err := viper.BindPFlag(FlagSslEnabled, cmd.Flags().Lookup(FlagSslEnabled)); err != nil { + cmd.Flags().Bool(providerflags.FlagSslEnabled, false, "enable issuing of SSL certificates on the provider's ingress controller. 
defaults to false") + if err := viper.BindPFlag(providerflags.FlagSslEnabled, cmd.Flags().Lookup(providerflags.FlagSslEnabled)); err != nil { return nil } @@ -770,10 +767,10 @@ func createClusterClient(ctx context.Context, log log.Logger, _ *cobra.Command, } var sslCfg kube.Ssl - if viper.GetBool(FlagSslEnabled) { + if viper.GetBool(providerflags.FlagSslEnabled) { sslCfg = kube.Ssl{ - IssuerName: viper.GetString(FlagSslIssuerName), - IssuerType: viper.GetString(FlagSslIssuerType), + IssuerName: viper.GetString(providerflags.FlagSslIssuerName), + IssuerType: viper.GetString(providerflags.FlagSslIssuerType), } } ccfg := kube.ClientConfig{Ssl: sslCfg} diff --git a/operator/operatorcommon/operator_config.go b/operator/operatorcommon/operator_config.go index 987ff01e..b66c664e 100644 --- a/operator/operatorcommon/operator_config.go +++ b/operator/operatorcommon/operator_config.go @@ -2,7 +2,6 @@ package operatorcommon import ( "github.com/akash-network/provider/cluster/kube" - providerCmd "github.com/akash-network/provider/cmd/provider-services/cmd" "time" "github.com/spf13/viper" @@ -20,10 +19,10 @@ type OperatorConfig struct { func GetOperatorConfigFromViper() OperatorConfig { var sslCfg kube.Ssl - if viper.GetBool(providerCmd.FlagSslEnabled) { + if viper.GetBool(providerflags.FlagSslEnabled) { sslCfg = kube.Ssl{ - IssuerName: viper.GetString(providerCmd.FlagSslIssuerName), - IssuerType: viper.GetString(providerCmd.FlagSslIssuerType), + IssuerName: viper.GetString(providerflags.FlagSslIssuerName), + IssuerType: viper.GetString(providerflags.FlagSslIssuerType), } } ccfg := kube.ClientConfig{Ssl: sslCfg} From 33a32541c0f9867b47bc975a09561b94c3c47036 Mon Sep 17 00:00:00 2001 From: Joao Luna Date: Wed, 19 Apr 2023 23:34:12 +0200 Subject: [PATCH 13/17] fix: unexported fields --- cluster/kube/client_ingress.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cluster/kube/client_ingress.go b/cluster/kube/client_ingress.go index f0fc795e..ab7c68af 100644 --- a/cluster/kube/client_ingress.go +++ b/cluster/kube/client_ingress.go @@ -67,12 +67,12 @@ func (c *client) kubeNginxIngressAnnotations(directive ctypes.ConnectHostnameToD } } - switch c.cfg.ssl.issuerType { + switch c.cfg.Ssl.IssuerType { case clusterIssuer: - result[fmt.Sprintf("%s/cluster-issuer", certManager)] = c.cfg.ssl.issuerName + result[fmt.Sprintf("%s/cluster-issuer", certManager)] = c.cfg.Ssl.IssuerName break case issuer: - result[fmt.Sprintf("%s/issuer", certManager)] = c.cfg.ssl.issuerName + result[fmt.Sprintf("%s/issuer", certManager)] = c.cfg.Ssl.IssuerName break } From fe672f3c59f9c0ace8127cb055185c6407d948ac Mon Sep 17 00:00:00 2001 From: Joao Luna Date: Wed, 19 Apr 2023 23:51:51 +0200 Subject: [PATCH 14/17] fix: failing test --- cluster/kube/deploy_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cluster/kube/deploy_test.go b/cluster/kube/deploy_test.go index 4ebdae7b..b4c4a7fd 100644 --- a/cluster/kube/deploy_test.go +++ b/cluster/kube/deploy_test.go @@ -44,7 +44,7 @@ func TestDeploy(t *testing.T) { require.NoError(t, err) log := testutil.Logger(t) - client, err := NewClient(ctx, log, "lease", "") + client, err := NewClient(ctx, log, "lease", "", ClientConfig{}) require.NoError(t, err) ctx = context.WithValue(ctx, builder.SettingsKey, builder.NewDefaultSettings()) From 3ac92180180cd12799052cba22b4cbbd0a29ce0c Mon Sep 17 00:00:00 2001 From: Joao Luna Date: Thu, 20 Apr 2023 10:34:12 +0200 Subject: [PATCH 15/17] chore: cleanup e2e and operator code --- cluster/client.go | 2 +- 
cluster/kube/k8s_integration_test.go | 2 +- cluster/util/environment.go | 17 ----------------- operator/hostnameoperator/hostname_operator.go | 10 ++++++---- 4 files changed, 8 insertions(+), 23 deletions(-) delete mode 100644 cluster/util/environment.go diff --git a/cluster/client.go b/cluster/client.go index 4d47021f..3e0d9594 100644 --- a/cluster/client.go +++ b/cluster/client.go @@ -82,7 +82,7 @@ type Client interface { tsq remotecommand.TerminalSizeQueue) (ctypes.ExecResult, error) // ConnectHostnameToDeployment Connect a given hostname to a deployment - ConnectHostnameToDeployment(ctx context.Context, directive ctypes.ConnectHostnameToDeploymentDirective, tlsEnabled bool) error + ConnectHostnameToDeployment(ctx context.Context, directive ctypes.ConnectHostnameToDeploymentDirective, sslEnabled bool) error // RemoveHostnameFromDeployment Remove a given hostname from a deployment RemoveHostnameFromDeployment(ctx context.Context, hostname string, leaseID mtypes.LeaseID, allowMissing bool) error diff --git a/cluster/kube/k8s_integration_test.go b/cluster/kube/k8s_integration_test.go index 85d31eb9..17fce30d 100644 --- a/cluster/kube/k8s_integration_test.go +++ b/cluster/kube/k8s_integration_test.go @@ -38,7 +38,7 @@ func TestNewClientNSNotFound(t *testing.T) { ctx := context.WithValue(context.Background(), builder.SettingsKey, settings) - ac, err := NewClient(ctx, atestutil.Logger(t), ns, providerflags.KubeConfigDefaultPath) + ac, err := NewClient(ctx, atestutil.Logger(t), ns, providerflags.KubeConfigDefaultPath, ClientConfig{}) require.True(t, kubeErrors.IsNotFound(err)) require.Nil(t, ac) } diff --git a/cluster/util/environment.go b/cluster/util/environment.go deleted file mode 100644 index 0d11b118..00000000 --- a/cluster/util/environment.go +++ /dev/null @@ -1,17 +0,0 @@ -package util - -import ( - "os" - "strings" -) - -func EnvironmentVariablesToMap() map[string]string { - m := make(map[string]string, len(os.Environ())) - for _, e := range os.Environ() { - if i := strings.Index(e, "="); i >= 0 { - m[e[:i]] = e[i+1:] - } - } - - return m -} diff --git a/operator/hostnameoperator/hostname_operator.go b/operator/hostnameoperator/hostname_operator.go index cbaa0b18..6b8a1828 100644 --- a/operator/hostnameoperator/hostname_operator.go +++ b/operator/hostnameoperator/hostname_operator.go @@ -47,8 +47,6 @@ type hostnameOperator struct { flagHostnamesData operatorcommon.PrepareFlagFn flagIgnoreListData operatorcommon.PrepareFlagFn - - env map[string]string } func (op *hostnameOperator) run(parentCtx context.Context) error { @@ -391,7 +389,7 @@ func (op *hostnameOperator) applyAddOrUpdateEvent(ctx context.Context, ev ctypes if shouldConnect { op.log.Debug("Updating ingress") // Update or create the existing ingress - err = op.client.ConnectHostnameToDeployment(ctx, directive, op.env["AKASH_SSL_ENABLED"] != "") + err = op.client.ConnectHostnameToDeployment(ctx, directive, op.isSslEnabled()) } } else { op.log.Debug("Swapping ingress to new deployment") @@ -400,7 +398,7 @@ func (op *hostnameOperator) applyAddOrUpdateEvent(ctx context.Context, ev ctypes if err == nil { // Remove the current entry, if the next action succeeds then it gets inserted below delete(op.hostnames, ev.GetHostname()) - err = op.client.ConnectHostnameToDeployment(ctx, directive, op.env["AKASH_SSL_ENABLED"] != "") + err = op.client.ConnectHostnameToDeployment(ctx, directive, op.isSslEnabled()) } } @@ -504,3 +502,7 @@ func Cmd() *cobra.Command { return cmd } + +func (op *hostnameOperator) isSslEnabled() bool { + return 
op.cfg.ClientConfig.Ssl != clusterClient.Ssl{} +} From ef2e6f030f1ea29c0297fdd9bab6a01f446a69d9 Mon Sep 17 00:00:00 2001 From: Joao Luna Date: Thu, 20 Apr 2023 15:49:39 +0200 Subject: [PATCH 16/17] fix: e2e failing test --- cluster/kube/k8s_integration_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cluster/kube/k8s_integration_test.go b/cluster/kube/k8s_integration_test.go index 17fce30d..617114f1 100644 --- a/cluster/kube/k8s_integration_test.go +++ b/cluster/kube/k8s_integration_test.go @@ -76,7 +76,7 @@ func TestNewClient(t *testing.T) { }, metav1.CreateOptions{}) require.NoError(t, err) - ac, err := NewClient(ctx, atestutil.Logger(t), ns, providerflags.KubeConfigDefaultPath) + ac, err := NewClient(ctx, atestutil.Logger(t), ns, providerflags.KubeConfigDefaultPath, ClientConfig{}) require.NoError(t, err) From c53e5726ec7cd7aa7cbc5c8cffdabe8e43fc031f Mon Sep 17 00:00:00 2001 From: Joao Luna Date: Mon, 24 Apr 2023 21:15:00 +0100 Subject: [PATCH 17/17] chore: push changes --- _run/cert-manager.yaml | 1194 ++++++++++++++++++++++++++++++++++++++++ _run/common-helm.mk | 2 +- 2 files changed, 1195 insertions(+), 1 deletion(-) create mode 100644 _run/cert-manager.yaml diff --git a/_run/cert-manager.yaml b/_run/cert-manager.yaml new file mode 100644 index 00000000..179aa25b --- /dev/null +++ b/_run/cert-manager.yaml @@ -0,0 +1,1194 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/name: cert-manager + name: cert-manager +--- +# Source: cert-manager/templates/cainjector-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +automountServiceAccountToken: true +metadata: + name: cert-manager-cainjector + namespace: cert-manager + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cainjector" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +--- +# Source: cert-manager/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +automountServiceAccountToken: true +metadata: + name: cert-manager + namespace: cert-manager + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +--- +# Source: cert-manager/templates/webhook-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +automountServiceAccountToken: true +metadata: + name: cert-manager-webhook + namespace: cert-manager + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +--- +# Source: cert-manager/templates/webhook-config.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: cert-manager-webhook + namespace: cert-manager + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +data: +--- +# Source: cert-manager/templates/cainjector-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-cainjector + labels: + app: cainjector + 
app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cainjector" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +rules: + - apiGroups: ["cert-manager.io"] + resources: ["certificates"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "create", "update", "patch"] + - apiGroups: ["admissionregistration.k8s.io"] + resources: ["validatingwebhookconfigurations", "mutatingwebhookconfigurations"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["apiregistration.k8s.io"] + resources: ["apiservices"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["get", "list", "watch", "update"] +--- +# Source: cert-manager/templates/rbac.yaml +# Issuer controller role +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-controller-issuers + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +rules: + - apiGroups: ["cert-manager.io"] + resources: ["issuers", "issuers/status"] + verbs: ["update", "patch"] + - apiGroups: ["cert-manager.io"] + resources: ["issuers"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch", "create", "update", "delete"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] +--- +# Source: cert-manager/templates/rbac.yaml +# ClusterIssuer controller role +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-controller-clusterissuers + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +rules: + - apiGroups: ["cert-manager.io"] + resources: ["clusterissuers", "clusterissuers/status"] + verbs: ["update", "patch"] + - apiGroups: ["cert-manager.io"] + resources: ["clusterissuers"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch", "create", "update", "delete"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] +--- +# Source: cert-manager/templates/rbac.yaml +# Certificates controller role +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-controller-certificates + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +rules: + - apiGroups: ["cert-manager.io"] + resources: ["certificates", "certificates/status", "certificaterequests", "certificaterequests/status"] + verbs: ["update", "patch"] + - apiGroups: ["cert-manager.io"] + resources: ["certificates", "certificaterequests", "clusterissuers", "issuers"] + verbs: ["get", "list", "watch"] + # We require these rules to support users with the OwnerReferencesPermissionEnforcement + # 
admission controller enabled: + # https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement + - apiGroups: ["cert-manager.io"] + resources: ["certificates/finalizers", "certificaterequests/finalizers"] + verbs: ["update"] + - apiGroups: ["acme.cert-manager.io"] + resources: ["orders"] + verbs: ["create", "delete", "get", "list", "watch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch", "create", "update", "delete", "patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] +--- +# Source: cert-manager/templates/rbac.yaml +# Orders controller role +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-controller-orders + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +rules: + - apiGroups: ["acme.cert-manager.io"] + resources: ["orders", "orders/status"] + verbs: ["update", "patch"] + - apiGroups: ["acme.cert-manager.io"] + resources: ["orders", "challenges"] + verbs: ["get", "list", "watch"] + - apiGroups: ["cert-manager.io"] + resources: ["clusterissuers", "issuers"] + verbs: ["get", "list", "watch"] + - apiGroups: ["acme.cert-manager.io"] + resources: ["challenges"] + verbs: ["create", "delete"] + # We require these rules to support users with the OwnerReferencesPermissionEnforcement + # admission controller enabled: + # https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement + - apiGroups: ["acme.cert-manager.io"] + resources: ["orders/finalizers"] + verbs: ["update"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] +--- +# Source: cert-manager/templates/rbac.yaml +# Challenges controller role +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-controller-challenges + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +rules: + # Use to update challenge resource status + - apiGroups: ["acme.cert-manager.io"] + resources: ["challenges", "challenges/status"] + verbs: ["update", "patch"] + # Used to watch challenge resources + - apiGroups: ["acme.cert-manager.io"] + resources: ["challenges"] + verbs: ["get", "list", "watch"] + # Used to watch challenges, issuer and clusterissuer resources + - apiGroups: ["cert-manager.io"] + resources: ["issuers", "clusterissuers"] + verbs: ["get", "list", "watch"] + # Need to be able to retrieve ACME account private key to complete challenges + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] + # Used to create events + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] + # HTTP01 rules + - apiGroups: [""] + resources: ["pods", "services"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: ["networking.k8s.io"] + resources: ["ingresses"] + verbs: ["get", "list", "watch", "create", "delete", "update"] + - apiGroups: [ "gateway.networking.k8s.io" ] + resources: [ "httproutes" ] + verbs: ["get", "list", "watch", 
"create", "delete", "update"] + # We require the ability to specify a custom hostname when we are creating + # new ingress resources. + # See: https://github.com/openshift/origin/blob/21f191775636f9acadb44fa42beeb4f75b255532/pkg/route/apiserver/admission/ingress_admission.go#L84-L148 + - apiGroups: ["route.openshift.io"] + resources: ["routes/custom-host"] + verbs: ["create"] + # We require these rules to support users with the OwnerReferencesPermissionEnforcement + # admission controller enabled: + # https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement + - apiGroups: ["acme.cert-manager.io"] + resources: ["challenges/finalizers"] + verbs: ["update"] + # DNS01 rules (duplicated above) + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] +--- +# Source: cert-manager/templates/rbac.yaml +# ingress-shim controller role +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-controller-ingress-shim + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +rules: + - apiGroups: ["cert-manager.io"] + resources: ["certificates", "certificaterequests"] + verbs: ["create", "update", "delete"] + - apiGroups: ["cert-manager.io"] + resources: ["certificates", "certificaterequests", "issuers", "clusterissuers"] + verbs: ["get", "list", "watch"] + - apiGroups: ["networking.k8s.io"] + resources: ["ingresses"] + verbs: ["get", "list", "watch"] + # We require these rules to support users with the OwnerReferencesPermissionEnforcement + # admission controller enabled: + # https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement + - apiGroups: ["networking.k8s.io"] + resources: ["ingresses/finalizers"] + verbs: ["update"] + - apiGroups: ["gateway.networking.k8s.io"] + resources: ["gateways", "httproutes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["gateway.networking.k8s.io"] + resources: ["gateways/finalizers", "httproutes/finalizers"] + verbs: ["update"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-view + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 + rbac.authorization.k8s.io/aggregate-to-view: "true" + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-admin: "true" +rules: + - apiGroups: ["cert-manager.io"] + resources: ["certificates", "certificaterequests", "issuers"] + verbs: ["get", "list", "watch"] + - apiGroups: ["acme.cert-manager.io"] + resources: ["challenges", "orders"] + verbs: ["get", "list", "watch"] +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-edit + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: 
Helm + helm.sh/chart: cert-manager-v1.11.0 + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-admin: "true" +rules: + - apiGroups: ["cert-manager.io"] + resources: ["certificates", "certificaterequests", "issuers"] + verbs: ["create", "delete", "deletecollection", "patch", "update"] + - apiGroups: ["cert-manager.io"] + resources: ["certificates/status"] + verbs: ["update"] + - apiGroups: ["acme.cert-manager.io"] + resources: ["challenges", "orders"] + verbs: ["create", "delete", "deletecollection", "patch", "update"] +--- +# Source: cert-manager/templates/rbac.yaml +# Permission to approve CertificateRequests referencing cert-manager.io Issuers and ClusterIssuers +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-controller-approve:cert-manager-io + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cert-manager" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +rules: + - apiGroups: ["cert-manager.io"] + resources: ["signers"] + verbs: ["approve"] + resourceNames: ["issuers.cert-manager.io/*", "clusterissuers.cert-manager.io/*"] +--- +# Source: cert-manager/templates/rbac.yaml +# Permission to: +# - Update and sign CertificatSigningeRequests referencing cert-manager.io Issuers and ClusterIssuers +# - Perform SubjectAccessReviews to test whether users are able to reference Namespaced Issuers +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-controller-certificatesigningrequests + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cert-manager" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +rules: + - apiGroups: ["certificates.k8s.io"] + resources: ["certificatesigningrequests"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["certificates.k8s.io"] + resources: ["certificatesigningrequests/status"] + verbs: ["update", "patch"] + - apiGroups: ["certificates.k8s.io"] + resources: ["signers"] + resourceNames: ["issuers.cert-manager.io/*", "clusterissuers.cert-manager.io/*"] + verbs: ["sign"] + - apiGroups: ["authorization.k8s.io"] + resources: ["subjectaccessreviews"] + verbs: ["create"] +--- +# Source: cert-manager/templates/webhook-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-webhook:subjectaccessreviews + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +rules: +- apiGroups: ["authorization.k8s.io"] + resources: ["subjectaccessreviews"] + verbs: ["create"] +--- +# Source: cert-manager/templates/cainjector-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-cainjector + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cainjector" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-cainjector 
+subjects: + - name: cert-manager-cainjector + namespace: cert-manager + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-issuers + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-issuers +subjects: + - name: cert-manager + namespace: cert-manager + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-clusterissuers + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-clusterissuers +subjects: + - name: cert-manager + namespace: cert-manager + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-certificates + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-certificates +subjects: + - name: cert-manager + namespace: cert-manager + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-orders + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-orders +subjects: + - name: cert-manager + namespace: cert-manager + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-challenges + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-challenges +subjects: + - name: cert-manager + namespace: cert-manager + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-ingress-shim + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + 
app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-ingress-shim +subjects: + - name: cert-manager + namespace: cert-manager + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-approve:cert-manager-io + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cert-manager" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-approve:cert-manager-io +subjects: + - name: cert-manager + namespace: cert-manager + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-certificatesigningrequests + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cert-manager" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-certificatesigningrequests +subjects: + - name: cert-manager + namespace: cert-manager + kind: ServiceAccount +--- +# Source: cert-manager/templates/webhook-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-webhook:subjectaccessreviews + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-webhook:subjectaccessreviews +subjects: +- apiGroup: "" + kind: ServiceAccount + name: cert-manager-webhook + namespace: cert-manager +--- +# Source: cert-manager/templates/cainjector-rbac.yaml +# leader election rules +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: cert-manager-cainjector:leaderelection + namespace: kube-system + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cainjector" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +rules: + # Used for leader election by the controller + # cert-manager-cainjector-leader-election is used by the CertificateBased injector controller + # see cmd/cainjector/start.go#L113 + # cert-manager-cainjector-leader-election-core is used by the SecretBased injector controller + # see cmd/cainjector/start.go#L137 + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + resourceNames: ["cert-manager-cainjector-leader-election", "cert-manager-cainjector-leader-election-core"] + verbs: ["get", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["create"] +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role 
+metadata: + name: cert-manager:leaderelection + namespace: kube-system + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +rules: + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + resourceNames: ["cert-manager-controller"] + verbs: ["get", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["create"] +--- +# Source: cert-manager/templates/webhook-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: cert-manager-webhook:dynamic-serving + namespace: cert-manager + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +rules: +- apiGroups: [""] + resources: ["secrets"] + resourceNames: + - 'cert-manager-webhook-ca' + verbs: ["get", "list", "watch", "update"] +# It's not possible to grant CREATE permission on a single resourceName. +- apiGroups: [""] + resources: ["secrets"] + verbs: ["create"] +--- +# Source: cert-manager/templates/cainjector-rbac.yaml +# grant cert-manager permission to manage the leaderelection configmap in the +# leader election namespace +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: cert-manager-cainjector:leaderelection + namespace: kube-system + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cainjector" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cert-manager-cainjector:leaderelection +subjects: + - kind: ServiceAccount + name: cert-manager-cainjector + namespace: cert-manager +--- +# Source: cert-manager/templates/rbac.yaml +# grant cert-manager permission to manage the leaderelection configmap in the +# leader election namespace +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: cert-manager:leaderelection + namespace: kube-system + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cert-manager:leaderelection +subjects: + - apiGroup: "" + kind: ServiceAccount + name: cert-manager + namespace: cert-manager +--- +# Source: cert-manager/templates/webhook-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: cert-manager-webhook:dynamic-serving + namespace: cert-manager + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cert-manager-webhook:dynamic-serving +subjects: +- apiGroup: "" + kind: ServiceAccount + name: cert-manager-webhook + namespace: cert-manager +--- +# Source: 
cert-manager/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: cert-manager + namespace: cert-manager + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +spec: + type: ClusterIP + ports: + - protocol: TCP + port: 9402 + name: tcp-prometheus-servicemonitor + targetPort: 9402 + selector: + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" +--- +# Source: cert-manager/templates/webhook-service.yaml +apiVersion: v1 +kind: Service +metadata: + name: cert-manager-webhook + namespace: cert-manager + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +spec: + type: ClusterIP + ports: + - name: https + port: 443 + protocol: TCP + targetPort: "https" + selector: + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" +--- +# Source: cert-manager/templates/cainjector-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cert-manager-cainjector + namespace: cert-manager + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cainjector" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cainjector" + template: + metadata: + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cainjector" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 + spec: + serviceAccountName: cert-manager-cainjector + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + containers: + - name: cert-manager-cainjector + image: "quay.io/jetstack/cert-manager-cainjector:v1.11.0" + imagePullPolicy: IfNotPresent + args: + - --v=2 + - --leader-election-namespace=kube-system + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + nodeSelector: + kubernetes.io/os: linux +--- +# Source: cert-manager/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cert-manager + namespace: cert-manager + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + template: + metadata: + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + 
app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 + annotations: + prometheus.io/path: "/metrics" + prometheus.io/scrape: 'true' + prometheus.io/port: '9402' + spec: + serviceAccountName: cert-manager + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + containers: + - name: cert-manager-controller + image: "quay.io/jetstack/cert-manager-controller:v1.11.0" + imagePullPolicy: IfNotPresent + args: + - --v=2 + - --cluster-resource-namespace=$(POD_NAMESPACE) + - --leader-election-namespace=kube-system + - --acme-http01-solver-image=quay.io/jetstack/cert-manager-acmesolver:v1.11.0 + - --max-concurrent-challenges=60 + ports: + - containerPort: 9402 + name: http-metrics + protocol: TCP + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + nodeSelector: + kubernetes.io/os: linux +--- +# Source: cert-manager/templates/webhook-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cert-manager-webhook + namespace: cert-manager + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + template: + metadata: + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 + spec: + serviceAccountName: cert-manager-webhook + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + containers: + - name: cert-manager-webhook + image: "quay.io/jetstack/cert-manager-webhook:v1.11.0" + imagePullPolicy: IfNotPresent + args: + - --v=2 + - --secure-port=10250 + - --dynamic-serving-ca-secret-namespace=$(POD_NAMESPACE) + - --dynamic-serving-ca-secret-name=cert-manager-webhook-ca + - --dynamic-serving-dns-names=cert-manager-webhook + - --dynamic-serving-dns-names=cert-manager-webhook.$(POD_NAMESPACE) + - --dynamic-serving-dns-names=cert-manager-webhook.$(POD_NAMESPACE).svc + + ports: + - name: https + protocol: TCP + containerPort: 10250 + - name: healthcheck + protocol: TCP + containerPort: 6080 + livenessProbe: + httpGet: + path: /livez + port: 6080 + scheme: HTTP + initialDelaySeconds: 60 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /healthz + port: 6080 + scheme: HTTP + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + nodeSelector: + kubernetes.io/os: linux +--- +# Source: cert-manager/templates/webhook-mutating-webhook.yaml +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + name: cert-manager-webhook + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + 
app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 + annotations: + cert-manager.io/inject-ca-from-secret: "cert-manager/cert-manager-webhook-ca" +webhooks: + - name: webhook.cert-manager.io + rules: + - apiGroups: + - "cert-manager.io" + - "acme.cert-manager.io" + apiVersions: + - "v1" + operations: + - CREATE + - UPDATE + resources: + - "*/*" + admissionReviewVersions: ["v1"] + # This webhook only accepts v1 cert-manager resources. + # Equivalent matchPolicy ensures that non-v1 resource requests are sent to + # this webhook (after the resources have been converted to v1). + matchPolicy: Equivalent + timeoutSeconds: 10 + failurePolicy: Fail + # Only include 'sideEffects' field in Kubernetes 1.12+ + sideEffects: None + clientConfig: + service: + name: cert-manager-webhook + namespace: cert-manager + path: /mutate +--- +# Source: cert-manager/templates/webhook-validating-webhook.yaml +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: cert-manager-webhook + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "v1.11.0" + app.kubernetes.io/managed-by: Helm + helm.sh/chart: cert-manager-v1.11.0 + annotations: + cert-manager.io/inject-ca-from-secret: "cert-manager/cert-manager-webhook-ca" +webhooks: + - name: webhook.cert-manager.io + namespaceSelector: + matchExpressions: + - key: "cert-manager.io/disable-validation" + operator: "NotIn" + values: + - "true" + - key: "name" + operator: "NotIn" + values: + - cert-manager + rules: + - apiGroups: + - "cert-manager.io" + - "acme.cert-manager.io" + apiVersions: + - "v1" + operations: + - CREATE + - UPDATE + resources: + - "*/*" + admissionReviewVersions: ["v1"] + # This webhook only accepts v1 cert-manager resources. + # Equivalent matchPolicy ensures that non-v1 resource requests are sent to + # this webhook (after the resources have been converted to v1). + matchPolicy: Equivalent + timeoutSeconds: 10 + failurePolicy: Fail + sideEffects: None + clientConfig: + service: + name: cert-manager-webhook + namespace: cert-manager + path: /validate + diff --git a/_run/common-helm.mk b/_run/common-helm.mk index a5516a2f..a89c45d0 100644 --- a/_run/common-helm.mk +++ b/_run/common-helm.mk @@ -21,7 +21,7 @@ kind-install-helm-chart-loki: helm upgrade --install promtail grafana/promtail \ --version $(PROMTAIL_VERSION) \ --namespace loki-stack \ - -f ../promtail-values.yaml + -f ../promtail-values.yamlk helm upgrade --install grafana grafana/grafana \ --version $(GRAFANA_VERSION) \ --namespace loki-stack \