diff --git a/pkg/controller/apiserver/apiserver_controller.go b/pkg/controller/apiserver/apiserver_controller.go index e1e74d9da5..92ac513e8a 100644 --- a/pkg/controller/apiserver/apiserver_controller.go +++ b/pkg/controller/apiserver/apiserver_controller.go @@ -450,6 +450,7 @@ func (r *ReconcileAPIServer) Reconcile(ctx context.Context, request reconcile.Re ClusterDomain: r.clusterDomain, ManagementClusterConnection: managementClusterConnection, TrustedBundle: trustedBundle, + UsePSP: r.usePSP, } pc := render.PacketCaptureAPI(packetCaptureApiCfg) pcPolicy = render.PacketCaptureAPIPolicy(packetCaptureApiCfg) diff --git a/pkg/controller/applicationlayer/applicationlayer_controller.go b/pkg/controller/applicationlayer/applicationlayer_controller.go index 3e1bb3106b..52f52f9a0f 100644 --- a/pkg/controller/applicationlayer/applicationlayer_controller.go +++ b/pkg/controller/applicationlayer/applicationlayer_controller.go @@ -91,6 +91,7 @@ func newReconciler(mgr manager.Manager, opts options.AddOptions, licenseAPIReady status: status.New(mgr.GetClient(), "applicationlayer", opts.KubernetesVersion), clusterDomain: opts.ClusterDomain, licenseAPIReady: licenseAPIReady, + usePSP: opts.UsePSP, } r.status.Run(opts.ShutdownContext) return r @@ -162,6 +163,7 @@ type ReconcileApplicationLayer struct { status status.StatusManager clusterDomain string licenseAPIReady *utils.ReadyFlag + usePSP bool } // Reconcile reads that state of the cluster for a ApplicationLayer object and makes changes @@ -280,6 +282,7 @@ func (r *ReconcileApplicationLayer) Reconcile(ctx context.Context, request recon LogRequestsPerInterval: lcSpec.LogRequestsPerInterval, LogIntervalSeconds: lcSpec.LogIntervalSeconds, ModSecurityConfigMap: modSecurityRuleSet, + UsePSP: r.usePSP, } component := applicationlayer.ApplicationLayer(config) diff --git a/pkg/controller/egressgateway/egressgateway_controller.go b/pkg/controller/egressgateway/egressgateway_controller.go index aaffbe5d6c..a5f242bf78 100644 --- a/pkg/controller/egressgateway/egressgateway_controller.go +++ b/pkg/controller/egressgateway/egressgateway_controller.go @@ -162,14 +162,12 @@ func (r *ReconcileEgressGateway) Reconcile(ctx context.Context, request reconcil // If there are no Egress Gateway resources, return. 
ch := utils.NewComponentHandler(log, r.client, r.scheme, nil) if len(egws) == 0 { - objects := []client.Object{} - + var objects []client.Object if r.provider == operatorv1.ProviderOpenShift { - scc := egressgateway.SecurityContextConstraints() - objects = append(objects, scc) - } else if r.usePSP { - psp := egressgateway.PodSecurityPolicy() - objects = append(objects, psp) + objects = append(objects, egressgateway.SecurityContextConstraints()) + } + if r.usePSP { + objects = append(objects, egressgateway.PodSecurityPolicy()) } err := ch.CreateOrUpdateOrDelete(ctx, render.NewDeletionPassthrough(objects...), r.status) if err != nil { diff --git a/pkg/controller/installation/core_controller.go b/pkg/controller/installation/core_controller.go index 53d8b1f0e0..a535691bb0 100644 --- a/pkg/controller/installation/core_controller.go +++ b/pkg/controller/installation/core_controller.go @@ -1321,7 +1321,6 @@ func (r *ReconcileInstallation) Reconcile(ctx context.Context, request reconcile csiCfg := render.CSIConfiguration{ Installation: &instance.Spec, Terminating: terminating, - Openshift: r.autoDetectedProvider == operator.ProviderOpenShift, UsePSP: r.usePSP, } components = append(components, render.CSI(&csiCfg)) diff --git a/pkg/controller/logstorage/esgateway.go b/pkg/controller/logstorage/esgateway.go index 60ebcb765d..4f520918d1 100644 --- a/pkg/controller/logstorage/esgateway.go +++ b/pkg/controller/logstorage/esgateway.go @@ -1,4 +1,4 @@ -// Copyright (c) 2023 Tigera, Inc. All rights reserved. +// Copyright (c) 2021-2023 Tigera, Inc. All rights reserved. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -18,7 +18,6 @@ import ( "context" "github.com/go-logr/logr" - "github.com/tigera/operator/pkg/tls/certificatemanagement" corev1 "k8s.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -28,6 +27,7 @@ import ( "github.com/tigera/operator/pkg/controller/utils/imageset" "github.com/tigera/operator/pkg/render" "github.com/tigera/operator/pkg/render/logstorage/esgateway" + "github.com/tigera/operator/pkg/tls/certificatemanagement" ) func (r *ReconcileLogStorage) createESGateway( @@ -40,6 +40,7 @@ func (r *ReconcileLogStorage) createESGateway( ctx context.Context, gatewayKeyPair certificatemanagement.KeyPairInterface, trustedBundle certificatemanagement.TrustedBundle, + usePSP bool, ) (reconcile.Result, bool, error) { // This secret should only ever contain one key. if len(esAdminUserSecret.Data) != 1 { @@ -67,6 +68,7 @@ func (r *ReconcileLogStorage) createESGateway( ClusterDomain: r.clusterDomain, EsAdminUserName: esAdminUserName, ESGatewayKeyPair: gatewayKeyPair, + UsePSP: usePSP, } esGatewayComponent := esgateway.EsGateway(cfg) diff --git a/pkg/controller/logstorage/esmetrics.go b/pkg/controller/logstorage/esmetrics.go index ca6c61c5fd..a4418b9259 100644 --- a/pkg/controller/logstorage/esmetrics.go +++ b/pkg/controller/logstorage/esmetrics.go @@ -1,4 +1,4 @@ -// Copyright (c) 2023 Tigera, Inc. All rights reserved. +// Copyright (c) 2021-2023 Tigera, Inc. All rights reserved. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -41,6 +41,7 @@ func (r *ReconcileLogStorage) createESMetrics( hdler utils.ComponentHandler, serverKeyPair certificatemanagement.KeyPairInterface, trustedBundle certificatemanagement.TrustedBundle, + usePSP bool, ) (reconcile.Result, bool, error) { esMetricsSecret, err := utils.GetSecret(context.Background(), r.client, esmetrics.ElasticsearchMetricsSecret, common.OperatorNamespace()) if err != nil { @@ -60,6 +61,7 @@ func (r *ReconcileLogStorage) createESMetrics( ClusterDomain: r.clusterDomain, ServerTLS: serverKeyPair, TrustedBundle: trustedBundle, + UsePSP: usePSP, } esMetricsComponent := esmetrics.ElasticsearchMetrics(esMetricsCfg) if err = imageset.ApplyImageSet(ctx, r.client, variant, esMetricsComponent); err != nil { diff --git a/pkg/controller/logstorage/linseed.go b/pkg/controller/logstorage/linseed.go index 4d6b918244..45a46f2736 100644 --- a/pkg/controller/logstorage/linseed.go +++ b/pkg/controller/logstorage/linseed.go @@ -41,6 +41,7 @@ func (r *ReconcileLogStorage) createLinseed( ctx context.Context, linseedKeyPair certificatemanagement.KeyPairInterface, trustedBundle certificatemanagement.TrustedBundle, + usePSP bool, ) (reconcile.Result, bool, error) { // This secret should only ever contain one key. if len(esAdminUserSecret.Data) != 1 { @@ -61,6 +62,7 @@ func (r *ReconcileLogStorage) createLinseed( ClusterDomain: r.clusterDomain, KeyPair: linseedKeyPair, ESAdminUserName: esAdminUserName, + UsePSP: usePSP, } linseedComponent := linseed.Linseed(cfg) diff --git a/pkg/controller/logstorage/logstorage_controller.go b/pkg/controller/logstorage/logstorage_controller.go index fbd4f0956d..4c278b3111 100644 --- a/pkg/controller/logstorage/logstorage_controller.go +++ b/pkg/controller/logstorage/logstorage_controller.go @@ -20,25 +20,17 @@ import ( "time" "github.com/go-logr/logr" - "github.com/tigera/operator/pkg/dns" - rcertificatemanagement "github.com/tigera/operator/pkg/render/certificatemanagement" - "github.com/tigera/operator/pkg/render/logstorage/linseed" - - "github.com/tigera/operator/pkg/render/common/networkpolicy" - "github.com/tigera/operator/pkg/render/kubecontrollers" - "github.com/tigera/operator/pkg/tls/certificatemanagement" - "k8s.io/client-go/kubernetes" - - v3 "github.com/tigera/api/pkg/apis/projectcalico/v3" esv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/elasticsearch/v1" kbv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/kibana/v1" - "github.com/tigera/operator/pkg/controller/certificatemanager" + "github.com/elastic/cloud-on-k8s/v2/pkg/utils/stringsutil" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/event" @@ -49,30 +41,37 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" - "github.com/elastic/cloud-on-k8s/v2/pkg/utils/stringsutil" + v3 "github.com/tigera/api/pkg/apis/projectcalico/v3" operatorv1 "github.com/tigera/operator/api/v1" "github.com/tigera/operator/pkg/common" + "github.com/tigera/operator/pkg/controller/certificatemanager" logstoragecommon "github.com/tigera/operator/pkg/controller/logstorage/common" "github.com/tigera/operator/pkg/controller/options" "github.com/tigera/operator/pkg/controller/status" "github.com/tigera/operator/pkg/controller/utils" "github.com/tigera/operator/pkg/controller/utils/imageset" + 
"github.com/tigera/operator/pkg/dns" "github.com/tigera/operator/pkg/render" + rcertificatemanagement "github.com/tigera/operator/pkg/render/certificatemanagement" relasticsearch "github.com/tigera/operator/pkg/render/common/elasticsearch" + "github.com/tigera/operator/pkg/render/common/networkpolicy" rsecret "github.com/tigera/operator/pkg/render/common/secret" + "github.com/tigera/operator/pkg/render/kubecontrollers" "github.com/tigera/operator/pkg/render/logstorage/esgateway" "github.com/tigera/operator/pkg/render/logstorage/esmetrics" + "github.com/tigera/operator/pkg/render/logstorage/linseed" "github.com/tigera/operator/pkg/render/monitor" + "github.com/tigera/operator/pkg/tls/certificatemanagement" ) -const ResourceName = "log-storage" - var log = logf.Log.WithName("controller_logstorage") const ( - defaultEckOperatorMemorySetting = "512Mi" DefaultElasticsearchStorageClass = "tigera-elasticsearch" LogStorageFinalizer = "tigera.io/eck-cleanup" + ResourceName = "log-storage" + + defaultEckOperatorMemorySetting = "512Mi" ) // Add creates a new LogStorage Controller and adds it to the Manager. The Manager will set fields on the Controller @@ -824,6 +823,7 @@ func (r *ReconcileLogStorage) Reconcile(ctx context.Context, request reconcile.R ctx, keyPairs.gateway, trustedBundle, + r.usePSP, ) if err != nil || !proceed { return result, err @@ -839,6 +839,7 @@ func (r *ReconcileLogStorage) Reconcile(ctx context.Context, request reconcile.R ctx, keyPairs.linseed, trustedBundle, + r.usePSP, ) if err != nil || !proceed { return result, err @@ -864,6 +865,7 @@ func (r *ReconcileLogStorage) Reconcile(ctx context.Context, request reconcile.R hdler, keyPairs.metricsServer, trustedBundle, + r.usePSP, ) if err != nil || !proceed { return result, err diff --git a/pkg/controller/monitor/monitor_controller.go b/pkg/controller/monitor/monitor_controller.go index 9f1c2c0614..f1c194104d 100644 --- a/pkg/controller/monitor/monitor_controller.go +++ b/pkg/controller/monitor/monitor_controller.go @@ -110,6 +110,7 @@ func newReconciler(mgr manager.Manager, opts options.AddOptions, prometheusReady prometheusReady: prometheusReady, tierWatchReady: tierWatchReady, clusterDomain: opts.ClusterDomain, + usePSP: opts.UsePSP, } r.status.AddStatefulSets([]types.NamespacedName{ @@ -180,6 +181,7 @@ type ReconcileMonitor struct { prometheusReady *utils.ReadyFlag tierWatchReady *utils.ReadyFlag clusterDomain string + usePSP bool } func (r *ReconcileMonitor) getMonitor(ctx context.Context) (*operatorv1.Monitor, error) { @@ -260,7 +262,7 @@ func (r *ReconcileMonitor) Reconcile(ctx context.Context, request reconcile.Requ r.status.SetDegraded(operatorv1.ResourceCreateError, "Unable to create the Tigera CA", err, reqLogger) return reconcile.Result{}, err } - serverTLSSecret, err := certificateManager.GetOrCreateKeyPair(r.client, monitor.PrometheusTLSSecretName, common.OperatorNamespace(), dns.GetServiceDNSNames(monitor.PrometheusHTTPAPIServiceName, common.TigeraPrometheusNamespace, r.clusterDomain)) + serverTLSSecret, err := certificateManager.GetOrCreateKeyPair(r.client, monitor.PrometheusTLSSecretName, common.OperatorNamespace(), dns.GetServiceDNSNames(monitor.PrometheusServiceServiceName, common.TigeraPrometheusNamespace, r.clusterDomain)) if err != nil { r.status.SetDegraded(operatorv1.ResourceCreateError, "Error creating TLS certificate", err, reqLogger) return reconcile.Result{}, err @@ -353,6 +355,7 @@ func (r *ReconcileMonitor) Reconcile(ctx context.Context, request reconcile.Requ TrustedCertBundle: trustedBundle, 
Openshift: r.provider == operatorv1.ProviderOpenShift, KubeControllerPort: kubeControllersMetricsPort, + UsePSP: r.usePSP, } // Render prometheus component diff --git a/pkg/controller/utils/discovery.go b/pkg/controller/utils/discovery.go index e1424c64b8..13ada20ad7 100644 --- a/pkg/controller/utils/discovery.go +++ b/pkg/controller/utils/discovery.go @@ -195,7 +195,7 @@ func isRKE2(ctx context.Context, c kubernetes.Interface) (bool, error) { } // SupportsPodSecurityPolicies returns true if the cluster contains the policy/v1beta1 PodSecurityPolicy API, -// and false otherwise. This API is scheuled to be removed in Kubernetes v1.25, but should still be used +// and false otherwise. This API is scheduled to be removed in Kubernetes v1.25, but should still be used // in earlier Kubernetes versions. func SupportsPodSecurityPolicies(c kubernetes.Interface) (bool, error) { resources, err := c.Discovery().ServerResourcesForGroupVersion("policy/v1beta1") diff --git a/pkg/render/apiserver.go b/pkg/render/apiserver.go index 2bf8688614..5d1f280978 100644 --- a/pkg/render/apiserver.go +++ b/pkg/render/apiserver.go @@ -32,7 +32,6 @@ import ( operatorv1 "github.com/tigera/operator/api/v1" "github.com/tigera/operator/pkg/components" "github.com/tigera/operator/pkg/controller/k8sapi" - "github.com/tigera/operator/pkg/ptr" rcomp "github.com/tigera/operator/pkg/render/common/components" rmeta "github.com/tigera/operator/pkg/render/common/meta" "github.com/tigera/operator/pkg/render/common/networkpolicy" @@ -186,7 +185,7 @@ func (c *apiServerComponent) Objects() ([]client.Object, []client.Object) { globalObjects, objsToDelete = populateLists(globalObjects, objsToDelete, c.authReaderRoleBinding) globalObjects, objsToDelete = populateLists(globalObjects, objsToDelete, c.webhookReaderClusterRole) globalObjects, objsToDelete = populateLists(globalObjects, objsToDelete, c.webhookReaderClusterRoleBinding) - if !c.cfg.Openshift && c.cfg.UsePSP { + if c.cfg.UsePSP { globalObjects, objsToDelete = populateLists(globalObjects, objsToDelete, c.apiServerPodSecurityPolicy) } @@ -524,7 +523,7 @@ func (c *apiServerComponent) calicoCustomResourcesClusterRole() *rbacv1.ClusterR }, }, } - if !c.cfg.Openshift { + if c.cfg.UsePSP { // Allow access to the pod security policy in case this is enforced on the cluster rules = append(rules, rbacv1.PolicyRule{ APIGroups: []string{"policy"}, @@ -1125,15 +1124,11 @@ func (c *apiServerComponent) apiServerPodSecurityPolicy() (client.Object, client nameToDelete = enterpriseName } - psp := podsecuritypolicy.NewBasePolicy() - psp.GetObjectMeta().SetName(name) - psp.Spec.Privileged = false - psp.Spec.AllowPrivilegeEscalation = ptr.BoolToPtr(false) + psp := podsecuritypolicy.NewBasePolicy(name) psp.Spec.Volumes = append(psp.Spec.Volumes, policyv1beta1.HostPath) psp.Spec.RunAsUser.Rule = policyv1beta1.RunAsUserStrategyRunAsAny - pspToDelete := podsecuritypolicy.NewBasePolicy() - pspToDelete.GetObjectMeta().SetName(nameToDelete) + pspToDelete := podsecuritypolicy.NewBasePolicy(nameToDelete) return psp, pspToDelete } @@ -1211,7 +1206,7 @@ func (c *apiServerComponent) tigeraCustomResourcesClusterRole() *rbacv1.ClusterR }, }, } - if !c.cfg.Openshift { + if c.cfg.UsePSP { // Allow access to the pod security policy in case this is enforced on the cluster rules = append(rules, rbacv1.PolicyRule{ APIGroups: []string{"policy"}, diff --git a/pkg/render/apiserver_test.go b/pkg/render/apiserver_test.go index 5890f54d19..b9250dbaf2 100644 --- a/pkg/render/apiserver_test.go +++ b/pkg/render/apiserver_test.go 
@@ -31,6 +31,7 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" netv1 "k8s.io/api/networking/v1" + policyv1beta1 "k8s.io/api/policy/v1beta1" rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -101,6 +102,7 @@ var _ = Describe("API server rendering tests (Calico Enterprise)", func() { Openshift: openshift, TLSKeyPair: kp, TrustedBundle: trustedBundle, + UsePSP: true, } }) @@ -139,6 +141,7 @@ var _ = Describe("API server rendering tests (Calico Enterprise)", func() { {name: "tigera-network-admin", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRole"}, {name: "tigera-webhook-reader", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRole"}, {name: "tigera-apiserver-webhook-reader", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRoleBinding"}, + {name: "tigera-apiserver", ns: "", group: "policy", version: "v1beta1", kind: "PodSecurityPolicy"}, } dnsNames := dns.GetServiceDNSNames(render.ProjectCalicoAPIServerServiceName(instance.Variant), rmeta.APIServerNamespace(instance.Variant), clusterDomain) kp, err := certificateManager.GetOrCreateKeyPair(cli, render.ProjectCalicoAPIServerTLSSecretName(instance.Variant), common.OperatorNamespace(), dnsNames) @@ -169,10 +172,8 @@ var _ = Describe("API server rendering tests (Calico Enterprise)", func() { // - 1 Server service Expect(resources).To(HaveLen(len(expectedResources))) - i := 0 for _, expectedRes := range expectedResources { rtest.ExpectResourceInList(resources, expectedRes.name, expectedRes.ns, expectedRes.group, expectedRes.version, expectedRes.kind) - i++ } ns := rtest.GetResource(resources, "tigera-system", "", "", "v1", "Namespace").(*corev1.Namespace) @@ -408,6 +409,7 @@ var _ = Describe("API server rendering tests (Calico Enterprise)", func() { {name: "tigera-network-admin", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRole"}, {name: "tigera-webhook-reader", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRole"}, {name: "tigera-apiserver-webhook-reader", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRoleBinding"}, + {name: "tigera-apiserver", ns: "", group: "policy", version: "v1beta1", kind: "PodSecurityPolicy"}, } component, err := render.APIServer(cfg) @@ -461,6 +463,7 @@ var _ = Describe("API server rendering tests (Calico Enterprise)", func() { {name: "tigera-network-admin", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRole"}, {name: "tigera-webhook-reader", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRole"}, {name: "tigera-apiserver-webhook-reader", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRoleBinding"}, + {name: "tigera-apiserver", ns: "", group: "policy", version: "v1beta1", kind: "PodSecurityPolicy"}, } component, err := render.APIServer(cfg) @@ -535,6 +538,7 @@ var _ = Describe("API server rendering tests (Calico Enterprise)", func() { {name: "tigera-network-admin", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRole"}, {name: "tigera-webhook-reader", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRole"}, {name: "tigera-apiserver-webhook-reader", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRoleBinding"}, + {name: "tigera-apiserver", ns: "", group: "policy", version: "v1beta1", kind: "PodSecurityPolicy"}, } 
cfg.Installation.ControlPlaneNodeSelector = map[string]string{"nodeName": "control01"} @@ -602,6 +606,7 @@ var _ = Describe("API server rendering tests (Calico Enterprise)", func() { {name: "tigera-network-admin", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRole"}, {name: "tigera-webhook-reader", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRole"}, {name: "tigera-apiserver-webhook-reader", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRoleBinding"}, + {name: "tigera-apiserver", ns: "", group: "policy", version: "v1beta1", kind: "PodSecurityPolicy"}, } component, err := render.APIServer(cfg) @@ -754,12 +759,11 @@ var _ = Describe("API server rendering tests (Calico Enterprise)", func() { {name: "tigera-network-admin", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRole"}, {name: "tigera-webhook-reader", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRole"}, {name: "tigera-apiserver-webhook-reader", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRoleBinding"}, + {name: "tigera-apiserver", ns: "", group: "policy", version: "v1beta1", kind: "PodSecurityPolicy"}, } - i := 0 for _, expectedRes := range expectedResources { rtest.ExpectResourceInList(resources, expectedRes.name, expectedRes.ns, expectedRes.group, expectedRes.version, expectedRes.kind) - i++ } Expect(resources).To(HaveLen(len(expectedResources))) @@ -831,13 +835,12 @@ var _ = Describe("API server rendering tests (Calico Enterprise)", func() { {name: "tigera-network-admin", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRole"}, {name: "tigera-webhook-reader", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRole"}, {name: "tigera-apiserver-webhook-reader", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRoleBinding"}, + {name: "tigera-apiserver", ns: "", group: "policy", version: "v1beta1", kind: "PodSecurityPolicy"}, } Expect(resources).To(HaveLen(len(expectedResources))) - i := 0 for _, expectedRes := range expectedResources { rtest.ExpectResourceInList(resources, expectedRes.name, expectedRes.ns, expectedRes.group, expectedRes.version, expectedRes.kind) - i++ } dep := rtest.GetResource(resources, "tigera-apiserver", "tigera-system", "apps", "v1", "Deployment") @@ -919,12 +922,11 @@ var _ = Describe("API server rendering tests (Calico Enterprise)", func() { {name: "tigera-network-admin", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRole"}, {name: "tigera-webhook-reader", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRole"}, {name: "tigera-apiserver-webhook-reader", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRoleBinding"}, + {name: "tigera-apiserver", ns: "", group: "policy", version: "v1beta1", kind: "PodSecurityPolicy"}, } - i := 0 for _, expectedRes := range expectedResources { rtest.ExpectResourceInList(resources, expectedRes.name, expectedRes.ns, expectedRes.group, expectedRes.version, expectedRes.kind) - i++ } Expect(resources).To(HaveLen(len(expectedResources))) dep := rtest.GetResource(resources, "tigera-apiserver", "tigera-system", "apps", "v1", "Deployment") @@ -1534,6 +1536,7 @@ var _ = Describe("API server rendering tests (Calico)", func() { APIServer: apiserver, Openshift: openshift, TLSKeyPair: kp, + UsePSP: true, } }) @@ -1558,6 +1561,7 @@ var _ = Describe("API server rendering tests (Calico)", func() 
{ {name: "calico-api", ns: "calico-apiserver", group: "", version: "v1", kind: "Service"}, {name: "calico-webhook-reader", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRole"}, {name: "calico-apiserver-webhook-reader", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRoleBinding"}, + {name: "calico-apiserver", ns: "", group: "policy", version: "v1beta1", kind: "PodSecurityPolicy"}, {name: "allow-apiserver", ns: "calico-apiserver", group: "networking.k8s.io", version: "v1", kind: "NetworkPolicy"}, } dnsNames := dns.GetServiceDNSNames(render.ProjectCalicoAPIServerServiceName(instance.Variant), rmeta.APIServerNamespace(instance.Variant), clusterDomain) @@ -1570,10 +1574,8 @@ var _ = Describe("API server rendering tests (Calico)", func() { resources, _ := component.Objects() - i := 0 for _, expectedRes := range expectedResources { rtest.ExpectResourceInList(resources, expectedRes.name, expectedRes.ns, expectedRes.group, expectedRes.version, expectedRes.kind) - i++ } Expect(len(resources)).To(Equal(len(expectedResources))) @@ -1679,6 +1681,7 @@ var _ = Describe("API server rendering tests (Calico)", func() { &corev1.Service{ObjectMeta: metav1.ObjectMeta{Name: "calico-api", Namespace: "calico-apiserver"}, TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "Service"}}, &rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: "calico-webhook-reader"}, TypeMeta: metav1.TypeMeta{APIVersion: "rbac.authorization.k8s.io/v1", Kind: "ClusterRole"}}, &rbacv1.ClusterRoleBinding{ObjectMeta: metav1.ObjectMeta{Name: "calico-apiserver-webhook-reader"}, TypeMeta: metav1.TypeMeta{APIVersion: "rbac.authorization.k8s.io/v1", Kind: "ClusterRoleBinding"}}, + &policyv1beta1.PodSecurityPolicy{ObjectMeta: metav1.ObjectMeta{Name: "calico-apiserver"}, TypeMeta: metav1.TypeMeta{APIVersion: "policy/v1beta1", Kind: "PodSecurityPolicy"}}, &netv1.NetworkPolicy{ObjectMeta: metav1.ObjectMeta{Name: "allow-apiserver", Namespace: "calico-apiserver"}, TypeMeta: metav1.TypeMeta{APIVersion: "networking.k8s.io/v1", Kind: "NetworkPolicy"}}, } diff --git a/pkg/render/applicationlayer/applicationlayer.go b/pkg/render/applicationlayer/applicationlayer.go index 25c3024d09..971d1ab6bf 100644 --- a/pkg/render/applicationlayer/applicationlayer.go +++ b/pkg/render/applicationlayer/applicationlayer.go @@ -28,6 +28,7 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + policyv1beta1 "k8s.io/api/policy/v1beta1" rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" @@ -36,14 +37,18 @@ import ( operatorv1 "github.com/tigera/operator/api/v1" "github.com/tigera/operator/pkg/common" "github.com/tigera/operator/pkg/components" + "github.com/tigera/operator/pkg/ptr" "github.com/tigera/operator/pkg/render" rmeta "github.com/tigera/operator/pkg/render/common/meta" + "github.com/tigera/operator/pkg/render/common/podsecuritypolicy" "github.com/tigera/operator/pkg/render/common/secret" "github.com/tigera/operator/pkg/render/common/securitycontext" ) const ( APLName = "application-layer" + RoleName = "application-layer" + PodSecurityPolicyName = "application-layer" ApplicationLayerDaemonsetName = "l7-log-collector" L7CollectorContainerName = "l7-collector" ProxyContainerName = "envoy-proxy" @@ -96,6 +101,9 @@ type Config struct { dikastesImage string dikastesEnabled bool envoyConfigMap *corev1.ConfigMap + + // Whether or not the cluster supports pod security policies. 
+ UsePSP bool } func (c *component) ResolveImages(is *operatorv1.ImageSet) error { @@ -168,6 +176,14 @@ func (c *component) Objects() ([]client.Object, []client.Object) { objs = append(objs, c.securityContextConstraints()) } + if c.config.UsePSP { + objs = append(objs, + c.role(), + c.roleBinding(), + c.podSecurityPolicy(), + ) + } + return objs, nil } @@ -509,6 +525,63 @@ func (c *component) clusterAdminClusterRoleBinding() *rbacv1.ClusterRoleBinding } } +func (c *component) role() *rbacv1.Role { + return &rbacv1.Role{ + TypeMeta: metav1.TypeMeta{Kind: "Role", APIVersion: "rbac.authorization.k8s.io/v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: RoleName, + Namespace: common.CalicoNamespace, + }, + Rules: []rbacv1.PolicyRule{ + { + + APIGroups: []string{"policy"}, + Resources: []string{"podsecuritypolicies"}, + Verbs: []string{"use"}, + ResourceNames: []string{PodSecurityPolicyName}, + }, + }, + } +} + +func (c *component) roleBinding() *rbacv1.RoleBinding { + return &rbacv1.RoleBinding{ + TypeMeta: metav1.TypeMeta{Kind: "RoleBinding", APIVersion: "rbac.authorization.k8s.io/v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: RoleName, + Namespace: common.CalicoNamespace, + }, + RoleRef: rbacv1.RoleRef{ + Kind: "Role", + Name: RoleName, + APIGroup: "rbac.authorization.k8s.io", + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: APLName, + Namespace: common.CalicoNamespace, + }, + }, + } +} + +func (c *component) podSecurityPolicy() *policyv1beta1.PodSecurityPolicy { + psp := podsecuritypolicy.NewBasePolicy(PodSecurityPolicyName) + psp.Spec.Privileged = true + psp.Spec.AllowPrivilegeEscalation = ptr.BoolToPtr(true) + psp.Spec.RequiredDropCapabilities = nil + psp.Spec.AllowedCapabilities = []corev1.Capability{ + "NET_ADMIN", + "NET_RAW", + } + psp.Spec.HostIPC = true + psp.Spec.HostNetwork = true + psp.Spec.RunAsUser.Rule = policyv1beta1.RunAsUserStrategyRunAsAny + psp.Spec.Volumes = append(psp.Spec.Volumes, policyv1beta1.CSI, policyv1beta1.FlexVolume) + return psp +} + // securityContextConstraints returns SCC needed for daemonset to run on Openshift. func (c *component) securityContextConstraints() *ocsv1.SecurityContextConstraints { privilegeEscalation := false diff --git a/pkg/render/applicationlayer/applicationlayer_test.go b/pkg/render/applicationlayer/applicationlayer_test.go index 4a1a91cbcc..85035891a6 100644 --- a/pkg/render/applicationlayer/applicationlayer_test.go +++ b/pkg/render/applicationlayer/applicationlayer_test.go @@ -30,13 +30,24 @@ import ( ) var _ = Describe("Tigera Secure Application Layer rendering tests", func() { - var installation *operatorv1.InstallationSpec + var ( + installation *operatorv1.InstallationSpec + cfg *applicationlayer.Config + ) BeforeEach(func() { // Initialize a default installation spec. 
installation = &operatorv1.InstallationSpec{ KubernetesProvider: operatorv1.ProviderNone, } + + cfg = &applicationlayer.Config{ + PullSecrets: nil, + Installation: installation, + OsType: rmeta.OSTypeLinux, + LogsEnabled: true, + UsePSP: true, + } }) It("should render with default l7 collector configuration", func() { @@ -50,21 +61,17 @@ var _ = Describe("Tigera Secure Application Layer rendering tests", func() { {name: applicationlayer.APLName, ns: common.CalicoNamespace, group: "", version: "v1", kind: "ServiceAccount"}, {name: applicationlayer.EnvoyConfigMapName, ns: common.CalicoNamespace, group: "", version: "v1", kind: "ConfigMap"}, {name: applicationlayer.ApplicationLayerDaemonsetName, ns: common.CalicoNamespace, group: "apps", version: "v1", kind: "DaemonSet"}, + {name: "application-layer", ns: "calico-system", group: "rbac.authorization.k8s.io", version: "v1", kind: "Role"}, + {name: "application-layer", ns: "calico-system", group: "rbac.authorization.k8s.io", version: "v1", kind: "RoleBinding"}, + {name: "application-layer", ns: "", group: "policy", version: "v1beta1", kind: "PodSecurityPolicy"}, } // Should render the correct resources. - component := applicationlayer.ApplicationLayer(&applicationlayer.Config{ - PullSecrets: nil, - Installation: installation, - OsType: rmeta.OSTypeLinux, - LogsEnabled: true, - }) + component := applicationlayer.ApplicationLayer(cfg) resources, _ := component.Objects() - Expect(len(resources)).To(Equal(len(expectedResources))) + Expect(resources).To(HaveLen(len(expectedResources))) - i := 0 - for _, expectedRes := range expectedResources { + for i, expectedRes := range expectedResources { rtest.ExpectResource(resources[i], expectedRes.name, expectedRes.ns, expectedRes.group, expectedRes.version, expectedRes.kind) - i++ } ds := rtest.GetResource(resources, applicationlayer.ApplicationLayerDaemonsetName, common.CalicoNamespace, "apps", "v1", "DaemonSet").(*appsv1.DaemonSet) @@ -195,6 +202,18 @@ var _ = Describe("Tigera Secure Application Layer rendering tests", func() { } }) + It("should render properly when PSP is not supported by the cluster", func() { + cfg.UsePSP = false + component := applicationlayer.ApplicationLayer(cfg) + Expect(component.ResolveImages(nil)).To(BeNil()) + resources, _ := component.Objects() + + // Should not contain any PodSecurityPolicies + for _, r := range resources { + Expect(r.GetObjectKind().GroupVersionKind().Kind).NotTo(Equal("PodSecurityPolicy")) + } + }) + It("should render with custom l7 collector configuration", func() { // create component with render the correct resources. // Should render the correct resources. diff --git a/pkg/render/common/podsecuritypolicy/pod_secruity_policy.go b/pkg/render/common/podsecuritypolicy/pod_secruity_policy.go index a656c6f19c..247a0dd30c 100644 --- a/pkg/render/common/podsecuritypolicy/pod_secruity_policy.go +++ b/pkg/render/common/podsecuritypolicy/pod_secruity_policy.go @@ -1,4 +1,4 @@ -// Copyright (c) 2022 Tigera, Inc. All rights reserved. +// Copyright (c) 2022-2023 Tigera, Inc. All rights reserved. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -18,13 +18,13 @@ import ( corev1 "k8s.io/api/core/v1" policyv1beta1 "k8s.io/api/policy/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/tigera/operator/pkg/ptr" ) // NewBasePolicy creates the base pod security policy with the minimal required permissions to be overridden if // necessary. 
-func NewBasePolicy() *policyv1beta1.PodSecurityPolicy { - falseBool := false - ptrBoolFalse := &falseBool +func NewBasePolicy(name string) *policyv1beta1.PodSecurityPolicy { // This PodSecurityPolicy is equivalent to a "restricted" pod security standard, // according to: https://kubernetes.io/docs/reference/access-authn-authz/psp-to-pod-security-standards/ return &policyv1beta1.PodSecurityPolicy{ @@ -33,18 +33,19 @@ func NewBasePolicy() *policyv1beta1.PodSecurityPolicy { Annotations: map[string]string{ "seccomp.security.alpha.kubernetes.io/allowedProfileNames": "*", }, + Name: name, }, Spec: policyv1beta1.PodSecurityPolicySpec{ Privileged: false, - AllowPrivilegeEscalation: ptrBoolFalse, + AllowPrivilegeEscalation: ptr.BoolToPtr(false), RequiredDropCapabilities: []corev1.Capability{"ALL"}, Volumes: []policyv1beta1.FSType{ policyv1beta1.ConfigMap, + policyv1beta1.DownwardAPI, policyv1beta1.EmptyDir, + policyv1beta1.PersistentVolumeClaim, policyv1beta1.Projected, policyv1beta1.Secret, - policyv1beta1.DownwardAPI, - policyv1beta1.PersistentVolumeClaim, }, HostPorts: []policyv1beta1.HostPortRange{{ Min: int32(0), diff --git a/pkg/render/compliance.go b/pkg/render/compliance.go index 9be4d359e5..55d7c86408 100644 --- a/pkg/render/compliance.go +++ b/pkg/render/compliance.go @@ -215,13 +215,16 @@ func (c *complianceComponent) Objects() ([]client.Object, []client.Object) { if c.cfg.Openshift { complianceObjs = append(complianceObjs, c.complianceBenchmarkerSecurityContextConstraints()) - } else if c.cfg.UsePSP { + } + + if c.cfg.UsePSP { complianceObjs = append(complianceObjs, c.complianceBenchmarkerPodSecurityPolicy(), c.complianceControllerPodSecurityPolicy(), c.complianceReporterPodSecurityPolicy(), c.complianceServerPodSecurityPolicy(), - c.complianceSnapshotterPodSecurityPolicy()) + c.complianceSnapshotterPodSecurityPolicy(), + ) } // Need to grant cluster admin permissions in DockerEE to the controller since a pod starting pods with @@ -271,7 +274,7 @@ func (c *complianceComponent) complianceControllerRole() *rbacv1.Role { }, } - if !c.cfg.Openshift { + if c.cfg.UsePSP { // Allow access to the pod security policy in case this is enforced on the cluster rules = append(rules, rbacv1.PolicyRule{ APIGroups: []string{"policy"}, @@ -432,9 +435,7 @@ func (c *complianceComponent) complianceControllerDeployment() *appsv1.Deploymen } func (c *complianceComponent) complianceControllerPodSecurityPolicy() *policyv1beta1.PodSecurityPolicy { - psp := podsecuritypolicy.NewBasePolicy() - psp.GetObjectMeta().SetName(ComplianceControllerName) - return psp + return podsecuritypolicy.NewBasePolicy(ComplianceControllerName) } func (c *complianceComponent) complianceReporterServiceAccount() *corev1.ServiceAccount { @@ -453,7 +454,7 @@ func (c *complianceComponent) complianceReporterClusterRole() *rbacv1.ClusterRol }, } - if !c.cfg.Openshift { + if c.cfg.UsePSP { // Allow access to the pod security policy in case this is enforced on the cluster rules = append(rules, rbacv1.PolicyRule{ APIGroups: []string{"policy"}, @@ -558,8 +559,7 @@ func (c *complianceComponent) complianceReporterPodTemplate() *corev1.PodTemplat } func (c *complianceComponent) complianceReporterPodSecurityPolicy() *policyv1beta1.PodSecurityPolicy { - psp := podsecuritypolicy.NewBasePolicy() - psp.GetObjectMeta().SetName("compliance-reporter") + psp := podsecuritypolicy.NewBasePolicy("compliance-reporter") psp.Spec.Volumes = append(psp.Spec.Volumes, policyv1beta1.HostPath) psp.Spec.RunAsUser.Rule = policyv1beta1.RunAsUserStrategyRunAsAny return 
psp @@ -595,7 +595,7 @@ func (c *complianceComponent) complianceServerClusterRole() *rbacv1.ClusterRole }, } - if !c.cfg.Openshift { + if c.cfg.UsePSP { // Allow access to the pod security policy in case this is enforced on the cluster clusterRole.Rules = append(clusterRole.Rules, rbacv1.PolicyRule{ APIGroups: []string{"policy"}, @@ -749,9 +749,7 @@ func (c *complianceComponent) complianceServerDeployment() *appsv1.Deployment { } func (c *complianceComponent) complianceServerPodSecurityPolicy() *policyv1beta1.PodSecurityPolicy { - psp := podsecuritypolicy.NewBasePolicy() - psp.GetObjectMeta().SetName(ComplianceServerName) - return psp + return podsecuritypolicy.NewBasePolicy(ComplianceServerName) } func (c *complianceComponent) complianceServerVolumeMounts() []corev1.VolumeMount { @@ -811,7 +809,7 @@ func (c *complianceComponent) complianceSnapshotterClusterRole() *rbacv1.Cluster }, } - if !c.cfg.Openshift { + if c.cfg.UsePSP { // Allow access to the pod security policy in case this is enforced on the cluster rules = append(rules, rbacv1.PolicyRule{ APIGroups: []string{"policy"}, @@ -908,9 +906,7 @@ func (c *complianceComponent) complianceSnapshotterDeployment() *appsv1.Deployme } func (c *complianceComponent) complianceSnapshotterPodSecurityPolicy() *policyv1beta1.PodSecurityPolicy { - psp := podsecuritypolicy.NewBasePolicy() - psp.GetObjectMeta().SetName(ComplianceSnapshotterName) - return psp + return podsecuritypolicy.NewBasePolicy(ComplianceSnapshotterName) } func (c *complianceComponent) complianceBenchmarkerServiceAccount() *corev1.ServiceAccount { @@ -934,7 +930,7 @@ func (c *complianceComponent) complianceBenchmarkerClusterRole() *rbacv1.Cluster }, } - if !c.cfg.Openshift { + if c.cfg.UsePSP { // Allow access to the pod security policy in case this is enforced on the cluster rules = append(rules, rbacv1.PolicyRule{ APIGroups: []string{"policy"}, @@ -1089,8 +1085,7 @@ func (c *complianceComponent) complianceBenchmarkerSecurityContextConstraints() } func (c *complianceComponent) complianceBenchmarkerPodSecurityPolicy() *policyv1beta1.PodSecurityPolicy { - psp := podsecuritypolicy.NewBasePolicy() - psp.GetObjectMeta().SetName("compliance-benchmarker") + psp := podsecuritypolicy.NewBasePolicy("compliance-benchmarker") psp.Spec.Volumes = append(psp.Spec.Volumes, policyv1beta1.HostPath) psp.Spec.AllowedHostPaths = []policyv1beta1.AllowedHostPath{ { diff --git a/pkg/render/csi.go b/pkg/render/csi.go index e006e7acef..fe46daa3d3 100644 --- a/pkg/render/csi.go +++ b/pkg/render/csi.go @@ -48,7 +48,6 @@ const ( type CSIConfiguration struct { Installation *operatorv1.InstallationSpec Terminating bool - Openshift bool UsePSP bool } @@ -266,7 +265,7 @@ func (c *csiComponent) csiTemplate() corev1.PodTemplateSpec { Volumes: c.csiVolumes(), } - if !c.cfg.Openshift && c.cfg.UsePSP { + if c.cfg.UsePSP { templateSpec.ServiceAccountName = CSIDaemonSetName } @@ -320,36 +319,29 @@ func (c *csiComponent) serviceAccount() *corev1.ServiceAccount { // podSecurityPolicy sets up a PodSecurityPolicy for CSI Driver to allow usage of privileged // securityContext and hostPath volume. 
func (c *csiComponent) podSecurityPolicy() *policyv1beta1.PodSecurityPolicy { - psp := podsecuritypolicy.NewBasePolicy() - psp.GetObjectMeta().SetName(CSIDaemonSetName) + psp := podsecuritypolicy.NewBasePolicy(CSIDaemonSetName) psp.Spec.Privileged = true psp.Spec.AllowPrivilegeEscalation = ptr.BoolToPtr(true) psp.Spec.Volumes = append(psp.Spec.Volumes, policyv1beta1.HostPath) psp.Spec.RunAsUser.Rule = policyv1beta1.RunAsUserStrategyRunAsAny - return psp } func (c *csiComponent) role() *rbacv1.Role { - policyRules := []rbacv1.PolicyRule{} - - // Allow access to the pod security policy in case this is enforced on the cluster - if !c.cfg.Openshift && c.cfg.UsePSP { - policyRules = append(policyRules, rbacv1.PolicyRule{ - APIGroups: []string{"policy"}, - Resources: []string{"podsecuritypolicies"}, - Verbs: []string{"use"}, - ResourceNames: []string{CSIDaemonSetName}, - }) - } - return &rbacv1.Role{ TypeMeta: metav1.TypeMeta{Kind: "Role", APIVersion: "rbac.authorization.k8s.io/v1"}, ObjectMeta: metav1.ObjectMeta{ Name: CSIDaemonSetName, Namespace: CSIDaemonSetNamespace, }, - Rules: policyRules, + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{"policy"}, + Resources: []string{"podsecuritypolicies"}, + Verbs: []string{"use"}, + ResourceNames: []string{CSIDaemonSetName}, + }, + }, } } @@ -401,20 +393,17 @@ func (c *csiComponent) ResolveImages(is *operatorv1.ImageSet) error { } func (c *csiComponent) Objects() (objsToCreate, objsToDelete []client.Object) { - objs := []client.Object{} - - objs = append(objs, c.csiDriver()) - objs = append(objs, c.csiDaemonset()) + objs := []client.Object{c.csiDriver(), c.csiDaemonset()} // create PSP and corresponding clusterrole if it allows, clusterroles are currently // only for attaching the PSP to CSI's DaemonSet, do not render them if the PSPs // are also not rendered - if !c.cfg.Openshift && c.cfg.UsePSP { + if c.cfg.UsePSP { objs = append(objs, c.serviceAccount(), - c.podSecurityPolicy(), c.role(), c.roleBinding(), + c.podSecurityPolicy(), ) } diff --git a/pkg/render/csi_test.go b/pkg/render/csi_test.go index 21c810032b..d0cfe76ab6 100644 --- a/pkg/render/csi_test.go +++ b/pkg/render/csi_test.go @@ -61,8 +61,8 @@ var _ = Describe("CSI rendering tests", func() { Expect(comp.ResolveImages(nil)).To(BeNil()) createObjs, delObjs := comp.Objects() - Expect(len(delObjs)).To(Equal(0)) - Expect(len(createObjs)).To(Equal(len(expectedCreateObjs))) + Expect(createObjs).To(HaveLen(len(expectedCreateObjs))) + Expect(delObjs).To(HaveLen(0)) for i, expectedRes := range expectedCreateObjs { rtest.ExpectResource(createObjs[i], expectedRes.name, expectedRes.ns, expectedRes.group, expectedRes.version, expectedRes.kind) @@ -150,11 +150,13 @@ var _ = Describe("CSI rendering tests", func() { }) It("should render CSI's PSP and the corresponding clusterroles when UsePSP is set true", func() { - cfg.Openshift = false cfg.UsePSP = true - resources, _ := render.CSI(&cfg).Objects() + ds := rtest.GetResource(resources, render.CSIDaemonSetName, common.CalicoNamespace, "apps", "v1", "DaemonSet").(*appsv1.DaemonSet) + Expect(ds).NotTo(BeNil()) + Expect(ds.Spec.Template.Spec.ServiceAccountName).To(Equal("csi-node-driver")) + serviceAccount := rtest.GetResource(resources, render.CSIDaemonSetName, render.CSIDaemonSetNamespace, "", "v1", "ServiceAccount") Expect(serviceAccount).ToNot(BeNil()) @@ -185,22 +187,12 @@ var _ = Describe("CSI rendering tests", func() { )) }) - It("should not add ServiceAccountName field when UsePSP is false or not on Openshift", func() { - cfg.Openshift = false 
+ It("should not add ServiceAccountName field when UsePSP is false", func() { cfg.UsePSP = false - resources, _ := render.CSI(&cfg).Objects() ds := rtest.GetResource(resources, render.CSIDaemonSetName, common.CalicoNamespace, "apps", "v1", "DaemonSet").(*appsv1.DaemonSet) Expect(ds.Spec.Template.Spec.ServiceAccountName).To(BeEmpty()) - - cfg.Openshift = true - cfg.UsePSP = true - - resources, _ = render.CSI(&cfg).Objects() - - ds = rtest.GetResource(resources, render.CSIDaemonSetName, common.CalicoNamespace, "apps", "v1", "DaemonSet").(*appsv1.DaemonSet) - Expect(ds.Spec.Template.Spec.ServiceAccountName).To(BeEmpty()) }) Context("With csi-node-driver DaemonSet overrides", func() { diff --git a/pkg/render/egressgateway/egressgateway.go b/pkg/render/egressgateway/egressgateway.go index 0e4ad35b7e..997e90739f 100644 --- a/pkg/render/egressgateway/egressgateway.go +++ b/pkg/render/egressgateway/egressgateway.go @@ -101,19 +101,28 @@ func (c *component) SupportedOSType() rmeta.OSType { } func (c *component) Objects() ([]client.Object, []client.Object) { - objectsToCreate := []client.Object{} - objectsToDelete := []client.Object{} - objectsToCreate = append(objectsToCreate, c.egwServiceAccount()) + objectsToCreate := []client.Object{c.egwServiceAccount()} if c.config.OpenShift { objectsToCreate = append(objectsToCreate, c.getSecurityContextConstraints()) - } else if c.config.UsePSP { - objectsToCreate = append(objectsToCreate, PodSecurityPolicy()) - objectsToCreate = append(objectsToCreate, c.egwRole()) - objectsToCreate = append(objectsToCreate, c.egwRoleBinding()) + } + + var objectsToDelete []client.Object + if c.config.UsePSP { + objectsToCreate = append(objectsToCreate, + PodSecurityPolicy(), + c.egwRole(), + c.egwRoleBinding(), + ) } else { - objectsToDelete = append(objectsToDelete, c.egwRole()) - objectsToDelete = append(objectsToDelete, c.egwRoleBinding()) + // It is possible to have multiple egress gateway resources in different namespaces. + // We only delete namespaced role and role binding here. The cluster-level psp is + // deleted in egressgateway_controller when no egress gateway is in the cluster. 
+ objectsToDelete = append(objectsToDelete, + c.egwRole(), + c.egwRoleBinding(), + ) } + objectsToCreate = append(objectsToCreate, c.egwDeployment()) return objectsToCreate, objectsToDelete } @@ -288,13 +297,9 @@ func (c *component) egwInitEnvVars() []corev1.EnvVar { } func PodSecurityPolicy() *policyv1beta1.PodSecurityPolicy { - boolTrue := true - psp := podsecuritypolicy.NewBasePolicy() - psp.GetObjectMeta().SetName(podSecurityPolicyName) - psp.Spec.AllowedCapabilities = []corev1.Capability{ - corev1.Capability("NET_ADMIN"), - } - psp.Spec.AllowPrivilegeEscalation = &boolTrue + psp := podsecuritypolicy.NewBasePolicy(podSecurityPolicyName) + psp.Spec.AllowedCapabilities = []corev1.Capability{"NET_ADMIN", "NET_RAW"} + psp.Spec.AllowPrivilegeEscalation = ptr.BoolToPtr(true) psp.Spec.HostIPC = true psp.Spec.HostNetwork = true psp.Spec.HostPID = true @@ -302,9 +307,6 @@ func PodSecurityPolicy() *policyv1beta1.PodSecurityPolicy { psp.Spec.RunAsUser = policyv1beta1.RunAsUserStrategyOptions{ Rule: policyv1beta1.RunAsUserStrategyRunAsAny, } - psp.Spec.SELinux = policyv1beta1.SELinuxStrategyOptions{ - Rule: policyv1beta1.SELinuxStrategyRunAsAny, - } psp.Spec.SupplementalGroups = policyv1beta1.SupplementalGroupsStrategyOptions{ Rule: policyv1beta1.SupplementalGroupsStrategyRunAsAny, } diff --git a/pkg/render/fluentd.go b/pkg/render/fluentd.go index a2d9bc5103..1a150142f7 100644 --- a/pkg/render/fluentd.go +++ b/pkg/render/fluentd.go @@ -276,13 +276,12 @@ func (c *fluentdComponent) Objects() ([]client.Object, []client.Object) { objs = append(objs, c.filtersConfigMap()) } if c.cfg.EKSConfig != nil && c.cfg.OSType == rmeta.OSTypeLinux { - if c.cfg.Installation.KubernetesProvider != operatorv1.ProviderOpenShift { + if c.cfg.UsePSP { objs = append(objs, c.eksLogForwarderClusterRole(), - c.eksLogForwarderClusterRoleBinding()) - if c.cfg.UsePSP { - objs = append(objs, c.eksLogForwarderPodSecurityPolicy()) - } + c.eksLogForwarderClusterRoleBinding(), + c.eksLogForwarderPodSecurityPolicy(), + ) } objs = append(objs, c.eksLogForwarderServiceAccount(), c.eksLogForwarderSecret(), @@ -291,13 +290,12 @@ func (c *fluentdComponent) Objects() ([]client.Object, []client.Object) { // Windows PSP does not support allowedHostPaths yet. // See: https://github.com/kubernetes/kubernetes/issues/93165#issuecomment-693049808 - if c.cfg.Installation.KubernetesProvider != operatorv1.ProviderOpenShift && c.cfg.OSType == rmeta.OSTypeLinux { + if c.cfg.UsePSP && c.cfg.OSType == rmeta.OSTypeLinux { objs = append(objs, c.fluentdClusterRole(), - c.fluentdClusterRoleBinding()) - if c.cfg.UsePSP { - objs = append(objs, c.fluentdPodSecurityPolicy()) - } + c.fluentdClusterRoleBinding(), + c.fluentdPodSecurityPolicy(), + ) } objs = append(objs, secret.ToRuntimeObjects(secret.CopyToNamespace(LogCollectorNamespace, c.cfg.ESSecrets...)...)...) 
@@ -876,12 +874,7 @@ func (c *fluentdComponent) volumes() []corev1.Volume { } func (c *fluentdComponent) fluentdPodSecurityPolicy() *policyv1beta1.PodSecurityPolicy { - psp := podsecuritypolicy.NewBasePolicy() - psp.GetObjectMeta().SetName(c.fluentdName()) - psp.Spec.RequiredDropCapabilities = nil - psp.Spec.AllowedCapabilities = []corev1.Capability{ - corev1.Capability("CAP_CHOWN"), - } + psp := podsecuritypolicy.NewBasePolicy(c.fluentdName()) psp.Spec.Volumes = append(psp.Spec.Volumes, policyv1beta1.HostPath) psp.Spec.AllowedHostPaths = []policyv1beta1.AllowedHostPath{ { @@ -1070,8 +1063,7 @@ func (c *fluentdComponent) eksLogForwarderVolumes() []corev1.Volume { } func (c *fluentdComponent) eksLogForwarderPodSecurityPolicy() *policyv1beta1.PodSecurityPolicy { - psp := podsecuritypolicy.NewBasePolicy() - psp.GetObjectMeta().SetName(eksLogForwarderName) + psp := podsecuritypolicy.NewBasePolicy(eksLogForwarderName) psp.Spec.RunAsUser.Rule = policyv1beta1.RunAsUserStrategyRunAsAny return psp } @@ -1103,7 +1095,6 @@ func (c *fluentdComponent) eksLogForwarderClusterRole() *rbacv1.ClusterRole { ObjectMeta: metav1.ObjectMeta{ Name: eksLogForwarderName, }, - Rules: []rbacv1.PolicyRule{ { // Allow access to the pod security policy in case this is enforced on the cluster diff --git a/pkg/render/guardian.go b/pkg/render/guardian.go index c3e5739ae8..68f57e83fd 100644 --- a/pkg/render/guardian.go +++ b/pkg/render/guardian.go @@ -31,7 +31,6 @@ import ( "github.com/tigera/api/pkg/lib/numorstring" operatorv1 "github.com/tigera/operator/api/v1" "github.com/tigera/operator/pkg/components" - "github.com/tigera/operator/pkg/ptr" rmeta "github.com/tigera/operator/pkg/render/common/meta" "github.com/tigera/operator/pkg/render/common/networkpolicy" "github.com/tigera/operator/pkg/render/common/podsecuritypolicy" @@ -129,7 +128,7 @@ func (c *GuardianComponent) Objects() ([]client.Object, []client.Object) { // Add tigera-manager service account for impersonation CreateNamespace(ManagerNamespace, c.cfg.Installation.KubernetesProvider, PSSRestricted), managerServiceAccount(), - managerClusterRole(false, true, c.cfg.Openshift), + managerClusterRole(false, true, c.cfg.UsePSP), managerClusterRoleBinding(), managerClusterWideSettingsGroup(), managerUserSpecificSettingsGroup(), @@ -137,10 +136,9 @@ func (c *GuardianComponent) Objects() ([]client.Object, []client.Object) { managerClusterWideDefaultView(), ) - if !c.cfg.Openshift && c.cfg.UsePSP { - objs = append(objs, c.podsecuritypolicy()) + if c.cfg.UsePSP { + objs = append(objs, c.podSecurityPolicy()) } - return objs, nil } @@ -182,24 +180,18 @@ func (c *GuardianComponent) service() *corev1.Service { } } -func (c *GuardianComponent) serviceAccount() client.Object { +func (c *GuardianComponent) serviceAccount() *corev1.ServiceAccount { return &corev1.ServiceAccount{ TypeMeta: metav1.TypeMeta{Kind: "ServiceAccount", APIVersion: "v1"}, ObjectMeta: metav1.ObjectMeta{Name: GuardianServiceAccountName, Namespace: GuardianNamespace}, } } -func (c *GuardianComponent) podsecuritypolicy() client.Object { - psp := podsecuritypolicy.NewBasePolicy() - psp.GetObjectMeta().SetName(GuardianPodSecurityPolicyName) - psp.Spec.Privileged = false - psp.Spec.AllowPrivilegeEscalation = ptr.BoolToPtr(false) - psp.Spec.RunAsUser.Rule = policyv1beta1.RunAsUserStrategyMustRunAsNonRoot - - return psp +func (c *GuardianComponent) podSecurityPolicy() *policyv1beta1.PodSecurityPolicy { + return podsecuritypolicy.NewBasePolicy(GuardianPodSecurityPolicyName) } -func (c *GuardianComponent) 
clusterRole() client.Object { +func (c *GuardianComponent) clusterRole() *rbacv1.ClusterRole { policyRules := []rbacv1.PolicyRule{ { APIGroups: []string{""}, @@ -208,7 +200,7 @@ func (c *GuardianComponent) clusterRole() client.Object { }, } - if !c.cfg.Openshift && c.cfg.UsePSP { + if c.cfg.UsePSP { // Allow access to the pod security policy in case this is enforced on the cluster policyRules = append(policyRules, rbacv1.PolicyRule{ APIGroups: []string{"policy"}, @@ -227,7 +219,7 @@ func (c *GuardianComponent) clusterRole() client.Object { } } -func (c *GuardianComponent) clusterRoleBinding() client.Object { +func (c *GuardianComponent) clusterRoleBinding() *rbacv1.ClusterRoleBinding { return &rbacv1.ClusterRoleBinding{ TypeMeta: metav1.TypeMeta{Kind: "ClusterRoleBinding", APIVersion: "rbac.authorization.k8s.io/v1"}, ObjectMeta: metav1.ObjectMeta{ @@ -248,7 +240,7 @@ func (c *GuardianComponent) clusterRoleBinding() client.Object { } } -func (c *GuardianComponent) deployment() client.Object { +func (c *GuardianComponent) deployment() *appsv1.Deployment { var replicas int32 = 1 return &appsv1.Deployment{ diff --git a/pkg/render/intrusion_detection.go b/pkg/render/intrusion_detection.go index bddde9478f..7dbb09c336 100644 --- a/pkg/render/intrusion_detection.go +++ b/pkg/render/intrusion_detection.go @@ -58,12 +58,13 @@ const ( IntrusionDetectionControllerPolicyName = networkpolicy.TigeraComponentPolicyPrefix + IntrusionDetectionControllerName IntrusionDetectionInstallerPolicyName = networkpolicy.TigeraComponentPolicyPrefix + "intrusion-detection-elastic" - ADAPIObjectName = "anomaly-detection-api" - ADAPIObjectPortName = "anomaly-detection-api-https" - ADAPITLSSecretName = "anomaly-detection-api-tls" - ADAPIExpectedServiceName = "anomaly-detection-api.tigera-intrusion-detection.svc" - ADAPIPolicyName = networkpolicy.TigeraComponentPolicyPrefix + ADAPIObjectName - adAPIPort = 8080 + ADAPIObjectName = "anomaly-detection-api" + ADAPIPodSecurityPolicyName = "anomaly-detection-api" + ADAPIObjectPortName = "anomaly-detection-api-https" + ADAPITLSSecretName = "anomaly-detection-api-tls" + ADAPIExpectedServiceName = "anomaly-detection-api.tigera-intrusion-detection.svc" + ADAPIPolicyName = networkpolicy.TigeraComponentPolicyPrefix + ADAPIObjectName + adAPIPort = 8080 ADPersistentVolumeClaimName = "tigera-anomaly-detection" DefaultAnomalyDetectionPVRequestSizeGi = "10Gi" @@ -256,13 +257,13 @@ func (c *intrusionDetectionComponent) Objects() ([]client.Object, []client.Objec } } - if !c.cfg.Openshift { + if c.cfg.UsePSP { objs = append(objs, c.intrusionDetectionPSPClusterRole(), - c.intrusionDetectionPSPClusterRoleBinding()) - if c.cfg.UsePSP { - objs = append(objs, c.intrusionDetectionPodSecurityPolicy()) - } + c.intrusionDetectionPSPClusterRoleBinding(), + c.intrusionDetectionPodSecurityPolicy(), + c.adAPIPodSecurityPolicy(), + ) } if c.cfg.HasNoLicense { @@ -1242,9 +1243,7 @@ func (c *intrusionDetectionComponent) adJobsGlobalertTemplates() []client.Object } func (c *intrusionDetectionComponent) intrusionDetectionPodSecurityPolicy() *policyv1beta1.PodSecurityPolicy { - psp := podsecuritypolicy.NewBasePolicy() - psp.GetObjectMeta().SetName("intrusion-detection") - + psp := podsecuritypolicy.NewBasePolicy("intrusion-detection") if c.syslogForwardingIsEnabled() { psp.Spec.Volumes = append(psp.Spec.Volumes, policyv1beta1.HostPath) psp.Spec.AllowedHostPaths = []policyv1beta1.AllowedHostPath{ @@ -1253,9 +1252,8 @@ func (c *intrusionDetectionComponent) intrusionDetectionPodSecurityPolicy() *pol ReadOnly: 
false, }, } + psp.Spec.RunAsUser.Rule = policyv1beta1.RunAsUserStrategyRunAsAny } - - psp.Spec.RunAsUser.Rule = policyv1beta1.RunAsUserStrategyRunAsAny return psp } @@ -1319,23 +1317,34 @@ func (c *intrusionDetectionComponent) adAPIServiceAccount() *corev1.ServiceAccou } func (c *intrusionDetectionComponent) adAPIAccessClusterRole() *rbacv1.ClusterRole { + rules := []rbacv1.PolicyRule{ + { + APIGroups: []string{"authorization.k8s.io"}, + Resources: []string{"subjectaccessreviews"}, + Verbs: []string{"create"}, + }, + { + APIGroups: []string{"authentication.k8s.io"}, + Resources: []string{"tokenreviews"}, + Verbs: []string{"create"}, + }, + } + + if c.cfg.UsePSP { + rules = append(rules, rbacv1.PolicyRule{ + APIGroups: []string{"policy"}, + Resources: []string{"podsecuritypolicies"}, + Verbs: []string{"use"}, + ResourceNames: []string{ADAPIPodSecurityPolicyName}, + }) + } + return &rbacv1.ClusterRole{ TypeMeta: metav1.TypeMeta{Kind: "ClusterRole", APIVersion: "rbac.authorization.k8s.io/v1"}, ObjectMeta: metav1.ObjectMeta{ Name: ADAPIObjectName, }, - Rules: []rbacv1.PolicyRule{ - { - APIGroups: []string{"authorization.k8s.io"}, - Resources: []string{"subjectaccessreviews"}, - Verbs: []string{"create"}, - }, - { - APIGroups: []string{"authentication.k8s.io"}, - Resources: []string{"tokenreviews"}, - Verbs: []string{"create"}, - }, - }, + Rules: rules, } } @@ -1414,6 +1423,10 @@ func (c *intrusionDetectionComponent) adPersistentVolumeClaim() *corev1.Persiste return &adPVC } +func (c *intrusionDetectionComponent) adAPIPodSecurityPolicy() *policyv1beta1.PodSecurityPolicy { + return podsecuritypolicy.NewBasePolicy(ADAPIPodSecurityPolicyName) +} + func (c *intrusionDetectionComponent) adAPIDeployment() *appsv1.Deployment { var adModelVolumeSource corev1.VolumeSource sc := securitycontext.NewNonRootContext() diff --git a/pkg/render/intrusion_detection_test.go b/pkg/render/intrusion_detection_test.go index 9d2d533aba..fa183bb4e6 100644 --- a/pkg/render/intrusion_detection_test.go +++ b/pkg/render/intrusion_detection_test.go @@ -163,9 +163,10 @@ var _ = Describe("Intrusion Detection rendering tests", func() { {name: "intrusion-detection-psp", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRole"}, {name: "intrusion-detection-psp", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRoleBinding"}, {name: "intrusion-detection", ns: "", group: "policy", version: "v1beta1", kind: "PodSecurityPolicy"}, + {name: "anomaly-detection-api", ns: "", group: "policy", version: "v1beta1", kind: "PodSecurityPolicy"}, } - Expect(len(resources)).To(Equal(len(expectedResources))) + Expect(resources).To(HaveLen(len(expectedResources))) for i, expectedRes := range expectedResources { rtest.ExpectResource(resources[i], expectedRes.name, expectedRes.ns, expectedRes.group, expectedRes.version, expectedRes.kind) @@ -313,13 +314,17 @@ var _ = Describe("Intrusion Detection rendering tests", func() { })) adAPIClusterRole := rtest.GetResource(resources, render.ADAPIObjectName, "", "rbac.authorization.k8s.io", "v1", "ClusterRole").(*rbacv1.ClusterRole) - Expect(len(adAPIClusterRole.Rules)).To(Equal(2)) + Expect(adAPIClusterRole.Rules).To(HaveLen(3)) Expect(adAPIClusterRole.Rules[0].APIGroups).To(ConsistOf("authorization.k8s.io")) Expect(adAPIClusterRole.Rules[0].Resources).To(ConsistOf("subjectaccessreviews")) Expect(adAPIClusterRole.Rules[0].Verbs).To(ConsistOf("create")) Expect(adAPIClusterRole.Rules[1].APIGroups).To(ConsistOf("authentication.k8s.io")) 
Expect(adAPIClusterRole.Rules[1].Resources).To(ConsistOf("tokenreviews")) Expect(adAPIClusterRole.Rules[1].Verbs).To(ConsistOf("create")) + Expect(adAPIClusterRole.Rules[2].APIGroups).To(ConsistOf("policy")) + Expect(adAPIClusterRole.Rules[2].Resources).To(ConsistOf("podsecuritypolicies")) + Expect(adAPIClusterRole.Rules[2].Verbs).To(ConsistOf("use")) + Expect(adAPIClusterRole.Rules[2].ResourceNames).To(ConsistOf("anomaly-detection-api")) adAPIClusterRoleBinding := rtest.GetResource(resources, render.ADAPIObjectName, "", "rbac.authorization.k8s.io", "v1", "ClusterRoleBinding").(*rbacv1.ClusterRoleBinding) Expect(adAPIClusterRoleBinding.RoleRef).To(Equal(rbacv1.RoleRef{ @@ -496,9 +501,10 @@ var _ = Describe("Intrusion Detection rendering tests", func() { {name: "intrusion-detection-psp", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRole"}, {name: "intrusion-detection-psp", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRoleBinding"}, {name: "intrusion-detection", ns: "", group: "policy", version: "v1beta1", kind: "PodSecurityPolicy"}, + {name: "anomaly-detection-api", ns: "", group: "policy", version: "v1beta1", kind: "PodSecurityPolicy"}, } - Expect(len(resources)).To(Equal(len(expectedResources))) + Expect(resources).To(HaveLen(len(expectedResources))) for i, expectedRes := range expectedResources { rtest.ExpectResource(resources[i], expectedRes.name, expectedRes.ns, expectedRes.group, expectedRes.version, expectedRes.kind) @@ -620,9 +626,10 @@ var _ = Describe("Intrusion Detection rendering tests", func() { {name: "intrusion-detection-psp", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRole"}, {name: "intrusion-detection-psp", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRoleBinding"}, {name: "intrusion-detection", ns: "", group: "policy", version: "v1beta1", kind: "PodSecurityPolicy"}, + {name: "anomaly-detection-api", ns: "", group: "policy", version: "v1beta1", kind: "PodSecurityPolicy"}, } - Expect(len(resources)).To(Equal(len(expectedResources))) + Expect(resources).To(HaveLen(len(expectedResources))) for i, expectedRes := range expectedResources { rtest.ExpectResource(resources[i], expectedRes.name, expectedRes.ns, expectedRes.group, expectedRes.version, expectedRes.kind) @@ -814,6 +821,7 @@ var _ = Describe("Intrusion Detection rendering tests", func() { {name: "intrusion-detection-psp", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRole"}, {name: "intrusion-detection-psp", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRoleBinding"}, {name: "intrusion-detection", ns: "", group: "policy", version: "v1beta1", kind: "PodSecurityPolicy"}, + {name: "anomaly-detection-api", ns: "", group: "policy", version: "v1beta1", kind: "PodSecurityPolicy"}, } Expect(toCreate).To(HaveLen(len(expectedResources))) diff --git a/pkg/render/kubecontrollers/kube-controllers.go b/pkg/render/kubecontrollers/kube-controllers.go index 9bb16672ac..01d55e60bc 100644 --- a/pkg/render/kubecontrollers/kube-controllers.go +++ b/pkg/render/kubecontrollers/kube-controllers.go @@ -240,7 +240,7 @@ func (c *kubeControllersComponent) Objects() ([]client.Object, []client.Object) secret.CopyToNamespace(common.CalicoNamespace, c.cfg.KubeControllersGatewaySecret)...)...) 
} - if c.cfg.Installation.KubernetesProvider != operatorv1.ProviderOpenShift && c.cfg.UsePSP { + if c.cfg.UsePSP { objectsToCreate = append(objectsToCreate, c.controllersPodSecurityPolicy()) } @@ -315,7 +315,7 @@ func kubeControllersRoleCommonRules(cfg *KubeControllersConfiguration, kubeContr }, } - if cfg.Installation.KubernetesProvider != operatorv1.ProviderOpenShift { + if cfg.UsePSP { // Allow access to the pod security policy in case this is enforced on the cluster rules = append(rules, rbacv1.PolicyRule{ APIGroups: []string{"policy"}, @@ -616,9 +616,7 @@ func (c *kubeControllersComponent) annotations() map[string]string { } func (c *kubeControllersComponent) controllersPodSecurityPolicy() *policyv1beta1.PodSecurityPolicy { - psp := podsecuritypolicy.NewBasePolicy() - psp.GetObjectMeta().SetName(c.kubeControllerName) - return psp + return podsecuritypolicy.NewBasePolicy(c.kubeControllerName) } func (c *kubeControllersComponent) kubeControllersVolumeMounts() []corev1.VolumeMount { diff --git a/pkg/render/logstorage.go b/pkg/render/logstorage.go index 4150765759..4282da8713 100644 --- a/pkg/render/logstorage.go +++ b/pkg/render/logstorage.go @@ -43,6 +43,7 @@ import ( operatorv1 "github.com/tigera/operator/api/v1" "github.com/tigera/operator/pkg/components" "github.com/tigera/operator/pkg/dns" + "github.com/tigera/operator/pkg/ptr" relasticsearch "github.com/tigera/operator/pkg/render/common/elasticsearch" rmeta "github.com/tigera/operator/pkg/render/common/meta" "github.com/tigera/operator/pkg/render/common/networkpolicy" @@ -319,25 +320,19 @@ func (es *elasticsearchComponent) Objects() ([]client.Object, []client.Object) { toCreate = append(toCreate, es.eckOperatorClusterAdminClusterRoleBinding()) } - // Apply the pod security policies for all providers except OpenShift - if es.cfg.Provider != operatorv1.ProviderOpenShift { + if es.cfg.UsePSP { toCreate = append(toCreate, es.elasticsearchClusterRoleBinding(), - es.elasticsearchClusterRole()) - - if es.cfg.UsePSP { - toCreate = append(toCreate, - es.eckOperatorPodSecurityPolicy(), - es.elasticsearchPodSecurityPolicy()) - } - + es.elasticsearchClusterRole(), + es.eckOperatorPodSecurityPolicy(), + es.elasticsearchPodSecurityPolicy(), + ) if !operatorv1.IsFIPSModeEnabled(es.cfg.Installation.FIPSMode) { toCreate = append(toCreate, es.kibanaClusterRoleBinding(), - es.kibanaClusterRole()) - if es.cfg.UsePSP { - toCreate = append(toCreate, es.kibanaPodSecurityPolicy()) - } + es.kibanaClusterRole(), + es.kibanaPodSecurityPolicy(), + ) } } @@ -394,14 +389,12 @@ func (es *elasticsearchComponent) Objects() ([]client.Object, []client.Object) { toCreate = append(toCreate, secret.ToRuntimeObjects(secret.CopyToNamespace(ElasticsearchNamespace, es.cfg.CuratorSecrets...)...)...) toCreate = append(toCreate, es.esCuratorServiceAccount()) - // If the provider is not OpenShift apply the pod security policy for the curator. 
- if es.cfg.Provider != operatorv1.ProviderOpenShift { + if es.cfg.UsePSP { toCreate = append(toCreate, es.curatorClusterRole(), - es.curatorClusterRoleBinding()) - if es.cfg.UsePSP { - toCreate = append(toCreate, es.curatorPodSecurityPolicy()) - } + es.curatorClusterRoleBinding(), + es.curatorPodSecurityPolicy(), + ) } toCreate = append(toCreate, es.curatorCronJob()) @@ -1141,7 +1134,7 @@ func (es elasticsearchComponent) eckOperatorClusterRole() *rbacv1.ClusterRole { }, } - if es.cfg.Provider != operatorv1.ProviderOpenShift { + if es.cfg.UsePSP { // Allow access to the pod security policy in case this is enforced on the cluster rules = append(rules, rbacv1.PolicyRule{ APIGroups: []string{"policy"}, @@ -1315,9 +1308,7 @@ func (es elasticsearchComponent) eckOperatorStatefulSet() *appsv1.StatefulSet { } func (es elasticsearchComponent) eckOperatorPodSecurityPolicy() *policyv1beta1.PodSecurityPolicy { - psp := podsecuritypolicy.NewBasePolicy() - psp.GetObjectMeta().SetName(ECKOperatorName) - return psp + return podsecuritypolicy.NewBasePolicy(ECKOperatorName) } func (es elasticsearchComponent) kibanaServiceAccount() *corev1.ServiceAccount { @@ -1589,9 +1580,7 @@ func (es elasticsearchComponent) curatorClusterRoleBinding() *rbacv1.ClusterRole } func (es elasticsearchComponent) curatorPodSecurityPolicy() *policyv1beta1.PodSecurityPolicy { - psp := podsecuritypolicy.NewBasePolicy() - psp.GetObjectMeta().SetName(EsCuratorName) - return psp + return podsecuritypolicy.NewBasePolicy(EsCuratorName) } // Applying this in the eck namespace will start a trial license for enterprise features. @@ -1648,15 +1637,14 @@ func (es elasticsearchComponent) elasticsearchClusterRoleBinding() *rbacv1.Clust } func (es elasticsearchComponent) elasticsearchPodSecurityPolicy() *policyv1beta1.PodSecurityPolicy { - trueBool := true - ptrBoolTrue := &trueBool - psp := podsecuritypolicy.NewBasePolicy() - psp.GetObjectMeta().SetName("tigera-elasticsearch") + psp := podsecuritypolicy.NewBasePolicy("tigera-elasticsearch") psp.Spec.Privileged = true - psp.Spec.AllowPrivilegeEscalation = ptrBoolTrue + psp.Spec.AllowPrivilegeEscalation = ptr.BoolToPtr(true) psp.Spec.RequiredDropCapabilities = nil psp.Spec.AllowedCapabilities = []corev1.Capability{ - corev1.Capability("CAP_CHOWN"), + "SETGID", + "SETUID", + "SYS_CHROOT", } psp.Spec.RunAsUser.Rule = policyv1beta1.RunAsUserStrategyRunAsAny return psp @@ -1700,9 +1688,7 @@ func (es elasticsearchComponent) kibanaClusterRoleBinding() *rbacv1.ClusterRoleB } func (es elasticsearchComponent) kibanaPodSecurityPolicy() *policyv1beta1.PodSecurityPolicy { - psp := podsecuritypolicy.NewBasePolicy() - psp.GetObjectMeta().SetName("tigera-kibana") - return psp + return podsecuritypolicy.NewBasePolicy("tigera-kibana") } func (es elasticsearchComponent) oidcUserRole() client.Object { diff --git a/pkg/render/logstorage/esgateway/esgateway.go b/pkg/render/logstorage/esgateway/esgateway.go index 537690e51a..6eed6ee9a2 100644 --- a/pkg/render/logstorage/esgateway/esgateway.go +++ b/pkg/render/logstorage/esgateway/esgateway.go @@ -20,6 +20,7 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + policyv1beta1 "k8s.io/api/policy/v1beta1" rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" @@ -34,6 +35,7 @@ import ( rmeta "github.com/tigera/operator/pkg/render/common/meta" "github.com/tigera/operator/pkg/render/common/networkpolicy" "github.com/tigera/operator/pkg/render/common/podaffinity" + 
"github.com/tigera/operator/pkg/render/common/podsecuritypolicy" "github.com/tigera/operator/pkg/render/common/secret" "github.com/tigera/operator/pkg/render/common/securitycontext" "github.com/tigera/operator/pkg/render/intrusiondetection/dpi" @@ -45,6 +47,7 @@ const ( DeploymentName = "tigera-secure-es-gateway" ServiceAccountName = "tigera-secure-es-gateway" RoleName = "tigera-secure-es-gateway" + PodSecurityPolicyName = "tigera-esgateway" ServiceName = "tigera-secure-es-gateway-http" PolicyName = networkpolicy.TigeraComponentPolicyPrefix + "es-gateway-access" ElasticsearchPortName = "es-gateway-elasticsearch-port" @@ -77,6 +80,9 @@ type Config struct { TrustedBundle certificatemanagement.TrustedBundle ClusterDomain string EsAdminUserName string + + // Whether or not the cluster supports pod security policies. + UsePSP bool } func (e *esGateway) ResolveImages(is *operatorv1.ImageSet) error { @@ -116,6 +122,9 @@ func (e *esGateway) Objects() (toCreate, toDelete []client.Object) { } else { toCreate = append(toCreate, render.CreateCertificateSecret(e.cfg.ESGatewayKeyPair.GetCertificatePEM(), elasticsearch.PublicCertSecret, common.OperatorNamespace())) } + if e.cfg.UsePSP { + toCreate = append(toCreate, e.esGatewayPodSecurityPolicy()) + } return toCreate, toDelete } @@ -127,25 +136,36 @@ func (e *esGateway) SupportedOSType() rmeta.OSType { return rmeta.OSTypeLinux } -func (e esGateway) esGatewayRole() *rbacv1.Role { +func (e *esGateway) esGatewayRole() *rbacv1.Role { + rules := []rbacv1.PolicyRule{ + { + APIGroups: []string{""}, + Resources: []string{"secrets"}, + ResourceNames: []string{}, + Verbs: []string{"get", "list", "watch"}, + }, + } + + if e.cfg.UsePSP { + rules = append(rules, rbacv1.PolicyRule{ + APIGroups: []string{"policy"}, + Resources: []string{"podsecuritypolicies"}, + Verbs: []string{"use"}, + ResourceNames: []string{PodSecurityPolicyName}, + }) + } + return &rbacv1.Role{ TypeMeta: metav1.TypeMeta{Kind: "Role", APIVersion: "rbac.authorization.k8s.io/v1"}, ObjectMeta: metav1.ObjectMeta{ Name: RoleName, Namespace: render.ElasticsearchNamespace, }, - Rules: []rbacv1.PolicyRule{ - { - APIGroups: []string{""}, - Resources: []string{"secrets"}, - ResourceNames: []string{}, - Verbs: []string{"get", "list", "watch"}, - }, - }, + Rules: rules, } } -func (e esGateway) esGatewayRoleBinding() *rbacv1.RoleBinding { +func (e *esGateway) esGatewayRoleBinding() *rbacv1.RoleBinding { return &rbacv1.RoleBinding{ TypeMeta: metav1.TypeMeta{Kind: "RoleBinding", APIVersion: "rbac.authorization.k8s.io/v1"}, ObjectMeta: metav1.ObjectMeta{ @@ -167,7 +187,11 @@ func (e esGateway) esGatewayRoleBinding() *rbacv1.RoleBinding { } } -func (e esGateway) esGatewayDeployment() *appsv1.Deployment { +func (e *esGateway) esGatewayPodSecurityPolicy() *policyv1beta1.PodSecurityPolicy { + return podsecuritypolicy.NewBasePolicy(PodSecurityPolicyName) +} + +func (e *esGateway) esGatewayDeployment() *appsv1.Deployment { envVars := []corev1.EnvVar{ {Name: "ES_GATEWAY_LOG_LEVEL", Value: "INFO"}, {Name: "ES_GATEWAY_ELASTIC_ENDPOINT", Value: ElasticsearchHTTPSEndpoint}, @@ -266,7 +290,7 @@ func (e esGateway) esGatewayDeployment() *appsv1.Deployment { } } -func (e esGateway) esGatewayServiceAccount() *corev1.ServiceAccount { +func (e *esGateway) esGatewayServiceAccount() *corev1.ServiceAccount { return &corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ Name: ServiceAccountName, @@ -275,7 +299,7 @@ func (e esGateway) esGatewayServiceAccount() *corev1.ServiceAccount { } } -func (e esGateway) esGatewayService() 
*corev1.Service { +func (e *esGateway) esGatewayService() *corev1.Service { return &corev1.Service{ TypeMeta: metav1.TypeMeta{Kind: "Service", APIVersion: "v1"}, ObjectMeta: metav1.ObjectMeta{ diff --git a/pkg/render/logstorage/esgateway/esgateway_test.go b/pkg/render/logstorage/esgateway/esgateway_test.go index b762aaa1d0..7175fd0b69 100644 --- a/pkg/render/logstorage/esgateway/esgateway_test.go +++ b/pkg/render/logstorage/esgateway/esgateway_test.go @@ -24,6 +24,7 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + policyv1beta1 "k8s.io/api/policy/v1beta1" rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -84,6 +85,7 @@ var _ = Describe("ES Gateway rendering tests", func() { }, ClusterDomain: clusterDomain, EsAdminUserName: "elastic", + UsePSP: true, } }) @@ -99,6 +101,7 @@ var _ = Describe("ES Gateway rendering tests", func() { {ServiceAccountName, render.ElasticsearchNamespace, &corev1.ServiceAccount{}, nil}, {DeploymentName, render.ElasticsearchNamespace, &appsv1.Deployment{}, nil}, {relasticsearch.PublicCertSecret, common.OperatorNamespace(), &corev1.Secret{}, nil}, + {"tigera-esgateway", "", &policyv1beta1.PodSecurityPolicy{}, nil}, } component := EsGateway(cfg) @@ -141,6 +144,7 @@ var _ = Describe("ES Gateway rendering tests", func() { {ServiceAccountName, render.ElasticsearchNamespace, &corev1.ServiceAccount{}, nil}, {DeploymentName, render.ElasticsearchNamespace, &appsv1.Deployment{}, nil}, {relasticsearch.PublicCertSecret, common.OperatorNamespace(), &corev1.Secret{}, nil}, + {"tigera-esgateway", "", &policyv1beta1.PodSecurityPolicy{}, nil}, } component := EsGateway(cfg) @@ -149,6 +153,18 @@ var _ = Describe("ES Gateway rendering tests", func() { compareResources(createResources, expectedResources) }) + It("should render properly when PSP is not supported by the cluster", func() { + cfg.UsePSP = false + component := EsGateway(cfg) + Expect(component.ResolveImages(nil)).To(BeNil()) + resources, _ := component.Objects() + + // Should not contain any PodSecurityPolicies + for _, r := range resources { + Expect(r.GetObjectKind().GroupVersionKind().Kind).NotTo(Equal("PodSecurityPolicy")) + } + }) + It("should not render PodAffinity when ControlPlaneReplicas is 1", func() { var replicas int32 = 1 installation.ControlPlaneReplicas = &replicas diff --git a/pkg/render/logstorage/esmetrics/elasticsearch_metrics.go b/pkg/render/logstorage/esmetrics/elasticsearch_metrics.go index f6d5d3354b..46f7c2192a 100644 --- a/pkg/render/logstorage/esmetrics/elasticsearch_metrics.go +++ b/pkg/render/logstorage/esmetrics/elasticsearch_metrics.go @@ -19,6 +19,8 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + policyv1beta1 "k8s.io/api/policy/v1beta1" + rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "sigs.k8s.io/controller-runtime/pkg/client" @@ -31,17 +33,20 @@ import ( relasticsearch "github.com/tigera/operator/pkg/render/common/elasticsearch" rmeta "github.com/tigera/operator/pkg/render/common/meta" "github.com/tigera/operator/pkg/render/common/networkpolicy" + "github.com/tigera/operator/pkg/render/common/podsecuritypolicy" "github.com/tigera/operator/pkg/render/common/secret" "github.com/tigera/operator/pkg/render/common/securitycontext" "github.com/tigera/operator/pkg/tls/certificatemanagement" ) const ( - ElasticsearchMetricsSecret = "tigera-ee-elasticsearch-metrics-elasticsearch-access" - ElasticsearchMetricsServerTLSSecret = 
"tigera-ee-elasticsearch-metrics-tls" - ElasticsearchMetricsName = "tigera-elasticsearch-metrics" - ElasticsearchMetricsPolicyName = networkpolicy.TigeraComponentPolicyPrefix + "elasticsearch-metrics" - ElasticsearchMetricsPort = 9081 + ElasticsearchMetricsSecret = "tigera-ee-elasticsearch-metrics-elasticsearch-access" + ElasticsearchMetricsServerTLSSecret = "tigera-ee-elasticsearch-metrics-tls" + ElasticsearchMetricsName = "tigera-elasticsearch-metrics" + ElasticsearchMetricsRoleName = "tigera-elasticsearch-metrics" + ElasticsearchMetricsPodSecurityPolicyName = "tigera-elasticsearch-metrics" + ElasticsearchMetricsPolicyName = networkpolicy.TigeraComponentPolicyPrefix + "elasticsearch-metrics" + ElasticsearchMetricsPort = 9081 ) var ESMetricsSourceEntityRule = networkpolicy.CreateSourceEntityRule(render.ElasticsearchNamespace, ElasticsearchMetricsName) @@ -60,6 +65,9 @@ type Config struct { ClusterDomain string ServerTLS certificatemanagement.KeyPairInterface TrustedBundle certificatemanagement.TrustedBundle + + // Whether or not the cluster supports pod security policies. + UsePSP bool } type elasticsearchMetrics struct { @@ -89,6 +97,13 @@ func (e *elasticsearchMetrics) Objects() (objsToCreate, objsToDelete []client.Ob toCreate = append(toCreate, secret.ToRuntimeObjects(secret.CopyToNamespace(render.ElasticsearchNamespace, e.cfg.ESMetricsCredsSecret)...)...) toCreate = append(toCreate, e.metricsService(), e.metricsDeployment(), e.serviceAccount()) + if e.cfg.UsePSP { + toCreate = append(toCreate, + e.metricsRole(), + e.metricsRoleBinding(), + e.metricsPodSecurityPolicy(), + ) + } return toCreate, objsToDelete } @@ -110,6 +125,50 @@ func (e *elasticsearchMetrics) SupportedOSType() rmeta.OSType { return rmeta.OSTypeLinux } +func (e *elasticsearchMetrics) metricsRole() *rbacv1.Role { + return &rbacv1.Role{ + TypeMeta: metav1.TypeMeta{Kind: "Role", APIVersion: "rbac.authorization.k8s.io/v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: ElasticsearchMetricsRoleName, + Namespace: render.ElasticsearchNamespace, + }, + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{"policy"}, + Resources: []string{"podsecuritypolicies"}, + Verbs: []string{"use"}, + ResourceNames: []string{ElasticsearchMetricsPodSecurityPolicyName}, + }, + }, + } +} + +func (e *elasticsearchMetrics) metricsRoleBinding() *rbacv1.RoleBinding { + return &rbacv1.RoleBinding{ + TypeMeta: metav1.TypeMeta{Kind: "RoleBinding", APIVersion: "rbac.authorization.k8s.io/v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: ElasticsearchMetricsRoleName, + Namespace: render.ElasticsearchNamespace, + }, + RoleRef: rbacv1.RoleRef{ + Kind: "Role", + Name: ElasticsearchMetricsRoleName, + APIGroup: "rbac.authorization.k8s.io", + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: ElasticsearchMetricsName, + Namespace: render.ElasticsearchNamespace, + }, + }, + } +} + +func (e *elasticsearchMetrics) metricsPodSecurityPolicy() *policyv1beta1.PodSecurityPolicy { + return podsecuritypolicy.NewBasePolicy(ElasticsearchMetricsPodSecurityPolicyName) +} + func (e *elasticsearchMetrics) metricsService() *corev1.Service { return &corev1.Service{ TypeMeta: metav1.TypeMeta{Kind: "Service", APIVersion: "v1"}, diff --git a/pkg/render/logstorage/esmetrics/elasticsearch_metrics_test.go b/pkg/render/logstorage/esmetrics/elasticsearch_metrics_test.go index 9422da93b7..2887c03a8a 100644 --- a/pkg/render/logstorage/esmetrics/elasticsearch_metrics_test.go +++ b/pkg/render/logstorage/esmetrics/elasticsearch_metrics_test.go @@ -74,6 +74,7 @@ var _ = 
Describe("Elasticsearch metrics", func() { ClusterDomain: "cluster.local", ServerTLS: secret, TrustedBundle: bundle, + UsePSP: true, } }) @@ -102,8 +103,11 @@ var _ = Describe("Elasticsearch metrics", func() { {ElasticsearchMetricsName, render.ElasticsearchNamespace, "", "v1", "Service"}, {ElasticsearchMetricsName, render.ElasticsearchNamespace, "apps", "v1", "Deployment"}, {ElasticsearchMetricsName, render.ElasticsearchNamespace, "", "v1", "ServiceAccount"}, + {"tigera-elasticsearch-metrics", "tigera-elasticsearch", "rbac.authorization.k8s.io", "v1", "Role"}, + {"tigera-elasticsearch-metrics", "tigera-elasticsearch", "rbac.authorization.k8s.io", "v1", "RoleBinding"}, + {"tigera-elasticsearch-metrics", "", "policy", "v1beta1", "PodSecurityPolicy"}, } - Expect(len(resources)).To(Equal(len(expectedResources))) + Expect(resources).To(HaveLen(len(expectedResources))) for i, expectedRes := range expectedResources { rtest.ExpectResource(resources[i], expectedRes.name, expectedRes.ns, expectedRes.group, expectedRes.version, expectedRes.kind) } @@ -243,6 +247,18 @@ var _ = Describe("Elasticsearch metrics", func() { })) }) + It("should render properly when PSP is not supported by the cluster", func() { + cfg.UsePSP = false + component := ElasticsearchMetrics(cfg) + Expect(component.ResolveImages(nil)).To(BeNil()) + resources, _ := component.Objects() + + // Should not contain any PodSecurityPolicies + for _, r := range resources { + Expect(r.GetObjectKind().GroupVersionKind().Kind).NotTo(Equal("PodSecurityPolicy")) + } + }) + It("should apply controlPlaneNodeSelector correctly", func() { cfg.Installation.ControlPlaneNodeSelector = map[string]string{"foo": "bar"} diff --git a/pkg/render/logstorage/linseed/linseed.go b/pkg/render/logstorage/linseed/linseed.go index 36b1bd9b7f..09dc1fd877 100644 --- a/pkg/render/logstorage/linseed/linseed.go +++ b/pkg/render/logstorage/linseed/linseed.go @@ -28,6 +28,7 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + policyv1beta1 "k8s.io/api/policy/v1beta1" rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" @@ -38,6 +39,7 @@ import ( "github.com/tigera/operator/pkg/render" rmeta "github.com/tigera/operator/pkg/render/common/meta" "github.com/tigera/operator/pkg/render/common/podaffinity" + "github.com/tigera/operator/pkg/render/common/podsecuritypolicy" "github.com/tigera/operator/pkg/render/common/secret" "github.com/tigera/operator/pkg/tls/certificatemanagement" ) @@ -47,6 +49,7 @@ const ( ServiceAccountName = "tigera-linseed" RoleName = "tigera-linseed" ServiceName = "tigera-linseed" + PodSecurityPolicyName = "tigera-linseed" PolicyName = networkpolicy.TigeraComponentPolicyPrefix + "linseed-access" PortName = "tigera-linseed" TargetPort = 8444 @@ -89,6 +92,9 @@ type Config struct { // ESAdminUserName is the admin user used to connect to Elastic ESAdminUserName string + + // Whether or not the cluster supports pod security policies. 
+ UsePSP bool } func (l *linseed) ResolveImages(is *operatorv1.ImageSet) error { @@ -123,6 +129,9 @@ func (l *linseed) Objects() (toCreate, toDelete []client.Object) { toCreate = append(toCreate, l.linseedRoleBinding()) toCreate = append(toCreate, l.linseedServiceAccount()) toCreate = append(toCreate, l.linseedDeployment()) + if l.cfg.UsePSP { + toCreate = append(toCreate, l.linseedPodSecurityPolicy()) + } return toCreate, toDelete } @@ -134,26 +143,37 @@ func (l *linseed) SupportedOSType() rmeta.OSType { return rmeta.OSTypeLinux } -func (l linseed) linseedRole() *rbacv1.Role { +func (l *linseed) linseedRole() *rbacv1.Role { + rules := []rbacv1.PolicyRule{ + { + // Linseed uses subject access review to perform authorization of clients. + APIGroups: []string{"authorization.k8s.io"}, + Resources: []string{"subjectaccessreview"}, + ResourceNames: []string{}, + Verbs: []string{"create"}, + }, + } + + if l.cfg.UsePSP { + rules = append(rules, rbacv1.PolicyRule{ + APIGroups: []string{"policy"}, + Resources: []string{"podsecuritypolicies"}, + Verbs: []string{"use"}, + ResourceNames: []string{PodSecurityPolicyName}, + }) + } + return &rbacv1.Role{ TypeMeta: metav1.TypeMeta{Kind: "Role", APIVersion: "rbac.authorization.k8s.io/v1"}, ObjectMeta: metav1.ObjectMeta{ Name: RoleName, Namespace: l.namespace, }, - Rules: []rbacv1.PolicyRule{ - { - // Linseed uses subject access review to perform authorization of clients. - APIGroups: []string{"authorization.k8s.io"}, - Resources: []string{"subjectaccessreview"}, - ResourceNames: []string{}, - Verbs: []string{"create"}, - }, - }, + Rules: rules, } } -func (l linseed) linseedRoleBinding() *rbacv1.RoleBinding { +func (l *linseed) linseedRoleBinding() *rbacv1.RoleBinding { return &rbacv1.RoleBinding{ TypeMeta: metav1.TypeMeta{Kind: "RoleBinding", APIVersion: "rbac.authorization.k8s.io/v1"}, ObjectMeta: metav1.ObjectMeta{ @@ -175,7 +195,11 @@ func (l linseed) linseedRoleBinding() *rbacv1.RoleBinding { } } -func (l linseed) linseedDeployment() *appsv1.Deployment { +func (l *linseed) linseedPodSecurityPolicy() *policyv1beta1.PodSecurityPolicy { + return podsecuritypolicy.NewBasePolicy(PodSecurityPolicyName) +} + +func (l *linseed) linseedDeployment() *appsv1.Deployment { envVars := []corev1.EnvVar{ {Name: "LINSEED_LOG_LEVEL", Value: "INFO"}, {Name: "LINSEED_FIPS_MODE_ENABLED", Value: operatorv1.IsFIPSModeEnabledString(l.cfg.Installation.FIPSMode)}, @@ -287,7 +311,7 @@ func (l linseed) linseedDeployment() *appsv1.Deployment { } } -func (l linseed) linseedServiceAccount() *corev1.ServiceAccount { +func (l *linseed) linseedServiceAccount() *corev1.ServiceAccount { return &corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ Name: ServiceAccountName, @@ -296,7 +320,7 @@ func (l linseed) linseedServiceAccount() *corev1.ServiceAccount { } } -func (l linseed) linseedService() *corev1.Service { +func (l *linseed) linseedService() *corev1.Service { return &corev1.Service{ TypeMeta: metav1.TypeMeta{Kind: "Service", APIVersion: "v1"}, ObjectMeta: metav1.ObjectMeta{ diff --git a/pkg/render/logstorage/linseed/linseed_test.go b/pkg/render/logstorage/linseed/linseed_test.go index c6298d2799..0fdee0a5b0 100644 --- a/pkg/render/logstorage/linseed/linseed_test.go +++ b/pkg/render/logstorage/linseed/linseed_test.go @@ -18,34 +18,33 @@ import ( "context" "fmt" - "github.com/tigera/operator/pkg/common" - "github.com/tigera/operator/pkg/ptr" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/intstr" - - v3 "github.com/tigera/api/pkg/apis/projectcalico/v3" - 
"github.com/tigera/operator/pkg/render/testutils" - . "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo/extensions/table" . "github.com/onsi/gomega" - "github.com/tigera/operator/pkg/apis" - "github.com/tigera/operator/pkg/controller/certificatemanager" - "github.com/tigera/operator/pkg/dns" - "github.com/tigera/operator/pkg/tls/certificatemanagement" - "sigs.k8s.io/controller-runtime/pkg/client/fake" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + policyv1beta1 "k8s.io/api/policy/v1beta1" rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + v3 "github.com/tigera/api/pkg/apis/projectcalico/v3" operatorv1 "github.com/tigera/operator/api/v1" + "github.com/tigera/operator/pkg/apis" + "github.com/tigera/operator/pkg/common" + "github.com/tigera/operator/pkg/controller/certificatemanager" + "github.com/tigera/operator/pkg/dns" + "github.com/tigera/operator/pkg/ptr" "github.com/tigera/operator/pkg/render" "github.com/tigera/operator/pkg/render/common/podaffinity" rtest "github.com/tigera/operator/pkg/render/common/test" + "github.com/tigera/operator/pkg/render/testutils" + "github.com/tigera/operator/pkg/tls/certificatemanagement" ) type resourceTestObj struct { @@ -71,6 +70,7 @@ var _ = Describe("Linseed rendering tests", func() { {RoleName, render.ElasticsearchNamespace, &rbacv1.RoleBinding{}, nil}, {ServiceAccountName, render.ElasticsearchNamespace, &corev1.ServiceAccount{}, nil}, {DeploymentName, render.ElasticsearchNamespace, &appsv1.Deployment{}, nil}, + {"tigera-linseed", "", &policyv1beta1.PodSecurityPolicy{}, nil}, } BeforeEach(func() { @@ -90,6 +90,7 @@ var _ = Describe("Linseed rendering tests", func() { TrustedBundle: bundle, ClusterDomain: clusterDomain, ESAdminUserName: "elastic", + UsePSP: true, } }) @@ -100,6 +101,18 @@ var _ = Describe("Linseed rendering tests", func() { compareResources(createResources, expectedResources, false) }) + It("should render properly when PSP is not supported by the cluster", func() { + cfg.UsePSP = false + component := Linseed(cfg) + Expect(component.ResolveImages(nil)).To(BeNil()) + resources, _ := component.Objects() + + // Should not contain any PodSecurityPolicies + for _, r := range resources { + Expect(r.GetObjectKind().GroupVersionKind().Kind).NotTo(Equal("PodSecurityPolicy")) + } + }) + It("should render an Linseed deployment and all supporting resources when CertificateManagement is enabled", func() { secret, err := certificatemanagement.CreateSelfSignedSecret("", "", "", nil) Expect(err).NotTo(HaveOccurred()) @@ -114,6 +127,7 @@ var _ = Describe("Linseed rendering tests", func() { TrustedBundle: bundle, ClusterDomain: clusterDomain, ESAdminUserName: "elastic", + UsePSP: true, } component := Linseed(cfg) @@ -242,7 +256,7 @@ func getTLS(installation *operatorv1.InstallationSpec) (certificatemanagement.Ke } func compareResources(resources []client.Object, expectedResources []resourceTestObj, useCSR bool) { - Expect(len(resources)).To(Equal(len(expectedResources))) + Expect(resources).To(HaveLen(len(expectedResources))) for i, expectedResource := range expectedResources { resource := resources[i] actualName := resource.(metav1.ObjectMetaAccessor).GetObjectMeta().GetName() @@ -296,6 +310,12 @@ func compareResources(resources []client.Object, expectedResources []resourceTes ResourceNames: []string{}, Verbs: 
[]string{"create"}, }, + { + APIGroups: []string{"policy"}, + Resources: []string{"podsecuritypolicies"}, + ResourceNames: []string{"tigera-linseed"}, + Verbs: []string{"use"}, + }, })) clusterRoleBinding := rtest.GetResource(resources, RoleName, render.ElasticsearchNamespace, "rbac.authorization.k8s.io", "v1", "RoleBinding").(*rbacv1.RoleBinding) Expect(clusterRoleBinding.RoleRef.Name).To(Equal(RoleName)) diff --git a/pkg/render/manager.go b/pkg/render/manager.go index 1e4177a5d1..4875e87ee6 100644 --- a/pkg/render/manager.go +++ b/pkg/render/manager.go @@ -193,7 +193,7 @@ func (c *managerComponent) Objects() ([]client.Object, []client.Object) { objs = append(objs, managerServiceAccount(), - managerClusterRole(c.cfg.ManagementCluster != nil, false, c.cfg.Openshift), + managerClusterRole(c.cfg.ManagementCluster != nil, false, c.cfg.UsePSP), managerClusterRoleBinding(), managerClusterWideSettingsGroup(), managerUserSpecificSettingsGroup(), @@ -208,8 +208,9 @@ func (c *managerComponent) Objects() ([]client.Object, []client.Object) { // If we're running on openshift, we need to add in an SCC. if c.cfg.Openshift { objs = append(objs, c.securityContextConstraints()) - } else if c.cfg.UsePSP { - // If we're not running openshift, we need to add pod security policies. + } + + if c.cfg.UsePSP { objs = append(objs, c.managerPodSecurityPolicy()) } objs = append(objs, secret.ToRuntimeObjects(secret.CopyToNamespace(ManagerNamespace, c.cfg.ESSecrets...)...)...) @@ -562,7 +563,7 @@ func managerServiceAccount() *corev1.ServiceAccount { } // managerClusterRole returns a clusterrole that allows authn/authz review requests. -func managerClusterRole(managementCluster, managedCluster, openshift bool) *rbacv1.ClusterRole { +func managerClusterRole(managementCluster, managedCluster, usePSP bool) *rbacv1.ClusterRole { cr := &rbacv1.ClusterRole{ TypeMeta: metav1.TypeMeta{Kind: "ClusterRole", APIVersion: "rbac.authorization.k8s.io/v1"}, ObjectMeta: metav1.ObjectMeta{ @@ -673,7 +674,7 @@ func managerClusterRole(managementCluster, managedCluster, openshift bool) *rbac ) } - if !openshift { + if usePSP { // Allow access to the pod security policy in case this is enforced on the cluster cr.Rules = append(cr.Rules, rbacv1.PolicyRule{ @@ -742,9 +743,7 @@ func (c *managerComponent) getTLSObjects() []client.Object { } func (c *managerComponent) managerPodSecurityPolicy() *policyv1beta1.PodSecurityPolicy { - psp := podsecuritypolicy.NewBasePolicy() - psp.GetObjectMeta().SetName("tigera-manager") - return psp + return podsecuritypolicy.NewBasePolicy("tigera-manager") } // Allow users to access Calico Enterprise Manager. 
diff --git a/pkg/render/monitor/monitor.go b/pkg/render/monitor/monitor.go index fed68f6042..fcec1760c5 100644 --- a/pkg/render/monitor/monitor.go +++ b/pkg/render/monitor/monitor.go @@ -22,6 +22,7 @@ import ( monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" corev1 "k8s.io/api/core/v1" + policyv1beta1 "k8s.io/api/policy/v1beta1" rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -38,6 +39,7 @@ import ( "github.com/tigera/operator/pkg/render/common/configmap" rmeta "github.com/tigera/operator/pkg/render/common/meta" "github.com/tigera/operator/pkg/render/common/networkpolicy" + "github.com/tigera/operator/pkg/render/common/podsecuritypolicy" "github.com/tigera/operator/pkg/render/common/secret" "github.com/tigera/operator/pkg/render/common/securitycontext" "github.com/tigera/operator/pkg/render/logstorage/esmetrics" @@ -45,22 +47,24 @@ import ( ) const ( - MonitoringAPIVersion = "monitoring.coreos.com/v1" - CalicoNodeAlertmanager = "calico-node-alertmanager" - CalicoNodeMonitor = "calico-node-monitor" - CalicoNodePrometheus = "calico-node-prometheus" - ElasticsearchMetrics = "elasticsearch-metrics" - FluentdMetrics = "fluentd-metrics" - TigeraPrometheusObjectName = "tigera-prometheus" - TigeraPrometheusDPRate = "tigera-prometheus-dp-rate" - TigeraPrometheusRole = "tigera-prometheus-role" - TigeraPrometheusRoleBinding = "tigera-prometheus-role-binding" + MonitoringAPIVersion = "monitoring.coreos.com/v1" + CalicoNodeAlertmanager = "calico-node-alertmanager" + CalicoNodeMonitor = "calico-node-monitor" + CalicoNodePrometheus = "calico-node-prometheus" + + CalicoPrometheusOperator = "calico-prometheus-operator" + + TigeraPrometheusObjectName = "tigera-prometheus" + TigeraPrometheusDPRate = "tigera-prometheus-dp-rate" + TigeraPrometheusRole = "tigera-prometheus-role" + TigeraPrometheusRoleBinding = "tigera-prometheus-role-binding" + TigeraPrometheusPodSecurityPolicyName = "tigera-prometheus" PrometheusAPIPolicyName = networkpolicy.TigeraComponentPolicyPrefix + "tigera-prometheus-api" PrometheusClientTLSSecretName = "calico-node-prometheus-client-tls" PrometheusClusterRoleName = "prometheus" PrometheusDefaultPort = 9090 - PrometheusHTTPAPIServiceName = "prometheus-http-api" + PrometheusServiceServiceName = "prometheus-http-api" PrometheusOperatorPolicyName = networkpolicy.TigeraComponentPolicyPrefix + "prometheus-operator" PrometheusPolicyName = networkpolicy.TigeraComponentPolicyPrefix + "prometheus" PrometheusProxyPort = 9095 @@ -72,6 +76,9 @@ const ( AlertmanagerPort = 9093 MeshAlertManagerPolicyName = AlertManagerPolicyName + "-mesh" + ElasticsearchMetrics = "elasticsearch-metrics" + FluentdMetrics = "fluentd-metrics" + calicoNodePrometheusServiceName = "calico-node-prometheus" tigeraPrometheusServiceHealthEndpoint = "/health" @@ -113,6 +120,7 @@ type Config struct { TrustedCertBundle certificatemanagement.TrustedBundle Openshift bool KubeControllerPort int + UsePSP bool } type monitorComponent struct { @@ -167,37 +175,34 @@ func (mc *monitorComponent) Objects() ([]client.Object, []client.Object) { } // Create role and role bindings first. - // Operator needs the create/update roles for Alertmanger configuration secret for example. + // Operator needs the create/update roles for Alertmanager configuration secret for example. 
toCreate = append(toCreate, - mc.role(), - mc.roleBinding(), + mc.operatorRole(), + mc.operatorRoleBinding(), ) toCreate = append(toCreate, secret.ToRuntimeObjects(secret.CopyToNamespace(common.TigeraPrometheusNamespace, mc.cfg.PullSecrets...)...)...) toCreate = append(toCreate, secret.ToRuntimeObjects(secret.CopyToNamespace(common.TigeraPrometheusNamespace, mc.cfg.AlertmanagerConfigSecret)...)...) - // This is to delete a service that had been released in v3.8 with a typo in the name. - // TODO Remove the toDelete object after we drop support for v3.8. - toDelete := []client.Object{ - mc.serviceMonitorElasicsearchToDelete(), - } - toCreate = append(toCreate, - mc.alertmanagerService(), - mc.alertmanager(), + mc.prometheusOperatorServiceAccount(), + mc.prometheusOperatorClusterRole(), + mc.prometheusOperatorClusterRoleBinding(), mc.prometheusServiceAccount(), mc.prometheusClusterRole(), mc.prometheusClusterRoleBinding(), mc.prometheus(), + mc.alertmanagerService(), + mc.alertmanager(), + mc.prometheusServiceService(), + mc.prometheusServiceClusterRole(), + mc.prometheusServiceClusterRoleBinding(), mc.prometheusRule(), mc.serviceMonitorCalicoNode(), mc.serviceMonitorElasticsearch(), mc.serviceMonitorFluentd(), mc.serviceMonitorQueryServer(), mc.serviceMonitorCalicoKubeControllers(), - mc.prometheusHTTPAPIService(), - mc.clusterRole(), - mc.clusterRoleBinding(), ) if mc.cfg.KeyValidatorConfig != nil { @@ -205,58 +210,151 @@ func (mc *monitorComponent) Objects() ([]client.Object, []client.Object) { toCreate = append(toCreate, configmap.ToRuntimeObjects(mc.cfg.KeyValidatorConfig.RequiredConfigMaps(common.TigeraPrometheusNamespace)...)...) } + if mc.cfg.UsePSP { + toCreate = append(toCreate, mc.prometheusOperatorPodSecurityPolicy()) + } + // Remove the pod monitor that existed prior to v1.25. 
+ var toDelete []client.Object toDelete = append(toDelete, &monitoringv1.PodMonitor{ObjectMeta: metav1.ObjectMeta{Name: FluentdMetrics, Namespace: common.TigeraPrometheusNamespace}}) return toCreate, toDelete } -func (mc *monitorComponent) clusterRole() client.Object { +func (mc *monitorComponent) Ready() bool { + return true +} + +func (mc *monitorComponent) prometheusOperatorServiceAccount() *corev1.ServiceAccount { + return &corev1.ServiceAccount{ + TypeMeta: metav1.TypeMeta{Kind: "ServiceAccount", APIVersion: "v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: CalicoPrometheusOperator, + Namespace: common.TigeraPrometheusNamespace, + }, + } +} + +func (mc *monitorComponent) prometheusOperatorClusterRole() *rbacv1.ClusterRole { rules := []rbacv1.PolicyRule{ { - APIGroups: []string{"authentication.k8s.io"}, - Resources: []string{"tokenreviews"}, - Verbs: []string{"create"}, + APIGroups: []string{"monitoring.coreos.com"}, + Resources: []string{ + "alertmanagers", + "alertmanagers/finalizers", + "alertmanagerconfigs", + "prometheuses", + "prometheuses/finalizers", + "prometheuses/status", + "thanosrulers", + "thanosrulers/finalizers", + "servicemonitors", + "podmonitors", + "probes", + "prometheusrules", + }, + Verbs: []string{"*"}, }, { - APIGroups: []string{"authorization.k8s.io"}, - Resources: []string{"subjectaccessreviews"}, - Verbs: []string{"create"}, + APIGroups: []string{"apps"}, + Resources: []string{"statefulsets"}, + Verbs: []string{"*"}, + }, + { + APIGroups: []string{""}, + Resources: []string{ + "configmaps", + "secrets", + }, + Verbs: []string{"*"}, + }, + { + APIGroups: []string{""}, + Resources: []string{"pods"}, + Verbs: []string{ + "delete", + "list", + }, + }, + { + APIGroups: []string{""}, + Resources: []string{ + "services", + "services/finalizers", + "endpoints", + }, + Verbs: []string{ + "create", + "delete", + "get", + "update", + }, }, + { + APIGroups: []string{""}, + Resources: []string{"nodes"}, + Verbs: []string{ + "list", + "watch", + }, + }, + { + APIGroups: []string{""}, + Resources: []string{"namespaces"}, + Verbs: []string{ + "get", + "list", + "watch", + }, + }, + { + APIGroups: []string{"networking.k8s.io"}, + Resources: []string{"ingresses"}, + Verbs: []string{ + "get", + "list", + "watch", + }, + }, + } + + if mc.cfg.UsePSP { + rules = append(rules, rbacv1.PolicyRule{ + APIGroups: []string{"policy"}, + Resources: []string{"podsecuritypolicies"}, + Verbs: []string{"use"}, + ResourceNames: []string{TigeraPrometheusPodSecurityPolicyName}, + }) } return &rbacv1.ClusterRole{ - TypeMeta: metav1.TypeMeta{Kind: "ClusterRole", APIVersion: "rbac.authorization.k8s.io/v1"}, - ObjectMeta: metav1.ObjectMeta{ - Name: TigeraPrometheusObjectName, - }, - Rules: rules, + TypeMeta: metav1.TypeMeta{Kind: "ClusterRole", APIVersion: "rbac.authorization.k8s.io/v1"}, + ObjectMeta: metav1.ObjectMeta{Name: CalicoPrometheusOperator}, + Rules: rules, } } -func (mc *monitorComponent) clusterRoleBinding() client.Object { +func (mc *monitorComponent) prometheusOperatorClusterRoleBinding() *rbacv1.ClusterRoleBinding { return &rbacv1.ClusterRoleBinding{ - TypeMeta: metav1.TypeMeta{Kind: "ClusterRoleBinding", APIVersion: "rbac.authorization.k8s.io/v1"}, - ObjectMeta: metav1.ObjectMeta{ - Name: TigeraPrometheusObjectName, - }, - RoleRef: rbacv1.RoleRef{ - APIGroup: "rbac.authorization.k8s.io", - Kind: "ClusterRole", - Name: TigeraPrometheusObjectName, - }, + TypeMeta: metav1.TypeMeta{Kind: "ClusterRoleBinding", APIVersion: "rbac.authorization.k8s.io/v1"}, + ObjectMeta: 
metav1.ObjectMeta{Name: CalicoPrometheusOperator}, Subjects: []rbacv1.Subject{ { Kind: "ServiceAccount", - Name: PrometheusServiceAccountName, + Name: CalicoPrometheusOperator, Namespace: common.TigeraPrometheusNamespace, }, }, + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "ClusterRole", + Name: CalicoPrometheusOperator, + }, } } -func (mc *monitorComponent) Ready() bool { - return true +func (mc *monitorComponent) prometheusOperatorPodSecurityPolicy() *policyv1beta1.PodSecurityPolicy { + return podsecuritypolicy.NewBasePolicy(TigeraPrometheusPodSecurityPolicyName) } func (mc *monitorComponent) alertmanager() *monitoringv1.Alertmanager { @@ -267,13 +365,14 @@ func (mc *monitorComponent) alertmanager() *monitoringv1.Alertmanager { Namespace: common.TigeraPrometheusNamespace, }, Spec: monitoringv1.AlertmanagerSpec{ - Image: &mc.alertmanagerImage, - ImagePullSecrets: secret.GetReferenceList(mc.cfg.PullSecrets), - Replicas: ptr.Int32ToPtr(3), - Version: components.ComponentCoreOSAlertmanager.Version, - Tolerations: mc.cfg.Installation.ControlPlaneTolerations, - NodeSelector: mc.cfg.Installation.ControlPlaneNodeSelector, - SecurityContext: securitycontext.NewNonRootPodContext(), + Image: &mc.alertmanagerImage, + ImagePullSecrets: secret.GetReferenceList(mc.cfg.PullSecrets), + NodeSelector: mc.cfg.Installation.ControlPlaneNodeSelector, + Replicas: ptr.Int32ToPtr(3), + SecurityContext: securitycontext.NewNonRootPodContext(), + ServiceAccountName: PrometheusServiceAccountName, + Tolerations: mc.cfg.Installation.ControlPlaneTolerations, + Version: components.ComponentCoreOSAlertmanager.Version, }, } } @@ -448,40 +547,51 @@ func (mc *monitorComponent) prometheusServiceAccount() *corev1.ServiceAccount { } func (mc *monitorComponent) prometheusClusterRole() *rbacv1.ClusterRole { - return &rbacv1.ClusterRole{ - TypeMeta: metav1.TypeMeta{Kind: "ClusterRole", APIVersion: "rbac.authorization.k8s.io/v1"}, - ObjectMeta: metav1.ObjectMeta{Name: PrometheusClusterRoleName}, - Rules: []rbacv1.PolicyRule{ - { - APIGroups: []string{""}, - Resources: []string{ - "endpoints", - "nodes", - "pods", - "services", - }, - Verbs: []string{ - "get", - "list", - "watch", - }, - }, - { - APIGroups: []string{""}, - Resources: []string{"configmaps"}, - Verbs: []string{"get"}, - }, - { - APIGroups: []string{""}, - Resources: []string{"services/proxy"}, - ResourceNames: []string{"https:tigera-api:8080"}, - Verbs: []string{"get"}, + rules := []rbacv1.PolicyRule{ + { + APIGroups: []string{""}, + Resources: []string{ + "endpoints", + "nodes", + "pods", + "services", }, - { - NonResourceURLs: []string{"/metrics"}, - Verbs: []string{"get"}, + Verbs: []string{ + "get", + "list", + "watch", }, }, + { + APIGroups: []string{""}, + Resources: []string{"configmaps"}, + Verbs: []string{"get"}, + }, + { + APIGroups: []string{""}, + Resources: []string{"services/proxy"}, + ResourceNames: []string{"https:tigera-api:8080"}, + Verbs: []string{"get"}, + }, + { + NonResourceURLs: []string{"/metrics"}, + Verbs: []string{"get"}, + }, + } + + if mc.cfg.UsePSP { + rules = append(rules, rbacv1.PolicyRule{ + APIGroups: []string{"policy"}, + Resources: []string{"podsecuritypolicies"}, + Verbs: []string{"use"}, + ResourceNames: []string{TigeraPrometheusPodSecurityPolicyName}, + }) + } + + return &rbacv1.ClusterRole{ + TypeMeta: metav1.TypeMeta{Kind: "ClusterRole", APIVersion: "rbac.authorization.k8s.io/v1"}, + ObjectMeta: metav1.ObjectMeta{Name: PrometheusClusterRoleName}, + Rules: rules, } } @@ -504,15 +614,59 @@ func (mc 
*monitorComponent) prometheusClusterRoleBinding() *rbacv1.ClusterRoleBi } } -// prometheusHTTPAPIService sets up a service to open http connection for the prometheus instance -func (mc *monitorComponent) prometheusHTTPAPIService() *corev1.Service { +func (mc *monitorComponent) prometheusServiceClusterRole() client.Object { + rules := []rbacv1.PolicyRule{ + { + APIGroups: []string{"authentication.k8s.io"}, + Resources: []string{"tokenreviews"}, + Verbs: []string{"create"}, + }, + { + APIGroups: []string{"authorization.k8s.io"}, + Resources: []string{"subjectaccessreviews"}, + Verbs: []string{"create"}, + }, + } + + return &rbacv1.ClusterRole{ + TypeMeta: metav1.TypeMeta{Kind: "ClusterRole", APIVersion: "rbac.authorization.k8s.io/v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: TigeraPrometheusObjectName, + }, + Rules: rules, + } +} + +func (mc *monitorComponent) prometheusServiceClusterRoleBinding() client.Object { + return &rbacv1.ClusterRoleBinding{ + TypeMeta: metav1.TypeMeta{Kind: "ClusterRoleBinding", APIVersion: "rbac.authorization.k8s.io/v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: TigeraPrometheusObjectName, + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "ClusterRole", + Name: TigeraPrometheusObjectName, + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: PrometheusServiceAccountName, + Namespace: common.TigeraPrometheusNamespace, + }, + }, + } +} + +// prometheusServiceService sets up a service to open http connection for the prometheus instance +func (mc *monitorComponent) prometheusServiceService() *corev1.Service { return &corev1.Service{ TypeMeta: metav1.TypeMeta{ Kind: "Service", APIVersion: "v1", }, ObjectMeta: metav1.ObjectMeta{ - Name: PrometheusHTTPAPIServiceName, + Name: PrometheusServiceServiceName, Namespace: common.TigeraPrometheusNamespace, }, Spec: corev1.ServiceSpec{ @@ -692,20 +846,7 @@ func (mc *monitorComponent) serviceMonitorQueryServer() *monitoringv1.ServiceMon } } -// This is to delete a service that had been released in v3.8 with a typo in the name. -// TODO Remove this object after we drop support for v3.8. -func (mc *monitorComponent) serviceMonitorElasicsearchToDelete() *monitoringv1.ServiceMonitor { - return &monitoringv1.ServiceMonitor{ - TypeMeta: metav1.TypeMeta{Kind: monitoringv1.ServiceMonitorsKind, APIVersion: MonitoringAPIVersion}, - ObjectMeta: metav1.ObjectMeta{ - Name: "elasticearch-metrics", - Namespace: common.TigeraPrometheusNamespace, - Labels: map[string]string{"team": "network-operators"}, - }, - } -} - -func (mc *monitorComponent) role() *rbacv1.Role { +func (mc *monitorComponent) operatorRole() *rbacv1.Role { // list and watch have to be cluster scopes for watches to work. // In controller-runtime, watches are by default non-namespaced. 
return &rbacv1.Role{ @@ -738,7 +879,7 @@ func (mc *monitorComponent) role() *rbacv1.Role { } } -func (mc *monitorComponent) roleBinding() *rbacv1.RoleBinding { +func (mc *monitorComponent) operatorRoleBinding() *rbacv1.RoleBinding { return &rbacv1.RoleBinding{ TypeMeta: metav1.TypeMeta{Kind: "RoleBinding", APIVersion: "rbac.authorization.k8s.io/v1"}, ObjectMeta: metav1.ObjectMeta{ diff --git a/pkg/render/monitor/monitor_test.go b/pkg/render/monitor/monitor_test.go index 41097d1b91..9fca51ad7a 100644 --- a/pkg/render/monitor/monitor_test.go +++ b/pkg/render/monitor/monitor_test.go @@ -93,6 +93,7 @@ var _ = Describe("monitor rendering tests", func() { AlertmanagerConfigSecret: defaultAlertmanagerConfigSecret, ClusterDomain: "example.org", TrustedCertBundle: bundle, + UsePSP: true, } }) @@ -114,21 +115,25 @@ var _ = Describe("monitor rendering tests", func() { {"tigera-prometheus-role-binding", common.TigeraPrometheusNamespace, "rbac.authorization.k8s.io", "v1", "RoleBinding"}, {"tigera-pull-secret", common.TigeraPrometheusNamespace, "", "", ""}, {"alertmanager-calico-node-alertmanager", common.TigeraPrometheusNamespace, "", "v1", "Secret"}, - {"calico-node-alertmanager", common.TigeraPrometheusNamespace, "", "v1", "Service"}, - {"calico-node-alertmanager", common.TigeraPrometheusNamespace, "monitoring.coreos.com", "v1", monitoringv1.AlertmanagersKind}, + {"calico-prometheus-operator", "tigera-prometheus", "", "v1", "ServiceAccount"}, + {"calico-prometheus-operator", "", "rbac.authorization.k8s.io", "v1", "ClusterRole"}, + {"calico-prometheus-operator", "", "rbac.authorization.k8s.io", "v1", "ClusterRoleBinding"}, {"prometheus", common.TigeraPrometheusNamespace, "", "v1", "ServiceAccount"}, {"prometheus", "", "rbac.authorization.k8s.io", "v1", "ClusterRole"}, {"prometheus", "", "rbac.authorization.k8s.io", "v1", "ClusterRoleBinding"}, {"calico-node-prometheus", common.TigeraPrometheusNamespace, "monitoring.coreos.com", "v1", monitoringv1.PrometheusesKind}, + {"calico-node-alertmanager", common.TigeraPrometheusNamespace, "", "v1", "Service"}, + {"calico-node-alertmanager", common.TigeraPrometheusNamespace, "monitoring.coreos.com", "v1", monitoringv1.AlertmanagersKind}, + {"prometheus-http-api", common.TigeraPrometheusNamespace, "", "v1", "Service"}, + {"tigera-prometheus", "", "rbac.authorization.k8s.io", "v1", "ClusterRole"}, + {"tigera-prometheus", "", "rbac.authorization.k8s.io", "v1", "ClusterRoleBinding"}, {"tigera-prometheus-dp-rate", common.TigeraPrometheusNamespace, "monitoring.coreos.com", "v1", monitoringv1.PrometheusRuleKind}, {"calico-node-monitor", common.TigeraPrometheusNamespace, "monitoring.coreos.com", "v1", monitoringv1.ServiceMonitorsKind}, {"elasticsearch-metrics", common.TigeraPrometheusNamespace, "monitoring.coreos.com", "v1", monitoringv1.ServiceMonitorsKind}, {"fluentd-metrics", common.TigeraPrometheusNamespace, "monitoring.coreos.com", "v1", monitoringv1.ServiceMonitorsKind}, {"tigera-api", common.TigeraPrometheusNamespace, "monitoring.coreos.com", "v1", monitoringv1.ServiceMonitorsKind}, {"calico-kube-controllers-metrics", common.TigeraPrometheusNamespace, "monitoring.coreos.com", "v1", monitoringv1.ServiceMonitorsKind}, - {"prometheus-http-api", common.TigeraPrometheusNamespace, "", "v1", "Service"}, - {name: monitor.TigeraPrometheusObjectName, ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRole"}, - {name: monitor.TigeraPrometheusObjectName, ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRoleBinding"}, + 
{"tigera-prometheus", "", "policy", "v1beta1", "PodSecurityPolicy"}, } Expect(toCreate).To(HaveLen(len(expectedResources))) @@ -138,10 +143,7 @@ var _ = Describe("monitor rendering tests", func() { rtest.ExpectResource(obj, expectedRes.name, expectedRes.ns, expectedRes.group, expectedRes.version, expectedRes.kind) } - Expect(toDelete).To(HaveLen(2)) - - obj := toDelete[0] - rtest.ExpectResource(obj, "elasticearch-metrics", common.TigeraPrometheusNamespace, "monitoring.coreos.com", "v1", monitoringv1.ServiceMonitorsKind) + Expect(toDelete).To(HaveLen(1)) // Check the namespace. namespace := rtest.GetResource(toCreate, "tigera-prometheus", "", "", "v1", "Namespace").(*corev1.Namespace) @@ -154,6 +156,9 @@ var _ = Describe("monitor rendering tests", func() { Expect(component.ResolveImages(nil)).NotTo(HaveOccurred()) toCreate, _ := component.Objects() + // Prometheus Operator + // TODO + // Alertmanager alertmanagerObj, ok := rtest.GetResource(toCreate, monitor.CalicoNodeAlertmanager, common.TigeraPrometheusNamespace, "monitoring.coreos.com", "v1", monitoringv1.AlertmanagersKind).(*monitoringv1.Alertmanager) Expect(ok).To(BeTrue()) @@ -218,7 +223,7 @@ var _ = Describe("monitor rendering tests", func() { // Prometheus ClusterRole prometheusClusterRoleObj, ok := rtest.GetResource(toCreate, "prometheus", "", "rbac.authorization.k8s.io", "v1", "ClusterRole").(*rbacv1.ClusterRole) Expect(ok).To(BeTrue()) - Expect(prometheusClusterRoleObj.Rules).To(HaveLen(4)) + Expect(prometheusClusterRoleObj.Rules).To(HaveLen(5)) Expect(prometheusClusterRoleObj.Rules[0].APIGroups).To(HaveLen(1)) Expect(prometheusClusterRoleObj.Rules[0].APIGroups[0]).To(Equal("")) Expect(prometheusClusterRoleObj.Rules[0].Resources).To(HaveLen(4)) @@ -252,6 +257,14 @@ var _ = Describe("monitor rendering tests", func() { Expect(prometheusClusterRoleObj.Rules[3].NonResourceURLs[0]).To(Equal("/metrics")) Expect(prometheusClusterRoleObj.Rules[3].Verbs).To(HaveLen(1)) Expect(prometheusClusterRoleObj.Rules[3].Verbs[0]).To(Equal("get")) + Expect(prometheusClusterRoleObj.Rules[4].APIGroups).To(HaveLen(1)) + Expect(prometheusClusterRoleObj.Rules[4].APIGroups[0]).To(Equal("policy")) + Expect(prometheusClusterRoleObj.Rules[4].Resources).To(HaveLen(1)) + Expect(prometheusClusterRoleObj.Rules[4].Resources[0]).To(Equal("podsecuritypolicies")) + Expect(prometheusClusterRoleObj.Rules[4].ResourceNames).To(HaveLen(1)) + Expect(prometheusClusterRoleObj.Rules[4].ResourceNames[0]).To(Equal("tigera-prometheus")) + Expect(prometheusClusterRoleObj.Rules[4].Verbs).To(HaveLen(1)) + Expect(prometheusClusterRoleObj.Rules[4].Verbs[0]).To(Equal("use")) // Prometheus ClusterRoleBinding prometheusClusterRolebindingObj, ok := rtest.GetResource(toCreate, "prometheus", "", "rbac.authorization.k8s.io", "v1", "ClusterRoleBinding").(*rbacv1.ClusterRoleBinding) @@ -265,7 +278,7 @@ var _ = Describe("monitor rendering tests", func() { Expect(prometheusClusterRolebindingObj.Subjects[0].Namespace).To(Equal("tigera-prometheus")) // Prometheus HTTP API service - prometheusServiceObj, ok := rtest.GetResource(toCreate, monitor.PrometheusHTTPAPIServiceName, common.TigeraPrometheusNamespace, "", "v1", "Service").(*corev1.Service) + prometheusServiceObj, ok := rtest.GetResource(toCreate, "prometheus-http-api", common.TigeraPrometheusNamespace, "", "v1", "Service").(*corev1.Service) Expect(ok).To(BeTrue()) Expect(prometheusServiceObj.Spec.Selector).To(HaveLen(1)) Expect(prometheusServiceObj.Spec.Selector["prometheus"]).To(Equal("calico-node-prometheus")) @@ -402,6 +415,18 @@ var _ = 
Describe("monitor rendering tests", func() { Expect(rolebindingObj.Subjects[0].Namespace).To(Equal(common.OperatorNamespace())) }) + It("should render properly when PSP is not supported by the cluster", func() { + cfg.UsePSP = false + component := monitor.Monitor(cfg) + Expect(component.ResolveImages(nil)).To(BeNil()) + resources, _ := component.Objects() + + // Should not contain any PodSecurityPolicies + for _, r := range resources { + Expect(r.GetObjectKind().GroupVersionKind().Kind).NotTo(Equal("PodSecurityPolicy")) + } + }) + It("Should render Prometheus resources when Dex is enabled", func() { authentication := &operatorv1.Authentication{ Spec: operatorv1.AuthenticationSpec{ @@ -435,21 +460,25 @@ var _ = Describe("monitor rendering tests", func() { {"tigera-prometheus-role-binding", common.TigeraPrometheusNamespace, "rbac.authorization.k8s.io", "v1", "RoleBinding"}, {"tigera-pull-secret", common.TigeraPrometheusNamespace, "", "", ""}, {"alertmanager-calico-node-alertmanager", common.TigeraPrometheusNamespace, "", "v1", "Secret"}, - {"calico-node-alertmanager", common.TigeraPrometheusNamespace, "", "v1", "Service"}, - {"calico-node-alertmanager", common.TigeraPrometheusNamespace, "monitoring.coreos.com", "v1", monitoringv1.AlertmanagersKind}, + {"calico-prometheus-operator", "tigera-prometheus", "", "v1", "ServiceAccount"}, + {"calico-prometheus-operator", "", "rbac.authorization.k8s.io", "v1", "ClusterRole"}, + {"calico-prometheus-operator", "", "rbac.authorization.k8s.io", "v1", "ClusterRoleBinding"}, {"prometheus", common.TigeraPrometheusNamespace, "", "v1", "ServiceAccount"}, {"prometheus", "", "rbac.authorization.k8s.io", "v1", "ClusterRole"}, {"prometheus", "", "rbac.authorization.k8s.io", "v1", "ClusterRoleBinding"}, {"calico-node-prometheus", common.TigeraPrometheusNamespace, "monitoring.coreos.com", "v1", monitoringv1.PrometheusesKind}, + {"calico-node-alertmanager", common.TigeraPrometheusNamespace, "", "v1", "Service"}, + {"calico-node-alertmanager", common.TigeraPrometheusNamespace, "monitoring.coreos.com", "v1", monitoringv1.AlertmanagersKind}, + {"prometheus-http-api", common.TigeraPrometheusNamespace, "", "v1", "Service"}, + {"tigera-prometheus", "", "rbac.authorization.k8s.io", "v1", "ClusterRole"}, + {"tigera-prometheus", "", "rbac.authorization.k8s.io", "v1", "ClusterRoleBinding"}, {"tigera-prometheus-dp-rate", common.TigeraPrometheusNamespace, "monitoring.coreos.com", "v1", monitoringv1.PrometheusRuleKind}, {"calico-node-monitor", common.TigeraPrometheusNamespace, "monitoring.coreos.com", "v1", monitoringv1.ServiceMonitorsKind}, {"elasticsearch-metrics", common.TigeraPrometheusNamespace, "monitoring.coreos.com", "v1", monitoringv1.ServiceMonitorsKind}, {"fluentd-metrics", common.TigeraPrometheusNamespace, "monitoring.coreos.com", "v1", monitoringv1.ServiceMonitorsKind}, {"tigera-api", common.TigeraPrometheusNamespace, "monitoring.coreos.com", "v1", monitoringv1.ServiceMonitorsKind}, {"calico-kube-controllers-metrics", common.TigeraPrometheusNamespace, "monitoring.coreos.com", "v1", monitoringv1.ServiceMonitorsKind}, - {"prometheus-http-api", common.TigeraPrometheusNamespace, "", "v1", "Service"}, - {monitor.TigeraPrometheusObjectName, "", "rbac.authorization.k8s.io", "v1", "ClusterRole"}, - {monitor.TigeraPrometheusObjectName, "", "rbac.authorization.k8s.io", "v1", "ClusterRoleBinding"}, + {"tigera-prometheus", "", "policy", "v1beta1", "PodSecurityPolicy"}, } Expect(toCreate).To(HaveLen(len(expectedResources))) @@ -459,7 +488,7 @@ var _ = Describe("monitor rendering 
tests", func() { rtest.ExpectResource(obj, expectedRes.name, expectedRes.ns, expectedRes.group, expectedRes.version, expectedRes.kind) } - Expect(toDelete).To(HaveLen(2)) + Expect(toDelete).To(HaveLen(1)) // Prometheus prometheusObj, ok := rtest.GetResource(toCreate, monitor.CalicoNodePrometheus, common.TigeraPrometheusNamespace, "monitoring.coreos.com", "v1", monitoringv1.PrometheusesKind).(*monitoringv1.Prometheus) diff --git a/pkg/render/node.go b/pkg/render/node.go index 93f9edc6a1..d4c5c5c70b 100644 --- a/pkg/render/node.go +++ b/pkg/render/node.go @@ -215,7 +215,7 @@ func (c *nodeComponent) Objects() ([]client.Object, []client.Object) { objs = append(objs, c.clusterAdminClusterRoleBinding()) } - if c.cfg.Installation.KubernetesProvider != operatorv1.ProviderOpenShift && c.cfg.UsePSP { + if c.cfg.UsePSP { objs = append(objs, c.nodePodSecurityPolicy()) } @@ -480,7 +480,7 @@ func (c *nodeComponent) nodeRole() *rbacv1.ClusterRole { } role.Rules = append(role.Rules, extraRules...) } - if c.cfg.Installation.KubernetesProvider != operatorv1.ProviderOpenShift { + if c.cfg.UsePSP { // Allow access to the pod security policy in case this is enforced on the cluster role.Rules = append(role.Rules, rbacv1.PolicyRule{ APIGroups: []string{"policy"}, @@ -1619,8 +1619,7 @@ func (c *nodeComponent) nodeMetricsService() *corev1.Service { } func (c *nodeComponent) nodePodSecurityPolicy() *policyv1beta1.PodSecurityPolicy { - psp := podsecuritypolicy.NewBasePolicy() - psp.GetObjectMeta().SetName(common.NodeDaemonSetName) + psp := podsecuritypolicy.NewBasePolicy(common.NodeDaemonSetName) psp.Spec.Privileged = true psp.Spec.AllowPrivilegeEscalation = ptr.BoolToPtr(true) psp.Spec.Volumes = append(psp.Spec.Volumes, policyv1beta1.HostPath) diff --git a/pkg/render/node_test.go b/pkg/render/node_test.go index 735de120b4..2ea1223c08 100644 --- a/pkg/render/node_test.go +++ b/pkg/render/node_test.go @@ -1714,6 +1714,7 @@ var _ = Describe("Node rendering tests", func() { {name: "calico-node", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRole"}, {name: "calico-node", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRoleBinding"}, {name: "cni-config", ns: common.CalicoNamespace, group: "", version: "v1", kind: "ConfigMap"}, + {name: common.NodeDaemonSetName, ns: "", group: "policy", version: "v1beta1", kind: "PodSecurityPolicy"}, {name: common.NodeDaemonSetName, ns: common.CalicoNamespace, group: "apps", version: "v1", kind: "DaemonSet"}, } @@ -1826,6 +1827,7 @@ var _ = Describe("Node rendering tests", func() { {name: "calico-node", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRoleBinding"}, {name: "calico-node-metrics", ns: "calico-system", group: "", version: "v1", kind: "Service"}, {name: "cni-config", ns: common.CalicoNamespace, group: "", version: "v1", kind: "ConfigMap"}, + {name: common.NodeDaemonSetName, ns: "", group: "policy", version: "v1beta1", kind: "PodSecurityPolicy"}, {name: common.NodeDaemonSetName, ns: common.CalicoNamespace, group: "apps", version: "v1", kind: "DaemonSet"}, } @@ -2017,6 +2019,7 @@ var _ = Describe("Node rendering tests", func() { {name: "calico-node", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRoleBinding"}, {name: "cni-config", ns: common.CalicoNamespace, group: "", version: "v1", kind: "ConfigMap"}, {name: render.BirdTemplatesConfigMapName, ns: common.CalicoNamespace, group: "", version: "v1", kind: "ConfigMap"}, + {name: common.NodeDaemonSetName, ns: "", group: "policy", 
version: "v1beta1", kind: "PodSecurityPolicy"}, {name: common.NodeDaemonSetName, ns: common.CalicoNamespace, group: "apps", version: "v1", kind: "DaemonSet"}, } @@ -2177,7 +2180,7 @@ var _ = Describe("Node rendering tests", func() { component := render.Node(&cfg) Expect(component.ResolveImages(nil)).To(BeNil()) resources, _ := component.Objects() - Expect(len(resources)).To(Equal(defaultNumExpectedResources-1), fmt.Sprintf("resources are %v", resources)) + Expect(len(resources)).To(Equal(defaultNumExpectedResources), fmt.Sprintf("resources are %v", resources)) // Should render the correct resources. Expect(rtest.GetResource(resources, "calico-node", "calico-system", "", "v1", "ServiceAccount")).ToNot(BeNil()) diff --git a/pkg/render/packet_capture_api.go b/pkg/render/packet_capture_api.go index 907268df6e..e6bf579094 100644 --- a/pkg/render/packet_capture_api.go +++ b/pkg/render/packet_capture_api.go @@ -17,6 +17,7 @@ package render import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + policyv1beta1 "k8s.io/api/policy/v1beta1" rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" @@ -30,6 +31,7 @@ import ( "github.com/tigera/operator/pkg/render/common/configmap" rmeta "github.com/tigera/operator/pkg/render/common/meta" "github.com/tigera/operator/pkg/render/common/networkpolicy" + "github.com/tigera/operator/pkg/render/common/podsecuritypolicy" "github.com/tigera/operator/pkg/render/common/secret" "github.com/tigera/operator/pkg/render/common/securitycontext" "github.com/tigera/operator/pkg/tls/certificatemanagement" @@ -45,6 +47,7 @@ const ( PacketCaptureClusterRoleBindingName = PacketCaptureName PacketCaptureDeploymentName = PacketCaptureName PacketCaptureServiceName = PacketCaptureName + PacketCapturePodSecurityPolicyName = PacketCaptureName PacketCapturePolicyName = networkpolicy.TigeraComponentPolicyPrefix + PacketCaptureName PacketCapturePort = 8444 @@ -66,6 +69,9 @@ type PacketCaptureApiConfiguration struct { TrustedBundle certificatemanagement.TrustedBundle ClusterDomain string ManagementClusterConnection *operatorv1.ManagementClusterConnection + + // Whether or not the cluster supports pod security policies. 
+ UsePSP bool } type packetCaptureApiComponent struct { @@ -122,6 +128,10 @@ func (pc *packetCaptureApiComponent) Objects() ([]client.Object, []client.Object if pc.cfg.TrustedBundle != nil { objs = append(objs, pc.cfg.TrustedBundle.ConfigMap(PacketCaptureNamespace)) } + + if pc.cfg.UsePSP { + objs = append(objs, pc.podSecurityPolicy()) + } return objs, nil } @@ -152,7 +162,7 @@ func (pc *packetCaptureApiComponent) service() *corev1.Service { } } -func (pc *packetCaptureApiComponent) serviceAccount() client.Object { +func (pc *packetCaptureApiComponent) serviceAccount() *corev1.ServiceAccount { return &corev1.ServiceAccount{ TypeMeta: metav1.TypeMeta{Kind: "ServiceAccount", APIVersion: "v1"}, ObjectMeta: metav1.ObjectMeta{Name: PacketCaptureServiceAccountName, Namespace: PacketCaptureNamespace}, @@ -183,6 +193,15 @@ func (pc *packetCaptureApiComponent) clusterRole() client.Object { }, } + if pc.cfg.UsePSP { + rules = append(rules, rbacv1.PolicyRule{ + APIGroups: []string{"policy"}, + Resources: []string{"podsecuritypolicies"}, + Verbs: []string{"use"}, + ResourceNames: []string{PacketCapturePodSecurityPolicyName}, + }) + } + return &rbacv1.ClusterRole{ TypeMeta: metav1.TypeMeta{Kind: "ClusterRole", APIVersion: "rbac.authorization.k8s.io/v1"}, ObjectMeta: metav1.ObjectMeta{ @@ -192,7 +211,7 @@ func (pc *packetCaptureApiComponent) clusterRole() client.Object { } } -func (pc *packetCaptureApiComponent) clusterRoleBinding() client.Object { +func (pc *packetCaptureApiComponent) clusterRoleBinding() *rbacv1.ClusterRoleBinding { return &rbacv1.ClusterRoleBinding{ TypeMeta: metav1.TypeMeta{Kind: "ClusterRoleBinding", APIVersion: "rbac.authorization.k8s.io/v1"}, ObjectMeta: metav1.ObjectMeta{ @@ -213,7 +232,11 @@ func (pc *packetCaptureApiComponent) clusterRoleBinding() client.Object { } } -func (pc *packetCaptureApiComponent) deployment() client.Object { +func (pc *packetCaptureApiComponent) podSecurityPolicy() *policyv1beta1.PodSecurityPolicy { + return podsecuritypolicy.NewBasePolicy(PacketCapturePodSecurityPolicyName) +} + +func (pc *packetCaptureApiComponent) deployment() *appsv1.Deployment { return &appsv1.Deployment{ TypeMeta: metav1.TypeMeta{Kind: "Deployment", APIVersion: "apps/v1"}, ObjectMeta: metav1.ObjectMeta{ diff --git a/pkg/render/packetcapture_api_test.go b/pkg/render/packet_capture_api_test.go similarity index 97% rename from pkg/render/packetcapture_api_test.go rename to pkg/render/packet_capture_api_test.go index aea3befaa0..89ca962b77 100644 --- a/pkg/render/packetcapture_api_test.go +++ b/pkg/render/packet_capture_api_test.go @@ -17,12 +17,20 @@ package render_test import ( "fmt" - "github.com/tigera/operator/pkg/render/testutils" - "k8s.io/apimachinery/pkg/types" - . "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo/extensions/table" . 
"github.com/onsi/gomega" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + operatorv1 "github.com/tigera/operator/api/v1" "github.com/tigera/operator/pkg/apis" "github.com/tigera/operator/pkg/common" @@ -34,16 +42,9 @@ import ( "github.com/tigera/operator/pkg/render/common/authentication" rmeta "github.com/tigera/operator/pkg/render/common/meta" rtest "github.com/tigera/operator/pkg/render/common/test" + "github.com/tigera/operator/pkg/render/testutils" "github.com/tigera/operator/pkg/tls" "github.com/tigera/operator/pkg/tls/certificatemanagement" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/intstr" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/fake" ) var _ = Describe("Rendering tests for PacketCapture API component", func() { @@ -82,6 +83,7 @@ var _ = Describe("Rendering tests for PacketCapture API component", func() { Installation: &i, KeyValidatorConfig: config, ServerCertSecret: secret, + UsePSP: true, } pc := render.PacketCaptureAPI(cfg) Expect(pc.ResolveImages(nil)).To(BeNil()) @@ -106,6 +108,7 @@ var _ = Describe("Rendering tests for PacketCapture API component", func() { {name: render.PacketCaptureClusterRoleBindingName, ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRoleBinding"}, {name: render.PacketCaptureDeploymentName, ns: render.PacketCaptureNamespace, group: "apps", version: "v1", kind: "Deployment"}, {name: render.PacketCaptureServiceName, ns: render.PacketCaptureNamespace, group: "", version: "v1", kind: "Service"}, + {name: "tigera-packetcapture", ns: "", group: "policy", version: "v1beta1", kind: "PodSecurityPolicy"}, } return resources @@ -319,6 +322,12 @@ var _ = Describe("Rendering tests for PacketCapture API component", func() { Resources: []string{"packetcaptures/status"}, Verbs: []string{"update"}, }, + { + APIGroups: []string{"policy"}, + ResourceNames: []string{"tigera-packetcapture"}, + Resources: []string{"podsecuritypolicies"}, + Verbs: []string{"use"}, + }, })) clusterRoleBinding := rtest.GetResource(resources, render.PacketCaptureClusterRoleBindingName, "", "rbac.authorization.k8s.io", "v1", "ClusterRoleBinding").(*rbacv1.ClusterRoleBinding) Expect(clusterRoleBinding.RoleRef.Name).To(Equal(render.PacketCaptureClusterRoleName)) diff --git a/pkg/render/typha.go b/pkg/render/typha.go index 0311107128..f3a8d6c0cc 100644 --- a/pkg/render/typha.go +++ b/pkg/render/typha.go @@ -117,7 +117,7 @@ func (c *typhaComponent) Objects() ([]client.Object, []client.Object) { c.typhaPodDisruptionBudget(), } - if c.cfg.Installation.KubernetesProvider != operatorv1.ProviderOpenShift && c.cfg.UsePSP { + if c.cfg.UsePSP { objs = append(objs, c.typhaPodSecurityPolicy()) } @@ -338,7 +338,7 @@ func (c *typhaComponent) typhaRole() *rbacv1.ClusterRole { } role.Rules = append(role.Rules, extraRules...) 
} - if c.cfg.Installation.KubernetesProvider != operatorv1.ProviderOpenShift { + if c.cfg.UsePSP { // Allow access to the pod security policy in case this is enforced on the cluster role.Rules = append(role.Rules, rbacv1.PolicyRule{ APIGroups: []string{"policy"}, @@ -642,8 +642,7 @@ func (c *typhaComponent) typhaService() *corev1.Service { } func (c *typhaComponent) typhaPodSecurityPolicy() *policyv1beta1.PodSecurityPolicy { - psp := podsecuritypolicy.NewBasePolicy() - psp.GetObjectMeta().SetName(common.TyphaDeploymentName) + psp := podsecuritypolicy.NewBasePolicy(common.TyphaDeploymentName) psp.Spec.HostNetwork = true return psp }