From 56c5a6806ff5ed4cb087b0a042231d81771d374a Mon Sep 17 00:00:00 2001 From: Kyle Schochenmaier Date: Thu, 8 Apr 2021 17:48:17 -0500 Subject: [PATCH] tproxy remove cleanup controller (#476) Remove the cleanup controller and replace it with the supporting logic from #457. --- connect-inject/cleanup_resource.go | 281 ---------------- connect-inject/cleanup_resource_ent_test.go | 324 ------------------- connect-inject/cleanup_resource_test.go | 338 -------------------- subcommand/inject-connect/command.go | 249 +++++--------- subcommand/inject-connect/command_test.go | 64 ---- subcommand/server-acl-init/command.go | 5 - subcommand/server-acl-init/command_test.go | 22 -- subcommand/server-acl-init/rules.go | 4 - subcommand/server-acl-init/rules_test.go | 44 +-- 9 files changed, 93 insertions(+), 1238 deletions(-) delete mode 100644 connect-inject/cleanup_resource.go delete mode 100644 connect-inject/cleanup_resource_ent_test.go delete mode 100644 connect-inject/cleanup_resource_test.go diff --git a/connect-inject/cleanup_resource.go b/connect-inject/cleanup_resource.go deleted file mode 100644 index e555ce306e..0000000000 --- a/connect-inject/cleanup_resource.go +++ /dev/null @@ -1,281 +0,0 @@ -package connectinject - -import ( - "fmt" - "sync" - "time" - - "github.com/hashicorp/consul-k8s/consul" - capi "github.com/hashicorp/consul/api" - "github.com/hashicorp/go-hclog" - "golang.org/x/net/context" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/tools/cache" -) - -// CleanupResource implements Resource and is used to clean up Consul service -// instances that weren't deregistered when their pods were deleted. -// Usually the preStop hook in the pods handles this but during a force delete -// or OOM the preStop hook doesn't run. -type CleanupResource struct { - Log hclog.Logger - KubernetesClient kubernetes.Interface - // ConsulClient points at the agent on the same node as this pod. - ConsulClient *capi.Client - ReconcilePeriod time.Duration - Ctx context.Context - // ConsulScheme is the scheme to use when making API calls to Consul, - // i.e. "http" or "https". - ConsulScheme string - // ConsulPort is the port to make HTTP API calls to Consul agents on. - ConsulPort string - EnableConsulNamespaces bool - - lock sync.Mutex -} - -// Run starts the long-running Reconcile loop that runs on a timer. -func (c *CleanupResource) Run(stopCh <-chan struct{}) { - reconcileTimer := time.NewTimer(c.ReconcilePeriod) - defer reconcileTimer.Stop() - - for { - c.reconcile() - reconcileTimer.Reset(c.ReconcilePeriod) - - select { - case <-stopCh: - c.Log.Info("received stop signal, shutting down") - return - - case <-reconcileTimer.C: - // Fall through and continue the loop. - } - } -} - -// reconcile checks all registered Consul connect service instances and ensures -// the pod they represent is still running. If the pod is no longer running, -// it deregisters the service instance from its agent. -func (c *CleanupResource) reconcile() { - c.Log.Debug("starting reconcile") - - // currentPods is a map of currently existing pods. Keys are in the form - // "namespace/pod-name". Value doesn't matter since we're using this - // solely for quick "exists" checks. - // Use currentPodsKey() to construct the key when accessing this map. - currentPods := make(map[string]bool) - - // Gather needed data on nodes, services and pods. 
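The Run loop above is the standard stop-channel/timer reconcile pattern; a minimal self-contained sketch of just that shape (hypothetical names, outside this patch):

package main

import (
	"fmt"
	"time"
)

// reconcileLoop mirrors Run above: do one pass, re-arm the timer, then wait
// for either the next tick or the stop signal.
func reconcileLoop(period time.Duration, stopCh <-chan struct{}, pass func()) {
	timer := time.NewTimer(period)
	defer timer.Stop()
	for {
		pass()
		timer.Reset(period)
		select {
		case <-stopCh:
			return
		case <-timer.C:
		}
	}
}

func main() {
	stopCh := make(chan struct{})
	go func() { time.Sleep(250 * time.Millisecond); close(stopCh) }()
	reconcileLoop(100*time.Millisecond, stopCh, func() { fmt.Println("reconcile pass") })
}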
- kubeNodes, err := c.KubernetesClient.CoreV1().Nodes().List(c.Ctx, metav1.ListOptions{}) - if err != nil { - c.Log.Error("unable to get nodes", "error", err) - return - } - - // namespacesToServiceNames maps from Consul namespace to the list of service - // names registered in that namespace. - // If Consul namespaces are disabled, there will be only one key with value - // "", i.e. the empty string. - namespacesToServiceNames := make(map[string][]string) - if c.EnableConsulNamespaces { - namespaces, _, err := c.ConsulClient.Namespaces().List(nil) - if err != nil { - c.Log.Error("unable to get Consul namespaces", "error", err) - return - } - for _, ns := range namespaces { - namespacesToServiceNames[ns.Name] = nil - } - } else { - // This allows us to treat OSS the same as enterprise for the rest of - // the code path. - namespacesToServiceNames[""] = nil - } - - for ns := range namespacesToServiceNames { - serviceNames, _, err := c.ConsulClient.Catalog().Services(&capi.QueryOptions{Namespace: ns}) - if err != nil { - c.Log.Error("unable to get Consul services", "error", err) - return - } - namespacesToServiceNames[ns] = keys(serviceNames) - } - - podList, err := c.KubernetesClient.CoreV1().Pods(corev1.NamespaceAll).List(c.Ctx, - metav1.ListOptions{LabelSelector: annotationStatus}) - if err != nil { - c.Log.Error("unable to get pods", "error", err) - return - } - - // Build up our map of currently running pods. - for _, pod := range podList.Items { - currentPods[currentPodsKey(pod.Name, pod.Namespace)] = true - } - - // For each registered service, find the associated pod. - for ns, serviceNames := range namespacesToServiceNames { - for _, serviceName := range serviceNames { - serviceInstances, _, err := c.ConsulClient.Catalog().Service(serviceName, "", &capi.QueryOptions{Namespace: ns}) - if err != nil { - c.Log.Error("unable to get Consul service", "name", serviceName, "error", err) - return - } - for _, instance := range serviceInstances { - podName, hasPodMeta := instance.ServiceMeta[MetaKeyPodName] - k8sNamespace, hasNSMeta := instance.ServiceMeta[MetaKeyKubeNS] - if hasPodMeta && hasNSMeta { - - // Check if the instance matches a running pod. If not, deregister it. - if _, podExists := currentPods[currentPodsKey(podName, k8sNamespace)]; !podExists { - if !nodeInCluster(kubeNodes, instance.Node) { - c.Log.Debug("skipping deregistration because instance is from node not in this cluster", - "pod", podName, "id", instance.ServiceID, "ns", ns, "node", instance.Node) - continue - } - - c.Log.Info("found service instance from terminated pod still registered", "pod", podName, "id", instance.ServiceID, "ns", ns) - err := c.deregisterInstance(instance, instance.Address) - if err != nil { - c.Log.Error("unable to deregister service instance", "id", instance.ServiceID, "ns", ns, "error", err) - continue - } - c.Log.Info("service instance deregistered", "id", instance.ServiceID, "ns", ns) - } - } - } - } - } - - c.Log.Debug("finished reconcile") - return -} - -// Delete is called when a pod is deleted. It checks that the Consul service -// instance for that pod is no longer registered with Consul. -// If the instance is still registered, it deregisters it. 
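reconcile above reduces each staleness decision to a set-membership check keyed by namespace/pod-name; a compact sketch of that check, reusing the same currentPodsKey scheme (hypothetical data):

package main

import "fmt"

// currentPodsKey matches the helper defined later in this file.
func currentPodsKey(podName, k8sNamespace string) string {
	return fmt.Sprintf("%s/%s", k8sNamespace, podName)
}

func main() {
	// Pods currently observed in the cluster, keyed "namespace/pod-name".
	currentPods := map[string]bool{
		currentPodsKey("foo-abc123", "default"): true,
	}

	// An instance whose pod key is absent is a deregistration candidate.
	if !currentPods[currentPodsKey("foo-def456", "default")] {
		fmt.Println("stale instance: deregister")
	}
}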
-func (c *CleanupResource) Delete(key string, obj interface{}) error { - pod, ok := obj.(*corev1.Pod) - if !ok { - return fmt.Errorf("expected pod, got: %#v", obj) - } - if pod == nil { - return fmt.Errorf("object for key %s was nil", key) - } - serviceName, ok := pod.ObjectMeta.Annotations[annotationService] - if !ok { - return fmt.Errorf("pod did not have %s annotation", annotationService) - } - kubeNS := pod.Namespace - podName := pod.Name - // NOTE: This will be an empty string with Consul OSS. - consulNS := pod.ObjectMeta.Annotations[annotationConsulNamespace] - - // Look for both the service and its sidecar proxy. - consulServiceNames := []string{serviceName, fmt.Sprintf("%s-sidecar-proxy", serviceName)} - - for _, consulServiceName := range consulServiceNames { - instances, _, err := c.ConsulClient.Catalog().Service(consulServiceName, "", &capi.QueryOptions{ - Filter: fmt.Sprintf(`ServiceMeta[%q] == %q and ServiceMeta[%q] == %q`, MetaKeyPodName, podName, MetaKeyKubeNS, kubeNS), - Namespace: consulNS, - }) - if err != nil { - c.Log.Error("unable to get Consul Services", "error", err) - return err - } - if len(instances) == 0 { - c.Log.Debug("terminated pod had no still-registered instances", "service", consulServiceName, "pod", podName, "ns", consulNS) - continue - } - - // NOTE: We only expect a single instance because there's only one - // per pod but we may as well range over all of them just to be safe. - for _, instance := range instances { - // NOTE: We don't need to check if this instance belongs to a kube - // node in this cluster (like we do in Reconcile) because we only - // get the delete event if a pod in this cluster is deleted so - // we know this is one of our instances. - - c.Log.Info("found service instance from terminated pod still registered", "pod", podName, "id", instance.ServiceID, "ns", consulNS) - err := c.deregisterInstance(instance, pod.Status.HostIP) - if err != nil { - c.Log.Error("unable to deregister service instance", "id", instance.ServiceID, "error", err) - return err - } - c.Log.Info("service instance deregistered", "id", instance.ServiceID, "ns", consulNS) - } - } - return nil -} - -// Upsert is a no-op because we're only interested in pods that are deleted. -func (c *CleanupResource) Upsert(_ string, _ interface{}) error { - return nil -} - -func (c *CleanupResource) Informer() cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - return c.KubernetesClient.CoreV1().Pods(metav1.NamespaceAll).List(c.Ctx, - metav1.ListOptions{LabelSelector: annotationStatus}) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - return c.KubernetesClient.CoreV1().Pods(metav1.NamespaceAll).Watch(c.Ctx, - metav1.ListOptions{LabelSelector: annotationStatus}) - }, - }, - &corev1.Pod{}, - // Resync is 0 because we perform our own reconcile loop on our own timer. - 0, - cache.Indexers{}, - ) -} - -// deregisterInstance deregisters instance from Consul by calling the agent at -// hostIP's deregister service API. 
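Delete above leans on Consul's API filter expressions to narrow catalog results to one pod's instances; a sketch of building such a filter (the literal meta keys stand in for MetaKeyPodName and MetaKeyKubeNS, and a reachable agent is assumed):

package main

import (
	"fmt"

	capi "github.com/hashicorp/consul/api"
)

func main() {
	// Assumes a local Consul agent at the default address.
	client, err := capi.NewClient(capi.DefaultConfig())
	if err != nil {
		panic(err)
	}

	// Match only instances whose meta ties them to this pod, as in Delete above.
	filter := fmt.Sprintf(`ServiceMeta[%q] == %q and ServiceMeta[%q] == %q`,
		"pod-name", "foo-abc123", "k8s-namespace", "default")
	instances, _, err := client.Catalog().Service("foo", "", &capi.QueryOptions{Filter: filter})
	if err != nil {
		panic(err)
	}
	fmt.Println("matched instances:", len(instances))
}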
-func (c *CleanupResource) deregisterInstance(instance *capi.CatalogService, hostIP string) error { - fullAddr := fmt.Sprintf("%s://%s:%s", c.ConsulScheme, hostIP, c.ConsulPort) - localConfig := capi.DefaultConfig() - if instance.Namespace != "" { - localConfig.Namespace = instance.Namespace - } - localConfig.Address = fullAddr - client, err := consul.NewClient(localConfig) - if err != nil { - return fmt.Errorf("constructing client for address %q: %s", hostIP, err) - } - - return client.Agent().ServiceDeregister(instance.ServiceID) -} - -// currentPodsKey should be used to construct the key to access the -// currentPods map. -func currentPodsKey(podName, k8sNamespace string) string { - return fmt.Sprintf("%s/%s", k8sNamespace, podName) -} - -// nodeInCluster returns whether nodeName is the name of a node in nodes, i.e. -// it's the name of a node in this cluster. -func nodeInCluster(nodes *corev1.NodeList, nodeName string) bool { - for _, n := range nodes.Items { - if n.Name == nodeName { - return true - } - } - return false -} - -// keys returns the keys of m. -func keys(m map[string][]string) []string { - var ks []string - for k := range m { - ks = append(ks, k) - } - return ks -} diff --git a/connect-inject/cleanup_resource_ent_test.go b/connect-inject/cleanup_resource_ent_test.go deleted file mode 100644 index 7e3ae56452..0000000000 --- a/connect-inject/cleanup_resource_ent_test.go +++ /dev/null @@ -1,324 +0,0 @@ -// +build enterprise - -package connectinject - -import ( - "net/url" - "testing" - - capi "github.com/hashicorp/consul/api" - "github.com/hashicorp/consul/sdk/testutil" - "github.com/hashicorp/go-hclog" - "github.com/stretchr/testify/require" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/kubernetes/fake" -) - -func TestReconcile_ConsulNamespaces(t *testing.T) { - t.Parallel() - - cases := map[string]struct { - ConsulServices []capi.AgentServiceRegistration - KubePods []runtime.Object - // ExpConsulServiceIDs maps from Consul namespace to - // list of expected service ids in that namespace. 
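- // A nil slice for a namespace asserts that no service instances remain registered there.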
- ExpConsulServiceIDs map[string][]string - }{ - "default namespace, pod deleted": { - ConsulServices: []capi.AgentServiceRegistration{ - consulFooSvcDefaultNS, - }, - KubePods: nil, - ExpConsulServiceIDs: map[string][]string{ - "default": {"consul"}, - }, - }, - "default namespace, pod not deleted": { - ConsulServices: []capi.AgentServiceRegistration{ - consulFooSvcDefaultNS, - }, - KubePods: []runtime.Object{consulFooPodDefaultNS}, - ExpConsulServiceIDs: map[string][]string{ - "default": {"consul", "foo-abc123-foo"}, - }, - }, - "foo namespace, pod deleted": { - ConsulServices: []capi.AgentServiceRegistration{ - consulFooSvcFooNS, - }, - KubePods: nil, - ExpConsulServiceIDs: map[string][]string{ - "default": {"consul"}, - "foo": nil, - }, - }, - "foo namespace, pod not deleted": { - ConsulServices: []capi.AgentServiceRegistration{ - consulFooSvcFooNS, - }, - KubePods: []runtime.Object{consulFooPodFooNS}, - ExpConsulServiceIDs: map[string][]string{ - "default": {"consul"}, - "foo": {"foo-abc123-foo"}, - }, - }, - "does not delete instances with same id in different namespaces": { - ConsulServices: []capi.AgentServiceRegistration{ - consulFooSvcFooNS, - consulFooSvcBarNS, - }, - KubePods: []runtime.Object{consulFooPodFooNS}, - ExpConsulServiceIDs: map[string][]string{ - "default": {"consul"}, - "foo": {"foo-abc123-foo"}, - "bar": nil, - }, - }, - } - - for name, c := range cases { - t.Run(name, func(t *testing.T) { - require := require.New(t) - - // Start Consul server. - server, err := testutil.NewTestServerConfigT(t, nil) - defer server.Stop() - require.NoError(err) - server.WaitForSerfCheck(t) - consulClient, err := capi.NewClient(&capi.Config{Address: server.HTTPAddr}) - require.NoError(err) - - // Register Consul services. - for _, svc := range c.ConsulServices { - _, _, err := consulClient.Namespaces().Create(&capi.Namespace{ - Name: svc.Namespace, - }, nil) - require.NoError(err) - require.NoError(consulClient.Agent().ServiceRegister(&svc)) - } - - // Create the cleanup resource. - log := hclog.Default().Named("cleanupResource") - log.SetLevel(hclog.Debug) - consulURL, err := url.Parse("http://" + server.HTTPAddr) - require.NoError(err) - node := nodeName(t, consulClient) - // NOTE: we need to add the node because the reconciler checks if - // the node the service is registered with actually exists in this - // cluster. - kubeResources := append(c.KubePods, &corev1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: node, - }, - }) - cleanupResource := CleanupResource{ - Log: log, - KubernetesClient: fake.NewSimpleClientset(kubeResources...), - ConsulClient: consulClient, - ConsulScheme: consulURL.Scheme, - ConsulPort: consulURL.Port(), - EnableConsulNamespaces: true, - } - - // Run Reconcile. - cleanupResource.reconcile() - - // Test that the remaining services are what we expect. - for ns, expSvcs := range c.ExpConsulServiceIDs { - // Note: we need to use the catalog endpoints because - // Agent().Services() does not currently support namespaces - // (https://github.com/hashicorp/consul/issues/9710). 
- services, _, err := consulClient.Catalog().Services(&capi.QueryOptions{Namespace: ns}) - require.NoError(err) - - var actualServiceIDs []string - for actSvcName := range services { - services, _, err := consulClient.Catalog().Service(actSvcName, "", &capi.QueryOptions{Namespace: ns}) - require.NoError(err) - for _, actSvc := range services { - actualServiceIDs = append(actualServiceIDs, actSvc.ServiceID) - } - } - require.ElementsMatch(actualServiceIDs, expSvcs, "ns=%s act=%v", ns, actualServiceIDs) - } - }) - } -} - -func TestDelete_ConsulNamespaces(t *testing.T) { - t.Parallel() - - cases := map[string]struct { - Pod *corev1.Pod - ConsulServices []capi.AgentServiceRegistration - // ExpConsulServiceIDs maps from Consul namespace to - // list of expected service ids in that namespace. - ExpConsulServiceIDs map[string][]string - ExpErr string - }{ - "default namespace": { - ConsulServices: []capi.AgentServiceRegistration{ - consulFooSvcDefaultNS, - }, - Pod: consulFooPodDefaultNS, - ExpConsulServiceIDs: map[string][]string{ - "default": {"consul"}, - }, - }, - "foo namespace": { - ConsulServices: []capi.AgentServiceRegistration{ - consulFooSvcFooNS, - }, - Pod: consulFooPodFooNS, - ExpConsulServiceIDs: map[string][]string{ - "default": {"consul"}, - "foo": nil, - }, - }, - "does not delete instances with same id in different namespaces": { - ConsulServices: []capi.AgentServiceRegistration{ - consulFooSvcFooNS, - consulFooSvcBarNS, - }, - Pod: consulFooPodFooNS, - ExpConsulServiceIDs: map[string][]string{ - "default": {"consul"}, - "foo": nil, - "bar": {"foo-abc123-foo"}, - }, - }, - } - - for name, c := range cases { - t.Run(name, func(t *testing.T) { - require := require.New(t) - - // Start Consul server. - server, err := testutil.NewTestServerConfigT(t, nil) - defer server.Stop() - require.NoError(err) - server.WaitForSerfCheck(t) - consulClient, err := capi.NewClient(&capi.Config{Address: server.HTTPAddr}) - require.NoError(err) - - // Register Consul services. - for _, svc := range c.ConsulServices { - _, _, err := consulClient.Namespaces().Create(&capi.Namespace{ - Name: svc.Namespace, - }, nil) - require.NoError(err) - require.NoError(consulClient.Agent().ServiceRegister(&svc)) - } - - // Create the cleanup resource. - log := hclog.Default().Named("cleanupResource") - log.SetLevel(hclog.Debug) - consulURL, err := url.Parse("http://" + server.HTTPAddr) - require.NoError(err) - cleanupResource := CleanupResource{ - Log: log, - KubernetesClient: fake.NewSimpleClientset(), - ConsulClient: consulClient, - ConsulScheme: consulURL.Scheme, - ConsulPort: consulURL.Port(), - EnableConsulNamespaces: true, - } - - // Run Delete. - err = cleanupResource.Delete("default/foo", c.Pod) - if c.ExpErr != "" { - require.EqualError(err, c.ExpErr) - } else { - require.NoError(err) - - // Test that the remaining services are what we expect. - for ns, expSvcs := range c.ExpConsulServiceIDs { - // Note: we need to use the catalog endpoints because - // Agent().Services() does not currently support namespaces - // (https://github.com/hashicorp/consul/issues/9710). 
- services, _, err := consulClient.Catalog().Services(&capi.QueryOptions{Namespace: ns}) - require.NoError(err) - - var actualServiceIDs []string - for actSvcName := range services { - services, _, err := consulClient.Catalog().Service(actSvcName, "", &capi.QueryOptions{Namespace: ns}) - require.NoError(err) - for _, actSvc := range services { - actualServiceIDs = append(actualServiceIDs, actSvc.ServiceID) - } - } - require.ElementsMatch(actualServiceIDs, expSvcs, "ns=%s act=%v", ns, actualServiceIDs) - } - } - }) - } -} - -var ( - consulFooSvcDefaultNS = capi.AgentServiceRegistration{ - ID: "foo-abc123-foo", - Name: "foo", - Namespace: "default", - Address: "127.0.0.1", - Meta: map[string]string{ - MetaKeyPodName: "foo-abc123", - MetaKeyKubeNS: "default", - }, - } - consulFooSvcFooNS = capi.AgentServiceRegistration{ - ID: "foo-abc123-foo", - Name: "foo", - Namespace: "foo", - Address: "127.0.0.1", - Meta: map[string]string{ - MetaKeyPodName: "foo-abc123", - MetaKeyKubeNS: "default", - }, - } - consulFooSvcBarNS = capi.AgentServiceRegistration{ - ID: "foo-abc123-foo", - Name: "foo", - Namespace: "bar", - Address: "127.0.0.1", - Meta: map[string]string{ - MetaKeyPodName: "foo-abc123", - MetaKeyKubeNS: "bar", - }, - } - consulFooPodDefaultNS = &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo-abc123", - Namespace: "default", - Labels: map[string]string{ - annotationStatus: injected, - }, - Annotations: map[string]string{ - annotationStatus: injected, - annotationService: "foo", - annotationConsulNamespace: "default", - }, - }, - Status: corev1.PodStatus{ - HostIP: "127.0.0.1", - }, - } - consulFooPodFooNS = &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo-abc123", - Namespace: "default", - Labels: map[string]string{ - annotationStatus: injected, - }, - Annotations: map[string]string{ - annotationStatus: injected, - annotationService: "foo", - annotationConsulNamespace: "foo", - }, - }, - Status: corev1.PodStatus{ - HostIP: "127.0.0.1", - }, - } -) diff --git a/connect-inject/cleanup_resource_test.go b/connect-inject/cleanup_resource_test.go deleted file mode 100644 index 6ccc254172..0000000000 --- a/connect-inject/cleanup_resource_test.go +++ /dev/null @@ -1,338 +0,0 @@ -package connectinject - -import ( - "net/url" - "testing" - - capi "github.com/hashicorp/consul/api" - "github.com/hashicorp/consul/sdk/testutil" - "github.com/hashicorp/go-hclog" - "github.com/stretchr/testify/require" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/kubernetes/fake" -) - -func TestReconcile(t *testing.T) { - t.Parallel() - - cases := map[string]struct { - ConsulServices []capi.AgentServiceRegistration - KubePods []runtime.Object - ExpConsulServiceIDs []string - // OutOfClusterNode controls whether the services are registered on a - // node that does not exist in this Kube cluster. 
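- // The reconciler must skip such instances: it cannot verify that pods on foreign nodes are gone.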
- OutOfClusterNode bool - }{ - "no instances running": { - ConsulServices: nil, - KubePods: nil, - ExpConsulServiceIDs: nil, - }, - "instance does not have pod-name meta key": { - ConsulServices: []capi.AgentServiceRegistration{consulNoPodNameMetaSvc}, - ExpConsulServiceIDs: []string{"foo-abc123-foo"}, - }, - "instance does not have k8s-namespace meta key": { - ConsulServices: []capi.AgentServiceRegistration{consulNoK8sNSMetaSvc}, - ExpConsulServiceIDs: []string{"foo-abc123-foo"}, - }, - "out of cluster node": { - ConsulServices: []capi.AgentServiceRegistration{consulFooSvc, consulFooSvcSidecar}, - ExpConsulServiceIDs: []string{"foo-abc123-foo", "foo-abc123-foo-sidecar-proxy"}, - OutOfClusterNode: true, - }, - "app and sidecar still running": { - ConsulServices: []capi.AgentServiceRegistration{consulFooSvc, consulFooSvcSidecar}, - KubePods: []runtime.Object{fooPod}, - ExpConsulServiceIDs: []string{"foo-abc123-foo", "foo-abc123-foo-sidecar-proxy"}, - }, - "app and sidecar terminated": { - ConsulServices: []capi.AgentServiceRegistration{consulFooSvc, consulFooSvcSidecar}, - KubePods: nil, - ExpConsulServiceIDs: nil, - }, - "only app is registered, no sidecar": { - ConsulServices: []capi.AgentServiceRegistration{consulFooSvc}, - KubePods: nil, - ExpConsulServiceIDs: nil, - }, - "only sidecar is registered, no app": { - ConsulServices: []capi.AgentServiceRegistration{consulFooSvcSidecar}, - KubePods: nil, - ExpConsulServiceIDs: nil, - }, - "multiple instances of the same service": { - ConsulServices: []capi.AgentServiceRegistration{ - consulFooSvc, - consulFooSvcSidecar, - consulFooSvcPod2, - consulFooSvcSidecarPod2, - }, - KubePods: []runtime.Object{fooPod}, - ExpConsulServiceIDs: []string{"foo-abc123-foo", "foo-abc123-foo-sidecar-proxy"}, - }, - } - - for name, c := range cases { - t.Run(name, func(t *testing.T) { - require := require.New(t) - - // Start Consul server. - server, err := testutil.NewTestServerConfigT(t, nil) - defer server.Stop() - require.NoError(err) - server.WaitForLeader(t) - consulClient, err := capi.NewClient(&capi.Config{Address: server.HTTPAddr}) - require.NoError(err) - - // Register Consul services. - for _, svc := range c.ConsulServices { - require.NoError(consulClient.Agent().ServiceRegister(&svc)) - } - - // Create the cleanup resource. - log := hclog.Default().Named("cleanupResource") - log.SetLevel(hclog.Debug) - consulURL, err := url.Parse("http://" + server.HTTPAddr) - require.NoError(err) - kubeResources := c.KubePods - if !c.OutOfClusterNode { - node := nodeName(t, consulClient) - // NOTE: we need to add the node because the reconciler checks if - // the node the service is registered with actually exists in this - // cluster. - kubeResources = append(kubeResources, &corev1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: node, - }, - }) - - } - cleanupResource := CleanupResource{ - Log: log, - KubernetesClient: fake.NewSimpleClientset(kubeResources...), - ConsulClient: consulClient, - ConsulScheme: consulURL.Scheme, - ConsulPort: consulURL.Port(), - } - - // Run Reconcile. - cleanupResource.reconcile() - - // Test that the remaining services are what we expect. 
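- // Agent().Services() returns the local agent's registrations keyed by service ID.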
- services, err := consulClient.Agent().Services() - require.NoError(err) - var actualServiceIDs []string - for id := range services { - actualServiceIDs = append(actualServiceIDs, id) - } - require.ElementsMatch(actualServiceIDs, c.ExpConsulServiceIDs) - }) - } -} - -func TestDelete(t *testing.T) { - t.Parallel() - - var nilPod *corev1.Pod - cases := map[string]struct { - Pod interface{} - ConsulServices []capi.AgentServiceRegistration - ExpConsulServiceIDs []string - ExpErr string - }{ - "pod is nil": { - Pod: nilPod, - ExpErr: "object for key default/foo was nil", - }, - "not expected type": { - Pod: &corev1.Service{}, - ExpErr: "expected pod, got: &v1.Service", - }, - "pod does not have service-name annotation": { - Pod: &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo-abc123", - Namespace: "default", - }, - Status: corev1.PodStatus{ - HostIP: "127.0.0.1", - }, - }, - ExpErr: "pod did not have consul.hashicorp.com/connect-service annotation", - }, - "instance does not have pod-name meta": { - Pod: fooPod, - ConsulServices: []capi.AgentServiceRegistration{consulNoPodNameMetaSvc}, - ExpConsulServiceIDs: []string{"foo-abc123-foo"}, - }, - "instance does not have k8s-namespace meta": { - Pod: fooPod, - ConsulServices: []capi.AgentServiceRegistration{consulNoK8sNSMetaSvc}, - ExpConsulServiceIDs: []string{"foo-abc123-foo"}, - }, - "no instances still registered": { - Pod: fooPod, - ConsulServices: nil, - ExpConsulServiceIDs: nil, - }, - "app and sidecar terminated": { - Pod: fooPod, - ConsulServices: []capi.AgentServiceRegistration{consulFooSvc, consulFooSvcSidecar}, - ExpConsulServiceIDs: nil, - }, - "only app is registered, no sidecar": { - Pod: fooPod, - ConsulServices: []capi.AgentServiceRegistration{consulFooSvc}, - ExpConsulServiceIDs: nil, - }, - "only sidecar is registered, no app": { - Pod: fooPod, - ConsulServices: []capi.AgentServiceRegistration{consulFooSvcSidecar}, - ExpConsulServiceIDs: nil, - }, - "multiple instances of the same service": { - Pod: fooPod, - ConsulServices: []capi.AgentServiceRegistration{ - consulFooSvc, - consulFooSvcSidecar, - consulFooSvcPod2, - consulFooSvcSidecarPod2, - }, - ExpConsulServiceIDs: []string{"foo-def456-foo", "foo-def456-foo-sidecar-proxy"}, - }, - } - - for name, c := range cases { - t.Run(name, func(t *testing.T) { - require := require.New(t) - - // Start Consul server. - server, err := testutil.NewTestServerConfigT(t, nil) - defer server.Stop() - require.NoError(err) - server.WaitForLeader(t) - consulClient, err := capi.NewClient(&capi.Config{Address: server.HTTPAddr}) - require.NoError(err) - - // Register Consul services. - for _, svc := range c.ConsulServices { - require.NoError(consulClient.Agent().ServiceRegister(&svc)) - } - - // Create the cleanup resource. - log := hclog.Default().Named("cleanupResource") - log.SetLevel(hclog.Debug) - consulURL, err := url.Parse("http://" + server.HTTPAddr) - require.NoError(err) - cleanupResource := CleanupResource{ - Log: log, - KubernetesClient: fake.NewSimpleClientset(), - ConsulClient: consulClient, - ConsulScheme: consulURL.Scheme, - ConsulPort: consulURL.Port(), - } - - // Run Delete. - err = cleanupResource.Delete("default/foo", c.Pod) - if c.ExpErr != "" { - require.Error(err) - require.Contains(err.Error(), c.ExpErr) - } else { - require.NoError(err) - - // Test that the remaining services are what we expect. 
- services, err := consulClient.Agent().Services() - require.NoError(err) - var actualServiceIDs []string - for id := range services { - actualServiceIDs = append(actualServiceIDs, id) - } - require.ElementsMatch(actualServiceIDs, c.ExpConsulServiceIDs) - } - }) - } -} - -// nodeName returns the Consul node name for the agent that client -// points at. -func nodeName(t *testing.T, client *capi.Client) string { - self, err := client.Agent().Self() - require.NoError(t, err) - require.Contains(t, self, "Config") - require.Contains(t, self["Config"], "NodeName") - return self["Config"]["NodeName"].(string) -} - -var ( - consulFooSvc = capi.AgentServiceRegistration{ - ID: "foo-abc123-foo", - Name: "foo", - Address: "127.0.0.1", - Meta: map[string]string{ - MetaKeyPodName: "foo-abc123", - MetaKeyKubeNS: "default", - }, - } - consulFooSvcSidecar = capi.AgentServiceRegistration{ - ID: "foo-abc123-foo-sidecar-proxy", - Name: "foo-sidecar-proxy", - Address: "127.0.0.1", - Meta: map[string]string{ - MetaKeyPodName: "foo-abc123", - MetaKeyKubeNS: "default", - }, - } - consulFooSvcPod2 = capi.AgentServiceRegistration{ - ID: "foo-def456-foo", - Name: "foo", - Address: "127.0.0.1", - Meta: map[string]string{ - MetaKeyPodName: "foo-def456", - MetaKeyKubeNS: "default", - }, - } - consulFooSvcSidecarPod2 = capi.AgentServiceRegistration{ - ID: "foo-def456-foo-sidecar-proxy", - Name: "foo-sidecar-proxy", - Address: "127.0.0.1", - Meta: map[string]string{ - MetaKeyPodName: "foo-def456", - MetaKeyKubeNS: "default", - }, - } - consulNoPodNameMetaSvc = capi.AgentServiceRegistration{ - ID: "foo-abc123-foo", - Name: "foo", - Address: "127.0.0.1", - Meta: map[string]string{ - MetaKeyKubeNS: "default", - }, - } - consulNoK8sNSMetaSvc = capi.AgentServiceRegistration{ - ID: "foo-abc123-foo", - Name: "foo", - Address: "127.0.0.1", - Meta: map[string]string{ - MetaKeyPodName: "foo-abc123", - }, - } - fooPod = &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo-abc123", - Namespace: "default", - Labels: map[string]string{ - annotationStatus: injected, - }, - Annotations: map[string]string{ - annotationStatus: injected, - annotationService: "foo", - }, - }, - Status: corev1.PodStatus{ - HostIP: "127.0.0.1", - }, - } -) diff --git a/subcommand/inject-connect/command.go b/subcommand/inject-connect/command.go index 2ba64c8d77..6cb4571a83 100644 --- a/subcommand/inject-connect/command.go +++ b/subcommand/inject-connect/command.go @@ -2,24 +2,18 @@ package connectinject import ( "context" - "crypto/tls" "flag" "fmt" "io/ioutil" "net/http" "net/url" - "os" - "os/signal" "strconv" "strings" "sync" "sync/atomic" - "syscall" - "time" connectinject "github.com/hashicorp/consul-k8s/connect-inject" "github.com/hashicorp/consul-k8s/consul" - "github.com/hashicorp/consul-k8s/helper/controller" "github.com/hashicorp/consul-k8s/subcommand/common" "github.com/hashicorp/consul-k8s/subcommand/flags" "github.com/hashicorp/consul/api" @@ -64,10 +58,6 @@ type Command struct { flagK8SNSMirroringPrefix string // Prefix added to Consul namespaces created when mirroring flagCrossNamespaceACLPolicy string // The name of the ACL policy to add to every created namespace if ACLs are enabled - // Flags for cleanup controller. - flagEnableCleanupController bool // Start the cleanup controller. - flagCleanupControllerReconcilePeriod time.Duration // Period for cleanup controller reconcile. - // Flags for endpoints controller. 
flagReleaseName string flagReleaseNamespace string @@ -103,10 +93,9 @@ type Command struct { consulClient *api.Client clientset kubernetes.Interface - sigCh chan os.Signal - once sync.Once - help string - cert atomic.Value + once sync.Once + help string + cert atomic.Value } var ( @@ -146,9 +135,6 @@ func (c *Command) init() { "K8s namespaces to explicitly allow. May be specified multiple times.") c.flagSet.Var((*flags.AppendSliceValue)(&c.flagDenyK8sNamespacesList), "deny-k8s-namespace", "K8s namespaces to explicitly deny. Takes precedence over allow. May be specified multiple times.") - c.flagSet.BoolVar(&c.flagEnableCleanupController, "enable-cleanup-controller", true, - "Enables cleanup controller that cleans up stale Consul service instances.") - c.flagSet.DurationVar(&c.flagCleanupControllerReconcilePeriod, "cleanup-controller-reconcile-period", 5*time.Minute, "Reconcile period for cleanup controller.") c.flagSet.StringVar(&c.flagReleaseName, "release-name", "consul", "The Consul Helm installation release name, e.g 'helm install '") c.flagSet.StringVar(&c.flagReleaseNamespace, "release-namespace", "default", "The Consul Helm installation namespace, e.g 'helm install --namespace '") c.flagSet.BoolVar(&c.flagEnableNamespaces, "enable-namespaces", false, @@ -200,13 +186,6 @@ func (c *Command) init() { // the default flagSet. That's why we need to merge it to have access with our flagSet. flags.Merge(c.flagSet, flag.CommandLine) c.help = flags.Usage(help, c.flagSet) - - // Wait on an interrupt or terminate for exit, be sure to init it before running - // the controller so that we don't receive an interrupt before it's ready. - if c.sigCh == nil { - c.sigCh = make(chan os.Signal, 1) - signal.Notify(c.sigCh, syscall.SIGINT, syscall.SIGTERM) - } } func (c *Command) Run(args []string) int { @@ -366,143 +345,96 @@ func (c *Command) Run(args []string) int { allowK8sNamespaces := flags.ToSet(c.flagAllowK8sNamespacesList) denyK8sNamespaces := flags.ToSet(c.flagDenyK8sNamespacesList) - // Start the cleanup controller that cleans up Consul service instances - // still registered after the pod has been deleted (usually due to a force delete). 
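With the hand-rolled sigCh gone, shutdown is expected to flow through the context handed to mgr.Start further down. For reference, the stock controller-runtime idiom looks roughly like this (this patch wires its own ctx rather than SetupSignalHandler):

package main

import (
	ctrl "sigs.k8s.io/controller-runtime"
)

func main() {
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{})
	if err != nil {
		panic(err)
	}
	// SetupSignalHandler returns a context that is cancelled on SIGINT/SIGTERM,
	// so Start blocks until a signal arrives or the manager fails.
	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		panic(err)
	}
}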
- ctrlExitCh := make(chan error) - // Start the endpoints controller - { - zapLogger := zap.New(zap.UseDevMode(true), zap.Level(zapcore.InfoLevel)) - ctrl.SetLogger(zapLogger) - klog.SetLogger(zapLogger) - listenSplits := strings.SplitN(c.flagListen, ":", 2) - if len(listenSplits) < 2 { - c.UI.Error(fmt.Sprintf("missing port in address: %s", c.flagListen)) - return 1 - } - port, err := strconv.Atoi(listenSplits[1]) - if err != nil { - c.UI.Error(fmt.Sprintf("unable to parse port string: %s", err)) - return 1 - } - - mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ - Scheme: scheme, - LeaderElection: false, - Host: listenSplits[0], - Port: port, - Logger: zapLogger, - MetricsBindAddress: "0.0.0.0:9444", - }) - if err != nil { - setupLog.Error(err, "unable to start manager") - return 1 - } - - metricsConfig := connectinject.MetricsConfig{ - DefaultEnableMetrics: c.flagDefaultEnableMetrics, - DefaultEnableMetricsMerging: c.flagDefaultEnableMetricsMerging, - DefaultMergedMetricsPort: c.flagDefaultMergedMetricsPort, - DefaultPrometheusScrapePort: c.flagDefaultPrometheusScrapePort, - DefaultPrometheusScrapePath: c.flagDefaultPrometheusScrapePath, - } - - if err = (&connectinject.EndpointsController{ - Client: mgr.GetClient(), - ConsulClient: c.consulClient, - ConsulScheme: consulURL.Scheme, - ConsulPort: consulURL.Port(), - AllowK8sNamespacesSet: allowK8sNamespaces, - DenyK8sNamespacesSet: denyK8sNamespaces, - Log: ctrl.Log.WithName("controller").WithName("endpoints-controller"), - Scheme: mgr.GetScheme(), - ReleaseName: c.flagReleaseName, - ReleaseNamespace: c.flagReleaseNamespace, - MetricsConfig: metricsConfig, - Context: ctx, - ConsulClientCfg: cfg, - }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", connectinject.EndpointsController{}) - return 1 - } - - mgr.GetWebhookServer().CertDir = c.flagCertDir - - mgr.GetWebhookServer().Register("/mutate", - &webhook.Admission{Handler: &connectinject.Handler{ - ConsulClient: c.consulClient, - ImageConsul: c.flagConsulImage, - ImageEnvoy: c.flagEnvoyImage, - EnvoyExtraArgs: c.flagEnvoyExtraArgs, - ImageConsulK8S: c.flagConsulK8sImage, - RequireAnnotation: !c.flagDefaultInject, - AuthMethod: c.flagACLAuthMethod, - ConsulCACert: string(consulCACert), - DefaultProxyCPURequest: sidecarProxyCPURequest, - DefaultProxyCPULimit: sidecarProxyCPULimit, - DefaultProxyMemoryRequest: sidecarProxyMemoryRequest, - DefaultProxyMemoryLimit: sidecarProxyMemoryLimit, - InitContainerResources: initResources, - ConsulSidecarResources: consulSidecarResources, - EnableNamespaces: c.flagEnableNamespaces, - AllowK8sNamespacesSet: allowK8sNamespaces, - DenyK8sNamespacesSet: denyK8sNamespaces, - ConsulDestinationNamespace: c.flagConsulDestinationNamespace, - EnableK8SNSMirroring: c.flagEnableK8SNSMirroring, - K8SNSMirroringPrefix: c.flagK8SNSMirroringPrefix, - CrossNamespaceACLPolicy: c.flagCrossNamespaceACLPolicy, - MetricsConfig: metricsConfig, - Log: logger.Named("handler"), - }}) - - if err := mgr.Start(ctx); err != nil { - setupLog.Error(err, "problem running manager") - // Use an existing channel for ctrl exists in case manager failed to start properly. 
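The removed block here and its replacement below share the same controller-runtime shape: build a manager, register a reconciler via SetupWithManager, then block in Start. A stripped-down sketch of that shape, with a hypothetical stand-in for EndpointsController (reconciler signature per recent controller-runtime releases):

package sketch

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// EndpointsWatcher is a hypothetical stand-in for the EndpointsController
// registered in this command; it reconciles Endpoints events.
type EndpointsWatcher struct {
	client.Client
}

func (r *EndpointsWatcher) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	// Real logic would re-sync Consul registrations for req.NamespacedName here.
	return ctrl.Result{}, nil
}

// SetupWithManager wires the reconciler into the manager's watch machinery.
func (r *EndpointsWatcher) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&corev1.Endpoints{}).
		Complete(r)
}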
- ctrlExitCh <- fmt.Errorf("endpoints controller exited unexpectedly") - } + zapLogger := zap.New(zap.UseDevMode(true), zap.Level(zapcore.InfoLevel)) + ctrl.SetLogger(zapLogger) + klog.SetLogger(zapLogger) + listenSplits := strings.SplitN(c.flagListen, ":", 2) + if len(listenSplits) < 2 { + c.UI.Error(fmt.Sprintf("missing port in address: %s", c.flagListen)) + return 1 + } + port, err := strconv.Atoi(listenSplits[1]) + if err != nil { + c.UI.Error(fmt.Sprintf("unable to parse port string: %s", err)) + return 1 } - if c.flagEnableCleanupController { - cleanupResource := connectinject.CleanupResource{ - Log: logger.Named("cleanupResource"), - KubernetesClient: c.clientset, - Ctx: ctx, - ReconcilePeriod: c.flagCleanupControllerReconcilePeriod, - ConsulClient: c.consulClient, - ConsulScheme: consulURL.Scheme, - ConsulPort: consulURL.Port(), - EnableConsulNamespaces: c.flagEnableNamespaces, - } - cleanupCtrl := &controller.Controller{ - Log: logger.Named("cleanupController"), - Resource: &cleanupResource, - } - go func() { - cleanupCtrl.Run(ctx.Done()) - if ctx.Err() == nil { - ctrlExitCh <- fmt.Errorf("cleanup controller exited unexpectedly") - } - }() - } - - // Block until we get a signal or something errors. - select { - case sig := <-c.sigCh: - c.UI.Info(fmt.Sprintf("%s received, shutting down", sig)) - return 0 - - case err := <-ctrlExitCh: - c.UI.Error(fmt.Sprintf("controller error: %v", err)) + mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ + Scheme: scheme, + LeaderElection: false, + Host: listenSplits[0], + Port: port, + Logger: zapLogger, + MetricsBindAddress: "0.0.0.0:9444", + }) + if err != nil { + setupLog.Error(err, "unable to start manager") return 1 } -} -func (c *Command) interrupt() { - c.sendSignal(syscall.SIGINT) -} + metricsConfig := connectinject.MetricsConfig{ + DefaultEnableMetrics: c.flagDefaultEnableMetrics, + DefaultEnableMetricsMerging: c.flagDefaultEnableMetricsMerging, + DefaultMergedMetricsPort: c.flagDefaultMergedMetricsPort, + DefaultPrometheusScrapePort: c.flagDefaultPrometheusScrapePort, + DefaultPrometheusScrapePath: c.flagDefaultPrometheusScrapePath, + } + + if err = (&connectinject.EndpointsController{ + Client: mgr.GetClient(), + ConsulClient: c.consulClient, + ConsulScheme: consulURL.Scheme, + ConsulPort: consulURL.Port(), + AllowK8sNamespacesSet: allowK8sNamespaces, + DenyK8sNamespacesSet: denyK8sNamespaces, + Log: ctrl.Log.WithName("controller").WithName("endpoints-controller"), + Scheme: mgr.GetScheme(), + ReleaseName: c.flagReleaseName, + ReleaseNamespace: c.flagReleaseNamespace, + MetricsConfig: metricsConfig, + Context: ctx, + ConsulClientCfg: cfg, + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", connectinject.EndpointsController{}) + return 1 + } -func (c *Command) sendSignal(sig os.Signal) { - c.sigCh <- sig + mgr.GetWebhookServer().CertDir = c.flagCertDir + + mgr.GetWebhookServer().Register("/mutate", + &webhook.Admission{Handler: &connectinject.Handler{ + ConsulClient: c.consulClient, + ImageConsul: c.flagConsulImage, + ImageEnvoy: c.flagEnvoyImage, + EnvoyExtraArgs: c.flagEnvoyExtraArgs, + ImageConsulK8S: c.flagConsulK8sImage, + RequireAnnotation: !c.flagDefaultInject, + AuthMethod: c.flagACLAuthMethod, + ConsulCACert: string(consulCACert), + DefaultProxyCPURequest: sidecarProxyCPURequest, + DefaultProxyCPULimit: sidecarProxyCPULimit, + DefaultProxyMemoryRequest: sidecarProxyMemoryRequest, + DefaultProxyMemoryLimit: sidecarProxyMemoryLimit, + InitContainerResources: 
initResources, + ConsulSidecarResources: consulSidecarResources, + EnableNamespaces: c.flagEnableNamespaces, + AllowK8sNamespacesSet: allowK8sNamespaces, + DenyK8sNamespacesSet: denyK8sNamespaces, + ConsulDestinationNamespace: c.flagConsulDestinationNamespace, + EnableK8SNSMirroring: c.flagEnableK8SNSMirroring, + K8SNSMirroringPrefix: c.flagK8SNSMirroringPrefix, + CrossNamespaceACLPolicy: c.flagCrossNamespaceACLPolicy, + MetricsConfig: metricsConfig, + Log: logger.Named("handler"), + }}) + + if err := mgr.Start(ctx); err != nil { + setupLog.Error(err, "problem running manager") + return 1 + } + c.UI.Info("shutting down") + return 0 } func (c *Command) handleReady(rw http.ResponseWriter, req *http.Request) { @@ -512,15 +444,6 @@ func (c *Command) handleReady(rw http.ResponseWriter, req *http.Request) { rw.WriteHeader(204) } -func (c *Command) getCertificate(*tls.ClientHelloInfo) (*tls.Certificate, error) { - certRaw := c.cert.Load() - if certRaw == nil { - return nil, fmt.Errorf("No certificate available.") - } - - return certRaw.(*tls.Certificate), nil -} - func (c *Command) parseAndValidateResourceFlags() (corev1.ResourceRequirements, corev1.ResourceRequirements, error) { // Init container var initContainerCPULimit, initContainerCPURequest, initContainerMemoryLimit, initContainerMemoryRequest resource.Quantity diff --git a/subcommand/inject-connect/command_test.go b/subcommand/inject-connect/command_test.go index c775a713d2..aa3576fd58 100644 --- a/subcommand/inject-connect/command_test.go +++ b/subcommand/inject-connect/command_test.go @@ -1,14 +1,10 @@ package connectinject import ( - "fmt" "os" - "syscall" "testing" - "time" "github.com/hashicorp/consul/api" - "github.com/hashicorp/consul/sdk/freeport" "github.com/mitchellh/cli" "github.com/stretchr/testify/require" "k8s.io/client-go/kubernetes/fake" @@ -215,63 +211,3 @@ func TestRun_ValidationConsulHTTPAddr(t *testing.T) { require.Equal(t, 1, code) require.Contains(t, ui.ErrorWriter.String(), "error parsing consul address \"http://%\": parse \"http://%\": invalid URL escape \"%") } - -// Test that when cleanup controller is enabled that SIGINT/SIGTERM exits the -// command cleanly. -func TestRun_CommandExitsCleanlyAfterSignal(t *testing.T) { - // TODO: This test will be removed when the cleanupController is removed. - t.Skip("This test will be rewritten when the manager handles all signal handling") - t.Run("SIGINT", testSignalHandling(syscall.SIGINT)) - t.Run("SIGTERM", testSignalHandling(syscall.SIGTERM)) -} - -func testSignalHandling(sig os.Signal) func(*testing.T) { - return func(t *testing.T) { - k8sClient := fake.NewSimpleClientset() - ui := cli.NewMockUi() - cmd := Command{ - UI: ui, - clientset: k8sClient, - } - ports := freeport.MustTake(1) - - // NOTE: This url doesn't matter because Consul is never called. - os.Setenv(api.HTTPAddrEnvName, "http://0.0.0.0:9999") - defer os.Unsetenv(api.HTTPAddrEnvName) - - // Start the command asynchronously and then we'll send an interrupt. - exitChan := runCommandAsynchronously(&cmd, []string{ - "-consul-k8s-image", "hashicorp/consul-k8s", "-consul-image", "foo", "-envoy-image", "envoy:1.16.0", - "-listen", fmt.Sprintf(":%d", ports[0]), - }) - - // Send the signal - cmd.sendSignal(sig) - - // Assert that it exits cleanly or timeout. - select { - case exitCode := <-exitChan: - require.Equal(t, 0, exitCode, ui.ErrorWriter.String()) - case <-time.After(time.Second * 2): - // Fail if the stopCh was not caught. 
- require.Fail(t, "timeout waiting for command to exit") - } - } -} - -// This function starts the command asynchronously and returns a non-blocking chan. -// When finished, the command will send its exit code to the channel. -// Note that it's the responsibility of the caller to terminate the command by calling stopCommand, -// otherwise it can run forever. -func runCommandAsynchronously(cmd *Command, args []string) chan int { - // We have to run cmd.init() to ensure that the channel the command is - // using to watch for os interrupts is initialized. If we don't do this, - // then if stopCommand is called immediately, it will block forever - // because it calls interrupt() which will attempt to send on a nil channel. - cmd.init() - exitChan := make(chan int, 1) - go func() { - exitChan <- cmd.Run(args) - }() - return exitChan -} diff --git a/subcommand/server-acl-init/command.go b/subcommand/server-acl-init/command.go index 373c2bb29d..167504ba91 100644 --- a/subcommand/server-acl-init/command.go +++ b/subcommand/server-acl-init/command.go @@ -80,8 +80,6 @@ type Command struct { // Flag to support a custom bootstrap token flagBootstrapTokenFile string - flagEnableCleanupController bool - flagLogLevel string flagTimeout time.Duration @@ -194,9 +192,6 @@ func (c *Command) init() { "Path to file containing ACL token for creating policies and tokens. This token must have 'acl:write' permissions."+ "When provided, servers will not be bootstrapped and their policies and tokens will not be updated.") - c.flags.BoolVar(&c.flagEnableCleanupController, "enable-cleanup-controller", true, - "Toggle for adding ACL rules for the cleanup controller to the connect ACL token. Requires -create-inject-token to be also be set.") - c.flags.DurationVar(&c.flagTimeout, "timeout", 10*time.Minute, "How long we'll try to bootstrap ACLs for before timing out, e.g. 
1ms, 2s, 3m") c.flags.StringVar(&c.flagLogLevel, "log-level", "info", diff --git a/subcommand/server-acl-init/command_test.go b/subcommand/server-acl-init/command_test.go index 464b268aaf..01464c4141 100644 --- a/subcommand/server-acl-init/command_test.go +++ b/subcommand/server-acl-init/command_test.go @@ -245,14 +245,6 @@ func TestRun_TokensPrimaryDC(t *testing.T) { SecretNames: []string{resourcePrefix + "-connect-inject-acl-token"}, LocalToken: true, }, - { - TestName: "Cleanup controller token", - TokenFlags: []string{"-create-inject-token", "-enable-cleanup-controller"}, - PolicyNames: []string{"connect-inject-token"}, - PolicyDCs: []string{"dc1"}, - SecretNames: []string{resourcePrefix + "-connect-inject-acl-token"}, - LocalToken: true, - }, } for _, c := range cases { t.Run(c.TestName, func(t *testing.T) { @@ -408,14 +400,6 @@ func TestRun_TokensReplicatedDC(t *testing.T) { SecretNames: []string{resourcePrefix + "-connect-inject-acl-token"}, LocalToken: true, }, - { - TestName: "Cleanup controller ACL token", - TokenFlags: []string{"-create-inject-token", "-enable-cleanup-controller"}, - PolicyNames: []string{"connect-inject-token-dc2"}, - PolicyDCs: []string{"dc2"}, - SecretNames: []string{resourcePrefix + "-connect-inject-acl-token"}, - LocalToken: true, - }, { TestName: "Controller token", TokenFlags: []string{"-create-controller-token"}, @@ -553,12 +537,6 @@ func TestRun_TokensWithProvidedBootstrapToken(t *testing.T) { PolicyNames: []string{"acl-replication-token"}, SecretNames: []string{resourcePrefix + "-acl-replication-acl-token"}, }, - { - TestName: "Cleanup controller ACL token", - TokenFlags: []string{"-create-inject-token", "-enable-cleanup-controller"}, - PolicyNames: []string{"connect-inject-token"}, - SecretNames: []string{resourcePrefix + "-connect-inject-acl-token"}, - }, { TestName: "Controller token", TokenFlags: []string{"-create-controller-token"}, diff --git a/subcommand/server-acl-init/rules.go b/subcommand/server-acl-init/rules.go index 07e6594a35..a324a1731a 100644 --- a/subcommand/server-acl-init/rules.go +++ b/subcommand/server-acl-init/rules.go @@ -15,7 +15,6 @@ type rulesData struct { InjectEnableNSMirroring bool InjectNSMirroringPrefix string SyncConsulNodeName string - EnableCleanupController bool } type gatewayRulesData struct { @@ -207,8 +206,6 @@ namespace "{{ .SyncConsulDestNS }}" { func (c *Command) injectRules() (string, error) { // The Connect injector needs permissions to create namespaces when namespaces are enabled. // It must also create/update service health checks via the endpoints controller. - // If the cleanup controller is enabled, it must be able to delete service - // instances from every client. injectRulesTpl := ` {{- if .EnableNamespaces }} operator = "write" @@ -290,7 +287,6 @@ func (c *Command) rulesData() rulesData { InjectEnableNSMirroring: c.flagEnableInjectK8SNSMirroring, InjectNSMirroringPrefix: c.flagInjectK8SNSMirroringPrefix, SyncConsulNodeName: c.flagSyncConsulNodeName, - EnableCleanupController: c.flagEnableCleanupController, } } diff --git a/subcommand/server-acl-init/rules_test.go b/subcommand/server-acl-init/rules_test.go index ddfa51cc9a..be0a434fd4 100644 --- a/subcommand/server-acl-init/rules_test.go +++ b/subcommand/server-acl-init/rules_test.go @@ -494,27 +494,14 @@ namespace_prefix "prefix-" { } } -// Test the inject rules through the 4 permutations of Namespaces/controller enabled or disabled. +// Test the inject rules with namespaces enabled or disabled. 
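injectRules renders these policies from a Go text/template keyed on EnableNamespaces; a self-contained approximation of that rendering (template body abridged from injectRulesTpl above, not the exact production template):

package main

import (
	"bytes"
	"fmt"
	"text/template"
)

// Abridged from injectRulesTpl: namespace support toggles the operator rule
// and wraps the service rule in namespace_prefix.
const injectRulesTpl = `
{{- if .EnableNamespaces }}
operator = "write"
{{- end }}
node_prefix "" {
  policy = "write"
}
{{- if .EnableNamespaces }}
namespace_prefix "" {
{{- end }}
  service_prefix "" {
    policy = "write"
  }
{{- if .EnableNamespaces }}
}
{{- end }}
`

func main() {
	tpl := template.Must(template.New("inject").Parse(injectRulesTpl))
	for _, enableNS := range []bool{false, true} {
		var buf bytes.Buffer
		if err := tpl.Execute(&buf, struct{ EnableNamespaces bool }{enableNS}); err != nil {
			panic(err)
		}
		fmt.Printf("EnableNamespaces=%t:%s\n", enableNS, buf.String())
	}
}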
func TestInjectRules(t *testing.T) { cases := []struct { - EnableNamespaces bool - EnableCleanupController bool - Expected string + EnableNamespaces bool + Expected string }{ { - EnableNamespaces: false, - EnableCleanupController: false, - Expected: ` -node_prefix "" { - policy = "write" -} - service_prefix "" { - policy = "write" - }`, - }, - { - EnableNamespaces: false, - EnableCleanupController: true, + EnableNamespaces: false, Expected: ` node_prefix "" { policy = "write" @@ -524,22 +511,7 @@ node_prefix "" { }`, }, { - EnableNamespaces: true, - EnableCleanupController: false, - Expected: ` -operator = "write" -node_prefix "" { - policy = "write" -} -namespace_prefix "" { - service_prefix "" { - policy = "write" - } -}`, - }, - { - EnableNamespaces: true, - EnableCleanupController: true, + EnableNamespaces: true, Expected: ` operator = "write" node_prefix "" { @@ -554,14 +526,12 @@ namespace_prefix "" { } for _, tt := range cases { - caseName := fmt.Sprintf("ns=%t cleanup=%t", - tt.EnableNamespaces, tt.EnableCleanupController) + caseName := fmt.Sprintf("ns=%t", tt.EnableNamespaces) t.Run(caseName, func(t *testing.T) { require := require.New(t) cmd := Command{ - flagEnableNamespaces: tt.EnableNamespaces, - flagEnableCleanupController: tt.EnableCleanupController, + flagEnableNamespaces: tt.EnableNamespaces, } injectorRules, err := cmd.injectRules()