From 4b196202a6e116cfaf3e1e20c35837a368c58f51 Mon Sep 17 00:00:00 2001 From: DanStough Date: Mon, 11 Sep 2023 17:05:10 -0400 Subject: [PATCH] feat: v2 mesh-init command --- control-plane/commands.go | 8 +- control-plane/consul/dataplane_client.go | 28 ++ control-plane/consul/dataplane_client_test.go | 206 +++++++++ control-plane/go.mod | 22 +- control-plane/go.sum | 40 +- control-plane/subcommand/mesh-init/command.go | 283 ++++++++++++ .../subcommand/mesh-init/command_ent_test.go | 118 +++++ .../subcommand/mesh-init/command_test.go | 425 ++++++++++++++++++ 8 files changed, 1098 insertions(+), 32 deletions(-) create mode 100644 control-plane/consul/dataplane_client.go create mode 100644 control-plane/consul/dataplane_client_test.go create mode 100644 control-plane/subcommand/mesh-init/command.go create mode 100644 control-plane/subcommand/mesh-init/command_ent_test.go create mode 100644 control-plane/subcommand/mesh-init/command_test.go diff --git a/control-plane/commands.go b/control-plane/commands.go index e2bcb0f693..01f5163bc3 100644 --- a/control-plane/commands.go +++ b/control-plane/commands.go @@ -6,6 +6,8 @@ package main import ( "os" + "github.com/mitchellh/cli" + cmdACLInit "github.com/hashicorp/consul-k8s/control-plane/subcommand/acl-init" cmdConnectInit "github.com/hashicorp/consul-k8s/control-plane/subcommand/connect-init" cmdConsulLogout "github.com/hashicorp/consul-k8s/control-plane/subcommand/consul-logout" @@ -18,6 +20,7 @@ import ( cmdGossipEncryptionAutogenerate "github.com/hashicorp/consul-k8s/control-plane/subcommand/gossip-encryption-autogenerate" cmdInjectConnect "github.com/hashicorp/consul-k8s/control-plane/subcommand/inject-connect" cmdInstallCNI "github.com/hashicorp/consul-k8s/control-plane/subcommand/install-cni" + cmdMeshInit "github.com/hashicorp/consul-k8s/control-plane/subcommand/mesh-init" cmdPartitionInit "github.com/hashicorp/consul-k8s/control-plane/subcommand/partition-init" cmdServerACLInit 
"github.com/hashicorp/consul-k8s/control-plane/subcommand/server-acl-init" cmdSyncCatalog "github.com/hashicorp/consul-k8s/control-plane/subcommand/sync-catalog" @@ -25,7 +28,6 @@ import ( cmdVersion "github.com/hashicorp/consul-k8s/control-plane/subcommand/version" webhookCertManager "github.com/hashicorp/consul-k8s/control-plane/subcommand/webhook-cert-manager" "github.com/hashicorp/consul-k8s/control-plane/version" - "github.com/mitchellh/cli" ) // Commands is the mapping of all available consul-k8s commands. @@ -43,6 +45,10 @@ func init() { return &cmdConnectInit.Command{UI: ui}, nil }, + "mesh-init": func() (cli.Command, error) { + return &cmdMeshInit.Command{UI: ui}, nil + }, + "inject-connect": func() (cli.Command, error) { return &cmdInjectConnect.Command{UI: ui}, nil }, diff --git a/control-plane/consul/dataplane_client.go b/control-plane/consul/dataplane_client.go new file mode 100644 index 0000000000..628d353252 --- /dev/null +++ b/control-plane/consul/dataplane_client.go @@ -0,0 +1,28 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package consul + +import ( + "fmt" + + "github.com/hashicorp/consul/proto-public/pbdataplane" +) + +// NewDataplaneServiceClient creates a pbdataplane.DataplaneServiceClient for gathering proxy bootstrap config. +// It is initialized with a consul-server-connection-manager Watcher to continuously find Consul +// server addresses. +func NewDataplaneServiceClient(watcher ServerConnectionManager) (pbdataplane.DataplaneServiceClient, error) { + + // We recycle the GRPC connection from the discovery client because it + // should have all the necessary dial options, including the resolver that + // continuously updates Consul server addresses. 
Otherwise, a lot of code from consul-server-connection-manager + // would need to be duplicated + state, err := watcher.State() + if err != nil { + return nil, fmt.Errorf("unable to get connection manager state: %w", err) + } + dpClient := pbdataplane.NewDataplaneServiceClient(state.GRPCConn) + + return dpClient, nil +} diff --git a/control-plane/consul/dataplane_client_test.go b/control-plane/consul/dataplane_client_test.go new file mode 100644 index 0000000000..20c9166925 --- /dev/null +++ b/control-plane/consul/dataplane_client_test.go @@ -0,0 +1,206 @@ +package consul + +import ( + "context" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/consul-server-connection-manager/discovery" + pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1" + "github.com/hashicorp/consul/proto-public/pbdataplane" + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1" + "github.com/hashicorp/consul/proto-public/pbresource" + "github.com/hashicorp/consul/sdk/testutil" + "github.com/hashicorp/go-hclog" + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/testing/protocmp" + "google.golang.org/protobuf/types/known/anypb" +) + +func Test_NewDataplaneServiceClient(t *testing.T) { + + var serverConfig *testutil.TestServerConfig + server, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { + c.Experiments = []string{"resource-apis"} + serverConfig = c + }) + require.NoError(t, err) + defer server.Stop() + + server.WaitForLeader(t) + server.WaitForActiveCARoot(t) + + t.Logf("server grpc address on %d", serverConfig.Ports.GRPC) + + // Create discovery configuration + discoverConfig := discovery.Config{ + Addresses: "127.0.0.1", + GRPCPort: serverConfig.Ports.GRPC, + } + + opts := hclog.LoggerOptions{Name: "dataplane-service-client"} + logger := hclog.New(&opts) + + watcher, err := discovery.NewWatcher(context.Background(), discoverConfig, logger) + require.NoError(t, err) + 
require.NotNil(t, watcher) + + defer watcher.Stop() + go watcher.Run() + + // Create a workload and create a proxyConfiguration + createWorkload(t, watcher, "foo") + pc := createProxyConfiguration(t, watcher, "foo") + + client, err := NewDataplaneServiceClient(watcher) + require.NoError(t, err) + require.NotNil(t, client) + require.NotNil(t, watcher) + + req := &pbdataplane.GetEnvoyBootstrapParamsRequest{ + ProxyId: "foo", + Namespace: "default", + Partition: "default", + } + + res, err := client.GetEnvoyBootstrapParams(context.Background(), req) + require.NoError(t, err) + require.NotNil(t, res) + require.Equal(t, "foo", res.GetIdentity()) + require.Equal(t, "default", res.GetNamespace()) + require.Equal(t, "default", res.GetPartition()) + + if diff := cmp.Diff(pc.BootstrapConfig, res.GetBootstrapConfig(), protocmp.Transform()); diff != "" { + t.Errorf("unexpected difference:\n%v", diff) + } + + // NOTE: currently it isn't possible to test that the grpc connection responds to changes in the + // discovery server. The discovery response only includes the IP address of the host, so all servers + // for a local test are de-duplicated as a single entry. 
+} + +func createWorkload(t *testing.T, watcher ServerConnectionManager, name string) { + + client, err := NewResourceServiceClient(watcher) + require.NoError(t, err) + + workload := &pbcatalog.Workload{ + Addresses: []*pbcatalog.WorkloadAddress{ + {Host: "10.0.0.1", Ports: []string{"public", "admin", "mesh"}}, + }, + Ports: map[string]*pbcatalog.WorkloadPort{ + "public": { + Port: 80, + Protocol: pbcatalog.Protocol_PROTOCOL_TCP, + }, + "admin": { + Port: 8080, + Protocol: pbcatalog.Protocol_PROTOCOL_TCP, + }, + "mesh": { + Port: 20000, + Protocol: pbcatalog.Protocol_PROTOCOL_MESH, + }, + }, + NodeName: "k8s-node-0-virtual", + Identity: name, + } + + id := &pbresource.ID{ + Name: name, + Type: &pbresource.Type{ + Group: "catalog", + GroupVersion: "v1alpha1", + Kind: "Workload", + }, + Tenancy: &pbresource.Tenancy{ + Partition: "default", + Namespace: "default", + PeerName: "local", + }, + } + + proto, err := anypb.New(workload) + require.NoError(t, err) + + req := &pbresource.WriteRequest{ + Resource: &pbresource.Resource{ + Id: id, + Data: proto, + }, + } + + _, err = client.Write(context.Background(), req) + require.NoError(t, err) + + resourceHasPersisted(t, client, id) +} + +func createProxyConfiguration(t *testing.T, watcher ServerConnectionManager, name string) *pbmesh.ProxyConfiguration { + + client, err := NewResourceServiceClient(watcher) + require.NoError(t, err) + + pc := &pbmesh.ProxyConfiguration{ + Workloads: &pbcatalog.WorkloadSelector{ + Names: []string{"foo"}, + }, + BootstrapConfig: &pbmesh.BootstrapConfig{ + StatsBindAddr: "127.0.0.2:1234", + ReadyBindAddr: "127.0.0.3:5678", + }, + } + + id := &pbresource.ID{ + Name: name, + Type: &pbresource.Type{ + Group: "mesh", + GroupVersion: "v1alpha1", + Kind: "ProxyConfiguration", + }, + Tenancy: &pbresource.Tenancy{ + Partition: "default", + Namespace: "default", + PeerName: "local", + }, + } + + proto, err := anypb.New(pc) + require.NoError(t, err) + + req := &pbresource.WriteRequest{ + Resource: 
&pbresource.Resource{ + Id: id, + Data: proto, + }, + } + + _, err = client.Write(context.Background(), req) + require.NoError(t, err) + + resourceHasPersisted(t, client, id) + return pc +} + +// resourceHasPersisted checks that a recently written resource exists in the Consul +// state store with a valid version. This must be true before a resource is overwritten +// or deleted. +// TODO: refactor so that there isn't an import cycle when using test.ResourceHasPersisted +func resourceHasPersisted(t *testing.T, client pbresource.ResourceServiceClient, id *pbresource.ID) { + req := &pbresource.ReadRequest{Id: id} + + require.Eventually(t, func() bool { + res, err := client.Read(context.Background(), req) + if err != nil { + return false + } + + if res.GetResource().GetVersion() == "" { + return false + } + + return true + }, 5*time.Second, + time.Second) +} diff --git a/control-plane/go.mod b/control-plane/go.mod index 2fafd2ed1c..c31589838e 100644 --- a/control-plane/go.mod +++ b/control-plane/go.mod @@ -15,7 +15,7 @@ require ( github.com/hashicorp/consul-k8s/control-plane/cni v0.0.0-20230825213844-4ea04860c5ed github.com/hashicorp/consul-server-connection-manager v0.1.4 github.com/hashicorp/consul/api v1.10.1-0.20230906155245-56917eb4c968 - github.com/hashicorp/consul/proto-public v0.1.2-0.20230906155245-56917eb4c968 + github.com/hashicorp/consul/proto-public v0.1.2-0.20230907203049-2e7d95108602 github.com/hashicorp/consul/sdk v0.14.1 github.com/hashicorp/go-bexpr v0.1.11 github.com/hashicorp/go-discover v0.0.0-20230519164032-214571b6a530 @@ -23,6 +23,7 @@ require ( github.com/hashicorp/go-multierror v1.1.1 github.com/hashicorp/go-netaddrs v0.1.0 github.com/hashicorp/go-rootcerts v1.0.2 + github.com/hashicorp/go-uuid v1.0.3 github.com/hashicorp/go-version v1.6.0 github.com/hashicorp/serf v0.10.1 github.com/hashicorp/vault/api v1.8.3 @@ -33,8 +34,8 @@ require ( github.com/mitchellh/mapstructure v1.5.0 github.com/stretchr/testify v1.8.3 go.uber.org/zap v1.24.0 - 
golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53 - golang.org/x/text v0.11.0 + golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 + golang.org/x/text v0.12.0 golang.org/x/time v0.3.0 gomodules.xyz/jsonpatch/v2 v2.3.0 google.golang.org/grpc v1.55.0 @@ -104,7 +105,6 @@ require ( github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 // indirect github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect github.com/hashicorp/go-sockaddr v1.0.2 // indirect - github.com/hashicorp/go-uuid v1.0.3 // indirect github.com/hashicorp/golang-lru v0.5.4 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/hashicorp/mdns v1.0.4 // indirect @@ -151,14 +151,14 @@ require ( go.opencensus.io v0.24.0 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.6.0 // indirect - golang.org/x/crypto v0.11.0 // indirect - golang.org/x/mod v0.9.0 // indirect - golang.org/x/net v0.13.0 // indirect + golang.org/x/crypto v0.12.0 // indirect + golang.org/x/mod v0.12.0 // indirect + golang.org/x/net v0.14.0 // indirect golang.org/x/oauth2 v0.6.0 // indirect - golang.org/x/sync v0.2.0 // indirect - golang.org/x/sys v0.10.0 // indirect - golang.org/x/term v0.10.0 // indirect - golang.org/x/tools v0.7.0 // indirect + golang.org/x/sync v0.3.0 // indirect + golang.org/x/sys v0.11.0 // indirect + golang.org/x/term v0.11.0 // indirect + golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 // indirect google.golang.org/api v0.114.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect diff --git a/control-plane/go.sum b/control-plane/go.sum index 435a090003..9d42dff137 100644 --- a/control-plane/go.sum +++ b/control-plane/go.sum @@ -265,8 +265,8 @@ github.com/hashicorp/consul-server-connection-manager v0.1.4 h1:wrcSRV6WGXFBNpNb github.com/hashicorp/consul-server-connection-manager v0.1.4/go.mod h1:LMqHkALoLP0HUQKOG21xXYr0YPUayIQIHNTlmxG100E= github.com/hashicorp/consul/api 
v1.10.1-0.20230906155245-56917eb4c968 h1:lQ7QmlL0N4/ftLBex8n73Raji29o7EVssqCoeeczKac= github.com/hashicorp/consul/api v1.10.1-0.20230906155245-56917eb4c968/go.mod h1:NZJGRFYruc/80wYowkPFCp1LbGmJC9L8izrwfyVx/Wg= -github.com/hashicorp/consul/proto-public v0.1.2-0.20230906155245-56917eb4c968 h1:J6FLkHXcGd80fUbouFn3kklR3GGHVV0OCyjItyZS8h0= -github.com/hashicorp/consul/proto-public v0.1.2-0.20230906155245-56917eb4c968/go.mod h1:ENwzmloQTUPAYPu7nC1mli3VY0Ny9QNi/FSzJ+KlZD0= +github.com/hashicorp/consul/proto-public v0.1.2-0.20230907203049-2e7d95108602 h1:qxGr1c4JHUDob7E1dtnaKahBTjMzK2mHrKgzKj7RXXU= +github.com/hashicorp/consul/proto-public v0.1.2-0.20230907203049-2e7d95108602/go.mod h1:Tdq0I2Nn8RVu7vc1D/N35YEmRfyCr0DUaf1SfOQnLtg= github.com/hashicorp/consul/sdk v0.4.1-0.20230825164720-ecdcde430924 h1:gkb6/ix0Tg1Th5FTjyq4QklLgrtIVQ/TUB0kbhIcPsY= github.com/hashicorp/consul/sdk v0.4.1-0.20230825164720-ecdcde430924/go.mod h1:vFt03juSzocLRFo59NkeQHHmQa6+g7oU0pfzdI1mUhg= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -572,8 +572,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk= +golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -584,8 +584,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53 h1:5llv2sWeaMSnA3w2kS57ouQQ4pudlXrR0dCgw51QK9o= -golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= +golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 h1:m64FZMko/V45gv0bNmrNYoDEq8U5YUhetc9cBWKS1TQ= +golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63/go.mod h1:0v4NqG35kSWCMzLaMeX+IQrlSnVE/bqGSyC2cz/9Le8= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -608,8 +608,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs= -golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= 
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -654,8 +654,8 @@ golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY= -golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= +golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= +golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -677,8 +677,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= -golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0 
h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -742,13 +742,13 @@ golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= +golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0= +golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text 
v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -757,8 +757,8 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= +golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -811,8 +811,8 @@ golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= -golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= +golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 h1:Vve/L0v7CXXuxUmaMGIEK/dEeq7uiqb5qBgQrZzIE7E= +golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors 
v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/control-plane/subcommand/mesh-init/command.go b/control-plane/subcommand/mesh-init/command.go new file mode 100644 index 0000000000..93b1c878b1 --- /dev/null +++ b/control-plane/subcommand/mesh-init/command.go @@ -0,0 +1,283 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package meshinit + +import ( + "context" + "encoding/json" + "errors" + "flag" + "fmt" + "net" + "os" + "os/signal" + "sync" + "syscall" + "time" + + "github.com/cenkalti/backoff" + "github.com/hashicorp/consul-server-connection-manager/discovery" + "github.com/hashicorp/consul/proto-public/pbdataplane" + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1" + "github.com/hashicorp/consul/sdk/iptables" + "github.com/hashicorp/go-hclog" + "github.com/mitchellh/cli" + + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" + "github.com/hashicorp/consul-k8s/control-plane/consul" + "github.com/hashicorp/consul-k8s/control-plane/subcommand/common" + "github.com/hashicorp/consul-k8s/control-plane/subcommand/flags" + "github.com/hashicorp/consul-k8s/control-plane/version" +) + +const ( + // The number of times to attempt to read this proxy registration (120s). + defaultMaxPollingRetries = 120 +) + +type Command struct { + UI cli.Ui + + flagProxyName string + flagConsulNamespace string + flagConsulPartition string + + maxPollingAttempts uint64 // Number of times to poll Consul for proxy registrations. + + flagRedirectTrafficConfig string + flagLogLevel string + flagLogJSON bool + + flagSet *flag.FlagSet + consul *flags.ConsulFlags + + once sync.Once + help string + logger hclog.Logger + + watcher *discovery.Watcher + + // Only used in tests. 
+ iptablesProvider iptables.Provider + iptablesConfig iptables.Config +} + +func (c *Command) init() { + c.flagSet = flag.NewFlagSet("", flag.ContinueOnError) + + // V2 Flags + c.flagSet.StringVar(&c.flagProxyName, "proxy-name", os.Getenv("PROXY_NAME"), "The Consul proxy name. This is the K8s Pod name, which is also the name of the Workload in Consul. (Required)") + + // Universal flags + c.flagSet.StringVar(&c.flagRedirectTrafficConfig, "redirect-traffic-config", os.Getenv("CONSUL_REDIRECT_TRAFFIC_CONFIG"), "Config (in JSON format) to configure iptables for this pod.") + c.flagSet.StringVar(&c.flagLogLevel, "log-level", "info", + "Log verbosity level. Supported values (in order of detail) are \"trace\", "+ + "\"debug\", \"info\", \"warn\", and \"error\".") + c.flagSet.BoolVar(&c.flagLogJSON, "log-json", false, + "Enable or disable JSON output format for logging.") + + if c.maxPollingAttempts == 0 { + c.maxPollingAttempts = defaultMaxPollingRetries + } + + c.consul = &flags.ConsulFlags{} + flags.Merge(c.flagSet, c.consul.Flags()) + c.help = flags.Usage(help, c.flagSet) +} + +func (c *Command) Run(args []string) int { + c.once.Do(c.init) + + if err := c.flagSet.Parse(args); err != nil { + return 1 + } + // Validate flags + if err := c.validateFlags(); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + if c.consul.Namespace == "" { + c.consul.Namespace = constants.DefaultConsulNS + } + if c.consul.Partition == "" { + c.consul.Partition = constants.DefaultConsulPartition + } + + // Set up logging. + if c.logger == nil { + var err error + c.logger, err = common.Logger(c.flagLogLevel, c.flagLogJSON) + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + } + + // Create Consul API config object. + consulConfig := c.consul.ConsulClientConfig() + + // Create a context to be used by the processes started in this command. 
+ ctx, cancelFunc := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM) + defer cancelFunc() + + // Start Consul server Connection manager. + serverConnMgrCfg, err := c.consul.ConsulServerConnMgrConfig() + // Disable server watch because we only need to get server IPs once. + serverConnMgrCfg.ServerWatchDisabled = true + if err != nil { + c.UI.Error(fmt.Sprintf("unable to create config for consul-server-connection-manager: %s", err)) + return 1 + } + if c.watcher == nil { + c.watcher, err = discovery.NewWatcher(ctx, serverConnMgrCfg, c.logger.Named("consul-server-connection-manager")) + if err != nil { + c.UI.Error(fmt.Sprintf("unable to create Consul server watcher: %s", err)) + return 1 + } + go c.watcher.Run() // The actual ACL login happens here + defer c.watcher.Stop() + } + + state, err := c.watcher.State() + if err != nil { + c.logger.Error("Unable to get state from consul-server-connection-manager", "error", err) + return 1 + } + + consulClient, err := consul.NewClientFromConnMgrState(consulConfig, state) + if err != nil { + c.logger.Error("Unable to get client connection", "error", err) + return 1 + } + + if version.IsFIPS() { + // make sure we are also using FIPS Consul + var versionInfo map[string]interface{} + _, err := consulClient.Raw().Query("/v1/agent/version", versionInfo, nil) + if err != nil { + c.logger.Warn("This is a FIPS build of consul-k8s, which should be used with FIPS Consul. Unable to verify FIPS Consul while setting up Consul API client.") + } + if val, ok := versionInfo["FIPS"]; !ok || val == "" { + c.logger.Warn("This is a FIPS build of consul-k8s, which should be used with FIPS Consul. A non-FIPS version of Consul was detected.") + } + } + + // todo (agentless): this should eventually be passed to consul-dataplane as a string so we don't need to write it to file. 
+ if c.consul.UseTLS && c.consul.CACertPEM != "" { + if err = common.WriteFileWithPerms(constants.ConsulCAFile, c.consul.CACertPEM, 0444); err != nil { + c.logger.Error("error writing CA cert file", "error", err) + return 1 + } + } + + dc, err := consul.NewDataplaneServiceClient(c.watcher) + if err != nil { + c.logger.Error("failed to create resource client", "error", err) + return 1 + } + + var res pbdataplane.GetEnvoyBootstrapParamsResponse + if err := backoff.Retry(c.getBootstrapParams(dc, &res), backoff.WithMaxRetries(backoff.NewConstantBackOff(1*time.Second), c.maxPollingAttempts)); err != nil { + c.logger.Error("Timed out waiting for bootstrap parameters", "error", err) + return 1 + } + + if c.flagRedirectTrafficConfig != "" { + err := c.applyTrafficRedirectionRules(res.GetBootstrapConfig()) // BootstrapConfig is always populated non-nil from the RPC + if err != nil { + c.logger.Error("error applying traffic redirection rules", "err", err) + return 1 + } + } + + c.logger.Info("Proxy initialization completed") + return 0 +} + +func (c *Command) validateFlags() error { + if c.flagProxyName == "" { + return errors.New("-proxy-name must be set") + } + return nil +} + +func (c *Command) Synopsis() string { return synopsis } +func (c *Command) Help() string { + c.once.Do(c.init) + return c.help +} + +func (c *Command) getBootstrapParams( + client pbdataplane.DataplaneServiceClient, + response *pbdataplane.GetEnvoyBootstrapParamsResponse) backoff.Operation { + + return func() error { + req := &pbdataplane.GetEnvoyBootstrapParamsRequest{ + ProxyId: c.flagProxyName, + Namespace: c.consul.Namespace, + Partition: c.consul.Partition, + } + res, err := client.GetEnvoyBootstrapParams(context.Background(), req) + if err != nil { + c.logger.Error("Unable to get bootstrap parameters", "error", err) + return err + } + *response = *res + return nil + } +} + +// This below implementation is loosely based on +// 
https://github.com/hashicorp/consul/blob/fe2d41ddad9ba2b8ff86cbdebbd8f05855b1523c/command/connect/redirecttraffic/redirect_traffic.go#L136. +func (c *Command) applyTrafficRedirectionRules(config *pbmesh.BootstrapConfig) error { + + err := json.Unmarshal([]byte(c.flagRedirectTrafficConfig), &c.iptablesConfig) + if err != nil { + return err + } + if c.iptablesProvider != nil { + c.iptablesConfig.IptablesProvider = c.iptablesProvider + } + + // TODO: provide dynamic updates to the c.iptablesConfig.ProxyOutboundPort + // We currently don't have a V2 endpoint that can gather the fully synthesized ProxyConfiguration. + // We need this to dynamically set c.iptablesConfig.ProxyOutboundPort with the outbound port configuration from + // pbmesh.DynamicConfiguration.TransparentProxy.OutboundListenerPort. + // We would either need to grab another resource that has this information rendered in it, or add + // pbmesh.DynamicConfiguration to the GetBootstrapParameters rpc. + // Right now this is an edge case because the mesh webhook configured the flagRedirectTrafficConfig with the default + // 15001 port. + + // TODO: provide dynamic updates to the c.iptablesConfig.ProxyInboundPort + // This is the `mesh` port in the workload resource. + // Right now this will always be the default port (20000) + + if config.StatsBindAddr != "" { + _, port, err := net.SplitHostPort(config.StatsBindAddr) + if err != nil { + return fmt.Errorf("failed parsing host and port from StatsBindAddr: %s", err) + } + + c.iptablesConfig.ExcludeInboundPorts = append(c.iptablesConfig.ExcludeInboundPorts, port) + } + + // Configure any relevant information from the proxy service + err = iptables.Setup(c.iptablesConfig) + if err != nil { + return err + } + c.logger.Info("Successfully applied traffic redirection rules") + return nil +} + +const synopsis = "Inject mesh init command." +const help = ` +Usage: consul-k8s-control-plane mesh-init [options] + + Bootstraps mesh-injected pod components. 
+ Uses V2 Consul Catalog APIs. + Not intended for stand-alone use. +` diff --git a/control-plane/subcommand/mesh-init/command_ent_test.go b/control-plane/subcommand/mesh-init/command_ent_test.go new file mode 100644 index 0000000000..7136eebca1 --- /dev/null +++ b/control-plane/subcommand/mesh-init/command_ent_test.go @@ -0,0 +1,118 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build enterprise + +package meshinit + +import ( + "context" + "strconv" + "testing" + + "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/sdk/testutil" + "github.com/mitchellh/cli" + "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" + "github.com/hashicorp/consul-k8s/control-plane/consul" + "github.com/hashicorp/consul-k8s/control-plane/helper/test" + "github.com/hashicorp/consul-k8s/control-plane/namespaces" +) + +func TestRun_WithNamespaces(t *testing.T) { + t.Parallel() + cases := []struct { + name string + consulNamespace string + consulPartition string + }{ + { + name: "default ns, default partition", + consulNamespace: constants.DefaultConsulNS, + consulPartition: constants.DefaultConsulPartition, + }, + { + name: "non-default ns, default partition", + consulNamespace: "bar", + consulPartition: constants.DefaultConsulPartition, + }, + { + name: "non-default ns, non-default partition", + consulNamespace: "bar", + consulPartition: "baz", + }, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + + var serverCfg *testutil.TestServerConfig + testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { + c.Experiments = []string{"resource-apis"} + serverCfg = c + }) + resourceClient, err := consul.NewResourceServiceClient(testClient.Watcher) + require.NoError(t, err) + + _, err = EnsurePartitionExists(testClient.APIClient, c.consulPartition) + require.NoError(t, err) + + partitionedCfg := testClient.Cfg.APIClientConfig + 
partitionedCfg.Partition = c.consulPartition
+
+			partitionedClient, err := api.NewClient(partitionedCfg)
+			require.NoError(t, err)
+
+			_, err = namespaces.EnsureExists(partitionedClient, c.consulNamespace, "")
+			require.NoError(t, err)
+
+			// Register Consul workload.
+			loadResource(t, resourceClient, getWorkloadID(testPodName, c.consulNamespace, c.consulPartition), getWorkload(), nil)
+
+			ui := cli.NewMockUi()
+			cmd := Command{
+				UI:                 ui,
+				maxPollingAttempts: 5,
+			}
+			// We build the consul-addr because normally it's defined by the init container setting
+			// CONSUL_HTTP_ADDR when it processes the command template.
+			flags := []string{"-proxy-name", testPodName,
+				"-addresses", "127.0.0.1",
+				"-http-port", strconv.Itoa(serverCfg.Ports.HTTP),
+				"-grpc-port", strconv.Itoa(serverCfg.Ports.GRPC),
+				"-namespace", c.consulNamespace,
+				"-partition", c.consulPartition,
+			}
+
+			// Run the command.
+			code := cmd.Run(flags)
+			require.Equal(t, 0, code, ui.ErrorWriter.String())
+		})
+	}
+}
+
+// EnsurePartitionExists ensures a Consul partition exists.
+// Boolean return value indicates if the partition was created by this call.
+// This is borrowed from namespaces.EnsureExists
+func EnsurePartitionExists(client *api.Client, name string) (bool, error) {
+	if name == constants.DefaultConsulPartition {
+		return false, nil
+	}
+	// Check if the Consul partition exists.
+ partitionInfo, _, err := client.Partitions().Read(context.Background(), name, nil) + if err != nil { + return false, err + } + if partitionInfo != nil { + return false, nil + } + + consulPartition := api.Partition{ + Name: name, + Description: "Auto-generated by consul-k8s", + } + + _, _, err = client.Partitions().Create(context.Background(), &consulPartition, nil) + return true, err +} diff --git a/control-plane/subcommand/mesh-init/command_test.go b/control-plane/subcommand/mesh-init/command_test.go new file mode 100644 index 0000000000..f1f2c1405e --- /dev/null +++ b/control-plane/subcommand/mesh-init/command_test.go @@ -0,0 +1,425 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package meshinit + +import ( + "context" + "encoding/json" + "fmt" + "strconv" + "strings" + "sync" + "testing" + "time" + + pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1" + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1" + "github.com/hashicorp/consul/proto-public/pbresource" + "github.com/hashicorp/consul/sdk/iptables" + "github.com/hashicorp/consul/sdk/testutil" + "github.com/mitchellh/cli" + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" + + "github.com/hashicorp/consul-k8s/control-plane/connect-inject/constants" + "github.com/hashicorp/consul-k8s/control-plane/consul" + "github.com/hashicorp/consul-k8s/control-plane/helper/test" +) + +func TestRun_FlagValidation(t *testing.T) { + t.Parallel() + cases := []struct { + flags []string + env string + expErr string + }{ + { + flags: []string{}, + expErr: "-proxy-name must be set", + }, + { + flags: []string{ + "-proxy-name", testPodName, + "-log-level", "invalid", + }, + expErr: "unknown log level: invalid", + }, + } + for _, c := range cases { + t.Run(c.expErr, func(t *testing.T) { + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + } + code := cmd.Run(c.flags) + require.Equal(t, 1, 
code) + require.Contains(t, ui.ErrorWriter.String(), c.expErr) + }) + } +} + +// TestRun_MeshServices tests that the command can log in to Consul (if ACLs are enabled) using a kubernetes +// auth method and, using the obtained token, make call to the dataplane GetBootstrapParams() RPC. +func TestRun_MeshServices(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + workload *pbcatalog.Workload + proxyConfiguration *pbmesh.ProxyConfiguration + aclsEnabled bool + expFail bool + }{ + { + name: "basic workload bootstrap", + workload: getWorkload(), + }, + { + name: "workload and proxyconfiguration bootstrap", + workload: getWorkload(), + proxyConfiguration: getProxyConfiguration(), + }, + { + name: "missing workload", + expFail: true, + }, + // TODO: acls enabled + } + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { + //tokenFile := fmt.Sprintf("/tmp/%d1", rand.Int()) + //t.Cleanup(func() { + // _ = os.RemoveAll(tokenFile) + //}) + + // Create test consulServer server. + var serverCfg *testutil.TestServerConfig + testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) { + c.Experiments = []string{"resource-apis"} + serverCfg = c + }) + resourceClient, err := consul.NewResourceServiceClient(testClient.Watcher) + require.NoError(t, err) + + loadResource(t, resourceClient, getWorkloadID(testPodName, constants.DefaultConsulNS, constants.DefaultConsulPartition), tt.workload, nil) + loadResource(t, resourceClient, getProxyConfigurationID(testPodName, constants.DefaultConsulNS, constants.DefaultConsulPartition), tt.proxyConfiguration, nil) + + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + maxPollingAttempts: 3, + } + + // We build the consul-addr because normally it's defined by the init container setting + // CONSUL_HTTP_ADDR when it processes the command template. 
+ flags := []string{"-proxy-name", testPodName, + "-addresses", "127.0.0.1", + "-http-port", strconv.Itoa(serverCfg.Ports.HTTP), + "-grpc-port", strconv.Itoa(serverCfg.Ports.GRPC), + } + //if tt.aclsEnabled { + // flags = append(flags, "-auth-method-name", test.AuthMethod, + // "-service-account-name", tt.serviceAccountName, + // "-acl-token-sink", tokenFile) //TODO: what happens if this is unspecified? We don't need this file + //} + + // Run the command. + code := cmd.Run(flags) + if tt.expFail { + require.Equal(t, 1, code) + return + } + require.Equal(t, 0, code, ui.ErrorWriter.String()) + + // TODO: Can we remove the tokenFile from this workflow? + // consul-dataplane performs it's own login using the Serviceaccount bearer token + //if tt.aclsEnabled { + // // Validate the ACL token was written. + // tokenData, err := os.ReadFile(tokenFile) + // require.NoError(t, err) + // require.NotEmpty(t, tokenData) + // + // // Check that the token has the metadata with pod name and pod namespace. + // consulClient, err = api.NewClient(&api.Config{Address: server.HTTPAddr, Token: string(tokenData)}) + // require.NoError(t, err) + // token, _, err := consulClient.ACL().TokenReadSelf(nil) + // require.NoError(t, err) + // require.Equal(t, "token created via login: {\"pod\":\"default-ns/counting-pod\"}", token.Description) + //} + }) + } +} + +// TestRun_RetryServicePolling runs the command but does not register the consul service +// for 2 seconds and then asserts the command exits successfully. +func TestRun_RetryServicePolling(t *testing.T) { + t.Parallel() + + // Start Consul server. 
+	var serverCfg *testutil.TestServerConfig
+	testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) {
+		c.Experiments = []string{"resource-apis"}
+		serverCfg = c
+	})
+	resourceClient, err := consul.NewResourceServiceClient(testClient.Watcher)
+	require.NoError(t, err)
+
+	// Start the consul service registration in a go func and delay it so that it runs
+	// after the cmd.Run() starts.
+	var wg sync.WaitGroup
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		// Wait a moment, this ensures that we are already in the retry logic.
+		time.Sleep(time.Second * 2)
+		// Register counting service.
+		loadResource(t, resourceClient, getWorkloadID(testPodName, constants.DefaultConsulNS, constants.DefaultConsulPartition), getWorkload(), nil)
+	}()
+
+	ui := cli.NewMockUi()
+	cmd := Command{
+		UI:                 ui,
+		maxPollingAttempts: 10,
+	}
+	flags := []string{
+		"-proxy-name", testPodName,
+		"-addresses", "127.0.0.1",
+		"-http-port", strconv.Itoa(serverCfg.Ports.HTTP),
+		"-grpc-port", strconv.Itoa(serverCfg.Ports.GRPC),
+	}
+	code := cmd.Run(flags)
+	wg.Wait()
+	require.Equal(t, 0, code)
+}
+
+func TestRun_TrafficRedirection(t *testing.T) {
+	cases := map[string]struct {
+		registerProxyConfiguration bool
+		expIptablesParamsFunc      func(actual iptables.Config) error
+	}{
+		"no proxyConfiguration provided": {
+			expIptablesParamsFunc: func(actual iptables.Config) error {
+				if len(actual.ExcludeInboundPorts) != 1 || actual.ExcludeInboundPorts[0] != "9090" {
+					return fmt.Errorf("ExcludeInboundPorts in iptables.Config was %v, but should be [9090]", actual.ExcludeInboundPorts)
+				}
+				if actual.ProxyInboundPort != 20000 {
+					return fmt.Errorf("ProxyInboundPort in iptables.Config was %d, but should be [20000]", actual.ProxyInboundPort)
+				}
+				if actual.ProxyOutboundPort != 15001 {
+					return fmt.Errorf("ProxyOutboundPort in iptables.Config was %d, but should be [15001]", actual.ProxyOutboundPort)
+				}
+				return nil
+			},
+		},
+		"stats bind port is provided in 
proxyConfiguration": {
+			registerProxyConfiguration: true,
+			expIptablesParamsFunc: func(actual iptables.Config) error {
+				if len(actual.ExcludeInboundPorts) != 1 || actual.ExcludeInboundPorts[0] != "9090" {
+					return fmt.Errorf("ExcludeInboundPorts in iptables.Config was %v, but should be [9090]", actual.ExcludeInboundPorts)
+				}
+				if actual.ProxyInboundPort != 20000 {
+					return fmt.Errorf("ProxyInboundPort in iptables.Config was %d, but should be [20000]", actual.ProxyInboundPort)
+				}
+				if actual.ProxyOutboundPort != 15001 {
+					return fmt.Errorf("ProxyOutboundPort in iptables.Config was %d, but should be [15001]", actual.ProxyOutboundPort)
+				}
+				return nil
+			},
+		},
+	}
+
+	for name, c := range cases {
+		t.Run(name, func(t *testing.T) {
+
+			// Start Consul server.
+			var serverCfg *testutil.TestServerConfig
+			testClient := test.TestServerWithMockConnMgrWatcher(t, func(c *testutil.TestServerConfig) {
+				c.Experiments = []string{"resource-apis"}
+				serverCfg = c
+			})
+			resourceClient, err := consul.NewResourceServiceClient(testClient.Watcher)
+			require.NoError(t, err)
+
+			// Add additional proxy configuration either to a config entry or to the service itself.
+			if c.registerProxyConfiguration {
+				loadResource(t, resourceClient, getProxyConfigurationID(testPodName, constants.DefaultConsulNS, constants.DefaultConsulPartition), getProxyConfiguration(), nil)
+			}
+
+			// Register Consul workload.
+ loadResource(t, resourceClient, getWorkloadID(testPodName, constants.DefaultConsulNS, constants.DefaultConsulPartition), getWorkload(), nil) + + iptablesProvider := &fakeIptablesProvider{} + iptablesCfg := iptables.Config{ + ProxyUserID: "5995", + ProxyInboundPort: 20000, + ProxyOutboundPort: 15001, + } + + ui := cli.NewMockUi() + cmd := Command{ + UI: ui, + maxPollingAttempts: 3, + iptablesProvider: iptablesProvider, + } + iptablesCfgJSON, err := json.Marshal(iptablesCfg) + require.NoError(t, err) + flags := []string{ + "-proxy-name", testPodName, + "-addresses", "127.0.0.1", + "-http-port", strconv.Itoa(serverCfg.Ports.HTTP), + "-grpc-port", strconv.Itoa(serverCfg.Ports.GRPC), + "-redirect-traffic-config", string(iptablesCfgJSON), + } + code := cmd.Run(flags) + require.Equal(t, 0, code, ui.ErrorWriter.String()) + require.Truef(t, iptablesProvider.applyCalled, "redirect traffic rules were not applied") + if c.expIptablesParamsFunc != nil { + errMsg := c.expIptablesParamsFunc(cmd.iptablesConfig) + require.NoError(t, errMsg) + } + }) + } +} + +const ( + testPodName = "foo" +) + +type fakeIptablesProvider struct { + applyCalled bool + rules []string +} + +func loadResource(t *testing.T, client pbresource.ResourceServiceClient, id *pbresource.ID, proto proto.Message, owner *pbresource.ID) { + if id == nil || !proto.ProtoReflect().IsValid() { + return + } + + data, err := anypb.New(proto) + require.NoError(t, err) + + resource := &pbresource.Resource{ + Id: id, + Data: data, + Owner: owner, + } + + req := &pbresource.WriteRequest{Resource: resource} + _, err = client.Write(context.Background(), req) + require.NoError(t, err) + test.ResourceHasPersisted(t, client, id) +} + +func getWorkloadID(name, namespace, partition string) *pbresource.ID { + return &pbresource.ID{ + Name: name, + Type: &pbresource.Type{ + Group: "catalog", + GroupVersion: "v1alpha1", + Kind: "Workload", + }, + Tenancy: &pbresource.Tenancy{ + Partition: partition, + Namespace: namespace, + + // 
Because we are explicitly defining NS/partition, this will not default and must be explicit. + // At a future point, this will move out of the Tenancy block. + PeerName: constants.DefaultConsulPeer, + }, + } +} + +// getWorkload creates a proxyConfiguration that matches the pod from createPod, +// assuming that metrics, telemetry, and overwrite probes are enabled separately. +func getWorkload() *pbcatalog.Workload { + return &pbcatalog.Workload{ + Addresses: []*pbcatalog.WorkloadAddress{ + {Host: "10.0.0.1", Ports: []string{"public", "admin", "mesh"}}, + }, + Ports: map[string]*pbcatalog.WorkloadPort{ + "public": { + Port: 80, + Protocol: pbcatalog.Protocol_PROTOCOL_UNSPECIFIED, + }, + "admin": { + Port: 8080, + Protocol: pbcatalog.Protocol_PROTOCOL_UNSPECIFIED, + }, + "mesh": { + Port: constants.ProxyDefaultInboundPort, + Protocol: pbcatalog.Protocol_PROTOCOL_MESH, + }, + }, + NodeName: "k8s-node-0", + Identity: testPodName, + } +} + +func getProxyConfigurationID(name, namespace, partition string) *pbresource.ID { + return &pbresource.ID{ + Name: name, + Type: &pbresource.Type{ + Group: "mesh", + GroupVersion: "v1alpha1", + Kind: "ProxyConfiguration", + }, + Tenancy: &pbresource.Tenancy{ + Partition: partition, + Namespace: namespace, + + // Because we are explicitly defining NS/partition, this will not default and must be explicit. + // At a future point, this will move out of the Tenancy block. 
+ PeerName: constants.DefaultConsulPeer, + }, + } +} + +// getProxyConfiguration creates a proxyConfiguration that matches the pod from createWorkload +func getProxyConfiguration() *pbmesh.ProxyConfiguration { + return &pbmesh.ProxyConfiguration{ + Workloads: &pbcatalog.WorkloadSelector{ + Names: []string{testPodName}, + }, + DynamicConfig: &pbmesh.DynamicConfig{ + Mode: pbmesh.ProxyMode_PROXY_MODE_TRANSPARENT, + ExposeConfig: &pbmesh.ExposeConfig{ + ExposePaths: []*pbmesh.ExposePath{ + { + ListenerPort: 20400, + LocalPathPort: 2001, + Path: "/livez", + }, + { + ListenerPort: 20300, + LocalPathPort: 2000, + Path: "/readyz", + }, + { + ListenerPort: 20500, + LocalPathPort: 2002, + Path: "/startupz", + }, + }, + }, + }, + BootstrapConfig: &pbmesh.BootstrapConfig{ + StatsBindAddr: "0.0.0.0:9090", + PrometheusBindAddr: "0.0.0.0:21234", // This gets added to the iptables exclude directly in the webhook + }, + } +} + +func (f *fakeIptablesProvider) AddRule(_ string, args ...string) { + f.rules = append(f.rules, strings.Join(args, " ")) +} + +func (f *fakeIptablesProvider) ApplyRules() error { + f.applyCalled = true + return nil +} + +func (f *fakeIptablesProvider) Rules() []string { + return f.rules +}