From 4f9562863d84f6dff37c8ef0971d5150d9798bc6 Mon Sep 17 00:00:00 2001 From: Joe Elliott Date: Wed, 3 Jan 2024 15:36:06 -0500 Subject: [PATCH 01/12] first pass - working Signed-off-by: Joe Elliott --- cmd/tempo/app/app.go | 43 ++-- cmd/tempo/app/http_handler.go | 19 ++ cmd/tempo/app/modules.go | 105 ++++++---- docs/sources/tempo/api_docs/_index.md | 4 - example/docker-compose/shared/tempo.yaml | 2 +- go.mod | 2 +- vendor/golang.org/x/net/http2/h2c/h2c.go | 240 +++++++++++++++++++++++ vendor/modules.txt | 1 + 8 files changed, 348 insertions(+), 68 deletions(-) create mode 100644 cmd/tempo/app/http_handler.go create mode 100644 vendor/golang.org/x/net/http2/h2c/h2c.go diff --git a/cmd/tempo/app/app.go b/cmd/tempo/app/app.go index a24dcf50255..47a1dc3653f 100644 --- a/cmd/tempo/app/app.go +++ b/cmd/tempo/app/app.go @@ -66,20 +66,22 @@ var ( type App struct { cfg Config - Server *server.Server + mux *mux.Router + server *server.Server InternalServer *server.Server - readRings map[string]*ring.Ring - Overrides overrides.Service - distributor *distributor.Distributor - querier *querier.Querier - frontend *frontend_v1.Frontend - compactor *compactor.Compactor - ingester *ingester.Ingester - generator *generator.Generator - store storage.Store - usageReport *usagestats.Reporter - cacheProvider cache.Provider - MemberlistKV *memberlist.KVInitService + + readRings map[string]*ring.Ring + Overrides overrides.Service + distributor *distributor.Distributor + querier *querier.Querier + frontend *frontend_v1.Frontend + compactor *compactor.Compactor + ingester *ingester.Ingester + generator *generator.Generator + store storage.Store + usageReport *usagestats.Reporter + cacheProvider cache.Provider + MemberlistKV *memberlist.KVInitService HTTPAuthMiddleware middleware.Interface TracesConsumerMiddleware receiver.Middleware @@ -94,6 +96,7 @@ func New(cfg Config) (*App, error) { app := &App{ cfg: cfg, readRings: map[string]*ring.Ring{}, + mux: mux.NewRouter(), } 
usagestats.Edition("oss") @@ -192,12 +195,12 @@ func (t *App) Run() error { t.InternalServer.HTTP.Path("/ready").Methods("GET").Handler(t.readyHandler(sm)) } - t.Server.HTTP.Path(addHTTPAPIPrefix(&t.cfg, api.PathBuildInfo)).Handler(t.buildinfoHandler()).Methods("GET") + t.mux.Path(addHTTPAPIPrefix(&t.cfg, api.PathBuildInfo)).Handler(t.buildinfoHandler()).Methods("GET") - t.Server.HTTP.Path("/ready").Handler(t.readyHandler(sm)) - t.Server.HTTP.Path("/status").Handler(t.statusHandler()).Methods("GET") - t.Server.HTTP.Path("/status/{endpoint}").Handler(t.statusHandler()).Methods("GET") - grpc_health_v1.RegisterHealthServer(t.Server.GRPC, grpcutil.NewHealthCheck(sm)) + t.mux.Path("/ready").Handler(t.readyHandler(sm)) + t.mux.Path("/status").Handler(t.statusHandler()).Methods("GET") + t.mux.Path("/status/{endpoint}").Handler(t.statusHandler()).Methods("GET") + grpc_health_v1.RegisterHealthServer(t.server.GRPC, grpcutil.NewHealthCheck(sm)) // Let's listen for events from this manager, and log them. healthy := func() { level.Info(log.Logger).Log("msg", "Tempo started") } @@ -226,7 +229,7 @@ func (t *App) Run() error { sm.AddListener(services.NewManagerListener(healthy, stopped, serviceFailed)) // Setup signal handler. If signal arrives, we stop the manager, which stops all the services. 
- handler := signals.NewHandler(t.Server.Log) + handler := signals.NewHandler(t.server.Log) go func() { handler.Loop() sm.StopAsync() @@ -472,7 +475,7 @@ func (t *App) writeStatusEndpoints(w io.Writer) error { endpoints := []endpoint{} - err := t.Server.HTTP.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error { + err := t.mux.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error { e := endpoint{} pathTemplate, err := route.GetPathTemplate() diff --git a/cmd/tempo/app/http_handler.go b/cmd/tempo/app/http_handler.go new file mode 100644 index 00000000000..b625f0be049 --- /dev/null +++ b/cmd/tempo/app/http_handler.go @@ -0,0 +1,19 @@ +package app + +import ( + "net/http" + + "github.com/gorilla/mux" +) + +type handler interface { + Handle(pattern string, handler http.Handler) +} + +type muxWrapper struct { + *mux.Router +} + +func (m muxWrapper) Handle(pattern string, handler http.Handler) { + m.Router.Handle(pattern, handler) +} diff --git a/cmd/tempo/app/modules.go b/cmd/tempo/app/modules.go index 87e27310578..5e17449a462 100644 --- a/cmd/tempo/app/modules.go +++ b/cmd/tempo/app/modules.go @@ -7,6 +7,7 @@ import ( "io" "net/http" "path" + "strings" "github.com/go-kit/log/level" "github.com/grafana/dskit/dns" @@ -20,6 +21,8 @@ import ( jsoniter "github.com/json-iterator/go" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/collectors" + "golang.org/x/net/http2" + "golang.org/x/net/http2/h2c" "github.com/grafana/tempo/modules/cache" "github.com/grafana/tempo/modules/compactor" @@ -95,13 +98,11 @@ func (t *App) initServer() (services.Service, error) { )) } + t.cfg.Server.Router = t.mux + if t.cfg.StreamOverHTTPEnabled { + t.cfg.Server.Router = nil + } DisableSignalHandling(&t.cfg.Server) - - // this allows us to serve http and grpc over the primary http server. 
- // to use this register services with GRPCOnHTTPServer - // Note: Enabling this breaks TLS - t.cfg.Server.RouteHTTPToGRPC = t.cfg.StreamOverHTTPEnabled - server, err := server.New(t.cfg.Server) if err != nil { return nil, fmt.Errorf("failed to create server: %w", err) @@ -118,9 +119,20 @@ func (t *App) initServer() (services.Service, error) { return svs } - t.Server = server + t.server = server s := NewServerService(server, servicesToWaitFor) + if t.cfg.StreamOverHTTPEnabled { + t.server.HTTPServer.Handler = h2c.NewHandler(server.HTTPServer.Handler, &http2.Server{}) + t.server.HTTP.PathPrefix("/").HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + if req.ProtoMajor == 2 && strings.Contains(req.Header.Get("Content-Type"), "application/grpc") { // jpe - both? i don't think grafana sends the content-type header + t.server.GRPC.ServeHTTP(w, req) + } else { + t.mux.ServeHTTP(w, req) + } + }) + } + return s, nil } @@ -178,7 +190,7 @@ func (t *App) initReadRing(cfg ring.Config, name, key string) (*ring.Ring, error return nil, fmt.Errorf("failed to create ring %s: %w", name, err) } - t.Server.HTTP.Handle("/"+name+"/ring", ring) + t.mux.Handle("/"+name+"/ring", ring) t.readRings[name] = ring return ring, nil @@ -217,10 +229,10 @@ func (t *App) initOverridesAPI() (services.Service, error) { return t.HTTPAuthMiddleware.Wrap(h) } - t.Server.HTTP.Path(overridesPath).Methods(http.MethodGet).Handler(wrapHandler(userConfigOverridesAPI.GetHandler)) - t.Server.HTTP.Path(overridesPath).Methods(http.MethodPost).Handler(wrapHandler(userConfigOverridesAPI.PostHandler)) - t.Server.HTTP.Path(overridesPath).Methods(http.MethodPatch).Handler(wrapHandler(userConfigOverridesAPI.PatchHandler)) - t.Server.HTTP.Path(overridesPath).Methods(http.MethodDelete).Handler(wrapHandler(userConfigOverridesAPI.DeleteHandler)) + t.mux.Path(overridesPath).Methods(http.MethodGet).Handler(wrapHandler(userConfigOverridesAPI.GetHandler)) + 
t.mux.Path(overridesPath).Methods(http.MethodPost).Handler(wrapHandler(userConfigOverridesAPI.PostHandler)) + t.mux.Path(overridesPath).Methods(http.MethodPatch).Handler(wrapHandler(userConfigOverridesAPI.PatchHandler)) + t.mux.Path(overridesPath).Methods(http.MethodDelete).Handler(wrapHandler(userConfigOverridesAPI.DeleteHandler)) return userConfigOverridesAPI, nil } @@ -241,7 +253,7 @@ func (t *App) initDistributor() (services.Service, error) { t.distributor = distributor if distributor.DistributorRing != nil { - t.Server.HTTP.Handle("/distributor/ring", distributor.DistributorRing) + t.mux.Handle("/distributor/ring", distributor.DistributorRing) } return t.distributor, nil @@ -257,10 +269,10 @@ func (t *App) initIngester() (services.Service, error) { } t.ingester = ingester - tempopb.RegisterPusherServer(t.Server.GRPC, t.ingester) - tempopb.RegisterQuerierServer(t.Server.GRPC, t.ingester) - t.Server.HTTP.Path("/flush").Handler(http.HandlerFunc(t.ingester.FlushHandler)) - t.Server.HTTP.Path("/shutdown").Handler(http.HandlerFunc(t.ingester.ShutdownHandler)) + tempopb.RegisterPusherServer(t.server.GRPC, t.ingester) + tempopb.RegisterQuerierServer(t.server.GRPC, t.ingester) + t.mux.Path("/flush").Handler(http.HandlerFunc(t.ingester.FlushHandler)) + t.mux.Path("/shutdown").Handler(http.HandlerFunc(t.ingester.ShutdownHandler)) return t.ingester, nil } @@ -277,9 +289,9 @@ func (t *App) initGenerator() (services.Service, error) { t.generator = genSvc spanStatsHandler := t.HTTPAuthMiddleware.Wrap(http.HandlerFunc(t.generator.SpanMetricsHandler)) - t.Server.HTTP.Handle(path.Join(api.PathPrefixGenerator, addHTTPAPIPrefix(&t.cfg, api.PathSpanMetrics)), spanStatsHandler) + t.mux.Handle(path.Join(api.PathPrefixGenerator, addHTTPAPIPrefix(&t.cfg, api.PathSpanMetrics)), spanStatsHandler) - tempopb.RegisterMetricsGeneratorServer(t.Server.GRPC, t.generator) + tempopb.RegisterMetricsGeneratorServer(t.server.GRPC, t.generator) return t.generator, nil } @@ -326,27 +338,27 @@ func (t 
*App) initQuerier() (services.Service, error) { ) tracesHandler := middleware.Wrap(http.HandlerFunc(t.querier.TraceByIDHandler)) - t.Server.HTTP.Handle(path.Join(api.PathPrefixQuerier, addHTTPAPIPrefix(&t.cfg, api.PathTraces)), tracesHandler) + t.mux.Handle(path.Join(api.PathPrefixQuerier, addHTTPAPIPrefix(&t.cfg, api.PathTraces)), tracesHandler) searchHandler := t.HTTPAuthMiddleware.Wrap(http.HandlerFunc(t.querier.SearchHandler)) - t.Server.HTTP.Handle(path.Join(api.PathPrefixQuerier, addHTTPAPIPrefix(&t.cfg, api.PathSearch)), searchHandler) + t.mux.Handle(path.Join(api.PathPrefixQuerier, addHTTPAPIPrefix(&t.cfg, api.PathSearch)), searchHandler) searchTagsHandler := t.HTTPAuthMiddleware.Wrap(http.HandlerFunc(t.querier.SearchTagsHandler)) - t.Server.HTTP.Handle(path.Join(api.PathPrefixQuerier, addHTTPAPIPrefix(&t.cfg, api.PathSearchTags)), searchTagsHandler) + t.mux.Handle(path.Join(api.PathPrefixQuerier, addHTTPAPIPrefix(&t.cfg, api.PathSearchTags)), searchTagsHandler) searchTagsV2Handler := t.HTTPAuthMiddleware.Wrap(http.HandlerFunc(t.querier.SearchTagsV2Handler)) - t.Server.HTTP.Handle(path.Join(api.PathPrefixQuerier, addHTTPAPIPrefix(&t.cfg, api.PathSearchTagsV2)), searchTagsV2Handler) + t.mux.Handle(path.Join(api.PathPrefixQuerier, addHTTPAPIPrefix(&t.cfg, api.PathSearchTagsV2)), searchTagsV2Handler) searchTagValuesHandler := t.HTTPAuthMiddleware.Wrap(http.HandlerFunc(t.querier.SearchTagValuesHandler)) - t.Server.HTTP.Handle(path.Join(api.PathPrefixQuerier, addHTTPAPIPrefix(&t.cfg, api.PathSearchTagValues)), searchTagValuesHandler) + t.mux.Handle(path.Join(api.PathPrefixQuerier, addHTTPAPIPrefix(&t.cfg, api.PathSearchTagValues)), searchTagValuesHandler) searchTagValuesV2Handler := t.HTTPAuthMiddleware.Wrap(http.HandlerFunc(t.querier.SearchTagValuesV2Handler)) - t.Server.HTTP.Handle(path.Join(api.PathPrefixQuerier, addHTTPAPIPrefix(&t.cfg, api.PathSearchTagValuesV2)), searchTagValuesV2Handler) + t.mux.Handle(path.Join(api.PathPrefixQuerier, 
addHTTPAPIPrefix(&t.cfg, api.PathSearchTagValuesV2)), searchTagValuesV2Handler) spanMetricsSummaryHandler := t.HTTPAuthMiddleware.Wrap(http.HandlerFunc(t.querier.SpanMetricsSummaryHandler)) - t.Server.HTTP.Handle(path.Join(api.PathPrefixQuerier, addHTTPAPIPrefix(&t.cfg, api.PathSpanMetricsSummary)), spanMetricsSummaryHandler) + t.mux.Handle(path.Join(api.PathPrefixQuerier, addHTTPAPIPrefix(&t.cfg, api.PathSpanMetricsSummary)), spanMetricsSummaryHandler) - return t.querier, t.querier.CreateAndRegisterWorker(t.Server.HTTPServer.Handler) + return t.querier, t.querier.CreateAndRegisterWorker(t.server.HTTPServer.Handler) } func (t *App) initQueryFrontend() (services.Service, error) { @@ -358,6 +370,15 @@ func (t *App) initQueryFrontend() (services.Service, error) { } t.frontend = v1 + // jpe - add stream over http support here + // - update docs to reflect new defaults + // - remove stream_over_http_enabled config in integration tests + // - review impact on GET + // - remove support from dskit + // - restore default = false + // - remove ws support + // - review dskit server settings, do i need to copy any to the router? + // create query frontend queryFrontend, err := frontend.New(t.cfg.Frontend, cortexTripper, t.Overrides, t.store, t.cacheProvider, t.cfg.HTTPAPIPrefix, log.Logger, prometheus.DefaultRegisterer) if err != nil { @@ -365,11 +386,11 @@ func (t *App) initQueryFrontend() (services.Service, error) { } // register grpc server for queriers to connect to - frontend_v1pb.RegisterFrontendServer(t.Server.GRPC, t.frontend) + frontend_v1pb.RegisterFrontendServer(t.server.GRPC, t.frontend) // we register the streaming querier service on both the http and grpc servers. Grafana expects // this GRPC service to be available on the HTTP server. 
- tempopb.RegisterStreamingQuerierServer(t.Server.GRPC, queryFrontend) - tempopb.RegisterStreamingQuerierServer(t.Server.GRPCOnHTTPServer, queryFrontend) + tempopb.RegisterStreamingQuerierServer(t.server.GRPC, queryFrontend) + // tempopb.RegisterStreamingQuerierServer(t.Server.GRPCOnHTTPServer, queryFrontend) // wrap handlers with auth base := middleware.Merge( @@ -378,27 +399,27 @@ func (t *App) initQueryFrontend() (services.Service, error) { ) // http trace by id endpoint - t.Server.HTTP.Handle(addHTTPAPIPrefix(&t.cfg, api.PathTraces), base.Wrap(queryFrontend.TraceByIDHandler)) + t.mux.Handle(addHTTPAPIPrefix(&t.cfg, api.PathTraces), base.Wrap(queryFrontend.TraceByIDHandler)) // http search endpoints - t.Server.HTTP.Handle(addHTTPAPIPrefix(&t.cfg, api.PathSearch), base.Wrap(queryFrontend.SearchHandler)) - t.Server.HTTP.Handle(addHTTPAPIPrefix(&t.cfg, api.PathWSSearch), base.Wrap(queryFrontend.SearchWSHandler)) - t.Server.HTTP.Handle(addHTTPAPIPrefix(&t.cfg, api.PathSearchTags), base.Wrap(queryFrontend.SearchTagsHandler)) - t.Server.HTTP.Handle(addHTTPAPIPrefix(&t.cfg, api.PathSearchTagsV2), base.Wrap(queryFrontend.SearchTagsV2Handler)) - t.Server.HTTP.Handle(addHTTPAPIPrefix(&t.cfg, api.PathSearchTagValues), base.Wrap(queryFrontend.SearchTagsValuesHandler)) - t.Server.HTTP.Handle(addHTTPAPIPrefix(&t.cfg, api.PathSearchTagValuesV2), base.Wrap(queryFrontend.SearchTagsValuesV2Handler)) + t.mux.Handle(addHTTPAPIPrefix(&t.cfg, api.PathSearch), base.Wrap(queryFrontend.SearchHandler)) + t.mux.Handle(addHTTPAPIPrefix(&t.cfg, api.PathWSSearch), base.Wrap(queryFrontend.SearchWSHandler)) + t.mux.Handle(addHTTPAPIPrefix(&t.cfg, api.PathSearchTags), base.Wrap(queryFrontend.SearchTagsHandler)) + t.mux.Handle(addHTTPAPIPrefix(&t.cfg, api.PathSearchTagsV2), base.Wrap(queryFrontend.SearchTagsV2Handler)) + t.mux.Handle(addHTTPAPIPrefix(&t.cfg, api.PathSearchTagValues), base.Wrap(queryFrontend.SearchTagsValuesHandler)) + t.mux.Handle(addHTTPAPIPrefix(&t.cfg, 
api.PathSearchTagValuesV2), base.Wrap(queryFrontend.SearchTagsValuesV2Handler)) // http metrics endpoints - t.Server.HTTP.Handle(addHTTPAPIPrefix(&t.cfg, api.PathSpanMetricsSummary), base.Wrap(queryFrontend.SpanMetricsSummaryHandler)) + t.mux.Handle(addHTTPAPIPrefix(&t.cfg, api.PathSpanMetricsSummary), base.Wrap(queryFrontend.SpanMetricsSummaryHandler)) // the query frontend needs to have knowledge of the blocks so it can shard search jobs t.store.EnablePolling(context.Background(), nil) // http query echo endpoint - t.Server.HTTP.Handle(addHTTPAPIPrefix(&t.cfg, api.PathEcho), echoHandler()) + t.mux.Handle(addHTTPAPIPrefix(&t.cfg, api.PathEcho), echoHandler()) // http endpoint to see usage stats data - t.Server.HTTP.Handle(addHTTPAPIPrefix(&t.cfg, api.PathUsageStats), usageStatsHandler(t.cfg.UsageReport)) + t.mux.Handle(addHTTPAPIPrefix(&t.cfg, api.PathUsageStats), usageStatsHandler(t.cfg.UsageReport)) // todo: queryFrontend should implement service.Service and take the cortex frontend a submodule return t.frontend, nil @@ -416,7 +437,7 @@ func (t *App) initCompactor() (services.Service, error) { t.compactor = compactor if t.compactor.Ring != nil { - t.Server.HTTP.Handle("/compactor/ring", t.compactor.Ring) + t.mux.Handle("/compactor/ring", t.compactor.Ring) } return t.compactor, nil @@ -456,7 +477,7 @@ func (t *App) initMemberlistKV() (services.Service, error) { t.cfg.Distributor.DistributorRing.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV t.cfg.Compactor.ShardingRing.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV - t.Server.HTTP.Handle("/memberlist", t.MemberlistKV) + t.mux.Handle("/memberlist", t.MemberlistKV) return t.MemberlistKV, nil } diff --git a/docs/sources/tempo/api_docs/_index.md b/docs/sources/tempo/api_docs/_index.md index 5b245401806..4000037f21a 100644 --- a/docs/sources/tempo/api_docs/_index.md +++ b/docs/sources/tempo/api_docs/_index.md @@ -580,10 +580,6 @@ The query-frontend component implements the streaming querier interface 
defined By default, this service is only offered over the GRPC port. You can use streaming service over the HTTP port as well (which Grafana expects). -{{% admonition type="note" %}} -Enabling this setting is incompatible with TLS. -{{% /admonition %}} - To enable the streaming service over the HTTP port for use with Grafana, set the following: ``` diff --git a/example/docker-compose/shared/tempo.yaml b/example/docker-compose/shared/tempo.yaml index fc41f2d678f..030afe80e14 100644 --- a/example/docker-compose/shared/tempo.yaml +++ b/example/docker-compose/shared/tempo.yaml @@ -1,4 +1,4 @@ -multitenancy_enabled: true +multitenancy_enabled: false stream_over_http_enabled: true server: http_listen_port: 3200 diff --git a/go.mod b/go.mod index f7a1e9d9cb6..8fd50602cf4 100644 --- a/go.mod +++ b/go.mod @@ -120,6 +120,7 @@ require ( go.opentelemetry.io/collector/processor v0.89.0 go.opentelemetry.io/collector/receiver v0.89.0 golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 + golang.org/x/net v0.18.0 golang.org/x/oauth2 v0.14.0 google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 ) @@ -314,7 +315,6 @@ require ( go.opentelemetry.io/proto/otlp v1.0.0 // indirect golang.org/x/crypto v0.15.0 // indirect golang.org/x/mod v0.14.0 // indirect - golang.org/x/net v0.18.0 // indirect golang.org/x/sys v0.14.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/tools v0.15.0 // indirect diff --git a/vendor/golang.org/x/net/http2/h2c/h2c.go b/vendor/golang.org/x/net/http2/h2c/h2c.go new file mode 100644 index 00000000000..2d6bf861b97 --- /dev/null +++ b/vendor/golang.org/x/net/http2/h2c/h2c.go @@ -0,0 +1,240 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package h2c implements the unencrypted "h2c" form of HTTP/2. 
+// +// The h2c protocol is the non-TLS version of HTTP/2 which is not available from +// net/http or golang.org/x/net/http2. +package h2c + +import ( + "bufio" + "bytes" + "encoding/base64" + "errors" + "fmt" + "io" + "log" + "net" + "net/http" + "net/textproto" + "os" + "strings" + + "golang.org/x/net/http/httpguts" + "golang.org/x/net/http2" +) + +var ( + http2VerboseLogs bool +) + +func init() { + e := os.Getenv("GODEBUG") + if strings.Contains(e, "http2debug=1") || strings.Contains(e, "http2debug=2") { + http2VerboseLogs = true + } +} + +// h2cHandler is a Handler which implements h2c by hijacking the HTTP/1 traffic +// that should be h2c traffic. There are two ways to begin a h2c connection +// (RFC 7540 Section 3.2 and 3.4): (1) Starting with Prior Knowledge - this +// works by starting an h2c connection with a string of bytes that is valid +// HTTP/1, but unlikely to occur in practice and (2) Upgrading from HTTP/1 to +// h2c - this works by using the HTTP/1 Upgrade header to request an upgrade to +// h2c. When either of those situations occur we hijack the HTTP/1 connection, +// convert it to an HTTP/2 connection and pass the net.Conn to http2.ServeConn. +type h2cHandler struct { + Handler http.Handler + s *http2.Server +} + +// NewHandler returns an http.Handler that wraps h, intercepting any h2c +// traffic. If a request is an h2c connection, it's hijacked and redirected to +// s.ServeConn. Otherwise the returned Handler just forwards requests to h. This +// works because h2c is designed to be parseable as valid HTTP/1, but ignored by +// any HTTP server that does not handle h2c. Therefore we leverage the HTTP/1 +// compatible parts of the Go http library to parse and recognize h2c requests. +// Once a request is recognized as h2c, we hijack the connection and convert it +// to an HTTP/2 connection which is understandable to s.ServeConn. (s.ServeConn +// understands HTTP/2 except for the h2c part of it.) 
+// +// The first request on an h2c connection is read entirely into memory before +// the Handler is called. To limit the memory consumed by this request, wrap +// the result of NewHandler in an http.MaxBytesHandler. +func NewHandler(h http.Handler, s *http2.Server) http.Handler { + return &h2cHandler{ + Handler: h, + s: s, + } +} + +// extractServer extracts existing http.Server instance from http.Request or create an empty http.Server +func extractServer(r *http.Request) *http.Server { + server, ok := r.Context().Value(http.ServerContextKey).(*http.Server) + if ok { + return server + } + return new(http.Server) +} + +// ServeHTTP implement the h2c support that is enabled by h2c.GetH2CHandler. +func (s h2cHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + // Handle h2c with prior knowledge (RFC 7540 Section 3.4) + if r.Method == "PRI" && len(r.Header) == 0 && r.URL.Path == "*" && r.Proto == "HTTP/2.0" { + if http2VerboseLogs { + log.Print("h2c: attempting h2c with prior knowledge.") + } + conn, err := initH2CWithPriorKnowledge(w) + if err != nil { + if http2VerboseLogs { + log.Printf("h2c: error h2c with prior knowledge: %v", err) + } + return + } + defer conn.Close() + s.s.ServeConn(conn, &http2.ServeConnOpts{ + Context: r.Context(), + BaseConfig: extractServer(r), + Handler: s.Handler, + SawClientPreface: true, + }) + return + } + // Handle Upgrade to h2c (RFC 7540 Section 3.2) + if isH2CUpgrade(r.Header) { + conn, settings, err := h2cUpgrade(w, r) + if err != nil { + if http2VerboseLogs { + log.Printf("h2c: error h2c upgrade: %v", err) + } + w.WriteHeader(http.StatusInternalServerError) + return + } + defer conn.Close() + s.s.ServeConn(conn, &http2.ServeConnOpts{ + Context: r.Context(), + BaseConfig: extractServer(r), + Handler: s.Handler, + UpgradeRequest: r, + Settings: settings, + }) + return + } + s.Handler.ServeHTTP(w, r) + return +} + +// initH2CWithPriorKnowledge implements creating a h2c connection with prior +// knowledge (Section 3.4) and 
creates a net.Conn suitable for http2.ServeConn. +// All we have to do is look for the client preface that is suppose to be part +// of the body, and reforward the client preface on the net.Conn this function +// creates. +func initH2CWithPriorKnowledge(w http.ResponseWriter) (net.Conn, error) { + hijacker, ok := w.(http.Hijacker) + if !ok { + return nil, errors.New("h2c: connection does not support Hijack") + } + conn, rw, err := hijacker.Hijack() + if err != nil { + return nil, err + } + + const expectedBody = "SM\r\n\r\n" + + buf := make([]byte, len(expectedBody)) + n, err := io.ReadFull(rw, buf) + if err != nil { + return nil, fmt.Errorf("h2c: error reading client preface: %s", err) + } + + if string(buf[:n]) == expectedBody { + return newBufConn(conn, rw), nil + } + + conn.Close() + return nil, errors.New("h2c: invalid client preface") +} + +// h2cUpgrade establishes a h2c connection using the HTTP/1 upgrade (Section 3.2). +func h2cUpgrade(w http.ResponseWriter, r *http.Request) (_ net.Conn, settings []byte, err error) { + settings, err = getH2Settings(r.Header) + if err != nil { + return nil, nil, err + } + hijacker, ok := w.(http.Hijacker) + if !ok { + return nil, nil, errors.New("h2c: connection does not support Hijack") + } + + body, err := io.ReadAll(r.Body) + if err != nil { + return nil, nil, err + } + r.Body = io.NopCloser(bytes.NewBuffer(body)) + + conn, rw, err := hijacker.Hijack() + if err != nil { + return nil, nil, err + } + + rw.Write([]byte("HTTP/1.1 101 Switching Protocols\r\n" + + "Connection: Upgrade\r\n" + + "Upgrade: h2c\r\n\r\n")) + return newBufConn(conn, rw), settings, nil +} + +// isH2CUpgrade returns true if the header properly request an upgrade to h2c +// as specified by Section 3.2. 
+func isH2CUpgrade(h http.Header) bool { + return httpguts.HeaderValuesContainsToken(h[textproto.CanonicalMIMEHeaderKey("Upgrade")], "h2c") && + httpguts.HeaderValuesContainsToken(h[textproto.CanonicalMIMEHeaderKey("Connection")], "HTTP2-Settings") +} + +// getH2Settings returns the settings in the HTTP2-Settings header. +func getH2Settings(h http.Header) ([]byte, error) { + vals, ok := h[textproto.CanonicalMIMEHeaderKey("HTTP2-Settings")] + if !ok { + return nil, errors.New("missing HTTP2-Settings header") + } + if len(vals) != 1 { + return nil, fmt.Errorf("expected 1 HTTP2-Settings. Got: %v", vals) + } + settings, err := base64.RawURLEncoding.DecodeString(vals[0]) + if err != nil { + return nil, err + } + return settings, nil +} + +func newBufConn(conn net.Conn, rw *bufio.ReadWriter) net.Conn { + rw.Flush() + if rw.Reader.Buffered() == 0 { + // If there's no buffered data to be read, + // we can just discard the bufio.ReadWriter. + return conn + } + return &bufConn{conn, rw.Reader} +} + +// bufConn wraps a net.Conn, but reads drain the bufio.Reader first. 
+type bufConn struct { + net.Conn + *bufio.Reader +} + +func (c *bufConn) Read(p []byte) (int, error) { + if c.Reader == nil { + return c.Conn.Read(p) + } + n := c.Reader.Buffered() + if n == 0 { + c.Reader = nil + return c.Conn.Read(p) + } + if n < len(p) { + p = p[:n] + } + return c.Reader.Read(p) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 62809af499c..32b130bb1cc 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1634,6 +1634,7 @@ golang.org/x/net/context golang.org/x/net/http/httpguts golang.org/x/net/http/httpproxy golang.org/x/net/http2 +golang.org/x/net/http2/h2c golang.org/x/net/http2/hpack golang.org/x/net/idna golang.org/x/net/internal/iana From 8ce669f82c1383015850a7e583e50162e46badef Mon Sep 17 00:00:00 2001 From: Joe Elliott Date: Thu, 4 Jan 2024 13:24:25 -0500 Subject: [PATCH 02/12] refactor Signed-off-by: Joe Elliott --- cmd/tempo/app/app.go | 19 +++---- cmd/tempo/app/http_handler.go | 19 ------- cmd/tempo/app/modules.go | 98 ++++++++++++--------------------- cmd/tempo/app/server_service.go | 84 ++++++++++++++++++++++++++++ 4 files changed, 128 insertions(+), 92 deletions(-) delete mode 100644 cmd/tempo/app/http_handler.go diff --git a/cmd/tempo/app/app.go b/cmd/tempo/app/app.go index 47a1dc3653f..96b7b7fa07b 100644 --- a/cmd/tempo/app/app.go +++ b/cmd/tempo/app/app.go @@ -66,8 +66,7 @@ var ( type App struct { cfg Config - mux *mux.Router - server *server.Server + server TempoServer InternalServer *server.Server readRings map[string]*ring.Ring @@ -96,7 +95,7 @@ func New(cfg Config) (*App, error) { app := &App{ cfg: cfg, readRings: map[string]*ring.Ring{}, - mux: mux.NewRouter(), + server: newTempoServer(), } usagestats.Edition("oss") @@ -195,12 +194,12 @@ func (t *App) Run() error { t.InternalServer.HTTP.Path("/ready").Methods("GET").Handler(t.readyHandler(sm)) } - t.mux.Path(addHTTPAPIPrefix(&t.cfg, api.PathBuildInfo)).Handler(t.buildinfoHandler()).Methods("GET") + t.server.HTTP().Path(addHTTPAPIPrefix(&t.cfg, 
api.PathBuildInfo)).Handler(t.buildinfoHandler()).Methods("GET") - t.mux.Path("/ready").Handler(t.readyHandler(sm)) - t.mux.Path("/status").Handler(t.statusHandler()).Methods("GET") - t.mux.Path("/status/{endpoint}").Handler(t.statusHandler()).Methods("GET") - grpc_health_v1.RegisterHealthServer(t.server.GRPC, grpcutil.NewHealthCheck(sm)) + t.server.HTTP().Path("/ready").Handler(t.readyHandler(sm)) + t.server.HTTP().Path("/status").Handler(t.statusHandler()).Methods("GET") + t.server.HTTP().Path("/status/{endpoint}").Handler(t.statusHandler()).Methods("GET") + grpc_health_v1.RegisterHealthServer(t.server.GRPC(), grpcutil.NewHealthCheck(sm)) // Let's listen for events from this manager, and log them. healthy := func() { level.Info(log.Logger).Log("msg", "Tempo started") } @@ -229,7 +228,7 @@ func (t *App) Run() error { sm.AddListener(services.NewManagerListener(healthy, stopped, serviceFailed)) // Setup signal handler. If signal arrives, we stop the manager, which stops all the services. 
- handler := signals.NewHandler(t.server.Log) + handler := signals.NewHandler(t.server.Log()) go func() { handler.Loop() sm.StopAsync() @@ -475,7 +474,7 @@ func (t *App) writeStatusEndpoints(w io.Writer) error { endpoints := []endpoint{} - err := t.mux.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error { + err := t.server.HTTP().Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error { e := endpoint{} pathTemplate, err := route.GetPathTemplate() diff --git a/cmd/tempo/app/http_handler.go b/cmd/tempo/app/http_handler.go deleted file mode 100644 index b625f0be049..00000000000 --- a/cmd/tempo/app/http_handler.go +++ /dev/null @@ -1,19 +0,0 @@ -package app - -import ( - "net/http" - - "github.com/gorilla/mux" -) - -type handler interface { - Handle(pattern string, handler http.Handler) -} - -type muxWrapper struct { - *mux.Router -} - -func (m muxWrapper) Handle(pattern string, handler http.Handler) { - m.Router.Handle(pattern, handler) -} diff --git a/cmd/tempo/app/modules.go b/cmd/tempo/app/modules.go index 5e17449a462..d8f72e84ef1 100644 --- a/cmd/tempo/app/modules.go +++ b/cmd/tempo/app/modules.go @@ -7,7 +7,6 @@ import ( "io" "net/http" "path" - "strings" "github.com/go-kit/log/level" "github.com/grafana/dskit/dns" @@ -21,8 +20,6 @@ import ( jsoniter "github.com/json-iterator/go" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/collectors" - "golang.org/x/net/http2" - "golang.org/x/net/http2/h2c" "github.com/grafana/tempo/modules/cache" "github.com/grafana/tempo/modules/compactor" @@ -98,16 +95,6 @@ func (t *App) initServer() (services.Service, error) { )) } - t.cfg.Server.Router = t.mux - if t.cfg.StreamOverHTTPEnabled { - t.cfg.Server.Router = nil - } - DisableSignalHandling(&t.cfg.Server) - server, err := server.New(t.cfg.Server) - if err != nil { - return nil, fmt.Errorf("failed to create server: %w", err) - } - servicesToWaitFor := func() []services.Service { svs 
:= []services.Service(nil) for m, s := range t.serviceMap { @@ -119,21 +106,7 @@ func (t *App) initServer() (services.Service, error) { return svs } - t.server = server - s := NewServerService(server, servicesToWaitFor) - - if t.cfg.StreamOverHTTPEnabled { - t.server.HTTPServer.Handler = h2c.NewHandler(server.HTTPServer.Handler, &http2.Server{}) - t.server.HTTP.PathPrefix("/").HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - if req.ProtoMajor == 2 && strings.Contains(req.Header.Get("Content-Type"), "application/grpc") { // jpe - both? i don't think grafana sends the content-type header - t.server.GRPC.ServeHTTP(w, req) - } else { - t.mux.ServeHTTP(w, req) - } - }) - } - - return s, nil + return t.server.StartAndReturnService(t.cfg.Server, t.cfg.StreamOverHTTPEnabled, servicesToWaitFor) } func (t *App) initInternalServer() (services.Service, error) { @@ -190,7 +163,7 @@ func (t *App) initReadRing(cfg ring.Config, name, key string) (*ring.Ring, error return nil, fmt.Errorf("failed to create ring %s: %w", name, err) } - t.mux.Handle("/"+name+"/ring", ring) + t.server.HTTP().Handle("/"+name+"/ring", ring) t.readRings[name] = ring return ring, nil @@ -229,10 +202,10 @@ func (t *App) initOverridesAPI() (services.Service, error) { return t.HTTPAuthMiddleware.Wrap(h) } - t.mux.Path(overridesPath).Methods(http.MethodGet).Handler(wrapHandler(userConfigOverridesAPI.GetHandler)) - t.mux.Path(overridesPath).Methods(http.MethodPost).Handler(wrapHandler(userConfigOverridesAPI.PostHandler)) - t.mux.Path(overridesPath).Methods(http.MethodPatch).Handler(wrapHandler(userConfigOverridesAPI.PatchHandler)) - t.mux.Path(overridesPath).Methods(http.MethodDelete).Handler(wrapHandler(userConfigOverridesAPI.DeleteHandler)) + t.server.HTTP().Path(overridesPath).Methods(http.MethodGet).Handler(wrapHandler(userConfigOverridesAPI.GetHandler)) + t.server.HTTP().Path(overridesPath).Methods(http.MethodPost).Handler(wrapHandler(userConfigOverridesAPI.PostHandler)) + 
t.server.HTTP().Path(overridesPath).Methods(http.MethodPatch).Handler(wrapHandler(userConfigOverridesAPI.PatchHandler)) + t.server.HTTP().Path(overridesPath).Methods(http.MethodDelete).Handler(wrapHandler(userConfigOverridesAPI.DeleteHandler)) return userConfigOverridesAPI, nil } @@ -253,7 +226,7 @@ func (t *App) initDistributor() (services.Service, error) { t.distributor = distributor if distributor.DistributorRing != nil { - t.mux.Handle("/distributor/ring", distributor.DistributorRing) + t.server.HTTP().Handle("/distributor/ring", distributor.DistributorRing) } return t.distributor, nil @@ -269,10 +242,10 @@ func (t *App) initIngester() (services.Service, error) { } t.ingester = ingester - tempopb.RegisterPusherServer(t.server.GRPC, t.ingester) - tempopb.RegisterQuerierServer(t.server.GRPC, t.ingester) - t.mux.Path("/flush").Handler(http.HandlerFunc(t.ingester.FlushHandler)) - t.mux.Path("/shutdown").Handler(http.HandlerFunc(t.ingester.ShutdownHandler)) + tempopb.RegisterPusherServer(t.server.GRPC(), t.ingester) + tempopb.RegisterQuerierServer(t.server.GRPC(), t.ingester) + t.server.HTTP().Path("/flush").Handler(http.HandlerFunc(t.ingester.FlushHandler)) + t.server.HTTP().Path("/shutdown").Handler(http.HandlerFunc(t.ingester.ShutdownHandler)) return t.ingester, nil } @@ -289,9 +262,9 @@ func (t *App) initGenerator() (services.Service, error) { t.generator = genSvc spanStatsHandler := t.HTTPAuthMiddleware.Wrap(http.HandlerFunc(t.generator.SpanMetricsHandler)) - t.mux.Handle(path.Join(api.PathPrefixGenerator, addHTTPAPIPrefix(&t.cfg, api.PathSpanMetrics)), spanStatsHandler) + t.server.HTTP().Handle(path.Join(api.PathPrefixGenerator, addHTTPAPIPrefix(&t.cfg, api.PathSpanMetrics)), spanStatsHandler) - tempopb.RegisterMetricsGeneratorServer(t.server.GRPC, t.generator) + tempopb.RegisterMetricsGeneratorServer(t.server.GRPC(), t.generator) return t.generator, nil } @@ -338,27 +311,27 @@ func (t *App) initQuerier() (services.Service, error) { ) tracesHandler := 
middleware.Wrap(http.HandlerFunc(t.querier.TraceByIDHandler)) - t.mux.Handle(path.Join(api.PathPrefixQuerier, addHTTPAPIPrefix(&t.cfg, api.PathTraces)), tracesHandler) + t.server.HTTP().Handle(path.Join(api.PathPrefixQuerier, addHTTPAPIPrefix(&t.cfg, api.PathTraces)), tracesHandler) searchHandler := t.HTTPAuthMiddleware.Wrap(http.HandlerFunc(t.querier.SearchHandler)) - t.mux.Handle(path.Join(api.PathPrefixQuerier, addHTTPAPIPrefix(&t.cfg, api.PathSearch)), searchHandler) + t.server.HTTP().Handle(path.Join(api.PathPrefixQuerier, addHTTPAPIPrefix(&t.cfg, api.PathSearch)), searchHandler) searchTagsHandler := t.HTTPAuthMiddleware.Wrap(http.HandlerFunc(t.querier.SearchTagsHandler)) - t.mux.Handle(path.Join(api.PathPrefixQuerier, addHTTPAPIPrefix(&t.cfg, api.PathSearchTags)), searchTagsHandler) + t.server.HTTP().Handle(path.Join(api.PathPrefixQuerier, addHTTPAPIPrefix(&t.cfg, api.PathSearchTags)), searchTagsHandler) searchTagsV2Handler := t.HTTPAuthMiddleware.Wrap(http.HandlerFunc(t.querier.SearchTagsV2Handler)) - t.mux.Handle(path.Join(api.PathPrefixQuerier, addHTTPAPIPrefix(&t.cfg, api.PathSearchTagsV2)), searchTagsV2Handler) + t.server.HTTP().Handle(path.Join(api.PathPrefixQuerier, addHTTPAPIPrefix(&t.cfg, api.PathSearchTagsV2)), searchTagsV2Handler) searchTagValuesHandler := t.HTTPAuthMiddleware.Wrap(http.HandlerFunc(t.querier.SearchTagValuesHandler)) - t.mux.Handle(path.Join(api.PathPrefixQuerier, addHTTPAPIPrefix(&t.cfg, api.PathSearchTagValues)), searchTagValuesHandler) + t.server.HTTP().Handle(path.Join(api.PathPrefixQuerier, addHTTPAPIPrefix(&t.cfg, api.PathSearchTagValues)), searchTagValuesHandler) searchTagValuesV2Handler := t.HTTPAuthMiddleware.Wrap(http.HandlerFunc(t.querier.SearchTagValuesV2Handler)) - t.mux.Handle(path.Join(api.PathPrefixQuerier, addHTTPAPIPrefix(&t.cfg, api.PathSearchTagValuesV2)), searchTagValuesV2Handler) + t.server.HTTP().Handle(path.Join(api.PathPrefixQuerier, addHTTPAPIPrefix(&t.cfg, api.PathSearchTagValuesV2)), 
searchTagValuesV2Handler) spanMetricsSummaryHandler := t.HTTPAuthMiddleware.Wrap(http.HandlerFunc(t.querier.SpanMetricsSummaryHandler)) - t.mux.Handle(path.Join(api.PathPrefixQuerier, addHTTPAPIPrefix(&t.cfg, api.PathSpanMetricsSummary)), spanMetricsSummaryHandler) + t.server.HTTP().Handle(path.Join(api.PathPrefixQuerier, addHTTPAPIPrefix(&t.cfg, api.PathSpanMetricsSummary)), spanMetricsSummaryHandler) - return t.querier, t.querier.CreateAndRegisterWorker(t.server.HTTPServer.Handler) + return t.querier, t.querier.CreateAndRegisterWorker(t.server.HTTP()) } func (t *App) initQueryFrontend() (services.Service, error) { @@ -386,11 +359,10 @@ func (t *App) initQueryFrontend() (services.Service, error) { } // register grpc server for queriers to connect to - frontend_v1pb.RegisterFrontendServer(t.server.GRPC, t.frontend) + frontend_v1pb.RegisterFrontendServer(t.server.GRPC(), t.frontend) // we register the streaming querier service on both the http and grpc servers. Grafana expects // this GRPC service to be available on the HTTP server. 
- tempopb.RegisterStreamingQuerierServer(t.server.GRPC, queryFrontend) - // tempopb.RegisterStreamingQuerierServer(t.Server.GRPCOnHTTPServer, queryFrontend) + tempopb.RegisterStreamingQuerierServer(t.server.GRPC(), queryFrontend) // wrap handlers with auth base := middleware.Merge( @@ -399,27 +371,27 @@ func (t *App) initQueryFrontend() (services.Service, error) { ) // http trace by id endpoint - t.mux.Handle(addHTTPAPIPrefix(&t.cfg, api.PathTraces), base.Wrap(queryFrontend.TraceByIDHandler)) + t.server.HTTP().Handle(addHTTPAPIPrefix(&t.cfg, api.PathTraces), base.Wrap(queryFrontend.TraceByIDHandler)) // http search endpoints - t.mux.Handle(addHTTPAPIPrefix(&t.cfg, api.PathSearch), base.Wrap(queryFrontend.SearchHandler)) - t.mux.Handle(addHTTPAPIPrefix(&t.cfg, api.PathWSSearch), base.Wrap(queryFrontend.SearchWSHandler)) - t.mux.Handle(addHTTPAPIPrefix(&t.cfg, api.PathSearchTags), base.Wrap(queryFrontend.SearchTagsHandler)) - t.mux.Handle(addHTTPAPIPrefix(&t.cfg, api.PathSearchTagsV2), base.Wrap(queryFrontend.SearchTagsV2Handler)) - t.mux.Handle(addHTTPAPIPrefix(&t.cfg, api.PathSearchTagValues), base.Wrap(queryFrontend.SearchTagsValuesHandler)) - t.mux.Handle(addHTTPAPIPrefix(&t.cfg, api.PathSearchTagValuesV2), base.Wrap(queryFrontend.SearchTagsValuesV2Handler)) + t.server.HTTP().Handle(addHTTPAPIPrefix(&t.cfg, api.PathSearch), base.Wrap(queryFrontend.SearchHandler)) + t.server.HTTP().Handle(addHTTPAPIPrefix(&t.cfg, api.PathWSSearch), base.Wrap(queryFrontend.SearchWSHandler)) + t.server.HTTP().Handle(addHTTPAPIPrefix(&t.cfg, api.PathSearchTags), base.Wrap(queryFrontend.SearchTagsHandler)) + t.server.HTTP().Handle(addHTTPAPIPrefix(&t.cfg, api.PathSearchTagsV2), base.Wrap(queryFrontend.SearchTagsV2Handler)) + t.server.HTTP().Handle(addHTTPAPIPrefix(&t.cfg, api.PathSearchTagValues), base.Wrap(queryFrontend.SearchTagsValuesHandler)) + t.server.HTTP().Handle(addHTTPAPIPrefix(&t.cfg, api.PathSearchTagValuesV2), base.Wrap(queryFrontend.SearchTagsValuesV2Handler)) // http 
metrics endpoints - t.mux.Handle(addHTTPAPIPrefix(&t.cfg, api.PathSpanMetricsSummary), base.Wrap(queryFrontend.SpanMetricsSummaryHandler)) + t.server.HTTP().Handle(addHTTPAPIPrefix(&t.cfg, api.PathSpanMetricsSummary), base.Wrap(queryFrontend.SpanMetricsSummaryHandler)) // the query frontend needs to have knowledge of the blocks so it can shard search jobs t.store.EnablePolling(context.Background(), nil) // http query echo endpoint - t.mux.Handle(addHTTPAPIPrefix(&t.cfg, api.PathEcho), echoHandler()) + t.server.HTTP().Handle(addHTTPAPIPrefix(&t.cfg, api.PathEcho), echoHandler()) // http endpoint to see usage stats data - t.mux.Handle(addHTTPAPIPrefix(&t.cfg, api.PathUsageStats), usageStatsHandler(t.cfg.UsageReport)) + t.server.HTTP().Handle(addHTTPAPIPrefix(&t.cfg, api.PathUsageStats), usageStatsHandler(t.cfg.UsageReport)) // todo: queryFrontend should implement service.Service and take the cortex frontend a submodule return t.frontend, nil @@ -437,7 +409,7 @@ func (t *App) initCompactor() (services.Service, error) { t.compactor = compactor if t.compactor.Ring != nil { - t.mux.Handle("/compactor/ring", t.compactor.Ring) + t.server.HTTP().Handle("/compactor/ring", t.compactor.Ring) } return t.compactor, nil @@ -477,7 +449,7 @@ func (t *App) initMemberlistKV() (services.Service, error) { t.cfg.Distributor.DistributorRing.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV t.cfg.Compactor.ShardingRing.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV - t.mux.Handle("/memberlist", t.MemberlistKV) + t.server.HTTP().Handle("/memberlist", t.MemberlistKV) return t.MemberlistKV, nil } diff --git a/cmd/tempo/app/server_service.go b/cmd/tempo/app/server_service.go index 34adf898d79..0ef80e52a8d 100644 --- a/cmd/tempo/app/server_service.go +++ b/cmd/tempo/app/server_service.go @@ -3,14 +3,98 @@ package app import ( "context" "fmt" + "net/http" + "strings" + "sync" + "github.com/go-kit/log" "github.com/go-kit/log/level" + "github.com/gorilla/mux" 
"github.com/grafana/dskit/server" "github.com/grafana/dskit/services" + "golang.org/x/net/http2" + "golang.org/x/net/http2/h2c" + "google.golang.org/grpc" util_log "github.com/grafana/tempo/pkg/util/log" ) +type TempoServer interface { + HTTP() *mux.Router + GRPC() *grpc.Server + Log() log.Logger + EnableHTTP2() + + StartAndReturnService(cfg server.Config, supportGRPCOnHTTP bool, servicesToWaitFor func() []services.Service) (services.Service, error) +} + +type tempoServer struct { + mux *mux.Router // all tempo http routes are added here + + externalServer *server.Server // the standard server that all HTTP/GRPC requests are served on + // jpe: put internal server here as well? + + enableHTTP2 sync.Once +} + +func newTempoServer() *tempoServer { + return &tempoServer{ + mux: mux.NewRouter(), + // externalServer will be initialized in StartService + } +} + +func (s *tempoServer) HTTP() *mux.Router { + return s.mux +} + +func (s *tempoServer) GRPC() *grpc.Server { + return s.externalServer.GRPC +} + +func (s *tempoServer) Log() log.Logger { + return s.externalServer.Log +} + +func (s *tempoServer) EnableHTTP2() { + s.enableHTTP2.Do(func() { + s.externalServer.HTTPServer.Handler = h2c.NewHandler(s.externalServer.HTTPServer.Handler, &http2.Server{}) + }) +} + +func (s *tempoServer) StartAndReturnService(cfg server.Config, supportGRPCOnHTTP bool, servicesToWaitFor func() []services.Service) (services.Service, error) { + var err error + + // use tempo's mux unless we are doing grpc over http, then we will let the library instantiate its own + // router and piggy back on it to route grpc requests + cfg.Router = s.mux + if supportGRPCOnHTTP { + cfg.Router = nil + } + DisableSignalHandling(&cfg) + s.externalServer, err = server.New(cfg) + if err != nil { + return nil, fmt.Errorf("failed to create server: %w", err) + } + + // now that we have created the server and service let's setup our grpc/http router if necessary + if supportGRPCOnHTTP { + s.EnableHTTP2() + 
s.externalServer.HTTP.PathPrefix("/").HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + // route to GRPC server if it's a GRPC request + if req.ProtoMajor == 2 && strings.Contains(req.Header.Get("Content-Type"), "application/grpc") { // jpe - both? i don't think grafana sends the content-type header + s.externalServer.GRPC.ServeHTTP(w, req) + return + } + + // default to standard http server + s.mux.ServeHTTP(w, req) + }) + } + + return NewServerService(s.externalServer, servicesToWaitFor), nil +} + // NewServerService constructs service from Server component. // servicesToWaitFor is called when server is stopping, and should return all // services that need to terminate before server actually stops. From c159fc22305344c5e3f9c4298728301cb700b707 Mon Sep 17 00:00:00 2001 From: Joe Elliott Date: Thu, 4 Jan 2024 17:05:41 -0500 Subject: [PATCH 03/12] working? Signed-off-by: Joe Elliott --- cmd/tempo/app/server_service.go | 25 +++- go.mod | 5 +- go.sum | 8 +- .../grafana/dskit/httpgrpc/server/server.go | 29 +++- .../grafana/dskit/middleware/http_tracing.go | 88 ++++++------ vendor/github.com/grafana/dskit/ring/batch.go | 15 ++ .../grafana/dskit/ring/client/pool.go | 44 ++++-- .../grafana/dskit/ring/replication_set.go | 52 ++++--- .../dskit/ring/replication_set_tracker.go | 64 +++++---- vendor/github.com/grafana/dskit/ring/ring.go | 80 +++++++++-- .../grafana/dskit/ring/token_generator.go | 19 ++- .../github.com/grafana/dskit/server/server.go | 126 +++++++++-------- .../grafana/dskit/tenant/resolver.go | 132 ++++-------------- .../github.com/grafana/dskit/tenant/tenant.go | 64 +++++++-- .../github.com/klauspost/compress/README.md | 8 ++ .../klauspost/compress/fse/compress.go | 2 +- .../klauspost/compress/gzhttp/compress.go | 11 +- .../klauspost/compress/zstd/enc_best.go | 44 +++--- .../klauspost/compress/zstd/enc_better.go | 17 ++- vendor/modules.txt | 13 +- 20 files changed, 517 insertions(+), 329 deletions(-) diff --git a/cmd/tempo/app/server_service.go 
b/cmd/tempo/app/server_service.go index 0ef80e52a8d..b6c6d01f0dd 100644 --- a/cmd/tempo/app/server_service.go +++ b/cmd/tempo/app/server_service.go @@ -10,6 +10,7 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/gorilla/mux" + "github.com/grafana/dskit/middleware" "github.com/grafana/dskit/server" "github.com/grafana/dskit/services" "golang.org/x/net/http2" @@ -65,14 +66,16 @@ func (s *tempoServer) EnableHTTP2() { func (s *tempoServer) StartAndReturnService(cfg server.Config, supportGRPCOnHTTP bool, servicesToWaitFor func() []services.Service) (services.Service, error) { var err error + metrics := server.NewServerMetrics(cfg) // use tempo's mux unless we are doing grpc over http, then we will let the library instantiate its own // router and piggy back on it to route grpc requests cfg.Router = s.mux if supportGRPCOnHTTP { cfg.Router = nil + cfg.DoNotAddDefaultHTTPMiddleware = true // we don't want instrumentation on the "root" router, we want it on our mux } DisableSignalHandling(&cfg) - s.externalServer, err = server.New(cfg) + s.externalServer, err = server.NewWithMetrics(cfg, metrics) if err != nil { return nil, fmt.Errorf("failed to create server: %w", err) } @@ -80,6 +83,24 @@ func (s *tempoServer) StartAndReturnService(cfg server.Config, supportGRPCOnHTTP // now that we have created the server and service let's setup our grpc/http router if necessary if supportGRPCOnHTTP { s.EnableHTTP2() + // jpe - this works as well + // s.externalServer.HTTP.NotFoundHandler = http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + // // route to GRPC server if it's a GRPC request + // if req.ProtoMajor == 2 && strings.Contains(req.Header.Get("Content-Type"), "application/grpc") { // jpe - both? 
i don't think grafana sends the content-type header + // s.externalServer.GRPC.ServeHTTP(w, req) + // return + // } + + // w.WriteHeader(http.StatusNotFound) + // }) + + // recreate dskit instrumentation here + cfg.DoNotAddDefaultHTTPMiddleware = false + httpMiddleware, err := server.BuildHTTPMiddleware(cfg, s.mux, metrics, s.externalServer.Log) + if err != nil { + return nil, fmt.Errorf("failed to create http middleware: %w", err) + } + router := middleware.Merge(httpMiddleware...).Wrap(s.mux) s.externalServer.HTTP.PathPrefix("/").HandlerFunc(func(w http.ResponseWriter, req *http.Request) { // route to GRPC server if it's a GRPC request if req.ProtoMajor == 2 && strings.Contains(req.Header.Get("Content-Type"), "application/grpc") { // jpe - both? i don't think grafana sends the content-type header @@ -88,7 +109,7 @@ func (s *tempoServer) StartAndReturnService(cfg server.Config, supportGRPCOnHTTP } // default to standard http server - s.mux.ServeHTTP(w, req) + router.ServeHTTP(w, req) }) } diff --git a/go.mod b/go.mod index 8fd50602cf4..5a754462987 100644 --- a/go.mod +++ b/go.mod @@ -35,7 +35,7 @@ require ( github.com/jedib0t/go-pretty/v6 v6.2.4 github.com/json-iterator/go v1.1.12 github.com/jsternberg/zap-logfmt v1.2.0 - github.com/klauspost/compress v1.17.2 + github.com/klauspost/compress v1.17.3 github.com/minio/minio-go/v7 v7.0.63 github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4 github.com/olekukonko/tablewriter v0.0.5 @@ -187,6 +187,7 @@ require ( github.com/google/s2a-go v0.1.7 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect github.com/gorilla/handlers v1.5.1 // indirect + github.com/grafana/pyroscope-go/godeltaprof v0.1.6 // indirect github.com/grafana/regexp v0.0.0-20221123153739-15dc172cd2db // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.1 // indirect github.com/hashicorp/consul/api v1.25.1 // indirect @@ -342,3 +343,5 @@ replace ( replace github.com/hashicorp/memberlist => 
github.com/grafana/memberlist v0.3.1-0.20220708130638-bd88e10a3d91 replace golang.org/x/net => golang.org/x/net v0.17.0 + +replace github.com/grafana/dskit => ../dskit diff --git a/go.sum b/go.sum index e1673c3e0c4..43ae3c1cbe2 100644 --- a/go.sum +++ b/go.sum @@ -508,14 +508,14 @@ github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+ github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/grafana/dskit v0.0.0-20231120170505-765e343eda4f h1:gyojr97YeWZ70pKNakWv5/tKwBHuLy3icnIeCo9gQr4= -github.com/grafana/dskit v0.0.0-20231120170505-765e343eda4f/go.mod h1:8dsy5tQOkeNQyjXpm5mQsbCu3H5uzeBD35MzRQFznKU= github.com/grafana/e2e v0.1.1 h1:/b6xcv5BtoBnx8cZnCiey9DbjEc8z7gXHO5edoeRYxc= github.com/grafana/e2e v0.1.1/go.mod h1:RpNLgae5VT+BUHvPE+/zSypmOXKwEu4t+tnEMS1ATaE= github.com/grafana/gomemcache v0.0.0-20231023152154-6947259a0586 h1:/of8Z8taCPftShATouOrBVy6GaTTjgQd/VfNiZp/VXQ= github.com/grafana/gomemcache v0.0.0-20231023152154-6947259a0586/go.mod h1:PGk3RjYHpxMM8HFPhKKo+vve3DdlPUELZLSDEFehPuU= github.com/grafana/memberlist v0.3.1-0.20220708130638-bd88e10a3d91 h1:/NipyHnOmvRsVzj81j2qE0VxsvsqhOB0f4vJIhk2qCQ= github.com/grafana/memberlist v0.3.1-0.20220708130638-bd88e10a3d91/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/grafana/pyroscope-go/godeltaprof v0.1.6 h1:nEdZ8louGAplSvIJi1HVp7kWvFvdiiYg3COLlTwJiFo= +github.com/grafana/pyroscope-go/godeltaprof v0.1.6/go.mod h1:Tk376Nbldo4Cha9RgiU7ik8WKFkNpfds98aUzS8omLE= github.com/grafana/regexp v0.0.0-20221123153739-15dc172cd2db h1:7aN5cccjIqCLTzedH7MZzRZt5/lsAHch6Z3L2ZGn5FA= github.com/grafana/regexp v0.0.0-20221123153739-15dc172cd2db/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 
h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= @@ -658,8 +658,8 @@ github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0Lh github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4= -github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.17.3 h1:qkRjuerhUU1EmXLYGkSH6EZL+vPSxIrYjLNAK4slzwA= +github.com/klauspost/compress v1.17.3/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= diff --git a/vendor/github.com/grafana/dskit/httpgrpc/server/server.go b/vendor/github.com/grafana/dskit/httpgrpc/server/server.go index c642f7fa13f..b73c5a0f775 100644 --- a/vendor/github.com/grafana/dskit/httpgrpc/server/server.go +++ b/vendor/github.com/grafana/dskit/httpgrpc/server/server.go @@ -32,17 +32,29 @@ var ( DoNotLogErrorHeaderKey = http.CanonicalHeaderKey("X-DoNotLogError") ) +type Option func(*Server) + +func WithReturn4XXErrors(s *Server) { + s.return4XXErrors = true +} + +func applyServerOptions(s *Server, opts ...Option) *Server { + for _, opt := range opts { + opt(s) + } + return s +} + // Server implements HTTPServer. HTTPServer is a generated interface that gRPC // servers must implement. type Server struct { - handler http.Handler + handler http.Handler + return4XXErrors bool } // NewServer makes a new Server. 
-func NewServer(handler http.Handler) *Server { - return &Server{ - handler: handler, - } +func NewServer(handler http.Handler, opts ...Option) *Server { + return applyServerOptions(&Server{handler: handler}, opts...) } // Handle implements HTTPServer. @@ -67,7 +79,7 @@ func (s Server) Handle(ctx context.Context, r *httpgrpc.HTTPRequest) (*httpgrpc. Headers: httpgrpc.FromHeader(header), Body: recorder.Body.Bytes(), } - if recorder.Code/100 == 5 { + if s.shouldReturnError(resp) { err := httpgrpc.ErrorFromHTTPResponse(resp) if doNotLogError { err = middleware.DoNotLogError{Err: err} @@ -77,6 +89,11 @@ func (s Server) Handle(ctx context.Context, r *httpgrpc.HTTPRequest) (*httpgrpc. return resp, nil } +func (s Server) shouldReturnError(resp *httpgrpc.HTTPResponse) bool { + mask := resp.GetCode() / 100 + return mask == 5 || (s.return4XXErrors && mask == 4) +} + // Client is a http.Handler that forwards the request over gRPC. type Client struct { client httpgrpc.HTTPClient diff --git a/vendor/github.com/grafana/dskit/middleware/http_tracing.go b/vendor/github.com/grafana/dskit/middleware/http_tracing.go index 989f50fe1e8..901970a4a6b 100644 --- a/vendor/github.com/grafana/dskit/middleware/http_tracing.go +++ b/vendor/github.com/grafana/dskit/middleware/http_tracing.go @@ -5,13 +5,17 @@ package middleware import ( + "context" "fmt" "net/http" + "github.com/grafana/dskit/httpgrpc" + "github.com/gorilla/mux" "github.com/opentracing-contrib/go-stdlib/nethttp" "github.com/opentracing/opentracing-go" "github.com/opentracing/opentracing-go/ext" + "google.golang.org/grpc" ) // Dummy dependency to enforce that we have a nethttp version newer @@ -46,27 +50,8 @@ func (t Tracer) Wrap(next http.Handler) http.Handler { return nethttp.Middleware(opentracing.GlobalTracer(), next, options...) } -// HTTPGRPCTracer is a middleware which traces incoming httpgrpc requests. 
-type HTTPGRPCTracer struct { - RouteMatcher RouteMatcher -} - -// InitHTTPGRPCMiddleware initializes gorilla/mux-compatible HTTP middleware -// -// HTTPGRPCTracer is specific to the server-side handling of HTTP requests which were -// wrapped into gRPC requests and routed through the httpgrpc.HTTP/Handle gRPC. -// -// HTTPGRPCTracer.Wrap must be attached to the same mux.Router assigned to dskit/server.Config.Router -// but it does not need to be attached to dskit/server.Config.HTTPMiddleware. -// dskit/server.Config.HTTPMiddleware is applied to direct HTTP requests not routed through gRPC; -// the server utilizes the default http middleware Tracer.Wrap for those standard http requests. -func InitHTTPGRPCMiddleware(router *mux.Router) *mux.Router { - middleware := HTTPGRPCTracer{RouteMatcher: router} - router.Use(middleware.Wrap) - return router -} - -// Wrap creates and decorates server-side tracing spans for httpgrpc requests +// HTTPGRPCTracingInterceptor adds additional information about the encapsulated HTTP request +// to httpgrpc trace spans. // // The httpgrpc client wraps HTTP requests up into a generic httpgrpc.HTTP/Handle gRPC method. // The httpgrpc server unwraps httpgrpc.HTTP/Handle gRPC requests into HTTP requests @@ -80,39 +65,52 @@ func InitHTTPGRPCMiddleware(router *mux.Router) *mux.Router { // and attaches the HTTP server span tags to the parent httpgrpc.HTTP/Handle gRPC span, allowing // tracing tooling to differentiate the HTTP requests represented by the httpgrpc.HTTP/Handle spans. // -// opentracing-contrib/go-stdlib/nethttp.Middleware could not be used here -// as it does not expose options to access and tag the incoming parent span. 
-func (hgt HTTPGRPCTracer) Wrap(next http.Handler) http.Handler { - httpOperationNameFunc := makeHTTPOperationNameFunc(hgt.RouteMatcher) - fn := func(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - tracer := opentracing.GlobalTracer() +// Note that we cannot do this in the httpgrpc Server implementation, as some applications (eg. +// Mimir's queriers) call Server.Handle() directly, which means we'd attach HTTP-request related +// span tags to whatever parent span is active in the caller, rather than the /httpgrpc.HTTP/Handle +// span created by the tracing middleware for requests that arrive over the network. +func HTTPGRPCTracingInterceptor(router *mux.Router) grpc.UnaryServerInterceptor { + return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) { + if info.FullMethod != "/httpgrpc.HTTP/Handle" { + return handler(ctx, req) + } + httpgrpcRequest, ok := req.(*httpgrpc.HTTPRequest) + if !ok { + return handler(ctx, req) + } + + httpRequest, err := httpgrpc.ToHTTPRequest(ctx, httpgrpcRequest) + if err != nil { + return handler(ctx, req) + } + + tracer := opentracing.GlobalTracer() parentSpan := opentracing.SpanFromContext(ctx) // extract relevant span & tag data from request - method := r.Method - matchedRoute := getRouteName(hgt.RouteMatcher, r) - urlPath := r.URL.Path - userAgent := r.Header.Get("User-Agent") + method := httpRequest.Method + routeName := getRouteName(router, httpRequest) + urlPath := httpRequest.URL.Path + userAgent := httpRequest.Header.Get("User-Agent") // tag parent httpgrpc.HTTP/Handle server span, if it exists if parentSpan != nil { parentSpan.SetTag(string(ext.HTTPUrl), urlPath) parentSpan.SetTag(string(ext.HTTPMethod), method) - parentSpan.SetTag("http.route", matchedRoute) + parentSpan.SetTag("http.route", routeName) parentSpan.SetTag("http.user_agent", userAgent) } // create and start child HTTP span // mirroring opentracing-contrib/go-stdlib/nethttp.Middleware 
span name and tags - childSpanName := httpOperationNameFunc(r) + childSpanName := getOperationName(routeName, httpRequest) startSpanOpts := []opentracing.StartSpanOption{ ext.SpanKindRPCServer, opentracing.Tag{Key: string(ext.Component), Value: "net/http"}, opentracing.Tag{Key: string(ext.HTTPUrl), Value: urlPath}, opentracing.Tag{Key: string(ext.HTTPMethod), Value: method}, - opentracing.Tag{Key: "http.route", Value: matchedRoute}, + opentracing.Tag{Key: "http.route", Value: routeName}, opentracing.Tag{Key: "http.user_agent", Value: userAgent}, } if parentSpan != nil { @@ -127,19 +125,21 @@ func (hgt HTTPGRPCTracer) Wrap(next http.Handler) http.Handler { childSpan := tracer.StartSpan(childSpanName, startSpanOpts...) defer childSpan.Finish() - r = r.WithContext(opentracing.ContextWithSpan(r.Context(), childSpan)) - next.ServeHTTP(w, r) + ctx = opentracing.ContextWithSpan(ctx, childSpan) + return handler(ctx, req) } - - return http.HandlerFunc(fn) } func makeHTTPOperationNameFunc(routeMatcher RouteMatcher) func(r *http.Request) string { return func(r *http.Request) string { - op := getRouteName(routeMatcher, r) - if op == "" { - return "HTTP " + r.Method - } - return fmt.Sprintf("HTTP %s - %s", r.Method, op) + routeName := getRouteName(routeMatcher, r) + return getOperationName(routeName, r) + } +} + +func getOperationName(routeName string, r *http.Request) string { + if routeName == "" { + return "HTTP " + r.Method } + return fmt.Sprintf("HTTP %s - %s", r.Method, routeName) } diff --git a/vendor/github.com/grafana/dskit/ring/batch.go b/vendor/github.com/grafana/dskit/ring/batch.go index 5acd8fd0086..7781fe67a5a 100644 --- a/vendor/github.com/grafana/dskit/ring/batch.go +++ b/vendor/github.com/grafana/dskit/ring/batch.go @@ -111,6 +111,15 @@ func DoBatchWithOptions(ctx context.Context, op Operation, r ReadRing, keys []ui bufZones [GetBufferSize]string ) for i, key := range keys { + // Get call below takes ~1 microsecond for ~500 instances. 
+ // Checking every 10K calls would be every 10ms. + if i%10e3 == 0 { + if err := ctx.Err(); err != nil { + o.Cleanup() + return err + } + } + replicationSet, err := r.Get(key, op, bufDescs[:0], bufHosts[:0], bufZones[:0]) if err != nil { o.Cleanup() @@ -134,6 +143,12 @@ func DoBatchWithOptions(ctx context.Context, op Operation, r ReadRing, keys []ui } } + // One last check before calling the callbacks: it doesn't make sense if context is canceled. + if err := ctx.Err(); err != nil { + o.Cleanup() + return err + } + tracker := batchTracker{ done: make(chan struct{}, 1), err: make(chan error, 1), diff --git a/vendor/github.com/grafana/dskit/ring/client/pool.go b/vendor/github.com/grafana/dskit/ring/client/pool.go index 5584a7e8751..6bf015e6e20 100644 --- a/vendor/github.com/grafana/dskit/ring/client/pool.go +++ b/vendor/github.com/grafana/dskit/ring/client/pool.go @@ -157,15 +157,43 @@ func (p *Pool) RemoveClientFor(addr string) { client, ok := p.clients[addr] if ok { delete(p.clients, addr) - if p.clientsMetric != nil { - p.clientsMetric.Add(-1) + p.closeClient(addr, client) + } +} + +func (p *Pool) closeClient(addr string, client PoolClient) { + if p.clientsMetric != nil { + p.clientsMetric.Add(-1) + } + // Close in the background since this operation may take awhile and we have a mutex + go func(addr string, closer PoolClient) { + if err := closer.Close(); err != nil { + level.Error(p.logger).Log("msg", fmt.Sprintf("error closing connection to %s", p.clientName), "addr", addr, "err", err) } - // Close in the background since this operation may take awhile and we have a mutex - go func(addr string, closer PoolClient) { - if err := closer.Close(); err != nil { - level.Error(p.logger).Log("msg", fmt.Sprintf("error closing connection to %s", p.clientName), "addr", addr, "err", err) - } - }(addr, client) + }(addr, client) +} + +// RemoveClient removes the client instance from the pool if it is still there and not cleaned up by health check. 
+// The value of client needs to be the same as returned by GetClientForInstance or GetClientFor. +// If addr is not empty and contains the same addr passed when obtaining the client, then the operation is sped up. +func (p *Pool) RemoveClient(client PoolClient, addr string) { + p.Lock() + defer p.Unlock() + if addr != "" { + if p.clients[addr] != client { + return + } + delete(p.clients, addr) + p.closeClient(addr, client) + return + } + for addr, cachedClient := range p.clients { + if cachedClient != client { + continue + } + delete(p.clients, addr) + p.closeClient(addr, client) + return } } diff --git a/vendor/github.com/grafana/dskit/ring/replication_set.go b/vendor/github.com/grafana/dskit/ring/replication_set.go index f389f4766fc..f05153c0525 100644 --- a/vendor/github.com/grafana/dskit/ring/replication_set.go +++ b/vendor/github.com/grafana/dskit/ring/replication_set.go @@ -11,6 +11,7 @@ import ( "github.com/go-kit/log/level" "github.com/opentracing/opentracing-go/ext" + "github.com/grafana/dskit/cancellation" "github.com/grafana/dskit/spanlogger" ) @@ -37,7 +38,7 @@ func (r ReplicationSet) Do(ctx context.Context, delay time.Duration, f func(cont // Initialise the result tracker, which is use to keep track of successes and failures. var tracker replicationSetResultTracker if r.MaxUnavailableZones > 0 { - tracker = newZoneAwareResultTracker(r.Instances, r.MaxUnavailableZones, kitlog.NewNopLogger()) + tracker = newZoneAwareResultTracker(r.Instances, r.MaxUnavailableZones, nil, kitlog.NewNopLogger()) } else { tracker = newDefaultResultTracker(r.Instances, r.MaxErrors, kitlog.NewNopLogger()) } @@ -123,6 +124,18 @@ type DoUntilQuorumConfig struct { // total response size across all instances is reached, making further requests to other // instances would not be worthwhile. IsTerminalError func(error) bool + + // ZoneSorter orders the provided zones in preference order, for use when MinimizeRequests is true + // and DoUntilQuorum is operating in zone-aware mode. 
If not set, zones will be used in a + // randomly-selected order. + // + // Earlier zones will be used first. + // The function can modify the provided slice of zones in place. + // All provided zones must be returned exactly once. + // + // This can be used to prioritise zones that are more likely to succeed, or are expected to complete + // faster, for example. + ZoneSorter ZoneSorter } func (c DoUntilQuorumConfig) Validate() error { @@ -168,8 +181,12 @@ func (c DoUntilQuorumConfig) Validate() error { // r.MaxUnavailableZones is 1 and there are three zones, DoUntilQuorum will initially only call f for instances in two // zones, and only call f for instances in the remaining zone if a request in the initial two zones fails. // -// DoUntilQuorum will randomly select available zones / instances such that calling DoUntilQuorum multiple times with -// the same ReplicationSet should evenly distribute requests across all zones / instances. +// If cfg.ZoneSorter is non-nil and DoUntilQuorum is operating in zone-aware mode, DoUntilQuorum will initiate requests +// to zones in the order returned by the sorter. +// +// If cfg.ZoneSorter is nil, or DoUntilQuorum is operating in non-zone-aware mode, DoUntilQuorum will randomly select +// available zones / instances such that calling DoUntilQuorum multiple times with the same ReplicationSet should evenly +// distribute requests across all zones / instances. 
// // If cfg.HedgingDelay is non-zero, DoUntilQuorum will call f for an additional zone's instances (if zone-aware) / an // additional instance (if not zone-aware) every cfg.HedgingDelay until one of the termination conditions above is @@ -197,7 +214,7 @@ func DoUntilQuorum[T any](ctx context.Context, r ReplicationSet, cfg DoUntilQuor ctx, cancel := context.WithCancel(ctx) defer cancel() - wrappedF := func(ctx context.Context, desc *InstanceDesc, _ context.CancelFunc) (T, error) { + wrappedF := func(ctx context.Context, desc *InstanceDesc, _ context.CancelCauseFunc) (T, error) { return f(ctx, desc) } @@ -216,7 +233,7 @@ func DoUntilQuorum[T any](ctx context.Context, r ReplicationSet, cfg DoUntilQuor // DoUntilQuorumWithoutSuccessfulContextCancellation // // Failing to do this may result in a memory leak. -func DoUntilQuorumWithoutSuccessfulContextCancellation[T any](ctx context.Context, r ReplicationSet, cfg DoUntilQuorumConfig, f func(context.Context, *InstanceDesc, context.CancelFunc) (T, error), cleanupFunc func(T)) ([]T, error) { +func DoUntilQuorumWithoutSuccessfulContextCancellation[T any](ctx context.Context, r ReplicationSet, cfg DoUntilQuorumConfig, f func(context.Context, *InstanceDesc, context.CancelCauseFunc) (T, error), cleanupFunc func(T)) ([]T, error) { if err := cfg.Validate(); err != nil { return nil, err } @@ -249,7 +266,7 @@ func DoUntilQuorumWithoutSuccessfulContextCancellation[T any](ctx context.Contex var resultTracker replicationSetResultTracker var contextTracker replicationSetContextTracker if r.MaxUnavailableZones > 0 || r.ZoneAwarenessEnabled { - resultTracker = newZoneAwareResultTracker(r.Instances, r.MaxUnavailableZones, logger) + resultTracker = newZoneAwareResultTracker(r.Instances, r.MaxUnavailableZones, cfg.ZoneSorter, logger) contextTracker = newZoneAwareContextTracker(ctx, r.Instances) } else { resultTracker = newDefaultResultTracker(r.Instances, r.MaxErrors, logger) @@ -293,12 +310,12 @@ func 
DoUntilQuorumWithoutSuccessfulContextCancellation[T any](ctx context.Contex } } - terminate := func(err error) ([]T, error) { - if cfg.Logger != nil { + terminate := func(err error, cause string) ([]T, error) { + if cfg.Logger != nil && !errors.Is(err, context.Canceled) { // Cancellation is not an error. ext.Error.Set(cfg.Logger.Span, true) } - contextTracker.cancelAllContexts() + contextTracker.cancelAllContexts(cancellation.NewErrorf(cause)) cleanupResultsAlreadyReceived() return nil, err } @@ -314,12 +331,13 @@ func DoUntilQuorumWithoutSuccessfulContextCancellation[T any](ctx context.Contex for !resultTracker.succeeded() { select { case <-ctx.Done(): - level.Debug(logger).Log("msg", "parent context done, returning", "err", ctx.Err()) + err := context.Cause(ctx) + level.Debug(logger).Log("msg", "parent context done, returning", "err", err) // No need to cancel individual instance contexts, as they inherit the cancellation from ctx. cleanupResultsAlreadyReceived() - return nil, ctx.Err() + return nil, err case <-hedgingTrigger: resultTracker.startAdditionalRequests() case result := <-resultsChan: @@ -328,7 +346,7 @@ func DoUntilQuorumWithoutSuccessfulContextCancellation[T any](ctx context.Contex if result.err != nil && cfg.IsTerminalError != nil && cfg.IsTerminalError(result.err) { level.Warn(logger).Log("msg", "cancelling all outstanding requests because a terminal error occurred", "err", result.err) // We must return before calling resultTracker.done() below, otherwise done() might start further requests if request minimisation is enabled. 
- return terminate(result.err) + return terminate(result.err, "a terminal error occurred") } resultTracker.done(result.instance, result.err) @@ -336,11 +354,11 @@ func DoUntilQuorumWithoutSuccessfulContextCancellation[T any](ctx context.Contex if result.err == nil { resultsMap[result.instance] = result.result } else { - contextTracker.cancelContextFor(result.instance) + contextTracker.cancelContextFor(result.instance, cancellation.NewErrorf("this instance returned an error: %w", result.err)) if resultTracker.failed() { - level.Error(logger).Log("msg", "cancelling all requests because quorum cannot be reached") - return terminate(result.err) + level.Error(logger).Log("msg", "cancelling all outstanding requests because quorum cannot be reached") + return terminate(result.err, "quorum cannot be reached") } } } @@ -358,12 +376,12 @@ func DoUntilQuorumWithoutSuccessfulContextCancellation[T any](ctx context.Contex if resultTracker.shouldIncludeResultFrom(instance) { results = append(results, result) } else { - contextTracker.cancelContextFor(instance) + contextTracker.cancelContextFor(instance, cancellation.NewErrorf("quorum reached, result not required from this instance")) cleanupFunc(result) } } else { // Nothing to clean up (yet) - this will be handled by deferred call above. - contextTracker.cancelContextFor(instance) + contextTracker.cancelContextFor(instance, cancellation.NewErrorf("quorum reached, result not required from this instance")) } } diff --git a/vendor/github.com/grafana/dskit/ring/replication_set_tracker.go b/vendor/github.com/grafana/dskit/ring/replication_set_tracker.go index d74a3e2aae3..202b568bb95 100644 --- a/vendor/github.com/grafana/dskit/ring/replication_set_tracker.go +++ b/vendor/github.com/grafana/dskit/ring/replication_set_tracker.go @@ -63,15 +63,15 @@ type replicationSetContextTracker interface { // The context.CancelFunc will only cancel the context for this instance (ie. 
if this tracker // is zone-aware, calling the context.CancelFunc should not cancel contexts for other instances // in the same zone). - contextFor(instance *InstanceDesc) (context.Context, context.CancelFunc) + contextFor(instance *InstanceDesc) (context.Context, context.CancelCauseFunc) // Cancels the context for instance previously obtained with contextFor. // This method may cancel the context for other instances if those other instances are part of // the same zone and this tracker is zone-aware. - cancelContextFor(instance *InstanceDesc) + cancelContextFor(instance *InstanceDesc, cause error) // Cancels all contexts previously obtained with contextFor. - cancelAllContexts() + cancelAllContexts(cause error) } var errResultNotNeeded = errors.New("result from this instance is not needed") @@ -196,7 +196,7 @@ func (t *defaultResultTracker) startAllRequests() { func (t *defaultResultTracker) awaitStart(ctx context.Context, instance *InstanceDesc) error { select { case <-ctx.Done(): - return ctx.Err() + return context.Cause(ctx) case _, ok := <-t.instanceRelease[instance]: if ok { return nil @@ -208,32 +208,32 @@ func (t *defaultResultTracker) awaitStart(ctx context.Context, instance *Instanc type defaultContextTracker struct { ctx context.Context - cancelFuncs map[*InstanceDesc]context.CancelFunc + cancelFuncs map[*InstanceDesc]context.CancelCauseFunc } func newDefaultContextTracker(ctx context.Context, instances []InstanceDesc) *defaultContextTracker { return &defaultContextTracker{ ctx: ctx, - cancelFuncs: make(map[*InstanceDesc]context.CancelFunc, len(instances)), + cancelFuncs: make(map[*InstanceDesc]context.CancelCauseFunc, len(instances)), } } -func (t *defaultContextTracker) contextFor(instance *InstanceDesc) (context.Context, context.CancelFunc) { - ctx, cancel := context.WithCancel(t.ctx) +func (t *defaultContextTracker) contextFor(instance *InstanceDesc) (context.Context, context.CancelCauseFunc) { + ctx, cancel := context.WithCancelCause(t.ctx) 
t.cancelFuncs[instance] = cancel return ctx, cancel } -func (t *defaultContextTracker) cancelContextFor(instance *InstanceDesc) { +func (t *defaultContextTracker) cancelContextFor(instance *InstanceDesc, cause error) { if cancel, ok := t.cancelFuncs[instance]; ok { - cancel() + cancel(cause) delete(t.cancelFuncs, instance) } } -func (t *defaultContextTracker) cancelAllContexts() { +func (t *defaultContextTracker) cancelAllContexts(cause error) { for instance, cancel := range t.cancelFuncs { - cancel() + cancel(cause) delete(t.cancelFuncs, instance) } } @@ -248,14 +248,18 @@ type zoneAwareResultTracker struct { zoneRelease map[string]chan struct{} zoneShouldStart map[string]*atomic.Bool pendingZones []string + zoneSorter ZoneSorter logger log.Logger } -func newZoneAwareResultTracker(instances []InstanceDesc, maxUnavailableZones int, logger log.Logger) *zoneAwareResultTracker { +type ZoneSorter func(zones []string) []string + +func newZoneAwareResultTracker(instances []InstanceDesc, maxUnavailableZones int, zoneSorter ZoneSorter, logger log.Logger) *zoneAwareResultTracker { t := &zoneAwareResultTracker{ waitingByZone: make(map[string]int), failuresByZone: make(map[string]int), maxUnavailableZones: maxUnavailableZones, + zoneSorter: zoneSorter, logger: logger, } @@ -269,9 +273,21 @@ func newZoneAwareResultTracker(instances []InstanceDesc, maxUnavailableZones int t.minSuccessfulZones = 0 } + if t.zoneSorter == nil { + t.zoneSorter = defaultZoneSorter + } + return t } +func defaultZoneSorter(zones []string) []string { + rand.Shuffle(len(zones), func(i, j int) { + zones[i], zones[j] = zones[j], zones[i] + }) + + return zones +} + func (t *zoneAwareResultTracker) done(instance *InstanceDesc, err error) { t.waitingByZone[instance.Zone]-- @@ -338,9 +354,7 @@ func (t *zoneAwareResultTracker) startMinimumRequests() { allZones = append(allZones, zone) } - rand.Shuffle(len(allZones), func(i, j int) { - allZones[i], allZones[j] = allZones[j], allZones[i] - }) + allZones = 
t.zoneSorter(allZones) for i := 0; i < t.minSuccessfulZones; i++ { level.Debug(t.logger).Log("msg", "starting requests to zone", "reason", "initial requests", "zone", allZones[i]) @@ -396,7 +410,7 @@ func (t *zoneAwareResultTracker) releaseZone(zone string, shouldStart bool) { func (t *zoneAwareResultTracker) awaitStart(ctx context.Context, instance *InstanceDesc) error { select { case <-ctx.Done(): - return ctx.Err() + return context.Cause(ctx) case <-t.zoneRelease[instance.Zone]: if t.zoneShouldStart[instance.Zone].Load() { return nil @@ -408,18 +422,18 @@ func (t *zoneAwareResultTracker) awaitStart(ctx context.Context, instance *Insta type zoneAwareContextTracker struct { contexts map[*InstanceDesc]context.Context - cancelFuncs map[*InstanceDesc]context.CancelFunc + cancelFuncs map[*InstanceDesc]context.CancelCauseFunc } func newZoneAwareContextTracker(ctx context.Context, instances []InstanceDesc) *zoneAwareContextTracker { t := &zoneAwareContextTracker{ contexts: make(map[*InstanceDesc]context.Context, len(instances)), - cancelFuncs: make(map[*InstanceDesc]context.CancelFunc, len(instances)), + cancelFuncs: make(map[*InstanceDesc]context.CancelCauseFunc, len(instances)), } for i := range instances { instance := &instances[i] - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithCancelCause(ctx) t.contexts[instance] = ctx t.cancelFuncs[instance] = cancel } @@ -427,26 +441,26 @@ func newZoneAwareContextTracker(ctx context.Context, instances []InstanceDesc) * return t } -func (t *zoneAwareContextTracker) contextFor(instance *InstanceDesc) (context.Context, context.CancelFunc) { +func (t *zoneAwareContextTracker) contextFor(instance *InstanceDesc) (context.Context, context.CancelCauseFunc) { return t.contexts[instance], t.cancelFuncs[instance] } -func (t *zoneAwareContextTracker) cancelContextFor(instance *InstanceDesc) { +func (t *zoneAwareContextTracker) cancelContextFor(instance *InstanceDesc, cause error) { // Why not create a per-zone parent 
context to make this easier? // If we create a per-zone parent context, we'd need to have some way to cancel the per-zone context when the last of the individual // contexts in a zone are cancelled using the context.CancelFunc returned from contextFor. for i, cancel := range t.cancelFuncs { if i.Zone == instance.Zone { - cancel() + cancel(cause) delete(t.contexts, i) delete(t.cancelFuncs, i) } } } -func (t *zoneAwareContextTracker) cancelAllContexts() { +func (t *zoneAwareContextTracker) cancelAllContexts(cause error) { for instance, cancel := range t.cancelFuncs { - cancel() + cancel(cause) delete(t.contexts, instance) delete(t.cancelFuncs, instance) } diff --git a/vendor/github.com/grafana/dskit/ring/ring.go b/vendor/github.com/grafana/dskit/ring/ring.go index 947f3290ff0..0c54bb1c543 100644 --- a/vendor/github.com/grafana/dskit/ring/ring.go +++ b/vendor/github.com/grafana/dskit/ring/ring.go @@ -75,6 +75,9 @@ type ReadRing interface { // CleanupShuffleShardCache should delete cached shuffle-shard subrings for given identifier. CleanupShuffleShardCache(identifier string) + + // GetTokenRangesForInstance returns the token ranges owned by an instance in the ring + GetTokenRangesForInstance(instanceID string) (TokenRanges, error) } var ( @@ -360,6 +363,26 @@ func (r *Ring) Get(key uint32, op Operation, bufDescs []InstanceDesc, bufHosts, return ReplicationSet{}, ErrEmptyRing } + instances, err := r.findInstancesForKey(key, op, bufDescs, bufHosts, bufZones, nil) + if err != nil { + return ReplicationSet{}, err + } + + healthyInstances, maxFailure, err := r.strategy.Filter(instances, op, r.cfg.ReplicationFactor, r.cfg.HeartbeatTimeout, r.cfg.ZoneAwarenessEnabled) + if err != nil { + return ReplicationSet{}, err + } + + return ReplicationSet{ + Instances: healthyInstances, + MaxErrors: maxFailure, + }, nil +} + +// Returns instances for given key and operation. Instances are not filtered through ReplicationStrategy. 
+// InstanceFilter can ignore uninteresting instances that would otherwise be part of the output, and can also stop search early. +// This function needs to be called with read lock on the ring. +func (r *Ring) findInstancesForKey(key uint32, op Operation, bufDescs []InstanceDesc, bufHosts []string, bufZones []string, instanceFilter func(instanceID string) (include, keepGoing bool)) ([]InstanceDesc, error) { var ( n = r.cfg.ReplicationFactor instances = bufDescs[:0] @@ -382,7 +405,7 @@ func (r *Ring) Get(key uint32, op Operation, bufDescs []InstanceDesc, bufHosts, info, ok := r.ringInstanceByToken[token] if !ok { // This should never happen unless a bug in the ring code. - return ReplicationSet{}, ErrInconsistentTokensInfo + return nil, ErrInconsistentTokensInfo } // We want n *distinct* instances && distinct zones. @@ -410,18 +433,18 @@ func (r *Ring) Get(key uint32, op Operation, bufDescs []InstanceDesc, bufHosts, distinctZones = append(distinctZones, info.Zone) } - instances = append(instances, instance) - } - - healthyInstances, maxFailure, err := r.strategy.Filter(instances, op, r.cfg.ReplicationFactor, r.cfg.HeartbeatTimeout, r.cfg.ZoneAwarenessEnabled) - if err != nil { - return ReplicationSet{}, err + include, keepGoing := true, true + if instanceFilter != nil { + include, keepGoing = instanceFilter(info.InstanceID) + } + if include { + instances = append(instances, instance) + } + if !keepGoing { + break + } } - - return ReplicationSet{ - Instances: healthyInstances, - MaxErrors: maxFailure, - }, nil + return instances, nil } // GetAllHealthy implements ReadRing. @@ -1078,3 +1101,36 @@ func (op Operation) ShouldExtendReplicaSetOnState(s InstanceState) bool { // All states are healthy, no states extend replica set. var allStatesRingOperation = Operation(0x0000ffff) + +// numberOfKeysOwnedByInstance returns how many of the supplied keys are owned by given instance. 
+func (r *Ring) numberOfKeysOwnedByInstance(keys []uint32, op Operation, instanceID string, bufDescs []InstanceDesc, bufHosts []string, bufZones []string) (int, error) { + r.mtx.RLock() + defer r.mtx.RUnlock() + + if r.ringDesc == nil || len(r.ringTokens) == 0 { + return 0, ErrEmptyRing + } + + // Instance is not in this ring, it can't own any key. + if _, ok := r.ringDesc.Ingesters[instanceID]; !ok { + return 0, nil + } + + owned := 0 + for _, tok := range keys { + i, err := r.findInstancesForKey(tok, op, bufDescs, bufHosts, bufZones, func(foundInstanceID string) (include, keepGoing bool) { + if foundInstanceID == instanceID { + // If we've found our instance, we can stop. + return true, false + } + return false, true + }) + if err != nil { + return 0, err + } + if len(i) > 0 { + owned++ + } + } + return owned, nil +} diff --git a/vendor/github.com/grafana/dskit/ring/token_generator.go b/vendor/github.com/grafana/dskit/ring/token_generator.go index 159d9ffd6fd..93f0299547e 100644 --- a/vendor/github.com/grafana/dskit/ring/token_generator.go +++ b/vendor/github.com/grafana/dskit/ring/token_generator.go @@ -3,6 +3,7 @@ package ring import ( "math/rand" "sort" + "sync" "time" ) @@ -21,10 +22,17 @@ type TokenGenerator interface { CanJoinEnabled() bool } -type RandomTokenGenerator struct{} +type RandomTokenGenerator struct { + m sync.Mutex + r *rand.Rand +} func NewRandomTokenGenerator() *RandomTokenGenerator { - return &RandomTokenGenerator{} + return &RandomTokenGenerator{r: rand.New(rand.NewSource(time.Now().UnixNano()))} +} + +func NewRandomTokenGeneratorWithSeed(seed int64) *RandomTokenGenerator { + return &RandomTokenGenerator{r: rand.New(rand.NewSource(seed))} } // GenerateTokens generates at most requestedTokensCount unique random tokens, none of which clashes with @@ -35,8 +43,6 @@ func (t *RandomTokenGenerator) GenerateTokens(requestedTokensCount int, allTaken return []uint32{} } - r := rand.New(rand.NewSource(time.Now().UnixNano())) - used := 
make(map[uint32]bool, len(allTakenTokens)) for _, v := range allTakenTokens { used[v] = true @@ -44,7 +50,10 @@ func (t *RandomTokenGenerator) GenerateTokens(requestedTokensCount int, allTaken tokens := make([]uint32, 0, requestedTokensCount) for i := 0; i < requestedTokensCount; { - candidate := r.Uint32() + t.m.Lock() + candidate := t.r.Uint32() + t.m.Unlock() + + if used[candidate] { continue } } diff --git a/vendor/github.com/grafana/dskit/server/server.go b/vendor/github.com/grafana/dskit/server/server.go index 2b54283df7f..b9d67ad51f3 100644 --- a/vendor/github.com/grafana/dskit/server/server.go +++ b/vendor/github.com/grafana/dskit/server/server.go @@ -17,6 +17,8 @@ import ( "strings" "time" + _ "github.com/grafana/pyroscope-go/godeltaprof/http/pprof" // anonymous import to get godeltaprof handlers registered + gokit_log "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/gorilla/mux" @@ -92,10 +94,11 @@ type Config struct { HTTPTLSConfig TLSConfig `yaml:"http_tls_config"` GRPCTLSConfig TLSConfig `yaml:"grpc_tls_config"` - RegisterInstrumentation bool `yaml:"register_instrumentation"` - ReportGRPCCodesInInstrumentationLabel bool `yaml:"report_grpc_codes_in_instrumentation_label_enabled"` - ExcludeRequestInLog bool `yaml:"-"` - DisableRequestSuccessLog bool `yaml:"-"` + RegisterInstrumentation bool `yaml:"register_instrumentation"` + ReportGRPCCodesInInstrumentationLabel bool `yaml:"report_grpc_codes_in_instrumentation_label_enabled"` + ReportHTTP4XXCodesInInstrumentationLabel bool `yaml:"-"` + ExcludeRequestInLog bool `yaml:"-"` + DisableRequestSuccessLog bool `yaml:"-"` ServerGracefulShutdownTimeout time.Duration `yaml:"graceful_shutdown_timeout"` HTTPServerReadTimeout time.Duration `yaml:"http_server_read_timeout"` @@ -350,6 +353,22 @@ func newServer(cfg Config, metrics *Metrics) (*Server, error) { level.Info(logger).Log("msg", "server listening on addresses", "http", httpListener.Addr(), "grpc", grpcListener.Addr()) + // Setup HTTP server + var 
router *mux.Router + if cfg.Router != nil { + router = cfg.Router + } else { + router = mux.NewRouter() + } + if cfg.PathPrefix != "" { + // Expect metrics and pprof handlers to be prefixed with server's path prefix. + // e.g. /loki/metrics or /loki/debug/pprof + router = router.PathPrefix(cfg.PathPrefix).Subrouter() + } + if cfg.RegisterInstrumentation { + RegisterInstrumentationWithGatherer(router, gatherer) + } + // Setup gRPC server serverLog := middleware.GRPCServerLog{ Log: logger, @@ -363,6 +382,7 @@ func newServer(cfg Config, metrics *Metrics) (*Server, error) { grpcMiddleware := []grpc.UnaryServerInterceptor{ serverLog.UnaryServerInterceptor, otgrpc.OpenTracingServerInterceptor(opentracing.GlobalTracer()), + middleware.HTTPGRPCTracingInterceptor(router), // This must appear after the OpenTracingServerInterceptor. middleware.UnaryServerInstrumentInterceptor(metrics.RequestDuration, reportGRPCStatusesOptions...), } grpcMiddleware = append(grpcMiddleware, cfg.GRPCMiddleware...) @@ -419,55 +439,9 @@ func newServer(cfg Config, metrics *Metrics) (*Server, error) { grpcServer := grpc.NewServer(grpcOptions...) grpcOnHTTPServer := grpc.NewServer(grpcOptions...) - // Setup HTTP server - var router *mux.Router - if cfg.Router != nil { - router = cfg.Router - } else { - router = mux.NewRouter() - } - if cfg.PathPrefix != "" { - // Expect metrics and pprof handlers to be prefixed with server's path prefix. - // e.g. 
/loki/metrics or /loki/debug/pprof - router = router.PathPrefix(cfg.PathPrefix).Subrouter() - } - if cfg.RegisterInstrumentation { - RegisterInstrumentationWithGatherer(router, gatherer) - } - - sourceIPs, err := middleware.NewSourceIPs(cfg.LogSourceIPsHeader, cfg.LogSourceIPsRegex) + httpMiddleware, err := BuildHTTPMiddleware(cfg, router, metrics, logger) if err != nil { - return nil, fmt.Errorf("error setting up source IP extraction: %v", err) - } - logSourceIPs := sourceIPs - if !cfg.LogSourceIPs { - // We always include the source IPs for traces, - // but only want to log them in the middleware if that is enabled. - logSourceIPs = nil - } - - defaultLogMiddleware := middleware.NewLogMiddleware(logger, cfg.LogRequestHeaders, cfg.LogRequestAtInfoLevel, logSourceIPs, strings.Split(cfg.LogRequestExcludeHeadersList, ",")) - defaultLogMiddleware.DisableRequestSuccessLog = cfg.DisableRequestSuccessLog - - defaultHTTPMiddleware := []middleware.Interface{ - middleware.Tracer{ - RouteMatcher: router, - SourceIPs: sourceIPs, - }, - defaultLogMiddleware, - middleware.Instrument{ - RouteMatcher: router, - Duration: metrics.RequestDuration, - RequestBodySize: metrics.ReceivedMessageSize, - ResponseBodySize: metrics.SentMessageSize, - InflightRequests: metrics.InflightRequests, - }, - } - var httpMiddleware []middleware.Interface - if cfg.DoNotAddDefaultHTTPMiddleware { - httpMiddleware = cfg.HTTPMiddleware - } else { - httpMiddleware = append(defaultHTTPMiddleware, cfg.HTTPMiddleware...) 
+ return nil, fmt.Errorf("error building http middleware: %w", err) } httpServer := &http.Server{ @@ -517,6 +491,45 @@ func RegisterInstrumentationWithGatherer(router *mux.Router, gatherer prometheus router.PathPrefix("/debug/pprof").Handler(http.DefaultServeMux) } +func BuildHTTPMiddleware(cfg Config, router *mux.Router, metrics *Metrics, logger gokit_log.Logger) ([]middleware.Interface, error) { + sourceIPs, err := middleware.NewSourceIPs(cfg.LogSourceIPsHeader, cfg.LogSourceIPsRegex) + if err != nil { + return nil, fmt.Errorf("error setting up source IP extraction: %w", err) + } + logSourceIPs := sourceIPs + if !cfg.LogSourceIPs { + // We always include the source IPs for traces, + // but only want to log them in the middleware if that is enabled. + logSourceIPs = nil + } + + defaultLogMiddleware := middleware.NewLogMiddleware(logger, cfg.LogRequestHeaders, cfg.LogRequestAtInfoLevel, logSourceIPs, strings.Split(cfg.LogRequestExcludeHeadersList, ",")) + defaultLogMiddleware.DisableRequestSuccessLog = cfg.DisableRequestSuccessLog + + defaultHTTPMiddleware := []middleware.Interface{ + middleware.Tracer{ + RouteMatcher: router, + SourceIPs: sourceIPs, + }, + defaultLogMiddleware, + middleware.Instrument{ + RouteMatcher: router, + Duration: metrics.RequestDuration, + RequestBodySize: metrics.ReceivedMessageSize, + ResponseBodySize: metrics.SentMessageSize, + InflightRequests: metrics.InflightRequests, + }, + } + var httpMiddleware []middleware.Interface + if cfg.DoNotAddDefaultHTTPMiddleware { + httpMiddleware = cfg.HTTPMiddleware + } else { + httpMiddleware = append(defaultHTTPMiddleware, cfg.HTTPMiddleware...) + } + + return httpMiddleware, nil +} + // Run the server; blocks until SIGTERM (if signal handling is enabled), an error is received, or Stop() is called. 
func (s *Server) Run() error { errChan := make(chan error, 1) @@ -547,9 +560,12 @@ func (s *Server) Run() error { } }() - // Setup gRPC server - // for HTTP over gRPC, ensure we don't double-count the middleware - httpgrpc.RegisterHTTPServer(s.GRPC, httpgrpc_server.NewServer(s.HTTP)) + serverOptions := make([]httpgrpc_server.Option, 0, 1) + if s.cfg.ReportHTTP4XXCodesInInstrumentationLabel { + serverOptions = append(serverOptions, httpgrpc_server.WithReturn4XXErrors) + } + // Setup gRPC server for HTTP over gRPC, ensure we don't double-count the middleware + httpgrpc.RegisterHTTPServer(s.GRPC, httpgrpc_server.NewServer(s.HTTP, serverOptions...)) go func() { err := s.GRPC.Serve(s.grpcListener) diff --git a/vendor/github.com/grafana/dskit/tenant/resolver.go b/vendor/github.com/grafana/dskit/tenant/resolver.go index aa19d75bb4a..35e95b1c831 100644 --- a/vendor/github.com/grafana/dskit/tenant/resolver.go +++ b/vendor/github.com/grafana/dskit/tenant/resolver.go @@ -2,20 +2,11 @@ package tenant import ( "context" - "errors" - "net/http" "strings" "github.com/grafana/dskit/user" ) -var defaultResolver Resolver = NewSingleResolver() - -// WithDefaultResolver updates the resolver used for the package methods. -func WithDefaultResolver(r Resolver) { - defaultResolver = r -} - // TenantID returns exactly a single tenant ID from the context. It should be // used when a certain endpoint should only support exactly a single // tenant ID. It returns an error user.ErrNoOrgID if there is no tenant ID @@ -25,7 +16,16 @@ func WithDefaultResolver(r Resolver) { // //nolint:revive func TenantID(ctx context.Context) (string, error) { - return defaultResolver.TenantID(ctx) + orgIDs, err := TenantIDs(ctx) + if err != nil { + return "", err + } + + if len(orgIDs) > 1 { + return "", user.ErrTooManyOrgIDs + } + + return orgIDs[0], nil } // TenantIDs returns all tenant IDs from the context. 
It should return @@ -36,7 +36,20 @@ func TenantID(ctx context.Context) (string, error) { // //nolint:revive func TenantIDs(ctx context.Context) ([]string, error) { - return defaultResolver.TenantIDs(ctx) + //lint:ignore faillint wrapper around upstream method + orgID, err := user.ExtractOrgID(ctx) + if err != nil { + return nil, err + } + + orgIDs := strings.Split(orgID, tenantIDsSeparator) + for _, id := range orgIDs { + if err := ValidTenantID(id); err != nil { + return nil, err + } + } + + return NormalizeTenantIDs(orgIDs), nil } type Resolver interface { @@ -52,109 +65,20 @@ type Resolver interface { TenantIDs(context.Context) ([]string, error) } -// NewSingleResolver creates a tenant resolver, which restricts all requests to -// be using a single tenant only. This allows a wider set of characters to be -// used within the tenant ID and should not impose a breaking change. -func NewSingleResolver() *SingleResolver { - return &SingleResolver{} -} - -type SingleResolver struct { -} - -// containsUnsafePathSegments will return true if the string is a directory -// reference like `.` and `..` or if any path separator character like `/` and -// `\` can be found. -func containsUnsafePathSegments(id string) bool { - // handle the relative reference to current and parent path. - if id == "." || id == ".." 
{ - return true - } - - return strings.ContainsAny(id, "\\/") -} - -var errInvalidTenantID = errors.New("invalid tenant ID") - -func (t *SingleResolver) TenantID(ctx context.Context) (string, error) { - //lint:ignore faillint wrapper around upstream method - id, err := user.ExtractOrgID(ctx) - if err != nil { - return "", err - } - - if containsUnsafePathSegments(id) { - return "", errInvalidTenantID - } - - return id, nil -} - -func (t *SingleResolver) TenantIDs(ctx context.Context) ([]string, error) { - orgID, err := t.TenantID(ctx) - if err != nil { - return nil, err - } - return []string{orgID}, err -} - -type MultiResolver struct { -} +type MultiResolver struct{} // NewMultiResolver creates a tenant resolver, which allows request to have // multiple tenant ids submitted separated by a '|' character. This enforces // further limits on the character set allowed within tenants as detailed here: -// https://cortexmetrics.io/docs/guides/limitations/#tenant-id-naming) +// https://grafana.com/docs/mimir/latest/configure/about-tenant-ids/ func NewMultiResolver() *MultiResolver { return &MultiResolver{} } func (t *MultiResolver) TenantID(ctx context.Context) (string, error) { - orgIDs, err := t.TenantIDs(ctx) - if err != nil { - return "", err - } - - if len(orgIDs) > 1 { - return "", user.ErrTooManyOrgIDs - } - - return orgIDs[0], nil + return TenantID(ctx) } func (t *MultiResolver) TenantIDs(ctx context.Context) ([]string, error) { - //lint:ignore faillint wrapper around upstream method - orgID, err := user.ExtractOrgID(ctx) - if err != nil { - return nil, err - } - - orgIDs := strings.Split(orgID, tenantIDsLabelSeparator) - for _, orgID := range orgIDs { - if err := ValidTenantID(orgID); err != nil { - return nil, err - } - if containsUnsafePathSegments(orgID) { - return nil, errInvalidTenantID - } - } - - return NormalizeTenantIDs(orgIDs), nil -} - -// ExtractTenantIDFromHTTPRequest extracts a single TenantID through a given -// resolver directly from a HTTP 
request. -func ExtractTenantIDFromHTTPRequest(req *http.Request) (string, context.Context, error) { - //lint:ignore faillint wrapper around upstream method - _, ctx, err := user.ExtractOrgIDFromHTTPRequest(req) - if err != nil { - return "", nil, err - } - - tenantID, err := defaultResolver.TenantID(ctx) - if err != nil { - return "", nil, err - } - - return tenantID, ctx, nil + return TenantIDs(ctx) } diff --git a/vendor/github.com/grafana/dskit/tenant/tenant.go b/vendor/github.com/grafana/dskit/tenant/tenant.go index a5807500e52..4a89b57225a 100644 --- a/vendor/github.com/grafana/dskit/tenant/tenant.go +++ b/vendor/github.com/grafana/dskit/tenant/tenant.go @@ -4,14 +4,23 @@ import ( "context" "errors" "fmt" + "net/http" "sort" "strings" "github.com/grafana/dskit/user" ) +const ( + // MaxTenantIDLength is the max length of single tenant ID in bytes + MaxTenantIDLength = 150 + + tenantIDsSeparator = "|" +) + var ( - errTenantIDTooLong = errors.New("tenant ID is too long: max 150 characters") + errTenantIDTooLong = fmt.Errorf("tenant ID is too long: max %d characters", MaxTenantIDLength) + errUnsafeTenantID = errors.New("tenant ID is '.' 
or '..'") ) type errTenantIDUnsupportedCharacter struct { @@ -27,9 +36,7 @@ func (e *errTenantIDUnsupportedCharacter) Error() string { ) } -const tenantIDsLabelSeparator = "|" - -// NormalizeTenantIDs is creating a normalized form by sortiing and de-duplicating the list of tenantIDs +// NormalizeTenantIDs creates a normalized form by sorting and de-duplicating the list of tenantIDs func NormalizeTenantIDs(tenantIDs []string) []string { sort.Strings(tenantIDs) @@ -49,7 +56,7 @@ func NormalizeTenantIDs(tenantIDs []string) []string { return tenantIDs[0:posOut] } -// ValidTenantID +// ValidTenantID returns an error if the single tenant ID is invalid, nil otherwise func ValidTenantID(s string) error { // check if it contains invalid runes for pos, r := range s { @@ -61,19 +68,49 @@ func ValidTenantID(s string) error { } } - if len(s) > 150 { + if len(s) > MaxTenantIDLength { return errTenantIDTooLong } + if containsUnsafePathSegments(s) { + return errUnsafeTenantID + } + return nil } +// JoinTenantIDs returns all tenant IDs concatenated with the separator character `|` func JoinTenantIDs(tenantIDs []string) string { - return strings.Join(tenantIDs, tenantIDsLabelSeparator) + return strings.Join(tenantIDs, tenantIDsSeparator) +} + +// ExtractTenantIDFromHTTPRequest extracts a single tenant ID directly from a HTTP request. 
+func ExtractTenantIDFromHTTPRequest(req *http.Request) (string, context.Context, error) { + //lint:ignore faillint wrapper around upstream method + _, ctx, err := user.ExtractOrgIDFromHTTPRequest(req) + if err != nil { + return "", nil, err + } + + tenantID, err := TenantID(ctx) + if err != nil { + return "", nil, err + } + + return tenantID, ctx, nil +} + +// TenantIDsFromOrgID extracts different tenants from an orgID string value +// +// ignore stutter warning +// +//nolint:revive +func TenantIDsFromOrgID(orgID string) ([]string, error) { + return TenantIDs(user.InjectOrgID(context.TODO(), orgID)) } // this checks if a rune is supported in tenant IDs (according to -// https://cortexmetrics.io/docs/guides/limitations/#tenant-id-naming) +// https://grafana.com/docs/mimir/latest/configure/about-tenant-ids/ func isSupported(c rune) bool { // characters if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') { @@ -96,11 +133,8 @@ func isSupported(c rune) bool { c == ')' } -// TenantIDsFromOrgID extracts different tenants from an orgID string value -// -// ignore stutter warning -// -//nolint:revive -func TenantIDsFromOrgID(orgID string) ([]string, error) { - return TenantIDs(user.InjectOrgID(context.TODO(), orgID)) +// containsUnsafePathSegments will return true if the string is a directory +// reference like `.` and `..` +func containsUnsafePathSegments(id string) bool { + return id == "." || id == ".." } diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index 43de4867758..7e83f583c00 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -16,6 +16,14 @@ This package provides various compression algorithms. # changelog +* Oct 22nd, 2023 - [v1.17.2](https://github.com/klauspost/compress/releases/tag/v1.17.2) + * zstd: Fix rare *CORRUPTION* output in "best" mode. 
See https://github.com/klauspost/compress/pull/876 + +* Oct 14th, 2023 - [v1.17.1](https://github.com/klauspost/compress/releases/tag/v1.17.1) + * s2: Fix S2 "best" dictionary wrong encoding by @klauspost in https://github.com/klauspost/compress/pull/871 + * flate: Reduce allocations in decompressor and minor code improvements by @fakefloordiv in https://github.com/klauspost/compress/pull/869 + * s2: Fix EstimateBlockSize on 6&7 length input by @klauspost in https://github.com/klauspost/compress/pull/867 + * Sept 19th, 2023 - [v1.17.0](https://github.com/klauspost/compress/releases/tag/v1.17.0) * Add experimental dictionary builder https://github.com/klauspost/compress/pull/853 * Add xerial snappy read/writer https://github.com/klauspost/compress/pull/838 diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go index 65d777357aa..074018d8f94 100644 --- a/vendor/github.com/klauspost/compress/fse/compress.go +++ b/vendor/github.com/klauspost/compress/fse/compress.go @@ -212,7 +212,7 @@ func (s *Scratch) writeCount() error { previous0 bool charnum uint16 - maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3 + maxHeaderSize = ((int(s.symbolLen)*int(tableLog) + 4 + 2) >> 3) + 3 // Write Table Size bitStream = uint32(tableLog - minTablelog) diff --git a/vendor/github.com/klauspost/compress/gzhttp/compress.go b/vendor/github.com/klauspost/compress/gzhttp/compress.go index 265e71c0622..289ae3e2ee8 100644 --- a/vendor/github.com/klauspost/compress/gzhttp/compress.go +++ b/vendor/github.com/klauspost/compress/gzhttp/compress.go @@ -335,7 +335,16 @@ func (w *GzipResponseWriter) Close() error { ce = w.Header().Get(contentEncoding) cr = w.Header().Get(contentRange) ) - // fmt.Println(len(w.buf) == 0, len(w.buf) < w.minSize, len(w.Header()[HeaderNoCompression]) != 0, ce != "", cr != "", !w.contentTypeFilter(ct)) + if ct == "" { + ct = http.DetectContentType(w.buf) + + // Handles the intended case of 
setting a nil Content-Type (as for http/server or http/fs) + // Set the header only if the key does not exist + if _, ok := w.Header()[contentType]; w.setContentType && !ok { + w.Header().Set(contentType, ct) + } + } + if len(w.buf) == 0 || len(w.buf) < w.minSize || len(w.Header()[HeaderNoCompression]) != 0 || ce != "" || cr != "" || !w.contentTypeFilter(ct) { // GZIP not triggered, write out regular response. return w.startPlain() diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go index 858f8f43a56..c81a15357af 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_best.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go @@ -43,7 +43,7 @@ func (m *match) estBits(bitsPerByte int32) { if m.rep < 0 { ofc = ofCode(uint32(m.s-m.offset) + 3) } else { - ofc = ofCode(uint32(m.rep)) + ofc = ofCode(uint32(m.rep) & 3) } // Cost, excluding ofTT, mlTT := fsePredefEnc[tableOffsets].ct.symbolTT[ofc], fsePredefEnc[tableMatchLengths].ct.symbolTT[mlc] @@ -227,7 +227,7 @@ encodeLoop: } } l := 4 + e.matchlen(s+4, offset+4, src) - if rep < 0 { + if true { // Extend candidate match backwards as far as possible. tMin := s - e.maxMatchOff if tMin < 0 { @@ -282,6 +282,7 @@ encodeLoop: // Load next and check... e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: candidateL.offset} e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: candidateS.offset} + index0 := s + 1 // Look far ahead, unless we have a really long match already... if best.length < goodEnough { @@ -357,19 +358,16 @@ encodeLoop: blk.sequences = append(blk.sequences, seq) // Index old s + 1 -> s - 1 - index0 := s + 1 s = best.s + best.length - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, best.length) - } - break encodeLoop - } + // Index skipped... 
+ end := s + if s > sLimit+4 { + end = sLimit + 4 + } off := index0 + e.cur - for index0 < s { + for index0 < end { cv0 := load6432(src, index0) h0 := hashLen(cv0, bestLongTableBits, bestLongLen) h1 := hashLen(cv0, bestShortTableBits, bestShortLen) @@ -378,6 +376,7 @@ encodeLoop: off++ index0++ } + switch best.rep { case 2, 4 | 1: offset1, offset2 = offset2, offset1 @@ -386,12 +385,17 @@ encodeLoop: case 4 | 3: offset1, offset2, offset3 = offset1-1, offset1, offset2 } + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, best.length) + } + break encodeLoop + } continue } // A 4-byte match has been found. Update recent offsets. // We'll later see if more than 4 bytes. - index0 := s + 1 s = best.s t := best.offset offset1, offset2, offset3 = s-t, offset1, offset2 @@ -419,19 +423,25 @@ encodeLoop: } blk.sequences = append(blk.sequences, seq) nextEmit = s - if s >= sLimit { - break encodeLoop + + // Index old s + 1 -> s - 1 or sLimit + end := s + if s > sLimit-4 { + end = sLimit - 4 } - // Index old s + 1 -> s - 1 - for index0 < s { + off := index0 + e.cur + for index0 < end { cv0 := load6432(src, index0) h0 := hashLen(cv0, bestLongTableBits, bestLongLen) h1 := hashLen(cv0, bestShortTableBits, bestShortLen) - off := index0 + e.cur e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} index0++ + off++ + } + if s >= sLimit { + break encodeLoop } } diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go index 8582f31a7cc..20d25b0e052 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -145,7 +145,7 @@ encodeLoop: var t int32 // We allow the encoder to optionally turn off repeat offsets across blocks canRepeat := len(blk.sequences) > 2 - var matched int32 + var matched, index0 int32 for { if debugAsserts && canRepeat && offset1 == 
0 { @@ -162,6 +162,7 @@ encodeLoop: off := s + e.cur e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} + index0 = s + 1 if canRepeat { if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { @@ -258,7 +259,6 @@ encodeLoop: } blk.sequences = append(blk.sequences, seq) - index0 := s + repOff2 s += lenght + repOff2 nextEmit = s if s >= sLimit { @@ -498,15 +498,15 @@ encodeLoop: } // Index match start+1 (long) -> s - 1 - index0 := s - l + 1 + off := index0 + e.cur for index0 < s-1 { cv0 := load6432(src, index0) cv1 := cv0 >> 8 h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - off := index0 + e.cur e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} index0 += 2 + off += 2 } cv = load6432(src, s) @@ -672,7 +672,7 @@ encodeLoop: var t int32 // We allow the encoder to optionally turn off repeat offsets across blocks canRepeat := len(blk.sequences) > 2 - var matched int32 + var matched, index0 int32 for { if debugAsserts && canRepeat && offset1 == 0 { @@ -691,6 +691,7 @@ encodeLoop: e.markLongShardDirty(nextHashL) e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} e.markShortShardDirty(nextHashS) + index0 = s + 1 if canRepeat { if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { @@ -726,7 +727,6 @@ encodeLoop: blk.sequences = append(blk.sequences, seq) // Index match start+1 (long) -> s - 1 - index0 := s + repOff s += lenght + repOff nextEmit = s @@ -790,7 +790,6 @@ encodeLoop: } blk.sequences = append(blk.sequences, seq) - index0 := s + repOff2 s += lenght + repOff2 nextEmit = s if s >= sLimit { @@ -1024,18 +1023,18 @@ encodeLoop: } // Index match start+1 (long) -> s - 1 - index0 := s - l + 1 + off := index0 + e.cur for index0 < s-1 { cv0 := load6432(src, index0) cv1 := cv0 >> 8 h0 := hashLen(cv0, 
betterLongTableBits, betterLongLen) - off := index0 + e.cur e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} e.markLongShardDirty(h0) h1 := hashLen(cv1, betterShortTableBits, betterShortLen) e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} e.markShortShardDirty(h1) index0 += 2 + off += 2 } cv = load6432(src, s) diff --git a/vendor/modules.txt b/vendor/modules.txt index 32b130bb1cc..0f6fc1b8bd2 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -488,9 +488,10 @@ github.com/gorilla/mux # github.com/gorilla/websocket v1.5.0 ## explicit; go 1.12 github.com/gorilla/websocket -# github.com/grafana/dskit v0.0.0-20231120170505-765e343eda4f +# github.com/grafana/dskit v0.0.0-20231120170505-765e343eda4f => ../dskit ## explicit; go 1.20 github.com/grafana/dskit/backoff +github.com/grafana/dskit/cancellation github.com/grafana/dskit/concurrency github.com/grafana/dskit/crypto/tls github.com/grafana/dskit/dns @@ -537,6 +538,11 @@ github.com/grafana/e2e/images # github.com/grafana/gomemcache v0.0.0-20231023152154-6947259a0586 ## explicit; go 1.18 github.com/grafana/gomemcache/memcache +# github.com/grafana/pyroscope-go/godeltaprof v0.1.6 +## explicit; go 1.16 +github.com/grafana/pyroscope-go/godeltaprof +github.com/grafana/pyroscope-go/godeltaprof/http/pprof +github.com/grafana/pyroscope-go/godeltaprof/internal/pprof # github.com/grafana/regexp v0.0.0-20221123153739-15dc172cd2db ## explicit; go 1.17 github.com/grafana/regexp @@ -750,8 +756,8 @@ github.com/jsternberg/zap-logfmt # github.com/julienschmidt/httprouter v1.3.0 ## explicit; go 1.7 github.com/julienschmidt/httprouter -# github.com/klauspost/compress v1.17.2 -## explicit; go 1.18 +# github.com/klauspost/compress v1.17.3 +## explicit; go 1.19 github.com/klauspost/compress github.com/klauspost/compress/flate github.com/klauspost/compress/fse @@ -1918,3 +1924,4 @@ gopkg.in/yaml.v3 # k8s.io/client-go => k8s.io/client-go v0.25.0 # github.com/hashicorp/memberlist => 
github.com/grafana/memberlist v0.3.1-0.20220708130638-bd88e10a3d91 # golang.org/x/net => golang.org/x/net v0.17.0 +# github.com/grafana/dskit => ../dskit From 7b7622db11ffbc6705758dad67757d26b8235a5c Mon Sep 17 00:00:00 2001 From: Joe Elliott Date: Fri, 5 Jan 2024 08:51:58 -0500 Subject: [PATCH 04/12] vendor Signed-off-by: Joe Elliott --- .../grafana/dskit/cancellation/error.go | 37 + .../grafana/dskit/ring/token_range.go | 153 ++++ .../pyroscope-go/godeltaprof/.gitignore | 1 + .../grafana/pyroscope-go/godeltaprof/LICENSE | 203 +++++ .../pyroscope-go/godeltaprof/README.md | 98 +++ .../grafana/pyroscope-go/godeltaprof/block.go | 119 +++ .../grafana/pyroscope-go/godeltaprof/heap.go | 81 ++ .../godeltaprof/http/pprof/pprof.go | 50 ++ .../godeltaprof/internal/pprof/delta_heap.go | 118 +++ .../godeltaprof/internal/pprof/delta_mutex.go | 59 ++ .../godeltaprof/internal/pprof/elf.go | 109 +++ .../godeltaprof/internal/pprof/gzip_go16.go | 18 + .../godeltaprof/internal/pprof/gzip_go17.go | 19 + .../godeltaprof/internal/pprof/map.go | 96 +++ .../internal/pprof/mutex_scale_go19.go | 27 + .../internal/pprof/mutex_scale_go20.go | 17 + .../godeltaprof/internal/pprof/proto.go | 715 ++++++++++++++++++ .../godeltaprof/internal/pprof/protobuf.go | 141 ++++ .../godeltaprof/internal/pprof/stub.go | 17 + .../godeltaprof/internal/pprof/stub_go20.go | 16 + .../godeltaprof/internal/pprof/stub_go21.go | 21 + .../grafana/pyroscope-go/godeltaprof/proto.go | 9 + 22 files changed, 2124 insertions(+) create mode 100644 vendor/github.com/grafana/dskit/cancellation/error.go create mode 100644 vendor/github.com/grafana/dskit/ring/token_range.go create mode 100644 vendor/github.com/grafana/pyroscope-go/godeltaprof/.gitignore create mode 100644 vendor/github.com/grafana/pyroscope-go/godeltaprof/LICENSE create mode 100644 vendor/github.com/grafana/pyroscope-go/godeltaprof/README.md create mode 100644 vendor/github.com/grafana/pyroscope-go/godeltaprof/block.go create mode 100644 
vendor/github.com/grafana/pyroscope-go/godeltaprof/heap.go create mode 100644 vendor/github.com/grafana/pyroscope-go/godeltaprof/http/pprof/pprof.go create mode 100644 vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/delta_heap.go create mode 100644 vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/delta_mutex.go create mode 100644 vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/elf.go create mode 100644 vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/gzip_go16.go create mode 100644 vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/gzip_go17.go create mode 100644 vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/map.go create mode 100644 vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/mutex_scale_go19.go create mode 100644 vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/mutex_scale_go20.go create mode 100644 vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/proto.go create mode 100644 vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/protobuf.go create mode 100644 vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/stub.go create mode 100644 vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/stub_go20.go create mode 100644 vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/stub_go21.go create mode 100644 vendor/github.com/grafana/pyroscope-go/godeltaprof/proto.go diff --git a/vendor/github.com/grafana/dskit/cancellation/error.go b/vendor/github.com/grafana/dskit/cancellation/error.go new file mode 100644 index 00000000000..85fcbad43eb --- /dev/null +++ b/vendor/github.com/grafana/dskit/cancellation/error.go @@ -0,0 +1,37 @@ +package cancellation + +import ( + "context" + "fmt" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +type cancellationError struct { + inner error +} + +func NewError(err error) error { + return 
cancellationError{err} +} + +func NewErrorf(format string, args ...any) error { + return NewError(fmt.Errorf(format, args...)) +} + +func (e cancellationError) Error() string { + return "context canceled: " + e.inner.Error() +} + +func (e cancellationError) Is(err error) bool { + return err == context.Canceled +} + +func (e cancellationError) Unwrap() error { + return e.inner +} + +func (e cancellationError) GRPCStatus() *status.Status { + return status.New(codes.Canceled, e.Error()) +} diff --git a/vendor/github.com/grafana/dskit/ring/token_range.go b/vendor/github.com/grafana/dskit/ring/token_range.go new file mode 100644 index 00000000000..1020ecd3326 --- /dev/null +++ b/vendor/github.com/grafana/dskit/ring/token_range.go @@ -0,0 +1,153 @@ +package ring + +import ( + "math" + + "github.com/pkg/errors" + "golang.org/x/exp/slices" // using exp/slices until moving to go 1.21. +) + +// TokenRanges describes token ranges owned by an instance. +// It consists of [start, end] pairs, where both start and end are inclusive. +// For example TokenRanges with values [5, 10, 20, 30] covers tokens [5..10] and [20..30]. +type TokenRanges []uint32 + +func (tr TokenRanges) IncludesKey(key uint32) bool { + switch { + case len(tr) == 0: + return false + case key < tr[0]: + // key comes before the first range + return false + case key > tr[len(tr)-1]: + // key comes after the last range + return false + } + + index, found := slices.BinarySearch(tr, key) + switch { + case found: + // ranges are closed + return true + case index%2 == 1: + // hash would be inserted after the start of a range (even index) + return true + default: + return false + } +} + +func (tr TokenRanges) Equal(other TokenRanges) bool { + if len(tr) != len(other) { + return false + } + + for i := 0; i < len(tr); i++ { + if tr[i] != other[i] { + return false + } + } + + return true +} + +// GetTokenRangesForInstance returns the token ranges owned by an instance in the ring. 
+// +// Current implementation only works with multizone setup, where number of zones is equal to replication factor. +func (r *Ring) GetTokenRangesForInstance(instanceID string) (TokenRanges, error) { + r.mtx.RLock() + defer r.mtx.RUnlock() + + instance, ok := r.ringDesc.Ingesters[instanceID] + if !ok { + return nil, ErrInstanceNotFound + } + if instance.Zone == "" { + return nil, errors.New("zone not set") + } + + rf := r.cfg.ReplicationFactor + numZones := len(r.ringTokensByZone) + + // To simplify computation of token ranges, we currently only support case where zone-awareness is enabled, + // and replicaction factor is equal to number of zones. + if !r.cfg.ZoneAwarenessEnabled || rf != numZones { + // if zoneAwareness is disabled we need to treat the whole ring as one big zone, and we would + // need to walk the ring backwards looking for RF-1 tokens from other instances to determine the range. + return nil, errors.New("can't use ring configuration for computing token ranges") + } + + // at this point zone-aware replication is enabled, and rf == numZones + // this means that we will write to one replica in each zone, so we can just consider the zonal ring for our instance + subringTokens, ok := r.ringTokensByZone[instance.Zone] + if !ok || len(subringTokens) == 0 { + return nil, errors.New("no tokens for zone") + } + + // 1 range (2 values) per token + one additional if we need to split the rollover range. + ranges := make(TokenRanges, 0, 2*(len(instance.Tokens)+1)) + // non-zero value means we're now looking for start of the range. Zero value means we're looking for next end of range (ie. token owned by this instance). + rangeEnd := uint32(0) + + // if this instance claimed the first token, it owns the wrap-around range, which we'll break into two separate ranges + firstToken := subringTokens[0] + firstTokenInfo, ok := r.ringInstanceByToken[firstToken] + if !ok { + // This should never happen unless there's a bug in the ring code. 
+ return nil, ErrInconsistentTokensInfo + } + + if firstTokenInfo.InstanceID == instanceID { + // we'll start by looking for the beginning of the range that ends with math.MaxUint32 + rangeEnd = math.MaxUint32 + } + + // walk the ring backwards, alternating looking for ends and starts of ranges + for i := len(subringTokens) - 1; i > 0; i-- { + token := subringTokens[i] + info, ok := r.ringInstanceByToken[token] + if !ok { + // This should never happen unless a bug in the ring code. + return nil, ErrInconsistentTokensInfo + } + + if rangeEnd == 0 { + // we're looking for the end of the next range + if info.InstanceID == instanceID { + rangeEnd = token - 1 + } + } else { + // we have a range end, and are looking for the start of the range + if info.InstanceID != instanceID { + ranges = append(ranges, rangeEnd, token) + rangeEnd = 0 + } + } + } + + // finally look at the first token again + // - if we have a range end, check if we claimed token 0 + // - if we don't, we have our start + // - if we do, the start is 0 + // - if we don't have a range end, check if we claimed token 0 + // - if we don't, do nothing + // - if we do, add the range of [0, token-1] + // - BUT, if the token itself is 0, do nothing, because we don't own the tokens themselves (we should be covered by the already added range that ends with MaxUint32) + + if rangeEnd == 0 { + if firstTokenInfo.InstanceID == instanceID && firstToken != 0 { + ranges = append(ranges, firstToken-1, 0) + } + } else { + if firstTokenInfo.InstanceID == instanceID { + ranges = append(ranges, rangeEnd, 0) + } else { + ranges = append(ranges, rangeEnd, firstToken) + } + } + + // Ensure returned ranges are sorted. 
+ slices.Sort(ranges) + + return ranges, nil +} diff --git a/vendor/github.com/grafana/pyroscope-go/godeltaprof/.gitignore b/vendor/github.com/grafana/pyroscope-go/godeltaprof/.gitignore new file mode 100644 index 00000000000..9f11b755a17 --- /dev/null +++ b/vendor/github.com/grafana/pyroscope-go/godeltaprof/.gitignore @@ -0,0 +1 @@ +.idea/ diff --git a/vendor/github.com/grafana/pyroscope-go/godeltaprof/LICENSE b/vendor/github.com/grafana/pyroscope-go/godeltaprof/LICENSE new file mode 100644 index 00000000000..98c8148a6a2 --- /dev/null +++ b/vendor/github.com/grafana/pyroscope-go/godeltaprof/LICENSE @@ -0,0 +1,203 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright 2020 Pyroscope + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/vendor/github.com/grafana/pyroscope-go/godeltaprof/README.md b/vendor/github.com/grafana/pyroscope-go/godeltaprof/README.md new file mode 100644 index 00000000000..fccda096f5e --- /dev/null +++ b/vendor/github.com/grafana/pyroscope-go/godeltaprof/README.md @@ -0,0 +1,98 @@ +# godeltaprof + +godeltaprof is an efficient delta profiler for memory, mutex, and block. + +# Why + +In Golang, allocation, mutex and block profiles are cumulative. They only grow over time and show allocations that happened since the beginning of the running program. +Not only values grow, but the size of the profile itself grows as well. It could grow up to megabytes in size for long-running processes. These megabytes profiles are called huge profiles in this document. + +In many cases, it's more useful to see the differences between two points in time. +You can use the original runtime/pprof package, called a delta profile, to see these differences. +Using the delta profile requires passing seconds argument to the pprof endpoint query. + +``` +go tool pprof http://localhost:6060/debug/pprof/heap?seconds=30 +``` + +What this does: +1. Dump profile `p0` +2. Sleep +3. Dump profile `p1` +4. Decompress and parse protobuf `p0` +5. Decompress and parse protobuf `p1` +6. Subtract `p0` from `p1` +7. 
Serialize protobuf and compress the result + +The resulting profile is *usually* much smaller (`p0` may be megabytes, while result is usually tens of kilobytes). + +There are number of issues with this approach: + +1. Heap profile contains both allocation values and in-use values. In-use values are not cumulative. In-use values are corrupted by the subtraction. + **Note:** It can be fixed if runtime/pprof package uses `p0.ScaleN([]float64{-1,-1,0,0})`, instead of `p0.Scale(-1)` - that would subtract allocation values and zero out in-use values in `p0`. +2. It requires dumping two profiles. +3. It produces a lot of allocations putting pressure on GC. + + +## DataDog's fastdelta + +DataDog's [fastdelta profiler](https://github.com/DataDog/dd-trace-go/blob/30e1406c2cb62af749df03d559853e1d1de0e3bf/profiler/internal/fastdelta/fd.go#L75) uses another approach. + +It improves the runtime/pprof approach by keeping a copy of the previous profile and subtracting the current profile from it. +The fastdelta profiler uses a custom protobuf pprof parser that doesn't allocate as much memory. +This approach is more efficient, faster, and produces less garbage. It also doesn't require using two profiles. +However, the fastdelta profiler still parses huge profiles up to megabytes, just to discard most of it. + +## godeltaprof + +godeltaprof does a similar job but slightly differently. + +Delta computation happens before serializing any pprof files using `runtime.MemprofileRecord` and `BlockProfileRecord`. +This way, huge profiles don't need to be parsed. The delta is computed on raw records, all zeros are rejected, and results are serialized and compressed. + +The source code for godeltaprof is based (forked) on the original [runtime/pprof package](https://github.com/golang/go/tree/master/src/runtime/pprof). +godeltaprof is modified to include delta computation before serialization and to expose the new endpoints. 
+There are other small improvements and benefits: +- Using `github.com/klauspost/compress/gzip` instead of `compress/gzip` +- Optional lazy mappings reading (they don't change over time for most applications) +- Separate package from runtime, so updated independently + +# benchmarks + +These benchmarks used memory profiles from the [pyroscope](https://github.com/grafana/pyroscope) server. + +BenchmarkOG - dumps memory profile with runtime/pprof package +BenchmarkFastDelta - dumps memory profile with runtime/pprof package and computes delta using fastdelta +BenchmarkGodeltaprof - does not dump profile with runtime/pprof, computes delta, outputs it results + +Each benchmark also outputs produced profile sizes. +``` +BenchmarkOG + 63 181862189 ns/op +profile sizes: [209117 209107 209077 209089 209095 209076 209088 209082 209090 209092] + +BenchmarkFastDelta + 43 273936764 ns/op +profile sizes: [169300 10815 8969 9511 9752 9376 9545 8959 10357 9536] + +BenchmarkGodeltaprof + 366 31148264 ns/op +profile sizes: [208898 11485 9347 9967 10291 9848 10085 9285 11033 9986] +``` + +Notice how BenchmarkOG profiles sizes are ~200k and BenchmarkGodeltaprof and BenchmarkFastDelta are ~10k - that is because a lof of samples +with zero values are discarded after delta computation. 
+ +Source code of benchmarks could be found [here](https://github.com/grafana/pyroscope/compare/godeltaprofbench?expand=1) + +CPU profiles: [BenchmarkOG](https://flamegraph.com/share/a8f68312-98c7-11ee-a502-466f68d203a5), [BenchmarkFastDelta](https://flamegraph.com/share/c23821f3-98c7-11ee-a502-466f68d203a5), [BenchmarkGodeltaprof]( https://flamegraph.com/share/ea66df36-98c7-11ee-9a0d-f2c25703e557) + + + +# upstreaming + +TODO(korniltsev): create golang issue and ask if godeltaprof is something that could be considered merging to upstream golang repo +in some way(maybe not as is, maybe with different APIs) + + + diff --git a/vendor/github.com/grafana/pyroscope-go/godeltaprof/block.go b/vendor/github.com/grafana/pyroscope-go/godeltaprof/block.go new file mode 100644 index 00000000000..825130b7299 --- /dev/null +++ b/vendor/github.com/grafana/pyroscope-go/godeltaprof/block.go @@ -0,0 +1,119 @@ +package godeltaprof + +import ( + "io" + "runtime" + "sort" + "sync" + + "github.com/grafana/pyroscope-go/godeltaprof/internal/pprof" +) + +// BlockProfiler is a stateful profiler for goroutine blocking events and mutex contention in Go programs. +// Depending on the function used to create the BlockProfiler, it uses either runtime.BlockProfile or runtime.MutexProfile. +// The BlockProfiler provides similar functionality to pprof.Lookup("block").WriteTo and pprof.Lookup("mutex").WriteTo, +// but with some key differences. +// +// The BlockProfiler tracks the delta of blocking events or mutex contention since the last +// profile was written, effectively providing a snapshot of the changes +// between two points in time. This is in contrast to the +// pprof.Lookup functions, which accumulate profiling data +// and result in profiles that represent the entire lifetime of the program. +// +// The BlockProfiler is safe for concurrent use, as it serializes access to +// its internal state using a sync.Mutex. 
This ensures that multiple goroutines +// can call the Profile method without causing any data race issues. +type BlockProfiler struct { + impl pprof.DeltaMutexProfiler + mutex sync.Mutex + runtimeProfile func([]runtime.BlockProfileRecord) (int, bool) + scaleProfile pprof.MutexProfileScaler +} + +// NewMutexProfiler creates a new BlockProfiler instance for profiling mutex contention. +// The resulting BlockProfiler uses runtime.MutexProfile as its data source. +// +// Usage: +// +// mp := godeltaprof.NewMutexProfiler() +// ... +// err := mp.Profile(someWriter) +func NewMutexProfiler() *BlockProfiler { + return &BlockProfiler{ + runtimeProfile: runtime.MutexProfile, + scaleProfile: pprof.ScalerMutexProfile, + impl: pprof.DeltaMutexProfiler{ + Options: pprof.ProfileBuilderOptions{ + GenericsFrames: true, + LazyMapping: true, + }, + }, + } +} + +func NewMutexProfilerWithOptions(options ProfileOptions) *BlockProfiler { + return &BlockProfiler{ + runtimeProfile: runtime.MutexProfile, + scaleProfile: pprof.ScalerMutexProfile, + impl: pprof.DeltaMutexProfiler{ + Options: pprof.ProfileBuilderOptions{ + GenericsFrames: options.GenericsFrames, + LazyMapping: options.LazyMappings, + }, + }, + } +} + +// NewBlockProfiler creates a new BlockProfiler instance for profiling goroutine blocking events. +// The resulting BlockProfiler uses runtime.BlockProfile as its data source. +// +// Usage: +// +// bp := godeltaprof.NewBlockProfiler() +// ... 
+// err := bp.Profile(someWriter) +func NewBlockProfiler() *BlockProfiler { + return &BlockProfiler{ + runtimeProfile: runtime.BlockProfile, + scaleProfile: pprof.ScalerBlockProfile, + impl: pprof.DeltaMutexProfiler{ + Options: pprof.ProfileBuilderOptions{ + GenericsFrames: true, + LazyMapping: true, + }, + }, + } +} + +func NewBlockProfilerWithOptions(options ProfileOptions) *BlockProfiler { + return &BlockProfiler{ + runtimeProfile: runtime.BlockProfile, + scaleProfile: pprof.ScalerBlockProfile, + impl: pprof.DeltaMutexProfiler{ + Options: pprof.ProfileBuilderOptions{ + GenericsFrames: options.GenericsFrames, + LazyMapping: options.LazyMappings, + }, + }, + } +} + +func (d *BlockProfiler) Profile(w io.Writer) error { + d.mutex.Lock() + defer d.mutex.Unlock() + + var p []runtime.BlockProfileRecord + n, ok := d.runtimeProfile(nil) + for { + p = make([]runtime.BlockProfileRecord, n+50) + n, ok = d.runtimeProfile(p) + if ok { + p = p[:n] + break + } + } + + sort.Slice(p, func(i, j int) bool { return p[i].Cycles > p[j].Cycles }) + + return d.impl.PrintCountCycleProfile(w, "contentions", "delay", d.scaleProfile, p) +} diff --git a/vendor/github.com/grafana/pyroscope-go/godeltaprof/heap.go b/vendor/github.com/grafana/pyroscope-go/godeltaprof/heap.go new file mode 100644 index 00000000000..8f26755cbd7 --- /dev/null +++ b/vendor/github.com/grafana/pyroscope-go/godeltaprof/heap.go @@ -0,0 +1,81 @@ +package godeltaprof + +import ( + "io" + "runtime" + "sync" + + "github.com/grafana/pyroscope-go/godeltaprof/internal/pprof" +) + +// HeapProfiler is a stateful profiler for heap allocations in Go programs. +// It is based on runtime.MemProfile and provides similar functionality to +// pprof.WriteHeapProfile, but with some key differences. +// +// The HeapProfiler tracks the delta of heap allocations since the last +// profile was written, effectively providing a snapshot of the changes +// in heap usage between two points in time. 
This is in contrast to the +// pprof.WriteHeapProfile function, which accumulates profiling data +// and results in profiles that represent the entire lifetime of the program. +// +// The HeapProfiler is safe for concurrent use, as it serializes access to +// its internal state using a sync.Mutex. This ensures that multiple goroutines +// can call the Profile method without causing any data race issues. +// +// Usage: +// +// hp := godeltaprof.NewHeapProfiler() +// ... +// err := hp.Profile(someWriter) +type HeapProfiler struct { + impl pprof.DeltaHeapProfiler + mutex sync.Mutex +} + +func NewHeapProfiler() *HeapProfiler { + return &HeapProfiler{ + impl: pprof.DeltaHeapProfiler{ + Options: pprof.ProfileBuilderOptions{ + GenericsFrames: true, + LazyMapping: true, + }, + }} +} + +func NewHeapProfilerWithOptions(options ProfileOptions) *HeapProfiler { + return &HeapProfiler{ + impl: pprof.DeltaHeapProfiler{ + Options: pprof.ProfileBuilderOptions{ + GenericsFrames: options.GenericsFrames, + LazyMapping: options.LazyMappings, + }, + }} +} + +func (d *HeapProfiler) Profile(w io.Writer) error { + d.mutex.Lock() + defer d.mutex.Unlock() + + // Find out how many records there are (MemProfile(nil, true)), + // allocate that many records, and get the data. + // There's a race—more records might be added between + // the two calls—so allocate a few extra records for safety + // and also try again if we're very unlucky. + // The loop should only execute one iteration in the common case. + var p []runtime.MemProfileRecord + n, ok := runtime.MemProfile(nil, true) + for { + // Allocate room for a slightly bigger profile, + // in case a few more entries have been added + // since the call to MemProfile. + p = make([]runtime.MemProfileRecord, n+50) + n, ok = runtime.MemProfile(p, true) + if ok { + p = p[0:n] + break + } + // Profile grew; try again. 
+ } + + return d.impl.WriteHeapProto(w, p, int64(runtime.MemProfileRate), "") +} diff --git a/vendor/github.com/grafana/pyroscope-go/godeltaprof/http/pprof/pprof.go b/vendor/github.com/grafana/pyroscope-go/godeltaprof/http/pprof/pprof.go new file mode 100644 index 00000000000..81331dff0b5 --- /dev/null +++ b/vendor/github.com/grafana/pyroscope-go/godeltaprof/http/pprof/pprof.go @@ -0,0 +1,50 @@ +package pprof + +import ( + "fmt" + "io" + "net/http" + "runtime" + "strconv" + + "github.com/grafana/pyroscope-go/godeltaprof" +) + +var ( + deltaHeapProfiler = godeltaprof.NewHeapProfiler() + deltaBlockProfiler = godeltaprof.NewBlockProfiler() + deltaMutexProfiler = godeltaprof.NewMutexProfiler() +) + +type deltaProfiler interface { + Profile(w io.Writer) error +} + +func init() { + http.HandleFunc("/debug/pprof/delta_heap", Heap) + http.HandleFunc("/debug/pprof/delta_block", Block) + http.HandleFunc("/debug/pprof/delta_mutex", Mutex) +} + +func Heap(w http.ResponseWriter, r *http.Request) { + gc, _ := strconv.Atoi(r.FormValue("gc")) + if gc > 0 { + runtime.GC() + } + writeDeltaProfile(deltaHeapProfiler, "heap", w) +} + +func Block(w http.ResponseWriter, r *http.Request) { + writeDeltaProfile(deltaBlockProfiler, "block", w) +} + +func Mutex(w http.ResponseWriter, r *http.Request) { + writeDeltaProfile(deltaMutexProfiler, "mutex", w) +} + +func writeDeltaProfile(p deltaProfiler, name string, w http.ResponseWriter) { + w.Header().Set("X-Content-Type-Options", "nosniff") + w.Header().Set("Content-Type", "application/octet-stream") + w.Header().Set("Content-Disposition", fmt.Sprintf(`attachment; filename="%s.pprof.gz"`, name)) + _ = p.Profile(w) +} diff --git a/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/delta_heap.go b/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/delta_heap.go new file mode 100644 index 00000000000..47674a55daf --- /dev/null +++ b/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/delta_heap.go @@ 
-0,0 +1,118 @@ +package pprof + +import ( + "io" + "math" + "runtime" + "strings" +) + +type DeltaHeapProfiler struct { + m profMap + mem []memMap + Options ProfileBuilderOptions +} + +// WriteHeapProto writes the current heap profile in protobuf format to w. +func (d *DeltaHeapProfiler) WriteHeapProto(w io.Writer, p []runtime.MemProfileRecord, rate int64, defaultSampleType string) error { + if d.mem == nil || !d.Options.LazyMapping { + d.mem = readMapping() + } + b := newProfileBuilder(w, d.Options, d.mem) + b.pbValueType(tagProfile_PeriodType, "space", "bytes") + b.pb.int64Opt(tagProfile_Period, rate) + b.pbValueType(tagProfile_SampleType, "alloc_objects", "count") + b.pbValueType(tagProfile_SampleType, "alloc_space", "bytes") + b.pbValueType(tagProfile_SampleType, "inuse_objects", "count") + b.pbValueType(tagProfile_SampleType, "inuse_space", "bytes") + if defaultSampleType != "" { + b.pb.int64Opt(tagProfile_DefaultSampleType, b.stringIndex(defaultSampleType)) + } + + values := []int64{0, 0, 0, 0} + var locs []uint64 + for _, r := range p { + // do the delta + if r.AllocBytes == 0 && r.AllocObjects == 0 && r.FreeObjects == 0 && r.FreeBytes == 0 { + // it is a fresh bucket and it will be published after next 1-2 gc cycles + continue + } + var blockSize int64 + if r.AllocObjects > 0 { + blockSize = r.AllocBytes / r.AllocObjects + } + entry := d.m.Lookup(r.Stack(), uintptr(blockSize)) + + if (r.AllocObjects - entry.count.v1) < 0 { + continue + } + AllocObjects := r.AllocObjects - entry.count.v1 + AllocBytes := r.AllocBytes - entry.count.v2 + entry.count.v1 = r.AllocObjects + entry.count.v2 = r.AllocBytes + + values[0], values[1] = scaleHeapSample(AllocObjects, AllocBytes, rate) + values[2], values[3] = scaleHeapSample(r.InUseObjects(), r.InUseBytes(), rate) + + if values[0] == 0 && values[1] == 0 && values[2] == 0 && values[3] == 0 { + continue + } + + hideRuntime := true + for tries := 0; tries < 2; tries++ { + stk := r.Stack() + // For heap profiles, all stack + 
// addresses are return PCs, which is + // what appendLocsForStack expects. + if hideRuntime { + for i, addr := range stk { + if f := runtime.FuncForPC(addr); f != nil && strings.HasPrefix(f.Name(), "runtime.") { + continue + } + // Found non-runtime. Show any runtime uses above it. + stk = stk[i:] + break + } + } + locs = b.appendLocsForStack(locs[:0], stk) + if len(locs) > 0 { + break + } + hideRuntime = false // try again, and show all frames next time. + } + + b.pbSample(values, locs, func() { + if blockSize != 0 { + b.pbLabel(tagSample_Label, "bytes", "", blockSize) + } + }) + } + b.build() + return nil +} + +// scaleHeapSample adjusts the data from a heap Sample to +// account for its probability of appearing in the collected +// data. heap profiles are a sampling of the memory allocations +// requests in a program. We estimate the unsampled value by dividing +// each collected sample by its probability of appearing in the +// profile. heap profiles rely on a poisson process to determine +// which samples to collect, based on the desired average collection +// rate R. The probability of a sample of size S to appear in that +// profile is 1-exp(-S/R). +func scaleHeapSample(count, size, rate int64) (int64, int64) { + if count == 0 || size == 0 { + return 0, 0 + } + + if rate <= 1 { + // if rate==1 all samples were collected so no adjustment is needed. + // if rate<1 treat as unknown and skip scaling. 
+ return count, size + } + + avgSize := float64(size) / float64(count) + scale := 1 / (1 - math.Exp(-avgSize/float64(rate))) + + return int64(float64(count) * scale), int64(float64(size) * scale) +} diff --git a/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/delta_mutex.go b/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/delta_mutex.go new file mode 100644 index 00000000000..40ae63ffeb2 --- /dev/null +++ b/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/delta_mutex.go @@ -0,0 +1,59 @@ +package pprof + +import ( + "io" + "runtime" +) + +type DeltaMutexProfiler struct { + m profMap + mem []memMap + Options ProfileBuilderOptions +} + +// PrintCountCycleProfile outputs block profile records (for block or mutex profiles) +// as the pprof-proto format output. Translations from cycle count to time duration +// are done because The proto expects count and time (nanoseconds) instead of count +// and the number of cycles for block, contention profiles. +// Possible 'scaler' functions are scaleBlockProfile and scaleMutexProfile. +func (d *DeltaMutexProfiler) PrintCountCycleProfile(w io.Writer, countName, cycleName string, scaler MutexProfileScaler, records []runtime.BlockProfileRecord) error { + if d.mem == nil || !d.Options.LazyMapping { + d.mem = readMapping() + } + // Output profile in protobuf form. 
+ b := newProfileBuilder(w, d.Options, d.mem) + b.pbValueType(tagProfile_PeriodType, countName, "count") + b.pb.int64Opt(tagProfile_Period, 1) + b.pbValueType(tagProfile_SampleType, countName, "count") + b.pbValueType(tagProfile_SampleType, cycleName, "nanoseconds") + + cpuGHz := float64(runtime_cyclesPerSecond()) / 1e9 + + values := []int64{0, 0} + var locs []uint64 + for _, r := range records { + count, nanosec := ScaleMutexProfile(scaler, r.Count, float64(r.Cycles)/cpuGHz) + inanosec := int64(nanosec) + + // do the delta + entry := d.m.Lookup(r.Stack(), 0) + values[0] = count - entry.count.v1 + values[1] = inanosec - entry.count.v2 + entry.count.v1 = count + entry.count.v2 = inanosec + + if values[0] < 0 || values[1] < 0 { + continue + } + if values[0] == 0 && values[1] == 0 { + continue + } + + // For count profiles, all stack addresses are + // return PCs, which is what appendLocsForStack expects. + locs = b.appendLocsForStack(locs[:0], r.Stack()) + b.pbSample(values, locs, nil) + } + b.build() + return nil +} diff --git a/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/elf.go b/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/elf.go new file mode 100644 index 00000000000..a8b5ea68175 --- /dev/null +++ b/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/elf.go @@ -0,0 +1,109 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pprof + +import ( + "encoding/binary" + "errors" + "fmt" + "os" +) + +var ( + errBadELF = errors.New("malformed ELF binary") + errNoBuildID = errors.New("no NT_GNU_BUILD_ID found in ELF binary") +) + +// elfBuildID returns the GNU build ID of the named ELF binary, +// without introducing a dependency on debug/elf and its dependencies. 
+func elfBuildID(file string) (string, error) { + buf := make([]byte, 256) + f, err := os.Open(file) + if err != nil { + return "", err + } + defer f.Close() + + if _, err := f.ReadAt(buf[:64], 0); err != nil { + return "", err + } + + // ELF file begins with \x7F E L F. + if buf[0] != 0x7F || buf[1] != 'E' || buf[2] != 'L' || buf[3] != 'F' { + return "", errBadELF + } + + var byteOrder binary.ByteOrder + switch buf[5] { + default: + return "", errBadELF + case 1: // little-endian + byteOrder = binary.LittleEndian + case 2: // big-endian + byteOrder = binary.BigEndian + } + + var shnum int + var shoff, shentsize int64 + switch buf[4] { + default: + return "", errBadELF + case 1: // 32-bit file header + shoff = int64(byteOrder.Uint32(buf[32:])) + shentsize = int64(byteOrder.Uint16(buf[46:])) + if shentsize != 40 { + return "", errBadELF + } + shnum = int(byteOrder.Uint16(buf[48:])) + case 2: // 64-bit file header + shoff = int64(byteOrder.Uint64(buf[40:])) + shentsize = int64(byteOrder.Uint16(buf[58:])) + if shentsize != 64 { + return "", errBadELF + } + shnum = int(byteOrder.Uint16(buf[60:])) + } + + for i := 0; i < shnum; i++ { + if _, err := f.ReadAt(buf[:shentsize], shoff+int64(i)*shentsize); err != nil { + return "", err + } + if typ := byteOrder.Uint32(buf[4:]); typ != 7 { // SHT_NOTE + continue + } + var off, size int64 + if shentsize == 40 { + // 32-bit section header + off = int64(byteOrder.Uint32(buf[16:])) + size = int64(byteOrder.Uint32(buf[20:])) + } else { + // 64-bit section header + off = int64(byteOrder.Uint64(buf[24:])) + size = int64(byteOrder.Uint64(buf[32:])) + } + size += off + for off < size { + if _, err := f.ReadAt(buf[:16], off); err != nil { // room for header + name GNU\x00 + return "", err + } + nameSize := int(byteOrder.Uint32(buf[0:])) + descSize := int(byteOrder.Uint32(buf[4:])) + noteType := int(byteOrder.Uint32(buf[8:])) + descOff := off + int64(12+(nameSize+3)&^3) + off = descOff + int64((descSize+3)&^3) + if nameSize != 4 || 
noteType != 3 || buf[12] != 'G' || buf[13] != 'N' || buf[14] != 'U' || buf[15] != '\x00' { // want name GNU\x00 type 3 (NT_GNU_BUILD_ID) + continue + } + if descSize > len(buf) { + return "", errBadELF + } + if _, err := f.ReadAt(buf[:descSize], descOff); err != nil { + return "", err + } + return fmt.Sprintf("%x", buf[:descSize]), nil + } + } + return "", errNoBuildID +} diff --git a/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/gzip_go16.go b/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/gzip_go16.go new file mode 100644 index 00000000000..4992f7bfd68 --- /dev/null +++ b/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/gzip_go16.go @@ -0,0 +1,18 @@ +//go:build go1.16 && !go1.17 +// +build go1.16,!go1.17 + +package pprof + +import ( + "compress/gzip" + "io" +) + +type gzipWriter struct { + *gzip.Writer +} + +func newGzipWriter(w io.Writer) gzipWriter { + zw, _ := gzip.NewWriterLevel(w, gzip.BestSpeed) + return gzipWriter{zw} +} diff --git a/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/gzip_go17.go b/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/gzip_go17.go new file mode 100644 index 00000000000..a5a51c0fe9f --- /dev/null +++ b/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/gzip_go17.go @@ -0,0 +1,19 @@ +//go:build go1.17 +// +build go1.17 + +package pprof + +import ( + "io" + + "github.com/klauspost/compress/gzip" +) + +type gzipWriter struct { + *gzip.Writer +} + +func newGzipWriter(w io.Writer) gzipWriter { + zw, _ := gzip.NewWriterLevel(w, gzip.BestSpeed) + return gzipWriter{zw} +} diff --git a/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/map.go b/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/map.go new file mode 100644 index 00000000000..188001ed162 --- /dev/null +++ b/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/map.go @@ -0,0 +1,96 @@ +// Copyright 2017 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pprof + +import "unsafe" + +// A profMap is a map from (stack, tag) to mapEntry. +// It grows without bound, but that's assumed to be OK. +type profMap struct { + hash map[uintptr]*profMapEntry + all *profMapEntry + last *profMapEntry + free []profMapEntry + freeStk []uintptr +} + +type count struct { + // alloc_objects, alloc_bytes for heap + // mutex_count, mutex_duration for mutex + v1, v2 int64 +} + +// A profMapEntry is a single entry in the profMap. +type profMapEntry struct { + nextHash *profMapEntry // next in hash list + nextAll *profMapEntry // next in list of all entries + stk []uintptr + tag uintptr + count count +} + +func (m *profMap) Lookup(stk []uintptr, tag uintptr) *profMapEntry { + // Compute hash of (stk, tag). + h := uintptr(0) + for _, x := range stk { + h = h<<8 | (h >> (8 * (unsafe.Sizeof(h) - 1))) + h += uintptr(x) * 41 + } + h = h<<8 | (h >> (8 * (unsafe.Sizeof(h) - 1))) + h += uintptr(tag) * 41 + + // Find entry if present. + var last *profMapEntry +Search: + for e := m.hash[h]; e != nil; last, e = e, e.nextHash { + if len(e.stk) != len(stk) || e.tag != tag { + continue + } + for j := range stk { + if e.stk[j] != uintptr(stk[j]) { + continue Search + } + } + // Move to front. + if last != nil { + last.nextHash = e.nextHash + e.nextHash = m.hash[h] + m.hash[h] = e + } + return e + } + + // Add new entry. + if len(m.free) < 1 { + m.free = make([]profMapEntry, 128) + } + e := &m.free[0] + m.free = m.free[1:] + e.nextHash = m.hash[h] + e.tag = tag + + if len(m.freeStk) < len(stk) { + m.freeStk = make([]uintptr, 1024) + } + // Limit cap to prevent append from clobbering freeStk. 
+ e.stk = m.freeStk[:len(stk):len(stk)] + m.freeStk = m.freeStk[len(stk):] + + for j := range stk { + e.stk[j] = uintptr(stk[j]) + } + if m.hash == nil { + m.hash = make(map[uintptr]*profMapEntry) + } + m.hash[h] = e + if m.all == nil { + m.all = e + m.last = e + } else { + m.last.nextAll = e + m.last = e + } + return e +} diff --git a/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/mutex_scale_go19.go b/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/mutex_scale_go19.go new file mode 100644 index 00000000000..349a9dad0ad --- /dev/null +++ b/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/mutex_scale_go19.go @@ -0,0 +1,27 @@ +//go:build go1.16 && !go1.20 +// +build go1.16,!go1.20 + +package pprof + +import "runtime" + +type MutexProfileScaler struct { + f func(cnt int64, ns float64) (int64, float64) +} + +func ScaleMutexProfile(scaler MutexProfileScaler, cnt int64, ns float64) (int64, float64) { + return scaler.f(cnt, ns) +} + +var ScalerMutexProfile = MutexProfileScaler{func(cnt int64, ns float64) (int64, float64) { + period := runtime.SetMutexProfileFraction(-1) + return cnt * int64(period), ns * float64(period) +}} + +var ScalerBlockProfile = MutexProfileScaler{func(cnt int64, ns float64) (int64, float64) { + // Do nothing. + // The current way of block profile sampling makes it + // hard to compute the unsampled number. The legacy block + // profile parse doesn't attempt to scale or unsample. + return cnt, ns +}} diff --git a/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/mutex_scale_go20.go b/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/mutex_scale_go20.go new file mode 100644 index 00000000000..fc5020777c2 --- /dev/null +++ b/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/mutex_scale_go20.go @@ -0,0 +1,17 @@ +//go:build go1.20 +// +build go1.20 + +package pprof + +type MutexProfileScaler struct { +} + +// ScaleMutexProfile is a no-op for go1.20+. 
+// https://github.com/golang/go/commit/30b1af00ff142a3f1a5e2a0f32cf04a649bd5e65 +func ScaleMutexProfile(_ MutexProfileScaler, cnt int64, ns float64) (int64, float64) { + return cnt, ns +} + +var ScalerMutexProfile = MutexProfileScaler{} + +var ScalerBlockProfile = MutexProfileScaler{} diff --git a/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/proto.go b/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/proto.go new file mode 100644 index 00000000000..a75dceab181 --- /dev/null +++ b/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/proto.go @@ -0,0 +1,715 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pprof + +import ( + "bytes" + "io" + "os" + "runtime" + "strconv" + "strings" + "time" +) + +// lostProfileEvent is the function to which lost profiling +// events are attributed. +// (The name shows up in the pprof graphs.) +func lostProfileEvent() { lostProfileEvent() } + +type ProfileBuilderOptions struct { + // for go1.21+ if true - use runtime_FrameSymbolName - produces frames with generic types, for example [go.shape.int] + // for go1.21+ if false - use runtime.Frame->Function - produces frames with generic types ommited [...] + // pre 1.21 - always use runtime.Frame->Function - produces frames with generic types ommited [...] + GenericsFrames bool + LazyMapping bool +} + +// A profileBuilder writes a profile incrementally from a +// stream of profile samples delivered by the runtime. +type profileBuilder struct { + start time.Time + end time.Time + havePeriod bool + period int64 + + // encoding state + w io.Writer + zw gzipWriter + pb protobuf + strings []string + stringMap map[string]int + locs map[uintptr]locInfo // list of locInfo starting with the given PC. 
+ funcs map[string]int // Package path-qualified function name to Function.ID + mem []memMap + deck pcDeck + + opt ProfileBuilderOptions +} + +type memMap struct { + // initialized as reading mapping + start uintptr // Address at which the binary (or DLL) is loaded into memory. + end uintptr // The limit of the address range occupied by this mapping. + offset uint64 // Offset in the binary that corresponds to the first mapped address. + file string // The object this entry is loaded from. + buildID string // A string that uniquely identifies a particular program version with high probability. + + funcs symbolizeFlag + fake bool // map entry was faked; /proc/self/maps wasn't available +} + +// symbolizeFlag keeps track of symbolization result. +// +// 0 : no symbol lookup was performed +// 1<<0 (lookupTried) : symbol lookup was performed +// 1<<1 (lookupFailed): symbol lookup was performed but failed +type symbolizeFlag uint8 + +const ( + lookupTried symbolizeFlag = 1 << iota + lookupFailed symbolizeFlag = 1 << iota +) + +const ( + // message Profile + tagProfile_SampleType = 1 // repeated ValueType + tagProfile_Sample = 2 // repeated Sample + tagProfile_Mapping = 3 // repeated Mapping + tagProfile_Location = 4 // repeated Location + tagProfile_Function = 5 // repeated Function + tagProfile_StringTable = 6 // repeated string + tagProfile_DropFrames = 7 // int64 (string table index) + tagProfile_KeepFrames = 8 // int64 (string table index) + tagProfile_TimeNanos = 9 // int64 + tagProfile_DurationNanos = 10 // int64 + tagProfile_PeriodType = 11 // ValueType (really optional string???) 
+ tagProfile_Period = 12 // int64 + tagProfile_Comment = 13 // repeated int64 + tagProfile_DefaultSampleType = 14 // int64 + + // message ValueType + tagValueType_Type = 1 // int64 (string table index) + tagValueType_Unit = 2 // int64 (string table index) + + // message Sample + tagSample_Location = 1 // repeated uint64 + tagSample_Value = 2 // repeated int64 + tagSample_Label = 3 // repeated Label + + // message Label + tagLabel_Key = 1 // int64 (string table index) + tagLabel_Str = 2 // int64 (string table index) + tagLabel_Num = 3 // int64 + + // message Mapping + tagMapping_ID = 1 // uint64 + tagMapping_Start = 2 // uint64 + tagMapping_Limit = 3 // uint64 + tagMapping_Offset = 4 // uint64 + tagMapping_Filename = 5 // int64 (string table index) + tagMapping_BuildID = 6 // int64 (string table index) + tagMapping_HasFunctions = 7 // bool + tagMapping_HasFilenames = 8 // bool + tagMapping_HasLineNumbers = 9 // bool + tagMapping_HasInlineFrames = 10 // bool + + // message Location + tagLocation_ID = 1 // uint64 + tagLocation_MappingID = 2 // uint64 + tagLocation_Address = 3 // uint64 + tagLocation_Line = 4 // repeated Line + + // message Line + tagLine_FunctionID = 1 // uint64 + tagLine_Line = 2 // int64 + + // message Function + tagFunction_ID = 1 // uint64 + tagFunction_Name = 2 // int64 (string table index) + tagFunction_SystemName = 3 // int64 (string table index) + tagFunction_Filename = 4 // int64 (string table index) + tagFunction_StartLine = 5 // int64 +) + +// stringIndex adds s to the string table if not already present +// and returns the index of s in the string table. 
+func (b *profileBuilder) stringIndex(s string) int64 { + id, ok := b.stringMap[s] + if !ok { + id = len(b.strings) + b.strings = append(b.strings, s) + b.stringMap[s] = id + } + return int64(id) +} + +func (b *profileBuilder) flush() { + const dataFlush = 4096 + if b.pb.nest == 0 && len(b.pb.data) > dataFlush { + b.zw.Write(b.pb.data) + b.pb.data = b.pb.data[:0] + } +} + +// pbValueType encodes a ValueType message to b.pb. +func (b *profileBuilder) pbValueType(tag int, typ, unit string) { + start := b.pb.startMessage() + b.pb.int64(tagValueType_Type, b.stringIndex(typ)) + b.pb.int64(tagValueType_Unit, b.stringIndex(unit)) + b.pb.endMessage(tag, start) +} + +// pbSample encodes a Sample message to b.pb. +func (b *profileBuilder) pbSample(values []int64, locs []uint64, labels func()) { + start := b.pb.startMessage() + b.pb.int64s(tagSample_Value, values) + b.pb.uint64s(tagSample_Location, locs) + if labels != nil { + labels() + } + b.pb.endMessage(tagProfile_Sample, start) + b.flush() +} + +// pbLabel encodes a Label message to b.pb. +func (b *profileBuilder) pbLabel(tag int, key, str string, num int64) { + start := b.pb.startMessage() + b.pb.int64Opt(tagLabel_Key, b.stringIndex(key)) + b.pb.int64Opt(tagLabel_Str, b.stringIndex(str)) + b.pb.int64Opt(tagLabel_Num, num) + b.pb.endMessage(tag, start) +} + +// pbLine encodes a Line message to b.pb. +func (b *profileBuilder) pbLine(tag int, funcID uint64, line int64) { + start := b.pb.startMessage() + b.pb.uint64Opt(tagLine_FunctionID, funcID) + b.pb.int64Opt(tagLine_Line, line) + b.pb.endMessage(tag, start) +} + +// pbMapping encodes a Mapping message to b.pb. 
+func (b *profileBuilder) pbMapping(tag int, id, base, limit, offset uint64, file, buildID string, hasFuncs bool) { + start := b.pb.startMessage() + b.pb.uint64Opt(tagMapping_ID, id) + b.pb.uint64Opt(tagMapping_Start, base) + b.pb.uint64Opt(tagMapping_Limit, limit) + b.pb.uint64Opt(tagMapping_Offset, offset) + b.pb.int64Opt(tagMapping_Filename, b.stringIndex(file)) + b.pb.int64Opt(tagMapping_BuildID, b.stringIndex(buildID)) + // TODO: we set HasFunctions if all symbols from samples were symbolized (hasFuncs). + // Decide what to do about HasInlineFrames and HasLineNumbers. + // Also, another approach to handle the mapping entry with + // incomplete symbolization results is to dupliace the mapping + // entry (but with different Has* fields values) and use + // different entries for symbolized locations and unsymbolized locations. + if hasFuncs { + b.pb.bool(tagMapping_HasFunctions, true) + } + b.pb.endMessage(tag, start) +} + +func allFrames(addr uintptr) ([]runtime.Frame, symbolizeFlag) { + // Expand this one address using CallersFrames so we can cache + // each expansion. In general, CallersFrames takes a whole + // stack, but in this case we know there will be no skips in + // the stack and we have return PCs anyway. + frames := runtime.CallersFrames([]uintptr{addr}) + frame, more := frames.Next() + if frame.Function == "runtime.goexit" { + // Short-circuit if we see runtime.goexit so the loop + // below doesn't allocate a useless empty location. + return nil, 0 + } + + symbolizeResult := lookupTried + if frame.PC == 0 || frame.Function == "" || frame.File == "" || frame.Line == 0 { + symbolizeResult |= lookupFailed + } + + if frame.PC == 0 { + // If we failed to resolve the frame, at least make up + // a reasonable call PC. This mostly happens in tests. 
+ frame.PC = addr - 1 + } + ret := []runtime.Frame{frame} + for frame.Function != "runtime.goexit" && more { + frame, more = frames.Next() + ret = append(ret, frame) + } + return ret, symbolizeResult +} + +type locInfo struct { + // location id assigned by the profileBuilder + id uint64 + + // sequence of PCs, including the fake PCs returned by the traceback + // to represent inlined functions + // https://github.com/golang/go/blob/d6f2f833c93a41ec1c68e49804b8387a06b131c5/src/runtime/traceback.go#L347-L368 + pcs []uintptr + + // firstPCFrames and firstPCSymbolizeResult hold the results of the + // allFrames call for the first (leaf-most) PC this locInfo represents + firstPCFrames []runtime.Frame + firstPCSymbolizeResult symbolizeFlag +} + +// newProfileBuilder returns a new profileBuilder. +// CPU profiling data obtained from the runtime can be added +// by calling b.addCPUData, and then the eventual profile +// can be obtained by calling b.finish. +func newProfileBuilder(w io.Writer, opt ProfileBuilderOptions, mapping []memMap) *profileBuilder { + zw := newGzipWriter(w) + b := &profileBuilder{ + w: w, + zw: zw, + start: time.Now(), + strings: []string{""}, + stringMap: map[string]int{"": 0}, + locs: map[uintptr]locInfo{}, + funcs: map[string]int{}, + opt: opt, + } + b.mem = mapping + return b +} + +// build completes and returns the constructed profile. 
+func (b *profileBuilder) build() { + b.end = time.Now() + + b.pb.int64Opt(tagProfile_TimeNanos, b.start.UnixNano()) + if b.havePeriod { // must be CPU profile + b.pbValueType(tagProfile_SampleType, "samples", "count") + b.pbValueType(tagProfile_SampleType, "cpu", "nanoseconds") + b.pb.int64Opt(tagProfile_DurationNanos, b.end.Sub(b.start).Nanoseconds()) + b.pbValueType(tagProfile_PeriodType, "cpu", "nanoseconds") + b.pb.int64Opt(tagProfile_Period, b.period) + } + + for i, m := range b.mem { + hasFunctions := m.funcs == lookupTried // lookupTried but not lookupFailed + b.pbMapping(tagProfile_Mapping, uint64(i+1), uint64(m.start), uint64(m.end), m.offset, m.file, m.buildID, hasFunctions) + } + + // TODO: Anything for tagProfile_DropFrames? + // TODO: Anything for tagProfile_KeepFrames? + + b.pb.strings(tagProfile_StringTable, b.strings) + b.zw.Write(b.pb.data) + b.zw.Close() +} + +// appendLocsForStack appends the location IDs for the given stack trace to the given +// location ID slice, locs. The addresses in the stack are return PCs or 1 + the PC of +// an inline marker as the runtime traceback function returns. +// +// It may return an empty slice even if locs is non-empty, for example if locs consists +// solely of runtime.goexit. We still count these empty stacks in profiles in order to +// get the right cumulative sample count. +// +// It may emit to b.pb, so there must be no message encoding in progress. +func (b *profileBuilder) appendLocsForStack(locs []uint64, stk []uintptr) (newLocs []uint64) { + b.deck.reset() + + // The last frame might be truncated. Recover lost inline frames. + stk = runtime_expandFinalInlineFrame(stk) + + for len(stk) > 0 { + addr := stk[0] + if l, ok := b.locs[addr]; ok { + // When generating code for an inlined function, the compiler adds + // NOP instructions to the outermost function as a placeholder for + // each layer of inlining. 
When the runtime generates tracebacks for + // stacks that include inlined functions, it uses the addresses of + // those NOPs as "fake" PCs on the stack as if they were regular + // function call sites. But if a profiling signal arrives while the + // CPU is executing one of those NOPs, its PC will show up as a leaf + // in the profile with its own Location entry. So, always check + // whether addr is a "fake" PC in the context of the current call + // stack by trying to add it to the inlining deck before assuming + // that the deck is complete. + if len(b.deck.pcs) > 0 { + if added := b.deck.tryAdd(addr, l.firstPCFrames, l.firstPCSymbolizeResult); added { + stk = stk[1:] + continue + } + } + + // first record the location if there is any pending accumulated info. + if id := b.emitLocation(); id > 0 { + locs = append(locs, id) + } + + // then, record the cached location. + locs = append(locs, l.id) + + // Skip the matching pcs. + // + // Even if stk was truncated due to the stack depth + // limit, expandFinalInlineFrame above has already + // fixed the truncation, ensuring it is long enough. + stk = stk[len(l.pcs):] + continue + } + + frames, symbolizeResult := allFrames(addr) + if len(frames) == 0 { // runtime.goexit. + if id := b.emitLocation(); id > 0 { + locs = append(locs, id) + } + stk = stk[1:] + continue + } + + if added := b.deck.tryAdd(addr, frames, symbolizeResult); added { + stk = stk[1:] + continue + } + // add failed because this addr is not inlined with the + // existing PCs in the deck. Flush the deck and retry handling + // this pc. + if id := b.emitLocation(); id > 0 { + locs = append(locs, id) + } + + // check cache again - previous emitLocation added a new entry + if l, ok := b.locs[addr]; ok { + locs = append(locs, l.id) + stk = stk[len(l.pcs):] // skip the matching pcs. + } else { + b.deck.tryAdd(addr, frames, symbolizeResult) // must succeed. + stk = stk[1:] + } + } + if id := b.emitLocation(); id > 0 { // emit remaining location. 
+ locs = append(locs, id) + } + return locs +} + +// Here's an example of how Go 1.17 writes out inlined functions, compiled for +// linux/amd64. The disassembly of main.main shows two levels of inlining: main +// calls b, b calls a, a does some work. +// +// inline.go:9 0x4553ec 90 NOPL // func main() { b(v) } +// inline.go:6 0x4553ed 90 NOPL // func b(v *int) { a(v) } +// inline.go:5 0x4553ee 48c7002a000000 MOVQ $0x2a, 0(AX) // func a(v *int) { *v = 42 } +// +// If a profiling signal arrives while executing the MOVQ at 0x4553ee (for line +// 5), the runtime will report the stack as the MOVQ frame being called by the +// NOPL at 0x4553ed (for line 6) being called by the NOPL at 0x4553ec (for line +// 9). +// +// The role of pcDeck is to collapse those three frames back into a single +// location at 0x4553ee, with file/line/function symbolization info representing +// the three layers of calls. It does that via sequential calls to pcDeck.tryAdd +// starting with the leaf-most address. The fourth call to pcDeck.tryAdd will be +// for the caller of main.main. Because main.main was not inlined in its caller, +// the deck will reject the addition, and the fourth PC on the stack will get +// its own location. + +// pcDeck is a helper to detect a sequence of inlined functions from +// a stack trace returned by the runtime. +// +// The stack traces returned by runtime's trackback functions are fully +// expanded (at least for Go functions) and include the fake pcs representing +// inlined functions. The profile proto expects the inlined functions to be +// encoded in one Location message. +// https://github.com/google/pprof/blob/5e965273ee43930341d897407202dd5e10e952cb/proto/profile.proto#L177-L184 +// +// Runtime does not directly expose whether a frame is for an inlined function +// and looking up debug info is not ideal, so we use a heuristic to filter +// the fake pcs and restore the inlined and entry functions. 
Inlined functions +// have the following properties: +// +// Frame's Func is nil (note: also true for non-Go functions), and +// Frame's Entry matches its entry function frame's Entry (note: could also be true for recursive calls and non-Go functions), and +// Frame's Name does not match its entry function frame's name (note: inlined functions cannot be directly recursive). +// +// As reading and processing the pcs in a stack trace one by one (from leaf to the root), +// we use pcDeck to temporarily hold the observed pcs and their expanded frames +// until we observe the entry function frame. +type pcDeck struct { + pcs []uintptr + frames []runtime.Frame + symbolizeResult symbolizeFlag + + // firstPCFrames indicates the number of frames associated with the first + // (leaf-most) PC in the deck + firstPCFrames int + // firstPCSymbolizeResult holds the results of the allFrames call for the + // first (leaf-most) PC in the deck + firstPCSymbolizeResult symbolizeFlag +} + +func (d *pcDeck) reset() { + d.pcs = d.pcs[:0] + d.frames = d.frames[:0] + d.symbolizeResult = 0 + d.firstPCFrames = 0 + d.firstPCSymbolizeResult = 0 +} + +// tryAdd tries to add the pc and Frames expanded from it (most likely one, +// since the stack trace is already fully expanded) and the symbolizeResult +// to the deck. If it fails the caller needs to flush the deck and retry. +func (d *pcDeck) tryAdd(pc uintptr, frames []runtime.Frame, symbolizeResult symbolizeFlag) (success bool) { + if existing := len(d.frames); existing > 0 { + // 'd.frames' are all expanded from one 'pc' and represent all + // inlined functions so we check only the last one. + newFrame := frames[0] + last := d.frames[existing-1] + if last.Func != nil { // the last frame can't be inlined. Flush. + return false + } + if last.Entry == 0 || newFrame.Entry == 0 { // Possibly not a Go function. Don't try to merge. + return false + } + + if last.Entry != newFrame.Entry { // newFrame is for a different function. 
+ return false + } + if last.Function == newFrame.Function { // maybe recursion. + return false + } + } + d.pcs = append(d.pcs, pc) + d.frames = append(d.frames, frames...) + d.symbolizeResult |= symbolizeResult + if len(d.pcs) == 1 { + d.firstPCFrames = len(d.frames) + d.firstPCSymbolizeResult = symbolizeResult + } + return true +} + +// emitLocation emits the new location and function information recorded in the deck +// and returns the location ID encoded in the profile protobuf. +// It emits to b.pb, so there must be no message encoding in progress. +// It resets the deck. +func (b *profileBuilder) emitLocation() uint64 { + if len(b.deck.pcs) == 0 { + return 0 + } + defer b.deck.reset() + + addr := b.deck.pcs[0] + firstFrame := b.deck.frames[0] + + // We can't write out functions while in the middle of the + // Location message, so record new functions we encounter and + // write them out after the Location. + type newFunc struct { + id uint64 + name, file string + startLine int64 + } + newFuncs := make([]newFunc, 0, 8) + + id := uint64(len(b.locs)) + 1 + b.locs[addr] = locInfo{ + id: id, + pcs: append([]uintptr{}, b.deck.pcs...), + firstPCSymbolizeResult: b.deck.firstPCSymbolizeResult, + firstPCFrames: append([]runtime.Frame{}, b.deck.frames[:b.deck.firstPCFrames]...), + } + + start := b.pb.startMessage() + b.pb.uint64Opt(tagLocation_ID, id) + b.pb.uint64Opt(tagLocation_Address, uint64(firstFrame.PC)) + for _, frame := range b.deck.frames { + // Write out each line in frame expansion. 
+ funcID := uint64(b.funcs[frame.Function]) + if funcID == 0 { + funcID = uint64(len(b.funcs)) + 1 + b.funcs[frame.Function] = int(funcID) + var name string + if b.opt.GenericsFrames { + name = runtime_FrameSymbolName(&frame) + } else { + name = frame.Function + } + newFuncs = append(newFuncs, newFunc{ + id: funcID, + name: name, + file: frame.File, + startLine: int64(runtime_FrameStartLine(&frame)), + }) + } + b.pbLine(tagLocation_Line, funcID, int64(frame.Line)) + } + for i := range b.mem { + if b.mem[i].start <= addr && addr < b.mem[i].end || b.mem[i].fake { + b.pb.uint64Opt(tagLocation_MappingID, uint64(i+1)) + + m := b.mem[i] + m.funcs |= b.deck.symbolizeResult + b.mem[i] = m + break + } + } + b.pb.endMessage(tagProfile_Location, start) + + // Write out functions we found during frame expansion. + for _, fn := range newFuncs { + start := b.pb.startMessage() + b.pb.uint64Opt(tagFunction_ID, fn.id) + b.pb.int64Opt(tagFunction_Name, b.stringIndex(fn.name)) + b.pb.int64Opt(tagFunction_SystemName, b.stringIndex(fn.name)) + b.pb.int64Opt(tagFunction_Filename, b.stringIndex(fn.file)) + b.pb.int64Opt(tagFunction_StartLine, fn.startLine) + b.pb.endMessage(tagProfile_Function, start) + } + + b.flush() + return id +} + +func readMapping() []memMap { + data, _ := os.ReadFile("/proc/self/maps") + var mem []memMap + parseProcSelfMaps(data, func(lo, hi, offset uint64, file, buildID string) { + mem = append(mem, memMap{ + start: uintptr(lo), + end: uintptr(hi), + offset: offset, + file: file, + buildID: buildID, + fake: false, + }) + }) + if len(mem) == 0 { // pprof expects a map entry, so fake one. 
+ mem = []memMap{{ + start: uintptr(0), + end: uintptr(0), + offset: 0, + file: "", + buildID: "", + fake: true, + }} + } + return mem +} + +var space = []byte(" ") +var newline = []byte("\n") + +func parseProcSelfMaps(data []byte, addMapping func(lo, hi, offset uint64, file, buildID string)) { + // $ cat /proc/self/maps + // 00400000-0040b000 r-xp 00000000 fc:01 787766 /bin/cat + // 0060a000-0060b000 r--p 0000a000 fc:01 787766 /bin/cat + // 0060b000-0060c000 rw-p 0000b000 fc:01 787766 /bin/cat + // 014ab000-014cc000 rw-p 00000000 00:00 0 [heap] + // 7f7d76af8000-7f7d7797c000 r--p 00000000 fc:01 1318064 /usr/lib/locale/locale-archive + // 7f7d7797c000-7f7d77b36000 r-xp 00000000 fc:01 1180226 /lib/x86_64-linux-gnu/libc-2.19.so + // 7f7d77b36000-7f7d77d36000 ---p 001ba000 fc:01 1180226 /lib/x86_64-linux-gnu/libc-2.19.so + // 7f7d77d36000-7f7d77d3a000 r--p 001ba000 fc:01 1180226 /lib/x86_64-linux-gnu/libc-2.19.so + // 7f7d77d3a000-7f7d77d3c000 rw-p 001be000 fc:01 1180226 /lib/x86_64-linux-gnu/libc-2.19.so + // 7f7d77d3c000-7f7d77d41000 rw-p 00000000 00:00 0 + // 7f7d77d41000-7f7d77d64000 r-xp 00000000 fc:01 1180217 /lib/x86_64-linux-gnu/ld-2.19.so + // 7f7d77f3f000-7f7d77f42000 rw-p 00000000 00:00 0 + // 7f7d77f61000-7f7d77f63000 rw-p 00000000 00:00 0 + // 7f7d77f63000-7f7d77f64000 r--p 00022000 fc:01 1180217 /lib/x86_64-linux-gnu/ld-2.19.so + // 7f7d77f64000-7f7d77f65000 rw-p 00023000 fc:01 1180217 /lib/x86_64-linux-gnu/ld-2.19.so + // 7f7d77f65000-7f7d77f66000 rw-p 00000000 00:00 0 + // 7ffc342a2000-7ffc342c3000 rw-p 00000000 00:00 0 [stack] + // 7ffc34343000-7ffc34345000 r-xp 00000000 00:00 0 [vdso] + // ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0 [vsyscall] + + var line []byte + // next removes and returns the next field in the line. + // It also removes from line any spaces following the field. 
+ next := func() []byte { + var f []byte + f, line, _ = bytesCut(line, space) + line = bytes.TrimLeft(line, " ") + return f + } + + for len(data) > 0 { + line, data, _ = bytesCut(data, newline) + addr := next() + loStr, hiStr, ok := stringsCut(string(addr), "-") + if !ok { + continue + } + lo, err := strconv.ParseUint(loStr, 16, 64) + if err != nil { + continue + } + hi, err := strconv.ParseUint(hiStr, 16, 64) + if err != nil { + continue + } + perm := next() + if len(perm) < 4 || perm[2] != 'x' { + // Only interested in executable mappings. + continue + } + offset, err := strconv.ParseUint(string(next()), 16, 64) + if err != nil { + continue + } + next() // dev + inode := next() // inode + if line == nil { + continue + } + file := string(line) + + // Trim deleted file marker. + deletedStr := " (deleted)" + deletedLen := len(deletedStr) + if len(file) >= deletedLen && file[len(file)-deletedLen:] == deletedStr { + file = file[:len(file)-deletedLen] + } + + if len(inode) == 1 && inode[0] == '0' && file == "" { + // Huge-page text mappings list the initial fragment of + // mapped but unpopulated memory as being inode 0. + // Don't report that part. + // But [vdso] and [vsyscall] are inode 0, so let non-empty file names through. + continue + } + + // TODO: pprof's remapMappingIDs makes one adjustment: + // 1. If there is an /anon_hugepage mapping first and it is + // consecutive to a next mapping, drop the /anon_hugepage. + // There's no indication why this is needed. + // Let's try not doing this and see what breaks. + // If we do need it, it would go here, before we + // enter the mappings into b.mem in the first place. + + buildID, _ := elfBuildID(file) + addMapping(lo, hi, offset, file, buildID) + } +} + +// Cut slices s around the first instance of sep, +// returning the text before and after sep. +// The found result reports whether sep appears in s. +// If sep does not appear in s, cut returns s, nil, false. 
+// +// Cut returns slices of the original slice s, not copies. +func bytesCut(s, sep []byte) (before, after []byte, found bool) { + if i := bytes.Index(s, sep); i >= 0 { + return s[:i], s[i+len(sep):], true + } + return s, nil, false +} + +// Cut slices s around the first instance of sep, +// returning the text before and after sep. +// The found result reports whether sep appears in s. +// If sep does not appear in s, cut returns s, "", false. +func stringsCut(s, sep string) (before, after string, found bool) { + if i := strings.Index(s, sep); i >= 0 { + return s[:i], s[i+len(sep):], true + } + return s, "", false +} diff --git a/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/protobuf.go b/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/protobuf.go new file mode 100644 index 00000000000..7b99095a13a --- /dev/null +++ b/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/protobuf.go @@ -0,0 +1,141 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pprof + +// A protobuf is a simple protocol buffer encoder. 
+type protobuf struct { + data []byte + tmp [16]byte + nest int +} + +func (b *protobuf) varint(x uint64) { + for x >= 128 { + b.data = append(b.data, byte(x)|0x80) + x >>= 7 + } + b.data = append(b.data, byte(x)) +} + +func (b *protobuf) length(tag int, len int) { + b.varint(uint64(tag)<<3 | 2) + b.varint(uint64(len)) +} + +func (b *protobuf) uint64(tag int, x uint64) { + // append varint to b.data + b.varint(uint64(tag)<<3 | 0) + b.varint(x) +} + +func (b *protobuf) uint64s(tag int, x []uint64) { + if len(x) > 2 { + // Use packed encoding + n1 := len(b.data) + for _, u := range x { + b.varint(u) + } + n2 := len(b.data) + b.length(tag, n2-n1) + n3 := len(b.data) + copy(b.tmp[:], b.data[n2:n3]) + copy(b.data[n1+(n3-n2):], b.data[n1:n2]) + copy(b.data[n1:], b.tmp[:n3-n2]) + return + } + for _, u := range x { + b.uint64(tag, u) + } +} + +func (b *protobuf) uint64Opt(tag int, x uint64) { + if x == 0 { + return + } + b.uint64(tag, x) +} + +func (b *protobuf) int64(tag int, x int64) { + u := uint64(x) + b.uint64(tag, u) +} + +func (b *protobuf) int64Opt(tag int, x int64) { + if x == 0 { + return + } + b.int64(tag, x) +} + +func (b *protobuf) int64s(tag int, x []int64) { + if len(x) > 2 { + // Use packed encoding + n1 := len(b.data) + for _, u := range x { + b.varint(uint64(u)) + } + n2 := len(b.data) + b.length(tag, n2-n1) + n3 := len(b.data) + copy(b.tmp[:], b.data[n2:n3]) + copy(b.data[n1+(n3-n2):], b.data[n1:n2]) + copy(b.data[n1:], b.tmp[:n3-n2]) + return + } + for _, u := range x { + b.int64(tag, u) + } +} + +func (b *protobuf) string(tag int, x string) { + b.length(tag, len(x)) + b.data = append(b.data, x...) 
+} + +func (b *protobuf) strings(tag int, x []string) { + for _, s := range x { + b.string(tag, s) + } +} + +func (b *protobuf) stringOpt(tag int, x string) { + if x == "" { + return + } + b.string(tag, x) +} + +func (b *protobuf) bool(tag int, x bool) { + if x { + b.uint64(tag, 1) + } else { + b.uint64(tag, 0) + } +} + +func (b *protobuf) boolOpt(tag int, x bool) { + if x == false { + return + } + b.bool(tag, x) +} + +type msgOffset int + +func (b *protobuf) startMessage() msgOffset { + b.nest++ + return msgOffset(len(b.data)) +} + +func (b *protobuf) endMessage(tag int, start msgOffset) { + n1 := int(start) + n2 := len(b.data) + b.length(tag, n2-n1) + n3 := len(b.data) + copy(b.tmp[:], b.data[n2:n3]) + copy(b.data[n1+(n3-n2):], b.data[n1:n2]) + copy(b.data[n1:], b.tmp[:n3-n2]) + b.nest-- +} diff --git a/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/stub.go b/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/stub.go new file mode 100644 index 00000000000..c617015ecd3 --- /dev/null +++ b/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/stub.go @@ -0,0 +1,17 @@ +//go:build go1.16 && !go1.23 +// +build go1.16,!go1.23 + +package pprof + +// unsafe is required for go:linkname +import _ "unsafe" + +//go:linkname runtime_expandFinalInlineFrame runtime/pprof.runtime_expandFinalInlineFrame +func runtime_expandFinalInlineFrame(stk []uintptr) []uintptr + +//go:linkname runtime_cyclesPerSecond runtime/pprof.runtime_cyclesPerSecond +func runtime_cyclesPerSecond() int64 + +func Runtime_cyclesPerSecond() int64 { + return runtime_cyclesPerSecond() +} diff --git a/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/stub_go20.go b/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/stub_go20.go new file mode 100644 index 00000000000..d271cbc0ba7 --- /dev/null +++ b/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/stub_go20.go @@ -0,0 +1,16 @@ +//go:build go1.16 && !go1.21 +// +build 
go1.16,!go1.21 + +package pprof + +import "runtime" + +// runtime_FrameStartLine is defined in runtime/symtab.go. +func runtime_FrameStartLine(f *runtime.Frame) int { + return 0 +} + +// runtime_FrameSymbolName is defined in runtime/symtab.go. +func runtime_FrameSymbolName(f *runtime.Frame) string { + return f.Function +} diff --git a/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/stub_go21.go b/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/stub_go21.go new file mode 100644 index 00000000000..178ce251cb8 --- /dev/null +++ b/vendor/github.com/grafana/pyroscope-go/godeltaprof/internal/pprof/stub_go21.go @@ -0,0 +1,21 @@ +//go:build go1.21 +// +build go1.21 + +package pprof + +import ( + "runtime" + _ "unsafe" +) + +// runtime_FrameStartLine is defined in runtime/symtab.go. +// +//go:noescape +//go:linkname runtime_FrameStartLine runtime/pprof.runtime_FrameStartLine +func runtime_FrameStartLine(f *runtime.Frame) int + +// runtime_FrameSymbolName is defined in runtime/symtab.go. +// +//go:noescape +//go:linkname runtime_FrameSymbolName runtime/pprof.runtime_FrameSymbolName +func runtime_FrameSymbolName(f *runtime.Frame) string diff --git a/vendor/github.com/grafana/pyroscope-go/godeltaprof/proto.go b/vendor/github.com/grafana/pyroscope-go/godeltaprof/proto.go new file mode 100644 index 00000000000..2107389419c --- /dev/null +++ b/vendor/github.com/grafana/pyroscope-go/godeltaprof/proto.go @@ -0,0 +1,9 @@ +package godeltaprof + +type ProfileOptions struct { + // for go1.21+ if true - use runtime_FrameSymbolName - produces frames with generic types, for example [go.shape.int] + // for go1.21+ if false - use runtime.Frame->Function - produces frames with generic types ommited [...] + // pre 1.21 - always use runtime.Frame->Function - produces frames with generic types ommited [...] 
+ GenericsFrames bool + LazyMappings bool +} From 47f84b305f85c5e3941c48819923721c676455ad Mon Sep 17 00:00:00 2001 From: Joe Elliott Date: Fri, 5 Jan 2024 12:11:35 -0500 Subject: [PATCH 05/12] integration test Signed-off-by: Joe Elliott --- cmd/tempo/app/server_service.go | 1 + integration/e2e/ca/ca.go | 213 ++++++++++++++++++++++++++++++ integration/e2e/config-https.yaml | 79 +++++++++++ integration/e2e/https_test.go | 86 ++++++++++++ integration/util.go | 14 +- pkg/server/internal_server.go | 2 +- 6 files changed, 392 insertions(+), 3 deletions(-) create mode 100644 integration/e2e/ca/ca.go create mode 100644 integration/e2e/config-https.yaml create mode 100644 integration/e2e/https_test.go diff --git a/cmd/tempo/app/server_service.go b/cmd/tempo/app/server_service.go index b6c6d01f0dd..d441c29eb2a 100644 --- a/cmd/tempo/app/server_service.go +++ b/cmd/tempo/app/server_service.go @@ -74,6 +74,7 @@ func (s *tempoServer) StartAndReturnService(cfg server.Config, supportGRPCOnHTTP cfg.Router = nil cfg.DoNotAddDefaultHTTPMiddleware = true // we don't want instrumentation on the "root" router, we want it on our mux } + DisableSignalHandling(&cfg) s.externalServer, err = server.NewWithMetrics(cfg, metrics) if err != nil { diff --git a/integration/e2e/ca/ca.go b/integration/e2e/ca/ca.go new file mode 100644 index 00000000000..575ab445cb8 --- /dev/null +++ b/integration/e2e/ca/ca.go @@ -0,0 +1,213 @@ +package test + +import ( + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "io" + "math/big" + "os" + "path/filepath" + "testing" + "time" + + "github.com/grafana/dskit/runutil" + "github.com/stretchr/testify/require" +) + +type KeyMaterial struct { + CaCertFile string + ServerCertFile string + ServerKeyFile string + ServerNoLocalhostCertFile string + ServerNoLocalhostKeyFile string + ClientCA1CertFile string + ClientCABothCertFile string + Client1CertFile string + Client1KeyFile string + Client2CertFile string + 
Client2KeyFile string +} + +func SetupCertificates(t *testing.T) KeyMaterial { + testCADir := t.TempDir() + + // create server side CA + + testCA := newCA("Test") + caCertFile := filepath.Join(testCADir, "ca.crt") + require.NoError(t, testCA.writeCACertificate(caCertFile)) + + serverCertFile := filepath.Join(testCADir, "server.crt") + serverKeyFile := filepath.Join(testCADir, "server.key") + require.NoError(t, testCA.writeCertificate( + &x509.Certificate{ + Subject: pkix.Name{CommonName: "server"}, + DNSNames: []string{"localhost", "my-other-name"}, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + }, + serverCertFile, + serverKeyFile, + )) + + serverNoLocalhostCertFile := filepath.Join(testCADir, "server-no-localhost.crt") + serverNoLocalhostKeyFile := filepath.Join(testCADir, "server-no-localhost.key") + require.NoError(t, testCA.writeCertificate( + &x509.Certificate{ + Subject: pkix.Name{CommonName: "server-no-localhost"}, + DNSNames: []string{"my-other-name"}, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + }, + serverNoLocalhostCertFile, + serverNoLocalhostKeyFile, + )) + + // create client CAs + testClientCA1 := newCA("Test Client CA 1") + testClientCA2 := newCA("Test Client CA 2") + + clientCA1CertFile := filepath.Join(testCADir, "ca-client-1.crt") + require.NoError(t, testClientCA1.writeCACertificate(clientCA1CertFile)) + clientCA2CertFile := filepath.Join(testCADir, "ca-client-2.crt") + require.NoError(t, testClientCA2.writeCACertificate(clientCA2CertFile)) + + // create a ca file with both certs + clientCABothCertFile := filepath.Join(testCADir, "ca-client-both.crt") + func() { + src1, err := os.Open(clientCA1CertFile) + require.NoError(t, err) + defer src1.Close() + src2, err := os.Open(clientCA2CertFile) + require.NoError(t, err) + defer src2.Close() + + dst, err := os.Create(clientCABothCertFile) + require.NoError(t, err) + defer dst.Close() + + _, err = io.Copy(dst, src1) + require.NoError(t, err) + _, err = 
io.Copy(dst, src2) + require.NoError(t, err) + + }() + + client1CertFile := filepath.Join(testCADir, "client-1.crt") + client1KeyFile := filepath.Join(testCADir, "client-1.key") + require.NoError(t, testClientCA1.writeCertificate( + &x509.Certificate{ + Subject: pkix.Name{CommonName: "client-1"}, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + }, + client1CertFile, + client1KeyFile, + )) + + client2CertFile := filepath.Join(testCADir, "client-2.crt") + client2KeyFile := filepath.Join(testCADir, "client-2.key") + require.NoError(t, testClientCA2.writeCertificate( + &x509.Certificate{ + Subject: pkix.Name{CommonName: "client-2"}, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + }, + client2CertFile, + client2KeyFile, + )) + + return KeyMaterial{ + CaCertFile: caCertFile, + ServerCertFile: serverCertFile, + ServerKeyFile: serverKeyFile, + ServerNoLocalhostCertFile: serverNoLocalhostCertFile, + ServerNoLocalhostKeyFile: serverNoLocalhostKeyFile, + ClientCA1CertFile: clientCA1CertFile, + ClientCABothCertFile: clientCABothCertFile, + Client1CertFile: client1CertFile, + Client1KeyFile: client1KeyFile, + Client2CertFile: client2CertFile, + Client2KeyFile: client2KeyFile, + } +} + +type ca struct { + key *ecdsa.PrivateKey + cert *x509.Certificate + serial *big.Int +} + +func newCA(name string) *ca { + key, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader) + if err != nil { + panic(err) + } + + return &ca{ + key: key, + cert: &x509.Certificate{ + SerialNumber: big.NewInt(1), + Subject: pkix.Name{ + Organization: []string{name}, + }, + NotBefore: time.Now(), + NotAfter: time.Now().Add(time.Hour * 24 * 180), + + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, + BasicConstraintsValid: true, + IsCA: true, + }, + serial: big.NewInt(2), + } +} + +func writeExclusivePEMFile(path, marker string, mode 
os.FileMode, data []byte) (err error) { + f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_EXCL, mode) + if err != nil { + return err + } + defer runutil.CloseWithErrCapture(&err, f, "write pem file") + + return pem.Encode(f, &pem.Block{Type: marker, Bytes: data}) +} + +func (ca *ca) writeCACertificate(path string) error { + derBytes, err := x509.CreateCertificate(rand.Reader, ca.cert, ca.cert, ca.key.Public(), ca.key) + if err != nil { + return err + } + + return writeExclusivePEMFile(path, "CERTIFICATE", 0644, derBytes) +} + +func (ca *ca) writeCertificate(template *x509.Certificate, certPath string, keyPath string) error { + key, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader) + if err != nil { + return err + } + + keyBytes, err := x509.MarshalECPrivateKey(key) + if err != nil { + return err + } + + if err := writeExclusivePEMFile(keyPath, "PRIVATE KEY", 0600, keyBytes); err != nil { + return err + } + + template.IsCA = false + template.NotBefore = time.Now() + if template.NotAfter.IsZero() { + template.NotAfter = time.Now().Add(time.Hour * 24 * 180) + } + template.SerialNumber = ca.serial.Add(ca.serial, big.NewInt(1)) + + derBytes, err := x509.CreateCertificate(rand.Reader, template, ca.cert, key.Public(), ca.key) + if err != nil { + return err + } + + return writeExclusivePEMFile(certPath, "CERTIFICATE", 0644, derBytes) +} diff --git a/integration/e2e/config-https.yaml b/integration/e2e/config-https.yaml new file mode 100644 index 00000000000..70504884488 --- /dev/null +++ b/integration/e2e/config-https.yaml @@ -0,0 +1,79 @@ +target: all +stream_over_http_enabled: true + +server: + http_listen_port: 3200 + tls_cipher_suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 + tls_min_version: VersionTLS12 + grpc_tls_config: + cert_file: /shared/tls.crt + key_file: /shared/tls.key + client_auth_type: VerifyClientCertIfGiven + client_ca_file: 
/shared/ca.crt + http_tls_config: + cert_file: /shared/tls.crt + key_file: /shared/tls.key + client_auth_type: VerifyClientCertIfGiven + client_ca_file: /shared/ca.crt + +internal_server: + enable: true + http_listen_port: 3201 + +distributor: + receivers: + jaeger: + protocols: + grpc: + +ingester: + lifecycler: + address: 127.0.0.1 + ring: + kvstore: + store: inmemory + replication_factor: 1 + final_sleep: 0s + trace_idle_period: 1ms + max_block_duration: 1h + complete_block_timeout: 1h + flush_check_period: 1ms + +storage: + trace: + backend: gcs + gcs: + bucket_name: tempo + endpoint: https://tempo_e2e-gcs:4443/storage/v1/ + insecure: true + pool: + max_workers: 10 + queue_depth: 1000 + +overrides: + user_configurable_overrides: + enabled: true + poll_interval: 10s + client: + backend: gcs + # fsouza/fake-gcs-server does not support versioning + confirm_versioning: false + gcs: + bucket_name: tempo + endpoint: https://tempo_e2e-gcs:4443/storage/v1/ + insecure: true + +ingester_client: + grpc_client_config: + tls_enabled: true + tls_insecure_skip_verify: true + tls_cipher_suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 + tls_min_version: VersionTLS12 + +querier: + frontend_worker: + grpc_client_config: + tls_enabled: true + tls_insecure_skip_verify: true + tls_cipher_suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 + tls_min_version: VersionTLS12 \ No newline at end of file diff --git a/integration/e2e/https_test.go b/integration/e2e/https_test.go new file mode 100644 index 00000000000..53d519f82bb --- /dev/null +++ b/integration/e2e/https_test.go @@ -0,0 +1,86 @@ +package e2e + +import ( + "context" + "crypto/tls" + "net/http" + "os" + "testing" + "time" + + "github.com/grafana/e2e" + "github.com/grafana/tempo/cmd/tempo/app" + util 
"github.com/grafana/tempo/integration" + "github.com/grafana/tempo/integration/e2e/backend" + e2e_ca "github.com/grafana/tempo/integration/e2e/ca" + "github.com/grafana/tempo/pkg/httpclient" + tempoUtil "github.com/grafana/tempo/pkg/util" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/credentials" + "gopkg.in/yaml.v2" +) + +const ( + configHTTPS = "config-https.yaml" +) + +func TestHTTPS(t *testing.T) { + km := e2e_ca.SetupCertificates(t) + + s, err := e2e.NewScenario("tempo_e2e") + require.NoError(t, err) + defer s.Close() + + // set up the backend + cfg := app.Config{} + buff, err := os.ReadFile(configHTTPS) + require.NoError(t, err) + err = yaml.UnmarshalStrict(buff, &cfg) + require.NoError(t, err) + _, err = backend.New(s, cfg) + require.NoError(t, err) + + // copy in certs + require.NoError(t, util.CopyFileToSharedDir(s, km.ServerCertFile, "tls.crt")) + require.NoError(t, util.CopyFileToSharedDir(s, km.ServerKeyFile, "tls.key")) + require.NoError(t, util.CopyFileToSharedDir(s, km.CaCertFile, "ca.crt")) + + require.NoError(t, util.CopyFileToSharedDir(s, configHTTPS, "config.yaml")) + tempo := util.NewTempoAllInOneWithReadinessProbe(e2e.NewHTTPReadinessProbe(3201, "/ready", 200, 299)) + require.NoError(t, s.StartAndWaitReady(tempo)) + + // Get port for the Jaeger gRPC receiver endpoint + c, err := util.NewJaegerGRPCClient(tempo.Endpoint(14250)) + require.NoError(t, err) + require.NotNil(t, c) + + time.Sleep(10 * time.Second) + + info := tempoUtil.NewTraceInfo(time.Now(), "") + require.NoError(t, info.EmitAllBatches(c)) + + apiClient := httpclient.New("https://"+tempo.Endpoint(3200), "") + + // trust bad certs + defaultTransport := http.DefaultTransport.(*http.Transport).Clone() + defaultTransport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} + apiClient.WithTransport(defaultTransport) + + echoReq, err := http.NewRequest("GET", "https://"+tempo.Endpoint(3200)+"/api/echo", nil) + require.NoError(t, err) + resp, err := 
apiClient.Do(echoReq) + require.NoError(t, err) + require.Equal(t, http.StatusOK, resp.StatusCode) + + // query an in-memory trace + queryAndAssertTrace(t, apiClient, info) + util.SearchAndAssertTrace(t, apiClient, info) + util.SearchTraceQLAndAssertTrace(t, apiClient, info) + + creds := credentials.NewTLS(&tls.Config{InsecureSkipVerify: true}) + grpcClient, err := util.NewSearchGRPCClientWithCredentials(context.Background(), tempo.Endpoint(3200), creds) + require.NoError(t, err) + + now := time.Now() + util.SearchStreamAndAssertTrace(t, grpcClient, info, now.Add(-time.Hour).Unix(), now.Add(time.Hour).Unix()) +} diff --git a/integration/util.go b/integration/util.go index 3f3b7ea9ade..7d1e8251940 100644 --- a/integration/util.go +++ b/integration/util.go @@ -22,6 +22,7 @@ import ( "github.com/stretchr/testify/require" "go.uber.org/zap" "google.golang.org/grpc" + "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" "github.com/grafana/tempo/pkg/httpclient" @@ -57,6 +58,10 @@ func buildArgsWithExtra(args, extraArgs []string) []string { } func NewTempoAllInOne(extraArgs ...string) *e2e.HTTPService { + return NewTempoAllInOneWithReadinessProbe(e2e.NewHTTPReadinessProbe(3200, "/ready", 200, 299), extraArgs...) 
+} + +func NewTempoAllInOneWithReadinessProbe(rp e2e.ReadinessProbe, extraArgs ...string) *e2e.HTTPService { args := []string{"-config.file=" + filepath.Join(e2e.ContainerSharedDir, "config.yaml")} args = buildArgsWithExtra(args, extraArgs) @@ -64,8 +69,9 @@ func NewTempoAllInOne(extraArgs ...string) *e2e.HTTPService { "tempo", image, e2e.NewCommandWithoutEntrypoint("/tempo", args...), - e2e.NewHTTPReadinessProbe(3200, "/ready", 200, 299), + rp, 3200, // http all things + 3201, // http all things 9095, // grpc tempo 14250, // jaeger grpc ingest 9411, // zipkin ingest (used by load) @@ -281,7 +287,11 @@ func NewJaegerGRPCClient(endpoint string) (*jaeger_grpc.Reporter, error) { } func NewSearchGRPCClient(ctx context.Context, endpoint string) (tempopb.StreamingQuerierClient, error) { - clientConn, err := grpc.DialContext(ctx, endpoint, grpc.WithTransportCredentials(insecure.NewCredentials())) + return NewSearchGRPCClientWithCredentials(ctx, endpoint, insecure.NewCredentials()) +} + +func NewSearchGRPCClientWithCredentials(ctx context.Context, endpoint string, creds credentials.TransportCredentials) (tempopb.StreamingQuerierClient, error) { + clientConn, err := grpc.DialContext(ctx, endpoint, grpc.WithTransportCredentials(creds)) if err != nil { return nil, err } diff --git a/pkg/server/internal_server.go b/pkg/server/internal_server.go index ad3374d27f8..67ce931a3e8 100644 --- a/pkg/server/internal_server.go +++ b/pkg/server/internal_server.go @@ -15,7 +15,7 @@ type Config struct { // RegisterFlags add internal server flags to flagset func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - f.StringVar(&cfg.Config.HTTPListenAddress, "internal-server.http-listen-address", "localhost", "HTTP internal server listen address.") + f.StringVar(&cfg.Config.HTTPListenAddress, "internal-server.http-listen-address", "", "HTTP internal server listen address.") f.StringVar(&cfg.Config.HTTPListenNetwork, "internal-server.http-listen-network", serverww.DefaultNetwork, "HTTP internal 
server listen network, default tcp") f.StringVar(&cfg.Config.HTTPTLSConfig.TLSCertPath, "internal-server.http-tls-cert-path", "", "HTTP internal server cert path.") f.StringVar(&cfg.Config.HTTPTLSConfig.TLSKeyPath, "internal-server.http-tls-key-path", "", "HTTP internal server key path.") From 982c6490eda8863f5f82f13f967fe13d53cfbc9a Mon Sep 17 00:00:00 2001 From: Joe Elliott Date: Tue, 16 Jan 2024 15:33:04 -0500 Subject: [PATCH 06/12] updated go.mod to latest Signed-off-by: Joe Elliott --- go.mod | 4 +- go.sum | 2 + .../dskit/kv/memberlist/memberlist_client.go | 2 +- vendor/github.com/grafana/dskit/ring/model.go | 7 ++ .../grafana/dskit/ring/replication_set.go | 15 ++- .../ring/spread_minimizing_token_generator.go | 108 +++++++----------- .../github.com/grafana/dskit/server/server.go | 56 ++------- .../grafana/dskit/spanlogger/spanlogger.go | 14 +++ vendor/modules.txt | 3 +- 9 files changed, 92 insertions(+), 119 deletions(-) diff --git a/go.mod b/go.mod index 5a754462987..394d9b25ab2 100644 --- a/go.mod +++ b/go.mod @@ -26,7 +26,7 @@ require ( github.com/google/go-cmp v0.6.0 github.com/google/uuid v1.4.0 github.com/gorilla/mux v1.8.1 - github.com/grafana/dskit v0.0.0-20231120170505-765e343eda4f + github.com/grafana/dskit v0.0.0-20240116202611-824e75a28ee8 github.com/grafana/e2e v0.1.1 github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 github.com/hashicorp/go-hclog v1.5.0 @@ -343,5 +343,3 @@ replace ( replace github.com/hashicorp/memberlist => github.com/grafana/memberlist v0.3.1-0.20220708130638-bd88e10a3d91 replace golang.org/x/net => golang.org/x/net v0.17.0 - -replace github.com/grafana/dskit => ../dskit diff --git a/go.sum b/go.sum index 43ae3c1cbe2..9616cc62964 100644 --- a/go.sum +++ b/go.sum @@ -508,6 +508,8 @@ github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+ github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= github.com/gorilla/websocket v1.5.0 
h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/grafana/dskit v0.0.0-20240116202611-824e75a28ee8 h1:qduBYOZAR5/RUO6yOlq1qYSw4tqeS3YeNxIHpQ4JIW8= +github.com/grafana/dskit v0.0.0-20240116202611-824e75a28ee8/go.mod h1:x5DMwyr1kyirtHOxoFSZ7RnyOgHdGh03ZruupdPetQM= github.com/grafana/e2e v0.1.1 h1:/b6xcv5BtoBnx8cZnCiey9DbjEc8z7gXHO5edoeRYxc= github.com/grafana/e2e v0.1.1/go.mod h1:RpNLgae5VT+BUHvPE+/zSypmOXKwEu4t+tnEMS1ATaE= github.com/grafana/gomemcache v0.0.0-20231023152154-6947259a0586 h1:/of8Z8taCPftShATouOrBVy6GaTTjgQd/VfNiZp/VXQ= diff --git a/vendor/github.com/grafana/dskit/kv/memberlist/memberlist_client.go b/vendor/github.com/grafana/dskit/kv/memberlist/memberlist_client.go index 693964b5ad0..e8a94debe18 100644 --- a/vendor/github.com/grafana/dskit/kv/memberlist/memberlist_client.go +++ b/vendor/github.com/grafana/dskit/kv/memberlist/memberlist_client.go @@ -177,7 +177,7 @@ func (cfg *KVConfig) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix string) { // "Defaults to hostname" -- memberlist sets it to hostname by default. f.StringVar(&cfg.NodeName, prefix+"memberlist.nodename", "", "Name of the node in memberlist cluster. Defaults to hostname.") // memberlist.DefaultLANConfig will put hostname here. 
f.BoolVar(&cfg.RandomizeNodeName, prefix+"memberlist.randomize-node-name", true, "Add random suffix to the node name.") - f.DurationVar(&cfg.StreamTimeout, prefix+"memberlist.stream-timeout", mlDefaults.TCPTimeout, "The timeout for establishing a connection with a remote node, and for read/write operations.") + f.DurationVar(&cfg.StreamTimeout, prefix+"memberlist.stream-timeout", 2*time.Second, "The timeout for establishing a connection with a remote node, and for read/write operations.") f.IntVar(&cfg.RetransmitMult, prefix+"memberlist.retransmit-factor", mlDefaults.RetransmitMult, "Multiplication factor used when sending out messages (factor * log(N+1)).") f.Var(&cfg.JoinMembers, prefix+"memberlist.join", "Other cluster members to join. Can be specified multiple times. It can be an IP, hostname or an entry specified in the DNS Service Discovery format.") f.DurationVar(&cfg.MinJoinBackoff, prefix+"memberlist.min-join-backoff", 1*time.Second, "Min backoff duration to join other cluster members.") diff --git a/vendor/github.com/grafana/dskit/ring/model.go b/vendor/github.com/grafana/dskit/ring/model.go index 956dbe0cf42..d11707e9cd4 100644 --- a/vendor/github.com/grafana/dskit/ring/model.go +++ b/vendor/github.com/grafana/dskit/ring/model.go @@ -21,6 +21,13 @@ func (ts ByAddr) Len() int { return len(ts) } func (ts ByAddr) Swap(i, j int) { ts[i], ts[j] = ts[j], ts[i] } func (ts ByAddr) Less(i, j int) bool { return ts[i].Addr < ts[j].Addr } +// ByID is a sortable list of InstanceDesc. 
+type ByID []InstanceDesc + +func (ts ByID) Len() int { return len(ts) } +func (ts ByID) Swap(i, j int) { ts[i], ts[j] = ts[j], ts[i] } +func (ts ByID) Less(i, j int) bool { return ts[i].Id < ts[j].Id } + // ProtoDescFactory makes new Descs func ProtoDescFactory() proto.Message { return NewDesc() diff --git a/vendor/github.com/grafana/dskit/ring/replication_set.go b/vendor/github.com/grafana/dskit/ring/replication_set.go index f05153c0525..b6bae256f77 100644 --- a/vendor/github.com/grafana/dskit/ring/replication_set.go +++ b/vendor/github.com/grafana/dskit/ring/replication_set.go @@ -468,6 +468,17 @@ func HasReplicationSetChangedWithoutState(before, after ReplicationSet) bool { }) } +// Has HasReplicationSetChangedWithoutStateOrAddr returns false if two replications sets +// are the same (with possibly different timestamps, instance states, and ip addresses), +// true if they differ in any other way (number of instances, tokens, zones, ...). +func HasReplicationSetChangedWithoutStateOrAddr(before, after ReplicationSet) bool { + return hasReplicationSetChangedExcluding(before, after, func(i *InstanceDesc) { + i.Timestamp = 0 + i.State = PENDING + i.Addr = "" + }) +} + // Do comparison of replicasets, but apply a function first // to be able to exclude (reset) some values func hasReplicationSetChangedExcluding(before, after ReplicationSet, exclude func(*InstanceDesc)) bool { @@ -478,8 +489,8 @@ func hasReplicationSetChangedExcluding(before, after ReplicationSet, exclude fun return true } - sort.Sort(ByAddr(beforeInstances)) - sort.Sort(ByAddr(afterInstances)) + sort.Sort(ByID(beforeInstances)) + sort.Sort(ByID(afterInstances)) for i := 0; i < len(beforeInstances); i++ { b := beforeInstances[i] diff --git a/vendor/github.com/grafana/dskit/ring/spread_minimizing_token_generator.go b/vendor/github.com/grafana/dskit/ring/spread_minimizing_token_generator.go index 2363825076f..bd2ed9970a5 100644 --- 
a/vendor/github.com/grafana/dskit/ring/spread_minimizing_token_generator.go +++ b/vendor/github.com/grafana/dskit/ring/spread_minimizing_token_generator.go @@ -8,10 +8,6 @@ import ( "sort" "strconv" - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/pkg/errors" - "golang.org/x/exp/slices" ) @@ -22,11 +18,10 @@ const ( ) var ( - instanceIDRegex = regexp.MustCompile(`^(.*)-(\d+)$`) + instanceIDRegex = regexp.MustCompile(`^(.*-)(\d+)$`) errorBadInstanceIDFormat = func(instanceID string) error { return fmt.Errorf("unable to extract instance id from %q", instanceID) } - errorNoPreviousInstance = fmt.Errorf("impossible to find the instance preceding the target instance, because it is the first instance") errorMissingPreviousInstance = func(requiredInstanceID string) error { return fmt.Errorf("the instance %q has not been registered to the ring or has no tokens yet", requiredInstanceID) @@ -49,15 +44,13 @@ var ( ) type SpreadMinimizingTokenGenerator struct { - instanceID int - instance string - zoneID int - spreadMinimizingZones []string - canJoinEnabled bool - logger log.Logger + instanceID int + instancePrefix string + zoneID int + canJoinEnabled bool } -func NewSpreadMinimizingTokenGenerator(instance, zone string, spreadMinimizingZones []string, canJoinEnabled bool, logger log.Logger) (*SpreadMinimizingTokenGenerator, error) { +func NewSpreadMinimizingTokenGenerator(instance, zone string, spreadMinimizingZones []string, canJoinEnabled bool) (*SpreadMinimizingTokenGenerator, error) { if len(spreadMinimizingZones) <= 0 || len(spreadMinimizingZones) > maxZonesCount { return nil, errorZoneCountOutOfBound(len(spreadMinimizingZones)) } @@ -66,52 +59,35 @@ func NewSpreadMinimizingTokenGenerator(instance, zone string, spreadMinimizingZo if !slices.IsSorted(sortedZones) { sort.Strings(sortedZones) } - instanceID, err := parseInstanceID(instance) + zoneID, err := findZoneID(zone, sortedZones) if err != nil { return nil, err } - zoneID, err := 
findZoneID(zone, sortedZones) + + prefix, instanceID, err := parseInstanceID(instance) if err != nil { return nil, err } - tokenGenerator := &SpreadMinimizingTokenGenerator{ - instanceID: instanceID, - instance: instance, - zoneID: zoneID, - spreadMinimizingZones: sortedZones, - canJoinEnabled: canJoinEnabled, - logger: logger, - } - return tokenGenerator, nil + return NewSpreadMinimizingTokenGeneratorForInstanceAndZoneID(prefix, instanceID, zoneID, canJoinEnabled), nil } -func parseInstanceID(instanceID string) (int, error) { - parts := instanceIDRegex.FindStringSubmatch(instanceID) - if len(parts) != 3 { - return 0, errorBadInstanceIDFormat(instanceID) +func NewSpreadMinimizingTokenGeneratorForInstanceAndZoneID(instancePrefix string, instanceID, zoneID int, canJoinEnabled bool) *SpreadMinimizingTokenGenerator { + return &SpreadMinimizingTokenGenerator{ + instanceID: instanceID, + instancePrefix: instancePrefix, + zoneID: zoneID, + canJoinEnabled: canJoinEnabled, } - return strconv.Atoi(parts[2]) } -// previousInstance determines the string id of the instance preceding the given instance string id. -// If it is impossible to parse the given instanceID, or it is impossible to determine its predecessor -// because the passed instanceID has a bad format, or has no predecessor, an error is returned. -// For examples, my-instance-1 is preceded by instance my-instance-0, but my-instance-0 has no -// predecessor because its index is 0. 
-func previousInstance(instanceID string) (string, error) { +func parseInstanceID(instanceID string) (string, int, error) { parts := instanceIDRegex.FindStringSubmatch(instanceID) if len(parts) != 3 { - return "", errorBadInstanceIDFormat(instanceID) - } - id, err := strconv.Atoi(parts[2]) - if err != nil { - return "", err - } - if id == 0 { - return "", errorNoPreviousInstance + return "", 0, errorBadInstanceIDFormat(instanceID) } - return fmt.Sprintf("%s-%d", parts[1], id-1), nil + val, err := strconv.Atoi(parts[2]) + return parts[1], val, err } // findZoneID gets a zone name and a slice of sorted zones, @@ -193,7 +169,11 @@ func (t *SpreadMinimizingTokenGenerator) GenerateTokens(requestedTokensCount int used[v] = true } - allTokens := t.generateAllTokens() + allTokens, err := t.generateAllTokens() + if err != nil { + // we were unable to generate required tokens, so we panic. + panic(err) + } uniqueTokens := make(Tokens, 0, requestedTokensCount) // allTokens is a sorted slice of tokens for instance t.cfg.InstanceID in zone t.cfg.zone @@ -214,11 +194,14 @@ func (t *SpreadMinimizingTokenGenerator) GenerateTokens(requestedTokensCount int // placed in the ring that already contains instances with all the ids lower that t.instanceID // is optimal. // Calls to this method will always return the same set of tokens. -func (t *SpreadMinimizingTokenGenerator) generateAllTokens() Tokens { - tokensByInstanceID := t.generateTokensByInstanceID() +func (t *SpreadMinimizingTokenGenerator) generateAllTokens() (Tokens, error) { + tokensByInstanceID, err := t.generateTokensByInstanceID() + if err != nil { + return nil, err + } allTokens := tokensByInstanceID[t.instanceID] slices.Sort(allTokens) - return allTokens + return allTokens, nil } // generateTokensByInstanceID generates the optimal number of tokens (optimalTokenPerInstance), @@ -226,13 +209,13 @@ func (t *SpreadMinimizingTokenGenerator) generateAllTokens() Tokens { // (with id t.instanceID). 
Generated tokens are not sorted, but they are distributed in such a // way that registered ownership of all the instances is optimal. // Calls to this method will always return the same set of tokens. -func (t *SpreadMinimizingTokenGenerator) generateTokensByInstanceID() map[int]Tokens { +func (t *SpreadMinimizingTokenGenerator) generateTokensByInstanceID() (map[int]Tokens, error) { firstInstanceTokens := t.generateFirstInstanceTokens() tokensByInstanceID := make(map[int]Tokens, t.instanceID+1) tokensByInstanceID[0] = firstInstanceTokens if t.instanceID == 0 { - return tokensByInstanceID + return tokensByInstanceID, nil } // tokensQueues is a slice of priority queues. Slice indexes correspond @@ -272,10 +255,8 @@ func (t *SpreadMinimizingTokenGenerator) generateTokensByInstanceID() map[int]To optimalTokenOwnership := t.optimalTokenOwnership(optimalInstanceOwnership, currInstanceOwnership, uint32(optimalTokensPerInstance-addedTokens)) highestOwnershipInstance := instanceQueue.Peek() if highestOwnershipInstance == nil || highestOwnershipInstance.ownership <= float64(optimalTokenOwnership) { - level.Warn(t.logger).Log("msg", "it was impossible to add a token because the instance with the highest ownership cannot satisfy the request", "added tokens", addedTokens+1, "highest ownership", highestOwnershipInstance.ownership, "requested ownership", optimalTokenOwnership) - // if this happens, it means that we cannot accommodate other tokens, so we panic - err := fmt.Errorf("it was impossible to add %dth token for instance with id %d in zone %s because the instance with the highest ownership cannot satisfy the requested ownership %d", addedTokens+1, i, t.spreadMinimizingZones[t.zoneID], optimalTokenOwnership) - panic(err) + // if this happens, it means that we cannot accommodate other tokens + return nil, fmt.Errorf("it was impossible to add %dth token for instance with id %d in zone id %d because the instance with the highest ownership cannot satisfy the requested ownership 
%d", addedTokens+1, i, t.zoneID, optimalTokenOwnership) } tokensQueue := tokensQueues[highestOwnershipInstance.item.instanceID] highestOwnershipToken := tokensQueue.Peek() @@ -288,10 +269,8 @@ func (t *SpreadMinimizingTokenGenerator) generateTokensByInstanceID() map[int]To token := highestOwnershipToken.item newToken, err := t.calculateNewToken(token, optimalTokenOwnership) if err != nil { - level.Error(t.logger).Log("msg", "it was impossible to calculate a new token because an error occurred", "err", err) - // if this happens, it means that we cannot accommodate additional tokens, so we panic - err := fmt.Errorf("it was impossible to calculate the %dth token for instance with id %d in zone %s", addedTokens+1, i, t.spreadMinimizingZones[t.zoneID]) - panic(err) + // if this happens, it means that we cannot accommodate additional tokens + return nil, fmt.Errorf("it was impossible to calculate the %dth token for instance with id %d in zone id %d", addedTokens+1, i, t.zoneID) } tokens = append(tokens, newToken) // add the new token to currInstanceTokenQueue @@ -317,7 +296,7 @@ func (t *SpreadMinimizingTokenGenerator) generateTokensByInstanceID() map[int]To tokensByInstanceID[i] = tokens // if this is the last iteration we return, so we avoid to call additional heap.Pushs if i == t.instanceID { - return tokensByInstanceID + return tokensByInstanceID, nil } // If there were some ignored instances, we put them back on the queue. 
@@ -331,7 +310,7 @@ func (t *SpreadMinimizingTokenGenerator) generateTokensByInstanceID() map[int]To heap.Push(&instanceQueue, newRingInstanceOwnershipInfo(i, currInstanceOwnership)) } - return tokensByInstanceID + return tokensByInstanceID, nil } func (t *SpreadMinimizingTokenGenerator) CanJoin(instances map[string]InstanceDesc) error { @@ -339,13 +318,10 @@ func (t *SpreadMinimizingTokenGenerator) CanJoin(instances map[string]InstanceDe return nil } - prevInstance, err := previousInstance(t.instance) - if err != nil { - if errors.Is(err, errorNoPreviousInstance) { - return nil - } - return err + if t.instanceID == 0 { + return nil } + prevInstance := fmt.Sprintf("%s%d", t.instancePrefix, t.instanceID-1) instanceDesc, ok := instances[prevInstance] if ok && len(instanceDesc.Tokens) != 0 { return nil diff --git a/vendor/github.com/grafana/dskit/server/server.go b/vendor/github.com/grafana/dskit/server/server.go index b9d67ad51f3..a2ab54556cb 100644 --- a/vendor/github.com/grafana/dskit/server/server.go +++ b/vendor/github.com/grafana/dskit/server/server.go @@ -28,7 +28,6 @@ import ( "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/prometheus/common/config" "github.com/prometheus/exporter-toolkit/web" - "github.com/soheilhy/cmux" "golang.org/x/net/netutil" "google.golang.org/grpc" "google.golang.org/grpc/credentials" @@ -114,7 +113,6 @@ type Config struct { HTTPMiddleware []middleware.Interface `yaml:"-"` Router *mux.Router `yaml:"-"` DoNotAddDefaultHTTPMiddleware bool `yaml:"-"` - RouteHTTPToGRPC bool `yaml:"-"` GRPCServerMaxRecvMsgSize int `yaml:"grpc_server_max_recv_msg_size"` GRPCServerMaxSendMsgSize int `yaml:"grpc_server_max_send_msg_size"` @@ -220,13 +218,6 @@ type Server struct { grpcListener net.Listener httpListener net.Listener - // These fields are used to support grpc over the http server - // if RouteHTTPToGRPC is set. 
the fields are kept here - // so they can be initialized in New() and started in Run() - grpchttpmux cmux.CMux - grpcOnHTTPListener net.Listener - GRPCOnHTTPServer *grpc.Server - HTTP *mux.Router HTTPServer *http.Server GRPC *grpc.Server @@ -278,15 +269,6 @@ func newServer(cfg Config, metrics *Metrics) (*Server, error) { httpListener = netutil.LimitListener(httpListener, cfg.HTTPConnLimit) } - var grpcOnHTTPListener net.Listener - var grpchttpmux cmux.CMux - if cfg.RouteHTTPToGRPC { - grpchttpmux = cmux.New(httpListener) - - httpListener = grpchttpmux.Match(cmux.HTTP1Fast("PATCH")) - grpcOnHTTPListener = grpchttpmux.Match(cmux.HTTP2()) - } - network = cfg.GRPCListenNetwork if network == "" { network = DefaultNetwork @@ -437,7 +419,6 @@ func newServer(cfg Config, metrics *Metrics) (*Server, error) { grpcOptions = append(grpcOptions, grpc.Creds(grpcCreds)) } grpcServer := grpc.NewServer(grpcOptions...) - grpcOnHTTPServer := grpc.NewServer(grpcOptions...) httpMiddleware, err := BuildHTTPMiddleware(cfg, router, metrics, logger) if err != nil { @@ -461,20 +442,17 @@ func newServer(cfg Config, metrics *Metrics) (*Server, error) { } return &Server{ - cfg: cfg, - httpListener: httpListener, - grpcListener: grpcListener, - grpcOnHTTPListener: grpcOnHTTPListener, - handler: handler, - grpchttpmux: grpchttpmux, - - HTTP: router, - HTTPServer: httpServer, - GRPC: grpcServer, - GRPCOnHTTPServer: grpcOnHTTPServer, - Log: logger, - Registerer: cfg.registererOrDefault(), - Gatherer: gatherer, + cfg: cfg, + httpListener: httpListener, + grpcListener: grpcListener, + handler: handler, + + HTTP: router, + HTTPServer: httpServer, + GRPC: grpcServer, + Log: logger, + Registerer: cfg.registererOrDefault(), + Gatherer: gatherer, }, nil } @@ -572,18 +550,6 @@ func (s *Server) Run() error { handleGRPCError(err, errChan) }() - // grpchttpmux will only be set if grpchttpmux RouteHTTPToGRPC is set - if s.grpchttpmux != nil { - go func() { - err := s.grpchttpmux.Serve() - handleGRPCError(err, 
errChan) - }() - go func() { - err := s.GRPCOnHTTPServer.Serve(s.grpcOnHTTPListener) - handleGRPCError(err, errChan) - }() - } - return <-errChan } diff --git a/vendor/github.com/grafana/dskit/spanlogger/spanlogger.go b/vendor/github.com/grafana/dskit/spanlogger/spanlogger.go index 08653eda38a..c6a87250142 100644 --- a/vendor/github.com/grafana/dskit/spanlogger/spanlogger.go +++ b/vendor/github.com/grafana/dskit/spanlogger/spanlogger.go @@ -167,3 +167,17 @@ func (s *SpanLogger) getLogger() log.Logger { } return logger } + +// SetSpanAndLogTag sets a tag on the span used by this SpanLogger, and appends a key/value pair to the logger used for +// future log lines emitted by this SpanLogger. +// +// It is not safe to call this method from multiple goroutines simultaneously. +// It is safe to call this method at the same time as calling other SpanLogger methods, however, this may produce +// inconsistent results (eg. some log lines may be emitted with the provided key/value pair, and others may not). 
+func (s *SpanLogger) SetSpanAndLogTag(key string, value interface{}) { + s.Span.SetTag(key, value) + + logger := s.getLogger() + wrappedLogger := log.With(logger, key, value) + s.logger.Store(&wrappedLogger) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 0f6fc1b8bd2..aa3b73747a6 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -488,7 +488,7 @@ github.com/gorilla/mux # github.com/gorilla/websocket v1.5.0 ## explicit; go 1.12 github.com/gorilla/websocket -# github.com/grafana/dskit v0.0.0-20231120170505-765e343eda4f => ../dskit +# github.com/grafana/dskit v0.0.0-20240116202611-824e75a28ee8 ## explicit; go 1.20 github.com/grafana/dskit/backoff github.com/grafana/dskit/cancellation @@ -1924,4 +1924,3 @@ gopkg.in/yaml.v3 # k8s.io/client-go => k8s.io/client-go v0.25.0 # github.com/hashicorp/memberlist => github.com/grafana/memberlist v0.3.1-0.20220708130638-bd88e10a3d91 # golang.org/x/net => golang.org/x/net v0.17.0 -# github.com/grafana/dskit => ../dskit From 73374ee76eb9fcb53905303d420ac90b291b23a1 Mon Sep 17 00:00:00 2001 From: Joe Elliott Date: Wed, 17 Jan 2024 10:20:15 -0500 Subject: [PATCH 07/12] cleanup Signed-off-by: Joe Elliott --- cmd/tempo/app/modules.go | 9 --------- cmd/tempo/app/server_service.go | 29 +++++------------------------ 2 files changed, 5 insertions(+), 33 deletions(-) diff --git a/cmd/tempo/app/modules.go b/cmd/tempo/app/modules.go index 6ce3333545c..939b3c8e4b4 100644 --- a/cmd/tempo/app/modules.go +++ b/cmd/tempo/app/modules.go @@ -349,15 +349,6 @@ func (t *App) initQueryFrontend() (services.Service, error) { } t.frontend = v1 - // jpe - add stream over http support here - // - update docs to reflect new defaults - // - remove stream_over_http_enabled config in integration tests - // - review impact on GET - // - remove support from dskit - // - restore default = false - // - remove ws support - // - review dskit server settings, do i need to copy any to the router? 
- // create query frontend queryFrontend, err := frontend.New(t.cfg.Frontend, cortexTripper, t.Overrides, t.store, t.cacheProvider, t.cfg.HTTPAPIPrefix, log.Logger, prometheus.DefaultRegisterer) if err != nil { diff --git a/cmd/tempo/app/server_service.go b/cmd/tempo/app/server_service.go index d441c29eb2a..20e6dcfd084 100644 --- a/cmd/tempo/app/server_service.go +++ b/cmd/tempo/app/server_service.go @@ -5,7 +5,6 @@ import ( "fmt" "net/http" "strings" - "sync" "github.com/go-kit/log" "github.com/go-kit/log/level" @@ -24,18 +23,15 @@ type TempoServer interface { HTTP() *mux.Router GRPC() *grpc.Server Log() log.Logger - EnableHTTP2() StartAndReturnService(cfg server.Config, supportGRPCOnHTTP bool, servicesToWaitFor func() []services.Service) (services.Service, error) } +// todo: evaluate whether the internal server should be included as part of this type tempoServer struct { mux *mux.Router // all tempo http routes are added here externalServer *server.Server // the standard server that all HTTP/GRPC requests are served on - // jpe: put internal server here as well? - - enableHTTP2 sync.Once } func newTempoServer() *tempoServer { @@ -57,12 +53,6 @@ func (s *tempoServer) Log() log.Logger { return s.externalServer.Log } -func (s *tempoServer) EnableHTTP2() { - s.enableHTTP2.Do(func() { - s.externalServer.HTTPServer.Handler = h2c.NewHandler(s.externalServer.HTTPServer.Handler, &http2.Server{}) - }) -} - func (s *tempoServer) StartAndReturnService(cfg server.Config, supportGRPCOnHTTP bool, servicesToWaitFor func() []services.Service) (services.Service, error) { var err error @@ -72,7 +62,7 @@ func (s *tempoServer) StartAndReturnService(cfg server.Config, supportGRPCOnHTTP cfg.Router = s.mux if supportGRPCOnHTTP { cfg.Router = nil - cfg.DoNotAddDefaultHTTPMiddleware = true // we don't want instrumentation on the "root" router, we want it on our mux + cfg.DoNotAddDefaultHTTPMiddleware = true // we don't want instrumentation on the "root" router, we want it on our mux. 
it will be added below. } DisableSignalHandling(&cfg) @@ -83,17 +73,8 @@ func (s *tempoServer) StartAndReturnService(cfg server.Config, supportGRPCOnHTTP // now that we have created the server and service let's setup our grpc/http router if necessary if supportGRPCOnHTTP { - s.EnableHTTP2() - // jpe - this works as well - // s.externalServer.HTTP.NotFoundHandler = http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - // // route to GRPC server if it's a GRPC request - // if req.ProtoMajor == 2 && strings.Contains(req.Header.Get("Content-Type"), "application/grpc") { // jpe - both? i don't think grafana sends the content-type header - // s.externalServer.GRPC.ServeHTTP(w, req) - // return - // } - - // w.WriteHeader(http.StatusNotFound) - // }) + // for grpc to work we must enable h2c on the external server + s.externalServer.HTTPServer.Handler = h2c.NewHandler(s.externalServer.HTTPServer.Handler, &http2.Server{}) // recreate dskit instrumentation here cfg.DoNotAddDefaultHTTPMiddleware = false @@ -104,7 +85,7 @@ func (s *tempoServer) StartAndReturnService(cfg server.Config, supportGRPCOnHTTP router := middleware.Merge(httpMiddleware...).Wrap(s.mux) s.externalServer.HTTP.PathPrefix("/").HandlerFunc(func(w http.ResponseWriter, req *http.Request) { // route to GRPC server if it's a GRPC request - if req.ProtoMajor == 2 && strings.Contains(req.Header.Get("Content-Type"), "application/grpc") { // jpe - both? 
i don't think grafana sends the content-type header + if req.ProtoMajor == 2 && strings.Contains(req.Header.Get("Content-Type"), "application/grpc") { s.externalServer.GRPC.ServeHTTP(w, req) return } From 730352192aacf74c85177034cbbc4124a40c3e76 Mon Sep 17 00:00:00 2001 From: Joe Elliott Date: Wed, 17 Jan 2024 10:25:08 -0500 Subject: [PATCH 08/12] changelog Signed-off-by: Joe Elliott --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index fc2521c72de..cd5f777dd1c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -38,6 +38,7 @@ * [BUGFIX] Sanitize name in mapped dimensions in span-metrics processor [#3171](https://github.com/grafana/tempo/pull/3171) (@mapno) * [BUGFIX] Fixed an issue where cached footers were requested then ignored. [#3196](https://github.com/grafana/tempo/pull/3196) (@joe-elliott) * [BUGFIX] Fix panic in autocomplete when query condition had wrong type [#3277](https://github.com/grafana/tempo/pull/3277) (@mapno) +* [BUGFIX] Fix TLS when GRPC is enabled on HTTP [#3300](https://github.com/grafana/tempo/pull/3300) (@joe-elliott) ## v2.3.1 / 2023-11-28 From 5a57ddbf42e05f59f5c11c94375ed0bbf35ef043 Mon Sep 17 00:00:00 2001 From: Joe Elliott Date: Wed, 17 Jan 2024 10:30:20 -0500 Subject: [PATCH 09/12] lint Signed-off-by: Joe Elliott --- integration/e2e/ca/ca.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/integration/e2e/ca/ca.go b/integration/e2e/ca/ca.go index 575ab445cb8..81f1bd8a8ec 100644 --- a/integration/e2e/ca/ca.go +++ b/integration/e2e/ca/ca.go @@ -92,7 +92,6 @@ func SetupCertificates(t *testing.T) KeyMaterial { require.NoError(t, err) _, err = io.Copy(dst, src2) require.NoError(t, err) - }() client1CertFile := filepath.Join(testCADir, "client-1.crt") @@ -179,7 +178,7 @@ func (ca *ca) writeCACertificate(path string) error { return err } - return writeExclusivePEMFile(path, "CERTIFICATE", 0644, derBytes) + return writeExclusivePEMFile(path, "CERTIFICATE", 0o644, derBytes) } 
func (ca *ca) writeCertificate(template *x509.Certificate, certPath string, keyPath string) error { @@ -193,7 +192,7 @@ func (ca *ca) writeCertificate(template *x509.Certificate, certPath string, keyP return err } - if err := writeExclusivePEMFile(keyPath, "PRIVATE KEY", 0600, keyBytes); err != nil { + if err := writeExclusivePEMFile(keyPath, "PRIVATE KEY", 0o600, keyBytes); err != nil { return err } @@ -209,5 +208,5 @@ func (ca *ca) writeCertificate(template *x509.Certificate, certPath string, keyP return err } - return writeExclusivePEMFile(certPath, "CERTIFICATE", 0644, derBytes) + return writeExclusivePEMFile(certPath, "CERTIFICATE", 0o644, derBytes) } From 3ed6a32cba561589f9dac6aad1df081a9d6b0a1e Mon Sep 17 00:00:00 2001 From: Joe Elliott Date: Wed, 17 Jan 2024 10:30:49 -0500 Subject: [PATCH 10/12] update servless mods Signed-off-by: Joe Elliott --- cmd/tempo-serverless/cloud-run/go.mod | 6 +++--- cmd/tempo-serverless/cloud-run/go.sum | 13 ++++++------- cmd/tempo-serverless/lambda/go.mod | 6 +++--- cmd/tempo-serverless/lambda/go.sum | 13 ++++++------- 4 files changed, 18 insertions(+), 20 deletions(-) diff --git a/cmd/tempo-serverless/cloud-run/go.mod b/cmd/tempo-serverless/cloud-run/go.mod index 0e522ea54e8..32cb321378e 100644 --- a/cmd/tempo-serverless/cloud-run/go.mod +++ b/cmd/tempo-serverless/cloud-run/go.mod @@ -62,8 +62,9 @@ require ( github.com/googleapis/gax-go/v2 v2.12.0 // indirect github.com/gorilla/handlers v1.5.1 // indirect github.com/gorilla/mux v1.8.1 // indirect - github.com/grafana/dskit v0.0.0-20231120170505-765e343eda4f // indirect + github.com/grafana/dskit v0.0.0-20240116202611-824e75a28ee8 // indirect github.com/grafana/gomemcache v0.0.0-20231023152154-6947259a0586 // indirect + github.com/grafana/pyroscope-go/godeltaprof v0.1.6 // indirect github.com/grafana/regexp v0.0.0-20221123153739-15dc172cd2db // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect @@ -72,7 +73,7 @@ 
require ( github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.17.2 // indirect + github.com/klauspost/compress v1.17.3 // indirect github.com/klauspost/cpuid/v2 v2.2.5 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/magiconair/properties v1.8.7 // indirect @@ -110,7 +111,6 @@ require ( github.com/segmentio/encoding v0.3.6 // indirect github.com/sercand/kuberesolver/v5 v5.1.1 // indirect github.com/sirupsen/logrus v1.9.3 // indirect - github.com/soheilhy/cmux v0.1.5 // indirect github.com/sony/gobreaker v0.4.1 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/spf13/afero v1.9.5 // indirect diff --git a/cmd/tempo-serverless/cloud-run/go.sum b/cmd/tempo-serverless/cloud-run/go.sum index 4b200b51743..96fe75eb57b 100644 --- a/cmd/tempo-serverless/cloud-run/go.sum +++ b/cmd/tempo-serverless/cloud-run/go.sum @@ -266,10 +266,12 @@ github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= -github.com/grafana/dskit v0.0.0-20231120170505-765e343eda4f h1:gyojr97YeWZ70pKNakWv5/tKwBHuLy3icnIeCo9gQr4= -github.com/grafana/dskit v0.0.0-20231120170505-765e343eda4f/go.mod h1:8dsy5tQOkeNQyjXpm5mQsbCu3H5uzeBD35MzRQFznKU= +github.com/grafana/dskit v0.0.0-20240116202611-824e75a28ee8 h1:qduBYOZAR5/RUO6yOlq1qYSw4tqeS3YeNxIHpQ4JIW8= +github.com/grafana/dskit v0.0.0-20240116202611-824e75a28ee8/go.mod h1:x5DMwyr1kyirtHOxoFSZ7RnyOgHdGh03ZruupdPetQM= github.com/grafana/gomemcache v0.0.0-20231023152154-6947259a0586 h1:/of8Z8taCPftShATouOrBVy6GaTTjgQd/VfNiZp/VXQ= github.com/grafana/gomemcache v0.0.0-20231023152154-6947259a0586/go.mod 
h1:PGk3RjYHpxMM8HFPhKKo+vve3DdlPUELZLSDEFehPuU= +github.com/grafana/pyroscope-go/godeltaprof v0.1.6 h1:nEdZ8louGAplSvIJi1HVp7kWvFvdiiYg3COLlTwJiFo= +github.com/grafana/pyroscope-go/godeltaprof v0.1.6/go.mod h1:Tk376Nbldo4Cha9RgiU7ik8WKFkNpfds98aUzS8omLE= github.com/grafana/regexp v0.0.0-20221123153739-15dc172cd2db h1:7aN5cccjIqCLTzedH7MZzRZt5/lsAHch6Z3L2ZGn5FA= github.com/grafana/regexp v0.0.0-20221123153739-15dc172cd2db/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= @@ -305,8 +307,8 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1 github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4= -github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.17.3 h1:qkRjuerhUU1EmXLYGkSH6EZL+vPSxIrYjLNAK4slzwA= +github.com/klauspost/compress v1.17.3/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= @@ -423,8 +425,6 @@ github.com/sercand/kuberesolver/v5 v5.1.1/go.mod h1:Fs1KbKhVRnB2aDWN12NjKCB+RgYM github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/soheilhy/cmux v0.1.5 
h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= -github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/sony/gobreaker v0.4.1 h1:oMnRNZXX5j85zso6xCPRNPtmAycat+WcoKbklScLDgQ= github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= @@ -589,7 +589,6 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= diff --git a/cmd/tempo-serverless/lambda/go.mod b/cmd/tempo-serverless/lambda/go.mod index 523c2f5b2fd..b91ba51811a 100644 --- a/cmd/tempo-serverless/lambda/go.mod +++ b/cmd/tempo-serverless/lambda/go.mod @@ -65,8 +65,9 @@ require ( github.com/googleapis/gax-go/v2 v2.12.0 // indirect github.com/gorilla/handlers v1.5.1 // indirect github.com/gorilla/mux v1.8.1 // indirect - github.com/grafana/dskit v0.0.0-20231120170505-765e343eda4f // indirect + github.com/grafana/dskit v0.0.0-20240116202611-824e75a28ee8 // indirect github.com/grafana/gomemcache v0.0.0-20231023152154-6947259a0586 // indirect + github.com/grafana/pyroscope-go/godeltaprof v0.1.6 // indirect github.com/grafana/regexp v0.0.0-20221123153739-15dc172cd2db 
// indirect github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect @@ -75,7 +76,7 @@ require ( github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.17.2 // indirect + github.com/klauspost/compress v1.17.3 // indirect github.com/klauspost/cpuid/v2 v2.2.5 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/magiconair/properties v1.8.7 // indirect @@ -114,7 +115,6 @@ require ( github.com/segmentio/encoding v0.3.6 // indirect github.com/sercand/kuberesolver/v5 v5.1.1 // indirect github.com/sirupsen/logrus v1.9.3 // indirect - github.com/soheilhy/cmux v0.1.5 // indirect github.com/sony/gobreaker v0.4.1 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/spf13/afero v1.9.5 // indirect diff --git a/cmd/tempo-serverless/lambda/go.sum b/cmd/tempo-serverless/lambda/go.sum index 33e8097d0d8..be969e1f0ce 100644 --- a/cmd/tempo-serverless/lambda/go.sum +++ b/cmd/tempo-serverless/lambda/go.sum @@ -270,10 +270,12 @@ github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= -github.com/grafana/dskit v0.0.0-20231120170505-765e343eda4f h1:gyojr97YeWZ70pKNakWv5/tKwBHuLy3icnIeCo9gQr4= -github.com/grafana/dskit v0.0.0-20231120170505-765e343eda4f/go.mod h1:8dsy5tQOkeNQyjXpm5mQsbCu3H5uzeBD35MzRQFznKU= +github.com/grafana/dskit v0.0.0-20240116202611-824e75a28ee8 h1:qduBYOZAR5/RUO6yOlq1qYSw4tqeS3YeNxIHpQ4JIW8= +github.com/grafana/dskit v0.0.0-20240116202611-824e75a28ee8/go.mod h1:x5DMwyr1kyirtHOxoFSZ7RnyOgHdGh03ZruupdPetQM= github.com/grafana/gomemcache v0.0.0-20231023152154-6947259a0586 
h1:/of8Z8taCPftShATouOrBVy6GaTTjgQd/VfNiZp/VXQ= github.com/grafana/gomemcache v0.0.0-20231023152154-6947259a0586/go.mod h1:PGk3RjYHpxMM8HFPhKKo+vve3DdlPUELZLSDEFehPuU= +github.com/grafana/pyroscope-go/godeltaprof v0.1.6 h1:nEdZ8louGAplSvIJi1HVp7kWvFvdiiYg3COLlTwJiFo= +github.com/grafana/pyroscope-go/godeltaprof v0.1.6/go.mod h1:Tk376Nbldo4Cha9RgiU7ik8WKFkNpfds98aUzS8omLE= github.com/grafana/regexp v0.0.0-20221123153739-15dc172cd2db h1:7aN5cccjIqCLTzedH7MZzRZt5/lsAHch6Z3L2ZGn5FA= github.com/grafana/regexp v0.0.0-20221123153739-15dc172cd2db/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= @@ -309,8 +311,8 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1 github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4= -github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.17.3 h1:qkRjuerhUU1EmXLYGkSH6EZL+vPSxIrYjLNAK4slzwA= +github.com/klauspost/compress v1.17.3/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= @@ -429,8 +431,6 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeV github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= 
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= -github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/sony/gobreaker v0.4.1 h1:oMnRNZXX5j85zso6xCPRNPtmAycat+WcoKbklScLDgQ= github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= @@ -597,7 +597,6 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= From 2e547e680c28b0deb1e0fbb414db81eb4cda4b55 Mon Sep 17 00:00:00 2001 From: Joe Elliott Date: Wed, 17 Jan 2024 10:48:44 -0500 Subject: [PATCH 11/12] satisfy interface updates Signed-off-by: Joe Elliott --- modules/distributor/distributor_test.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/modules/distributor/distributor_test.go b/modules/distributor/distributor_test.go index f7d7470cd38..5edbeb87ace 100644 --- a/modules/distributor/distributor_test.go +++ b/modules/distributor/distributor_test.go @@ -1506,3 +1506,7 @@ func (r mockRing) CleanupShuffleShardCache(string) { func (r 
mockRing) GetInstanceState(string) (ring.InstanceState, error) { return ring.ACTIVE, nil } + +func (r mockRing) GetTokenRangesForInstance(instanceID string) (ring.TokenRanges, error) { + return nil, nil +} From 67569ee9f033f7448c81699215970e8067b5534b Mon Sep 17 00:00:00 2001 From: Joe Elliott Date: Wed, 17 Jan 2024 10:52:09 -0500 Subject: [PATCH 12/12] lint Signed-off-by: Joe Elliott --- modules/distributor/distributor_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/distributor/distributor_test.go b/modules/distributor/distributor_test.go index 5edbeb87ace..8e4529936ee 100644 --- a/modules/distributor/distributor_test.go +++ b/modules/distributor/distributor_test.go @@ -1507,6 +1507,6 @@ func (r mockRing) GetInstanceState(string) (ring.InstanceState, error) { return ring.ACTIVE, nil } -func (r mockRing) GetTokenRangesForInstance(instanceID string) (ring.TokenRanges, error) { +func (r mockRing) GetTokenRangesForInstance(_ string) (ring.TokenRanges, error) { return nil, nil }